| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
steer | steer-master/latent_ode/lib/latent_ode.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import numpy as np
import sklearn as sk
#import gc
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from lib.utils import get_device
from lib.encoder_decoder import *
from lib.likelihood_eval import *
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.normal import Normal
from torch.distributions import kl_divergence, Independent
from lib.base_models import VAE_Baseline
class LatentODE(VAE_Baseline):
def __init__(self, input_dim, latent_dim, encoder_z0, decoder, diffeq_solver,
z0_prior, device, obsrv_std = None,
use_binary_classif = False, use_poisson_proc = False,
linear_classifier = False,
classif_per_tp = False,
n_labels = 1,
train_classif_w_reconstr = False):
super(LatentODE, self).__init__(
input_dim = input_dim, latent_dim = latent_dim,
z0_prior = z0_prior,
device = device, obsrv_std = obsrv_std,
use_binary_classif = use_binary_classif,
classif_per_tp = classif_per_tp,
linear_classifier = linear_classifier,
use_poisson_proc = use_poisson_proc,
n_labels = n_labels,
train_classif_w_reconstr = train_classif_w_reconstr)
self.encoder_z0 = encoder_z0
self.diffeq_solver = diffeq_solver
self.decoder = decoder
self.use_poisson_proc = use_poisson_proc
def get_reconstruction(self, time_steps_to_predict, truth, truth_time_steps,
mask = None, n_traj_samples = 1, run_backwards = True, mode = None):
if isinstance(self.encoder_z0, Encoder_z0_ODE_RNN) or \
isinstance(self.encoder_z0, Encoder_z0_RNN):
truth_w_mask = truth
if mask is not None:
truth_w_mask = torch.cat((truth, mask), -1)
first_point_mu, first_point_std = self.encoder_z0(
truth_w_mask, truth_time_steps, run_backwards = run_backwards)
means_z0 = first_point_mu.repeat(n_traj_samples, 1, 1)
sigma_z0 = first_point_std.repeat(n_traj_samples, 1, 1)
first_point_enc = utils.sample_standard_gaussian(means_z0, sigma_z0)
else:
raise Exception("Unknown encoder type {}".format(type(self.encoder_z0).__name__))
first_point_std = first_point_std.abs()
assert(torch.sum(first_point_std < 0) == 0.)
if self.use_poisson_proc:
n_traj_samples, n_traj, n_dims = first_point_enc.size()
# append a vector of zeros to compute the integral of lambda
zeros = torch.zeros([n_traj_samples, n_traj,self.input_dim]).to(get_device(truth))
first_point_enc_aug = torch.cat((first_point_enc, zeros), -1)
means_z0_aug = torch.cat((means_z0, zeros), -1)
else:
first_point_enc_aug = first_point_enc
means_z0_aug = means_z0
assert(not torch.isnan(time_steps_to_predict).any())
assert(not torch.isnan(first_point_enc).any())
assert(not torch.isnan(first_point_enc_aug).any())
# Shape of sol_y [n_traj_samples, n_samples, n_timepoints, n_latents]
sol_y = self.diffeq_solver(first_point_enc_aug, time_steps_to_predict)
if self.use_poisson_proc:
sol_y, log_lambda_y, int_lambda, _ = self.diffeq_solver.ode_func.extract_poisson_rate(sol_y)
assert(torch.sum(int_lambda[:,:,0,:]) == 0.)
assert(torch.sum(int_lambda[0,0,-1,:] <= 0) == 0.)
pred_x = self.decoder(sol_y)
all_extra_info = {
"first_point": (first_point_mu, first_point_std, first_point_enc),
"latent_traj": sol_y.detach()
}
if self.use_poisson_proc:
# integral of lambda from the last step of the ODE solver
all_extra_info["int_lambda"] = int_lambda[:,:,-1,:]
all_extra_info["log_lambda_y"] = log_lambda_y
if self.use_binary_classif:
if self.classif_per_tp:
all_extra_info["label_predictions"] = self.classifier(sol_y)
else:
all_extra_info["label_predictions"] = self.classifier(first_point_enc).squeeze(-1)
return pred_x, all_extra_info
def sample_traj_from_prior(self, time_steps_to_predict, n_traj_samples = 1):
# input_dim = starting_point.size()[-1]
# starting_point = starting_point.view(1,1,input_dim)
# Sample z0 from prior
starting_point_enc = self.z0_prior.sample([n_traj_samples, 1, self.latent_dim]).squeeze(-1)
starting_point_enc_aug = starting_point_enc
if self.use_poisson_proc:
n_traj_samples, n_traj, n_dims = starting_point_enc.size()
# append a vector of zeros to compute the integral of lambda
zeros = torch.zeros(n_traj_samples, n_traj,self.input_dim).to(self.device)
starting_point_enc_aug = torch.cat((starting_point_enc, zeros), -1)
sol_y = self.diffeq_solver.sample_traj_from_prior(starting_point_enc_aug, time_steps_to_predict,
n_traj_samples = n_traj_samples)
if self.use_poisson_proc:
sol_y, log_lambda_y, int_lambda, _ = self.diffeq_solver.ode_func.extract_poisson_rate(sol_y)
return self.decoder(sol_y)
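# Note (added): the class exposes two inference paths -- get_reconstruction()
# encodes the observed (t, x) pairs into q(z0), samples z0 with the
# reparameterization trick, solves the ODE at time_steps_to_predict and
# decodes, while sample_traj_from_prior() skips the encoder and decodes
# trajectories whose z0 is drawn from the prior instead.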
| 4,826 | 33.478571 | 99 | py |
steer | steer-master/latent_ode/lib/likelihood_eval.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import gc
import numpy as np
import sklearn as sk
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from lib.utils import get_device
from lib.encoder_decoder import *
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.normal import Normal
from torch.distributions import kl_divergence, Independent
def gaussian_log_likelihood(mu_2d, data_2d, obsrv_std, indices = None):
n_data_points = mu_2d.size()[-1]
if n_data_points > 0:
gaussian = Independent(Normal(loc = mu_2d, scale = obsrv_std.repeat(n_data_points)), 1)
log_prob = gaussian.log_prob(data_2d)
log_prob = log_prob / n_data_points
else:
log_prob = torch.zeros([1]).to(get_device(data_2d)).squeeze()
return log_prob
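# Illustrative self-check (added, not part of the original module): for a
# diagonal Gaussian, Independent(Normal(mu, std), 1).log_prob sums the
# per-dimension log-densities, and the division above turns that sum into a
# per-data-point average.
def _demo_gaussian_log_likelihood():
    mu = torch.zeros(2, 3)
    data = torch.zeros(2, 3)
    obsrv_std = torch.Tensor([0.01])
    avg_log_prob = gaussian_log_likelihood(mu, data, obsrv_std)
    # each entry equals the log-density of a single N(0, 0.01) draw at its mean
    expected = Normal(0., 0.01).log_prob(torch.tensor(0.))
    assert torch.allclose(avg_log_prob, expected.expand_as(avg_log_prob))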
def poisson_log_likelihood(masked_log_lambdas, masked_data, indices, int_lambdas):
# masked_log_lambdas and masked_data
n_data_points = masked_data.size()[-1]
if n_data_points > 0:
log_prob = torch.sum(masked_log_lambdas) - int_lambdas[indices]
#log_prob = log_prob / n_data_points
else:
log_prob = torch.zeros([1]).to(get_device(masked_data)).squeeze()
return log_prob
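# Background (added): for an inhomogeneous Poisson process with rate lambda(t),
# the log-likelihood of events at t_1..t_n on [0, T] is
# sum_i log lambda(t_i) - int_0^T lambda(t) dt, which is exactly what is
# assembled above from the masked log-rates and the integral term that the
# ODE solver carries along as an extra state dimension.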
def compute_binary_CE_loss(label_predictions, mortality_label):
#print("Computing binary classification loss: compute_CE_loss")
mortality_label = mortality_label.reshape(-1)
if len(label_predictions.size()) == 1:
label_predictions = label_predictions.unsqueeze(0)
n_traj_samples = label_predictions.size(0)
label_predictions = label_predictions.reshape(n_traj_samples, -1)
idx_not_nan = ~torch.isnan(mortality_label)
if torch.sum(idx_not_nan) == 0:
print("All labels are NaNs!")
return torch.tensor(0.).to(get_device(mortality_label))
label_predictions = label_predictions[:,idx_not_nan]
mortality_label = mortality_label[idx_not_nan]
if torch.sum(mortality_label == 0.) == 0 or torch.sum(mortality_label == 1.) == 0:
print("Warning: all examples in a batch belong to the same class -- please increase the batch size.")
assert(not torch.isnan(label_predictions).any())
assert(not torch.isnan(mortality_label).any())
# For each trajectory, we get n_traj_samples samples from z0 -- compute loss on all of them
mortality_label = mortality_label.repeat(n_traj_samples, 1)
ce_loss = nn.BCEWithLogitsLoss()(label_predictions, mortality_label)
# divide by number of patients in a batch
ce_loss = ce_loss / n_traj_samples
return ce_loss
def compute_multiclass_CE_loss(label_predictions, true_label, mask):
#print("Computing multi-class classification loss: compute_multiclass_CE_loss")
if (len(label_predictions.size()) == 3):
label_predictions = label_predictions.unsqueeze(0)
n_traj_samples, n_traj, n_tp, n_dims = label_predictions.size()
# assert(not torch.isnan(label_predictions).any())
# assert(not torch.isnan(true_label).any())
# For each trajectory, we get n_traj_samples samples from z0 -- compute loss on all of them
true_label = true_label.repeat(n_traj_samples, 1, 1)
label_predictions = label_predictions.reshape(n_traj_samples * n_traj * n_tp, n_dims)
true_label = true_label.reshape(n_traj_samples * n_traj * n_tp, n_dims)
# choose time points with at least one measurement
mask = torch.sum(mask, -1) > 0
# repeat the mask for each label to mark that the label for this time point is present
pred_mask = mask.repeat(n_dims, 1,1).permute(1,2,0)
label_mask = mask
pred_mask = pred_mask.repeat(n_traj_samples,1,1,1)
label_mask = label_mask.repeat(n_traj_samples,1,1,1)
pred_mask = pred_mask.reshape(n_traj_samples * n_traj * n_tp, n_dims)
label_mask = label_mask.reshape(n_traj_samples * n_traj * n_tp, 1)
if (label_predictions.size(-1) > 1) and (true_label.size(-1) > 1):
assert(label_predictions.size(-1) == true_label.size(-1))
# targets are in one-hot encoding -- convert to indices
_, true_label = true_label.max(-1)
res = []
for i in range(true_label.size(0)):
pred_masked = torch.masked_select(label_predictions[i], pred_mask[i].bool())
labels = torch.masked_select(true_label[i], label_mask[i].bool())
pred_masked = pred_masked.reshape(-1, n_dims)
if (len(labels) == 0):
continue
ce_loss = nn.CrossEntropyLoss()(pred_masked, labels.long())
res.append(ce_loss)
ce_loss = torch.stack(res, 0).to(get_device(label_predictions))
ce_loss = torch.mean(ce_loss)
# # divide by number of patients in a batch
# ce_loss = ce_loss / n_traj_samples
return ce_loss
def compute_masked_likelihood(mu, data, mask, likelihood_func):
# Compute the likelihood per patient and per attribute so that we don't prioritize patients with more measurements
n_traj_samples, n_traj, n_timepoints, n_dims = data.size()
res = []
for i in range(n_traj_samples):
for k in range(n_traj):
for j in range(n_dims):
data_masked = torch.masked_select(data[i,k,:,j], mask[i,k,:,j].bool())
#assert(torch.sum(data_masked == 0.) < 10)
mu_masked = torch.masked_select(mu[i,k,:,j], mask[i,k,:,j].bool())
log_prob = likelihood_func(mu_masked, data_masked, indices = (i,k,j))
res.append(log_prob)
# shape: [n_traj_samples * n_traj * n_dims]
res = torch.stack(res, 0).to(get_device(data))
res = res.reshape((n_traj_samples, n_traj, n_dims))
# Take mean over the number of dimensions
res = torch.mean(res, -1)
res = res.transpose(0,1)
return res
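# Illustrative sketch (added): torch.masked_select flattens its input, so each
# per-(sample, trajectory, attribute) slice above becomes a 1-d vector holding
# only the observed entries.
def _demo_masked_select():
    x = torch.tensor([1., 2., 3., 4.])
    m = torch.tensor([1., 0., 1., 0.])
    assert torch.equal(torch.masked_select(x, m.bool()), torch.tensor([1., 3.]))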
def masked_gaussian_log_density(mu, data, obsrv_std, mask = None):
# these cases are for plotting through plot_estim_density
if (len(mu.size()) == 3):
# add additional dimension for gp samples
mu = mu.unsqueeze(0)
if (len(data.size()) == 2):
# add additional dimension for gp samples and time step
data = data.unsqueeze(0).unsqueeze(2)
elif (len(data.size()) == 3):
# add additional dimension for gp samples
data = data.unsqueeze(0)
n_traj_samples, n_traj, n_timepoints, n_dims = mu.size()
assert(data.size()[-1] == n_dims)
# Shape after permutation: [n_traj, n_traj_samples, n_timepoints, n_dims]
if mask is None:
mu_flat = mu.reshape(n_traj_samples*n_traj, n_timepoints * n_dims)
n_traj_samples, n_traj, n_timepoints, n_dims = data.size()
data_flat = data.reshape(n_traj_samples*n_traj, n_timepoints * n_dims)
res = gaussian_log_likelihood(mu_flat, data_flat, obsrv_std)
res = res.reshape(n_traj_samples, n_traj).transpose(0,1)
else:
# Compute the likelihood per patient so that we don't prioritize patients with more measurements
func = lambda mu, data, indices: gaussian_log_likelihood(mu, data, obsrv_std = obsrv_std, indices = indices)
res = compute_masked_likelihood(mu, data, mask, func)
return res
def mse(mu, data, indices = None):
n_data_points = mu.size()[-1]
if n_data_points > 0:
mse = nn.MSELoss()(mu, data)
else:
mse = torch.zeros([1]).to(get_device(data)).squeeze()
return mse
def compute_mse(mu, data, mask = None):
# these cases are for plotting through plot_estim_density
if (len(mu.size()) == 3):
# add additional dimension for gp samples
mu = mu.unsqueeze(0)
if (len(data.size()) == 2):
# add additional dimension for gp samples and time step
data = data.unsqueeze(0).unsqueeze(2)
elif (len(data.size()) == 3):
# add additional dimension for gp samples
data = data.unsqueeze(0)
n_traj_samples, n_traj, n_timepoints, n_dims = mu.size()
assert(data.size()[-1] == n_dims)
# Shape after permutation: [n_traj, n_traj_samples, n_timepoints, n_dims]
if mask is None:
mu_flat = mu.reshape(n_traj_samples*n_traj, n_timepoints * n_dims)
n_traj_samples, n_traj, n_timepoints, n_dims = data.size()
data_flat = data.reshape(n_traj_samples*n_traj, n_timepoints * n_dims)
res = mse(mu_flat, data_flat)
else:
# Compute the likelihood per patient so that we don't prioritize patients with more measurements
res = compute_masked_likelihood(mu, data, mask, mse)
return res
def compute_poisson_proc_likelihood(truth, pred_y, info, mask = None):
# Compute Poisson likelihood
# https://math.stackexchange.com/questions/344487/log-likelihood-of-a-realization-of-a-poisson-process
# Sum log lambdas across all time points
if mask is None:
poisson_log_l = torch.sum(info["log_lambda_y"], 2) - info["int_lambda"]
# Mean over data dims
poisson_log_l = torch.mean(poisson_log_l, -1)
else:
# Compute likelihood of the data under the predictions
truth_repeated = truth.repeat(pred_y.size(0), 1, 1, 1)
mask_repeated = mask.repeat(pred_y.size(0), 1, 1, 1)
# Compute the likelihood per patient and per attribute so that we don't prioritize patients with more measurements
int_lambda = info["int_lambda"]
f = lambda log_lam, data, indices: poisson_log_likelihood(log_lam, data, indices, int_lambda)
poisson_log_l = compute_masked_likelihood(info["log_lambda_y"], truth_repeated, mask_repeated, f)
poisson_log_l = poisson_log_l.permute(1,0)
# Take mean over n_traj
#poisson_log_l = torch.mean(poisson_log_l, 1)
# poisson_log_l shape: [n_traj_samples, n_traj]
return poisson_log_l
| 9,166 | 33.592453 | 114 | py |
steer | steer-master/latent_ode/lib/utils.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import os
import logging
import pickle
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import math
import glob
import re
from shutil import copyfile
import sklearn as sk
import subprocess
import datetime
def makedirs(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
def save_checkpoint(state, save, epoch):
if not os.path.exists(save):
os.makedirs(save)
filename = os.path.join(save, 'checkpt-%04d.pth' % epoch)
torch.save(state, filename)
def get_logger(logpath, filepath, package_files=[],
displaying=True, saving=True, debug=False):
logger = logging.getLogger()
if debug:
level = logging.DEBUG
else:
level = logging.INFO
logger.setLevel(level)
if saving:
info_file_handler = logging.FileHandler(logpath, mode='w')
info_file_handler.setLevel(level)
logger.addHandler(info_file_handler)
if displaying:
console_handler = logging.StreamHandler()
console_handler.setLevel(level)
logger.addHandler(console_handler)
logger.info(filepath)
for f in package_files:
logger.info(f)
with open(f, 'r') as package_f:
logger.info(package_f.read())
return logger
def inf_generator(iterable):
"""Allows training with DataLoaders in a single infinite loop:
for i, (x, y) in enumerate(inf_generator(train_loader)):
"""
iterator = iterable.__iter__()
while True:
try:
yield iterator.__next__()
except StopIteration:
iterator = iterable.__iter__()
def dump_pickle(data, filename):
with open(filename, 'wb') as pkl_file:
pickle.dump(data, pkl_file)
def load_pickle(filename):
with open(filename, 'rb') as pkl_file:
filecontent = pickle.load(pkl_file)
return filecontent
def make_dataset(dataset_type = "spiral",**kwargs):
if dataset_type == "spiral":
data_path = "data/spirals.pickle"
dataset = load_pickle(data_path)["dataset"]
chiralities = load_pickle(data_path)["chiralities"]
elif dataset_type == "chiralspiral":
data_path = "data/chiral-spirals.pickle"
dataset = load_pickle(data_path)["dataset"]
chiralities = load_pickle(data_path)["chiralities"]
else:
raise Exception("Unknown dataset type " + dataset_type)
return dataset, chiralities
def split_last_dim(data):
last_dim = data.size()[-1]
last_dim = last_dim//2
if len(data.size()) == 3:
res = data[:,:,:last_dim], data[:,:,last_dim:]
if len(data.size()) == 2:
res = data[:,:last_dim], data[:,last_dim:]
return res
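# Example (added): split_last_dim(torch.zeros(4, 10)) returns two tensors of
# shape [4, 5] -- the convention used throughout this codebase to unpack
# concatenated (mean, std) network outputs.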
def init_network_weights(net, std = 0.1):
for m in net.modules():
if isinstance(m, nn.Linear):
nn.init.normal_(m.weight, mean=0, std=std)
nn.init.constant_(m.bias, val=0)
def flatten(x, dim):
return x.reshape(x.size()[:dim] + (-1, ))
def subsample_timepoints(data, time_steps, mask, n_tp_to_sample = None):
# n_tp_to_sample: number of time points to subsample. If not None, sample exactly n_tp_to_sample points
if n_tp_to_sample is None:
return data, time_steps, mask
n_tp_in_batch = len(time_steps)
if n_tp_to_sample > 1:
# Subsample exact number of points
assert(n_tp_to_sample <= n_tp_in_batch)
n_tp_to_sample = int(n_tp_to_sample)
for i in range(data.size(0)):
missing_idx = sorted(np.random.choice(np.arange(n_tp_in_batch), n_tp_in_batch - n_tp_to_sample, replace = False))
data[i, missing_idx] = 0.
if mask is not None:
mask[i, missing_idx] = 0.
elif (n_tp_to_sample <= 1) and (n_tp_to_sample > 0):
# Subsample percentage of points from each time series
percentage_tp_to_sample = n_tp_to_sample
for i in range(data.size(0)):
# take mask for current training sample and sum over all features -- figure out which time points don't have any measurements at all in this batch
current_mask = mask[i].sum(-1).cpu()
non_missing_tp = np.where(current_mask > 0)[0]
n_tp_current = len(non_missing_tp)
n_to_sample = int(n_tp_current * percentage_tp_to_sample)
subsampled_idx = sorted(np.random.choice(non_missing_tp, n_to_sample, replace = False))
tp_to_set_to_zero = np.setdiff1d(non_missing_tp, subsampled_idx)
data[i, tp_to_set_to_zero] = 0.
if mask is not None:
mask[i, tp_to_set_to_zero] = 0.
return data, time_steps, mask
def cut_out_timepoints(data, time_steps, mask, n_points_to_cut = None):
# n_points_to_cut: number of consecutive time points to cut out
if n_points_to_cut is None:
return data, time_steps, mask
n_tp_in_batch = len(time_steps)
if n_points_to_cut < 1:
raise Exception("Number of time points to cut out must be at least 1")
assert(n_points_to_cut <= n_tp_in_batch)
n_points_to_cut = int(n_points_to_cut)
for i in range(data.size(0)):
start = np.random.choice(np.arange(5, n_tp_in_batch - n_points_to_cut-5), replace = False)
data[i, start : (start + n_points_to_cut)] = 0.
if mask is not None:
mask[i, start : (start + n_points_to_cut)] = 0.
return data, time_steps, mask
def get_device(tensor):
device = torch.device("cpu")
if tensor.is_cuda:
device = tensor.get_device()
return device
def sample_standard_gaussian(mu, sigma):
device = get_device(mu)
d = torch.distributions.normal.Normal(torch.Tensor([0.]).to(device), torch.Tensor([1.]).to(device))
r = d.sample(mu.size()).squeeze(-1)
return r * sigma.float() + mu.float()
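# Note (added): this is the reparameterization trick -- drawing eps ~ N(0, 1)
# and returning eps * sigma + mu keeps the sample differentiable w.r.t. mu and
# sigma. A minimal sanity check (the tolerance is an arbitrary illustration):
def _demo_sample_standard_gaussian():
    mu = torch.full((1000, 2), 3.)
    sigma = torch.full((1000, 2), 0.5)
    z = sample_standard_gaussian(mu, sigma)
    assert z.shape == mu.shape
    assert abs(z.mean().item() - 3.) < 0.1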
def split_train_test(data, train_fraq = 0.8):
n_samples = data.size(0)
data_train = data[:int(n_samples * train_fraq)]
data_test = data[int(n_samples * train_fraq):]
return data_train, data_test
def split_train_test_data_and_time(data, time_steps, train_fraq = 0.8):
n_samples = data.size(0)
data_train = data[:int(n_samples * train_fraq)]
data_test = data[int(n_samples * train_fraq):]
assert(len(time_steps.size()) == 2)
train_time_steps = time_steps[:, :int(n_samples * train_fraq)]
test_time_steps = time_steps[:, int(n_samples * train_fraq):]
return data_train, data_test, train_time_steps, test_time_steps
def get_next_batch(dataloader):
# Make the union of all time points and perform normalization across the whole dataset
data_dict = dataloader.__next__()
batch_dict = get_dict_template()
# remove the time points where there are no observations in this batch
non_missing_tp = torch.sum(data_dict["observed_data"],(0,2)) != 0.
batch_dict["observed_data"] = data_dict["observed_data"][:, non_missing_tp]
batch_dict["observed_tp"] = data_dict["observed_tp"][non_missing_tp]
# print("observed data")
# print(batch_dict["observed_data"].size())
if ("observed_mask" in data_dict) and (data_dict["observed_mask"] is not None):
batch_dict["observed_mask"] = data_dict["observed_mask"][:, non_missing_tp]
batch_dict[ "data_to_predict"] = data_dict["data_to_predict"]
batch_dict["tp_to_predict"] = data_dict["tp_to_predict"]
non_missing_tp = torch.sum(data_dict["data_to_predict"],(0,2)) != 0.
batch_dict["data_to_predict"] = data_dict["data_to_predict"][:, non_missing_tp]
batch_dict["tp_to_predict"] = data_dict["tp_to_predict"][non_missing_tp]
# print("data_to_predict")
# print(batch_dict["data_to_predict"].size())
if ("mask_predicted_data" in data_dict) and (data_dict["mask_predicted_data"] is not None):
batch_dict["mask_predicted_data"] = data_dict["mask_predicted_data"][:, non_missing_tp]
if ("labels" in data_dict) and (data_dict["labels"] is not None):
batch_dict["labels"] = data_dict["labels"]
batch_dict["mode"] = data_dict["mode"]
return batch_dict
def get_ckpt_model(ckpt_path, model, device):
if not os.path.exists(ckpt_path):
raise Exception("Checkpoint " + ckpt_path + " does not exist.")
# Load checkpoint.
checkpt = torch.load(ckpt_path)
ckpt_args = checkpt['args']
state_dict = checkpt['state_dict']
model_dict = model.state_dict()
# 1. filter out unnecessary keys
state_dict = {k: v for k, v in state_dict.items() if k in model_dict}
# 2. overwrite entries in the existing state dict
model_dict.update(state_dict)
# 3. load the new state dict
model.load_state_dict(model_dict)
model.to(device)
def update_learning_rate(optimizer, decay_rate = 0.999, lowest = 1e-3):
for param_group in optimizer.param_groups:
lr = param_group['lr']
lr = max(lr * decay_rate, lowest)
param_group['lr'] = lr
def linspace_vector(start, end, n_points):
# start is either one value or a vector
size = np.prod(start.size())
assert(start.size() == end.size())
if size == 1:
# start and end are 1d-tensors
res = torch.linspace(float(start), float(end), n_points)
else:
# start and end are vectors
res = torch.Tensor()
for i in range(0, start.size(0)):
res = torch.cat((res,
torch.linspace(float(start[i]), float(end[i]), n_points)),0)
res = torch.t(res.reshape(start.size(0), n_points))
return res
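# Example (added): for vector endpoints each coordinate is interpolated
# independently and the result has shape [n_points, dim].
def _demo_linspace_vector():
    res = linspace_vector(torch.tensor([0., 10.]), torch.tensor([1., 20.]), 3)
    assert torch.allclose(res, torch.tensor([[0., 10.], [0.5, 15.], [1., 20.]]))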
def reverse(tensor):
idx = [i for i in range(tensor.size(0)-1, -1, -1)]
return tensor[idx]
def create_net(n_inputs, n_outputs, n_layers = 1,
n_units = 100, nonlinear = nn.Tanh):
layers = [nn.Linear(n_inputs, n_units)]
for i in range(n_layers):
layers.append(nonlinear())
layers.append(nn.Linear(n_units, n_units))
layers.append(nonlinear())
layers.append(nn.Linear(n_units, n_outputs))
return nn.Sequential(*layers)
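# Usage sketch (added; the sizes are arbitrary illustration): a one-hidden-layer
# tanh MLP mapping 5 inputs to 3 outputs.
def _demo_create_net():
    net = create_net(n_inputs=5, n_outputs=3, n_layers=1, n_units=64)
    y = net(torch.randn(8, 5))
    assert y.shape == (8, 3)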
def get_item_from_pickle(pickle_file, item_name):
from_pickle = load_pickle(pickle_file)
if item_name in from_pickle:
return from_pickle[item_name]
return None
def get_dict_template():
return {"observed_data": None,
"observed_tp": None,
"data_to_predict": None,
"tp_to_predict": None,
"observed_mask": None,
"mask_predicted_data": None,
"labels": None
}
def normalize_data(data):
reshaped = data.reshape(-1, data.size(-1))
att_min = torch.min(reshaped, 0)[0]
att_max = torch.max(reshaped, 0)[0]
# we don't want to divide by zero
att_max[ att_max == 0.] = 1.
if (att_max != 0.).all():
data_norm = (data - att_min) / att_max
else:
raise Exception("Zero!")
if torch.isnan(data_norm).any():
raise Exception("nans!")
return data_norm, att_min, att_max
def normalize_masked_data(data, mask, att_min, att_max):
# we don't want to divide by zero
att_max[ att_max == 0.] = 1.
if (att_max != 0.).all():
data_norm = (data - att_min) / att_max
else:
raise Exception("Zero!")
if torch.isnan(data_norm).any():
raise Exception("nans!")
# set masked out elements back to zero
data_norm[mask == 0] = 0
return data_norm, att_min, att_max
def shift_outputs(outputs, first_datapoint = None):
outputs = outputs[:,:,:-1,:]
if first_datapoint is not None:
n_traj, n_dims = first_datapoint.size()
first_datapoint = first_datapoint.reshape(1, n_traj, 1, n_dims)
outputs = torch.cat((first_datapoint, outputs), 2)
return outputs
def split_data_extrap(data_dict, dataset = ""):
device = get_device(data_dict["data"])
n_observed_tp = data_dict["data"].size(1) // 2
if dataset == "hopper":
n_observed_tp = data_dict["data"].size(1) // 3
split_dict = {"observed_data": data_dict["data"][:,:n_observed_tp,:].clone(),
"observed_tp": data_dict["time_steps"][:n_observed_tp].clone(),
"data_to_predict": data_dict["data"][:,n_observed_tp:,:].clone(),
"tp_to_predict": data_dict["time_steps"][n_observed_tp:].clone()}
split_dict["observed_mask"] = None
split_dict["mask_predicted_data"] = None
split_dict["labels"] = None
if ("mask" in data_dict) and (data_dict["mask"] is not None):
split_dict["observed_mask"] = data_dict["mask"][:, :n_observed_tp].clone()
split_dict["mask_predicted_data"] = data_dict["mask"][:, n_observed_tp:].clone()
if ("labels" in data_dict) and (data_dict["labels"] is not None):
split_dict["labels"] = data_dict["labels"].clone()
split_dict["mode"] = "extrap"
return split_dict
def split_data_interp(data_dict):
device = get_device(data_dict["data"])
split_dict = {"observed_data": data_dict["data"].clone(),
"observed_tp": data_dict["time_steps"].clone(),
"data_to_predict": data_dict["data"].clone(),
"tp_to_predict": data_dict["time_steps"].clone()}
split_dict["observed_mask"] = None
split_dict["mask_predicted_data"] = None
split_dict["labels"] = None
if "mask" in data_dict and data_dict["mask"] is not None:
split_dict["observed_mask"] = data_dict["mask"].clone()
split_dict["mask_predicted_data"] = data_dict["mask"].clone()
if ("labels" in data_dict) and (data_dict["labels"] is not None):
split_dict["labels"] = data_dict["labels"].clone()
split_dict["mode"] = "interp"
return split_dict
def add_mask(data_dict):
data = data_dict["observed_data"]
mask = data_dict["observed_mask"]
if mask is None:
mask = torch.ones_like(data).to(get_device(data))
data_dict["observed_mask"] = mask
return data_dict
def subsample_observed_data(data_dict, n_tp_to_sample = None, n_points_to_cut = None):
# n_tp_to_sample -- if not None, randomly subsample the time points. The resulting timeline has n_tp_to_sample points
# n_points_to_cut -- if not None, cut out consecutive points on the timeline. The resulting timeline has (N - n_points_to_cut) points
if n_tp_to_sample is not None:
# Randomly subsample time points
data, time_steps, mask = subsample_timepoints(
data_dict["observed_data"].clone(),
time_steps = data_dict["observed_tp"].clone(),
mask = (data_dict["observed_mask"].clone() if data_dict["observed_mask"] is not None else None),
n_tp_to_sample = n_tp_to_sample)
if n_points_to_cut is not None:
# Remove consecutive time points
data, time_steps, mask = cut_out_timepoints(
data_dict["observed_data"].clone(),
time_steps = data_dict["observed_tp"].clone(),
mask = (data_dict["observed_mask"].clone() if data_dict["observed_mask"] is not None else None),
n_points_to_cut = n_points_to_cut)
new_data_dict = {}
for key in data_dict.keys():
new_data_dict[key] = data_dict[key]
new_data_dict["observed_data"] = data.clone()
new_data_dict["observed_tp"] = time_steps.clone()
new_data_dict["observed_mask"] = mask.clone()
if n_points_to_cut is not None:
# Cut the section in the data to predict as well
# Used only for the demo on the periodic function
new_data_dict["data_to_predict"] = data.clone()
new_data_dict["tp_to_predict"] = time_steps.clone()
new_data_dict["mask_predicted_data"] = mask.clone()
return new_data_dict
def split_and_subsample_batch(data_dict, args, data_type = "train"):
if data_type == "train":
# Training set
if args.extrap:
processed_dict = split_data_extrap(data_dict, dataset = args.dataset)
else:
processed_dict = split_data_interp(data_dict)
else:
# Test set
if args.extrap:
processed_dict = split_data_extrap(data_dict, dataset = args.dataset)
else:
processed_dict = split_data_interp(data_dict)
# add mask
processed_dict = add_mask(processed_dict)
# Subsample points or cut out the whole section of the timeline
if (args.sample_tp is not None) or (args.cut_tp is not None):
processed_dict = subsample_observed_data(processed_dict,
n_tp_to_sample = args.sample_tp,
n_points_to_cut = args.cut_tp)
# if (args.sample_tp is not None):
# processed_dict = subsample_observed_data(processed_dict,
# n_tp_to_sample = args.sample_tp)
return processed_dict
def compute_loss_all_batches(model,
test_dataloader, args,
n_batches, experimentID, device,
n_traj_samples = 1, kl_coef = 1.,
max_samples_for_eval = None):
total = {}
total["loss"] = 0
total["likelihood"] = 0
total["mse"] = 0
total["kl_first_p"] = 0
total["std_first_p"] = 0
total["pois_likelihood"] = 0
total["ce_loss"] = 0
n_test_batches = 0
classif_predictions = torch.Tensor([]).to(device)
all_test_labels = torch.Tensor([]).to(device)
for i in range(n_batches):
print("Computing loss... " + str(i))
batch_dict = get_next_batch(test_dataloader)
results = model.compute_all_losses(batch_dict,
n_traj_samples = n_traj_samples, kl_coef = kl_coef)
if args.classif:
n_labels = model.n_labels #batch_dict["labels"].size(-1)
n_traj_samples = results["label_predictions"].size(0)
classif_predictions = torch.cat((classif_predictions,
results["label_predictions"].reshape(n_traj_samples, -1, n_labels)),1)
all_test_labels = torch.cat((all_test_labels,
batch_dict["labels"].reshape(-1, n_labels)),0)
for key in total.keys():
if key in results:
var = results[key]
if isinstance(var, torch.Tensor):
var = var.detach()
total[key] += var
n_test_batches += 1
# for speed
if max_samples_for_eval is not None:
batch_size = batch_dict["observed_data"].size(0)
if n_test_batches * batch_size >= max_samples_for_eval:
break
if n_test_batches > 0:
for key, value in total.items():
total[key] = total[key] / n_test_batches
if args.classif:
if args.dataset == "physionet":
#all_test_labels = all_test_labels.reshape(-1)
# For each trajectory, we get n_traj_samples samples from z0 -- compute loss on all of them
all_test_labels = all_test_labels.repeat(n_traj_samples,1,1)
idx_not_nan = ~torch.isnan(all_test_labels)
classif_predictions = classif_predictions[idx_not_nan]
all_test_labels = all_test_labels[idx_not_nan]
dirname = "plots/" + str(experimentID) + "/"
os.makedirs(dirname, exist_ok=True)
total["auc"] = 0.
if torch.sum(all_test_labels) != 0.:
print("Number of labeled examples: {}".format(len(all_test_labels.reshape(-1))))
print("Number of examples with mortality 1: {}".format(torch.sum(all_test_labels == 1.)))
# Cannot compute AUC with only 1 class
total["auc"] = sk.metrics.roc_auc_score(all_test_labels.cpu().numpy().reshape(-1),
classif_predictions.cpu().numpy().reshape(-1))
else:
print("Warning: Couldn't compute AUC -- all examples are from the same class")
if args.dataset == "activity":
all_test_labels = all_test_labels.repeat(n_traj_samples,1,1)
labeled_tp = torch.sum(all_test_labels, -1) > 0.
all_test_labels = all_test_labels[labeled_tp]
classif_predictions = classif_predictions[labeled_tp]
# classif_predictions and all_test_labels are in one-hot encoding -- convert to class ids
_, pred_class_id = torch.max(classif_predictions, -1)
_, class_labels = torch.max(all_test_labels, -1)
pred_class_id = pred_class_id.reshape(-1)
total["accuracy"] = sk.metrics.accuracy_score(
class_labels.cpu().numpy(),
pred_class_id.cpu().numpy())
return total
def check_mask(data, mask):
#check that "mask" argument indeed contains a mask for data
n_zeros = torch.sum(mask == 0.).cpu().numpy()
n_ones = torch.sum(mask == 1.).cpu().numpy()
# mask should contain only zeros and ones
assert((n_zeros + n_ones) == np.prod(list(mask.size())))
# all masked out elements should be zeros
assert(torch.sum(data[mask == 0.] != 0.) == 0)
| 18,626 | 28.660828 | 149 | py |
steer | steer-master/latent_ode/lib/encoder_decoder.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from torch.distributions import Categorical, Normal
import lib.utils as utils
from torch.nn.modules.rnn import LSTM, GRU
from lib.utils import get_device
# GRU description:
# http://www.wildml.com/2015/10/recurrent-neural-network-tutorial-part-4-implementing-a-grulstm-rnn-with-python-and-theano/
class GRU_unit(nn.Module):
def __init__(self, latent_dim, input_dim,
update_gate = None,
reset_gate = None,
new_state_net = None,
n_units = 100,
device = torch.device("cpu")):
super(GRU_unit, self).__init__()
if update_gate is None:
self.update_gate = nn.Sequential(
nn.Linear(latent_dim * 2 + input_dim, n_units),
nn.Tanh(),
nn.Linear(n_units, latent_dim),
nn.Sigmoid())
utils.init_network_weights(self.update_gate)
else:
self.update_gate = update_gate
if reset_gate is None:
self.reset_gate = nn.Sequential(
nn.Linear(latent_dim * 2 + input_dim, n_units),
nn.Tanh(),
nn.Linear(n_units, latent_dim),
nn.Sigmoid())
utils.init_network_weights(self.reset_gate)
else:
self.reset_gate = reset_gate
if new_state_net is None:
self.new_state_net = nn.Sequential(
nn.Linear(latent_dim * 2 + input_dim, n_units),
nn.Tanh(),
nn.Linear(n_units, latent_dim * 2))
utils.init_network_weights(self.new_state_net)
else:
self.new_state_net = new_state_net
def forward(self, y_mean, y_std, x, masked_update = True):
y_concat = torch.cat([y_mean, y_std, x], -1)
update_gate = self.update_gate(y_concat)
reset_gate = self.reset_gate(y_concat)
concat = torch.cat([y_mean * reset_gate, y_std * reset_gate, x], -1)
new_state, new_state_std = utils.split_last_dim(self.new_state_net(concat))
new_state_std = new_state_std.abs()
new_y = (1-update_gate) * new_state + update_gate * y_mean
new_y_std = (1-update_gate) * new_state_std + update_gate * y_std
assert(not torch.isnan(new_y).any())
if masked_update:
# IMPORTANT: assumes that x contains both data and mask
# update the hidden state only if at least one feature is present for the current time point
n_data_dims = x.size(-1)//2
mask = x[:, :, n_data_dims:]
utils.check_mask(x[:, :, :n_data_dims], mask)
mask = (torch.sum(mask, -1, keepdim = True) > 0).float()
assert(not torch.isnan(mask).any())
new_y = mask * new_y + (1-mask) * y_mean
new_y_std = mask * new_y_std + (1-mask) * y_std
if torch.isnan(new_y).any():
print("new_y is nan!")
print(mask)
print(y_mean)
print(new_state)
exit()
new_y_std = new_y_std.abs()
return new_y, new_y_std
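# Note (added): unlike a standard GRU cell, this update maintains a mean and a
# std of the hidden state rather than a point estimate, and the masked update
# leaves (y_mean, y_std) unchanged at time points where no feature is observed.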
class Encoder_z0_RNN(nn.Module):
def __init__(self, latent_dim, input_dim, lstm_output_size = 20,
use_delta_t = True, device = torch.device("cpu")):
super(Encoder_z0_RNN, self).__init__()
self.gru_rnn_output_size = lstm_output_size
self.latent_dim = latent_dim
self.input_dim = input_dim
self.device = device
self.use_delta_t = use_delta_t
self.hiddens_to_z0 = nn.Sequential(
nn.Linear(self.gru_rnn_output_size, 50),
nn.Tanh(),
nn.Linear(50, latent_dim * 2),)
utils.init_network_weights(self.hiddens_to_z0)
if use_delta_t:
self.input_dim += 1
self.gru_rnn = GRU(self.input_dim, self.gru_rnn_output_size).to(device)
def forward(self, data, time_steps, run_backwards = True):
# IMPORTANT: assumes that 'data' already has mask concatenated to it
# data shape: [n_traj, n_tp, n_dims]
# shape required for rnn: (seq_len, batch, input_size)
# t0: not used here
n_traj = data.size(0)
assert(not torch.isnan(data).any())
assert(not torch.isnan(time_steps).any())
data = data.permute(1,0,2)
if run_backwards:
# Look at data in the reverse order: from later points to the first
data = utils.reverse(data)
if self.use_delta_t:
delta_t = time_steps[1:] - time_steps[:-1]
if run_backwards:
# we are going backwards in time, so reverse the deltas as well
delta_t = utils.reverse(delta_t)
# append zero delta t in the end
delta_t = torch.cat((delta_t, torch.zeros(1).to(self.device)))
delta_t = delta_t.unsqueeze(1).repeat((1,n_traj)).unsqueeze(-1)
data = torch.cat((delta_t, data),-1)
outputs, _ = self.gru_rnn(data)
# GRU output shape: (seq_len, batch, num_directions * hidden_size)
last_output = outputs[-1]
self.extra_info ={"rnn_outputs": outputs, "time_points": time_steps}
mean, std = utils.split_last_dim(self.hiddens_to_z0(last_output))
std = std.abs()
assert(not torch.isnan(mean).any())
assert(not torch.isnan(std).any())
return mean.unsqueeze(0), std.unsqueeze(0)
class Encoder_z0_ODE_RNN(nn.Module):
# Derive z0 by running ode backwards.
# For every y_i we have two versions: encoded from data and derived from ODE by running it backwards from t_i+1 to t_i
# Compute a weighted sum of y_i from data and y_i from ode. Use weighted y_i as an initial value for ODE running from t_i to t_i-1
# Continue until we get to z0
def __init__(self, latent_dim, input_dim, z0_diffeq_solver = None,
z0_dim = None, GRU_update = None,
n_gru_units = 100,
device = torch.device("cpu")):
super(Encoder_z0_ODE_RNN, self).__init__()
if z0_dim is None:
self.z0_dim = latent_dim
else:
self.z0_dim = z0_dim
if GRU_update is None:
self.GRU_update = GRU_unit(latent_dim, input_dim,
n_units = n_gru_units,
device=device).to(device)
else:
self.GRU_update = GRU_update
self.z0_diffeq_solver = z0_diffeq_solver
self.latent_dim = latent_dim
self.input_dim = input_dim
self.device = device
self.extra_info = None
self.transform_z0 = nn.Sequential(
nn.Linear(latent_dim * 2, 100),
nn.Tanh(),
nn.Linear(100, self.z0_dim * 2),)
utils.init_network_weights(self.transform_z0)
def forward(self, data, time_steps, run_backwards = True, save_info = False):
# data, time_steps -- observations and their time stamps
# IMPORTANT: assumes that 'data' already has mask concatenated to it
assert(not torch.isnan(data).any())
assert(not torch.isnan(time_steps).any())
n_traj, n_tp, n_dims = data.size()
if len(time_steps) == 1:
prev_y = torch.zeros((1, n_traj, self.latent_dim)).to(self.device)
prev_std = torch.zeros((1, n_traj, self.latent_dim)).to(self.device)
xi = data[:,0,:].unsqueeze(0)
last_yi, last_yi_std = self.GRU_update(prev_y, prev_std, xi)
extra_info = None
else:
last_yi, last_yi_std, _, extra_info = self.run_odernn(
data, time_steps, run_backwards = run_backwards,
save_info = save_info)
means_z0 = last_yi.reshape(1, n_traj, self.latent_dim)
std_z0 = last_yi_std.reshape(1, n_traj, self.latent_dim)
mean_z0, std_z0 = utils.split_last_dim( self.transform_z0( torch.cat((means_z0, std_z0), -1)))
std_z0 = std_z0.abs()
if save_info:
self.extra_info = extra_info
return mean_z0, std_z0
def run_odernn(self, data, time_steps,
run_backwards = True, save_info = False):
# IMPORTANT: assumes that 'data' already has mask concatenated to it
n_traj, n_tp, n_dims = data.size()
extra_info = []
t0 = time_steps[-1]
if run_backwards:
t0 = time_steps[0]
device = get_device(data)
prev_y = torch.zeros((1, n_traj, self.latent_dim)).to(device)
prev_std = torch.zeros((1, n_traj, self.latent_dim)).to(device)
prev_t, t_i = time_steps[-1] + 0.01, time_steps[-1]
interval_length = time_steps[-1] - time_steps[0]
minimum_step = interval_length / 50
#print("minimum step: {}".format(minimum_step))
assert(not torch.isnan(data).any())
assert(not torch.isnan(time_steps).any())
latent_ys = []
# Run ODE backwards and combine the y(t) estimates using gating
time_points_iter = range(0, len(time_steps))
if run_backwards:
time_points_iter = reversed(time_points_iter)
for i in time_points_iter:
if (prev_t - t_i) < minimum_step:
time_points = torch.stack((prev_t, t_i))
inc = self.z0_diffeq_solver.ode_func(prev_t, prev_y) * (t_i - prev_t)
assert(not torch.isnan(inc).any())
ode_sol = prev_y + inc
ode_sol = torch.stack((prev_y, ode_sol), 2).to(device)
assert(not torch.isnan(ode_sol).any())
else:
n_intermediate_tp = max(2, int((prev_t - t_i) / minimum_step))
time_points = utils.linspace_vector(prev_t, t_i, n_intermediate_tp)
ode_sol = self.z0_diffeq_solver(prev_y, time_points)
assert(not torch.isnan(ode_sol).any())
if torch.mean(ode_sol[:, :, 0, :] - prev_y) >= 0.001:
print("Error: first point of the ODE is not equal to initial value")
print(torch.mean(ode_sol[:, :, 0, :] - prev_y))
exit()
#assert(torch.mean(ode_sol[:, :, 0, :] - prev_y) < 0.001)
yi_ode = ode_sol[:, :, -1, :]
xi = data[:,i,:].unsqueeze(0)
yi, yi_std = self.GRU_update(yi_ode, prev_std, xi)
prev_y, prev_std = yi, yi_std
prev_t, t_i = time_steps[i], time_steps[i-1]
latent_ys.append(yi)
if save_info:
d = {"yi_ode": yi_ode.detach(), #"yi_from_data": yi_from_data,
"yi": yi.detach(), "yi_std": yi_std.detach(),
"time_points": time_points.detach(), "ode_sol": ode_sol.detach()}
extra_info.append(d)
latent_ys = torch.stack(latent_ys, 1)
assert(not torch.isnan(yi).any())
assert(not torch.isnan(yi_std).any())
return yi, yi_std, latent_ys, extra_info
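# Note (added): each loop iteration above performs one step of the ODE-RNN
# recursion -- first evolve the hidden state continuously from prev_t to t_i
# with the ODE solver (or a single explicit Euler step when the gap is below
# minimum_step), then apply the GRU update with the observation x_i at t_i.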
class Decoder(nn.Module):
def __init__(self, latent_dim, input_dim):
super(Decoder, self).__init__()
# decode data from latent space where we are solving an ODE back to the data space
decoder = nn.Sequential(
nn.Linear(latent_dim, input_dim),)
utils.init_network_weights(decoder)
self.decoder = decoder
def forward(self, data):
return self.decoder(data)
| 9,918 | 28.520833 | 130 | py |
steer | steer-master/latent_ode/lib/base_models.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from lib.encoder_decoder import *
from lib.likelihood_eval import *
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.normal import Normal
from torch.distributions import Independent, kl_divergence
from torch.nn.modules.rnn import GRUCell, LSTMCell, RNNCellBase
from torch.nn.parameter import Parameter
def create_classifier(z0_dim, n_labels):
return nn.Sequential(
nn.Linear(z0_dim, 300),
nn.ReLU(),
nn.Linear(300, 300),
nn.ReLU(),
nn.Linear(300, n_labels),)
class Baseline(nn.Module):
def __init__(self, input_dim, latent_dim, device,
obsrv_std = 0.01, use_binary_classif = False,
classif_per_tp = False,
use_poisson_proc = False,
linear_classifier = False,
n_labels = 1,
train_classif_w_reconstr = False):
super(Baseline, self).__init__()
self.input_dim = input_dim
self.latent_dim = latent_dim
self.n_labels = n_labels
self.obsrv_std = torch.Tensor([obsrv_std]).to(device)
self.device = device
self.use_binary_classif = use_binary_classif
self.classif_per_tp = classif_per_tp
self.use_poisson_proc = use_poisson_proc
self.linear_classifier = linear_classifier
self.train_classif_w_reconstr = train_classif_w_reconstr
z0_dim = latent_dim
if use_poisson_proc:
z0_dim += latent_dim
if use_binary_classif:
if linear_classifier:
self.classifier = nn.Sequential(
nn.Linear(z0_dim, n_labels))
else:
self.classifier = create_classifier(z0_dim, n_labels)
utils.init_network_weights(self.classifier)
def get_gaussian_likelihood(self, truth, pred_y, mask = None):
# pred_y shape [n_traj_samples, n_traj, n_tp, n_dim]
# truth shape [n_traj, n_tp, n_dim]
if mask is not None:
mask = mask.repeat(pred_y.size(0), 1, 1, 1)
# Compute likelihood of the data under the predictions
log_density_data = masked_gaussian_log_density(pred_y, truth,
obsrv_std = self.obsrv_std, mask = mask)
log_density_data = log_density_data.permute(1,0)
# Compute the total density
# Take mean over n_traj_samples
log_density = torch.mean(log_density_data, 0)
# shape: [n_traj]
return log_density
def get_mse(self, truth, pred_y, mask = None):
# pred_y shape [n_traj_samples, n_traj, n_tp, n_dim]
# truth shape [n_traj, n_tp, n_dim]
if mask is not None:
mask = mask.repeat(pred_y.size(0), 1, 1, 1)
# Compute MSE of the data under the predictions
log_density_data = compute_mse(pred_y, truth, mask = mask)
# shape: [1]
return torch.mean(log_density_data)
def compute_all_losses(self, batch_dict,
n_tp_to_sample = None, n_traj_samples = 1, kl_coef = 1.):
# Condition on subsampled points
# Make predictions for all the points
pred_x, info = self.get_reconstruction(batch_dict["tp_to_predict"],
batch_dict["observed_data"], batch_dict["observed_tp"],
mask = batch_dict["observed_mask"], n_traj_samples = n_traj_samples,
mode = batch_dict["mode"])
# Compute likelihood of all the points
likelihood = self.get_gaussian_likelihood(batch_dict["data_to_predict"], pred_x,
mask = batch_dict["mask_predicted_data"])
mse = self.get_mse(batch_dict["data_to_predict"], pred_x,
mask = batch_dict["mask_predicted_data"])
################################
# Compute CE loss for binary classification on Physionet
# Use only the last attribute -- mortality in the hospital
device = get_device(batch_dict["data_to_predict"])
ce_loss = torch.Tensor([0.]).to(device)
if (batch_dict["labels"] is not None) and self.use_binary_classif:
if (batch_dict["labels"].size(-1) == 1) or (len(batch_dict["labels"].size()) == 1):
ce_loss = compute_binary_CE_loss(
info["label_predictions"],
batch_dict["labels"])
else:
ce_loss = compute_multiclass_CE_loss(
info["label_predictions"],
batch_dict["labels"],
mask = batch_dict["mask_predicted_data"])
if torch.isnan(ce_loss):
print("label pred")
print(info["label_predictions"])
print("labels")
print( batch_dict["labels"])
raise Exception("CE loss is Nan!")
pois_log_likelihood = torch.Tensor([0.]).to(get_device(batch_dict["data_to_predict"]))
if self.use_poisson_proc:
pois_log_likelihood = compute_poisson_proc_likelihood(
batch_dict["data_to_predict"], pred_x,
info, mask = batch_dict["mask_predicted_data"])
# Take mean over n_traj
pois_log_likelihood = torch.mean(pois_log_likelihood, 1)
loss = - torch.mean(likelihood)
if self.use_poisson_proc:
loss = loss - 0.1 * pois_log_likelihood
if self.use_binary_classif:
if self.train_classif_w_reconstr:
loss = loss + ce_loss * 100
else:
loss = ce_loss
# Take mean over the number of samples in a batch
results = {}
results["loss"] = torch.mean(loss)
results["likelihood"] = torch.mean(likelihood).detach()
results["mse"] = torch.mean(mse).detach()
results["pois_likelihood"] = torch.mean(pois_log_likelihood).detach()
results["ce_loss"] = torch.mean(ce_loss).detach()
results["kl"] = 0.
results["kl_first_p"] = 0.
results["std_first_p"] = 0.
if batch_dict["labels"] is not None and self.use_binary_classif:
results["label_predictions"] = info["label_predictions"].detach()
return results
class VAE_Baseline(nn.Module):
def __init__(self, input_dim, latent_dim,
z0_prior, device,
obsrv_std = 0.01,
use_binary_classif = False,
classif_per_tp = False,
use_poisson_proc = False,
linear_classifier = False,
n_labels = 1,
train_classif_w_reconstr = False):
super(VAE_Baseline, self).__init__()
self.input_dim = input_dim
self.latent_dim = latent_dim
self.device = device
self.n_labels = n_labels
self.obsrv_std = torch.Tensor([obsrv_std]).to(device)
self.z0_prior = z0_prior
self.use_binary_classif = use_binary_classif
self.classif_per_tp = classif_per_tp
self.use_poisson_proc = use_poisson_proc
self.linear_classifier = linear_classifier
self.train_classif_w_reconstr = train_classif_w_reconstr
z0_dim = latent_dim
if use_poisson_proc:
z0_dim += latent_dim
if use_binary_classif:
if linear_classifier:
self.classifier = nn.Sequential(
nn.Linear(z0_dim, n_labels))
else:
self.classifier = create_classifier(z0_dim, n_labels)
utils.init_network_weights(self.classifier)
def get_gaussian_likelihood(self, truth, pred_y, mask = None):
# pred_y shape [n_traj_samples, n_traj, n_tp, n_dim]
# truth shape [n_traj, n_tp, n_dim]
n_traj, n_tp, n_dim = truth.size()
# Compute likelihood of the data under the predictions
truth_repeated = truth.repeat(pred_y.size(0), 1, 1, 1)
if mask is not None:
mask = mask.repeat(pred_y.size(0), 1, 1, 1)
log_density_data = masked_gaussian_log_density(pred_y, truth_repeated,
obsrv_std = self.obsrv_std, mask = mask)
log_density_data = log_density_data.permute(1,0)
log_density = torch.mean(log_density_data, 1)
# shape: [n_traj_samples]
return log_density
def get_mse(self, truth, pred_y, mask = None):
# pred_y shape [n_traj_samples, n_traj, n_tp, n_dim]
# truth shape [n_traj, n_tp, n_dim]
n_traj, n_tp, n_dim = truth.size()
# Repeat the truth to match the number of sampled trajectories
truth_repeated = truth.repeat(pred_y.size(0), 1, 1, 1)
if mask is not None:
mask = mask.repeat(pred_y.size(0), 1, 1, 1)
# Compute MSE of the data under the predictions
log_density_data = compute_mse(pred_y, truth_repeated, mask = mask)
# shape: [1]
return torch.mean(log_density_data)
def compute_all_losses(self, batch_dict, n_traj_samples = 1, kl_coef = 1.):
# Condition on subsampled points
# Make predictions for all the points
pred_y, info = self.get_reconstruction(batch_dict["tp_to_predict"],
batch_dict["observed_data"], batch_dict["observed_tp"],
mask = batch_dict["observed_mask"], n_traj_samples = n_traj_samples,
mode = batch_dict["mode"])
#print("get_reconstruction done -- computing likelihood")
fp_mu, fp_std, fp_enc = info["first_point"]
fp_std = fp_std.abs()
fp_distr = Normal(fp_mu, fp_std)
assert(torch.sum(fp_std < 0) == 0.)
kldiv_z0 = kl_divergence(fp_distr, self.z0_prior)
if torch.isnan(kldiv_z0).any():
print(fp_mu)
print(fp_std)
raise Exception("kldiv_z0 is Nan!")
# Mean over number of latent dimensions
# kldiv_z0 shape: [n_traj_samples, n_traj, n_latent_dims] if prior is a mixture of gaussians (KL is estimated)
# kldiv_z0 shape: [1, n_traj, n_latent_dims] if prior is a standard gaussian (KL is computed exactly)
# shape after: [n_traj_samples]
kldiv_z0 = torch.mean(kldiv_z0,(1,2))
# Compute likelihood of all the points
rec_likelihood = self.get_gaussian_likelihood(
batch_dict["data_to_predict"], pred_y,
mask = batch_dict["mask_predicted_data"])
mse = self.get_mse(
batch_dict["data_to_predict"], pred_y,
mask = batch_dict["mask_predicted_data"])
pois_log_likelihood = torch.Tensor([0.]).to(get_device(batch_dict["data_to_predict"]))
if self.use_poisson_proc:
pois_log_likelihood = compute_poisson_proc_likelihood(
batch_dict["data_to_predict"], pred_y,
info, mask = batch_dict["mask_predicted_data"])
# Take mean over n_traj
pois_log_likelihood = torch.mean(pois_log_likelihood, 1)
################################
# Compute CE loss for binary classification on Physionet
device = get_device(batch_dict["data_to_predict"])
ce_loss = torch.Tensor([0.]).to(device)
if (batch_dict["labels"] is not None) and self.use_binary_classif:
if (batch_dict["labels"].size(-1) == 1) or (len(batch_dict["labels"].size()) == 1):
ce_loss = compute_binary_CE_loss(
info["label_predictions"],
batch_dict["labels"])
else:
ce_loss = compute_multiclass_CE_loss(
info["label_predictions"],
batch_dict["labels"],
mask = batch_dict["mask_predicted_data"])
# IWAE loss
loss = - torch.logsumexp(rec_likelihood - kl_coef * kldiv_z0,0)
if torch.isnan(loss):
loss = - torch.mean(rec_likelihood - kl_coef * kldiv_z0,0)
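# Note (added): -logsumexp over the n_traj_samples draws of z0 is the
# importance-weighted (IWAE) bound; up to the constant log(n_traj_samples) it
# lower-bounds the marginal likelihood more tightly than the plain ELBO, which
# serves as the fallback above when logsumexp produces NaN.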
if self.use_poisson_proc:
loss = loss - 0.1 * pois_log_likelihood
if self.use_binary_classif:
if self.train_classif_w_reconstr:
loss = loss + ce_loss * 100
else:
loss = ce_loss
results = {}
results["loss"] = torch.mean(loss)
results["likelihood"] = torch.mean(rec_likelihood).detach()
results["mse"] = torch.mean(mse).detach()
results["pois_likelihood"] = torch.mean(pois_log_likelihood).detach()
results["ce_loss"] = torch.mean(ce_loss).detach()
results["kl_first_p"] = torch.mean(kldiv_z0).detach()
results["std_first_p"] = torch.mean(fp_std).detach()
if batch_dict["labels"] is not None and self.use_binary_classif:
results["label_predictions"] = info["label_predictions"].detach()
return results
| 11,032 | 31.072674 | 112 | py |
steer | steer-master/latent_ode/lib/parse_datasets.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import os
import numpy as np
import torch
import torch.nn as nn
import lib.utils as utils
from lib.diffeq_solver import DiffeqSolver
from generate_timeseries import Periodic_1d
from torch.distributions import uniform
from torch.utils.data import DataLoader
from mujoco_physics import HopperPhysics
from physionet import PhysioNet, variable_time_collate_fn, get_data_min_max
from person_activity import PersonActivity, variable_time_collate_fn_activity
from sklearn import model_selection
import random
#####################################################################################################
def parse_datasets(args, device):
def basic_collate_fn(batch, time_steps, args = args, device = device, data_type = "train"):
batch = torch.stack(batch)
data_dict = {
"data": batch,
"time_steps": time_steps}
data_dict = utils.split_and_subsample_batch(data_dict, args, data_type = data_type)
return data_dict
dataset_name = args.dataset
n_total_tp = args.timepoints + args.extrap
max_t_extrap = args.max_t / args.timepoints * n_total_tp
##################################################################
# MuJoCo dataset
if dataset_name == "hopper":
dataset_obj = HopperPhysics(root='data', download=True, generate=False, device = device)
dataset = dataset_obj.get_dataset()[:args.n]
dataset = dataset.to(device)
n_tp_data = dataset[:].shape[1]
# Time steps that are used later on for extrapolation
time_steps = torch.arange(start=0, end = n_tp_data, step=1).float().to(device)
time_steps = time_steps / len(time_steps)
dataset = dataset.to(device)
time_steps = time_steps.to(device)
if not args.extrap:
# Creating dataset for interpolation
# sample time points from different parts of the timeline,
# so that the model learns from different parts of hopper trajectory
n_traj = len(dataset)
n_tp_data = dataset.shape[1]
n_reduced_tp = args.timepoints
start_ind = np.random.randint(0, high=n_tp_data - n_reduced_tp +1, size=n_traj)
end_ind = start_ind + n_reduced_tp
sliced = []
for i in range(n_traj):
sliced.append(dataset[i, start_ind[i] : end_ind[i], :])
dataset = torch.stack(sliced).to(device)
time_steps = time_steps[:n_reduced_tp]
# Split into train and test by the time sequences
train_y, test_y = utils.split_train_test(dataset, train_fraq = 0.8)
n_samples = len(dataset)
input_dim = dataset.size(-1)
batch_size = min(args.batch_size, args.n)
train_dataloader = DataLoader(train_y, batch_size = batch_size, shuffle=False,
collate_fn= lambda batch: basic_collate_fn(batch, time_steps, data_type = "train"))
test_dataloader = DataLoader(test_y, batch_size = n_samples, shuffle=False,
collate_fn= lambda batch: basic_collate_fn(batch, time_steps, data_type = "test"))
data_objects = {"dataset_obj": dataset_obj,
"train_dataloader": utils.inf_generator(train_dataloader),
"test_dataloader": utils.inf_generator(test_dataloader),
"input_dim": input_dim,
"n_train_batches": len(train_dataloader),
"n_test_batches": len(test_dataloader)}
return data_objects
##################################################################
# Physionet dataset
if dataset_name == "physionet":
train_dataset_obj = PhysioNet('data/physionet', train=True,
quantization = args.quantization,
download=True, n_samples = min(10000, args.n),
device = device)
# Use custom collate_fn to combine samples with arbitrary time observations.
# Returns the dataset along with mask and time steps
test_dataset_obj = PhysioNet('data/physionet', train=False,
quantization = args.quantization,
download=True, n_samples = min(10000, args.n),
device = device)
# Combine and shuffle samples from physionet Train and physionet Test
total_dataset = train_dataset_obj[:len(train_dataset_obj)]
if not args.classif:
# Concatenate samples from original Train and Test sets
# Only the 'training' physionet samples have labels, so for the classification task the physionet 'test' samples are not needed.
total_dataset = total_dataset + test_dataset_obj[:len(test_dataset_obj)]
# Shuffle and split
train_data, test_data = model_selection.train_test_split(total_dataset, train_size= 0.8,
random_state = 42, shuffle = True)
record_id, tt, vals, mask, labels = train_data[0]
n_samples = len(total_dataset)
input_dim = vals.size(-1)
batch_size = min(min(len(train_dataset_obj), args.batch_size), args.n)
data_min, data_max = get_data_min_max(total_dataset)
train_dataloader = DataLoader(train_data, batch_size= batch_size, shuffle=False,
collate_fn= lambda batch: variable_time_collate_fn(batch, args, device, data_type = "train",
data_min = data_min, data_max = data_max))
test_dataloader = DataLoader(test_data, batch_size = n_samples, shuffle=False,
collate_fn= lambda batch: variable_time_collate_fn(batch, args, device, data_type = "test",
data_min = data_min, data_max = data_max))
attr_names = train_dataset_obj.params
data_objects = {"dataset_obj": train_dataset_obj,
"train_dataloader": utils.inf_generator(train_dataloader),
"test_dataloader": utils.inf_generator(test_dataloader),
"input_dim": input_dim,
"n_train_batches": len(train_dataloader),
"n_test_batches": len(test_dataloader),
"attr": attr_names, #optional
"classif_per_tp": False, #optional
"n_labels": 1} #optional
return data_objects
##################################################################
# Human activity dataset
if dataset_name == "activity":
n_samples = min(10000, args.n)
dataset_obj = PersonActivity('data/PersonActivity',
download=True, n_samples = n_samples, device = device)
print(dataset_obj)
# Use custom collate_fn to combine samples with arbitrary time observations.
# Returns the dataset along with mask and time steps
# Shuffle and split
train_data, test_data = model_selection.train_test_split(dataset_obj, train_size= 0.8,
random_state = 42, shuffle = True)
# np.random.choice with replace=False yields a permutation, i.e. a true shuffle
train_data = [train_data[i] for i in np.random.choice(len(train_data), len(train_data), replace=False)]
test_data = [test_data[i] for i in np.random.choice(len(test_data), len(test_data), replace=False)]
record_id, tt, vals, mask, labels = train_data[0]
input_dim = vals.size(-1)
batch_size = min(min(len(dataset_obj), args.batch_size), args.n)
train_dataloader = DataLoader(train_data, batch_size= batch_size, shuffle=False,
collate_fn= lambda batch: variable_time_collate_fn_activity(batch, args, device, data_type = "train"))
test_dataloader = DataLoader(test_data, batch_size=n_samples, shuffle=False,
collate_fn= lambda batch: variable_time_collate_fn_activity(batch, args, device, data_type = "test"))
data_objects = {"dataset_obj": dataset_obj,
"train_dataloader": utils.inf_generator(train_dataloader),
"test_dataloader": utils.inf_generator(test_dataloader),
"input_dim": input_dim,
"n_train_batches": len(train_dataloader),
"n_test_batches": len(test_dataloader),
"classif_per_tp": True, #optional
"n_labels": labels.size(-1)}
return data_objects
########### 1d datasets ###########
# Sample args.timepoints time points in the interval [0, args.max_t]
# Sample points for both the training sequence and extrapolation (test)
distribution = uniform.Uniform(torch.Tensor([0.0]),torch.Tensor([max_t_extrap]))
time_steps_extrap = distribution.sample(torch.Size([n_total_tp-1]))[:,0]
time_steps_extrap = torch.cat((torch.Tensor([0.0]), time_steps_extrap))
time_steps_extrap = torch.sort(time_steps_extrap)[0]
dataset_obj = None
##################################################################
# Sample a periodic function
if dataset_name == "periodic":
dataset_obj = Periodic_1d(
init_freq = None, init_amplitude = 1.,
final_amplitude = 1., final_freq = None,
z0 = 1.)
##################################################################
if dataset_obj is None:
raise Exception("Unknown dataset: {}".format(dataset_name))
dataset = dataset_obj.sample_traj(time_steps_extrap, n_samples = args.n,
noise_weight = args.noise_weight)
# Process small datasets
dataset = dataset.to(device)
time_steps_extrap = time_steps_extrap.to(device)
train_y, test_y = utils.split_train_test(dataset, train_fraq = 0.8)
n_samples = len(dataset)
input_dim = dataset.size(-1)
batch_size = min(args.batch_size, args.n)
train_dataloader = DataLoader(train_y, batch_size = batch_size, shuffle=False,
collate_fn= lambda batch: basic_collate_fn(batch, time_steps_extrap, data_type = "train"))
test_dataloader = DataLoader(test_y, batch_size = args.n, shuffle=False,
collate_fn= lambda batch: basic_collate_fn(batch, time_steps_extrap, data_type = "test"))
data_objects = {#"dataset_obj": dataset_obj,
"train_dataloader": utils.inf_generator(train_dataloader),
"test_dataloader": utils.inf_generator(test_dataloader),
"input_dim": input_dim,
"n_train_batches": len(train_dataloader),
"n_test_batches": len(test_dataloader)}
return data_objects
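# --- Editor-added sketch (hedged) -------------------------------------------
# The loaders above are wrapped in utils.inf_generator, so callers are meant
# to draw batches with next(...) n_train_batches times per "epoch" instead of
# iterating the DataLoader directly. Assuming inf_generator simply restarts
# its iterable on exhaustion (as in this project's utils), the contract can
# be demonstrated with a plain list standing in for a DataLoader:
if __name__ == '__main__':
    gen = utils.inf_generator([1, 2, 3])
    # next() never raises StopIteration; the sequence cycles 1, 2, 3, 1, ...
    print([next(gen) for _ in range(7)])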
| 9,406 | 37.871901 | 135 | py |
steer | steer-master/stiff_ode_experiments/stiff_ode_demo.py | import os
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
parser = argparse.ArgumentParser('ODE demo')
parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
parser.add_argument('--data_size', type=int, default=120) #default=1000)
parser.add_argument('--batch_time', type=int, default=2)
parser.add_argument('--batch_size', type=int, default=20)
parser.add_argument('--niters', type=int, default=2000)
parser.add_argument('--test_freq', type=int, default=20)
parser.add_argument('--ntest', type=int, default=10)
parser.add_argument('--n_units', type=int, default=500)
parser.add_argument('--min_length', type=float, default=0.001)
parser.add_argument('--normal_std', type=float, default=0.01)
parser.add_argument('--stiffness_ratio', type=float, default=1000.0)
parser.add_argument('--viz', action='store_true')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--adjoint', action='store_true')
parser.add_argument('--version', type=str, choices=['standard','steer','normal'], default='steer')
args = parser.parse_args()
torch.manual_seed(6)
if args.adjoint:
from torchdiffeq import odeint_adjoint as odeint
from torchdiffeq import odeint_adjoint_stochastic_end_v3 as odeint_stochastic_end_v3
from torchdiffeq import odeint_adjoint_stochastic_end_normal as odeint_stochastic_end_normal
else:
from torchdiffeq import odeint_stochastic_end_v3
from torchdiffeq import odeint
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
true_y0 = torch.tensor([0.])
t = torch.linspace(0., 15., args.data_size)
test_t = torch.linspace(0., 25., args.data_size)
true_A = torch.tensor([[-0.1, 2.0], [-2.0, -0.1]])
class Lambda(nn.Module):
def forward(self, t, y):
t = t.unsqueeze(0)
equation = -1*y*args.stiffness_ratio + 3*args.stiffness_ratio - 2*args.stiffness_ratio * torch.exp(-1*t)
#equation = -1*y*args.stiffness_ratio + 3*args.stiffness_ratio - 2*args.stiffness_ratio * torch.exp(-1*t)# - 2*args.stiffness_ratio * torch.exp(-10000*t)
#equation = -1000*y + 3000 - 2000 * torch.exp(-t) + 1000 * torch.sin(t)
return equation
with torch.no_grad():
true_y = odeint(Lambda(), true_y0, t, method='dopri5')
true_y_test = odeint(Lambda(), true_y0, test_t, method='dopri5')
def get_batch():
s = torch.from_numpy(np.random.choice(np.arange(args.data_size - args.batch_time, dtype=np.int64), args.batch_size, replace=False))
batch_y0 = true_y[s] # (M, D)
batch_t = t[:args.batch_time] # (T)
batch_y = torch.stack([true_y[s + i] for i in range(args.batch_time)], dim=0) # (T, M, D)
return batch_y0, batch_t, batch_y
def makedirs(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
if args.viz:
makedirs('steer')
import matplotlib.pyplot as plt
def visualize(true_y, pred_y, odefunc, test_t, itr):
if args.viz:
plt.clf()
plt.xlabel('t')
plt.ylabel('y')
plt.plot(test_t.numpy(), true_y.numpy()[:, 0], 'g-', label='True')
plt.plot(test_t.numpy(), pred_y.numpy()[:, 0], 'b--' , label='Predicted' )
plt.ylim((-1, 25))
plt.legend(loc="upper right")
plt.tight_layout()
plt.savefig('steer/{:04d}'.format(itr))
plt.draw()
plt.pause(0.001)
class ODEFunc(nn.Module):
def __init__(self):
super(ODEFunc, self).__init__()
self.net = nn.Sequential(
nn.Linear(2, args.n_units),
nn.Tanh(),
nn.Linear(args.n_units, args.n_units),
nn.Tanh(),
nn.Linear(args.n_units, 1),
)
for m in self.net.modules():
if isinstance(m, nn.Linear):
nn.init.normal_(m.weight, mean=0, std=0.1)
nn.init.constant_(m.bias, val=0)
def forward(self, t, y):
t=t.unsqueeze(0)
t = t.view(1,1)
y = y.view(y.size(0),1)
t = t.expand_as(y)
equation = torch.cat([t,y],1)
result = self.net(equation)
if y.size(0)==1:
result = result.squeeze()
return result
class RunningAverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, momentum=0.99):
self.momentum = momentum
self.reset()
def reset(self):
self.val = None
self.avg = 0
def update(self, val):
if self.val is None:
self.avg = val
else:
self.avg = self.avg * self.momentum + val * (1 - self.momentum)
self.val = val
if __name__ == '__main__':
ii = 0
func = ODEFunc()
optimizer = optim.RMSprop(func.parameters(), lr=1e-4)
end = time.time()
time_meter = RunningAverageMeter(0.97)
loss_meter = RunningAverageMeter(0.97)
for itr in range(1, args.niters + 1):
optimizer.zero_grad()
batch_y0, batch_t, batch_y = get_batch()
if args.version=='standard':
pred_y = odeint(func, batch_y0, batch_t)
elif args.version=='steer':
pred_y = odeint_stochastic_end_v3(func, batch_y0, batch_t,min_length=args.min_length,mode='train')
elif args.version=='normal':
pred_y = odeint_stochastic_end_normal(func, batch_y0, batch_t,std=args.normal_std,mode='train')
loss = torch.mean(torch.abs(pred_y - batch_y))
loss.backward()
optimizer.step()
time_meter.update(time.time() - end)
loss_meter.update(loss.item())
if itr % args.test_freq == 0:
with torch.no_grad():
pred_y = odeint(func, true_y0, test_t)
loss = torch.mean(torch.abs(pred_y - true_y_test))
print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
visualize(true_y_test, pred_y, func, test_t, ii )
ii += 1
end = time.time()
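# --- Editor-added reference solution (hedged sketch) ------------------------
# For k = args.stiffness_ratio, the ODE fitted above,
#     y' = -k*y + 3*k - 2*k*exp(-t),  y(0) = 0,
# has the closed form
#     y(t) = 3 - (2k/(k-1))*exp(-t) + ((2k/(k-1)) - 3)*exp(-k*t),
# i.e. a slow exp(-t) mode plus a fast exp(-k*t) transient, which is what
# makes the problem stiff. The helper below (an editor addition, not used by
# the training loop) evaluates it for checking solver output by eye.
def analytic_solution(t, k=args.stiffness_ratio):
    a = 2.0 * k / (k - 1.0)
    return 3.0 - a * torch.exp(-t) + (a - 3.0) * torch.exp(-k * t)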
| 5,987 | 32.452514 | 161 | py |
steer | steer-master/torchdiffeq/setup.py | import setuptools
setuptools.setup(
name="torchdiffeq",
version="0.0.1",
author="Ricky Tian Qi Chen",
author_email="rtqichen@cs.toronto.edu",
description="ODE solvers and adjoint sensitivity analysis in PyTorch.",
url="https://github.com/arnabgho/torchdiffeq",
packages=['torchdiffeq', 'torchdiffeq._impl'],
install_requires=['torch>=0.4.1'],
# use a list here: ("...") is just a parenthesized string, not a tuple
classifiers=[
"Programming Language :: Python :: 3"],)
| 443 | 30.714286 | 75 | py |
steer | steer-master/torchdiffeq/tests/gradient_tests.py | import unittest
import torch
import torchdiffeq
from problems import construct_problem
eps = 1e-12
torch.set_default_dtype(torch.float64)
TEST_DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def max_abs(tensor):
return torch.max(torch.abs(tensor))
class TestGradient(unittest.TestCase):
def test_midpoint(self):
f, y0, t_points, _ = construct_problem(TEST_DEVICE)
func = lambda y0, t_points: torchdiffeq.odeint(f, y0, t_points, method='midpoint')
self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))
def test_rk4(self):
f, y0, t_points, _ = construct_problem(TEST_DEVICE)
func = lambda y0, t_points: torchdiffeq.odeint(f, y0, t_points, method='rk4')
self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))
def test_dopri5(self):
f, y0, t_points, _ = construct_problem(TEST_DEVICE)
func = lambda y0, t_points: torchdiffeq.odeint(f, y0, t_points, method='dopri5')
self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))
def test_adams(self):
f, y0, t_points, _ = construct_problem(TEST_DEVICE)
func = lambda y0, t_points: torchdiffeq.odeint(f, y0, t_points, method='adams')
self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))
def test_adaptive_heun(self):
f, y0, t_points, _ = construct_problem(TEST_DEVICE)
func = lambda y0, t_points: torchdiffeq.odeint(f, y0, t_points, method='adaptive_heun')
self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))
def test_adjoint(self):
"""
Check that adjoint-mode gradients match direct backpropagation through dopri5.
"""
f, y0, t_points, _ = construct_problem(TEST_DEVICE)
func = lambda y0, t_points: torchdiffeq.odeint(f, y0, t_points, method='dopri5')
ys = func(y0, t_points)
torch.manual_seed(0)
gradys = torch.rand_like(ys)
ys.backward(gradys)
# reg_y0_grad = y0.grad
reg_t_grad = t_points.grad
reg_a_grad = f.a.grad
reg_b_grad = f.b.grad
f, y0, t_points, _ = construct_problem(TEST_DEVICE)
func = lambda y0, t_points: torchdiffeq.odeint_adjoint(f, y0, t_points, method='dopri5')
ys = func(y0, t_points)
ys.backward(gradys)
# adj_y0_grad = y0.grad
adj_t_grad = t_points.grad
adj_a_grad = f.a.grad
adj_b_grad = f.b.grad
# self.assertLess(max_abs(reg_y0_grad - adj_y0_grad), eps)
self.assertLess(max_abs(reg_t_grad - adj_t_grad), eps)
self.assertLess(max_abs(reg_a_grad - adj_a_grad), eps)
self.assertLess(max_abs(reg_b_grad - adj_b_grad), eps)
class TestCompareAdjointGradient(unittest.TestCase):
def problem(self):
class Odefunc(torch.nn.Module):
def __init__(self):
super(Odefunc, self).__init__()
self.A = torch.nn.Parameter(torch.tensor([[-0.1, 2.0], [-2.0, -0.1]]))
self.unused_module = torch.nn.Linear(2, 5)
def forward(self, t, y):
return torch.mm(y**3, self.A)
y0 = torch.tensor([[2., 0.]]).to(TEST_DEVICE).requires_grad_(True)
t_points = torch.linspace(0., 25., 10).to(TEST_DEVICE).requires_grad_(True)
func = Odefunc().to(TEST_DEVICE)
return func, y0, t_points
def test_dopri5_adjoint_against_dopri5(self):
func, y0, t_points = self.problem()
ys = torchdiffeq.odeint_adjoint(func, y0, t_points, method='dopri5')
gradys = torch.rand_like(ys) * 0.1
ys.backward(gradys)
adj_y0_grad = y0.grad
adj_t_grad = t_points.grad
adj_A_grad = func.A.grad
self.assertEqual(max_abs(func.unused_module.weight.grad), 0)
self.assertEqual(max_abs(func.unused_module.bias.grad), 0)
func, y0, t_points = self.problem()
ys = torchdiffeq.odeint(func, y0, t_points, method='dopri5')
ys.backward(gradys)
self.assertLess(max_abs(y0.grad - adj_y0_grad), 3e-4)
self.assertLess(max_abs(t_points.grad - adj_t_grad), 1e-4)
self.assertLess(max_abs(func.A.grad - adj_A_grad), 2e-3)
def test_adams_adjoint_against_dopri5(self):
func, y0, t_points = self.problem()
ys_ = torchdiffeq.odeint_adjoint(func, y0, t_points, method='adams')
gradys = torch.rand_like(ys_) * 0.1
ys_.backward(gradys)
adj_y0_grad = y0.grad
adj_t_grad = t_points.grad
adj_A_grad = func.A.grad
self.assertEqual(max_abs(func.unused_module.weight.grad), 0)
self.assertEqual(max_abs(func.unused_module.bias.grad), 0)
func, y0, t_points = self.problem()
ys = torchdiffeq.odeint(func, y0, t_points, method='dopri5')
ys.backward(gradys)
self.assertLess(max_abs(y0.grad - adj_y0_grad), 5e-2)
self.assertLess(max_abs(t_points.grad - adj_t_grad), 5e-4)
self.assertLess(max_abs(func.A.grad - adj_A_grad), 2e-2)
if __name__ == '__main__':
unittest.main()
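# Editor note (hedged): the entry point above means `python gradient_tests.py`
# runs the whole suite; a single case can be selected with, e.g.,
# `python -m unittest gradient_tests.TestGradient.test_dopri5` from this
# directory.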
| 5,019 | 33.14966 | 96 | py |
steer | steer-master/torchdiffeq/tests/api_tests.py | import unittest
import torch
import torchdiffeq
from problems import construct_problem
eps = 1e-12
torch.set_default_dtype(torch.float64)
TEST_DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def max_abs(tensor):
return torch.max(torch.abs(tensor))
class TestCollectionState(unittest.TestCase):
def test_dopri5(self):
f, y0, t_points, sol = construct_problem(TEST_DEVICE)
tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
tuple_y0 = (y0, y0)
tuple_y = torchdiffeq.odeint(tuple_f, tuple_y0, t_points, method='dopri5')
max_error0 = (sol - tuple_y[0]).max()
max_error1 = (sol - tuple_y[1]).max()
self.assertLess(max_error0, eps)
self.assertLess(max_error1, eps)
def test_dopri5_gradient(self):
f, y0, t_points, sol = construct_problem(TEST_DEVICE)
tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
for i in range(2):
func = lambda y0, t_points: torchdiffeq.odeint(tuple_f, (y0, y0), t_points, method='dopri5')[i]
self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))
def test_adams(self):
f, y0, t_points, sol = construct_problem(TEST_DEVICE)
tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
tuple_y0 = (y0, y0)
tuple_y = torchdiffeq.odeint(tuple_f, tuple_y0, t_points, method='adams')
max_error0 = (sol - tuple_y[0]).max()
max_error1 = (sol - tuple_y[1]).max()
self.assertLess(max_error0, eps)
self.assertLess(max_error1, eps)
def test_adams_gradient(self):
f, y0, t_points, sol = construct_problem(TEST_DEVICE)
tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
for i in range(2):
func = lambda y0, t_points: torchdiffeq.odeint(tuple_f, (y0, y0), t_points, method='adams')[i]
self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))
def test_adaptive_heun(self):
f, y0, t_points, sol = construct_problem(TEST_DEVICE)
tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
tuple_y0 = (y0, y0)
tuple_y = torchdiffeq.odeint(tuple_f, tuple_y0, t_points, method='adaptive_heun')
max_error0 = (sol - tuple_y[0]).max()
max_error1 = (sol - tuple_y[1]).max()
self.assertLess(max_error0, eps)
self.assertLess(max_error1, eps)
def test_adaptive_heun_gradient(self):
f, y0, t_points, sol = construct_problem(TEST_DEVICE)
tuple_f = lambda t, y: (f(t, y[0]), f(t, y[1]))
for i in range(2):
func = lambda y0, t_points: torchdiffeq.odeint(tuple_f, (y0, y0), t_points, method='adaptive_heun')[i]
self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))
if __name__ == '__main__':
unittest.main()
| 2,805 | 32.011765 | 114 | py |
steer | steer-master/torchdiffeq/tests/odeint_tests.py | import unittest
import torch
import torchdiffeq
import problems
error_tol = 1e-4
torch.set_default_dtype(torch.float64)
TEST_DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def max_abs(tensor):
return torch.max(torch.abs(tensor))
def rel_error(true, estimate):
return max_abs((true - estimate) / true)
class TestSolverError(unittest.TestCase):
def test_euler(self):
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE)
y = torchdiffeq.odeint(f, y0, t_points, method='euler')
self.assertLess(rel_error(sol, y), error_tol)
def test_midpoint(self):
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE)
y = torchdiffeq.odeint(f, y0, t_points, method='midpoint')
self.assertLess(rel_error(sol, y), error_tol)
def test_rk4(self):
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE)
y = torchdiffeq.odeint(f, y0, t_points, method='rk4')
self.assertLess(rel_error(sol, y), error_tol)
def test_explicit_adams(self):
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE)
y = torchdiffeq.odeint(f, y0, t_points, method='explicit_adams')
self.assertLess(rel_error(sol, y), error_tol)
def test_adams(self):
for ode in problems.PROBLEMS.keys():
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, ode=ode)
y = torchdiffeq.odeint(f, y0, t_points, method='adams')
with self.subTest(ode=ode):
self.assertLess(rel_error(sol, y), error_tol)
def test_dopri5(self):
for ode in problems.PROBLEMS.keys():
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, ode=ode)
y = torchdiffeq.odeint(f, y0, t_points, method='dopri5')
with self.subTest(ode=ode):
self.assertLess(rel_error(sol, y), error_tol)
def test_adaptive_heun(self):
for ode in problems.PROBLEMS.keys():
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, ode=ode)
y = torchdiffeq.odeint(f, y0, t_points, method='adaptive_heun')
with self.subTest(ode=ode):
self.assertLess(rel_error(sol, y), error_tol)
def test_adjoint(self):
for ode in problems.PROBLEMS.keys():
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, ode=ode, reverse=True)
y = torchdiffeq.odeint_adjoint(f, y0, t_points, method='dopri5')
with self.subTest(ode=ode):
self.assertLess(rel_error(sol, y), error_tol)
class TestSolverBackwardsInTimeError(unittest.TestCase):
def test_euler(self):
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
y = torchdiffeq.odeint(f, y0, t_points, method='euler')
self.assertLess(rel_error(sol, y), error_tol)
def test_midpoint(self):
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
y = torchdiffeq.odeint(f, y0, t_points, method='midpoint')
self.assertLess(rel_error(sol, y), error_tol)
def test_rk4(self):
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
y = torchdiffeq.odeint(f, y0, t_points, method='rk4')
self.assertLess(rel_error(sol, y), error_tol)
def test_explicit_adams(self):
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
y = torchdiffeq.odeint(f, y0, t_points, method='explicit_adams')
self.assertLess(rel_error(sol, y), error_tol)
def test_adams(self):
for ode in problems.PROBLEMS.keys():
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, ode=ode, reverse=True)
y = torchdiffeq.odeint(f, y0, t_points, method='adams')
with self.subTest(ode=ode):
self.assertLess(rel_error(sol, y), error_tol)
def test_dopri5(self):
for ode in problems.PROBLEMS.keys():
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, ode=ode, reverse=True)
y = torchdiffeq.odeint(f, y0, t_points, method='dopri5')
with self.subTest(ode=ode):
self.assertLess(rel_error(sol, y), error_tol)
def test_adaptive_heun(self):
for ode in problems.PROBLEMS.keys():
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, ode=ode, reverse=True)
y = torchdiffeq.odeint(f, y0, t_points, method='adaptive_heun')
with self.subTest(ode=ode):
self.assertLess(rel_error(sol, y), error_tol)
def test_adjoint(self):
for ode in problems.PROBLEMS.keys():
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, ode=ode, reverse=True)
y = torchdiffeq.odeint_adjoint(f, y0, t_points, method='dopri5')
with self.subTest(ode=ode):
self.assertLess(rel_error(sol, y), error_tol)
class TestNoIntegration(unittest.TestCase):
def test_midpoint(self):
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
y = torchdiffeq.odeint(f, y0, t_points[0:1], method='midpoint')
self.assertLess(max_abs(sol[0] - y), error_tol)
def test_rk4(self):
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
y = torchdiffeq.odeint(f, y0, t_points[0:1], method='rk4')
self.assertLess(max_abs(sol[0] - y), error_tol)
def test_explicit_adams(self):
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
y = torchdiffeq.odeint(f, y0, t_points[0:1], method='explicit_adams')
self.assertLess(max_abs(sol[0] - y), error_tol)
def test_adams(self):
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
y = torchdiffeq.odeint(f, y0, t_points[0:1], method='adams')
self.assertLess(max_abs(sol[0] - y), error_tol)
def test_dopri5(self):
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
y = torchdiffeq.odeint(f, y0, t_points[0:1], method='dopri5')
self.assertLess(max_abs(sol[0] - y), error_tol)
def test_adaptive_heun(self):
f, y0, t_points, sol = problems.construct_problem(TEST_DEVICE, reverse=True)
y = torchdiffeq.odeint(f, y0, t_points[0:1], method='adaptive_heun')
self.assertLess(max_abs(sol[0] - y), error_tol)
if __name__ == '__main__':
unittest.main()
| 6,526 | 35.875706 | 88 | py |
steer | steer-master/torchdiffeq/tests/problems.py | import math
import numpy as np
import scipy.linalg
import torch
class ConstantODE(torch.nn.Module):
def __init__(self, device):
super(ConstantODE, self).__init__()
self.a = torch.nn.Parameter(torch.tensor(0.2).to(device))
self.b = torch.nn.Parameter(torch.tensor(3.0).to(device))
def forward(self, t, y):
return self.a + (y - (self.a * t + self.b))**5
def y_exact(self, t):
return self.a * t + self.b
class SineODE(torch.nn.Module):
def __init__(self, device):
super(SineODE, self).__init__()
def forward(self, t, y):
return 2 * y / t + t**4 * torch.sin(2 * t) - t**2 + 4 * t**3
def y_exact(self, t):
return -0.5 * t**4 * torch.cos(2 * t) + 0.5 * t**3 * torch.sin(2 * t) + 0.25 * t**2 * torch.cos(
2 * t
) - t**3 + 2 * t**4 + (math.pi - 0.25) * t**2
class LinearODE(torch.nn.Module):
def __init__(self, device, dim=10):
super(LinearODE, self).__init__()
self.dim = dim
U = torch.randn(dim, dim).to(device) * 0.1
A = 2 * U - (U + U.transpose(0, 1))
self.A = torch.nn.Parameter(A)
self.initial_val = np.ones((dim, 1))
def forward(self, t, y):
return torch.mm(self.A, y.reshape(self.dim, 1)).reshape(-1)
def y_exact(self, t):
t = t.detach().cpu().numpy()
A_np = self.A.detach().cpu().numpy()
ans = []
for t_i in t:
ans.append(np.matmul(scipy.linalg.expm(A_np * t_i), self.initial_val))
return torch.stack([torch.tensor(ans_) for ans_ in ans]).reshape(len(t), self.dim)
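# Editor note: y_exact above uses the standard closed form for a linear
# system -- dy/dt = A y with y(0) = y0 is solved by y(t) = expm(A*t) @ y0 --
# evaluated one time point at a time with scipy.linalg.expm.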
PROBLEMS = {'constant': ConstantODE, 'linear': LinearODE, 'sine': SineODE}
def construct_problem(device, npts=10, ode='constant', reverse=False):
f = PROBLEMS[ode](device)
t_points = torch.linspace(1, 8, npts).to(device).requires_grad_(True)
sol = f.y_exact(t_points)
def _flip(x, dim):
indices = [slice(None)] * x.dim()
indices[dim] = torch.arange(x.size(dim) - 1, -1, -1, dtype=torch.long, device=x.device)
return x[tuple(indices)]
if reverse:
t_points = _flip(t_points, 0).clone().detach()
sol = _flip(sol, 0).clone().detach()
return f, sol[0].detach(), t_points, sol
if __name__ == '__main__':
f = SineODE('cpu')
t_points = torch.linspace(1, 8, 100).to('cpu').requires_grad_(True)
sol = f.y_exact(t_points)
import matplotlib.pyplot as plt
plt.plot(t_points.detach().cpu().numpy(), sol.detach().cpu().numpy())
plt.show()
| 2,533 | 28.126437 | 104 | py |
steer | steer-master/torchdiffeq/tests/DETEST/run.py | import time
import numpy as np
from scipy.stats.mstats import gmean
import torch
from torchdiffeq import odeint
import detest
torch.set_default_tensor_type(torch.DoubleTensor)
class NFEDiffEq:
def __init__(self, diffeq):
self.diffeq = diffeq
self.nfe = 0
def __call__(self, t, y):
self.nfe += 1
return self.diffeq(t, y)
def main():
sol = dict()
for method in ['dopri5', 'adams']:
for tol in [1e-3, 1e-6, 1e-9]:
print('======= {} | tol={:e} ======='.format(method, tol))
nfes = []
times = []
errs = []
for c in ['A', 'B', 'C', 'D', 'E']:
for i in ['1', '2', '3', '4', '5']:
diffeq, init, _ = getattr(detest, c + i)()
t0, y0 = init()
diffeq = NFEDiffEq(diffeq)
if not c + i in sol:
sol[c + i] = odeint(
diffeq, y0, torch.stack([t0, torch.tensor(20.)]), atol=1e-12, rtol=1e-12, method='dopri5'
)[1]
diffeq.nfe = 0
start_time = time.time()
est = odeint(diffeq, y0, torch.stack([t0, torch.tensor(20.)]), atol=tol, rtol=tol, method=method)
time_spent = time.time() - start_time
error = torch.sqrt(torch.mean((sol[c + i] - est[1])**2))
errs.append(error.item())
nfes.append(diffeq.nfe)
times.append(time_spent)
print('{}: NFE {} | Time {} | Err {:e}'.format(c + i, diffeq.nfe, time_spent, error.item()))
print('Total NFE {} | Total Time {} | GeomAvg Error {:e}'.format(np.sum(nfes), np.sum(times), gmean(errs)))
if __name__ == '__main__':
main()
| 1,843 | 29.733333 | 119 | py |
steer | steer-master/torchdiffeq/tests/DETEST/detest.py | import math
import torch
####################################
# Problem Class A. Single equations.
####################################
def A1():
diffeq = lambda t, y: -y
init = lambda: (torch.tensor(0.), torch.tensor(1.))
solution = lambda t: torch.exp(-t)
return diffeq, init, solution
def A2():
diffeq = lambda t, y: -y**3 / 2
init = lambda: (torch.tensor(0.), torch.tensor(1.))
solution = lambda t: 1 / torch.sqrt(t + 1)
return diffeq, init, solution
def A3():
diffeq = lambda t, y: y * torch.cos(t)
init = lambda: (torch.tensor(0.), torch.tensor(1.))
solution = lambda t: torch.exp(torch.sin(t))
return diffeq, init, solution
def A4():
diffeq = lambda t, y: y / 4 * (1 - y / 20)
init = lambda: (torch.tensor(0.), torch.tensor(1.))
solution = lambda t: 20 / (1 + 19 * torch.exp(-t / 4))
return diffeq, init, solution
def A5():
diffeq = lambda t, y: (y - t) / (y + t)
init = lambda: (torch.tensor(0.), torch.tensor(4.))
return diffeq, init, None
#################################
# Problem Class B. Small systems.
#################################
def B1():
def diffeq(t, y):
dy0 = 2 * (y[0] - y[0] * y[1])
dy1 = -(y[1] - y[0] * y[1])
return torch.stack([dy0, dy1])
def init():
return torch.tensor(0.), torch.tensor([1., 3.])
return diffeq, init, None
def B2():
A = torch.tensor([[-1., 1., 0.], [1., -2., 1.], [0., 1., -1.]])
def diffeq(t, y):
dy = torch.mv(A, y)
return dy
def init():
return torch.tensor(0.), torch.tensor([2., 0., 1.])
return diffeq, init, None
def B3():
def diffeq(t, y):
dy0 = -y[0]
dy1 = y[0] - y[1] * y[1]
dy2 = y[1] * y[1]
return torch.stack([dy0, dy1, dy2])
def init():
return torch.tensor(0.), torch.tensor([1., 0., 0.])
return diffeq, init, None
def B4():
def diffeq(t, y):
a = torch.sqrt(y[0] * y[0] + y[1] * y[1])
dy0 = -y[1] - y[0] * y[2] / a
dy1 = y[0] - y[1] * y[2] / a
dy2 = y[0] / a
return torch.stack([dy0, dy1, dy2])
def init():
return torch.tensor(0.), torch.tensor([3., 0., 0.])
return diffeq, init, None
def B5():
def diffeq(t, y):
dy0 = y[1] * y[2]
dy1 = -y[0] * y[2]
dy2 = -0.51 * y[0] * y[1]
return torch.stack([dy0, dy1, dy2])
def init():
return torch.tensor(0.), torch.tensor([0., 1., 1.])
return diffeq, init, None
####################################
# Problem Class C. Moderate systems.
####################################
def C1():
A = torch.zeros(10, 10)
A.view(-1)[:-1:11] = -1
A.view(-1)[10::11] = 1
def diffeq(t, y):
return torch.mv(A, y)
def init():
y0 = torch.zeros(10)
y0[0] = 1
return torch.tensor(0.), y0
return diffeq, init, None
def C2():
A = torch.zeros(10, 10)
A.view(-1)[:-1:11] = torch.linspace(-1, -9, 9)
A.view(-1)[10::11] = torch.linspace(1, 9, 9)
def diffeq(t, y):
return torch.mv(A, y)
def init():
y0 = torch.zeros(10)
y0[0] = 1
return torch.tensor(0.), y0
return diffeq, init, None
def C3():
n = 10
A = torch.zeros(n, n)
A.view(-1)[::n + 1] = -2
A.view(-1)[n::n + 1] = 1
A.view(-1)[1::n + 1] = 1
def diffeq(t, y):
return torch.mv(A, y)
def init():
y0 = torch.zeros(n)
y0[0] = 1
return torch.tensor(0.), y0
return diffeq, init, None
def C4():
n = 51
A = torch.zeros(n, n)
A.view(-1)[::n + 1] = -2
A.view(-1)[n::n + 1] = 1
A.view(-1)[1::n + 1] = 1
def diffeq(t, y):
return torch.mv(A, y)
def init():
y0 = torch.zeros(n)
y0[0] = 1
return torch.tensor(0.), y0
return diffeq, init, None
def C5():
k2 = torch.tensor(2.95912208286)
m0 = torch.tensor(1.00000597682)
m = torch.tensor([
0.000954786104043,
0.000285583733151,
0.0000437273164546,
0.0000517759138449,
0.00000277777777778,
]).view(1, 5)
def diffeq(t, y):
# y is 2 x 3 x 5
# y[0] contains y, y[1] contains y'
# second axis indexes space (x,y,z).
# third axis indexes 5 bodies.
dy = y[1, :, :]
y = y[0]
r = torch.sqrt(torch.sum(y**2, 0)).view(1, 5)
d = torch.sqrt(torch.sum((y[:, :, None] - y[:, None, :])**2, 0))
F = m.view(1, 1, 5) * ((y[:, None, :] - y[:, :, None]) / (d * d * d).view(1, 5, 5) + y.view(3, 1, 5) /
(r * r * r).view(1, 1, 5))
F.view(3, 5 * 5)[:, ::6] = 0
ddy = k2 * (-(m0 + m) * y / (r * r * r)) + F.sum(2)
return torch.stack([dy, ddy], 0)
def init():
y0 = torch.tensor([
3.42947415189, 3.35386959711, 1.35494901715, 6.64145542550, 5.97156957878, 2.18231499728, 11.2630437207,
14.6952576794, 6.27960525067, -30.1552268759, 1.65699966404, 1.43785752721, -21.1238353380, 28.4465098142,
15.388265967
]).view(5, 3).transpose(0, 1)
dy0 = torch.tensor([
-.557160570446, .505696783289, .230578543901, -.415570776342, .365682722812, .169143213293, -.325325669158,
.189706021964, .0877265322780, -.0240476254170, -.287659532608, -.117219543175, -.176860753121,
-.216393453025, -.0148647893090
]).view(5, 3).transpose(0, 1)
return torch.tensor(0.), torch.stack([y0, dy0], 0)
return diffeq, init, None
###################################
# Problem Class D. Orbit equations.
###################################
def _DTemplate(eps):
def diffeq(t, y):
r = (y[0]**2 + y[1]**2)**(3 / 2)
dy0 = y[2]
dy1 = y[3]
dy2 = -y[0] / r
dy3 = -y[1] / r
return torch.stack([dy0, dy1, dy2, dy3])
def init():
return torch.tensor(0.), torch.tensor([1 - eps, 0, 0, math.sqrt((1 + eps) / (1 - eps))])
return diffeq, init, None
D1 = lambda: _DTemplate(0.1)
D2 = lambda: _DTemplate(0.3)
D3 = lambda: _DTemplate(0.5)
D4 = lambda: _DTemplate(0.7)
D5 = lambda: _DTemplate(0.9)
##########################################
# Problem Class E. Higher order equations.
##########################################
def E1():
def diffeq(t, y):
dy0 = y[1]
dy1 = -(y[1] / (t + 1) + (1 - 0.25 / (t + 1)**2) * y[0])
return torch.stack([dy0, dy1])
def init():
return torch.tensor(0.), torch.tensor([.671396707141803, .0954005144474744])
return diffeq, init, None
def E2():
def diffeq(t, y):
dy0 = y[1]
dy1 = (1 - y[0]**2) * y[1] - y[0]
return torch.stack([dy0, dy1])
def init():
return torch.tensor(0.), torch.tensor([2., 0.])
return diffeq, init, None
def E3():
def diffeq(t, y):
dy0 = y[1]
dy1 = y[0]**3 / 6 - y[0] + 2 * torch.sin(2.78535 * t)
return torch.stack([dy0, dy1])
def init():
return torch.tensor(0.), torch.tensor([0., 0.])
return diffeq, init, None
def E4():
def diffeq(t, y):
dy0 = y[1]
dy1 = .32 - .4 * y[1]**2
return torch.stack([dy0, dy1])
def init():
return torch.tensor(0.), torch.tensor([30., 0.])
return diffeq, init, None
def E5():
def diffeq(t, y):
dy0 = y[1]
dy1 = torch.sqrt(1 + y[1]**2) / (25 - t)
return torch.stack([dy0, dy1])
def init():
return torch.tensor(0.), torch.tensor([0., 0.])
return diffeq, init, None
###################
# Helper functions.
###################
def _to_tensor(x):
if not torch.is_tensor(x):
x = torch.tensor(x)
return x
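# Editor note (hedged): these problem classes A1-E5 follow the classic DETEST
# benchmark suite (Hull, Enright, Fellen & Sedgwick, "Comparing Numerical
# Methods for Ordinary Differential Equations", SIAM J. Numer. Anal., 1972),
# which run.py in this directory drives.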
| 7,740 | 22.107463 | 119 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_stochastic_end_normal.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
from .odeint import odeint
from torch.distributions import normal
from torch.distributions import uniform
SOLVERS = {
'explicit_adams': AdamsBashforth,
'fixed_adams': AdamsBashforthMoulton,
'adams': VariableCoefficientAdamsBashforth,
'tsit5': Tsit5Solver,
'dopri5': Dopri5Solver,
'bosh3': Bosh3Solver,
'euler': Euler,
'midpoint': Midpoint,
'rk4': RK4,
'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_stochastic_end_normal(func, y0, actual_t, rtol=1e-7, atol=1e-9, shrink_proportion = 0.5, shrink_std = 0.02, method=None, options=None, mode='train',std = 0.001 ):
"""Integrate a system of ordinary differential equations.
Solves the initial value problem for a non-stiff system of first order ODEs:
```
dy/dt = func(t, y), y(t[0]) = y0
```
where y is a Tensor of any shape.
Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
have any floating point or complex dtype.
t: 1-D Tensor holding a sequence of time points for which to solve for
`y`. The initial time point should be the first element of this sequence,
and each time must be larger than the previous time. May have any floating
point dtype. Converted to a Tensor with float64 dtype.
rtol: optional float64 Tensor specifying an upper bound on relative error,
per element of `y`.
atol: optional float64 Tensor specifying an upper bound on absolute error,
per element of `y`.
method: optional string indicating the integration method to use.
options: optional dict of configuring options for the indicated integration
method. Can only be provided if a `method` is explicitly set.
shrink_proportion, shrink_std: accepted for interface compatibility with
the other stochastic-end variants; unused in this function.
std: standard deviation of the normal distribution from which the
terminal time point `t[1]` is resampled.
mode: 'train' restores the original time direction after sampling when
`t` was given in reverse order.
Returns:
y: Tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
Raises:
ValueError: if an invalid `method` is provided.
TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
an invalid dtype.
"""
t = actual_t.clone()
if t.size(0)!=2:
return odeint(func, y0, actual_t, rtol=rtol, atol=atol, method=method, options=options)
integration_time = t.type_as(y0)
rev = False
if t[1]<t[0]:
t = reverse_time(t)
rev = True
#range_time = abs(t[1]-t[0]) - min_length
#print("range_time")
#print(range_time)
#print("shrink_std")
#print(shrink_std)
#print("shrink_proportion")
#print(shrink_proportion)
#m = normal.Normal(t[0]+shrink_proportion, shrink_std)
#m = uniform.Uniform(t[1] - shrink_std , t[1] + shrink_std)
m = normal.Normal(t[1] , std)
integration_time[0]=t[0]
integration_time[1] = m.sample()
#integration_time[1] = max(m.sample(), t[0] + min_length)  # clamped alternative from the v3 variant
#print("actual_t")
#print(actual_t)
#print("integration_time")
#print(integration_time)
#print("=================")
if rev and mode=='train':
integration_time = reverse_time(integration_time)
out = odeint(func, y0, integration_time, rtol=rtol, atol=atol, method=method, options=options)
return out
def reverse_time(t):
temp1 = t[1].item()
temp0 = t[0].item()
t[1] = temp0
t[0] = temp1
return t
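# --- Editor-added usage sketch (hedged) -------------------------------------
# Illustrates the calling convention: exactly two time points, with the
# terminal time resampled from N(t[1], std) while training. Runnable as
# `python -m torchdiffeq._impl.odeint_stochastic_end_normal` so the relative
# imports above resolve.
if __name__ == '__main__':
    func = lambda t, y: -y                     # simple linear decay
    y0 = torch.tensor([1.0])
    t = torch.tensor([0.0, 1.0])
    y = odeint_stochastic_end_normal(func, y0, t, std=0.01, mode='train')
    print(y[1])                                # ~exp(-t_end) for the jittered t_end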
| 7,776 | 35.511737 | 175 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_adjoint_stochastic_end_normal.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
#from .odeint import odeint
from .adjoint import odeint_adjoint
from torch.distributions import normal
from torch.distributions import uniform
SOLVERS = {
'explicit_adams': AdamsBashforth,
'fixed_adams': AdamsBashforthMoulton,
'adams': VariableCoefficientAdamsBashforth,
'tsit5': Tsit5Solver,
'dopri5': Dopri5Solver,
'bosh3': Bosh3Solver,
'euler': Euler,
'midpoint': Midpoint,
'rk4': RK4,
'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_adjoint_stochastic_end_normal(func, y0, actual_t, rtol=1e-6, atol=1e-12, method=None, options=None, shrink_proportion = 0.5, shrink_std = 0.02 , mode='train', std=0.01 ):
"""Integrate a system of ordinary differential equations.
Solves the initial value problem for a non-stiff system of first order ODEs:
```
dy/dt = func(t, y), y(t[0]) = y0
```
where y is a Tensor of any shape.
Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
have any floating point or complex dtype.
t: 1-D Tensor holding a sequence of time points for which to solve for
`y`. The initial time point should be the first element of this sequence,
and each time must be larger than the previous time. May have any floating
point dtype. Converted to a Tensor with float64 dtype.
rtol: optional float64 Tensor specifying an upper bound on relative error,
per element of `y`.
atol: optional float64 Tensor specifying an upper bound on absolute error,
per element of `y`.
method: optional string indicating the integration method to use.
options: optional dict of configuring options for the indicated integration
method. Can only be provided if a `method` is explicitly set.
shrink_proportion, shrink_std: accepted for interface compatibility with
the non-adjoint variant; unused in this function.
std: standard deviation of the normal distribution from which the
terminal time point `t[1]` is resampled.
mode: accepted for interface compatibility; the sampled end time is used
regardless of its value here.
Returns:
y: Tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
Raises:
ValueError: if an invalid `method` is provided.
TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
an invalid dtype.
"""
t = actual_t.clone()
if isinstance(y0, tuple):
integration_time = t.type_as(y0[0])
else:
integration_time = t.type_as(y0)
rev = False
if t[1]<t[0]:
t = reverse_time(t)
rev = True
#range_time = abs(t[1]-t[0]) - min_length
#m = normal.Normal(t[0] + range_time - shrink_std , t[0] + range_time + shrink_std)
#m = uniform.Uniform(t[0] + range_time - shrink_std , t[0] + range_time + shrink_std)
m = normal.Normal(t[1] , std)
integration_time[0]=t[0]
integration_time[1] = m.sample()
#print("actual_t")
#print(actual_t)
#print("integration_time")
#print(integration_time)
#print("=================")
if rev:
integration_time = reverse_time(integration_time)
out = odeint_adjoint( func, y0, integration_time,rtol=rtol,atol=atol,method=method,options=options)
return out
def reverse_time(t):
temp1 = t[1].item()
temp0 = t[0].item()
t[1] = temp0
t[0] = temp1
return t
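# Editor note (hedged): identical in spirit to odeint_stochastic_end_normal,
# but the perturbed-endpoint solve is routed through odeint_adjoint, trading
# backward-pass compute for O(1) memory; `func` must therefore be an
# nn.Module, as odeint_adjoint requires.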
| 4,034 | 35.351351 | 181 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/adjoint.py | import torch
import torch.nn as nn
from . import odeint
from .misc import _flatten, _flatten_convert_none_to_zeros
class OdeintAdjointMethod(torch.autograd.Function):
@staticmethod
def forward(ctx, *args):
assert len(args) >= 8, 'Internal error: all arguments required.'
y0, func, t, flat_params, rtol, atol, method, options = \
args[:-7], args[-7], args[-6], args[-5], args[-4], args[-3], args[-2], args[-1]
ctx.func, ctx.rtol, ctx.atol, ctx.method, ctx.options = func, rtol, atol, method, options
with torch.no_grad():
ans = odeint(func, y0, t, rtol=rtol, atol=atol, method=method, options=options)
ctx.save_for_backward(t, flat_params, *ans)
return ans
@staticmethod
def backward(ctx, *grad_output):
t, flat_params, *ans = ctx.saved_tensors
ans = tuple(ans)
func, rtol, atol, method, options = ctx.func, ctx.rtol, ctx.atol, ctx.method, ctx.options
n_tensors = len(ans)
f_params = tuple(func.parameters())
# TODO: use a nn.Module and call odeint_adjoint to implement higher order derivatives.
def augmented_dynamics(t, y_aug):
# Dynamics of the original system augmented with
# the adjoint wrt y, and an integrator wrt t and args.
y, adj_y = y_aug[:n_tensors], y_aug[n_tensors:2 * n_tensors] # Ignore adj_time and adj_params.
with torch.set_grad_enabled(True):
t = t.to(y[0].device).detach().requires_grad_(True)
y = tuple(y_.detach().requires_grad_(True) for y_ in y)
func_eval = func(t, y)
vjp_t, *vjp_y_and_params = torch.autograd.grad(
func_eval, (t,) + y + f_params,
tuple(-adj_y_ for adj_y_ in adj_y), allow_unused=True, retain_graph=True
)
vjp_y = vjp_y_and_params[:n_tensors]
vjp_params = vjp_y_and_params[n_tensors:]
# autograd.grad returns None if no gradient, set to zero.
vjp_t = torch.zeros_like(t) if vjp_t is None else vjp_t
vjp_y = tuple(torch.zeros_like(y_) if vjp_y_ is None else vjp_y_ for vjp_y_, y_ in zip(vjp_y, y))
vjp_params = _flatten_convert_none_to_zeros(vjp_params, f_params)
if len(f_params) == 0:
vjp_params = torch.tensor(0.).to(vjp_y[0])
return (*func_eval, *vjp_y, vjp_t, vjp_params)
T = ans[0].shape[0]
with torch.no_grad():
adj_y = tuple(grad_output_[-1] for grad_output_ in grad_output)
adj_params = torch.zeros_like(flat_params)
adj_time = torch.tensor(0.).to(t)
time_vjps = []
for i in range(T - 1, 0, -1):
ans_i = tuple(ans_[i] for ans_ in ans)
grad_output_i = tuple(grad_output_[i] for grad_output_ in grad_output)
func_i = func(t[i], ans_i)
# Compute the effect of moving the current time measurement point.
dLd_cur_t = sum(
torch.dot(func_i_.reshape(-1), grad_output_i_.reshape(-1)).reshape(1)
for func_i_, grad_output_i_ in zip(func_i, grad_output_i)
)
adj_time = adj_time - dLd_cur_t
time_vjps.append(dLd_cur_t)
# Run the augmented system backwards in time.
if adj_params.numel() == 0:
adj_params = torch.tensor(0.).to(adj_y[0])
aug_y0 = (*ans_i, *adj_y, adj_time, adj_params)
aug_ans = odeint(
augmented_dynamics, aug_y0,
torch.tensor([t[i], t[i - 1]]), rtol=rtol, atol=atol, method=method, options=options
)
# Unpack aug_ans.
adj_y = aug_ans[n_tensors:2 * n_tensors]
adj_time = aug_ans[2 * n_tensors]
adj_params = aug_ans[2 * n_tensors + 1]
adj_y = tuple(adj_y_[1] if len(adj_y_) > 0 else adj_y_ for adj_y_ in adj_y)
if len(adj_time) > 0: adj_time = adj_time[1]
if len(adj_params) > 0: adj_params = adj_params[1]
adj_y = tuple(adj_y_ + grad_output_[i - 1] for adj_y_, grad_output_ in zip(adj_y, grad_output))
del aug_y0, aug_ans
time_vjps.append(adj_time)
time_vjps = torch.cat(time_vjps[::-1])
return (*adj_y, None, time_vjps, adj_params, None, None, None, None, None)
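# Editor note (hedged summary of the math implemented above): writing
# a(t) = dL/dy(t) for the adjoint state, `augmented_dynamics` integrates the
# coupled system
#     dy/dt           = f(t, y, theta)
#     da/dt           = -a^T (df/dy)
#     d(dL/dtheta)/dt = -a^T (df/dtheta)
# backwards over each interval [t_{i-1}, t_i], while the per-time-point
# gradients dL/dt_i = a(t_i)^T f(t_i, y(t_i)) are accumulated as `dLd_cur_t`;
# this is why the vjps above use -adj_y as the cotangent vector.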
def odeint_adjoint(func, y0, t, rtol=1e-6, atol=1e-12, method=None, options=None):
# We need this in order to access the variables inside this module,
# since we have no other way of getting variables along the execution path.
if not isinstance(func, nn.Module):
raise ValueError('func is required to be an instance of nn.Module.')
tensor_input = False
if torch.is_tensor(y0):
class TupleFunc(nn.Module):
def __init__(self, base_func):
super(TupleFunc, self).__init__()
self.base_func = base_func
def forward(self, t, y):
return (self.base_func(t, y[0]),)
tensor_input = True
y0 = (y0,)
func = TupleFunc(func)
flat_params = _flatten(func.parameters())
ys = OdeintAdjointMethod.apply(*y0, func, t, flat_params, rtol, atol, method, options)
if tensor_input:
ys = ys[0]
return ys
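# --- Editor-added usage sketch (hedged) -------------------------------------
# Minimal end-to-end example; runnable as `python -m torchdiffeq._impl.adjoint`.
if __name__ == '__main__':
    class Decay(nn.Module):
        def __init__(self):
            super(Decay, self).__init__()
            self.rate = nn.Parameter(torch.tensor(1.0))

        def forward(self, t, y):
            return -self.rate * y

    func = Decay()
    y0 = torch.tensor([1.0], requires_grad=True)
    t = torch.linspace(0., 1., 5)
    ys = odeint_adjoint(func, y0, t)
    ys.sum().backward()      # gradients reach y0 and func.rate via the adjoint
    print(y0.grad, func.rate.grad)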
| 5,471 | 39.835821 | 111 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_skip_step.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
from .odeint import odeint
SOLVERS = {
'explicit_adams': AdamsBashforth,
'fixed_adams': AdamsBashforthMoulton,
'adams': VariableCoefficientAdamsBashforth,
'tsit5': Tsit5Solver,
'dopri5': Dopri5Solver,
'bosh3': Bosh3Solver,
'euler': Euler,
'midpoint': Midpoint,
'rk4': RK4,
'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_skip_step(func, y0, actual_t, rtol=1e-7, atol=1e-9, method=None, options=None, num_skips = 5, skip_proportion = 0.01 ):
"""Integrate a system of ordinary differential equations.
Solves the initial value problem for a non-stiff system of first order ODEs:
```
dy/dt = func(t, y), y(t[0]) = y0
```
where y is a Tensor of any shape.
Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
have any floating point or complex dtype.
t: 1-D Tensor holding a sequence of time points for which to solve for
`y`. The initial time point should be the first element of this sequence,
and each time must be larger than the previous time. May have any floating
point dtype. Converted to a Tensor with float64 dtype.
rtol: optional float64 Tensor specifying an upper bound on relative error,
per element of `y`.
atol: optional float64 Tensor specifying an upper bound on absolute error,
per element of `y`.
method: optional string indicating the integration method to use.
options: optional dict of configuring options for the indicated integration
method. Can only be provided if a `method` is explicitly set.
num_skips: number of random interior cut points at which a short segment
of the integration interval is skipped.
skip_proportion: length of each skipped segment, as a fraction of the
full integration interval.
Returns:
y: Tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
Raises:
ValueError: if an invalid `method` is provided.
TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
an invalid dtype.
"""
t = actual_t.clone()
integration_time = t.type_as(y0)
range_time = t[1]-t[0]
skip = range_time * skip_proportion
rand_points = np.sort(np.random.uniform(t[0], t[1],size=num_skips + 2))
rand_points[0]=t[0]
rand_points[num_skips+1] = t[1]  # pin the last cut point to t[1] (array has num_skips+2 entries)
integration_time[0]=rand_points[0]
integration_time[1]=rand_points[1]
print("integration_time")
print(integration_time)
out = odeint(func, y0, integration_time, rtol=rtol, atol=atol, method=method, options=options)
first = out[0]
for i in range(1,rand_points.shape[0]-1):
integration_time[0]=rand_points[i] + skip
integration_time[1]=rand_points[i+1]
if (integration_time[1] - integration_time[0]) > skip:
out = odeint(func, out[1], integration_time, rtol=rtol, atol=atol, method=method, options=options)
result = out.clone()
result[0] = first
return result
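# --- Editor-added usage sketch (hedged) -------------------------------------
# The solver above cuts [t0, t1] at `num_skips` random interior points and
# omits a segment of length skip = (t1 - t0) * skip_proportion after each cut,
# so a few slivers of the time axis are never integrated. Runnable as
# `python -m torchdiffeq._impl.odeint_skip_step`.
if __name__ == '__main__':
    func = lambda t, y: -y
    y0 = torch.tensor([1.0])
    t = torch.tensor([0.0, 1.0])
    print(odeint_skip_step(func, y0, t, num_skips=3, skip_proportion=0.01))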
| 10,205 | 37.659091 | 132 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_stochastic_end_v2_inference.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
from .odeint import odeint
from torch.distributions import normal
SOLVERS = {
'explicit_adams': AdamsBashforth,
'fixed_adams': AdamsBashforthMoulton,
'adams': VariableCoefficientAdamsBashforth,
'tsit5': Tsit5Solver,
'dopri5': Dopri5Solver,
'bosh3': Bosh3Solver,
'euler': Euler,
'midpoint': Midpoint,
'rk4': RK4,
'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_stochastic_end_v2_inference(func, y0, actual_t, rtol=1e-7, atol=1e-9, shrink_proportion = 0.5, shrink_std = 0.02, method=None, options=None, mode='train',min_length=0.01 ):
"""Integrate a system of ordinary differential equations.
Solves the initial value problem for a non-stiff system of first order ODEs:
```
dy/dt = func(t, y), y(t[0]) = y0
```
where y is a Tensor of any shape.
Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
have any floating point or complex dtype.
actual_t: 1-D Tensor holding a sequence of time points for which to solve for
`y`. The initial time point should be the first element of this sequence,
and each time must be larger than the previous time. May have any floating
point dtype. Converted to a Tensor with float64 dtype.
rtol: optional float64 Tensor specifying an upper bound on relative error,
per element of `y`.
atol: optional float64 Tensor specifying an upper bound on absolute error,
per element of `y`.
method: optional string indicating the integration method to use.
options: optional dict of configuring options for the indicated integration
method. Can only be provided if a `method` is explicitly set.
shrink_proportion: fraction of each inter-observation interval that is
integrated before the end state is carried forward.
min_length: sub-intervals shorter than this are skipped.
shrink_std, mode: kept for interface compatibility; unused in this variant.
Returns:
y: Tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
Raises:
ValueError: if an invalid `method` is provided.
TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
an invalid dtype.
"""
num_times = actual_t.size(0)
# Output shape: (timesteps, batch_size, dimension).
result = y0.unsqueeze(0)
# Reusable two-element buffers for each sub-interval's integration times.
integration_time = torch.Tensor([0.0, 1.0]).type_as(y0)
t = torch.Tensor([0.0, 1.0]).type_as(y0)
for i in range(num_times-1):
t[0] = actual_t[i]
t[1] = actual_t[i+1]
# Skip sub-intervals shorter than min_length (no output row is added).
if abs(t[1] - t[0]) < min_length:
continue
rev = False
if t[1]<t[0]:
t = reverse_time(t)
rev = True
# Integrate only the leading shrink_proportion of the sub-interval.
range_time = (t[1] - t[0]) * shrink_proportion
integration_time[0] = t[0]
integration_time[1] = t[0] + range_time
if rev:
integration_time = reverse_time(integration_time)
out = odeint(func, y0, integration_time)
# Carry the end state forward as the next initial value and record it.
y0 = out[1].reshape_as(y0)
result = torch.cat((result, y0.unsqueeze(0)), 0)
return result
def reverse_time(t):
temp1 = t[1].item()
temp0 = t[0].item()
t[1] = temp0
t[0] = temp1
return t
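# ---------------------------------------------------------------------------
# A minimal usage sketch (illustrative, not part of the original module): the
# dynamics function `_demo_decay` and all shapes below are assumptions chosen
# for the demo. It assumes the package is importable, e.g. via
# `python -m torchdiffeq._impl.odeint_stochastic_end_v2_inference`.
def _demo_decay(t, y):
    # Simple linear test dynamics: dy/dt = -0.5 * y.
    return -0.5 * y
if __name__ == '__main__':
    y0 = torch.ones(4, 2)              # (batch, dim) initial state
    ts = torch.linspace(0., 1., 6)     # observation times
    ys = odeint_stochastic_end_v2_inference(_demo_decay, y0, ts)
    # One output row per kept time point (short sub-intervals are skipped).
    print(ys.shape)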
| 7,970 | 34.744395 | 185 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_adjoint_stochastic_end_v3.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
#from .odeint import odeint
from .adjoint import odeint_adjoint
from torch.distributions import normal
from torch.distributions import uniform
SOLVERS = {
'explicit_adams': AdamsBashforth,
'fixed_adams': AdamsBashforthMoulton,
'adams': VariableCoefficientAdamsBashforth,
'tsit5': Tsit5Solver,
'dopri5': Dopri5Solver,
'bosh3': Bosh3Solver,
'euler': Euler,
'midpoint': Midpoint,
'rk4': RK4,
'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_adjoint_stochastic_end_v3(func, y0, actual_t, rtol=1e-6, atol=1e-12, method=None, options=None, shrink_proportion=0.5, shrink_std=0.02, mode='train', min_length=0.01):
"""Integrate a system of ordinary differential equations.
Solves the initial value problem for a non-stiff system of first order ODEs:
```
dy/dt = func(t, y), y(t[0]) = y0
```
where y is a Tensor of any shape.
Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
have any floating point or complex dtype.
actual_t: 1-D Tensor holding a sequence of time points for which to solve for
`y`. The initial time point should be the first element of this sequence,
and each time must be larger than the previous time. May have any floating
point dtype. Converted to a Tensor with float64 dtype.
rtol: optional float64 Tensor specifying an upper bound on relative error,
per element of `y`.
atol: optional float64 Tensor specifying an upper bound on absolute error,
per element of `y`.
method: optional string indicating the integration method to use.
options: optional dict of configuring options for the indicated integration
method. Can only be provided if a `method` is explicitly set.
min_length: minimum amount of the interval excluded when sampling the
stochastic end point.
shrink_proportion, shrink_std, mode: kept for interface compatibility;
unused in this variant.
Returns:
y: Tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
Raises:
ValueError: if an invalid `method` is provided.
TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
an invalid dtype.
"""
t = actual_t.clone()
if isinstance(y0, tuple):
integration_time = t.type_as(y0[0])
else:
integration_time = t.type_as(y0)
rev = False
if t[1]<t[0]:
t = reverse_time(t)
rev = True
# Sample a stochastic end point uniformly around t[1], keeping at least
# min_length of the interval deterministic.
range_time = abs(t[1] - t[0]) - min_length
m = uniform.Uniform(t[1] - range_time, t[1] + range_time)
integration_time[0] = t[0]
integration_time[1] = m.sample()
if rev:
integration_time = reverse_time(integration_time)
out = odeint_adjoint(func, y0, integration_time, rtol=rtol, atol=atol, method=method, options=options)
return out
def reverse_time(t):
temp1 = t[1].item()
temp0 = t[0].item()
t[1] = temp0
t[0] = temp1
return t
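# ---------------------------------------------------------------------------
# A minimal training-style sketch (illustrative, not part of the original
# module): `_DemoODE` is an assumed toy model. Because the end point of the
# integration interval is sampled stochastically, repeated calls integrate
# slightly different horizons, while the adjoint method still provides
# gradients with respect to the initial state and model parameters.
if __name__ == '__main__':
    class _DemoODE(torch.nn.Module):
        def __init__(self):
            super(_DemoODE, self).__init__()
            self.lin = torch.nn.Linear(2, 2)
        def forward(self, t, y):
            return torch.tanh(self.lin(y))
    func = _DemoODE()
    y0 = torch.full((3, 2), 0.1, requires_grad=True)
    ts = torch.tensor([0.0, 1.0])
    out = odeint_adjoint_stochastic_end_v3(func, y0, ts)
    out[1].sum().backward()
    print(y0.grad.shape)   # gradients w.r.t. the initial state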
| 4,017 | 35.198198 | 184 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_stochastic_end.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
from .odeint import odeint
SOLVERS = {
'explicit_adams': AdamsBashforth,
'fixed_adams': AdamsBashforthMoulton,
'adams': VariableCoefficientAdamsBashforth,
'tsit5': Tsit5Solver,
'dopri5': Dopri5Solver,
'bosh3': Bosh3Solver,
'euler': Euler,
'midpoint': Midpoint,
'rk4': RK4,
'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_stochastic_end(func, y0, actual_t, rtol=1e-7, atol=1e-9, num_skips=5, skip_proportion=0.01, method=None, options=None):
"""Integrate a system of ordinary differential equations.
Solves the initial value problem for a non-stiff system of first order ODEs:
```
dy/dt = func(t, y), y(t[0]) = y0
```
where y is a Tensor of any shape.
Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
have any floating point or complex dtype.
actual_t: 1-D Tensor holding a sequence of time points for which to solve for
`y`. The initial time point should be the first element of this sequence,
and each time must be larger than the previous time. May have any floating
point dtype. Converted to a Tensor with float64 dtype.
rtol: optional float64 Tensor specifying an upper bound on relative error,
per element of `y`.
atol: optional float64 Tensor specifying an upper bound on absolute error,
per element of `y`.
method: optional string indicating the integration method to use.
options: optional dict of configuring options for the indicated integration
method. Can only be provided if a `method` is explicitly set.
num_skips: number of random interior break points drawn in [t[0], t[1]].
skip_proportion: fraction of the interval used as the skip offset past the
final break point.
Returns:
y: Tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
Raises:
ValueError: if an invalid `method` is provided.
TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
an invalid dtype.
"""
t = actual_t.clone()
integration_time = t.type_as(y0)
# Draw sorted random break points in [t[0], t[1]] and integrate only up to
# the first one; the final break point is pushed past t[1] by `skip`.
range_time = t[1] - t[0]
skip = range_time * skip_proportion
rand_points = np.sort(np.random.uniform(t[0], t[1], size=2 * num_skips + 2))
rand_points[0] = t[0]
rand_points[2 * num_skips + 1] = t[1] + skip
integration_time[0] = rand_points[0]
integration_time[1] = rand_points[1]
out = odeint(func, y0, integration_time)
return out
| 7,165 | 35.01005 | 137 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/adaptive_heun.py | # Based on https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/integrate
import torch
from .misc import (
_scaled_dot_product, _convert_to_tensor, _is_finite, _select_initial_step, _handle_unused_kwargs, _is_iterable,
_optimal_step_size, _compute_error_ratio
)
from .solvers import AdaptiveStepsizeODESolver
from .interp import _interp_fit, _interp_evaluate
from .rk_common import _RungeKuttaState, _ButcherTableau, _runge_kutta_step
_ADAPTIVE_HEUN_TABLEAU = _ButcherTableau(
alpha=[1.],
beta=[
[1.],
],
c_sol=[0.5, 0.5],
c_error=[
0.5,
-0.5,
],
)
AH_C_MID = [
0.5, 0.
]
def _interp_fit_adaptive_heun(y0, y1, k, dt, tableau=_ADAPTIVE_HEUN_TABLEAU):
"""Fit an interpolating polynomial to the results of a Runge-Kutta step."""
dt = dt.type_as(y0[0])
y_mid = tuple(y0_ + _scaled_dot_product(dt, AH_C_MID, k_) for y0_, k_ in zip(y0, k))
f0 = tuple(k_[0] for k_ in k)
f1 = tuple(k_[-1] for k_ in k)
return _interp_fit(y0, y1, y_mid, f0, f1, dt)
def _abs_square(x):
return torch.mul(x, x)
def _ta_append(list_of_tensors, value):
"""Append a value to the end of a list of PyTorch tensors."""
list_of_tensors.append(value)
return list_of_tensors
class AdaptiveHeunSolver(AdaptiveStepsizeODESolver):
def __init__(
self, func, y0, rtol, atol, first_step=None, safety=0.9, ifactor=10.0, dfactor=0.2, max_num_steps=2**31 - 1,
**unused_kwargs
):
_handle_unused_kwargs(self, unused_kwargs)
del unused_kwargs
self.func = func
self.y0 = y0
self.rtol = rtol if _is_iterable(rtol) else [rtol] * len(y0)
self.atol = atol if _is_iterable(atol) else [atol] * len(y0)
self.first_step = first_step
self.safety = _convert_to_tensor(safety, dtype=torch.float64, device=y0[0].device)
self.ifactor = _convert_to_tensor(ifactor, dtype=torch.float64, device=y0[0].device)
self.dfactor = _convert_to_tensor(dfactor, dtype=torch.float64, device=y0[0].device)
self.max_num_steps = _convert_to_tensor(max_num_steps, dtype=torch.int32, device=y0[0].device)
def before_integrate(self, t):
f0 = self.func(t[0].type_as(self.y0[0]), self.y0)
if self.first_step is None:
first_step = _select_initial_step(self.func, t[0], self.y0, 1, self.rtol[0], self.atol[0], f0=f0).to(t)
else:
first_step = _convert_to_tensor(self.first_step, dtype=t.dtype, device=t.device)
self.rk_state = _RungeKuttaState(self.y0, f0, t[0], t[0], first_step, interp_coeff=[self.y0] * 5)
def advance(self, next_t):
"""Interpolate through the next time point, integrating as necessary."""
n_steps = 0
while next_t > self.rk_state.t1:
assert n_steps < self.max_num_steps, 'max_num_steps exceeded ({}>={})'.format(n_steps, self.max_num_steps)
self.rk_state = self._adaptive_heun_step(self.rk_state)
n_steps += 1
return _interp_evaluate(self.rk_state.interp_coeff, self.rk_state.t0, self.rk_state.t1, next_t)
def _adaptive_heun_step(self, rk_state):
"""Take an adaptive Runge-Kutta step to integrate the ODE."""
y0, f0, _, t0, dt, interp_coeff = rk_state
########################################################
# Assertions #
########################################################
assert t0 + dt > t0, 'underflow in dt {}'.format(dt.item())
for y0_ in y0:
assert _is_finite(torch.abs(y0_)), 'non-finite values in state `y`: {}'.format(y0_)
y1, f1, y1_error, k = _runge_kutta_step(self.func, y0, f0, t0, dt, tableau=_ADAPTIVE_HEUN_TABLEAU)
########################################################
# Error Ratio #
########################################################
mean_sq_error_ratio = _compute_error_ratio(y1_error, atol=self.atol, rtol=self.rtol, y0=y0, y1=y1)
accept_step = (torch.tensor(mean_sq_error_ratio) <= 1).all()
########################################################
# Update RK State #
########################################################
y_next = y1 if accept_step else y0
f_next = f1 if accept_step else f0
t_next = t0 + dt if accept_step else t0
interp_coeff = _interp_fit_adaptive_heun(y0, y1, k, dt) if accept_step else interp_coeff
dt_next = _optimal_step_size(
dt, mean_sq_error_ratio, safety=self.safety, ifactor=self.ifactor, dfactor=self.dfactor, order=5
)
rk_state = _RungeKuttaState(y_next, f_next, t0, t_next, dt_next, interp_coeff)
return rk_state
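# ---------------------------------------------------------------------------
# A standalone sketch of the scheme the tableau above encodes (written out
# here for illustration; it does not call the solver class). One Heun step
# advances with the trapezoidal combination c_sol = [1/2, 1/2], and the
# embedded error estimate uses c_error = [1/2, -1/2], i.e. the difference
# between the Heun and Euler solutions.
def _demo_heun_step(f, t0, y0, dt):
    k1 = f(t0, y0)
    k2 = f(t0 + dt, y0 + dt * k1)
    y1 = y0 + 0.5 * dt * (k1 + k2)   # second-order solution (c_sol)
    err = 0.5 * dt * (k1 - k2)       # embedded error estimate (c_error)
    return y1, err
if __name__ == '__main__':
    y1, err = _demo_heun_step(lambda t, y: -y, torch.tensor(0.), torch.tensor(1.), torch.tensor(0.1))
    print(float(y1), float(err))     # ~exp(-0.1) and a small error estimate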
| 4,839 | 42.214286 | 118 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_stochastic_end_v3.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
from .odeint import odeint
from torch.distributions import normal
from torch.distributions import uniform
SOLVERS = {
'explicit_adams': AdamsBashforth,
'fixed_adams': AdamsBashforthMoulton,
'adams': VariableCoefficientAdamsBashforth,
'tsit5': Tsit5Solver,
'dopri5': Dopri5Solver,
'bosh3': Bosh3Solver,
'euler': Euler,
'midpoint': Midpoint,
'rk4': RK4,
'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_stochastic_end_v3(func, y0, actual_t, rtol=1e-7, atol=1e-9, shrink_proportion=0.5, shrink_std=0.02, method=None, options=None, mode='train', min_length=0.001):
"""Integrate a system of ordinary differential equations.
Solves the initial value problem for a non-stiff system of first order ODEs:
```
dy/dt = func(t, y), y(t[0]) = y0
```
where y is a Tensor of any shape.
Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
have any floating point or complex dtype.
actual_t: 1-D Tensor holding a sequence of time points for which to solve for
`y`. The initial time point should be the first element of this sequence,
and each time must be larger than the previous time. May have any floating
point dtype. Converted to a Tensor with float64 dtype.
rtol: optional float64 Tensor specifying an upper bound on relative error,
per element of `y`.
atol: optional float64 Tensor specifying an upper bound on absolute error,
per element of `y`.
method: optional string indicating the integration method to use.
options: optional dict of configuring options for the indicated integration
method. Can only be provided if a `method` is explicitly set.
min_length: minimum amount of the interval excluded when sampling the
stochastic end point.
mode: if 'train', reversed intervals are restored to their original
direction after the end point is sampled.
shrink_proportion, shrink_std: kept for interface compatibility; unused in
this variant.
Returns:
y: Tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
Raises:
ValueError: if an invalid `method` is provided.
TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
an invalid dtype.
"""
t = actual_t.clone()
# Stochastic end points are defined for a single interval; otherwise fall
# back to plain odeint.
if t.size(0) != 2:
return odeint(func, y0, actual_t)
integration_time = t.type_as(y0)
rev = False
if t[1] < t[0]:
t = reverse_time(t)
rev = True
# Sample the end point uniformly around t[1], keeping at least min_length
# of the interval deterministic.
range_time = abs(t[1] - t[0]) - min_length
m = uniform.Uniform(t[1] - range_time, t[1] + range_time)
integration_time[0] = t[0]
integration_time[1] = m.sample()
if rev and mode == 'train':
integration_time = reverse_time(integration_time)
out = odeint(func, y0, integration_time)
return out
def reverse_time(t):
temp1 = t[1].item()
temp0 = t[0].item()
t[1] = temp0
t[0] = temp1
return t
| 7,805 | 35.647887 | 176 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/bosh3.py | import torch
from .misc import (
_scaled_dot_product, _convert_to_tensor, _is_finite, _select_initial_step, _handle_unused_kwargs, _is_iterable,
_optimal_step_size, _compute_error_ratio
)
from .solvers import AdaptiveStepsizeODESolver
from .interp import _interp_fit, _interp_evaluate
from .rk_common import _RungeKuttaState, _ButcherTableau, _runge_kutta_step
_BOGACKI_SHAMPINE_TABLEAU = _ButcherTableau(
alpha=[1/2, 3/4, 1.],
beta=[
[1/2],
[0., 3/4],
[2/9, 1/3, 4/9]
],
c_sol=[2/9, 1/3, 4/9, 0.],
c_error=[2/9-7/24, 1/3-1/4, 4/9-1/3, -1/8],
)
BS_C_MID = [ 0., 0.5, 0., 0. ]
def _interp_fit_bosh3(y0, y1, k, dt):
"""Fit an interpolating polynomial to the results of a Runge-Kutta step."""
dt = dt.type_as(y0[0])
y_mid = tuple(y0_ + _scaled_dot_product(dt, BS_C_MID, k_) for y0_, k_ in zip(y0, k))
f0 = tuple(k_[0] for k_ in k)
f1 = tuple(k_[-1] for k_ in k)
return _interp_fit(y0, y1, y_mid, f0, f1, dt)
class Bosh3Solver(AdaptiveStepsizeODESolver):
def __init__(
self, func, y0, rtol, atol, first_step=None, safety=0.9, ifactor=10.0, dfactor=0.2, max_num_steps=2**31 - 1,
**unused_kwargs
):
_handle_unused_kwargs(self, unused_kwargs)
del unused_kwargs
self.func = func
self.y0 = y0
self.rtol = rtol if _is_iterable(rtol) else [rtol] * len(y0)
self.atol = atol if _is_iterable(atol) else [atol] * len(y0)
self.first_step = first_step
self.safety = _convert_to_tensor(safety, dtype=torch.float64, device=y0[0].device)
self.ifactor = _convert_to_tensor(ifactor, dtype=torch.float64, device=y0[0].device)
self.dfactor = _convert_to_tensor(dfactor, dtype=torch.float64, device=y0[0].device)
self.max_num_steps = _convert_to_tensor(max_num_steps, dtype=torch.int32, device=y0[0].device)
def before_integrate(self, t):
f0 = self.func(t[0].type_as(self.y0[0]), self.y0)
if self.first_step is None:
first_step = _select_initial_step(self.func, t[0], self.y0, 2, self.rtol[0], self.atol[0], f0=f0).to(t)
else:
first_step = _convert_to_tensor(self.first_step, dtype=t.dtype, device=t.device)
self.rk_state = _RungeKuttaState(self.y0, f0, t[0], t[0], first_step, interp_coeff=[self.y0] * 5)
def advance(self, next_t):
"""Interpolate through the next time point, integrating as necessary."""
n_steps = 0
while next_t > self.rk_state.t1:
assert n_steps < self.max_num_steps, 'max_num_steps exceeded ({}>={})'.format(n_steps, self.max_num_steps)
self.rk_state = self._adaptive_bosh3_step(self.rk_state)
n_steps += 1
return _interp_evaluate(self.rk_state.interp_coeff, self.rk_state.t0, self.rk_state.t1, next_t)
def _adaptive_bosh3_step(self, rk_state):
"""Take an adaptive Runge-Kutta step to integrate the ODE."""
y0, f0, _, t0, dt, interp_coeff = rk_state
########################################################
# Assertions #
########################################################
assert t0 + dt > t0, 'underflow in dt {}'.format(dt.item())
for y0_ in y0:
assert _is_finite(torch.abs(y0_)), 'non-finite values in state `y`: {}'.format(y0_)
y1, f1, y1_error, k = _runge_kutta_step(self.func, y0, f0, t0, dt, tableau=_BOGACKI_SHAMPINE_TABLEAU)
########################################################
# Error Ratio #
########################################################
mean_sq_error_ratio = _compute_error_ratio(y1_error, atol=self.atol, rtol=self.rtol, y0=y0, y1=y1)
accept_step = (torch.tensor(mean_sq_error_ratio) <= 1).all()
########################################################
# Update RK State #
########################################################
y_next = y1 if accept_step else y0
f_next = f1 if accept_step else f0
t_next = t0 + dt if accept_step else t0
interp_coeff = _interp_fit_bosh3(y0, y1, k, dt) if accept_step else interp_coeff
dt_next = _optimal_step_size(
dt, mean_sq_error_ratio, safety=self.safety, ifactor=self.ifactor, dfactor=self.dfactor, order=3
)
rk_state = _RungeKuttaState(y_next, f_next, t0, t_next, dt_next, interp_coeff)
return rk_state
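# ---------------------------------------------------------------------------
# A standalone sketch of one Bogacki-Shampine step (illustrative only; it
# re-derives the tableau above instead of calling the solver class). Note the
# FSAL structure: the last stage is evaluated at the 3rd-order solution, and
# c_error weights the difference between the 3rd- and 2nd-order results.
def _demo_bosh3_step(f, t0, y0, dt):
    k1 = f(t0, y0)
    k2 = f(t0 + dt / 2, y0 + dt / 2 * k1)
    k3 = f(t0 + 3 * dt / 4, y0 + 3 * dt / 4 * k2)
    y1 = y0 + dt * (2 / 9 * k1 + 1 / 3 * k2 + 4 / 9 * k3)   # c_sol
    k4 = f(t0 + dt, y1)                                     # FSAL stage
    err = dt * ((2 / 9 - 7 / 24) * k1 + (1 / 3 - 1 / 4) * k2
                + (4 / 9 - 1 / 3) * k3 - k4 / 8)            # c_error
    return y1, err
if __name__ == '__main__':
    y1, err = _demo_bosh3_step(lambda t, y: -y, torch.tensor(0.), torch.tensor(1.), torch.tensor(0.1))
    print(float(y1), float(err))   # ~exp(-0.1); error estimate ~1e-5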
| 4,552 | 44.989899 | 118 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/misc.py | import warnings
import torch
def _flatten(sequence):
flat = [p.contiguous().view(-1) for p in sequence]
return torch.cat(flat) if len(flat) > 0 else torch.tensor([])
def _flatten_convert_none_to_zeros(sequence, like_sequence):
flat = [
p.contiguous().view(-1) if p is not None else torch.zeros_like(q).view(-1)
for p, q in zip(sequence, like_sequence)
]
return torch.cat(flat) if len(flat) > 0 else torch.tensor([])
def _possibly_nonzero(x):
return isinstance(x, torch.Tensor) or x != 0
def _scaled_dot_product(scale, xs, ys):
"""Calculate a scaled, vector inner product between lists of Tensors."""
# Using _possibly_nonzero lets us avoid wasted computation.
return sum([(scale * x) * y for x, y in zip(xs, ys) if _possibly_nonzero(x) or _possibly_nonzero(y)])
def _dot_product(xs, ys):
"""Calculate the vector inner product between two lists of Tensors."""
return sum([x * y for x, y in zip(xs, ys)])
def _has_converged(y0, y1, rtol, atol):
"""Checks that each element is within the error tolerance."""
error_tol = tuple(atol + rtol * torch.max(torch.abs(y0_), torch.abs(y1_)) for y0_, y1_ in zip(y0, y1))
error = tuple(torch.abs(y0_ - y1_) for y0_, y1_ in zip(y0, y1))
return all((error_ < error_tol_).all() for error_, error_tol_ in zip(error, error_tol))
def _convert_to_tensor(a, dtype=None, device=None):
if not isinstance(a, torch.Tensor):
a = torch.tensor(a)
if dtype is not None:
a = a.type(dtype)
if device is not None:
a = a.to(device)
return a
def _is_finite(tensor):
_check = (tensor == float('inf')) + (tensor == float('-inf')) + torch.isnan(tensor)
return not _check.any()
def _decreasing(t):
return (t[1:] < t[:-1]).all()
def _assert_increasing(t):
assert (t[1:] > t[:-1]).all(), 't must be strictly increasing or decreasing'
def _is_iterable(inputs):
try:
iter(inputs)
return True
except TypeError:
return False
def _norm(x):
"""Compute RMS norm."""
if torch.is_tensor(x):
return x.norm() / (x.numel()**0.5)
else:
return torch.sqrt(sum(x_.norm()**2 for x_ in x) / sum(x_.numel() for x_ in x))
def _handle_unused_kwargs(solver, unused_kwargs):
if len(unused_kwargs) > 0:
warnings.warn('{}: Unexpected arguments {}'.format(solver.__class__.__name__, unused_kwargs))
def _select_initial_step(fun, t0, y0, order, rtol, atol, f0=None):
"""Empirically select a good initial step.
The algorithm is described in [1]_.
Parameters
----------
fun : callable
Right-hand side of the system.
t0 : float
Initial value of the independent variable.
y0 : ndarray, shape (n,)
Initial value of the dependent variable.
direction : float
Integration direction.
order : float
Method order.
rtol : float
Desired relative tolerance.
atol : float
Desired absolute tolerance.
Returns
-------
h_abs : float
Absolute value of the suggested initial step.
References
----------
.. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
Equations I: Nonstiff Problems", Sec. II.4.
"""
t0 = t0.to(y0[0])
if f0 is None:
f0 = fun(t0, y0)
rtol = rtol if _is_iterable(rtol) else [rtol] * len(y0)
atol = atol if _is_iterable(atol) else [atol] * len(y0)
scale = tuple(atol_ + torch.abs(y0_) * rtol_ for y0_, atol_, rtol_ in zip(y0, atol, rtol))
d0 = tuple(_norm(y0_ / scale_) for y0_, scale_ in zip(y0, scale))
d1 = tuple(_norm(f0_ / scale_) for f0_, scale_ in zip(f0, scale))
if max(d0).item() < 1e-5 or max(d1).item() < 1e-5:
h0 = torch.tensor(1e-6).to(t0)
else:
h0 = 0.01 * max(d0_ / d1_ for d0_, d1_ in zip(d0, d1))
y1 = tuple(y0_ + h0 * f0_ for y0_, f0_ in zip(y0, f0))
f1 = fun(t0 + h0, y1)
d2 = tuple(_norm((f1_ - f0_) / scale_) / h0 for f1_, f0_, scale_ in zip(f1, f0, scale))
if max(d1).item() <= 1e-15 and max(d2).item() <= 1e-15:
h1 = torch.max(torch.tensor(1e-6).to(h0), h0 * 1e-3)
else:
h1 = (0.01 / max(d1 + d2))**(1. / float(order + 1))
return torch.min(100 * h0, h1)
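# A quick sanity check of the heuristic above (illustrative assumption: a
# scalar exponential decay, wrapped in the tuple-of-tensors convention the
# solvers use internally).
if __name__ == '__main__':
    fun = lambda t, y: (-y[0],)
    h0 = _select_initial_step(fun, torch.tensor(0.), (torch.tensor([1.]),), order=4, rtol=1e-6, atol=1e-9)
    print(float(h0))   # a small positive first step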
def _compute_error_ratio(error_estimate, error_tol=None, rtol=None, atol=None, y0=None, y1=None):
if error_tol is None:
assert rtol is not None and atol is not None and y0 is not None and y1 is not None
rtol = rtol if _is_iterable(rtol) else [rtol] * len(y0)
atol = atol if _is_iterable(atol) else [atol] * len(y0)
error_tol = tuple(
atol_ + rtol_ * torch.max(torch.abs(y0_), torch.abs(y1_))
for atol_, rtol_, y0_, y1_ in zip(atol, rtol, y0, y1)
)
error_ratio = tuple(error_estimate_ / error_tol_ for error_estimate_, error_tol_ in zip(error_estimate, error_tol))
mean_sq_error_ratio = tuple(torch.mean(error_ratio_ * error_ratio_) for error_ratio_ in error_ratio)
return mean_sq_error_ratio
def _optimal_step_size(last_step, mean_error_ratio, safety=0.9, ifactor=10.0, dfactor=0.2, order=5):
"""Calculate the optimal size for the next step."""
mean_error_ratio = max(mean_error_ratio) # Compute step size based on highest ratio.
if mean_error_ratio == 0:
return last_step * ifactor
if mean_error_ratio < 1:
dfactor = _convert_to_tensor(1, dtype=torch.float64, device=mean_error_ratio.device)
error_ratio = torch.sqrt(mean_error_ratio).to(last_step)
exponent = torch.tensor(1 / order).to(last_step)
factor = torch.max(1 / ifactor, torch.min(error_ratio**exponent / safety, 1 / dfactor))
return last_step / factor
def _check_inputs(func, y0, t):
tensor_input = False
if torch.is_tensor(y0):
tensor_input = True
y0 = (y0,)
_base_nontuple_func_ = func
func = lambda t, y: (_base_nontuple_func_(t, y[0]),)
assert isinstance(y0, tuple), 'y0 must be either a torch.Tensor or a tuple'
for y0_ in y0:
assert torch.is_tensor(y0_), 'each element must be a torch.Tensor but received {}'.format(type(y0_))
if _decreasing(t):
t = -t
_base_reverse_func = func
func = lambda t, y: tuple(-f_ for f_ in _base_reverse_func(-t, y))
for y0_ in y0:
if not torch.is_floating_point(y0_):
raise TypeError('`y0` must be a floating point Tensor but is a {}'.format(y0_.type()))
if not torch.is_floating_point(t):
raise TypeError('`t` must be a floating point Tensor but is a {}'.format(t.type()))
return tensor_input, func, y0, t
| 6,621 | 32.785714 | 119 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_adjoint_stochastic_end.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
#from .odeint import odeint
from .adjoint import odeint_adjoint
SOLVERS = {
'explicit_adams': AdamsBashforth,
'fixed_adams': AdamsBashforthMoulton,
'adams': VariableCoefficientAdamsBashforth,
'tsit5': Tsit5Solver,
'dopri5': Dopri5Solver,
'bosh3': Bosh3Solver,
'euler': Euler,
'midpoint': Midpoint,
'rk4': RK4,
'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_adjoint_stochastic_end(func, y0, actual_t, rtol=1e-6, atol=1e-12, method=None, options=None, num_skips=10, skip_proportion=0.80):
"""Integrate a system of ordinary differential equations.
Solves the initial value problem for a non-stiff system of first order ODEs:
```
dy/dt = func(t, y), y(t[0]) = y0
```
where y is a Tensor of any shape.
Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
have any floating point or complex dtype.
actual_t: 1-D Tensor holding a sequence of time points for which to solve for
`y`. The initial time point should be the first element of this sequence,
and each time must be larger than the previous time. May have any floating
point dtype. Converted to a Tensor with float64 dtype.
rtol: optional float64 Tensor specifying an upper bound on relative error,
per element of `y`.
atol: optional float64 Tensor specifying an upper bound on absolute error,
per element of `y`.
method: optional string indicating the integration method to use.
options: optional dict of configuring options for the indicated integration
method. Can only be provided if a `method` is explicitly set.
num_skips: number of random interior break points drawn in [t[0], t[1]].
skip_proportion: fraction of the interval added past the first break point.
Returns:
y: Tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
Raises:
ValueError: if an invalid `method` is provided.
TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
an invalid dtype.
"""
t = actual_t.clone()
if isinstance(y0, tuple):
integration_time = t.type_as(y0[0])
else:
integration_time = t.type_as(y0)
range_time = t[1] - t[0]
skip = range_time * skip_proportion
# Draw sorted random break points in [t[0], t[1]], pin the first to t[0],
# and integrate up to the first interior break point plus the skip offset.
rand_points = torch.Tensor(num_skips + 2).type_as(integration_time)
rand_points.uniform_(t[0], t[1])
rand_points, _ = rand_points.sort()
rand_points[0] = t[0]
integration_time[0] = rand_points[0]
integration_time[1] = rand_points[1] + skip
out = odeint_adjoint(func, y0, integration_time, rtol=rtol, atol=atol, method=method, options=options)
return out
| 3,574 | 38.722222 | 146 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/interp.py | import torch
from .misc import _convert_to_tensor, _dot_product
def _interp_fit(y0, y1, y_mid, f0, f1, dt):
"""Fit coefficients for 4th order polynomial interpolation.
Args:
y0: function value at the start of the interval.
y1: function value at the end of the interval.
y_mid: function value at the mid-point of the interval.
f0: derivative value at the start of the interval.
f1: derivative value at the end of the interval.
dt: width of the interval.
Returns:
List of coefficients `[a, b, c, d, e]` for interpolating with the polynomial
`p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e` for values of `x`
between 0 (start of interval) and 1 (end of interval).
"""
a = tuple(
_dot_product([-2 * dt, 2 * dt, -8, -8, 16], [f0_, f1_, y0_, y1_, y_mid_])
for f0_, f1_, y0_, y1_, y_mid_ in zip(f0, f1, y0, y1, y_mid)
)
b = tuple(
_dot_product([5 * dt, -3 * dt, 18, 14, -32], [f0_, f1_, y0_, y1_, y_mid_])
for f0_, f1_, y0_, y1_, y_mid_ in zip(f0, f1, y0, y1, y_mid)
)
c = tuple(
_dot_product([-4 * dt, dt, -11, -5, 16], [f0_, f1_, y0_, y1_, y_mid_])
for f0_, f1_, y0_, y1_, y_mid_ in zip(f0, f1, y0, y1, y_mid)
)
d = tuple(dt * f0_ for f0_ in f0)
e = y0
return [a, b, c, d, e]
def _interp_evaluate(coefficients, t0, t1, t):
"""Evaluate polynomial interpolation at the given time point.
Args:
coefficients: list of Tensor coefficients as created by `interp_fit`.
t0: scalar float64 Tensor giving the start of the interval.
t1: scalar float64 Tensor giving the end of the interval.
t: scalar float64 Tensor giving the desired interpolation point.
Returns:
Polynomial interpolation of the coefficients at time `t`.
"""
dtype = coefficients[0][0].dtype
device = coefficients[0][0].device
t0 = _convert_to_tensor(t0, dtype=dtype, device=device)
t1 = _convert_to_tensor(t1, dtype=dtype, device=device)
t = _convert_to_tensor(t, dtype=dtype, device=device)
assert (t0 <= t) & (t <= t1), 'invalid interpolation, fails `t0 <= t <= t1`: {}, {}, {}'.format(t0, t, t1)
x = ((t - t0) / (t1 - t0)).type(dtype).to(device)
xs = [torch.tensor(1).type(dtype).to(device), x]
for _ in range(2, len(coefficients)):
xs.append(xs[-1] * x)
return tuple(_dot_product(coefficients_, reversed(xs)) for coefficients_ in zip(*coefficients))
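# ---------------------------------------------------------------------------
# A minimal self-check of the fit/evaluate pair above (illustrative only):
# for y(t) = t on [0, 1] the fitted quartic reduces to p(x) = x, so
# evaluating at any interior t must return t itself.
if __name__ == '__main__':
    one = torch.tensor([1.0])
    coeffs = _interp_fit(y0=(torch.tensor([0.0]),), y1=(one,),
                         y_mid=(torch.tensor([0.5]),), f0=(one,), f1=(one,),
                         dt=torch.tensor(1.0))
    y_t = _interp_evaluate(coeffs, t0=0.0, t1=1.0, t=0.3)
    print(float(y_t[0]))   # 0.3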
| 2,501 | 36.909091 | 110 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/tsit5.py | import torch
from .misc import _scaled_dot_product, _convert_to_tensor, _is_finite, _select_initial_step, _handle_unused_kwargs
from .solvers import AdaptiveStepsizeODESolver
from .rk_common import _RungeKuttaState, _ButcherTableau, _runge_kutta_step
# Parameters from Tsitouras (2011).
_TSITOURAS_TABLEAU = _ButcherTableau(
alpha=[0.161, 0.327, 0.9, 0.9800255409045097, 1., 1.],
beta=[
[0.161],
[-0.008480655492357, 0.3354806554923570],
[2.897153057105494, -6.359448489975075, 4.362295432869581],
[5.32586482843925895, -11.74888356406283, 7.495539342889836, -0.09249506636175525],
[5.86145544294642038, -12.92096931784711, 8.159367898576159, -0.071584973281401006, -0.02826905039406838],
[0.09646076681806523, 0.01, 0.4798896504144996, 1.379008574103742, -3.290069515436081, 2.324710524099774],
],
c_sol=[0.09646076681806523, 0.01, 0.4798896504144996, 1.379008574103742, -3.290069515436081, 2.324710524099774, 0],
c_error=[
0.09646076681806523 - 0.001780011052226,
0.01 - 0.000816434459657,
0.4798896504144996 - -0.007880878010262,
1.379008574103742 - 0.144711007173263,
-3.290069515436081 - -0.582357165452555,
2.324710524099774 - 0.458082105929187,
-1 / 66,
],
)
def _interp_coeff_tsit5(t0, dt, eval_t):
t = float((eval_t - t0) / dt)
b1 = -1.0530884977290216 * t * (t - 1.3299890189751412) * (t**2 - 1.4364028541716351 * t + 0.7139816917074209)
b2 = 0.1017 * t**2 * (t**2 - 2.1966568338249754 * t + 1.2949852507374631)
b3 = 2.490627285651252793 * t**2 * (t**2 - 2.38535645472061657 * t + 1.57803468208092486)
b4 = -16.54810288924490272 * (t - 1.21712927295533244) * (t - 0.61620406037800089) * t**2
b5 = 47.37952196281928122 * (t - 1.203071208372362603) * (t - 0.658047292653547382) * t**2
b6 = -34.87065786149660974 * (t - 1.2) * (t - 0.666666666666666667) * t**2
b7 = 2.5 * (t - 1) * (t - 0.6) * t**2
return [b1, b2, b3, b4, b5, b6, b7]
def _interp_eval_tsit5(t0, t1, k, eval_t):
dt = t1 - t0
y0 = tuple(k_[0] for k_ in k)
interp_coeff = _interp_coeff_tsit5(t0, dt, eval_t)
y_t = tuple(y0_ + _scaled_dot_product(dt, interp_coeff, k_) for y0_, k_ in zip(y0, k))
return y_t
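# A small self-check (illustrative only): at the right end of the step the
# dense-output weights must reproduce the solution weights, b_i(1) == c_sol_i,
# so the interpolant is continuous across step boundaries (up to the precision
# of the published coefficients).
if __name__ == '__main__':
    for b_i, c_i in zip(_interp_coeff_tsit5(0., 1., 1.), _TSITOURAS_TABLEAU.c_sol):
        assert abs(b_i - c_i) < 1e-6, (b_i, c_i)
    print('tsit5 dense output matches c_sol at the step end')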
def _optimal_step_size(last_step, mean_error_ratio, safety=0.9, ifactor=10.0, dfactor=0.2, order=5):
"""Calculate the optimal size for the next Runge-Kutta step."""
if mean_error_ratio == 0:
return last_step * ifactor
if mean_error_ratio < 1:
dfactor = _convert_to_tensor(1, dtype=torch.float64, device=mean_error_ratio.device)
error_ratio = torch.sqrt(mean_error_ratio).type_as(last_step)
exponent = torch.tensor(1 / order).type_as(last_step)
factor = torch.max(1 / ifactor, torch.min(error_ratio**exponent / safety, 1 / dfactor))
return last_step / factor
def _abs_square(x):
return torch.mul(x, x)
class Tsit5Solver(AdaptiveStepsizeODESolver):
def __init__(
self, func, y0, rtol, atol, first_step=None, safety=0.9, ifactor=10.0, dfactor=0.2, max_num_steps=2**31 - 1,
**unused_kwargs
):
_handle_unused_kwargs(self, unused_kwargs)
del unused_kwargs
self.func = func
self.y0 = y0
self.rtol = rtol
self.atol = atol
self.first_step = first_step
self.safety = _convert_to_tensor(safety, dtype=torch.float64, device=y0[0].device)
self.ifactor = _convert_to_tensor(ifactor, dtype=torch.float64, device=y0[0].device)
self.dfactor = _convert_to_tensor(dfactor, dtype=torch.float64, device=y0[0].device)
self.max_num_steps = _convert_to_tensor(max_num_steps, dtype=torch.int32, device=y0[0].device)
def before_integrate(self, t):
if self.first_step is None:
first_step = _select_initial_step(self.func, t[0], self.y0, 4, self.rtol, self.atol).to(t)
else:
first_step = _convert_to_tensor(self.first_step, dtype=t.dtype, device=t.device)
self.rk_state = _RungeKuttaState(
self.y0,
self.func(t[0].type_as(self.y0[0]), self.y0), t[0], t[0], first_step,
tuple(map(lambda x: [x] * 7, self.y0))
)
def advance(self, next_t):
"""Interpolate through the next time point, integrating as necessary."""
n_steps = 0
while next_t > self.rk_state.t1:
assert n_steps < self.max_num_steps, 'max_num_steps exceeded ({}>={})'.format(n_steps, self.max_num_steps)
self.rk_state = self._adaptive_tsit5_step(self.rk_state)
n_steps += 1
return _interp_eval_tsit5(self.rk_state.t0, self.rk_state.t1, self.rk_state.interp_coeff, next_t)
def _adaptive_tsit5_step(self, rk_state):
"""Take an adaptive Runge-Kutta step to integrate the ODE."""
y0, f0, _, t0, dt, _ = rk_state
########################################################
# Assertions #
########################################################
assert t0 + dt > t0, 'underflow in dt {}'.format(dt.item())
for y0_ in y0:
assert _is_finite(torch.abs(y0_)), 'non-finite values in state `y`: {}'.format(y0_)
y1, f1, y1_error, k = _runge_kutta_step(self.func, y0, f0, t0, dt, tableau=_TSITOURAS_TABLEAU)
########################################################
# Error Ratio #
########################################################
error_tol = tuple(self.atol + self.rtol * torch.max(torch.abs(y0_), torch.abs(y1_)) for y0_, y1_ in zip(y0, y1))
tensor_error_ratio = tuple(y1_error_ / error_tol_ for y1_error_, error_tol_ in zip(y1_error, error_tol))
sq_error_ratio = tuple(
torch.mul(tensor_error_ratio_, tensor_error_ratio_) for tensor_error_ratio_ in tensor_error_ratio
)
mean_error_ratio = (
sum(torch.sum(sq_error_ratio_) for sq_error_ratio_ in sq_error_ratio) /
sum(sq_error_ratio_.numel() for sq_error_ratio_ in sq_error_ratio)
)
accept_step = mean_error_ratio <= 1
########################################################
# Update RK State #
########################################################
y_next = y1 if accept_step else y0
f_next = f1 if accept_step else f0
t_next = t0 + dt if accept_step else t0
dt_next = _optimal_step_size(dt, mean_error_ratio, self.safety, self.ifactor, self.dfactor)
k_next = k if accept_step else self.rk_state.interp_coeff
rk_state = _RungeKuttaState(y_next, f_next, t0, t_next, dt_next, k_next)
return rk_state
| 6,777 | 47.414286 | 120 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_adjoint_skip_step.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
#from .odeint import odeint
from .adjoint import odeint_adjoint
SOLVERS = {
'explicit_adams': AdamsBashforth,
'fixed_adams': AdamsBashforthMoulton,
'adams': VariableCoefficientAdamsBashforth,
'tsit5': Tsit5Solver,
'dopri5': Dopri5Solver,
'bosh3': Bosh3Solver,
'euler': Euler,
'midpoint': Midpoint,
'rk4': RK4,
'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_adjoint_skip_step(func, y0, actual_t, rtol=1e-6, atol=1e-12, method=None, options=None, num_skips=5, skip_proportion=0.01):
"""Integrate a system of ordinary differential equations.
Solves the initial value problem for a non-stiff system of first order ODEs:
```
dy/dt = func(t, y), y(t[0]) = y0
```
where y is a Tensor of any shape.
Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
have any floating point or complex dtype.
actual_t: 1-D Tensor holding a sequence of time points for which to solve for
`y`. The initial time point should be the first element of this sequence,
and each time must be larger than the previous time. May have any floating
point dtype. Converted to a Tensor with float64 dtype.
rtol: optional float64 Tensor specifying an upper bound on relative error,
per element of `y`.
atol: optional float64 Tensor specifying an upper bound on absolute error,
per element of `y`.
method: optional string indicating the integration method to use.
options: optional dict of configuring options for the indicated integration
method. Can only be provided if a `method` is explicitly set.
num_skips: number of random interior break points drawn in [t[0], t[1]].
skip_proportion: fraction of the interval skipped after each break point.
Returns:
y: Tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
Raises:
ValueError: if an invalid `method` is provided.
TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
an invalid dtype.
"""
t = actual_t.clone()
integration_time = t.type_as(y0)
range_time = t[1] - t[0]
skip = range_time * skip_proportion
# Draw sorted random break points in [t[0], t[1]] and pin both end points.
rand_points = np.sort(np.random.uniform(t[0], t[1], size=num_skips + 2))
rand_points[0] = t[0]
rand_points[-1] = t[1]
integration_time[0] = rand_points[0]
integration_time[1] = rand_points[1]
out = odeint_adjoint(func, y0, integration_time, rtol=rtol, atol=atol, method=method, options=options)
first = out[0]
# Resume after each break point, skipping `skip` worth of time, and keep
# only sub-intervals that are longer than the skip itself.
for i in range(1, rand_points.shape[0] - 1):
integration_time[0] = rand_points[i] + skip
integration_time[1] = rand_points[i + 1]
if (integration_time[1] - integration_time[0]) > skip:
out = odeint_adjoint(func, out[1], integration_time)
result = out.clone()
result[0] = first
return result
| 3,667 | 38.021277 | 140 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/adams.py | import collections
import torch
from .solvers import AdaptiveStepsizeODESolver
from .misc import (
_handle_unused_kwargs, _select_initial_step, _convert_to_tensor, _scaled_dot_product, _is_iterable,
_optimal_step_size, _compute_error_ratio
)
_MIN_ORDER = 1
_MAX_ORDER = 12
gamma_star = [
1, -1 / 2, -1 / 12, -1 / 24, -19 / 720, -3 / 160, -863 / 60480, -275 / 24192, -33953 / 3628800, -0.00789255,
-0.00678585, -0.00592406, -0.00523669, -0.0046775, -0.00421495, -0.0038269
]
class _VCABMState(collections.namedtuple('_VCABMState', 'y_n, prev_f, prev_t, next_t, phi, order')):
"""Saved state of the variable step size Adams-Bashforth-Moulton solver as described in
Solving Ordinary Differential Equations I - Nonstiff Problems III.5
by Ernst Hairer, Gerhard Wanner, and Syvert P Norsett.
"""
def g_and_explicit_phi(prev_t, next_t, implicit_phi, k):
curr_t = prev_t[0]
dt = next_t - prev_t[0]
g = torch.empty(k + 1).to(prev_t[0])
explicit_phi = collections.deque(maxlen=k)
beta = torch.tensor(1).to(prev_t[0])
g[0] = 1
c = 1 / torch.arange(1, k + 2).to(prev_t[0])
explicit_phi.append(implicit_phi[0])
for j in range(1, k):
beta = (next_t - prev_t[j - 1]) / (curr_t - prev_t[j]) * beta
beat_cast = beta.to(implicit_phi[j][0])
explicit_phi.append(tuple(iphi_ * beat_cast for iphi_ in implicit_phi[j]))
c = c[:-1] - c[1:] if j == 1 else c[:-1] - c[1:] * dt / (next_t - prev_t[j - 1])
g[j] = c[0]
c = c[:-1] - c[1:] * dt / (next_t - prev_t[k - 1])
g[k] = c[0]
return g, explicit_phi
def compute_implicit_phi(explicit_phi, f_n, k):
k = min(len(explicit_phi) + 1, k)
implicit_phi = collections.deque(maxlen=k)
implicit_phi.append(f_n)
for j in range(1, k):
implicit_phi.append(tuple(iphi_ - ephi_ for iphi_, ephi_ in zip(implicit_phi[j - 1], explicit_phi[j - 1])))
return implicit_phi
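# ---------------------------------------------------------------------------
# A standalone sketch (illustrative only, independent of the class below) of
# the fixed-order two-step Adams-Bashforth method that the variable-coefficient
# g/phi machinery above generalizes: on a uniform grid the predictor weights
# reduce to 3/2 and -1/2, with one Euler step to bootstrap the history.
def _demo_ab2(f, y0, t0, dt, n_steps):
    ys = [y0]
    f_prev = f(t0, y0)
    ys.append(ys[-1] + dt * f_prev)   # bootstrap with an Euler step
    for i in range(1, n_steps):
        f_curr = f(t0 + i * dt, ys[-1])
        ys.append(ys[-1] + dt * (1.5 * f_curr - 0.5 * f_prev))
        f_prev = f_curr
    return ys
if __name__ == '__main__':
    ys = _demo_ab2(lambda t, y: -y, torch.tensor(1.), torch.tensor(0.), torch.tensor(0.1), 10)
    print(float(ys[-1]))   # close to exp(-1.0) ~ 0.368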
class VariableCoefficientAdamsBashforth(AdaptiveStepsizeODESolver):
def __init__(
self, func, y0, rtol, atol, implicit=True, first_step=None, max_order=_MAX_ORDER, safety=0.9, ifactor=10.0, dfactor=0.2,
**unused_kwargs
):
_handle_unused_kwargs(self, unused_kwargs)
del unused_kwargs
self.func = func
self.y0 = y0
self.rtol = rtol if _is_iterable(rtol) else [rtol] * len(y0)
self.atol = atol if _is_iterable(atol) else [atol] * len(y0)
self.implicit = implicit
self.first_step = first_step
self.max_order = int(max(_MIN_ORDER, min(max_order, _MAX_ORDER)))
self.safety = _convert_to_tensor(safety, dtype=torch.float64, device=y0[0].device)
self.ifactor = _convert_to_tensor(ifactor, dtype=torch.float64, device=y0[0].device)
self.dfactor = _convert_to_tensor(dfactor, dtype=torch.float64, device=y0[0].device)
def before_integrate(self, t):
prev_f = collections.deque(maxlen=self.max_order + 1)
prev_t = collections.deque(maxlen=self.max_order + 1)
phi = collections.deque(maxlen=self.max_order)
t0 = t[0]
f0 = self.func(t0.type_as(self.y0[0]), self.y0)
prev_t.appendleft(t0)
prev_f.appendleft(f0)
phi.appendleft(f0)
if self.first_step is None:
first_step = _select_initial_step(self.func, t[0], self.y0, 2, self.rtol[0], self.atol[0], f0=f0).to(t)
else:
first_step = _convert_to_tensor(self.first_step, dtype=t.dtype, device=t.device)
self.vcabm_state = _VCABMState(self.y0, prev_f, prev_t, next_t=t[0] + first_step, phi=phi, order=1)
def advance(self, final_t):
final_t = _convert_to_tensor(final_t).to(self.vcabm_state.prev_t[0])
while final_t > self.vcabm_state.prev_t[0]:
self.vcabm_state = self._adaptive_adams_step(self.vcabm_state, final_t)
assert final_t == self.vcabm_state.prev_t[0]
return self.vcabm_state.y_n
def _adaptive_adams_step(self, vcabm_state, final_t):
y0, prev_f, prev_t, next_t, prev_phi, order = vcabm_state
if next_t > final_t:
next_t = final_t
dt = (next_t - prev_t[0])
dt_cast = dt.to(y0[0])
# Explicit predictor step.
g, phi = g_and_explicit_phi(prev_t, next_t, prev_phi, order)
g = g.to(y0[0])
p_next = tuple(
y0_ + _scaled_dot_product(dt_cast, g[:max(1, order - 1)], phi_[:max(1, order - 1)])
for y0_, phi_ in zip(y0, tuple(zip(*phi)))
)
# Update phi to implicit.
next_f0 = self.func(next_t.to(p_next[0]), p_next)
implicit_phi_p = compute_implicit_phi(phi, next_f0, order + 1)
# Implicit corrector step.
y_next = tuple(
p_next_ + dt_cast * g[order - 1] * iphi_ for p_next_, iphi_ in zip(p_next, implicit_phi_p[order - 1])
)
# Error estimation.
tolerance = tuple(
atol_ + rtol_ * torch.max(torch.abs(y0_), torch.abs(y1_))
for atol_, rtol_, y0_, y1_ in zip(self.atol, self.rtol, y0, y_next)
)
local_error = tuple(dt_cast * (g[order] - g[order - 1]) * iphi_ for iphi_ in implicit_phi_p[order])
error_k = _compute_error_ratio(local_error, tolerance)
accept_step = (torch.tensor(error_k) <= 1).all()
if not accept_step:
# Retry with adjusted step size if step is rejected.
dt_next = _optimal_step_size(dt, error_k, self.safety, self.ifactor, self.dfactor, order=order)
return _VCABMState(y0, prev_f, prev_t, prev_t[0] + dt_next, prev_phi, order=order)
# We accept the step. Evaluate f and update phi.
next_f0 = self.func(next_t.to(p_next[0]), y_next)
implicit_phi = compute_implicit_phi(phi, next_f0, order + 2)
next_order = order
if len(prev_t) <= 4 or order < 3:
next_order = min(order + 1, 3, self.max_order)
else:
error_km1 = _compute_error_ratio(
tuple(dt_cast * (g[order - 1] - g[order - 2]) * iphi_ for iphi_ in implicit_phi_p[order - 1]), tolerance
)
error_km2 = _compute_error_ratio(
tuple(dt_cast * (g[order - 2] - g[order - 3]) * iphi_ for iphi_ in implicit_phi_p[order - 2]), tolerance
)
if min(error_km1 + error_km2) < max(error_k):
next_order = order - 1
elif order < self.max_order:
error_kp1 = _compute_error_ratio(
tuple(dt_cast * gamma_star[order] * iphi_ for iphi_ in implicit_phi_p[order]), tolerance
)
if max(error_kp1) < max(error_k):
next_order = order + 1
# Keep step size constant if increasing order. Else use adaptive step size.
dt_next = dt if next_order > order else _optimal_step_size(
dt, error_k, self.safety, self.ifactor, self.dfactor, order=order + 1
)
prev_f.appendleft(next_f0)
prev_t.appendleft(next_t)
return _VCABMState(y_next, prev_f, prev_t, next_t + dt_next, implicit_phi, order=next_order)
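# --- usage sketch (editorial addition, not part of the original file) ---
# A hedged example of driving this solver through the package-level
# `odeint` entry point with method='adams'; the decay ODE is an
# illustrative choice, and the module must be launched with `python -m`
# for the relative imports above to resolve.
if __name__ == "__main__":
    from torchdiffeq import odeint

    def decay(t, y):
        return -y  # dy/dt = -y, analytic solution y0 * exp(-t)

    y0 = torch.tensor([1.0], dtype=torch.float64)
    t = torch.linspace(0.0, 2.0, 5, dtype=torch.float64)
    ys = odeint(decay, y0, t, rtol=1e-6, atol=1e-8, method='adams')
    print(ys[-1])  # expected to be close to exp(-2) ~= 0.1353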
| 7,148 | 39.851429 | 128 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_adjoint_stochastic_end_v2.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
#from .odeint import odeint
from .adjoint import odeint_adjoint
from torch.distributions import normal
from torch.distributions import uniform
SOLVERS = {
'explicit_adams': AdamsBashforth,
'fixed_adams': AdamsBashforthMoulton,
'adams': VariableCoefficientAdamsBashforth,
'tsit5': Tsit5Solver,
'dopri5': Dopri5Solver,
'bosh3': Bosh3Solver,
'euler': Euler,
'midpoint': Midpoint,
'rk4': RK4,
'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_adjoint_stochastic_end_v2(func, y0, actual_t, rtol=1e-6, atol=1e-12, method=None, options=None, shrink_proportion=0.5, shrink_std=0.02, mode='train', min_length=0.01):
"""Integrate a system of ordinary differential equations.
Solves the initial value problem for a non-stiff system of first order ODEs:
```
dy/dt = func(t, y), y(t[0]) = y0
```
where y is a Tensor of any shape.
Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
have any floating point or complex dtype.
actual_t: 1-D Tensor holding the time points for which to solve for `y`.
The initial time point should be the first element. This stochastic-end
variant expects two time points and also handles a reversed pair
(actual_t[1] < actual_t[0]). May have any floating point dtype.
rtol: optional float64 Tensor specifying an upper bound on relative error,
per element of `y`.
atol: optional float64 Tensor specifying an upper bound on absolute error,
per element of `y`.
method: optional string indicating the integration method to use.
options: optional dict of configuring options for the indicated integration
method. Can only be provided if a `method` is explicitly set.
shrink_proportion: proportion of the interval that is kept; the
training-time end point is sampled around t[0] + proportion * (t[1] - t[0]).
shrink_std: half-width of the uniform jitter applied to the shrunk end
point during training.
mode: 'train' samples a stochastic end point; any other value uses the
deterministic shrunk end point.
min_length: minimum allowed length of the integration interval.
Returns:
y: Tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
Raises:
ValueError: if an invalid `method` is provided.
TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
an invalid dtype.
"""
t = actual_t.clone()
if isinstance(y0, tuple):
integration_time = t.type_as(y0[0])
else:
integration_time = t.type_as(y0)
rev = False
if t[1] < t[0]:
t = reverse_time(t)
rev = True
range_time = (t[1] - t[0]) * shrink_proportion
m = uniform.Uniform(t[0] + range_time - shrink_std, t[0] + range_time + shrink_std)
integration_time[0] = t[0]
if mode == 'train':
integration_time[1] = max(m.sample(), t[0] + min_length)
else:
integration_time[1] = t[0] + range_time
#print("actual_t")
#print(actual_t)
#print("integration_time")
#print(integration_time)
#print("=================")
if rev:
integration_time = reverse_time(integration_time)
out = odeint_adjoint(func, y0, integration_time, rtol=rtol, atol=atol, method=method, options=options)
return out
def reverse_time(t):
temp1 = t[1].item()
temp0 = t[0].item()
t[1] = temp0
t[0] = temp1
return t
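# --- usage sketch (editorial addition) ---
# A hedged example of the stochastic-end adjoint call. It assumes a
# two-element time tensor, since only t[1] is rescaled above, and `func`
# must be an nn.Module so odeint_adjoint can collect its parameters.
if __name__ == "__main__":
    import torch.nn as nn

    class Decay(nn.Module):
        def forward(self, t, y):
            return -y

    y0 = torch.tensor([1.0])
    t = torch.tensor([0.0, 1.0])
    # In 'train' mode the end point is sampled near t[0] + shrink_proportion * (t[1] - t[0]).
    out = odeint_adjoint_stochastic_end_v2(Decay(), y0, t, method='dopri5', mode='train')
    print(out.shape)  # (2, 1): one row per requested time point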
| 4,062 | 35.276786 | 184 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/solvers.py | import abc
import torch
from .misc import _assert_increasing, _handle_unused_kwargs
class AdaptiveStepsizeODESolver(object):
__metaclass__ = abc.ABCMeta
def __init__(self, func, y0, atol, rtol, **unused_kwargs):
_handle_unused_kwargs(self, unused_kwargs)
del unused_kwargs
self.func = func
self.y0 = y0
self.atol = atol
self.rtol = rtol
def before_integrate(self, t):
pass
@abc.abstractmethod
def advance(self, next_t):
raise NotImplementedError
def integrate(self, t):
_assert_increasing(t)
solution = [self.y0]
t = t.to(self.y0[0].device, torch.float64)
self.before_integrate(t)
for i in range(1, len(t)):
y = self.advance(t[i])
solution.append(y)
return tuple(map(torch.stack, tuple(zip(*solution))))
class FixedGridODESolver(object):
__metaclass__ = abc.ABCMeta
def __init__(self, func, y0, step_size=None, grid_constructor=None, **unused_kwargs):
unused_kwargs.pop('rtol', None)
unused_kwargs.pop('atol', None)
_handle_unused_kwargs(self, unused_kwargs)
del unused_kwargs
self.func = func
self.y0 = y0
if step_size is not None and grid_constructor is None:
self.grid_constructor = self._grid_constructor_from_step_size(step_size)
elif grid_constructor is None:
self.grid_constructor = lambda f, y0, t: t
elif step_size is None:
self.grid_constructor = grid_constructor
else:
raise ValueError("step_size and grid_constructor are exclusive arguments.")
def _grid_constructor_from_step_size(self, step_size):
def _grid_constructor(func, y0, t):
start_time = t[0]
end_time = t[-1]
niters = torch.ceil((end_time - start_time) / step_size + 1).item()
t_infer = torch.arange(0, niters).to(t) * step_size + start_time
if t_infer[-1] > t[-1]:
t_infer[-1] = t[-1]
return t_infer
return _grid_constructor
@property
@abc.abstractmethod
def order(self):
pass
@abc.abstractmethod
def step_func(self, func, t, dt, y):
pass
def integrate(self, t):
_assert_increasing(t)
t = t.type_as(self.y0[0])
time_grid = self.grid_constructor(self.func, self.y0, t)
assert time_grid[0] == t[0] and time_grid[-1] == t[-1]
time_grid = time_grid.to(self.y0[0])
solution = [self.y0]
j = 1
y0 = self.y0
for t0, t1 in zip(time_grid[:-1], time_grid[1:]):
dy = self.step_func(self.func, t0, t1 - t0, y0)
y1 = tuple(y0_ + dy_ for y0_, dy_ in zip(y0, dy))
while j < len(t) and t1 >= t[j]:
solution.append(self._linear_interp(t0, t1, y0, y1, t[j]))
j += 1
y0 = y1
return tuple(map(torch.stack, tuple(zip(*solution))))
def _linear_interp(self, t0, t1, y0, y1, t):
if t == t0:
return y0
if t == t1:
return y1
t0, t1, t = t0.to(y0[0]), t1.to(y0[0]), t.to(y0[0])
slope = tuple((y1_ - y0_) / (t1 - t0) for y0_, y1_ in zip(y0, y1))
return tuple(y0_ + slope_ * (t - t0) for y0_, slope_ in zip(y0, slope))
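# --- usage sketch (editorial addition) ---
# FixedGridODESolver is exercised through its subclasses (Euler, Midpoint,
# RK4 in fixed_grid.py). A hedged example via the public `odeint` entry
# point, forwarding `step_size` through `options`:
if __name__ == "__main__":
    from torchdiffeq import odeint

    def decay(t, y):
        return -y

    y0 = torch.tensor([1.0])
    t = torch.linspace(0.0, 1.0, 3)
    # Requested times falling between grid points are linearly
    # interpolated by _linear_interp above.
    ys = odeint(decay, y0, t, method='euler', options={'step_size': 0.01})
    print(ys)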
| 3,276 | 29.06422 | 89 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_stochastic_end_v2.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import _check_inputs
import numpy as np
import torch
from .odeint import odeint
from torch.distributions import normal
from torch.distributions import uniform
SOLVERS = {
'explicit_adams': AdamsBashforth,
'fixed_adams': AdamsBashforthMoulton,
'adams': VariableCoefficientAdamsBashforth,
'tsit5': Tsit5Solver,
'dopri5': Dopri5Solver,
'bosh3': Bosh3Solver,
'euler': Euler,
'midpoint': Midpoint,
'rk4': RK4,
'adaptive_heun': AdaptiveHeunSolver,
}
def odeint_stochastic_end_v2(func, y0, actual_t, rtol=1e-7, atol=1e-9, shrink_proportion=0.5, shrink_std=0.02, method=None, options=None, mode='train', min_length=0.01):
"""Integrate a system of ordinary differential equations.
Solves the initial value problem for a non-stiff system of first order ODEs:
```
dy/dt = func(t, y), y(t[0]) = y0
```
where y is a Tensor of any shape.
Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
have any floating point or complex dtype.
actual_t: 1-D Tensor holding the time points for which to solve for `y`.
The initial time point should be the first element. This stochastic-end
variant expects two time points (otherwise it falls back to plain
`odeint`) and also handles a reversed pair (actual_t[1] < actual_t[0]).
rtol: optional float64 Tensor specifying an upper bound on relative error,
per element of `y`.
atol: optional float64 Tensor specifying an upper bound on absolute error,
per element of `y`.
method: optional string indicating the integration method to use.
options: optional dict of configuring options for the indicated integration
method. Can only be provided if a `method` is explicitly set.
shrink_proportion: proportion of the interval that is kept; the
training-time end point is sampled around t[0] + proportion * (t[1] - t[0]).
shrink_std: half-width of the uniform jitter applied to the shrunk end
point during training.
mode: 'train' samples a stochastic end point; any other value uses the
deterministic shrunk end point.
min_length: minimum allowed length of the integration interval.
Returns:
y: Tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
Raises:
ValueError: if an invalid `method` is provided.
TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
an invalid dtype.
"""
t = actual_t.clone()
if t.size(0) != 2:
return odeint(func, y0, actual_t)
integration_time = t.type_as(y0)
rev = False
if t[1] < t[0]:
t = reverse_time(t)
rev = True
range_time = (t[1] - t[0]) * shrink_proportion
m = uniform.Uniform(t[0] + range_time - shrink_std, t[0] + range_time + shrink_std)
integration_time[0] = t[0]
if mode == 'train':
integration_time[1] = max(m.sample(), t[0] + min_length)
else:
integration_time[1] = t[0] + range_time
if rev and mode == 'train':
integration_time = reverse_time(integration_time)
out = odeint(func, y0, integration_time)
return out
def reverse_time(t):
temp1 = t[1].item()
temp0 = t[0].item()
t[1] = temp0
t[0] = temp1
return t
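# --- usage sketch (editorial addition) ---
# In evaluation mode the end point is deterministic: with the defaults
# above, integrating over [0, 1] stops at t = 0.5, and a time tensor with
# more than two points falls back to plain `odeint`.
if __name__ == "__main__":
    out = odeint_stochastic_end_v2(
        lambda t, y: -y, torch.tensor([1.0]), torch.tensor([0.0, 1.0]), mode='eval'
    )
    print(out)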
#def odeint_skip_step(func, y0, t, rtol=1e-7, atol=1e-9, method=None, options=None, num_skips = 5, skip_proportion = 0.01 ):
# """Integrate a system of ordinary differential equations.
#
# Solves the initial value problem for a non-stiff system of first order ODEs:
# ```
# dy/dt = func(t, y), y(t[0]) = y0
# ```
# where y is a Tensor of any shape.
#
# Output dtypes and numerical precision are based on the dtypes of the inputs `y0`.
#
# Args:
# func: Function that maps a Tensor holding the state `y` and a scalar Tensor
# `t` into a Tensor of state derivatives with respect to time.
# y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
# have any floating point or complex dtype.
# t: 1-D Tensor holding a sequence of time points for which to solve for
# `y`. The initial time point should be the first element of this sequence,
# and each time must be larger than the previous time. May have any floating
# point dtype. Converted to a Tensor with float64 dtype.
# rtol: optional float64 Tensor specifying an upper bound on relative error,
# per element of `y`.
# atol: optional float64 Tensor specifying an upper bound on absolute error,
# per element of `y`.
# method: optional string indicating the integration method to use.
# options: optional dict of configuring options for the indicated integration
# method. Can only be provided if a `method` is explicitly set.
# name: Optional name for this operation.
#
# Returns:
# y: Tensor, where the first dimension corresponds to different
# time points. Contains the solved value of y for each desired time point in
# `t`, with the initial value `y0` being the first element along the first
# dimension.
#
# Raises:
# ValueError: if an invalid `method` is provided.
# TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
# an invalid dtype.
# """
#
# tensor_input, func, y0, t = _check_inputs(func, y0, t)
#
# if options is None:
# options = {}
# elif method is None:
# raise ValueError('cannot supply `options` without specifying `method`')
#
# if method is None:
# method = 'dopri5'
#
#
# #print("y0")
# #print(y0)
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# solution = solver.integrate(t)
#
#
# integration_time = t #integration_time.type_as(x)
# range_time = t[1]-t[0]
# skip = range_time * skip_proportion
# rand_points = np.sort(np.random.uniform(t[0], t[1],size=num_skips + 2))
# rand_points[0]=t[0]
# rand_points[num_skips-1]=t[1]
#
#
# integration_time[0]=rand_points[0]
# integration_time[1]=rand_points[1]
#
# print(integration_time)
# print(y0)
#
# print("=======================================")
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# out = solver.integrate(integration_time)
#
# solution = out
# y0 = (out[0][1],)
# for i in range(1,rand_points.shape[0]-1):
# integration_time[0]=rand_points[i] + skip
# integration_time[1]=rand_points[i+1]
#
# if integration_time[1]>integration_time[0]:
# print(integration_time)
# print(y0)
# solver = SOLVERS[method](func, y0, rtol=rtol, atol=atol, **options)
# out = solver.integrate(integration_time)
#
# y0 = (out[0][1],)
#
# solution[0][1] = out[0][1]
#
#
# if tensor_input:
# solution = solution[0]
#
#
# return solution
| 7,458 | 35.925743 | 175 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/dopri5.py | # Based on https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/integrate
import torch
from .misc import (
_scaled_dot_product, _convert_to_tensor, _is_finite, _select_initial_step, _handle_unused_kwargs, _is_iterable,
_optimal_step_size, _compute_error_ratio
)
from .solvers import AdaptiveStepsizeODESolver
from .interp import _interp_fit, _interp_evaluate
from .rk_common import _RungeKuttaState, _ButcherTableau, _runge_kutta_step
_DORMAND_PRINCE_SHAMPINE_TABLEAU = _ButcherTableau(
alpha=[1 / 5, 3 / 10, 4 / 5, 8 / 9, 1., 1.],
beta=[
[1 / 5],
[3 / 40, 9 / 40],
[44 / 45, -56 / 15, 32 / 9],
[19372 / 6561, -25360 / 2187, 64448 / 6561, -212 / 729],
[9017 / 3168, -355 / 33, 46732 / 5247, 49 / 176, -5103 / 18656],
[35 / 384, 0, 500 / 1113, 125 / 192, -2187 / 6784, 11 / 84],
],
c_sol=[35 / 384, 0, 500 / 1113, 125 / 192, -2187 / 6784, 11 / 84, 0],
c_error=[
35 / 384 - 1951 / 21600,
0,
500 / 1113 - 22642 / 50085,
125 / 192 - 451 / 720,
-2187 / 6784 - -12231 / 42400,
11 / 84 - 649 / 6300,
-1. / 60.,
],
)
DPS_C_MID = [
6025192743 / 30085553152 / 2, 0, 51252292925 / 65400821598 / 2, -2691868925 / 45128329728 / 2,
187940372067 / 1594534317056 / 2, -1776094331 / 19743644256 / 2, 11237099 / 235043384 / 2
]
def _interp_fit_dopri5(y0, y1, k, dt, tableau=_DORMAND_PRINCE_SHAMPINE_TABLEAU):
"""Fit an interpolating polynomial to the results of a Runge-Kutta step."""
dt = dt.type_as(y0[0])
y_mid = tuple(y0_ + _scaled_dot_product(dt, DPS_C_MID, k_) for y0_, k_ in zip(y0, k))
f0 = tuple(k_[0] for k_ in k)
f1 = tuple(k_[-1] for k_ in k)
return _interp_fit(y0, y1, y_mid, f0, f1, dt)
def _abs_square(x):
return torch.mul(x, x)
def _ta_append(list_of_tensors, value):
"""Append a value to the end of a list of PyTorch tensors."""
list_of_tensors.append(value)
return list_of_tensors
class Dopri5Solver(AdaptiveStepsizeODESolver):
def __init__(
self, func, y0, rtol, atol, first_step=None, safety=0.9, ifactor=10.0, dfactor=0.2, max_num_steps=2**31 - 1,
**unused_kwargs
):
_handle_unused_kwargs(self, unused_kwargs)
del unused_kwargs
self.func = func
self.y0 = y0
self.rtol = rtol if _is_iterable(rtol) else [rtol] * len(y0)
self.atol = atol if _is_iterable(atol) else [atol] * len(y0)
self.first_step = first_step
self.safety = _convert_to_tensor(safety, dtype=torch.float64, device=y0[0].device)
self.ifactor = _convert_to_tensor(ifactor, dtype=torch.float64, device=y0[0].device)
self.dfactor = _convert_to_tensor(dfactor, dtype=torch.float64, device=y0[0].device)
self.max_num_steps = _convert_to_tensor(max_num_steps, dtype=torch.int32, device=y0[0].device)
def before_integrate(self, t):
f0 = self.func(t[0].type_as(self.y0[0]), self.y0)
if self.first_step is None:
first_step = _select_initial_step(self.func, t[0], self.y0, 4, self.rtol[0], self.atol[0], f0=f0).to(t)
else:
first_step = _convert_to_tensor(self.first_step, dtype=t.dtype, device=t.device)
self.rk_state = _RungeKuttaState(self.y0, f0, t[0], t[0], first_step, interp_coeff=[self.y0] * 5)
def advance(self, next_t):
"""Interpolate through the next time point, integrating as necessary."""
n_steps = 0
while next_t > self.rk_state.t1:
assert n_steps < self.max_num_steps, 'max_num_steps exceeded ({}>={})'.format(n_steps, self.max_num_steps)
self.rk_state = self._adaptive_dopri5_step(self.rk_state)
n_steps += 1
return _interp_evaluate(self.rk_state.interp_coeff, self.rk_state.t0, self.rk_state.t1, next_t)
def _adaptive_dopri5_step(self, rk_state):
"""Take an adaptive Runge-Kutta step to integrate the ODE."""
y0, f0, _, t0, dt, interp_coeff = rk_state
########################################################
# Assertions #
########################################################
assert t0 + dt > t0, 'underflow in dt {}'.format(dt.item())
for y0_ in y0:
assert _is_finite(torch.abs(y0_)), 'non-finite values in state `y`: {}'.format(y0_)
y1, f1, y1_error, k = _runge_kutta_step(self.func, y0, f0, t0, dt, tableau=_DORMAND_PRINCE_SHAMPINE_TABLEAU)
########################################################
# Error Ratio #
########################################################
mean_sq_error_ratio = _compute_error_ratio(y1_error, atol=self.atol, rtol=self.rtol, y0=y0, y1=y1)
accept_step = (torch.tensor(mean_sq_error_ratio) <= 1).all()
########################################################
# Update RK State #
########################################################
y_next = y1 if accept_step else y0
f_next = f1 if accept_step else f0
t_next = t0 + dt if accept_step else t0
interp_coeff = _interp_fit_dopri5(y0, y1, k, dt) if accept_step else interp_coeff
dt_next = _optimal_step_size(
dt, mean_sq_error_ratio, safety=self.safety, ifactor=self.ifactor, dfactor=self.dfactor, order=5
)
rk_state = _RungeKuttaState(y_next, f_next, t0, t_next, dt_next, interp_coeff)
return rk_state
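# --- usage sketch (editorial addition) ---
# Solver-specific constructor arguments such as `first_step` and
# `max_num_steps` are forwarded through the `options` dict of the public
# `odeint` call; the ODE and values below are illustrative.
if __name__ == "__main__":
    from torchdiffeq import odeint

    def decay(t, y):
        return -y

    y0 = torch.tensor([1.0], dtype=torch.float64)
    t = torch.linspace(0.0, 2.0, 5, dtype=torch.float64)
    ys = odeint(decay, y0, t, rtol=1e-7, atol=1e-9, method='dopri5',
                options={'first_step': 0.01, 'max_num_steps': 10000})
    print(ys[-1])  # expected to be close to exp(-2)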
| 5,566 | 44.260163 | 118 | py |
FragmentVC | FragmentVC-main/convert_batch.py | #!/usr/bin/env python3
"""Convert multiple pairs."""
import warnings
from pathlib import Path
from functools import partial
from multiprocessing import Pool, cpu_count
import yaml
import torch
import numpy as np
import soundfile as sf
from jsonargparse import ArgumentParser, ActionConfigFile
from data import load_wav, log_mel_spectrogram, plot_mel, plot_attn
from models import load_pretrained_wav2vec
def parse_args():
"""Parse command-line arguments."""
parser = ArgumentParser()
parser.add_argument("info_path", type=str)
parser.add_argument("output_dir", type=str, default=".")
parser.add_argument("-c", "--ckpt_path", default="checkpoints/fragmentvc.pt")
parser.add_argument("-w", "--wav2vec_path", default="checkpoints/wav2vec_small.pt")
parser.add_argument("-v", "--vocoder_path", default="checkpoints/vocoder.pt")
parser.add_argument("--sample_rate", type=int, default=16000)
parser.add_argument("--preemph", type=float, default=0.97)
parser.add_argument("--hop_len", type=int, default=326)
parser.add_argument("--win_len", type=int, default=1304)
parser.add_argument("--n_fft", type=int, default=1304)
parser.add_argument("--n_mels", type=int, default=80)
parser.add_argument("--f_min", type=int, default=80)
parser.add_argument("--audio_config", action=ActionConfigFile)
return vars(parser.parse_args())
def main(
info_path,
output_dir,
ckpt_path,
wav2vec_path,
vocoder_path,
sample_rate,
preemph,
hop_len,
win_len,
n_fft,
n_mels,
f_min,
**kwargs,
):
"""Main function."""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
wav2vec = load_pretrained_wav2vec(wav2vec_path).to(device)
print("[INFO] Wav2Vec is loaded from", wav2vec_path)
model = torch.jit.load(ckpt_path).to(device).eval()
print("[INFO] FragmentVC is loaded from", ckpt_path)
vocoder = torch.jit.load(vocoder_path).to(device).eval()
print("[INFO] Vocoder is loaded from", vocoder_path)
path2wav = partial(load_wav, sample_rate=sample_rate)
wav2mel = partial(
log_mel_spectrogram,
preemph=preemph,
sample_rate=sample_rate,
n_mels=n_mels,
n_fft=n_fft,
hop_length=hop_len,
win_length=win_len,
f_min=f_min,
)
with open(info_path) as f:
infos = yaml.load(f, Loader=yaml.FullLoader)
out_mels = []
attns = []
for pair_name, pair in infos.items():
src_wav = load_wav(pair["source"], sample_rate, trim=True)
src_wav = torch.FloatTensor(src_wav).unsqueeze(0).to(device)
with Pool(cpu_count()) as pool:
tgt_wavs = pool.map(path2wav, pair["target"])
tgt_mels = pool.map(wav2mel, tgt_wavs)
tgt_mel = np.concatenate(tgt_mels, axis=0)
tgt_mel = torch.FloatTensor(tgt_mel.T).unsqueeze(0).to(device)
with torch.no_grad():
src_feat = wav2vec.extract_features(src_wav, None)[0]
out_mel, attn = model(src_feat, tgt_mel)
out_mel = out_mel.transpose(1, 2).squeeze(0)
out_mels.append(out_mel)
attns.append(attn)
print(f"[INFO] Pair {pair_name} converted")
print("[INFO] Generating waveforms...")
with torch.no_grad():
out_wavs = vocoder.generate(out_mels)
print("[INFO] Waveforms generated")
out_dir = Path(output_dir)
out_dir.mkdir(parents=True, exist_ok=True)
for pair_name, out_mel, out_wav, attn in zip(
infos.keys(), out_mels, out_wavs, attns
):
out_wav = out_wav.cpu().numpy()
out_path = Path(out_dir, pair_name)
plot_mel(out_mel, filename=out_path.with_suffix(".mel.png"))
plot_attn(attn, filename=out_path.with_suffix(".attn.png"))
sf.write(out_path.with_suffix(".wav"), out_wav, sample_rate)
if __name__ == "__main__":
warnings.filterwarnings("ignore")
main(**parse_args())
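# --- example usage (editorial addition; file names are illustrative) ---
# The info YAML maps a pair name to one source utterance and a list of
# target-speaker utterances, matching the parsing in `main` above:
#
#   pair001:
#     source: wavs/source_utterance.wav
#     target:
#       - wavs/target_utterance_1.wav
#       - wavs/target_utterance_2.wav
#
# python convert_batch.py pairs.yaml outputs/ \
#     -c checkpoints/fragmentvc.pt -w checkpoints/wav2vec_small.pt \
#     -v checkpoints/vocoder.pt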
| 3,966 | 29.05303 | 87 | py |
FragmentVC | FragmentVC-main/convert.py | #!/usr/bin/env python3
"""Convert using one source utterance and multiple target utterances."""
import warnings
from datetime import datetime
from pathlib import Path
from copy import deepcopy
import torch
import numpy as np
import soundfile as sf
from jsonargparse import ArgumentParser, ActionConfigFile
import sox
from data import load_wav, log_mel_spectrogram, plot_mel, plot_attn
from models import load_pretrained_wav2vec
def parse_args():
"""Parse command-line arguments."""
parser = ArgumentParser()
parser.add_argument("source_path", type=str)
parser.add_argument("target_paths", type=str, nargs="+")
parser.add_argument("-w", "--wav2vec_path", type=str, required=True)
parser.add_argument("-c", "--ckpt_path", type=str, required=True)
parser.add_argument("-v", "--vocoder_path", type=str, required=True)
parser.add_argument("-o", "--output_path", type=str, default="output.wav")
parser.add_argument("--sample_rate", type=int, default=16000)
parser.add_argument("--preemph", type=float, default=0.97)
parser.add_argument("--hop_len", type=int, default=326)
parser.add_argument("--win_len", type=int, default=1304)
parser.add_argument("--n_fft", type=int, default=1304)
parser.add_argument("--n_mels", type=int, default=80)
parser.add_argument("--f_min", type=int, default=80)
parser.add_argument("--audio_config", action=ActionConfigFile)
return vars(parser.parse_args())
def main(
source_path,
target_paths,
wav2vec_path,
ckpt_path,
vocoder_path,
output_path,
sample_rate,
preemph,
hop_len,
win_len,
n_fft,
n_mels,
f_min,
**kwargs,
):
"""Main function."""
begin_time = step_moment = datetime.now()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
wav2vec = load_pretrained_wav2vec(wav2vec_path).to(device)
print("[INFO] Wav2Vec is loaded from", wav2vec_path)
model = torch.jit.load(ckpt_path).to(device).eval()
print("[INFO] FragmentVC is loaded from", ckpt_path)
vocoder = torch.jit.load(vocoder_path).to(device).eval()
print("[INFO] Vocoder is loaded from", vocoder_path)
elapsed_time = datetime.now() - step_moment
step_moment = datetime.now()
print("[INFO] elapsed time", elapsed_time.total_seconds())
tfm = sox.Transformer()
tfm.vad(location=1)
tfm.vad(location=-1)
src_wav = load_wav(source_path, sample_rate)
src_wav = deepcopy(tfm.build_array(input_array=src_wav, sample_rate_in=sample_rate))
src_wav = torch.FloatTensor(src_wav).unsqueeze(0).to(device)
print("[INFO] source waveform shape:", src_wav.shape)
tgt_mels = []
for tgt_path in target_paths:
tgt_wav = load_wav(tgt_path, sample_rate)
tgt_wav = tfm.build_array(input_array=tgt_wav, sample_rate_in=sample_rate)
tgt_wav = deepcopy(tgt_wav)
tgt_mel = log_mel_spectrogram(
tgt_wav, preemph, sample_rate, n_mels, n_fft, hop_len, win_len, f_min
)
tgt_mels.append(tgt_mel)
tgt_mel = np.concatenate(tgt_mels, axis=0)
tgt_mel = torch.FloatTensor(tgt_mel.T).unsqueeze(0).to(device)
print("[INFO] target spectrograms shape:", tgt_mel.shape)
with torch.no_grad():
src_feat = wav2vec.extract_features(src_wav, None)[0]
print("[INFO] source Wav2Vec feature shape:", src_feat.shape)
elapsed_time = datetime.now() - step_moment
step_moment = datetime.now()
print("[INFO] elapsed time", elapsed_time.total_seconds())
out_mel, attns = model(src_feat, tgt_mel)
out_mel = out_mel.transpose(1, 2).squeeze(0)
print("[INFO] converted spectrogram shape:", out_mel.shape)
elapsed_time = datetime.now() - step_moment
step_moment = datetime.now()
print("[INFO] elapsed time", elapsed_time.total_seconds())
out_wav = vocoder.generate([out_mel])[0]
out_wav = out_wav.cpu().numpy()
print("[INFO] generated waveform shape:", out_wav.shape)
elapsed_time = datetime.now() - step_moment
step_moment = datetime.now()
print("[INFO] elapsed time", elapsed_time.total_seconds())
wav_path = Path(output_path)
sf.write(wav_path, out_wav, sample_rate)
print("[INFO] generated waveform is saved to", wav_path)
mel_path = wav_path.with_suffix(".mel.png")
plot_mel(out_mel, filename=mel_path)
print("[INFO] mel-spectrogram plot is saved to", mel_path)
attn_path = wav_path.with_suffix(".attn.png")
plot_attn(attns, filename=attn_path)
print("[INFO] attention plot is saved to", attn_path)
elapsed_time = datetime.now() - begin_time
print("[INFO] Overall elapsed time", elapsed_time.total_seconds())
if __name__ == "__main__":
warnings.filterwarnings("ignore")
main(**parse_args())
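# --- example usage (editorial addition; paths are illustrative) ---
# python convert.py source.wav target_1.wav target_2.wav \
#     -w checkpoints/wav2vec_small.pt -c checkpoints/fragmentvc.pt \
#     -v checkpoints/vocoder.pt -o output.wav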
| 4,829 | 32.776224 | 88 | py |
FragmentVC | FragmentVC-main/train.py | #!/usr/bin/env python3
"""Train FragmentVC model."""
import argparse
import datetime
import random
from pathlib import Path
import torch
import torch.nn as nn
from torch.optim import AdamW
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from data import IntraSpeakerDataset, collate_batch
from models import FragmentVC, get_cosine_schedule_with_warmup
def parse_args():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument("data_dir", type=str)
parser.add_argument("--save_dir", type=str, default=".")
parser.add_argument("--total_steps", type=int, default=250000)
parser.add_argument("--warmup_steps", type=int, default=500)
parser.add_argument("--valid_steps", type=int, default=1000)
parser.add_argument("--log_steps", type=int, default=100)
parser.add_argument("--save_steps", type=int, default=10000)
parser.add_argument("--milestones", type=int, nargs=2, default=[50000, 150000])
parser.add_argument("--exclusive_rate", type=float, default=1.0)
parser.add_argument("--n_samples", type=int, default=10)
parser.add_argument("--accu_steps", type=int, default=2)
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("--n_workers", type=int, default=8)
parser.add_argument("--preload", action="store_true")
parser.add_argument("--comment", type=str)
return vars(parser.parse_args())
def model_fn(batch, model, criterion, self_exclude, ref_included, device):
"""Forward a batch through model."""
srcs, src_masks, refs, ref_masks, tgts, tgt_masks, overlap_lens = batch
srcs = srcs.to(device)
src_masks = src_masks.to(device)
refs = refs.to(device)
ref_masks = ref_masks.to(device)
tgts = tgts.to(device)
tgt_masks = tgt_masks.to(device)
if ref_included:
if random.random() >= self_exclude:
refs = torch.cat((refs, tgts), dim=2)
ref_masks = torch.cat((ref_masks, tgt_masks), dim=1)
else:
refs = tgts
ref_masks = tgt_masks
outs, _ = model(srcs, refs, src_masks=src_masks, ref_masks=ref_masks)
losses = []
for out, tgt, overlap_len in zip(outs.unbind(), tgts.unbind(), overlap_lens):
loss = criterion(out[:, :overlap_len], tgt[:, :overlap_len])
losses.append(loss)
return sum(losses) / len(losses)
def valid(dataloader, model, criterion, device):
"""Validate on validation set."""
model.eval()
running_loss = 0.0
pbar = tqdm(total=len(dataloader.dataset), ncols=0, desc="Valid", unit=" uttr")
for i, batch in enumerate(dataloader):
with torch.no_grad():
loss = model_fn(batch, model, criterion, 1.0, True, device)
running_loss += loss.item()
pbar.update(dataloader.batch_size)
pbar.set_postfix(loss=f"{running_loss / (i+1):.2f}")
pbar.close()
model.train()
return running_loss / len(dataloader)
def main(
data_dir,
save_dir,
total_steps,
warmup_steps,
valid_steps,
log_steps,
save_steps,
milestones,
exclusive_rate,
n_samples,
accu_steps,
batch_size,
n_workers,
preload,
comment,
):
"""Main function."""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
metadata_path = Path(data_dir) / "metadata.json"
dataset = IntraSpeakerDataset(data_dir, metadata_path, n_samples, preload)
lengths = [trainlen := int(0.9 * len(dataset)), len(dataset) - trainlen]
trainset, validset = random_split(dataset, lengths)
train_loader = DataLoader(
trainset,
batch_size=batch_size,
shuffle=True,
drop_last=True,
num_workers=n_workers,
pin_memory=True,
collate_fn=collate_batch,
)
valid_loader = DataLoader(
validset,
batch_size=batch_size * accu_steps,
num_workers=n_workers,
drop_last=True,
pin_memory=True,
collate_fn=collate_batch,
)
train_iterator = iter(train_loader)
if comment is not None:
log_dir = "logs/"
log_dir += datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
log_dir += "_" + comment
writer = SummaryWriter(log_dir)
save_dir_path = Path(save_dir)
save_dir_path.mkdir(parents=True, exist_ok=True)
model = FragmentVC().to(device)
model = torch.jit.script(model)
criterion = nn.L1Loss()
optimizer = AdamW(model.parameters(), lr=1e-4)
scheduler = get_cosine_schedule_with_warmup(optimizer, warmup_steps, total_steps)
best_loss = float("inf")
best_state_dict = None
self_exclude = 0.0
ref_included = False
pbar = tqdm(total=valid_steps, ncols=0, desc="Train", unit=" step")
for step in range(total_steps):
batch_loss = 0.0
for _ in range(accu_steps):
try:
batch = next(train_iterator)
except StopIteration:
train_iterator = iter(train_loader)
batch = next(train_iterator)
loss = model_fn(batch, model, criterion, self_exclude, ref_included, device)
loss = loss / accu_steps
batch_loss += loss.item()
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
pbar.update()
pbar.set_postfix(loss=f"{batch_loss:.2f}", excl=self_exclude, step=step + 1)
if step % log_steps == 0 and comment is not None:
writer.add_scalar("Loss/train", batch_loss, step)
writer.add_scalar("Self-exclusive Rate", self_exclude, step)
if (step + 1) % valid_steps == 0:
pbar.close()
valid_loss = valid(valid_loader, model, criterion, device)
if comment is not None:
writer.add_scalar("Loss/valid", valid_loss, step + 1)
if valid_loss < best_loss:
best_loss = valid_loss
best_state_dict = model.state_dict()
pbar = tqdm(total=valid_steps, ncols=0, desc="Train", unit=" step")
if (step + 1) % save_steps == 0 and best_state_dict is not None:
loss_str = f"{best_loss:.4f}".replace(".", "dot")
best_ckpt_name = f"retriever-best-loss{loss_str}.pt"
loss_str = f"{valid_loss:.4f}".replace(".", "dot")
curr_ckpt_name = f"retriever-step{step+1}-loss{loss_str}.pt"
current_state_dict = model.state_dict()
model.cpu()
model.load_state_dict(best_state_dict)
model.save(str(save_dir_path / best_ckpt_name))
model.load_state_dict(current_state_dict)
model.save(str(save_dir_path / curr_ckpt_name))
model.to(device)
pbar.write(f"Step {step + 1}, best model saved. (loss={best_loss:.4f})")
if (step + 1) >= milestones[1]:
self_exclude = exclusive_rate
elif (step + 1) == milestones[0]:
ref_included = True
optimizer = AdamW(
[
{"params": model.unet.parameters(), "lr": 1e-6},
{"params": model.smoothers.parameters()},
{"params": model.mel_linear.parameters()},
{"params": model.post_net.parameters()},
],
lr=1e-4,
)
scheduler = get_cosine_schedule_with_warmup(
optimizer, warmup_steps, total_steps - milestones[0]
)
pbar.write("Optimizer and scheduler restarted.")
elif (step + 1) > milestones[0]:
self_exclude = (step + 1 - milestones[0]) / (milestones[1] - milestones[0])
self_exclude *= exclusive_rate
pbar.close()
if __name__ == "__main__":
main(**parse_args())
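# --- example usage (editorial addition; paths are illustrative) ---
# The data directory is the output of preprocess.py (it must contain
# metadata.json):
# python train.py features/ --save_dir checkpoints/ --batch_size 8 \
#     --preload --comment baseline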
| 7,874 | 30.754032 | 88 | py |
FragmentVC | FragmentVC-main/preprocess.py | #!/usr/bin/env python3
"""Precompute Wav2Vec features."""
import os
import json
from pathlib import Path
from tempfile import mkstemp
from multiprocessing import cpu_count
import tqdm
import torch
from torch.utils.data import DataLoader
from jsonargparse import ArgumentParser, ActionConfigFile
from models import load_pretrained_wav2vec
from data import PreprocessDataset
def parse_args():
"""Parse command-line arguments."""
parser = ArgumentParser()
parser.add_argument("data_dirs", type=str, nargs="+")
parser.add_argument("wav2vec_path", type=str)
parser.add_argument("out_dir", type=str)
parser.add_argument("--trim_method", choices=["librosa", "vad"], default="vad")
parser.add_argument("--n_workers", type=int, default=cpu_count())
parser.add_argument("--sample_rate", type=int, default=16000)
parser.add_argument("--preemph", type=float, default=0.97)
parser.add_argument("--hop_len", type=int, default=326)
parser.add_argument("--win_len", type=int, default=1304)
parser.add_argument("--n_fft", type=int, default=1304)
parser.add_argument("--n_mels", type=int, default=80)
parser.add_argument("--f_min", type=int, default=80)
parser.add_argument("--audio_config", action=ActionConfigFile)
return vars(parser.parse_args())
def main(
data_dirs,
wav2vec_path,
out_dir,
trim_method,
n_workers,
sample_rate,
preemph,
hop_len,
win_len,
n_fft,
n_mels,
f_min,
**kwargs,
):
"""Main function."""
out_dir_path = Path(out_dir)
if out_dir_path.exists():
assert out_dir_path.is_dir()
else:
out_dir_path.mkdir(parents=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dataset = PreprocessDataset(
data_dirs,
trim_method,
sample_rate,
preemph,
hop_len,
win_len,
n_fft,
n_mels,
f_min,
)
dataloader = DataLoader(
dataset, batch_size=1, shuffle=False, drop_last=False, num_workers=n_workers
)
wav2vec = load_pretrained_wav2vec(wav2vec_path).to(device)
speaker_infos = {}
pbar = tqdm.tqdm(total=len(dataset), ncols=0)
for speaker_name, audio_path, wav, mel in dataloader:
if wav.size(-1) < 10:
continue
wav = wav.to(device)
speaker_name = speaker_name[0]
audio_path = audio_path[0]
with torch.no_grad():
feat = wav2vec.extract_features(wav, None)[0]
feat = feat.detach().cpu().squeeze(0)
mel = mel.squeeze(0)
fd, temp_file = mkstemp(suffix=".tar", prefix="utterance-", dir=out_dir_path)
torch.save({"feat": feat, "mel": mel}, temp_file)
os.close(fd)
if speaker_name not in speaker_infos.keys():
speaker_infos[speaker_name] = []
speaker_infos[speaker_name].append(
{
"feature_path": Path(temp_file).name,
"audio_path": audio_path,
"feat_len": len(feat),
"mel_len": len(mel),
}
)
pbar.update(dataloader.batch_size)
with open(out_dir_path / "metadata.json", "w") as f:
json.dump(speaker_infos, f, indent=2)
if __name__ == "__main__":
main(**parse_args())
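# --- example usage (editorial addition; paths are illustrative) ---
# Each child directory of a data dir is treated as one speaker:
# python preprocess.py VCTK-Corpus/wav48 checkpoints/wav2vec_small.pt \
#     features/ --trim_method vad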
| 3,318 | 25.766129 | 85 | py |
FragmentVC | FragmentVC-main/models/utils.py | """Useful utilities."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from fairseq.models.wav2vec import Wav2Vec2Model
def load_pretrained_wav2vec(ckpt_path):
"""Load pretrained Wav2Vec model."""
ckpt = torch.load(ckpt_path)
model = Wav2Vec2Model.build_model(ckpt["args"], task=None)
model.load_state_dict(ckpt["model"])
model.remove_pretraining_modules()
model.eval()
return model
def get_cosine_schedule_with_warmup(
optimizer: Optimizer,
num_warmup_steps: int,
num_training_steps: int,
num_cycles: float = 0.5,
last_epoch: int = -1,
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
num_cycles (:obj:`float`, `optional`, defaults to 0.5):
The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0
following a half-cosine).
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(
max(1, num_training_steps - num_warmup_steps)
)
return max(
0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))
)
return LambdaLR(optimizer, lr_lambda, last_epoch)
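# --- usage sketch (editorial addition; values are illustrative) ---
if __name__ == "__main__":
    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.AdamW([param], lr=1e-4)
    scheduler = get_cosine_schedule_with_warmup(
        optimizer, num_warmup_steps=10, num_training_steps=100
    )
    for step in range(100):
        optimizer.step()
        scheduler.step()
        if step % 25 == 0:
            # Linear warmup to 1e-4, then cosine decay towards 0.
            print(step, scheduler.get_last_lr())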
| 2,140 | 33.532258 | 116 | py |
FragmentVC | FragmentVC-main/models/model.py | """FragmentVC model architecture."""
from typing import Tuple, List, Optional
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from .convolutional_transformer import Smoother, Extractor
class FragmentVC(nn.Module):
"""
FragmentVC uses Wav2Vec feature of the source speaker to query and attend
on mel spectrogram of the target speaker.
"""
def __init__(self, d_model=512):
super().__init__()
self.unet = UnetBlock(d_model)
self.smoothers = nn.TransformerEncoder(Smoother(d_model, 2, 1024), num_layers=3)
self.mel_linear = nn.Linear(d_model, 80)
self.post_net = nn.Sequential(
nn.Conv1d(80, 512, kernel_size=5, padding=2),
nn.BatchNorm1d(512),
nn.Tanh(),
nn.Dropout(0.5),
nn.Conv1d(512, 512, kernel_size=5, padding=2),
nn.BatchNorm1d(512),
nn.Tanh(),
nn.Dropout(0.5),
nn.Conv1d(512, 512, kernel_size=5, padding=2),
nn.BatchNorm1d(512),
nn.Tanh(),
nn.Dropout(0.5),
nn.Conv1d(512, 512, kernel_size=5, padding=2),
nn.BatchNorm1d(512),
nn.Tanh(),
nn.Dropout(0.5),
nn.Conv1d(512, 80, kernel_size=5, padding=2),
nn.BatchNorm1d(80),
nn.Dropout(0.5),
)
def forward(
self,
srcs: Tensor,
refs: Tensor,
src_masks: Optional[Tensor] = None,
ref_masks: Optional[Tensor] = None,
) -> Tuple[Tensor, List[Optional[Tensor]]]:
"""Forward function.
Args:
srcs: (batch, src_len, 768)
src_masks: (batch, src_len)
refs: (batch, 80, ref_len)
ref_masks: (batch, ref_len)
"""
# out: (src_len, batch, d_model)
out, attns = self.unet(srcs, refs, src_masks=src_masks, ref_masks=ref_masks)
# out: (src_len, batch, d_model)
out = self.smoothers(out, src_key_padding_mask=src_masks)
# out: (src_len, batch, 80)
out = self.mel_linear(out)
# out: (batch, 80, src_len)
out = out.transpose(1, 0).transpose(2, 1)
refined = self.post_net(out)
out = out + refined
# out: (batch, 80, src_len)
return out, attns
class UnetBlock(nn.Module):
"""Hierarchically attend on references."""
def __init__(self, d_model: int):
super(UnetBlock, self).__init__()
self.conv1 = nn.Conv1d(80, d_model, 3, padding=1, padding_mode="replicate")
self.conv2 = nn.Conv1d(d_model, d_model, 3, padding=1, padding_mode="replicate")
self.conv3 = nn.Conv1d(d_model, d_model, 3, padding=1, padding_mode="replicate")
self.prenet = nn.Sequential(
nn.Linear(768, 768), nn.ReLU(), nn.Linear(768, d_model),
)
self.extractor1 = Extractor(d_model, 2, 1024, no_residual=True)
self.extractor2 = Extractor(d_model, 2, 1024)
self.extractor3 = Extractor(d_model, 2, 1024)
def forward(
self,
srcs: Tensor,
refs: Tensor,
src_masks: Optional[Tensor] = None,
ref_masks: Optional[Tensor] = None,
) -> Tuple[Tensor, List[Optional[Tensor]]]:
"""Forward function.
Args:
srcs: (batch, src_len, 768)
src_masks: (batch, src_len)
refs: (batch, 80, ref_len)
ref_masks: (batch, ref_len)
"""
# tgt: (batch, tgt_len, d_model)
tgt = self.prenet(srcs)
# tgt: (tgt_len, batch, d_model)
tgt = tgt.transpose(0, 1)
# ref*: (batch, d_model, mel_len)
ref1 = self.conv1(refs)
ref2 = self.conv2(F.relu(ref1))
ref3 = self.conv3(F.relu(ref2))
# out*: (tgt_len, batch, d_model)
out, attn1 = self.extractor1(
tgt,
ref3.transpose(1, 2).transpose(0, 1),
tgt_key_padding_mask=src_masks,
memory_key_padding_mask=ref_masks,
)
out, attn2 = self.extractor2(
out,
ref2.transpose(1, 2).transpose(0, 1),
tgt_key_padding_mask=src_masks,
memory_key_padding_mask=ref_masks,
)
out, attn3 = self.extractor3(
out,
ref1.transpose(1, 2).transpose(0, 1),
tgt_key_padding_mask=src_masks,
memory_key_padding_mask=ref_masks,
)
# out: (tgt_len, batch, d_model)
return out, [attn1, attn2, attn3]
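# --- shape-check sketch (editorial addition; sizes are illustrative) ---
if __name__ == "__main__":
    import torch

    model = FragmentVC().eval()
    srcs = torch.randn(2, 100, 768)  # (batch, src_len, wav2vec_dim)
    refs = torch.randn(2, 80, 300)   # (batch, n_mels, ref_len)
    with torch.no_grad():
        out, attns = model(srcs, refs)
    print(out.shape)   # torch.Size([2, 80, 100])
    print(len(attns))  # 3 cross-attention maps, one per Extractor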
| 4,523 | 29.362416 | 88 | py |
FragmentVC | FragmentVC-main/models/convolutional_transformer.py | """Convolutional transformer"""
from typing import Optional, Tuple
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Module, Dropout, LayerNorm, Conv1d, MultiheadAttention
class Smoother(Module):
"""Convolutional Transformer Encoder Layer"""
def __init__(self, d_model: int, nhead: int, d_hid: int, dropout=0.1):
super(Smoother, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.conv1 = Conv1d(d_model, d_hid, 9, padding=4)
self.conv2 = Conv1d(d_hid, d_model, 1, padding=0)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
def forward(
self,
src: Tensor,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
) -> Tensor:
# multi-head self attention
src2 = self.self_attn(
src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
)[0]
# add & norm
src = src + self.dropout1(src2)
src = self.norm1(src)
# conv1d
src2 = src.transpose(0, 1).transpose(1, 2)
src2 = self.conv2(F.relu(self.conv1(src2)))
src2 = src2.transpose(1, 2).transpose(0, 1)
# add & norm
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
class Extractor(Module):
"""Convolutional Transformer Decoder Layer"""
def __init__(
self, d_model: int, nhead: int, d_hid: int, dropout=0.1, no_residual=False,
):
super(Extractor, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.cross_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.conv1 = Conv1d(d_model, d_hid, 9, padding=4)
self.conv2 = Conv1d(d_hid, d_model, 1, padding=0)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.norm3 = LayerNorm(d_model)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
self.dropout3 = Dropout(dropout)
self.no_residual = no_residual
def forward(
self,
tgt: Tensor,
memory: Tensor,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
# multi-head self attention
tgt2 = self.self_attn(
tgt, tgt, tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
)[0]
# add & norm
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
# multi-head cross attention
tgt2, attn = self.cross_attn(
tgt,
memory,
memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)
# add & norm
if self.no_residual:
tgt = self.dropout2(tgt2)
else:
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
# conv1d
tgt2 = tgt.transpose(0, 1).transpose(1, 2)
tgt2 = self.conv2(F.relu(self.conv1(tgt2)))
tgt2 = tgt2.transpose(1, 2).transpose(0, 1)
# add & norm
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt, attn
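# --- shape-check sketch (editorial addition; sizes are illustrative) ---
if __name__ == "__main__":
    import torch

    extractor = Extractor(d_model=512, nhead=2, d_hid=1024).eval()
    tgt = torch.randn(100, 2, 512)     # (tgt_len, batch, d_model)
    memory = torch.randn(300, 2, 512)  # (mem_len, batch, d_model)
    with torch.no_grad():
        out, attn = extractor(tgt, memory)
    print(out.shape, attn.shape)  # (100, 2, 512) and (2, 100, 300)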
| 3,526 | 28.889831 | 84 | py |
FragmentVC | FragmentVC-main/data/intra_speaker_dataset.py | """Dataset for reconstruction scheme."""
import json
import random
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor
import torch
from tqdm import tqdm
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
class IntraSpeakerDataset(Dataset):
"""Dataset for reconstruction scheme.
Returns:
speaker_id: speaker id number.
feat: Wav2Vec feature tensor.
mel: log mel spectrogram tensor.
"""
def __init__(self, data_dir, metadata_path, n_samples=5, pre_load=False):
with open(metadata_path, "r") as f:
metadata = json.load(f)
executor = ThreadPoolExecutor(max_workers=4)
futures = []
for speaker_name, utterances in metadata.items():
for utterance in utterances:
futures.append(
executor.submit(
_process_data,
speaker_name,
data_dir,
utterance["feature_path"],
pre_load,
)
)
self.data = []
self.speaker_to_indices = {}
for i, future in enumerate(tqdm(futures, ncols=0)):
result = future.result()
speaker_name = result[0]
self.data.append(result)
if speaker_name not in self.speaker_to_indices:
self.speaker_to_indices[speaker_name] = [i]
else:
self.speaker_to_indices[speaker_name].append(i)
self.data_dir = Path(data_dir)
self.n_samples = n_samples
self.pre_load = pre_load
def __len__(self):
return len(self.data)
def _get_data(self, index):
if self.pre_load:
speaker_name, content_emb, target_mel = self.data[index]
else:
speaker_name, content_emb, target_mel = _load_data(*self.data[index])
return speaker_name, content_emb, target_mel
def __getitem__(self, index):
speaker_name, content_emb, target_mel = self._get_data(index)
utterance_indices = self.speaker_to_indices[speaker_name].copy()
utterance_indices.remove(index)
sampled_mels = []
for sampled_id in random.sample(utterance_indices, self.n_samples):
sampled_mel = self._get_data(sampled_id)[2]
sampled_mels.append(sampled_mel)
reference_mels = torch.cat(sampled_mels, dim=0)
return content_emb, reference_mels, target_mel
def _process_data(speaker_name, data_dir, feature_path, load):
if load:
return _load_data(speaker_name, data_dir, feature_path)
else:
return speaker_name, data_dir, feature_path
def _load_data(speaker_name, data_dir, feature_path):
feature = torch.load(Path(data_dir, feature_path))
content_emb = feature["feat"]
target_mel = feature["mel"]
return speaker_name, content_emb, target_mel
def collate_batch(batch):
"""Collate a batch of data."""
srcs, refs, tgts = zip(*batch)
src_lens = [len(src) for src in srcs]
ref_lens = [len(ref) for ref in refs]
tgt_lens = [len(tgt) for tgt in tgts]
overlap_lens = [
min(src_len, tgt_len) for src_len, tgt_len in zip(src_lens, tgt_lens)
]
srcs = pad_sequence(srcs, batch_first=True) # (batch, max_src_len, wav2vec_dim)
src_masks = [torch.arange(srcs.size(1)) >= src_len for src_len in src_lens]
src_masks = torch.stack(src_masks) # (batch, max_src_len)
refs = pad_sequence(refs, batch_first=True, padding_value=-20)
refs = refs.transpose(1, 2) # (batch, mel_dim, max_ref_len)
ref_masks = [torch.arange(refs.size(2)) >= ref_len for ref_len in ref_lens]
ref_masks = torch.stack(ref_masks) # (batch, max_ref_len)
tgts = pad_sequence(tgts, batch_first=True, padding_value=-20)
tgts = tgts.transpose(1, 2) # (batch, mel_dim, max_tgt_len)
tgt_masks = [torch.arange(tgts.size(2)) >= tgt_len for tgt_len in tgt_lens]
tgt_masks = torch.stack(tgt_masks) # (batch, max_tgt_len)
return srcs, src_masks, refs, ref_masks, tgts, tgt_masks, overlap_lens
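# --- collate sketch (editorial addition; lengths are illustrative) ---
if __name__ == "__main__":
    batch = [
        (torch.randn(5, 768), torch.randn(12, 80), torch.randn(6, 80)),
        (torch.randn(7, 768), torch.randn(10, 80), torch.randn(7, 80)),
    ]
    srcs, src_masks, refs, ref_masks, tgts, tgt_masks, overlap_lens = collate_batch(batch)
    print(srcs.shape, refs.shape, tgts.shape)  # (2, 7, 768) (2, 80, 12) (2, 80, 7)
    print(overlap_lens)  # [5, 7]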
| 4,148 | 31.928571 | 84 | py |
FragmentVC | FragmentVC-main/data/preprocess_dataset.py | """Precompute Wav2Vec features and spectrograms."""
from copy import deepcopy
from pathlib import Path
import torch
from librosa.util import find_files
import sox
from .utils import load_wav, log_mel_spectrogram
class PreprocessDataset(torch.utils.data.Dataset):
"""Prefetch audio data for preprocessing."""
def __init__(
self,
data_dirs,
trim_method,
sample_rate,
preemph,
hop_len,
win_len,
n_fft,
n_mels,
f_min,
):
data = []
for data_dir in data_dirs:
data_dir_path = Path(data_dir)
speaker_dirs = [x for x in data_dir_path.iterdir() if x.is_dir()]
for speaker_dir in speaker_dirs:
audio_paths = find_files(speaker_dir)
if len(audio_paths) == 0:
continue
speaker_name = speaker_dir.name
for audio_path in audio_paths:
data.append((speaker_name, audio_path))
self.trim_method = trim_method
self.sample_rate = sample_rate
self.preemph = preemph
self.hop_len = hop_len
self.win_len = win_len
self.n_fft = n_fft
self.n_mels = n_mels
self.f_min = f_min
self.data = data
if trim_method == "vad":
tfm = sox.Transformer()
tfm.vad(location=1)
tfm.vad(location=-1)
self.sox_transform = tfm
def __len__(self):
return len(self.data)
def __getitem__(self, index):
speaker_name, audio_path = self.data[index]
if self.trim_method == "librosa":
wav = load_wav(audio_path, self.sample_rate, trim=True)
elif self.trim_method == "vad":
wav = load_wav(audio_path, self.sample_rate)
trim_wav = self.sox_transform.build_array(
input_array=wav, sample_rate_in=self.sample_rate
)
wav = deepcopy(trim_wav if len(trim_wav) > 10 else wav)
mel = log_mel_spectrogram(
wav,
self.preemph,
self.sample_rate,
self.n_mels,
self.n_fft,
self.hop_len,
self.win_len,
self.f_min,
)
return speaker_name, audio_path, torch.FloatTensor(wav), torch.FloatTensor(mel)
| 2,354 | 26.068966 | 87 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/utils.py | # coding=utf-8
# Copyleft 2019 Project LXRT
import sys
import csv
import base64
import time
import torch
import numpy as np
from tqdm import tqdm
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ["img_id", "img_h", "img_w", "objects_id", "objects_conf",
"attrs_id", "attrs_conf", "num_boxes", "boxes", "features"]
def load_obj_tsv(fname, topk=None):
"""Load object features from tsv file.
:param fname: The path to the tsv file.
:param topk: Only load features for top K images (lines) in the tsv file.
Will load all the features if topk is either -1 or None.
:return: A list of image object features where each feature is a dict.
See FIELDNAMES above for the keys in the feature dict.
"""
data = []
start_time = time.time()
print("Start to load Faster-RCNN detected objects from %s" % fname)
with open(fname) as f:
reader = csv.DictReader(f, FIELDNAMES, delimiter="\t")
for i, item in enumerate(tqdm(reader)):
for key in ['img_h', 'img_w', 'num_boxes']:
item[key] = int(item[key])
boxes = item['num_boxes']
decode_config = [
('objects_id', (boxes, ), np.int64),
('objects_conf', (boxes, ), np.float32),
('attrs_id', (boxes, ), np.int64),
('attrs_conf', (boxes, ), np.float32),
('boxes', (boxes, 4), np.float32),
('features', (boxes, -1), np.float32),
]
for key, shape, dtype in decode_config:
item[key] = np.frombuffer(base64.b64decode(item[key]), dtype=dtype)
item[key] = item[key].reshape(shape)
item[key].setflags(write=False)
data.append(item)
if topk is not None and len(data) == topk:
break
elapsed_time = time.time() - start_time
print("Loaded %d images in file %s in %d seconds." % (len(data), fname, elapsed_time))
return data
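# --- usage sketch (editorial addition; the tsv path is illustrative) ---
#   data = load_obj_tsv('data/mscoco_imgfeat/train2014_obj36.tsv', topk=1000)
#   item = data[0]
#   item['features'].shape  # (num_boxes, 2048) Faster R-CNN RoI features
#   item['boxes'].shape     # (num_boxes, 4) box coordinates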
def load_obj_tsv_save_to_h5(fname, save_h5_name, save_json_name, all_examples):
import h5py
import json
data = []
start_time = time.time()
print("Start to load Faster-RCNN detected objects from %s" % fname)
metadata = []
h5_file = h5py.File(save_h5_name, 'w')
h5_features = h5_file.create_dataset('features', (all_examples, 36, 2048), dtype=np.float32)
h5_boxes = h5_file.create_dataset('boxes', (all_examples, 36, 4), dtype=np.float32)
h5_objects_id = h5_file.create_dataset('objects_id', (all_examples,36), dtype=np.int64)
h5_objects_conf = h5_file.create_dataset('objects_conf', (all_examples,36), dtype=np.float32)
h5_attrs_id = h5_file.create_dataset('attrs_id', (all_examples,36), dtype=np.int64)
h5_attrs_conf = h5_file.create_dataset('attrs_conf', (all_examples,36), dtype=np.float32)
with open(fname) as f:
reader = csv.DictReader(f, FIELDNAMES, delimiter="\t")
for i, item in enumerate(tqdm(reader)):
for key in ['img_h', 'img_w', 'num_boxes']:
item[key] = int(item[key])
boxes = item['num_boxes']
decode_config = [
('objects_id', (boxes, ), np.int64),
('objects_conf', (boxes, ), np.float32),
('attrs_id', (boxes, ), np.int64),
('attrs_conf', (boxes, ), np.float32),
('boxes', (boxes, 4), np.float32),
('features', (boxes, -1), np.float32),
]
for key, shape, dtype in decode_config:
item[key] = np.frombuffer(base64.b64decode(item[key]), dtype=dtype)
item[key] = item[key].reshape(shape)
item[key].setflags(write=False)
metadata.append(
{
"img_id": item["img_id"],
"img_h": item["img_h"],
"img_w": item['img_w']
}
)
h5_features[i] = item["features"]
h5_boxes[i] = item["boxes"]
h5_objects_id[i] = item["objects_id"]
h5_objects_conf[i] = item["objects_conf"]
h5_attrs_id[i] = item["attrs_id"]
h5_attrs_conf[i] = item["attrs_conf"]
with open(save_json_name, "w") as f:
json.dump(metadata, f)
return data
def create_slim_h5(fname, save_h5_name, save_json_name, all_examples, img_ids_to_keep):
import h5py
import json
data = []
start_time = time.time()
print("Start to load Faster-RCNN detected objects from %s" % fname)
metadata = []
h5_file = h5py.File(save_h5_name, 'w')
h5_features = h5_file.create_dataset('features', (all_examples, 36, 2048), dtype=np.float32)
h5_boxes = h5_file.create_dataset('boxes', (all_examples, 36, 4), dtype=np.float32)
h5_objects_id = h5_file.create_dataset('objects_id', (all_examples,36), dtype=np.int64)
h5_objects_conf = h5_file.create_dataset('objects_conf', (all_examples,36), dtype=np.float32)
h5_attrs_id = h5_file.create_dataset('attrs_id', (all_examples,36), dtype=np.int64)
h5_attrs_conf = h5_file.create_dataset('attrs_conf', (all_examples,36), dtype=np.float32)
i = 0
with open(fname) as f:
reader = csv.DictReader(f, FIELDNAMES, delimiter="\t")
for index, item in enumerate(tqdm(reader)):
if item["img_id"] not in img_ids_to_keep:
continue
for key in ['img_h', 'img_w', 'num_boxes']:
item[key] = int(item[key])
boxes = item['num_boxes']
decode_config = [
('objects_id', (boxes, ), np.int64),
('objects_conf', (boxes, ), np.float32),
('attrs_id', (boxes, ), np.int64),
('attrs_conf', (boxes, ), np.float32),
('boxes', (boxes, 4), np.float32),
('features', (boxes, -1), np.float32),
]
for key, shape, dtype in decode_config:
item[key] = np.frombuffer(base64.b64decode(item[key]), dtype=dtype)
item[key] = item[key].reshape(shape)
item[key].setflags(write=False)
metadata.append(
{
"img_id": item["img_id"],
"img_h": item["img_h"],
"img_w": item['img_w']
}
)
h5_features[i] = item["features"]
h5_boxes[i] = item["boxes"]
h5_objects_id[i] = item["objects_id"]
h5_objects_conf[i] = item["objects_conf"]
h5_attrs_id[i] = item["attrs_id"]
h5_attrs_conf[i] = item["attrs_conf"]
i += 1
with open(save_json_name, "w") as f:
json.dump(metadata, f)
return data
def load_lxmert_sgg(path, model):
print("Load rel pre-trained LXMERT from %s " % path)
loaded_state_dict = torch.load(path)["model"]
model_state_dict = model.state_dict()
'''print("loaded_state_dict", loaded_state_dict["model"].keys())
print("\n\n\n\n\n")
print("model_state_dict", model_state_dict.keys())
assert(0)'''
new_loaded_state_dict = {}
for key in list(loaded_state_dict.keys()):
if "lxrt" in key:
new_loaded_state_dict[key.split("lxrt.")[-1]] = loaded_state_dict[key]
# module.rel_heads.rel_predictor.lxrt.encoder.r_layers.3.output.LayerNorm.weight -> encoder.r_layers.3.output.LayerNorm.weight
load_state_dict_flexible(model.lxrt_encoder.model.bert, new_loaded_state_dict)
def load_lxmert_sgg_pretrain(path, model):
print("Load rel pre-trained LXMERT from %s " % path)
loaded_state_dict = torch.load(path)["model"]
model_state_dict = model.state_dict()
'''print("loaded_state_dict", loaded_state_dict.keys())
print("\n\n\n\n\n")
print("model_state_dict", model_state_dict.keys())
assert(0)'''
new_loaded_state_dict = {}
for key in list(loaded_state_dict.keys()):
if "lxrt" in key:
new_loaded_state_dict[key.split("lxrt.")[-1]] = loaded_state_dict[key]
# module.rel_heads.rel_predictor.lxrt.encoder.r_layers.3.output.LayerNorm.weight -> encoder.r_layers.3.output.LayerNorm.weight
load_state_dict_flexible(model.bert, new_loaded_state_dict)
def load_lxmert_to_sgg(path, model):
print("Load rel pre-trained LXMERT from %s " % path)
loaded_state_dict = torch.load(path)["model"]
model_state_dict = model.state_dict()
'''print("loaded_state_dict", loaded_state_dict.keys())
print("\n\n\n\n\n")
print("model_state_dict", model_state_dict.keys())
assert(0)'''
new_loaded_state_dict = {}
for key in list(loaded_state_dict.keys()):
if "lxrt" in key:
new_loaded_state_dict[key.split("lxrt.")[-1]] = loaded_state_dict[key]
# module.rel_heads.rel_predictor.lxrt.encoder.r_layers.3.output.LayerNorm.weight -> encoder.r_layers.3.output.LayerNorm.weight
load_state_dict_flexible(model.bert, new_loaded_state_dict)
def load_state_dict_flexible(model, state_dict):
try:
model.load_state_dict(state_dict)
    except Exception:
print("Full loading failed!! Try partial loading!!")
own_state = model.state_dict()
for name, param in state_dict.items():
if name not in own_state:
print("Skipped: " + name)
continue
if isinstance(param, torch.nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
own_state[name].copy_(param)
print("Successfully loaded: "+name)
            except Exception:
print("Part load failed: " + name) | 9,752 | 38.646341 | 138 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/param.py | # coding=utf-8
# Copyleft 2019 project LXRT.
import argparse
import random
import numpy as np
import torch
import logging
logging.basicConfig(level=logging.INFO)
def get_optimizer(optim):
# Bind the optimizer
if optim == 'rms':
print("Optimizer: Using RMSProp")
optimizer = torch.optim.RMSprop
elif optim == 'adam':
print("Optimizer: Using Adam")
optimizer = torch.optim.Adam
elif optim == 'adamax':
print("Optimizer: Using Adamax")
optimizer = torch.optim.Adamax
elif optim == 'sgd':
print("Optimizer: sgd")
optimizer = torch.optim.SGD
elif 'bert' in optim:
optimizer = 'bert' # The bert optimizer will be bind later.
else:
assert False, "Please add your optimizer %s in the list." % optim
return optimizer
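# A minimal usage sketch (with a hypothetical `model`): get_optimizer returns the
# optimizer *class*, not an instance, so it is bound to parameters later, e.g.
#
#     opt_cls = get_optimizer('adam')                  # -> torch.optim.Adam
#     optimizer = opt_cls(model.parameters(), lr=1e-4)
#
# The 'bert' case returns the string 'bert' instead; per the comment above, the
# actual BERT optimizer is bound later, once warmup and total steps are known.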
def parse_args():
parser = argparse.ArgumentParser()
# Data Splits
parser.add_argument("--train", default='train')
parser.add_argument("--valid", default='valid')
parser.add_argument("--test", default=None)
# Training Hyper-parameters
parser.add_argument('--batchSize', dest='batch_size', type=int, default=256)
parser.add_argument('--optim', default='bert')
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--dropout', type=float, default=0.1)
parser.add_argument('--seed', type=int, default=9595, help='random seed')
# Debugging
parser.add_argument('--output', type=str, default='snap/test')
parser.add_argument("--fast", action='store_const', default=False, const=True)
parser.add_argument("--tiny", action='store_const', default=False, const=True)
parser.add_argument("--tqdm", action='store_const', default=False, const=True)
# Model Loading
parser.add_argument('--load', type=str, default=None,
help='Load the model (usually the fine-tuned model).')
parser.add_argument('--loadLXMERT', dest='load_lxmert', type=str, default=None,
help='Load the pre-trained LXMERT model.')
parser.add_argument('--loadLXMERTQA', dest='load_lxmert_qa', type=str, default=None,
help='Load the pre-trained LXMERT model with QA answer head.')
parser.add_argument("--fromScratch", dest='from_scratch', action='store_const', default=False, const=True,
help='If none of the --load, --loadLXMERT, --loadLXMERTQA is set, '
'the model would be trained from scratch. If --fromScratch is'
' not specified, the model would load BERT-pre-trained weights by'
' default. ')
# Optimization
parser.add_argument("--mceLoss", dest='mce_loss', action='store_const', default=False, const=True)
# LXRT Model Config
# Note: LXRT = L, X, R (three encoders), Transformer
parser.add_argument("--llayers", default=9, type=int, help='Number of Language layers')
parser.add_argument("--xlayers", default=5, type=int, help='Number of CROSS-modality layers.')
parser.add_argument("--rlayers", default=5, type=int, help='Number of object Relationship layers.')
# LXMERT Pre-training Config
parser.add_argument("--taskMatched", dest='task_matched', action='store_const', default=False, const=True)
parser.add_argument("--taskMaskLM", dest='task_mask_lm', action='store_const', default=False, const=True)
parser.add_argument("--taskObjPredict", dest='task_obj_predict', action='store_const', default=False, const=True)
parser.add_argument("--taskQA", dest='task_qa', action='store_const', default=False, const=True)
parser.add_argument("--visualLosses", dest='visual_losses', default='obj,attr,feat', type=str)
parser.add_argument("--qaSets", dest='qa_sets', default=None, type=str)
parser.add_argument("--wordMaskRate", dest='word_mask_rate', default=0.15, type=float)
parser.add_argument("--objMaskRate", dest='obj_mask_rate', default=0.15, type=float)
# Training configuration
parser.add_argument("--multiGPU", action='store_const', default=False, const=True)
parser.add_argument("--numWorkers", dest='num_workers', default=0)
parser.add_argument("--config", dest='config', default=None, type=str)
parser.add_argument("--save_folder", dest='save_folder', default="test", type=str)
    # Invalid parameters just designed to accommodate sgg code
parser.add_argument("--config-file", dest="config-file", default=None, type=str)
parser.add_argument("--algorithm", dest="algorithm", default=None, type=str)
parser.add_argument("--save_path", dest="save_path", default=None, type=str)
# Parse the arguments.
args = parser.parse_args()
# Bind optimizer class.
args.optimizer = get_optimizer(args.optim)
# Set seeds
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
# Added by harold. Allows additional parameters specified by the json file.
import commentjson
from attrdict import AttrDict
from pprint import pprint
if args.config is not None:
with open(args.config) as f:
config_json = commentjson.load(f)
dict_args = vars(args)
dict_args.update(config_json) # Update with overwrite
args = AttrDict(dict_args)
import shutil
import os
output = args.output
    if not os.path.exists(output):
        os.makedirs(output)
shutil.copyfile(args.config, os.path.join(output, os.path.basename(args.config)))
# Set up logs
import sys
run_log_counter = 0
    while os.path.exists(args.output + '/run_{}.log'.format(run_log_counter)):
run_log_counter += 1
    file_log = open(args.output + '/run_{}.log'.format(run_log_counter), 'w')  # Log file that mirrors everything printed to stdout
file_log.write("")
class Unbuffered:
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
file_log.write(data) # Write the data of stdout here to a text file as well
def flush(self):
pass
sys.stdout = Unbuffered(sys.stdout)
from pprint import pprint
pprint(args)
print("\n\n\n\n")
with open(args.config) as f:
print(f.read())
return args
args = parse_args()
| 6,424 | 38.906832 | 117 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/tools/convert_tsv_to_h5.py | import sys
import csv
import base64
import time
import torch
import numpy as np
from src.utils import load_obj_tsv_save_to_h5
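# Each call below converts one Faster R-CNN feature TSV into an HDF5 file plus a
# JSON metadata file. The final positional argument is the expected number of
# images in the split, used to pre-allocate the fixed-size HDF5 datasets
# (36 boxes with 2048-d features per image, as in create_slim_h5).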
load_obj_tsv_save_to_h5(
"data/mscoco_imgfeat/train2014_obj36.tsv",
"data/mscoco_imgfeat/train2014_obj36.h5",
"data/mscoco_imgfeat/train2014_obj36.json",
82783
)
load_obj_tsv_save_to_h5(
"data/vg_gqa_imgfeat/vg_gqa_obj36.tsv",
"data/vg_gqa_imgfeat/vg_gqa_obj36.h5",
"data/vg_gqa_imgfeat/vg_gqa_obj36.json",
148854
)
load_obj_tsv_save_to_h5(
"data/mscoco_imgfeat/val2014_obj36.tsv",
"data/mscoco_imgfeat/val2014_obj36.h5",
"data/mscoco_imgfeat/val2014_obj36.json",
40504
)
'''
load_obj_tsv_save_to_h5(
"data/nlvr2_imgfeat/train_obj36.tsv",
"data/nlvr2_imgfeat/train_obj36.h5",
"data/nlvr2_imgfeat/train_obj36.json",
103170
)
load_obj_tsv_save_to_h5(
"data/nlvr2_imgfeat/valid_obj36.tsv",
"data/nlvr2_imgfeat/valid_obj36.h5",
"data/nlvr2_imgfeat/valid_obj36.json",
8102
)'''
'''
load_obj_tsv_save_to_h5(
"data/nlvr2_imgfeat/test_obj36.tsv",
"data/nlvr2_imgfeat/test_obj36.h5",
"data/nlvr2_imgfeat/test_obj36.json",
8082
)'''
| 1,165 | 20.592593 | 47 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/pretrain/box.py | import torch
import numpy as np
def heuristic_filter(box_a, box_b, image_size, threshhold = 0.15):
# center_mass
box_a_x_center = (box_a[0] + box_a[2]) / 2
box_b_x_center = (box_b[0] + box_b[2]) / 2
box_a_y_center = (box_a[1] + box_a[3]) / 2
box_b_y_center = (box_b[1] + box_b[3]) / 2
# X non overlap
if box_a[0] > box_b[2] or box_b[0] > box_a[2]:
if min(abs(box_a[0] - box_b[2]), abs(box_b[0] - box_a[2])) / image_size[0] > threshhold:
return False
if box_a[1] > box_b[3] or box_b[1] > box_a[3]:
if min(abs(box_a[1] - box_b[3]), abs(box_b[1] - box_a[3])) / image_size[1] > threshhold:
return False
'''print(abs(box_b_x_center - box_a_x_center) / image_size[0])
if abs(box_b_x_center - box_a_x_center) / image_size[0] > threshhold:
return False
if abs(box_b_y_center - box_a_y_center) / image_size[1] > threshhold:
return False'''
return True
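# Worked example, assuming a 100x100 image and (xmin, ymin, xmax, ymax) boxes:
# a = (0, 0, 10, 10) and b = (50, 0, 60, 10) do not overlap on x, and the x-gap
# is min(|0 - 60|, |50 - 10|) / 100 = 0.4 > 0.15, so the pair is filtered out
# (returns False); with b = (20, 0, 30, 10) the gap is 0.1 <= 0.15, no y-gap
# check fires, and the pair is kept (returns True).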
def determine_box_position_type(box_a, box_b, image_size):
if box_a[0] > box_b[2] or box_b[0] > box_a[2]: # No overlap
# Then calculate their distance
if box_a[1] > box_b[3] or box_b[1] > box_a[3]: # y not overlap
return ( "x, y not overlap",
(min(abs(box_a[0] - box_b[2]), abs(box_b[0] - box_a[2])) / image_size[0]).item(),
(min(abs(box_a[0] - box_b[2]), abs(box_b[0] - box_a[2])) / min(abs(box_a[0] - box_a[2]), abs(box_b[0] - box_b[2]))).item(),
(min(abs(box_a[0] - box_a[2]), abs(box_b[0] - box_b[2])) / image_size[0]).item()
)
else:
overlap_length = min(abs(box_a[1] - box_b[3]), abs(box_b[1] - box_a[3]))
overlap_ratio = overlap_length / min(abs(box_a[1] - box_a[3]), abs(box_b[1] - box_b[3]))
return ("x not overlap, y overlap", min(overlap_ratio.item(), 1))
else:
# there is overlap, calculate how much they overlap
overlap_length = min(abs(box_a[0] - box_b[2]), abs(box_b[0] - box_a[2]))
overlap_ratio = overlap_length / min(abs(box_a[0] - box_a[2]), abs(box_b[0] - box_b[2]))
return min(overlap_ratio.item(), 1)
def add_to_the_left_to_the_right_relation(box_a, box_b, image_size, y_overlap_ratio_thresh, x_overlap_ratio_thresh):
if box_a[0] > box_b[2] or box_b[0] > box_a[2]: # No overlap
'''distance_ratio = min(abs(box_a[0] - box_b[2]), abs(box_b[0] - box_a[2])) / image_size[0]
if distance_ratio < no_overlap_thresh:
return (True, box_a[0] > box_b[2]) # a is to the right of b, if box_a[0] > box_b[2]
else:
return (False, box_a[0] > box_b[2]) '''
if box_a[1] > box_b[3] or box_b[1] > box_a[3]: # y not overlap
return (False, box_a[0] > box_b[2])
else:
overlap_length = min(abs(box_a[1] - box_b[3]), abs(box_b[1] - box_a[3]))
overlap_ratio = overlap_length / min(abs(box_a[1] - box_a[3]), abs(box_b[1] - box_b[3]))
if overlap_ratio > y_overlap_ratio_thresh:
return (True, box_a[0] > box_b[0])
else:
return (False, box_a[0] > box_b[0])
else:
# there is overlap, calculate how much they overlap
overlap_length = min(abs(box_a[0] - box_b[2]), abs(box_b[0] - box_a[2]))
overlap_ratio = overlap_length / min(abs(box_a[0] - box_a[2]), abs(box_b[0] - box_b[2]))
if overlap_ratio < x_overlap_ratio_thresh:
return (True, box_a[0] > box_b[0])
else:
return (False, box_a[0] > box_b[0])
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def boxlist_iou(boxlist1, boxlist2):
"""Compute the intersection over union of two set of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Arguments:
      boxlist1: (BoxList) bounding boxes, sized [N,4].
      boxlist2: (BoxList) bounding boxes, sized [M,4].
Returns:
(tensor) iou, sized [N,M].
Reference:
https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py
"""
if boxlist1.size != boxlist2.size:
raise RuntimeError(
"boxlists should have same image size, got {}, {}".format(boxlist1, boxlist2))
boxlist1 = boxlist1.convert("xyxy")
boxlist2 = boxlist2.convert("xyxy")
N = len(boxlist1)
M = len(boxlist2)
area1 = boxlist1.area()
area2 = boxlist2.area()
box1, box2 = boxlist1.bbox, boxlist2.bbox
lt = torch.max(box1[:, None, :2], box2[:, :2]) # [N,M,2]
rb = torch.min(box1[:, None, 2:], box2[:, 2:]) # [N,M,2]
TO_REMOVE = 1
wh = (rb - lt + TO_REMOVE).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
iou = inter / (area1[:, None] + area2 - inter)
return iou
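# Sanity check of the inclusive-coordinate convention (TO_REMOVE = 1), assuming
# BoxList.area() uses the same +1 as in maskrcnn-benchmark: two identical boxes
# (0, 0, 9, 9) each have width = height = 9 - 0 + 1 = 10, so inter = union = 100
# and the IoU is exactly 1.0.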
###########################################################################
### Torch Utils, creds to Max de Groot
###########################################################################
def bbox_intersections(box_a, box_b):
""" We resize both tensors to [A,B,2] without new malloc:
[A,2] -> [A,1,2] -> [A,B,2]
[B,2] -> [1,B,2] -> [A,B,2]
Then we compute the area of intersect between box_a and box_b.
Args:
box_a: (tensor) bounding boxes, Shape: [A,4].
box_b: (tensor) bounding boxes, Shape: [B,4].
Return:
(tensor) intersection area, Shape: [A,B].
"""
if isinstance(box_a, np.ndarray):
assert isinstance(box_b, np.ndarray)
return bbox_intersections_np(box_a, box_b)
A = box_a.size(0)
B = box_b.size(0)
max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
box_b[:, :2].unsqueeze(0).expand(A, B, 2))
inter = torch.clamp((max_xy - min_xy), min=0)
return inter[:, :, 0] * inter[:, :, 1]
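# The *_np fallbacks dispatched to in this file are not defined here; below is a
# minimal NumPy sketch mirroring the torch versions (boxes are [N, 4] in xyxy):
def bbox_intersections_np(box_a, box_b):
    # Broadcast [N, 1, 2] against [1, M, 2] to get pairwise corners.
    max_xy = np.minimum(box_a[:, None, 2:], box_b[None, :, 2:])
    min_xy = np.maximum(box_a[:, None, :2], box_b[None, :, :2])
    inter = np.clip(max_xy - min_xy, a_min=0, a_max=None)
    return inter[:, :, 0] * inter[:, :, 1]
def bbox_overlaps_np(box_a, box_b):
    inter = bbox_intersections_np(box_a, box_b)
    area_a = ((box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1]))[:, None]
    area_b = ((box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1]))[None, :]
    return inter / (area_a + area_b - inter)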
def bbox_overlaps(box_a, box_b):
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
is simply the intersection over union of two boxes. Here we operate on
ground truth boxes and default boxes.
E.g.:
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
Args:
box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
Return:
jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
"""
if isinstance(box_a, np.ndarray):
assert isinstance(box_b, np.ndarray)
return bbox_overlaps_np(box_a, box_b)
inter = bbox_intersections(box_a, box_b)
area_a = ((box_a[:, 2] - box_a[:, 0]) *
(box_a[:, 3] - box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B]
area_b = ((box_b[:, 2] - box_b[:, 0]) *
(box_b[:, 3] - box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B]
union = area_a + area_b - inter
return inter / union # [A,B]
| 7,027 | 40.099415 | 135 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/pretrain/tag_data_utilis.py | import numpy as np
import torch.nn as nn
from param import args
from lxrt.entry import LXRTEncoder
from lxrt.modeling import BertLayerNorm, GeLU
from lxrt.tokenization import BertTokenizer
import torch
import numpy as np
from collections import defaultdict
import numpy
import random
'''
Tags are now used extensively, so this module collects helper snippets for creating them.
'''
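# `get_padding_lengths` is used below but not defined or imported in this file; a
# minimal sketch modeled on the AllenNLP utility of the same name, returning the
# max size of every dimension across the arrays:
def get_padding_lengths(list_of_np_array):
    lengths = {}
    for array in list_of_np_array:
        for dim, size in enumerate(array.shape):
            key = "dimension_{}".format(dim)
            lengths[key] = max(lengths.get(key, 0), size)
    return lengths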
def pad_np_arrays(list_of_np_array, padding_value, dtype):
if isinstance(list_of_np_array[0], list):
list_of_np_array = [np.array(i, dtype = dtype) for i in list_of_np_array]
padding_lengths = get_padding_lengths(list_of_np_array)
max_shape = [padding_lengths["dimension_{}".format(i)]
for i in range(len(padding_lengths))]
    # Convert explicitly to an ndarray just in case it's a scalar (it'd end up not being an ndarray otherwise)
final_list = []
for array_index, array in enumerate(list_of_np_array):
return_array = numpy.asarray(numpy.ones(max_shape, dtype = dtype) * padding_value)
# If the tensor has a different shape from the largest tensor, pad dimensions with zeros to
# form the right shaped list of slices for insertion into the final tensor.
slicing_shape = list(array.shape)
#if len(array.shape) < len(max_shape):
# slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(array.shape))]
slices = tuple([slice(0, x) for x in slicing_shape])
return_array[slices] = array
final_list.append(return_array)
final_list = np.stack(final_list, 0)
tensor = torch.from_numpy(final_list)
return tensor
def transfer_object_labels_to_symbolic_ids(obj_labels, attribute_labels, symbolic_vocab, obj_confs = None, attr_confs = None):
return_list = []
for index in range(len(obj_labels)):
prob = random.random()
if prob < args.get("insert_attr_ratio", 0.0):
if args.get("kl_divergence", False):
if args.get("non_top1_sampling", False):
p = attr_confs[index][attribute_labels[index]]
p = p / p.sum()
attr_label_i = np.random.choice(attribute_labels[index], p=p)
#attr_label_i = np.random.choice(attr_confs.shape[-1], p=attr_confs[index])
else:
attr_label_i = attribute_labels[index, 0]
else:
attr_label_i = attribute_labels[index]
return_list.append(symbolic_vocab.word2id[symbolic_vocab.attr_id2word(attr_label_i)])
else:
if args.get("kl_divergence", False):
if args.get("non_top1_sampling", False):
new_obj_confs = deepcopy(obj_confs)
new_obj_confs[new_obj_confs<0.1] = 0
p = new_obj_confs[index][obj_labels[index]]
sum_p = p.sum()
if sum_p == 0:
obj_label_i = obj_labels[index, 0]
else:
p = p / sum_p
obj_label_i =np.random.choice(obj_labels[index], p=p)
#obj_label_i = np.random.choice(obj_confs.shape[-1], p=obj_confs[index])
else:
obj_label_i = obj_labels[index, 0]
else:
obj_label_i = obj_labels[index]
return_list.append(symbolic_vocab.word2id[symbolic_vocab.obj_id2word(obj_label_i)])
return np.array(return_list, dtype=np.int64)
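# Note on the sampling above: with kl_divergence + non_top1_sampling set, a tag is
# sampled from the detector's renormalized confidences over its top-k candidate
# labels (object confidences below 0.1 are zeroed first) rather than always taking
# the top-1 label, so the injected tag noise tracks the detector's own uncertainty.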
def convert_semantic_objective(labels, symbolic_vocab, obj = False, attr = False, tokenizer=None):
if obj:
words = [symbolic_vocab.obj_id2word(i) for i in labels]
elif attr:
words = [symbolic_vocab.attr_id2word(i) for i in labels]
else:
assert(0)
words = [symbolic_vocab.id2objective[symbolic_vocab.word2id[i]] for i in words]
semantic_objective = np.array(words, dtype=np.int64) # object_num * 2
return semantic_objective
def create_tags_pretrain(obj_labels, attr_labels, obj_confs, attr_confs, tokenizer, symbolic_vocab, visual_tags_box, feat_mask, use_bert_input = True):
obj_labels_transformed = transfer_object_labels_to_symbolic_ids(obj_labels, attr_labels, symbolic_vocab, obj_confs, attr_confs)
visual_tags_bert_words = []
visual_tags_box_bert_input = []
visual_tags_mlm_labels = []
visual_tags_segment_ids = []
for tag_index, tag in enumerate(obj_labels_transformed):
tag_word = symbolic_vocab.id2word[tag]
if args.get("use_segment_id_for_attr", False):
seg_id = symbolic_vocab.get_seg_id(tag)
sub_tokens = tokenizer.tokenize(tag_word)
prob = random.random()
if prob < args.get('tag_mask_ratio', 0.15) or (feat_mask[tag_index] != 0 and random.random() < args.get("tag_joint_mask_ratio", 0.5)):
new_prob = random.random()
if new_prob < 0.8:
for sub_token in sub_tokens:
visual_tags_bert_words.append("[MASK]")
elif new_prob < 0.9:
for sub_token in sub_tokens:
visual_tags_bert_words.append(random.choice(list(tokenizer.vocab.keys())))
else:
visual_tags_bert_words.extend(sub_tokens)
for sub_token in sub_tokens:
try:
visual_tags_mlm_labels.append(tokenizer.vocab[sub_token])
except KeyError:
# For unknown words (should not occur with BPE vocab)
visual_tags_mlm_labels.append(tokenizer.vocab["[UNK]"])
logging.warning("Cannot find sub_token '{}' in vocab. Using [UNK] insetad".format(sub_token))
else:
for sub_token in sub_tokens:
# no masking token (will be ignored by loss function later)
visual_tags_bert_words.append(sub_token)
visual_tags_mlm_labels.append(-1)
# duplicate box
for sub_token in sub_tokens:
visual_tags_box_bert_input.append(visual_tags_box[tag_index])
if args.get("use_segment_id_for_attr", False):
visual_tags_segment_ids.append(seg_id)
visual_tags = tokenizer.convert_tokens_to_ids(visual_tags_bert_words)
visual_tags_objective = visual_tags_mlm_labels
visual_tags_mask = [1] * len(visual_tags)
visual_tags_box = visual_tags_box_bert_input
visual_tags_segment_ids = None
return visual_tags, visual_tags_objective, visual_tags_mask, visual_tags_box, visual_tags_segment_ids
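# The masking above mirrors BERT MLM (select with p = tag_mask_ratio, then 80/10/10
# mask/random/keep over the tag's wordpieces) with one addition: if the region's
# visual feature is already masked (feat_mask != 0), its tag is also masked with
# probability tag_joint_mask_ratio, so the model cannot simply recover a masked
# feature's identity from its own tag.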
def create_tags(obj_labels, attr_labels, obj_confs, attr_confs, tokenizer, symbolic_vocab, visual_tags_box, use_bert_input = True, record_index = None):
obj_labels_transformed = transfer_object_labels_to_symbolic_ids(obj_labels, attr_labels, symbolic_vocab, obj_confs, attr_confs)
visual_tags_bert_words = []
visual_tags_box_bert_input = []
#visual_tags_mlm_labels = []
visual_tags_segment_ids = []
recorded_indexes = []
counter = 0
for tag_index, tag in enumerate(obj_labels_transformed):
tag_word = symbolic_vocab.id2word[tag]
if args.get("use_segment_id_for_attr", False):
seg_id = symbolic_vocab.get_seg_id(tag)
sub_tokens = tokenizer.tokenize(tag_word)
for sub_token in sub_tokens:
# no masking token (will be ignored by loss function later)
visual_tags_bert_words.append(sub_token)
#visual_tags_mlm_labels.append(-1)
if tag_index == record_index:
recorded_indexes.append(counter)
counter += 1
# duplicate box
for sub_token in sub_tokens:
visual_tags_box_bert_input.append(visual_tags_box[tag_index])
if args.get("use_segment_id_for_attr", False):
visual_tags_segment_ids.append(seg_id)
visual_tags = tokenizer.convert_tokens_to_ids(visual_tags_bert_words)
visual_tags_mask = [1] * len(visual_tags)
visual_tags_box = visual_tags_box_bert_input
visual_tags_segment_ids = None
visual_tags_type = None
if record_index is not None:
return visual_tags, visual_tags_mask, visual_tags_box, visual_tags_type, visual_tags_segment_ids, recorded_indexes
return visual_tags, visual_tags_mask, visual_tags_box, visual_tags_type, visual_tags_segment_ids | 8,378 | 44.291892 | 152 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/pretrain/text_data.py | import random
from torch.utils.data import Dataset
from lxrt.tokenization import BertTokenizer
import logging
from lxmert_data import InputExample
import json
from param import args
from lxmert_data import InputFeatures, random_word
import os
from src.tools import sharearray
import gc
from tqdm import tqdm
import numpy as np
class GeneralCorpusNP(Dataset):
def __init__(self, ann_file, pretrained_model_name, tokenizer=None, seq_len=64, min_seq_len=64,
encoding="utf-8", on_memory=True,
**kwargs):
assert on_memory, "only support on_memory mode!"
self.tokenizer = tokenizer if tokenizer is not None else BertTokenizer.from_pretrained(pretrained_model_name)
self.vocab = self.tokenizer.vocab
self.seq_len = seq_len
self.min_seq_len = min_seq_len
self.on_memory = on_memory
self.ann_file = ann_file
self.encoding = encoding
self.test_mode = False
self.do_no_fill = False
self.use_mismatch_objective = args.get("task_matched", False)
#self.load_corpus_with_passages()
# load samples into memory
if on_memory:
if self.use_mismatch_objective:
#self.corpus = self.load_corpus_with_passages_preprocess()
self.load_corpus_with_passages_preprocess()
else:
self.corpus = self.load_corpus()
if args.get("presegment_sentence", False):
self.presegment_sentence()
print("Using {} with {} data.\n\n".format(self.ann_file, len(self)))
def load_corpus(self):
corpus = []
print("\n\nLoading text only corpus...")
for ann_file in self.ann_file.split('+'):
with open(ann_file, 'r', encoding=self.encoding) as f:
all_text = f.read().lower()
corpus.extend([l.strip('\n').strip('\r').strip('\n') for l in all_text.split("\n")])
corpus = [l.strip() for l in corpus if l.strip() != '']
return corpus
def load_corpus_with_passages_preprocess(self):
corpus = []
print("\n\nLoading text only corpus...")
if os.path.exists(args.text_only_corpus_cache):
with open(args.text_only_corpus_cache, 'rb') as f:
corpus = np.load(f)
self.corpus = sharearray.cache(self.ann_file.split("/")[-1], corpus)
del corpus
gc.collect()
with open(args.text_only_corpus_cache.replace("npy", "json"), 'r') as f:
files = json.load(f)
[self.passage_split, self.sentence_split] = files
self.sentence_counter = [0] * len(self.sentence_split)
else:
new_text = []
passage_split = []
sentence_split = []
current_counter = 0
for ann_file in self.ann_file.split('+'):
with open(ann_file, 'r', encoding=self.encoding) as f:
all_text = f.read().lower()
one_passage_sentence_split = []
counter = 0
for line in tqdm(all_text.split("\n")):
line = line.strip('\n').strip('\r').strip('\n')
line = self.tokenizer.wordpiece_tokenizer.tokenize(line)
line_ids = self.tokenizer.convert_tokens_to_ids(line)
if len(line) != 0:
new_text.extend(line_ids)
counter += len(line_ids)
one_passage_sentence_split.append(counter)
else:
if counter != 0:
#all_text.extend(one_passage)
sentence_split.append(one_passage_sentence_split)
current_counter += counter
passage_split.append(current_counter)
one_passage = []
one_passage_sentence_split = []
counter = 0
#corpus.extend([l.strip('\n').strip('\r').strip('\n') for l in all_text.split("\n")])
#corpus = [l.strip() for l in corpus if l.strip() != '']
            self.sentence_counter = [0] * len(passage_split)  # records where we left off in each passage
self.corpus = np.array(new_text)
self.passage_split = passage_split
self.sentence_split = sentence_split
with open(args.text_only_corpus_cache, 'wb') as f:
np.save(f, self.corpus)
with open(args.text_only_corpus_cache.replace("npy", "json"), 'w') as f:
json.dump([self.passage_split, self.sentence_split], f)
            # Presumably intentional: halt after writing the cache so a rerun loads it from disk.
            assert(0)
#def save_sentence_counter(self):
#
def __len__(self):
if args.get("presegment_sentence", False) and "sbu-captions-all.json" not in self.ann_file:
return len(self.mapping)
return len(self.passage_split)
def retrieve_a_piece(self, index, seq_len):
if index == 0:
begin = 0
else:
begin = self.passage_split[index - 1]
end = self.passage_split[index]
text = self.corpus[begin:end]
sentence_split = self.sentence_split[index]
        ## Retrieve part of the passage
start_index = self.sentence_counter[index]
all_tokenized_words = []
all_mlm_labels = []
current_length = 0
final_index = -1
for i in range(start_index, len(sentence_split)):
if i == 0:
begin = 0
else:
begin = sentence_split[i - 1]
end = sentence_split[i]
tokens = self.tokenizer.convert_ids_to_tokens(text[begin:end])
tokens, mlm_labels = self.random_word_wwm(tokens)
if current_length == 0 or len(tokens) + current_length <= seq_len:
all_tokenized_words.extend(tokens)
all_mlm_labels.extend(mlm_labels)
current_length += len(tokens)
final_index = (i + 1) % len(sentence_split)
else:
final_index = (i + 1) % len(sentence_split)
break
self.sentence_counter[index] = final_index # Start from here next time retrieve a piece is called; Not sure how this will behave if we have multiple workers...
#print(index, self.sentence_counter[index])
all_tokenized_words = all_tokenized_words[:seq_len]
all_mlm_labels = all_mlm_labels[:seq_len]
return all_tokenized_words, all_mlm_labels
def exhaustively_retrieve_a_piece(self, index, seq_len):
all_ranges = []
if index == 0:
begin = 0
else:
begin = self.passage_split[index - 1]
end = self.passage_split[index]
text = self.corpus[begin:end]
sentence_split = self.sentence_split[index]
        ## Retrieve part of the passage
start_index = 0 #self.sentence_counter[index]
while True:
all_tokenized_words = []
all_mlm_labels = []
current_length = 0
final_index = -1
sent_begin = 0
sent_end = 0
for i in range(start_index, len(sentence_split)):
if i == 0:
sent_begin = 0
else:
sent_begin = sentence_split[i - 1]
tmp_sent_end = sentence_split[i]
if current_length == 0 or (tmp_sent_end - sent_begin) + current_length <= seq_len:
current_length += tmp_sent_end - sent_begin
sent_end = tmp_sent_end
final_index = (i + 1) % len(sentence_split)
else:
final_index = (i + 1) % len(sentence_split)
break
if start_index == 0:
sent_begin = 0
else:
sent_begin = sentence_split[start_index - 1]
start_index = final_index
all_ranges.append((begin + sent_begin, begin + sent_end))
if start_index == 0:
break
return all_ranges
def presegment_sentence(self):
all_segments = []
self.mapping = {}
current_len = 0
for i in tqdm(range(len(self.passage_split))):
tmp = self.exhaustively_retrieve_a_piece(i, self.seq_len // 2)
for j in range(len(tmp)):
self.mapping[current_len + j] = current_len + (j + 1)%len(tmp)
current_len += len(tmp)
all_segments.extend(tmp)
self.all_segments = all_segments
def retrieve_a_piece_preseged(self, index, seq_len):
seg = self.all_segments[index]
tokens = self.tokenizer.convert_ids_to_tokens(self.corpus[seg[0]:seg[1]])
tokens, mlm_labels = self.random_word_wwm(tokens)
tokens = tokens[:seq_len]
mlm_labels = mlm_labels[:seq_len]
return tokens, mlm_labels
def __getitem__(self, item):
if self.use_mismatch_objective:
i = 0
max_seq_length = self.seq_len // 2 # We have two parts
if args.get("presegment_sentence", False) and "sbu-captions-all.json" not in self.ann_file:
text_a_tokens, text_a_labels = self.retrieve_a_piece_preseged(item, seq_len = max_seq_length)
# First we take out some sentences
if random.random() < 0.5:
# Take out our own
b_index = self.mapping[item]
text_b_tokens, text_b_labels = self.retrieve_a_piece_preseged(b_index, seq_len=max_seq_length)
match = 1
else:
random_index = random.randint(0, len(self) - 1)
while random_index == item:
random_index = random.randint(0, len(self) - 1)
text_b_tokens, text_b_labels = self.retrieve_a_piece_preseged(random_index, seq_len=max_seq_length)
match = 0
else:
text_a_tokens, text_a_labels = self.retrieve_a_piece(item, seq_len = max_seq_length)
# First we take out some sentences
if random.random() < 0.5:
# Take out our own
text_b_tokens, text_b_labels = self.retrieve_a_piece(item, seq_len=max_seq_length)
match = 1
else:
random_index = random.randint(0, len(self) - 1)
while random_index == item:
random_index = random.randint(0, len(self) - 1)
text_b_tokens, text_b_labels = self.retrieve_a_piece(random_index, seq_len=max_seq_length)
match = 0
text_a_ids = self.tokenizer.convert_tokens_to_ids(text_a_tokens)
text_b_ids = self.tokenizer.convert_tokens_to_ids(text_b_tokens)
example = InputExample(
None, (text_a_tokens, text_b_tokens), (None, None),
(None, None), (None, None),
match, 1,
mlm_labels=(text_a_labels, text_b_labels),
token_ids=(text_a_ids, text_b_ids),
max_seq_len = self.seq_len + 3
)
if args.get("faster_loading", False):
return self.convert_example_to_features(example, self.seq_len + 3, self.tokenizer)
raw = self.corpus[item]
# tokenize
tokens = self.tokenizer.basic_tokenizer.tokenize(raw.lower())
if not self.do_no_fill:
# add more tokens if len(tokens) < min_len
_cur = (item + 1) % len(self.corpus)
while len(tokens) < self.min_seq_len:
_cur_tokens = self.tokenizer.basic_tokenizer.tokenize(self.corpus[_cur])
tokens.extend(_cur_tokens)
_cur = (_cur + 1) % len(self.corpus)
# masked language modeling
tokens, mlm_labels = self.random_word_wwm(tokens)
# convert token to its vocab id
ids = self.tokenizer.convert_tokens_to_ids(tokens)
# truncate
if len(ids) > self.seq_len:
ids = ids[:self.seq_len]
mlm_labels = mlm_labels[:self.seq_len]
example = InputExample(
None, tokens, (None, None),
(None, None), (None, None),
None, 1,
mlm_labels=mlm_labels,
token_ids=ids,
max_seq_len = self.seq_len
)
if args.get("faster_loading", False):
return self.convert_example_to_features(example, args.get("max_seq_length", 20), self.tokenizer)
return example
def convert_example_to_features(self, example: InputExample, max_seq_length, tokenizer, hybrid_num=10):
if isinstance(example.mlm_labels, tuple):
text_a_ids, text_b_ids = example.token_ids
text_a_labels, text_b_labels = example.mlm_labels
input_ids = tokenizer.convert_tokens_to_ids(["[CLS]"]) + text_a_ids + tokenizer.convert_tokens_to_ids(["[SEP]"]) + text_b_ids + tokenizer.convert_tokens_to_ids(["[SEP]"])
lm_label_ids = [-1] + text_a_labels + [-1] + text_b_labels + [-1]
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
lm_label_ids.append(-1)
features = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
lm_label_ids=lm_label_ids,
visual_feats=(None, None),
obj_labels={
'obj': (None, None),
'attr': (None, None),
'feat': (None, None),
},
is_matched=example.is_matched,
ans=-1,
visual_tags = None,
visual_tags_objective = None,
visual_tags_mask = None,
visual_tags_box=None,
visual_tags_mismatch=None
)
return features
if example.mlm_labels is not None: # The data is already pre-masked
input_ids = example.token_ids
lm_label_ids = example.mlm_labels
max_seq_len = example.max_seq_len + 2
# Add [CLS] and [SEP]
input_ids = tokenizer.convert_tokens_to_ids(["[CLS]"]) + input_ids + tokenizer.convert_tokens_to_ids(["[SEP]"])
lm_label_ids = [-1] + lm_label_ids + [-1]
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_len:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
lm_label_ids.append(-1)
features = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
lm_label_ids=lm_label_ids,
visual_feats=(None, None),
obj_labels={
'obj': (None, None),
'attr': (None, None),
'feat': (None, None),
},
is_matched=1,
ans=-1,
visual_tags = None,
visual_tags_objective = None,
visual_tags_mask = None,
visual_tags_box=None,
visual_tags_mismatch=None
)
return features
def random_word_wwm(self, tokens):
output_tokens = []
output_label = []
for i, token in enumerate(tokens):
sub_tokens = self.tokenizer.wordpiece_tokenizer.tokenize(token)
prob = random.random()
# mask token with 15% probability
if prob < 0.15:
prob /= 0.15
# 80% randomly change token to mask token
if prob < 0.8:
for sub_token in sub_tokens:
output_tokens.append("[MASK]")
# 10% randomly change token to random token
elif prob < 0.9:
for sub_token in sub_tokens:
output_tokens.append(random.choice(list(self.tokenizer.vocab.keys())))
# -> rest 10% randomly keep current token
else:
for sub_token in sub_tokens:
output_tokens.append(sub_token)
# append current token to output (we will predict these later)
for sub_token in sub_tokens:
try:
output_label.append(self.tokenizer.vocab[sub_token])
except KeyError:
# For unknown words (should not occur with BPE vocab)
output_label.append(self.tokenizer.vocab["[UNK]"])
logging.warning("Cannot find sub_token '{}' in vocab. Using [UNK] insetad".format(sub_token))
else:
for sub_token in sub_tokens:
# no masking token (will be ignored by loss function later)
output_tokens.append(sub_token)
output_label.append(-1)
## if no word masked, random choose a word to mask
# if all([l_ == -1 for l_ in output_label]):
# choosed = random.randrange(0, len(output_label))
# output_label[choosed] = self.tokenizer.vocab[tokens[choosed]]
return output_tokens, output_label
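# Illustrative behaviour of random_word_wwm (probabilities, not a fixed output):
# each whole word is selected with p = 0.15, and a selected word has *all* of its
# wordpieces replaced by [MASK] (80%), by random vocab tokens (10%), or kept
# (10%), with labels set to the original wordpiece ids; unselected words get
# label -1 and are ignored by the MLM loss. Masking e.g. ["una", "##ffa",
# "##ble"] as one unit is the "whole word masking" (wwm) in the method name.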
def string_to_sequence(s: str, dtype=np.int32) -> np.ndarray:
return np.array([ord(c) for c in s], dtype=dtype)
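# Round trip: sequence_to_string(string_to_sequence("img_123")) == "img_123".
# Encoding strings as int32 code-point arrays lets them be placed in shared
# memory (see the sharearray usage above) instead of duplicating Python string
# lists in every dataloader worker.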
def sequence_to_string(seq: np.ndarray) -> str:
return ''.join([chr(c) for c in seq]) | 18,260 | 38.270968 | 182 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/pretrain/qa_answer_table.py | # coding=utf-8
# Copyleft 2019 project LXRT.
import json
import torch
class AnswerTable:
ANS_CONVERT = {
"a man": "man",
"the man": "man",
"a woman": "woman",
"the woman": "woman",
'one': '1',
'two': '2',
'three': '3',
'four': '4',
'five': '5',
'six': '6',
'seven': '7',
'eight': '8',
'nine': '9',
'ten': '10',
'grey': 'gray',
}
def __init__(self, dsets=None):
try:
self.all_ans = json.load(open("data/lxmert/all_ans.json"))
        except FileNotFoundError:
self.all_ans = json.load(open("/local/harold/ubert/lxmert/data/lxmert/all_ans.json"))
if dsets is not None:
dsets = set(dsets)
# If the answer is used in the dsets
self.anss = [ans['ans'] for ans in self.all_ans if
len(set(ans['dsets']) & dsets) > 0]
else:
self.anss = [ans['ans'] for ans in self.all_ans]
self.ans_set = set(self.anss)
self._id2ans_map = self.anss
self._ans2id_map = {ans: ans_id for ans_id, ans in enumerate(self.anss)}
assert len(self._id2ans_map) == len(self._ans2id_map)
for ans_id, ans in enumerate(self._id2ans_map):
assert self._ans2id_map[ans] == ans_id
def convert_ans(self, ans):
if len(ans) == 0:
return ""
ans = ans.lower()
if ans[-1] == '.':
ans = ans[:-1].strip()
if ans.startswith("a "):
ans = ans[2:].strip()
if ans.startswith("an "):
ans = ans[3:].strip()
if ans.startswith("the "):
ans = ans[4:].strip()
if ans in self.ANS_CONVERT:
ans = self.ANS_CONVERT[ans]
return ans
def ans2id(self, ans):
return self._ans2id_map[ans]
def id2ans(self, ans_id):
return self._id2ans_map[ans_id]
def ans2id_map(self):
return self._ans2id_map.copy()
def id2ans_map(self):
return self._id2ans_map.copy()
def used(self, ans):
return ans in self.ans_set
def all_answers(self):
return self.anss.copy()
@property
def num_answers(self):
return len(self.anss)
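# Usage sketch: the table canonicalizes free-form answers before lookup, e.g.
#
#     table = AnswerTable()
#     table.convert_ans("A man.")   # -> "man"  (lowercased, article and '.' stripped)
#     table.convert_ans("Grey")     # -> "gray" (ANS_CONVERT normalization)
#     table.ans2id("man")           # index into the shared pre-training answer space
#
# which is what lets the fine-tuned QA heads below be initialized from the
# pre-trained answer head.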
def load_lxmert_qa(path, model, label2ans):
"""
Load model weights from LXMERT pre-training.
The answers in the fine-tuned QA task (indicated by label2ans)
would also be properly initialized with LXMERT pre-trained
QA heads.
:param path: Path to LXMERT snapshot.
:param model: LXRT model instance.
:param label2ans: The label2ans dict of fine-tuned QA datasets, like
{0: 'cat', 1: 'dog', ...}
:return:
"""
print("Load QA pre-trained LXMERT from %s " % path)
loaded_state_dict = torch.load("%s_LXRT.pth" % path)
model_state_dict = model.state_dict()
# Handle Multi-GPU pre-training --> Single GPU fine-tuning
for key in list(loaded_state_dict.keys()):
loaded_state_dict[key.replace("module.", '')] = loaded_state_dict.pop(key)
# Isolate bert model
bert_state_dict = {}
for key, value in loaded_state_dict.items():
if key.startswith('bert.'):
bert_state_dict[key] = value
# Isolate answer head
answer_state_dict = {}
for key, value in loaded_state_dict.items():
if key.startswith("answer_head."):
answer_state_dict[key.replace('answer_head.', '')] = value
# Do surgery on answer state dict
ans_weight = answer_state_dict['logit_fc.3.weight']
ans_bias = answer_state_dict['logit_fc.3.bias']
import copy
new_answer_weight = copy.deepcopy(model_state_dict['logit_fc.3.weight'])
new_answer_bias = copy.deepcopy(model_state_dict['logit_fc.3.bias'])
answer_table = AnswerTable()
loaded = 0
unload = 0
if type(label2ans) is list:
label2ans = {label: ans for label, ans in enumerate(label2ans)}
for label, ans in label2ans.items():
new_ans = answer_table.convert_ans(ans)
if answer_table.used(new_ans):
ans_id_9500 = answer_table.ans2id(new_ans)
new_answer_weight[label] = ans_weight[ans_id_9500]
new_answer_bias[label] = ans_bias[ans_id_9500]
loaded += 1
else:
new_answer_weight[label] = 0.
new_answer_bias[label] = 0.
unload += 1
print("Loaded %d answers from LXRTQA pre-training and %d not" % (loaded, unload))
print()
answer_state_dict['logit_fc.3.weight'] = new_answer_weight
answer_state_dict['logit_fc.3.bias'] = new_answer_bias
# Load Bert Weights
bert_model_keys = set(model.lxrt_encoder.model.state_dict().keys())
bert_loaded_keys = set(bert_state_dict.keys())
assert len(bert_model_keys - bert_loaded_keys) == 0
model.lxrt_encoder.model.load_state_dict(bert_state_dict, strict=False)
# Load Answer Logic FC Weights
model_keys = set(model.state_dict().keys())
ans_loaded_keys = set(answer_state_dict.keys())
assert len(ans_loaded_keys - model_keys) == 0
model.load_state_dict(answer_state_dict, strict=False)
def load_lxmert_from_pretrain_noqa(path, model):
"""
Load model weights from LXMERT pre-training.
The answers in the fine-tuned QA task (indicated by label2ans)
would also be properly initialized with LXMERT pre-trained
QA heads.
:param path: Path to LXMERT snapshot.
:param model: LXRT model instance.
:param label2ans: The label2ans dict of fine-tuned QA datasets, like
{0: 'cat', 1: 'dog', ...}
:return:
"""
print("Load QA pre-trained LXMERT from %s " % path)
loaded_state_dict = torch.load("%s_LXRT.pth" % path)
model_state_dict = model.state_dict()
# Handle Multi-GPU pre-training --> Single GPU fine-tuning
for key in list(loaded_state_dict.keys()):
loaded_state_dict[key.replace("module.", '')] = loaded_state_dict.pop(key)
# Isolate bert model
#bert_state_dict = {}
#for key, value in loaded_state_dict.items():
# if key.startswith('bert.'):
# bert_state_dict[key] = value
# Load Bert Weights
load_state_dict_flexible(model.lxrt_encoder.model, loaded_state_dict) #model.lxrt_encoder.model.load_state_dict(bert_state_dict, strict=False)
if model.lxrt_encoder.load_pretrain_head:
print("\nLoad pre-trained head\n")
head_state_dict = {}
for key, value in loaded_state_dict.items():
if key.startswith('cls.'):
head_state_dict[key.replace("cls.", "")] = value
load_state_dict_flexible(model.lxrt_encoder.pretrained_head, head_state_dict)
'''# Load Answer Logic FC Weights
model_keys = set(model.state_dict().keys())
ans_loaded_keys = set(answer_state_dict.keys())
assert len(ans_loaded_keys - model_keys) == 0
model.load_state_dict(answer_state_dict, strict=False)'''
def load_lxmert_for_vcr_finetune_from_vcr_pretrain(path, model):
"""
Load model weights from LXMERT pre-training.
The answers in the fine-tuned QA task (indicated by label2ans)
would also be properly initialized with LXMERT pre-trained
QA heads.
:param path: Path to LXMERT snapshot.
:param model: LXRT model instance.
:param label2ans: The label2ans dict of fine-tuned QA datasets, like
{0: 'cat', 1: 'dog', ...}
:return:
"""
print("Load QA pre-trained LXMERT from %s " % path)
loaded_state_dict = torch.load("%s_LXRT.pth" % path)
model_state_dict = model.state_dict()
# Handle Multi-GPU pre-training --> Single GPU fine-tuning
for key in list(loaded_state_dict.keys()):
loaded_state_dict[key.replace("model.module.", '')] = loaded_state_dict.pop(key)
# Isolate bert model
bert_state_dict = {}
for key, value in loaded_state_dict.items():
if key.startswith('bert.'):
bert_state_dict[key] = value
# Load Bert Weights
bert_model_keys = set(model.lxrt_encoder.model.state_dict().keys())
bert_loaded_keys = set(bert_state_dict.keys())
load_state_dict_flexible(model.lxrt_encoder.model, bert_state_dict) #model.lxrt_encoder.model.load_state_dict(bert_state_dict, strict=False)
'''# Load Answer Logic FC Weights
model_keys = set(model.state_dict().keys())
ans_loaded_keys = set(answer_state_dict.keys())
assert len(ans_loaded_keys - model_keys) == 0
model.load_state_dict(answer_state_dict, strict=False)'''
def load_lxmert_from_pretrain_vcr_pretrain(path, model):
"""
Load model weights from LXMERT pre-training.
The answers in the fine-tuned QA task (indicated by label2ans)
would also be properly initialized with LXMERT pre-trained
QA heads.
:param path: Path to LXMERT snapshot.
:param model: LXRT model instance.
:param label2ans: The label2ans dict of fine-tuned QA datasets, like
{0: 'cat', 1: 'dog', ...}
:return:
"""
print("Load QA pre-trained LXMERT from %s " % path)
loaded_state_dict = torch.load("%s_LXRT.pth" % path)
model_state_dict = model.state_dict()
# Handle Multi-GPU pre-training --> Single GPU fine-tuning
for key in list(loaded_state_dict.keys()):
loaded_state_dict[key.replace("module.", '')] = loaded_state_dict.pop(key)
load_state_dict_flexible(model.model, loaded_state_dict) #model.lxrt_encoder.model.load_state_dict(bert_state_dict, strict=False)
'''# Load Answer Logic FC Weights
model_keys = set(model.state_dict().keys())
ans_loaded_keys = set(answer_state_dict.keys())
assert len(ans_loaded_keys - model_keys) == 0
model.load_state_dict(answer_state_dict, strict=False)'''
def load_lxmert_from_sgg_and_lxmert_pretrain(path, model, label2ans):
"""
Load model weights from LXMERT pre-training.
The answers in the fine-tuned QA task (indicated by label2ans)
would also be properly initialized with LXMERT pre-trained
QA heads.
:param path: Path to LXMERT snapshot.
:param model: LXRT model instance.
:param label2ans: The label2ans dict of fine-tuned QA datasets, like
{0: 'cat', 1: 'dog', ...}
:return:
"""
print("Load LXMERT pre-trained for sgg and lxmert pre-training from %s " % path)
loaded_state_dict = torch.load(path)["model"]
model_state_dict = model.state_dict()
# Handle Multi-GPU pre-training --> Single GPU fine-tuning
#for key in list(loaded_state_dict.keys()):
# loaded_state_dict[key.replace("module.", '')] = loaded_state_dict.pop(key)
new_loaded_state_dict = {}
for key in list(loaded_state_dict.keys()):
if "lxrt" in key:
new_loaded_state_dict[key.split("lxrt.")[-1]] = loaded_state_dict[key]
loaded_state_dict = new_loaded_state_dict
# Isolate bert model
bert_state_dict = {}
for key, value in loaded_state_dict.items():
if key.startswith('bert.'):
bert_state_dict[key] = value
# Isolate answer head
answer_state_dict = {}
for key, value in loaded_state_dict.items():
if key.startswith("answer_head."):
answer_state_dict[key.replace('answer_head.', '')] = value
# Do surgery on answer state dict
ans_weight = answer_state_dict['logit_fc.3.weight']
ans_bias = answer_state_dict['logit_fc.3.bias']
import copy
new_answer_weight = copy.deepcopy(model_state_dict['logit_fc.3.weight'])
new_answer_bias = copy.deepcopy(model_state_dict['logit_fc.3.bias'])
answer_table = AnswerTable()
loaded = 0
unload = 0
if type(label2ans) is list:
label2ans = {label: ans for label, ans in enumerate(label2ans)}
for label, ans in label2ans.items():
new_ans = answer_table.convert_ans(ans)
if answer_table.used(new_ans):
ans_id_9500 = answer_table.ans2id(new_ans)
new_answer_weight[label] = ans_weight[ans_id_9500]
new_answer_bias[label] = ans_bias[ans_id_9500]
loaded += 1
else:
new_answer_weight[label] = 0.
new_answer_bias[label] = 0.
unload += 1
print("Loaded %d answers from LXRTQA pre-training and %d not" % (loaded, unload))
print()
answer_state_dict['logit_fc.3.weight'] = new_answer_weight
answer_state_dict['logit_fc.3.bias'] = new_answer_bias
# Load Bert Weights
bert_model_keys = set(model.lxrt_encoder.model.state_dict().keys())
bert_loaded_keys = set(bert_state_dict.keys())
#print(len(bert_model_keys - bert_loaded_keys))
assert len(bert_model_keys - bert_loaded_keys) == 0
model.lxrt_encoder.model.load_state_dict(bert_state_dict, strict=False)
#load_state_dict_flexible(model.lxrt_encoder.model, bert_state_dict)
# Load Answer Logic FC Weights
model_keys = set(model.state_dict().keys())
ans_loaded_keys = set(answer_state_dict.keys())
assert len(ans_loaded_keys - model_keys) == 0
model.load_state_dict(answer_state_dict, strict=False)
def load_state_dict_flexible(model, state_dict):
try:
model.load_state_dict(state_dict)
    except Exception:
print("Full loading failed!! Try partial loading!!")
own_state = model.state_dict()
for name, param in state_dict.items():
if name not in own_state:
print("Skipped: " + name)
continue
if isinstance(param, torch.nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
own_state[name].copy_(param)
print("Successfully loaded: "+name)
            except Exception:
print("Part load failed: " + name) | 13,691 | 34.842932 | 147 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/pretrain/lxmert_data.py | # coding=utf-8
# Copyleft 2019 project LXRT.
from collections import defaultdict
import json
import random
import numpy as np
from torch.utils.data import Dataset
import torch
from param import args
from src.pretrain.qa_answer_table import AnswerTable
from src.utils import load_obj_tsv
from copy import deepcopy
import h5py
from lxrt.h5_data import ImageFeatureDataset
from lxrt.tokenization import BertTokenizer
from src.pretrain import tag_data_utilis
from tqdm import tqdm
from src.tools import sharearray
import os
TINY_IMG_NUM = 500
FAST_IMG_NUM = 5000
Split2ImgFeatPath = {
'mscoco_train': '/local/harold/ubert/lxmert/data/mscoco_imgfeat/train2014_obj36.tsv',
'mscoco_minival': '/local/harold/ubert/lxmert/data/mscoco_imgfeat/val2014_obj36.tsv',
'mscoco_nominival': '/local/harold/ubert/lxmert/data/mscoco_imgfeat/val2014_obj36.tsv',
'vgnococo': '/local/harold/ubert/lxmert/data/vg_gqa_imgfeat/vg_gqa_obj36.tsv',
}
Split2ImgFeatPath_h5 = {
'mscoco_train': 'data/mscoco_imgfeat/train2014_obj36.h5',
'mscoco_minival': 'data/mscoco_imgfeat/val2014_obj36.h5',
'mscoco_nominival': 'data/mscoco_imgfeat/val2014_obj36.h5',
'vgnococo': 'data/vg_gqa_imgfeat/vg_gqa_obj36.h5',
"nlvr_for_pretrain_train": "data/nlvr2_imgfeat/train_obj36.h5",
"nlvr_for_pretrain_valid": "data/nlvr2_imgfeat/valid_obj36.h5",
"flickr_train": 'data/flickr30k/fixed36_no_features_split_0_of_1_splits.h5'
}
class InputExample(object):
"""A single training/test example for the language model."""
def __init__(self, uid, sent, visual_feats=None,
obj_labels=None, attr_labels=None,
is_matched=None, label=None, sent_b=None,
use_visual_tag_flag=False,
mlm_labels=None,token_ids=None,max_seq_len=96):
self.uid = uid
self.sent = sent
self.visual_feats = visual_feats
self.obj_labels = obj_labels
self.attr_labels = attr_labels
self.is_matched = is_matched # whether the visual and obj matched
self.label = label
self.sent_b = sent_b
self.use_visual_tag_flag = use_visual_tag_flag
# The following attributes are used for the bookcorpus/wikipedia pre-training
self.mlm_labels = mlm_labels
self.token_ids = token_ids
self.max_seq_len = max_seq_len
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids, input_mask, segment_ids, lm_label_ids,
visual_feats, obj_labels,
is_matched, ans,
visual_tags = None,
visual_tags_objective = None,
visual_tags_mask = None,
visual_tags_box=None,
visual_tags_mismatch=None,
obj_labels_transformed_mismatch=None,
visual_tags_box_mismatch=None,
use_visual_tag_flag=False,
visual_tags_segment_ids=None,
visual_feats_seg_ids=None
):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.lm_label_ids = lm_label_ids
self.visual_feats = visual_feats
self.obj_labels = obj_labels
self.is_matched = is_matched
self.ans = ans
self.visual_tags = visual_tags
self.visual_tags_objective = visual_tags_objective
self.visual_tags_mask = visual_tags_mask
self.visual_tags_box = visual_tags_box
self.visual_tags_mismatch = visual_tags_mismatch
self.obj_labels_transformed_mismatch = obj_labels_transformed_mismatch
self.visual_tags_box_mismatch = visual_tags_box_mismatch
self.use_visual_tag_flag = use_visual_tag_flag
self.visual_tags_segment_ids = visual_tags_segment_ids
self.visual_feats_seg_ids = visual_feats_seg_ids
class LXMERTDataset:
def __init__(self, splits: str, qa_sets=None):
"""
:param splits: The data sources to be loaded
:param qa_sets: if None, no action
o.w., only takes the answers appearing in these dsets
and remove all unlabeled data (MSCOCO captions)
"""
self.name = splits
self.sources = splits.split(',')
# Loading datasets to data
self.data = []
for source in self.sources:
try:
self.data.extend(json.load(open("data/lxmert/%s.json" % source)))
            except FileNotFoundError:
self.data.extend(json.load(open("/local/harold/ubert/lxmert/data/lxmert/%s.json" % source))) # hacky
print("Load %d data from %s" % (len(self.data), self.name))
# Create answer table according to the qa_sets
self.answer_table = AnswerTable(qa_sets)
print("Load an answer table of size %d." % (len(self.answer_table.ans2id_map())))
# Modify the answers
for datum in self.data:
labelf = datum['labelf']
for cat, labels in labelf.items():
for label in labels:
for ans in list(label.keys()):
new_ans = self.answer_table.convert_ans(ans)
if self.answer_table.used(new_ans):
if ans != new_ans:
label[new_ans] = label.pop(ans)
else:
label.pop(ans)
def __len__(self):
return len(self.data)
def make_uid(img_id, dset, sent_idx):
return "%s_%s_%03d" % (img_id, dset, sent_idx),
def load_vocabs():
attributes = []
with open(args.attributes_vocab) as f:
for line in f:
attr = line.strip("\n")
if len(attr) != 0:
attributes.append(attr)
assert (len(attributes) == 400)
objects = []
with open(args.objects_vocab) as f:
for line in f:
attr = line.strip("\n")
if len(attr) != 0:
objects.append(attr)
assert (len(objects) == 1600)
return objects, attributes
def random_word(tokens, tokenizer):
"""
Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
:param tokens: list of str, tokenized sentence.
:param tokenizer: Tokenizer, object used for tokenization (we need it's vocab here)
:return: (list of str, list of int), masked tokens and related labels for LM prediction
"""
output_label = []
for i, token in enumerate(tokens):
prob = random.random()
# mask token with probability
ratio = args.word_mask_rate
if prob < ratio:
prob /= ratio
# 80% randomly change token to mask token
if prob < 0.8:
tokens[i] = "[MASK]"
# 10% randomly change token to random token
elif prob < 0.9:
tokens[i] = random.choice(list(tokenizer.vocab.items()))[0]
# -> rest 10% randomly keep current token
# append current token to output (we will predict these later)
try:
output_label.append(tokenizer.vocab[token])
except KeyError:
# For unknown words (should not occur with BPE vocab)
output_label.append(tokenizer.vocab["[UNK]"])
else:
# no masking token (will be ignored by loss function later)
output_label.append(-1)
return tokens, output_label
"""
Example in obj tsv:
FIELDNAMES = ["img_id", "img_h", "img_w", "objects_id", "objects_conf",
"attrs_id", "attrs_conf", "num_boxes", "boxes", "features"]
"""
mapping_rawdataset_name_to_json = {
"mscoco_train,mscoco_nominival,vgnococo": "train",
"mscoco_minival": "val"
}
from lxrt.symbolic_vocabulary import SymbolicVocab
global symbolic_vocab
symbolic_vocab = SymbolicVocab(args.objects_vocab, args.attributes_vocab)
class LXMERTTorchDataset(Dataset):
def __init__(self, dataset: LXMERTDataset, topk=-1, sgg_dataset = None, image_only = False, text_only = False, use_visual_tag_flag = False, limit_source = [], available_split_for_cc = None):
super().__init__()
self.raw_dataset = dataset
self.name = '_'.join(self.raw_dataset.sources)
if args.get('disable_mismatch_for_other_dataset', False):
# Do not resample for datasets such as BookCorpus
self.task_matched = args.task_matched if "book_corpus" in self.raw_dataset.sources else False
else:
self.task_matched = args.task_matched
print(self.raw_dataset.sources)
print(self.task_matched)
print("\n\n\n")
self.sgg_dataset = sgg_dataset
self.image_only = image_only
self.text_only = text_only
self.use_visual_tag_flag = use_visual_tag_flag
self.tokenizer = BertTokenizer.from_pretrained(
"bert-base-uncased",
do_lower_case=True
)
self.task_nlvr2 = args.get("task_nlvr2", False)
if args.tiny:
topk = TINY_IMG_NUM
elif args.fast:
topk = FAST_IMG_NUM
#self.fake_data = args.get("fake_data", False)
self.custom_coco_data = args.get("custom_coco_data", False)
self.use_h5_file = args.get("use_h5_file", False)
if self.use_h5_file:
if "google_cc_train" in dataset.sources:
if args.get('change_split', False):
available_split_for_cc = [39]
else:
available_split_for_cc = args.get("available_split_for_cc", [0])
sources = []
split_map = {}
for i in available_split_for_cc:
sources.append("google_cc_{}".format(i))
split_map["google_cc_{}".format(i)] = "data/google_concetual/butd_feat/train_no_features_split_{}_of_40_splits.h5".format(i)
self.image_feature_dataset = ImageFeatureDataset.create(sources, split_map, load_custom_h5_version2=True, text_only = self.text_only, on_memory = False)
elif "open_images_train" in dataset.sources:
available_split_for_open_image = args.get("available_split_for_open_image", [0])
sources = []
split_map = {}
for split_i, split_j, total_split in available_split_for_open_image:
sources.append("open_image_{}_{}".format(split_i, split_j))
split_map["open_image_{}_{}".format(split_i, split_j)] = "data/open_image/butd_feat/train_{}_no_features_split_{}_of_{}_splits.h5".format(split_i, split_j, total_split)
self.image_feature_dataset = ImageFeatureDataset.create(sources, split_map, load_custom_h5_version2=True, on_memory = False)
else:
self.image_feature_dataset = ImageFeatureDataset.create(dataset.sources, Split2ImgFeatPath_h5, text_only = self.text_only, load_custom_h5_version2 = True if "flickr_train" in dataset.sources else False, on_memory = args.get("on_memory", False))
self.ids_to_index = self.image_feature_dataset.ids_to_index
# Screen data
used_data = []
for datum in self.raw_dataset.data:
if datum['img_id'] in self.ids_to_index:
used_data.append(datum)
else:
# Original LXMERT. Load the dataset
img_data = []
for source in self.raw_dataset.sources:
img_data.extend(load_obj_tsv(Split2ImgFeatPath[source], topk))
self.imgid2img = {}
for img_datum in img_data:
self.imgid2img[img_datum['img_id']] = img_datum
# Filter out the dataset
used_data = []
for datum in self.raw_dataset.data:
if datum['img_id'] in self.imgid2img:
used_data.append(datum)
used_data = used_data[::args.get("partial_dataset", 1)]
if sgg_dataset is not None:
used_data = [datum for datum in used_data if str(datum["img_id"]) in self.sgg_dataset.imageids_to_index]
# Flatten the dataset (into one sent + one image entries)
self.data = []
record_img_id = set()
remaining_set = set()
for datum in used_data:
# datum: {'img_id': 'COCO_train2014_000000318556', 'labelf': {'vqa': [{'no': 1}, {'yes': 1}, {'no': 1}, {'blue': 1, 'blue and white': 0.3}]}, 'sentf': {'mscoco': ['A very clean and well decorated empty bathroom', 'A blue and white bathroom with butterfly themed wall tiles.', 'A bathroom with a border of butterflies and blue paint on the walls above it.', 'An angled view of a beautifully decorated bathroom.', 'A clock that blends in with the wall hangs in a bathroom. '], 'vqa': ['Is the sink full of water?', 'Are there any butterflies on the tiles?', 'Is this bathroom in a hotel?', 'What color are the walls?']}}
sentf = datum['sentf']
for sents_cat, sents in sentf.items():
if sents_cat in limit_source:
continue
remaining_set.add(sents_cat)
if sents_cat in datum['labelf']:
labels = datum['labelf'][sents_cat]
else:
labels = None
for sent_idx, sent in enumerate(sents):
new_datum = {
'uid': make_uid(datum['img_id'], sents_cat, sent_idx) if args.task_qa else None,
'img_id': datum['img_id'], # if not self.text_only else "",
'sent': sent #if not self.image_only else ""
}
if image_only: # If we only use images, make sure each image appears only once
if datum["img_id"] in record_img_id:
continue
record_img_id.add(datum["img_id"])
if labels is not None and args.task_qa:
new_datum['label'] = labels[sent_idx]
if self.task_nlvr2:
new_datum['match_label'] = datum["label"]
new_datum['img_id_1'] = datum["img_id_1"]
self.data.append(new_datum)
if image_only:
dataset_str = "image_only"
elif text_only:
dataset_str = "text_only"
else:
dataset_str = "vision and language"
if self.image_only and args.get("screen_image", False):
counter = 0
from tqdm import tqdm
_data = []
for data_item in tqdm(self.data):
img_id = data_item["img_id"]
image_index = self.image_feature_dataset.ids_to_index[img_id]
img_h = self.image_feature_dataset.h5_wh[image_index][1]
img_w = self.image_feature_dataset.h5_wh[image_index][0]
if img_h == 0 or img_w == 0:
counter += 1
else:
_data.append(data_item)
print("Screened {} images with zero heights and weidths, {} in total".format(counter, len(_data)))
self.data = _data
print("Use {} data in {} torch dataset, {}, limit_source {}".format(len(self.data), dataset_str, remaining_set, limit_source))
if text_only:
del self.image_feature_dataset
if text_only or image_only:
del self.raw_dataset.data
del self.raw_dataset
self.compress_memory = False
if args.get("compress_memory", False):
# Move some data to shared memory so that memory does not blow up when using multiple worker processes for data loading
self.compress()
print("\n\n\n")
def compress(self):
print("image_only", self.image_only)
print("text_only", self.text_only)
self._img_ids_shared_array, self._img_ids_record_position = self.compress_list_of_strings([i["img_id"] for i in self.data], "data_imonly_img_id_{}".format(self.name))
self.compress_memory = True
self._sent_shared_array, self._sent_record_position = self.compress_list_of_strings([i["sent"] for i in self.data], "data_txtonly_sent_{}".format(self.name))
self.compress_memory = True
def compress_list_of_strings(self, list_of_string, name):
record_position = []
all_text = []
current_length = 0
for index, string in enumerate(list_of_string):
array = [ord(c) for c in string]
all_text.extend(array)
current_length += len(array)
record_position.append(current_length)
shared_array = sharearray.cache(name, lambda: np.array(all_text, dtype=np.int32))
del all_text
return shared_array, record_position
def decompress_string_index(self, index, shared_array, record_position):
string_array = shared_array[0 if index == 0 else record_position[index - 1]:record_position[index]]
return ''.join([chr(c) for c in string_array])
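# Illustrative sketch (not part of the training pipeline): the two helpers
# above pack a list of strings into one flat int32 array of character codes
# plus cumulative end offsets, so dataloader workers can share one buffer
# instead of copying Python string lists. A minimal numpy-only round trip,
# with sharearray.cache replaced by a plain array:
def _string_packing_sketch():
    import numpy as np
    strings = ["cat", "a dog"]
    flat, offsets = [], []
    for s in strings:
        flat.extend(ord(c) for c in s)
        offsets.append(len(flat))                # cumulative end positions
    shared = np.array(flat, dtype=np.int32)
    def unpack(i):
        lo = 0 if i == 0 else offsets[i - 1]
        return ''.join(chr(c) for c in shared[lo:offsets[i]])
    assert unpack(0) == "cat" and unpack(1) == "a dog"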
def decompress_getitem__(self, index):
if self._sent_shared_array is not None:
sent = self.decompress_string_index(index, self._sent_shared_array, self._sent_record_position)
else:
sent = ""
if self._img_ids_shared_array is not None:
img_id = self.decompress_string_index(index, self._img_ids_shared_array, self._img_ids_record_position)
else:
img_id = None
return {"sent": sent, "img_id": img_id, "uid": None}
def __len__(self):
return len(self.data)
def random_feat(self):
"""Get a random obj feat from the dataset."""
if self.compress_memory:
datum = self.decompress_getitem__(random.randint(0, len(self.data) - 1))
else:
datum = self.data[random.randint(0, len(self.data) - 1)]
img_id = datum['img_id']
if self.use_h5_file:
image_index = self.ids_to_index[img_id]
feat = self.image_feature_dataset.h5_features[image_index]
feat = feat[random.randint(0, 35)]
else:
img_info = self.imgid2img[img_id]
feat = img_info['features']
feat = feat[random.randint(0, 35)]
return feat
def random_tags(self):
"""Get a random obj feat from the dataset."""
datum = self.data[random.randint(0, len(self.data)-1)]
img_id = datum['img_id']
image_index, obj_num, boxes, img_h, img_w, obj_labels, obj_confs, attr_labels, attr_confs = self.image_feature_dataset.get_everything_except_features(img_id)
boxes = boxes.copy()
boxes[:, (0, 2)] /= img_w
boxes[:, (1, 3)] /= img_h
np.testing.assert_array_less(boxes, 1+1e-5)
np.testing.assert_array_less(-boxes, 0+1e-5)
return image_index, obj_num, boxes, img_h, img_w, obj_labels, obj_confs, attr_labels, attr_confs
def __getitem__(self, item: int):
if self.compress_memory:
datum = self.decompress_getitem__(item)
else:
datum = self.data[item]
uid = datum['uid']
img_id = datum['img_id']
sent=datum['sent'].lower()
if not self.text_only:
# Get image info
if self.use_h5_file:
image_index, obj_num, feats, boxes, img_h, img_w, obj_labels, obj_confs, attr_labels, attr_confs = self.image_feature_dataset[img_id]
else:
img_info = self.imgid2img[img_id]
obj_num = img_info['num_boxes']
feats = img_info['features'].copy()
boxes = img_info['boxes'].copy()
obj_labels = img_info['objects_id'].copy()
obj_confs = img_info['objects_conf'].copy()
attr_labels = img_info['attrs_id'].copy()
attr_confs = img_info['attrs_conf'].copy()
assert obj_num == len(boxes) == len(feats)
# Normalize the boxes (to 0 ~ 1)
img_h, img_w = img_info['img_h'], img_info['img_w']
#print(item, img_info, img_h, img_w)
boxes = boxes.copy()
boxes[:, (0, 2)] /= img_w
boxes[:, (1, 3)] /= img_h
np.testing.assert_array_less(boxes, 1+1e-5)
np.testing.assert_array_less(-boxes, 0+1e-5)
# If calculating the matched loss, replace the sentence with a sentence
# corresponding to another image.
is_matched=None
if args.get('task_nlvr2', False):
match_label = datum["match_label"]
is_matched = match_label
second_image_index, second_obj_num, second_feats, second_boxes, second_img_h, second_img_w, second_obj_labels, second_obj_confs, second_attr_labels, second_attr_confs = self.image_feature_dataset[datum["img_id_1"]]
second_boxes = second_boxes.copy()
second_boxes[:, (0, 2)] /= second_img_w
second_boxes[:, (1, 3)] /= second_img_h
np.testing.assert_array_less(second_boxes, 1+1e-5)
np.testing.assert_array_less(-second_boxes, 0 + 1e-5)
feats=np.concatenate((feats, second_feats))
boxes=np.concatenate((boxes, second_boxes))
obj_labels=np.concatenate((obj_labels, second_obj_labels))
obj_confs=np.concatenate((obj_confs, second_obj_confs))
attr_labels = np.concatenate((attr_labels, second_attr_labels))
attr_confs = np.concatenate((attr_confs, second_attr_confs))
elif self.task_matched:
if random.random() < 0.5:
is_matched = 0
if self.compress_memory:
other_datum = self.decompress_getitem__(random.randint(0, len(self.data) - 1))
else:
other_datum = self.data[random.randint(0, len(self.data)-1)]
while other_datum['img_id'] == img_id:
if self.compress_memory:
other_datum = self.decompress_getitem__(random.randint(0, len(self.data) - 1))
else:
other_datum = self.data[random.randint(0, len(self.data)-1)]
sent = other_datum['sent']
else:
is_matched = 1
# Label, convert answer to id
if 'label' in datum and args.task_qa:
label = datum['label'].copy()
for ans in list(label.keys()):
label[self.raw_dataset.answer_table.ans2id(ans)] = label.pop(ans)
else:
label = None
if self.image_only:
sent = None
if self.text_only:
feats = None
boxes = None
obj_labels = None
obj_confs = None
attr_labels = None
attr_confs = None
# Create target
example = InputExample(
uid, sent, (feats, boxes),
(obj_labels, obj_confs), (attr_labels, attr_confs),
is_matched, label,
use_visual_tag_flag=self.use_visual_tag_flag
)
#if args.get("faster_loading", False):
return self.convert_example_to_features(example, args.get("max_seq_length", 20), self.tokenizer)
def random_mask_features(self, feats, boxes = None):
mask_feats = deepcopy(feats) #.copy()
feat_mask = np.zeros(len(feats), dtype=np.float32)
for i in range(len(feats)):
prob = random.random()
# mask token with probability
if prob < args.obj_mask_rate:
feat_mask[i] = 1.
prob /= args.obj_mask_rate
# 80% randomly change token to zero feat
if prob < 0.8:
mask_feats[i, :] = 0.
# 10% randomly change token to random feat
elif prob < 0.9:
if not args.get("disable_random_feat", False) and not args.get("inbatch_random", False):
mask_feats[i,:] = self.random_feat()
if args.get("inbatch_random", False):
feat_mask[i] = 2.0 # special mark
# -> rest 10% randomly keep current feat
# Need to predict this feat
return mask_feats, feat_mask
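# Illustrative note on the split above (assuming the common BERT-style
# args.obj_mask_rate of 0.15): dividing prob by the mask rate rescales it
# to be uniform on [0, 1), so conditioned on a region being selected, 80%
# are zeroed, 10% are swapped for a random feature, and 10% are kept but
# still predicted. A standalone sketch of the per-region decision:
def _mask_split_sketch(prob, mask_rate=0.15):
    if prob >= mask_rate:
        return "keep (unmasked, not predicted)"
    prob /= mask_rate                  # rescale to uniform on [0, 1)
    if prob < 0.8:
        return "zero out"
    elif prob < 0.9:
        return "replace with random feature"
    return "keep, but still predict"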
def convert_example_to_features(self, example: InputExample, max_seq_length, tokenizer):
if example.mlm_labels is not None: # The data is already pre-masked
input_ids = example.token_ids
lm_label_ids = example.mlm_labels
max_seq_len = example.max_seq_len + 2
# Add [CLS] and [SEP]
input_ids = tokenizer.convert_tokens_to_ids(["[CLS]"]) + input_ids + tokenizer.convert_tokens_to_ids(["[SEP]"])
lm_label_ids = [-1] + lm_label_ids + [-1]
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_len:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
lm_label_ids.append(-1)
features = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
lm_label_ids=lm_label_ids,
visual_feats=(None, None),
obj_labels={
'obj': (None, None),
'attr': (None, None),
'feat': (None, None),
},
is_matched=None,
ans=-1,
visual_tags = None,
visual_tags_objective = None,
visual_tags_mask = None,
visual_tags_box=None,
visual_tags_mismatch=None
)
return features
if example.sent is not None:
tokens = tokenizer.tokenize(example.sent.strip())
# Account for [CLS] and [SEP] with "- 2"
if len(tokens) > max_seq_length - 2:
tokens = tokens[:(max_seq_length - 2)]
# Get random words
masked_tokens, masked_label = random_word(tokens, tokenizer)
# concatenate lm labels and account for CLS, SEP, SEP
masked_tokens = ['[CLS]'] + masked_tokens + ['[SEP]']
input_ids = tokenizer.convert_tokens_to_ids(masked_tokens)
# Mask & Segment Word
lm_label_ids = ([-1] + masked_label + [-1])
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
lm_label_ids.append(-1)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(lm_label_ids) == max_seq_length
elif args.get("insert_cls", False):
masked_tokens = ["[CLS]"]
input_ids = tokenizer.convert_tokens_to_ids(masked_tokens)
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
lm_label_ids = [-1]
else:
input_ids = None
input_mask = None
segment_ids = None
lm_label_ids = None
if example.use_visual_tag_flag and example.visual_feats[0] is not None: # Let's do a hybrid embedding
feat, boxes = example.visual_feats
obj_labels, obj_confs = example.obj_labels
attr_labels, attr_confs = example.attr_labels
# Mask Image Features:
masked_feat, feat_mask = self.random_mask_features(feat, boxes=boxes)
assert(args.non_exclusive_tags)
assert(args.use_bert_input_for_tags)
visual_tags, visual_tags_objective, visual_tags_mask, visual_tags_box, visual_tags_segment_ids = tag_data_utilis.create_tags_pretrain(
obj_labels=obj_labels,
attr_labels=attr_labels,
obj_confs=obj_confs,
attr_confs=attr_confs,
tokenizer=self.tokenizer,
symbolic_vocab=symbolic_vocab,
visual_tags_box = boxes,
feat_mask = feat_mask,
use_bert_input=True
)
elif example.visual_feats[0] is not None:
feat, boxes = example.visual_feats
obj_labels, obj_confs = example.obj_labels
attr_labels, attr_confs = example.attr_labels
# Mask Image Features:
masked_feat, feat_mask = self.random_mask_features(feat, boxes=boxes)
visual_tags = None
visual_tags_objective = None
visual_tags_mask = None
visual_tags_box = None
visual_mismatch_label = None
obj_labels_transformed_mismatch = None
visual_tags_box_mismatch = None
else:
masked_feat = None
boxes = None
obj_labels = None
obj_confs = None
attr_labels = None
attr_confs = None
feat_mask = None
feat = None
visual_tags = None
visual_tags_objective = None
visual_tags_mask = None
visual_tags_box = None
visual_mismatch_label = None
obj_labels_transformed_mismatch = None
visual_tags_box_mismatch = None
# QA answer label
if example.label is None or len(example.label) == 0 or example.is_matched != 1:
# 1. No label 2. Label is pruned 3. unmatched visual + language pair
ans = -1
else:
keys, values = zip(*example.label.items())
if len(keys) == 1:
ans = keys[0]
else:
value_sum = sum(values)
prob = [value / value_sum for value in values]
choice = np.random.multinomial(1, prob).argmax()
ans = keys[choice]
features = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
lm_label_ids=lm_label_ids,
visual_feats=(masked_feat, boxes),
obj_labels={
'obj': (obj_labels, obj_confs),
'attr': (attr_labels, attr_confs),
'feat': (feat, feat_mask),
},
is_matched=example.is_matched,
ans=ans,
visual_tags = visual_tags,
visual_tags_objective = visual_tags_objective,
visual_tags_mask = visual_tags_mask,
visual_tags_box=visual_tags_box,
visual_tags_mismatch=None if not args.get('use_tag_mismatch', None) else visual_mismatch_label,
obj_labels_transformed_mismatch=None if not args.get("use_tag_mismatch", None) else obj_labels_transformed_mismatch,
visual_tags_box_mismatch=None if not args.get('use_tag_mismatch', None) else visual_tags_box_mismatch,
use_visual_tag_flag=example.use_visual_tag_flag )
return features
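# Illustrative sketch of the soft-label answer sampling above: VQA-style
# labels map answer ids to scores, the scores are normalized into a
# distribution, and one answer id is drawn per example (the label values
# here are made up for illustration):
def _sample_answer_sketch():
    label = {7: 1.0, 42: 0.3}                     # answer id -> soft score
    keys, values = zip(*label.items())
    prob = [v / sum(values) for v in values]
    choice = np.random.multinomial(1, prob).argmax()
    return keys[choice]                           # 7 w.p. ~0.77, 42 w.p. ~0.23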
def create_in_batch_random_feat(self, example, example_index, all_examples):
if args.get("inbatch_random", False) and example.visual_feats[0] is not None:
feats, _ = example.visual_feats
feat_mask = example.obj_labels["feat"][1]
#original_feats = example.obj_labels["feat"][0]
for i in range(len(feat_mask)):
if feat_mask[i] == 2:
feat_mask[i] = 1
select_index = random.randint(0, len(all_examples) - 1)
while select_index == example_index:
select_index = random.randint(0, len(all_examples) - 1)
select_index_j = random.randint(0, len(feat_mask) - 1)
while select_index_j == i:
select_index_j = random.randint(0, len(feat_mask) - 1)
feats[i] = all_examples[select_index].obj_labels["feat"][0][select_index_j]
return example
def custom_collact_fn(self, examples):
hybrid_num = random.randint(args.get("hybrid_min", 2), args.get("hybrid_max", 34))
train_features = [self.create_in_batch_random_feat(example, example_index, all_examples = examples) for example_index, example in enumerate(examples)]
if train_features[0].input_ids is not None:
# language Inputs
input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
# Language Prediction
lm_labels = torch.tensor([f.lm_label_ids for f in train_features], dtype=torch.long)
else:
input_ids = None
input_mask = None
segment_ids = None
lm_labels = None
if train_features[0].visual_feats[0] is not None:
# Visual Inputs
if isinstance(train_features[0].visual_feats[0], torch.FloatTensor):
feats = torch.stack([f.visual_feats[0] for f in train_features])
else:
feats = torch.from_numpy(np.stack([f.visual_feats[0] for f in train_features]))
pos = torch.from_numpy(np.stack([f.visual_feats[1] for f in train_features]))
# Visual Prediction
obj_labels = {}
for key in args.visual_losses.split(","):#('obj', 'attr', 'feat'):
visn_labels = torch.from_numpy(np.stack([f.obj_labels[key][0] for f in train_features]))
#if self.custom_coco_data:
# visn_mask = torch.ones(visn_labels.size(0), visn_labels.size(1)).float().cuda()
#else:
visn_mask = torch.from_numpy(np.stack([f.obj_labels[key][1] for f in train_features]))
assert visn_labels.size(0) == visn_mask.size(0) and visn_labels.size(1) == visn_mask.size(1)
obj_labels[key] = (visn_labels, visn_mask)
if args.get('task_nlvr2', False):
visual_feats_seg_ids = []
for i in range(feats.size(0)):
visual_feats_seg_ids.append([0] * 36 + [1] * 36)
visual_feats_seg_ids= torch.tensor(visual_feats_seg_ids, dtype=torch.int64)
else:
visual_feats_seg_ids = None
else:
feats = None
pos = None
obj_labels = None
visual_feats_seg_ids = None
if train_features[0].visual_tags is not None:
# do padding
tag_max_length = max([len(f.visual_tags) for f in train_features])
for f in train_features:
current_tag_length = len(f.visual_tags)
if current_tag_length < tag_max_length:
f.visual_tags = f.visual_tags + [0] * (tag_max_length - current_tag_length)
f.visual_tags_objective = f.visual_tags_objective + [-1] * (tag_max_length - current_tag_length)
f.visual_tags_mask = f.visual_tags_mask + [0] * (tag_max_length - current_tag_length)
f.visual_tags_box = f.visual_tags_box + [ np.array([0.0, 0.0, 0.0, 0.0], dtype=np.float32) ] * (tag_max_length - current_tag_length)
f.visual_tags_box = np.stack(f.visual_tags_box)
if f.visual_tags_segment_ids is not None:
f.visual_tags_segment_ids = f.visual_tags_segment_ids + [0] * (tag_max_length - current_tag_length)
visual_tags = torch.tensor([f.visual_tags for f in train_features], dtype=torch.long)
visual_tags_mask = torch.tensor([f.visual_tags_mask for f in train_features], dtype=torch.long)
visual_tags_box = torch.from_numpy(np.stack([f.visual_tags_box for f in train_features]))
visual_tags_objective = torch.tensor([f.visual_tags_objective for f in train_features], dtype=torch.long)
if train_features[0].visual_tags_mismatch is not None:
visual_tags_mismatch = torch.tensor([f.visual_tags_mismatch for f in train_features], dtype=torch.long)
else:
visual_tags_mismatch = None
if train_features[0].visual_tags_segment_ids is not None:
visual_tags_segment_ids = torch.tensor([f.visual_tags_segment_ids for f in train_features], dtype=torch.long)
else:
visual_tags_segment_ids = None
if args.get("tag_hard_max_length", None) is not None and tag_max_length > args.tag_hard_max_length:
# truncate the tag sequence
visual_tags = visual_tags[:, :args.tag_hard_max_length].contiguous()
visual_tags_mask = visual_tags_mask[:, :args.tag_hard_max_length].contiguous()
visual_tags_box = visual_tags_box[:, :args.tag_hard_max_length].contiguous()
visual_tags_objective = visual_tags_objective[:, :args.tag_hard_max_length].contiguous()
if visual_tags_mismatch is not None:
visual_tags_mismatch = visual_tags_mismatch[:, :args.tag_hard_max_length].contiguous()
if visual_tags_segment_ids is not None:
visual_tags_segment_ids = visual_tags_segment_ids[:, :args.tag_hard_max_length].contiguous()
else:
visual_tags = None
visual_tags_mask = None
visual_tags_box = None
visual_tags_objective = None
visual_tags_mismatch = None
visual_tags_segment_ids = None
if train_features[0].is_matched is not None:
matched_labels = torch.tensor([f.is_matched for f in train_features], dtype=torch.long)
else:
matched_labels = None
ans = torch.from_numpy(np.stack([f.ans for f in train_features]))
if args.get("lxmert_style_nlvr", False):
# Reorganize the inputs
input_ids = input_ids.unsqueeze(1).expand(input_ids.size(0), 2, input_ids.size(-1)).contiguous().view(-1, input_ids.size(-1)).contiguous()
lm_labels = lm_labels.unsqueeze(1).expand(lm_labels.size(0), 2, lm_labels.size(-1)).contiguous().view(-1, lm_labels.size(-1)).contiguous()
input_mask = input_mask.unsqueeze(1).expand(input_mask.size(0), 2, input_mask.size(-1)).contiguous().view(-1, input_mask.size(-1)).contiguous()
visual_feats_seg_ids = None
feats = feats.view(-1, feats.size(1)//2, feats.size(-1)).contiguous()
pos = pos.view(-1, pos.size(1) // 2, pos.size(-1)).contiguous()
if args.get("use_visual_tag_flag", False):
visual_tags = visual_tags.view(-1, visual_tags.size(1) // 2).contiguous()
visual_tags_box = visual_tags_box.view(-1, visual_tags_box.size(1) // 2, visual_tags_box.size(-1)).contiguous()
visual_tags_objective = visual_tags_objective.view(-1, visual_tags_objective.size(1) // 2).contiguous()
visual_tags_mask = visual_tags_mask.view(-1, visual_tags_mask.size(1)//2).contiguous()
return [input_ids, segment_ids, input_mask, lm_labels, feats, pos, obj_labels, matched_labels, ans, visual_feats_seg_ids, visual_tags, visual_tags_mask, visual_tags_box, visual_tags_objective, visual_tags_mismatch, visual_tags_segment_ids]
class LXMERTEvaluator:
def __init__(self, dataset: LXMERTDataset):
self.raw_dataset = dataset
# Create QA Eval Data
self.data = []
for datum in self.raw_dataset.data:
sentf = datum['sentf']
for sents_cat, sents in sentf.items():
if sents_cat in datum['labelf']: # A labeled dataset
labels = datum['labelf'][sents_cat]
for sent_idx, sent in enumerate(sents):
new_datum = {
'uid': make_uid(datum['img_id'], sents_cat, sent_idx),
'img_id': datum['img_id'],
'sent': sent,
'dset': sents_cat,
'label': labels[sent_idx]
}
self.data.append(new_datum)
# uid2datum
self.uid2datum = {}
for datum in self.data:
self.uid2datum[datum['uid']] = datum
def evaluate(self, uid2ans: dict, pprint=False):
score = 0.
cnt = 0
dset2score = defaultdict(lambda: 0.)
dset2cnt = defaultdict(lambda: 0)
for uid, ans in uid2ans.items():
if uid not in self.uid2datum: # Not a labeled data
continue
datum = self.uid2datum[uid]
label = datum['label']
dset = datum['dset']
if ans in label:
score += label[ans]
dset2score[dset] += label[ans]
cnt += 1
dset2cnt[dset] += 1
accu = score / cnt
dset2accu = {}
for dset in dset2cnt:
dset2accu[dset] = dset2score[dset] / dset2cnt[dset]
if pprint:
accu_str = "Overall Accu %0.4f, " % (accu)
sorted_keys = sorted(dset2accu.keys())
for key in sorted_keys:
accu_str += "%s Accu %0.4f, " % (key, dset2accu[key])
print(accu_str)
return accu, dset2accu
def dump_result(self, uid2ans: dict, path):
raise NotImplementedError
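# Illustrative sketch of the scoring rule in evaluate() above: each
# prediction earns the soft score its answer has in the label dict (0 if
# absent), averaged over all labeled uids; the data here is made up:
def _soft_accuracy_sketch():
    uid2label = {"u1": {"yes": 1.0}, "u2": {"blue": 1.0, "navy": 0.3}}
    uid2ans = {"u1": "yes", "u2": "navy", "u3": "red"}  # u3 is unlabeled
    score, cnt = 0., 0
    for uid, ans in uid2ans.items():
        if uid not in uid2label:
            continue
        score += uid2label[uid].get(ans, 0.)
        cnt += 1
    return score / cnt                                  # (1.0 + 0.3) / 2 = 0.65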
| 42,230 | 43.453684 | 630 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/pretrain/lxmert_pretrain.py | # coding=utf-8
# Copyleft 2019 project LXRT.
import collections
import os
import random
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import json
from param import args
from pretrain.lxmert_data import LXMERTDataset, LXMERTTorchDataset, LXMERTEvaluator
from pretrain.text_data import GeneralCorpusNP
from lxrt.tokenization import BertTokenizer
from lxrt.modeling import LXRTPretraining
from collections import defaultdict
DataTuple = collections.namedtuple("DataTuple", 'dataset torchdset loader evaluator vl_torchdset')
EvalDataTuple = collections.namedtuple("EvalDataTuple", 'dataset torchdset loader evaluator vl_torchdset textonly')
class TrainingMeter():
def __init__(self):
self.counter_dict = defaultdict(float)
self.true_dict = defaultdict(float)
def update(self, loss_dict):
for key, item in loss_dict.items():
self.counter_dict[key] += 1
self.true_dict[key] += item
def report(self):
keys = list(self.counter_dict.keys())
keys.sort()
for key in keys:
print(" {} : {:.7}".format(key, self.true_dict[key] / self.counter_dict[key]))
def clean(self):
self.counter_dict = defaultdict(float)
self.true_dict = defaultdict(float)
from torch.utils.data.dataloader import _SingleProcessDataLoaderIter, _MultiProcessingDataLoaderIter
if args.get('random_seed', None):
random.seed(args.random_seed)
np.random.seed(args.random_seed)
torch.random.manual_seed(args.random_seed)
torch.cuda.manual_seed_all(args.random_seed)
def get_tuple(splits: str, bs: int, shuffle=False, drop_last=False, topk=-1, num_workers = 0, limit_source = [], restrict_source = None) -> DataTuple:
# Decide which QA datasets would be used in pre-training.
# Options: vqa, gqa, visual7w
# Note: visual7w is a part of vgqa, we take the name here.
qa_sets = args.qa_sets
if qa_sets is not None:
qa_sets = set(qa_set.lower().strip() for qa_set in qa_sets.split(","))
# Build dataset, data loader, and evaluator.
dset = LXMERTDataset(splits, qa_sets=qa_sets)
tset = LXMERTTorchDataset(
dset,
topk,
limit_source = limit_source,
use_visual_tag_flag = args.get("allow_tag_for_eval", False) # As this function is called for evaulation in our context
)
data_loader = DataLoader(
tset, batch_size=bs,
shuffle=shuffle, num_workers=num_workers,
collate_fn= tset.custom_collact_fn if args.get('custom_collact_fn', False) else lambda x: x,
drop_last=drop_last, pin_memory=args.get("pin_memory", True)
)
evaluator = LXMERTEvaluator(dset)
print()
return DataTuple(dataset=dset, torchdset=tset, loader=data_loader, evaluator=evaluator, vl_torchdset=tset)
from lxrt.h5_data import CustomBatchSampler, ConcateDataset
def get_tuple_hybrid(splits: str, bs: int, shuffle=False, drop_last=False, num_workers=0, topk=-1, image_only_splits=None, text_only_splits = None, limit_source = [], restrict_source = None) -> DataTuple:
# Decide which QA datasets would be used in pre-training.
# Options: vqa, gqa, visual7w
# Note: visual7w is a part of vgqa, we take the name here.
qa_sets = args.qa_sets
if qa_sets is not None:
qa_sets = set(qa_set.lower().strip() for qa_set in qa_sets.split(","))
# Three type of datasets: v&l, language, vision
datasets_list_torch = []
datasets_list = []
if splits is not None:
vl_dataset = LXMERTDataset(splits, qa_sets=qa_sets)
vl_dataset_torch = LXMERTTorchDataset(vl_dataset, topk, limit_source = limit_source, randomized_pairing = args.get("randomized_pairing", False), use_visual_tag_flag = args.get("use_visual_tag_flag", False))
datasets_list.append(vl_dataset)
datasets_list_torch.append(vl_dataset_torch)
if text_only_splits is not None:
text_only_datasets = []
for split in text_only_splits.split("+"):
if not("book_corpus" in split or "sbu" in split):
text_only_dataset = LXMERTDataset(split, qa_sets=qa_sets)
text_only_dataset_torch = LXMERTTorchDataset(text_only_dataset, topk, text_only=True, limit_source=limit_source)
datasets_list.append(text_only_dataset)
datasets_list_torch.append(text_only_dataset_torch)
text_only_datasets.append(text_only_dataset_torch)
else:
text_only_dataset = None
if "book_corpus" in split and args.get("text_shared_memory", False):
text_class = GeneralCorpusNP
else:
    # GeneralCorpus (the non-shared-memory variant) is not imported in this
    # file, so fall back to GeneralCorpusNP to keep text_class defined.
    text_class = GeneralCorpusNP
text_only_dataset_torch = text_class(ann_file=args.book_corpus_path if "book_corpus" in split else args.sbu_path, pretrained_model_name="bert-base-uncased", tokenizer=None, seq_len=args.get("text_only_max_seq_len", 64), min_seq_len=args.get("text_only_min_seq_len", 64), encoding="utf-8", on_memory=True)
datasets_list.append(text_only_dataset)
datasets_list_torch.append(text_only_dataset_torch)
text_only_datasets.append(text_only_dataset_torch)
if image_only_splits is not None:
if image_only_splits != "":
image_only_dataset = LXMERTDataset(image_only_splits, qa_sets=qa_sets)
image_only_dataset_torch = LXMERTTorchDataset(image_only_dataset, topk, image_only=True, use_visual_tag_flag = args.get("use_visual_tag_flag", False))
datasets_list.append(image_only_dataset)
datasets_list_torch.append(image_only_dataset_torch)
if args.get("add_adhoc_google_cc_image_only", False):
google_cc_dataset = LXMERTDataset("google_cc_train", qa_sets=qa_sets)
google_cc_dataset_torch = LXMERTTorchDataset(google_cc_dataset, topk, image_only=True, use_visual_tag_flag=args.get("use_visual_tag_flag", False), available_split_for_cc = args.get("available_split_for_cc", [0]))
datasets_list.append(google_cc_dataset)
datasets_list_torch.append(google_cc_dataset_torch)
if args.get("add_adhoc_open_image_image_only", False):
open_image_dataset = LXMERTDataset("open_images_train", qa_sets=qa_sets)
open_image_torch = LXMERTTorchDataset(open_image_dataset, topk, image_only=True, use_visual_tag_flag=args.get("use_visual_tag_flag", False))
datasets_list.append(open_image_dataset)
datasets_list_torch.append(open_image_torch)
# Merge different datasets
merged_dataset = ConcateDataset(datasets_list_torch)
if args.task_qa:
merged_dataset.answer_table = datasets_list[0].answer_table if datasets_list[0] is not None else None
batch_sampler = CustomBatchSampler(merged_dataset.datasets, bs, upsample_ratios=args.get("upsample_ratios", [1,1,1]))
try:
custom_collact_fn = datasets_list_torch[0].custom_collact_fn if args.get('custom_collact_fn', False) else lambda x: x
except AttributeError:
    # The first dataset may be a text-only corpus without custom_collact_fn.
custom_collact_fn = datasets_list_torch[-1].custom_collact_fn if args.get('custom_collact_fn', False) else lambda x: x
data_loader = DataLoader(
merged_dataset, num_workers=num_workers,
batch_sampler=batch_sampler,
collate_fn=custom_collact_fn,
pin_memory=args.get("pin_memory", True)
)
if args.task_qa:
evaluator = LXMERTEvaluator(datasets_list[0]) if datasets_list[0] is not None else None # The evaluator is only used by task_qa
else:
evaluator = None
print()
if splits is not None:
vl_torchdset = vl_dataset_torch
else:
vl_torchdset = datasets_list_torch[-1] # the last dataset
return DataTuple(dataset=merged_dataset, torchdset=merged_dataset, loader=data_loader, evaluator=evaluator, vl_torchdset=vl_torchdset)
if not args.get("hybrid", False):
train_tuple = get_tuple(args.train, args.batch_size, shuffle=True, drop_last=True, num_workers=args.num_workers, limit_source = args.get("limit_source", []))
valid_batch_size = args.get("valid_batch_size", 128)
valid_tuple = get_tuple(args.valid, valid_batch_size, shuffle=False, drop_last=False, topk=5000, num_workers=args.get("val_num_workers", 2), limit_source = args.get("limit_source_for_val", []))
else:
train_tuple = get_tuple_hybrid(args.train, args.batch_size, shuffle=True, num_workers = args.num_workers, drop_last=True, image_only_splits = args.train_image_only, text_only_splits = args.get("train_text_only", None), limit_source = args.get("limit_source", []))
valid_batch_size = args.get("valid_batch_size", 128)
valid_tuple = get_tuple(args.valid, valid_batch_size, shuffle=False, num_workers = args.get("val_num_workers", 2), drop_last=False, topk=5000, limit_source = args.get("limit_source_for_val", []))
from lxmert_data import symbolic_vocab
LOSSES_NAME = ('Mask_LM', 'Matched', 'Obj', 'Attr', 'Feat', 'QA')
class LXMERT:
def __init__(self, max_seq_length):
super().__init__()
self.max_seq_length = max_seq_length
self.tokenizer = BertTokenizer.from_pretrained(
"bert-base-uncased",
do_lower_case=True
)
# Build model
self.model = LXRTPretraining.from_pretrained(
"bert-base-uncased",
args = args,
task_mask_lm=args.task_mask_lm,
task_obj_predict=args.task_obj_predict,
task_matched=args.task_matched,
task_qa=args.task_qa,
visual_losses=args.visual_losses,
num_answers= args.num_answers if args.get("num_answers", None) else train_tuple.dataset.answer_table.num_answers
)
# Weight initialization and loading
if args.from_scratch:
print("Train from Scratch: re-initialize all BERT weights.")
self.model.apply(self.model.init_bert_weights)
if args.get("use_tag_symbolic_embedding", False):
self.model.bert.embeddings.initialize_symbolic_embeddings(symbolic_vocab.get_symbolic_list(self.tokenizer))
self.model.special_initialize_pretraining_head()
if args.get("hybrid_embedding", False):
self.model.bert.embeddings.initialize_visual_position_type_embeddings()
if args.load_lxmert is not None:
# Load lxmert would not load the answer head.
self.load_lxmert(args.load_lxmert)
self.model = self.model.cuda()
if args.multiGPU:
self.model = nn.DataParallel(self.model)
self.global_step = 0
def forward(self, examples):
for index, i in enumerate(examples):
if i is not None:
if isinstance(i, dict):
for key in i:
i[key] = (i[key][0].cuda(), i[key][1].cuda())
else:
examples[index] = i.cuda()
input_ids, segment_ids, input_mask, lm_labels, feats, pos, obj_labels, matched_labels, ans, visual_feats_seg_ids, visual_tags, visual_tags_mask, visual_tags_box, visual_tags_objective, visual_tags_mismatch, visual_tags_segment_ids = examples
loss, losses, ans_logit, losses_dict = self.model(
input_ids, segment_ids, input_mask, lm_labels,
feats, pos, obj_labels, matched_labels, ans,
visual_feats_seg_ids = visual_feats_seg_ids,
visual_tags = visual_tags,
visual_tags_mask = visual_tags_mask,
visual_tags_box = visual_tags_box,
visual_tags_objective = visual_tags_objective,
visual_tags_mismatch = visual_tags_mismatch,
visual_tags_segment_ids = visual_tags_segment_ids
)
return loss, losses.detach().cpu(), ans_logit, losses_dict
def train_batch(self, optim, batch):
gradient_accumulation_steps = args.get("gradient_accumulation_steps", 1)
# Gradients accumulate across micro-batches and are cleared immediately
# after each optimizer step (see below), so every micro-batch in a cycle
# contributes to the update.
loss, losses, ans_logit, losses_dict = self.forward(batch)
if args.multiGPU:
loss = loss.mean()
losses = losses.mean(0)
if gradient_accumulation_steps > 1:
loss = loss / gradient_accumulation_steps
loss.backward()
if (self.global_step + 1) % gradient_accumulation_steps == 0:
    nn.utils.clip_grad_norm_(self.model.parameters(), 1.)
    optim.step()
    optim.zero_grad()
return loss.item(), losses.cpu().numpy(), ans_logit, losses_dict
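# Illustrative sketch (hypothetical names) of the accumulation pattern used
# in train_batch: with gradient_accumulation_steps = k, the gradients of k
# scaled micro-batch losses are summed before a single optimizer step,
# emulating a k-times larger batch at the same memory cost.
def _grad_accum_sketch(model, optim, batches, k=4):
    optim.zero_grad()
    for step, batch in enumerate(batches):
        loss = model(batch) / k        # scale so the summed gradient matches
        loss.backward()
        if (step + 1) % k == 0:
            optim.step()
            optim.zero_grad()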
def valid_batch(self, batch):
with torch.no_grad():
loss, losses, ans_logit, losses_dict = self.forward(batch)
if args.multiGPU:
loss = loss.mean()
losses = losses.mean(0)
return loss.item(), losses.cpu().numpy(), ans_logit, losses_dict
def train(self, train_tuple: DataTuple, eval_tuple: DataTuple):
train_ld = train_tuple.loader
# Optimizer
from lxrt.optimization import BertAdam
batch_per_epoch = len(train_ld)
t_total = int(batch_per_epoch * args.epochs)
warmup_ratio = args.get("warmup_ratio", 0.05)
print("Total Iters: %d" % t_total)
if args.get("t_total", None):
t_total = args.t_total
print("!! Changing to specified t_toal in args: {}".format(t_total))
self.t_total = t_total
warmup_iters = int(t_total * warmup_ratio)
print("Batch per epoch: %d" % batch_per_epoch)
print("Warm up Iters: %d" % warmup_iters)
self.optim = BertAdam(self.model.parameters(), lr=args.lr, warmup=warmup_ratio, t_total=t_total)
if args.load is not None:
self.load(args.load, t_total = t_total)
gradient_accumulation_steps = args.get("gradient_accumulation_steps", 1)
# Train
best_eval_loss = 9595.
report_every = args.get("report_every", 100)
custom_train_meter = TrainingMeter()
for epoch in range(args.epochs):
# Train
self.model.train()
total_loss = 0.
total_losses = 0.
uid2ans = {}
for batch_id, batch in enumerate(tqdm(train_ld, total=len(train_ld))):
if args.get("skip_training", False):
break
loss, losses, logit, losses_dict = self.train_batch(self.optim, batch)
total_loss += loss
try:
total_losses += losses
except:
pass
if args.task_qa and batch[0].sent is not None:
assert(0) # Not used in our experiment
score, label = logit.max(1)
for datum, l in zip(batch, label.cpu().numpy()):
uid = datum.uid
ans = train_tuple.dataset.answer_table.id2ans(l)
uid2ans[uid] = ans
for key, value in losses_dict.items():
losses_dict[key] = value.mean().item() # make the losses scalar
if "Masked LM" in losses_dict and losses_dict["Masked LM"] == 0:
del losses_dict["Masked LM"]
custom_train_meter.update(losses_dict)
if batch_id % report_every == 0 and batch_id > 0:
print("Folder: {} \n Epoch {} Iter: {}/{}".format(args.output, epoch, batch_id, len(train_ld)))
#print(pd.DataFrame(train_results[-report_every:]).mean())
custom_train_meter.report()
custom_train_meter.clean()
print()
if args.get("save_step", -1) != -1 and self.global_step != 0 and (self.global_step // gradient_accumulation_steps) % args.save_step == 0:
self.save("Step{}".format(self.global_step))
self.global_step += 1
print("The training loss for Epoch %d is %0.4f" % (epoch, total_loss / batch_per_epoch))
if args.task_qa:
train_tuple.evaluator.evaluate(uid2ans, pprint=True)
# Eval
avg_eval_loss = self.evaluate_epoch(eval_tuple, iters=-1)
if args.get("eval_on_train", False):
print("On train set")
self.evaluate_epoch(train_tuple, iters=-1)
if avg_eval_loss < best_eval_loss:
best_eval_loss = avg_eval_loss
self.save("BEST_EVAL_LOSS")
self.save("Epoch%02d" % (epoch+1))
def evaluate_epoch(self, eval_tuple: DataTuple, iters: int=-1):
self.model.eval()
eval_ld = eval_tuple.loader
total_loss = 0.
total_losses = 0.
uid2ans = {}
eval_meter = TrainingMeter()
for i, batch in enumerate(tqdm(eval_ld)):
loss, losses, logit, losses_dict = self.valid_batch(batch)
total_loss += loss
try:
total_losses += losses
except:
pass
for key, value in losses_dict.items():
losses_dict[key] = value.mean().item()
eval_meter.update(losses_dict)
if args.task_qa:
score, label = logit.max(1)
for datum, l in zip(batch, label.cpu().numpy()):
uid = datum.uid
ans = train_tuple.dataset.answer_table.id2ans(l)
uid2ans[uid] = ans
if i == iters:
break
print("Evaluation:")
eval_meter.report()
print("\n\n\n\n\n\n\n\n")
if args.task_qa:
eval_tuple.evaluator.evaluate(uid2ans, pprint=True)
return total_loss / len(eval_ld)
def evaluate_epoch_text(self, eval_tuple: DataTuple, iters: int=-1):
self.model.eval()
eval_ld = eval_tuple.textonly
total_loss = 0.
total_losses = 0.
uid2ans = {}
eval_meter = TrainingMeter()
for i, batch in enumerate(tqdm(eval_ld)):
loss, losses, logit, losses_dict = self.valid_batch(batch)
total_loss += loss
total_losses += losses
for key, value in losses_dict.items():
losses_dict[key] = value.mean().item()
eval_meter.update(losses_dict)
if args.task_qa:
score, label = logit.max(1)
for datum, l in zip(batch, label.cpu().numpy()):
uid = datum.uid
ans = train_tuple.dataset.answer_table.id2ans(l)
uid2ans[uid] = ans
if i == iters:
break
print("Evaluation text only:")
eval_meter.report()
print("\n\n\n\n\n\n\n\n")
return total_loss / len(eval_ld)
def save(self, name):
torch.save(self.model.state_dict(),
os.path.join(args.output, "%s_LXRT.pth" % name))
if args.get("save_optimizer", False) and "Step" not in name:
torch.save(self.optim.state_dict(),
os.path.join(args.output, "%s_LXRT_optimizer.pth" % name))
def load(self, path, t_total):
print("Load model from %s" % path)
state_dict = torch.load("%s_LXRT.pth" % path)
#self.model.load_state_dict(state_dict)
from qa_answer_table import load_state_dict_flexible
load_state_dict_flexible(self.model, state_dict)
optimizer_path = "{}_LXRT_optimizer.pth".format(path)
if os.path.exists(optimizer_path) and args.get("load_optimizer", True):
print("Load optimizer from {}".format(optimizer_path))
loaded_optim = torch.load(optimizer_path)
if args.get("reset_schedule", False):
for group in loaded_optim["param_groups"]:
group['lr'] = args.lr
group['warmup'] = args.warmup_ratio
group["t_total"] = t_total
for p in group['params']:
loaded_optim["state"][p]["step"]
loaded_optim["state"][p]["step"] = 0
self.optim.load_state_dict(loaded_optim)
def load_lxmert(self, path):
print("Load LXMERT model from %s" % path)
state_dict = torch.load("%s_LXRT.pth" % path)
# Do not load any answer head
for key in list(state_dict.keys()):
if 'answer' in key:
state_dict.pop(key)
# Change Multi GPU to single GPU
new_state_dict = {}
for key, value in state_dict.items():
if key.startswith("module."):
new_state_dict[key[len("module."):]] = value
state_dict = new_state_dict
load_keys = set(state_dict.keys())
model_keys = set(self.model.state_dict().keys())
print()
print("Keys in loaded but not in model:")
for key in sorted(load_keys.difference(model_keys)):
print(key)
print()
print("Keys in model but not in loaded:")
for key in sorted(model_keys.difference(load_keys)):
print(key)
print()
self.model.load_state_dict(state_dict, strict=False)
if __name__ == "__main__":
lxmert = LXMERT(max_seq_length=args.get("max_seq_length", 20))
lxmert.train(train_tuple, valid_tuple)
| 21,642 | 41.189084 | 320 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/lxrt/optimization.py | # coding=utf-8
# Copyright 2019 project LXRT
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
import logging
logger = logging.getLogger(__name__)
def warmup_cosine(x, warmup=0.002):
    if x < warmup:
        return x/warmup
    # x is a plain Python float here, so use math.cos rather than torch.cos
    return 0.5 * (1.0 + math.cos(math.pi * x))
def warmup_constant(x, warmup=0.002):
""" Linearly increases learning rate over `warmup`*`t_total` (as provided to BertAdam) training steps.
Learning rate is 1. afterwards. """
if x < warmup:
return x/warmup
return 1.0
def warmup_linear(x, warmup=0.002):
""" Specifies a triangular learning rate schedule where peak is reached at `warmup`*`t_total`-th (as provided to BertAdam) training step.
After `t_total`-th training step, learning rate is zero. """
if x < warmup:
return x/warmup
return max((x-1.)/(warmup-1.), 0)
SCHEDULES = {
'warmup_cosine': warmup_cosine,
'warmup_constant': warmup_constant,
'warmup_linear': warmup_linear,
}
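# Worked example of the triangular schedule: with warmup=0.1 the multiplier
# ramps from 0 to 1 over the first 10% of training and decays linearly to 0
# at the end (x is the fraction of training completed).
def _warmup_linear_example():
    for x, expected in [(0.05, 0.5), (0.1, 1.0), (0.55, 0.5), (1.0, 0.0)]:
        assert abs(warmup_linear(x, warmup=0.1) - expected) < 1e-9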
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
b1: Adams b1. Default: 0.9
b2: Adams b2. Default: 0.999
e: Adams epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
max_grad_norm=1.0):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
## Print a summary of the optimizer
print("BERTAdam lr {} total_steps {} warmup {}".format(lr, t_total, warmup))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
warned_for_t_total = False
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# LXRT: grad is clipped outside.
# Add grad clipping
# if group['max_grad_norm'] > 0:
# clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(1 - beta1, grad)
next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
progress = state['step']/group['t_total']
lr_scheduled = group['lr'] * schedule_fct(progress, group['warmup'])
# warning for exceeding t_total (only active with warmup_linear)
if group['schedule'] == "warmup_linear" and progress > 1. and not warned_for_t_total:
logger.warning(
"Training beyond specified 't_total' steps with schedule '{}'. Learning rate set to {}. "
"Please set 't_total' of {} correctly.".format(group['schedule'], lr_scheduled, self.__class__.__name__))
warned_for_t_total = True
# end warning
else:
lr_scheduled = group['lr']
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
# step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
# No bias correction
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
return loss
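# Illustrative usage (hypothetical model): the lr passed here is the peak
# rate; the effective rate follows the chosen schedule, so warmup and
# t_total must describe the full training run.
def _bert_adam_usage_sketch(model, num_train_steps):
    optim = BertAdam(model.parameters(), lr=5e-5,
                     warmup=0.1, t_total=num_train_steps)
    return optim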
| 8,058 | 42.798913 | 141 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/lxrt/entry.py | # coding=utf-8
# Copyright 2021 Project Unsupervised VisualBERT
# Copyright 2019 project LXRT.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.nn as nn
import numpy as np
import numpy
from lxrt.tokenization import BertTokenizer
from collections import defaultdict
def get_padding_lengths(list_of_np_array):
return_dict = defaultdict(int)
for array in list_of_np_array:
for i, shape in enumerate(array.shape):
if return_dict["dimension_{}".format(i)] < shape:
return_dict["dimension_{}".format(i)] = shape
return return_dict
def pad_np_arrays(list_of_np_array, padding_value, dtype, cuda = True):
if isinstance(list_of_np_array[0], list):
list_of_np_array = [np.array(i, dtype=dtype) for i in list_of_np_array]
if list_of_np_array[0] is None:
return None
padding_lengths = get_padding_lengths(list_of_np_array)
max_shape = [padding_lengths["dimension_{}".format(i)]
for i in range(len(padding_lengths))]
# Convert explicitly to an ndarray just in case it's a scalar (it'd end up not being an ndarray otherwise)
final_list = []
for array_index, array in enumerate(list_of_np_array):
return_array = numpy.asarray(numpy.ones(max_shape, dtype = dtype) * padding_value)
# If the tensor has a different shape from the largest tensor, pad dimensions with zeros to
# form the right shaped list of slices for insertion into the final tensor.
slicing_shape = list(array.shape)
#if len(array.shape) < len(max_shape):
# slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(array.shape))]
slices = tuple([slice(0, x) for x in slicing_shape])
return_array[slices] = array
final_list.append(return_array)
final_list = np.stack(final_list, 0)
tensor = torch.from_numpy(final_list)
if cuda:
return tensor.cuda()
else:
return tensor
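# Illustrative usage: two ragged box arrays are padded to a common
# (batch, max_boxes, 4) tensor; cuda=False keeps the example CPU-only.
def _pad_np_arrays_example():
    a = np.zeros((2, 4), dtype=np.float32)
    b = np.ones((3, 4), dtype=np.float32)
    out = pad_np_arrays([a, b], padding_value=0, dtype=np.float32, cuda=False)
    assert tuple(out.shape) == (2, 3, 4)   # the shorter array is zero-padded
    return out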
#from param import args
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
def convert_sents_to_features(sents, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
features = []
for (i, sent) in enumerate(sents):
tokens_a = tokenizer.tokenize(sent.strip())
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# Keep segment id which allows loading BERT-weights.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids))
return features
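# Illustrative usage (assuming standard bert-base-uncased wordpiece
# tokenization of the example sentence):
def _convert_sents_example(tokenizer):
    feats = convert_sents_to_features(["a dog runs"], max_seq_length=8,
                                      tokenizer=tokenizer)
    f = feats[0]
    # [CLS] a dog runs [SEP] plus three pad ids; the mask marks real tokens
    assert len(f.input_ids) == len(f.input_mask) == len(f.segment_ids) == 8
    assert sum(f.input_mask) == 5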
def convert_sents_to_features_tensors(sents, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
features = []
for (i, sent) in enumerate(sents):
tokens_a = tokenizer.tokenize(sent.strip())
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# Keep segment id which allows loading BERT-weights.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids))
input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long).cuda()
input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long).cuda()
segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long).cuda()
return input_ids, input_mask, segment_ids
def convert_tags_to_tensorts(tags, cuda = True):
if tags[0] is None:
return None, None, None, None, None
visual_tags, visual_tags_mask, visual_tags_box, visual_tags_type, visual_tags_segment_ids = zip(*tags)
visual_tags = pad_np_arrays(visual_tags, padding_value=0, dtype=np.int64, cuda = cuda)
visual_tags_mask = pad_np_arrays(visual_tags_mask, padding_value=0, dtype=np.int64, cuda = cuda)
visual_tags_box = pad_np_arrays(visual_tags_box, padding_value=0, dtype=np.float32, cuda = cuda)
visual_tags_type = pad_np_arrays(visual_tags_type, padding_value=0, dtype=np.int64, cuda = cuda)
visual_tags_segment_ids = pad_np_arrays(visual_tags_segment_ids, padding_value=0, dtype=np.int64, cuda = cuda)
return visual_tags, visual_tags_mask, visual_tags_box, visual_tags_type, visual_tags_segment_ids
def convert_sent_features_to_features(sents, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
features = []
for (i, sent) in enumerate(sents):
tokens_a = tokenizer.tokenize(sent.strip())
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# Keep segment id which allows loading BERT-weights.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids))
return features
def set_visual_config(args, VISUAL_CONFIG):
VISUAL_CONFIG.l_layers = args.llayers
VISUAL_CONFIG.x_layers = args.xlayers
VISUAL_CONFIG.r_layers = args.rlayers
class LXRTEncoder(nn.Module):
def __init__(self, args, max_seq_length, mode='x'):
super().__init__()
self.max_seq_length = max_seq_length
from lxrt.modeling import LXRTFeatureExtraction as VisualBertForLXRFeature, VISUAL_CONFIG
set_visual_config(args, VISUAL_CONFIG)
# Using the bert tokenizer
self.tokenizer = BertTokenizer.from_pretrained(
"bert-base-uncased",
do_lower_case=True
)
# Build LXRT Model
self.model = VisualBertForLXRFeature.from_pretrained(
"bert-base-uncased",
mode=mode
)
if args.from_scratch:
print("Re-initializing all the weights")
self.model.apply(self.model.init_bert_weights)
self.load_pretrain_head = args.get("load_pretrain_head", False)
if self.load_pretrain_head:
from lxmert.src.lxrt.modeling import BertPreTrainingHeads
self.pretrained_head = BertPreTrainingHeads(self.model.config, self.model.bert.embeddings.word_embeddings.weight)
def multi_gpu(self):
self.model = nn.DataParallel(self.model)
@property
def dim(self):
return 768
def forward(self, sents, feats, visual_attention_mask=None, input_already_tokenized=False, visual_feats_seg_ids = None):
if not input_already_tokenized:
train_features = convert_sents_to_features(
sents, self.max_seq_length, self.tokenizer)
else:
train_features = sents
input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long).cuda()
input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long).cuda()
segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long).cuda()
output = self.model(input_ids, segment_ids, input_mask,
visual_feats=feats,
visual_attention_mask=visual_attention_mask,
visual_feats_seg_ids = visual_feats_seg_ids)
return output
def save(self, path):
torch.save(self.model.state_dict(), "%s_LXRT.pth" % path)
def load(self, path):
# Load state_dict from snapshot file
print("Load LXMERT pre-trained model from %s" % path)
state_dict = torch.load("%s_LXRT.pth" % path)
new_state_dict = {}
for key, value in state_dict.items():
if key.startswith("module."):
new_state_dict[key[len("module."):]] = value
else:
new_state_dict[key] = value
state_dict = new_state_dict
# Print out the differences of pre-trained and model weights.
load_keys = set(state_dict.keys())
model_keys = set(self.model.state_dict().keys())
print()
print("Weights in loaded but not in model:")
for key in sorted(load_keys.difference(model_keys)):
print(key)
print()
print("Weights in model but not in loaded:")
for key in sorted(model_keys.difference(load_keys)):
print(key)
print()
# Load weights to model
self.model.load_state_dict(state_dict, strict=False)
| 11,480 | 37.016556 | 125 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/lxrt/modeling.py | # coding=utf-8
# Copyright 2019 project LXRT.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch LXRT model."""
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
from copy import deepcopy
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, SmoothL1Loss
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
TF_WEIGHTS_NAME = 'model.ckpt'
def load_tf_weights_in_bert(model, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
    except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m"] for n in name):
print("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
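# Sanity check (illustrative): gelu(torch.tensor([0.0, 1.0])) is approximately
# tensor([0.0000, 0.8413]) -- the exact erf-based form, which differs slightly
# from the tanh approximation quoted in the docstring above.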
class GeLU(nn.Module):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
def __init__(self):
super().__init__()
def forward(self, x):
return gelu(x)
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class VisualConfig(object):
VISUAL_LOSSES = ['obj', 'attr', 'feat']
def __init__(self,
l_layers=12,
x_layers=5,
r_layers=0):
from param import args
self.l_layers = args.llayers
self.x_layers = args.xlayers
self.r_layers = args.rlayers
self.visual_feat_dim = 2048
self.visual_pos_dim = 4
'''if args.get("kl_divergence", False):
self.obj_id_num = 1601
self.attr_id_num = 401
else:'''
self.obj_id_num = 1600
self.attr_id_num = 400
self.visual_losses = self.VISUAL_LOSSES
weight = 1 / 0.15
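        # 1 / 0.15 ~= 6.67; presumably compensates for the losses being computed
        # only at the ~15% of masked positions, keeping their overall scale.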
if args.get("weight_disable", False):
weight = 1.0
ce_or_kl = "kl" if args.get("kl_divergence", False) else "ce"
self.visual_loss_config = {
'obj': (self.obj_id_num, ce_or_kl, (-1,), weight),
'attr': (self.attr_id_num, ce_or_kl, (-1,), weight),
'feat': (2048, 'l2', (-1, 2048), weight),
}
try:
from param import args
self.visualbert_style = args.get('visualbert_style', False)
self.symbolic_vocab_size = args.get('symbolic_vocab_size', 2632)
self.multi_choice = args.get("multi_choice", 0)
except:
self.visualbert_style = False
def set_visual_dims(self, feat_dim, pos_dim):
self.visual_feat_dim = feat_dim
self.visual_pos_dim = pos_dim
VISUAL_CONFIG = VisualConfig()
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
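# Usage sketch (hypothetical values): a config can be built from keyword
# arguments or loaded from disk with BertConfig.from_json_file(path).
#   config = BertConfig(vocab_size_or_config_json_file=30522, hidden_size=768)
#   print(config.to_json_string())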
BertLayerNorm = torch.nn.LayerNorm
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=0)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size, padding_idx=0)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
if VISUAL_CONFIG.visualbert_style:
self.symbolic_embedding = nn.Embedding(VISUAL_CONFIG.symbolic_vocab_size + 1, config.hidden_size) # The first is reserved for masking
def forward(self, input_ids, token_type_ids=None, attribute_ids=None, symbolic_embedding=False):
if symbolic_embedding:
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.symbolic_embedding(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + token_type_embeddings
if attribute_ids is not None:
attribute_mask = (attribute_ids != 0).float()
attribute_embedding = self.symbolic_embedding(attribute_ids)
                # Masked mean over attributes: zero out padding, sum, then divide
                # by the per-token count (clamped to >= 1 to avoid division by zero).
attribute_embedding = attribute_embedding * attribute_mask.unsqueeze(-1) # mask out paddings
attribute_embedding = attribute_embedding.sum(2)
length_attribute = attribute_mask.sum(2)
length_attribute[length_attribute == 0] = 1
attribute_embedding = attribute_embedding / length_attribute.unsqueeze(-1)
embeddings = embeddings + attribute_embedding
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def special_embedding(self, tokenized_words):
with torch.no_grad():
all_embeddings = []
for subwords in tokenized_words:
subwords = torch.LongTensor(subwords)
embedding = self.word_embeddings(subwords)
embedding = embedding.mean(dim=0)
all_embeddings.append(embedding)
all_embeddings = torch.stack( [torch.zeros_like(all_embeddings[0])] + all_embeddings, dim=0)
self.symbolic_embedding.weight = torch.nn.Parameter(all_embeddings)
class BertAttention(nn.Module):
def __init__(self, config, ctx_dim=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
# visual_dim = 2048
if ctx_dim is None:
            ctx_dim = config.hidden_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(ctx_dim, self.all_head_size)
self.value = nn.Linear(ctx_dim, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
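    # Shape walkthrough (illustrative): with batch=2, seq=5, hidden=768 and 12
    # heads, transpose_for_scores maps (2, 5, 768) -> (2, 12, 5, 64), so the
    # per-head attention scores below come from a single batched matmul.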
def forward(self, hidden_states, context, attention_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(context)
mixed_value_layer = self.value(context)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
if args.get("output_attention", False):
return context_layer, attention_probs
return context_layer
class BertAttOutput(nn.Module):
def __init__(self, config):
super(BertAttOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertCrossattLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.att = BertAttention(config)
self.output = BertAttOutput(config)
    def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None):
        output = self.att(input_tensor, ctx_tensor, ctx_att_mask)
        if args.get("output_attention", False):
            # BertAttention returns (context, attention_probs) in this mode;
            # only the context is consumed here.
            output, _ = output
        attention_output = self.output(output, input_tensor)
        return attention_output
class BertSelfattLayer(nn.Module):
def __init__(self, config):
super(BertSelfattLayer, self).__init__()
self.self = BertAttention(config)
self.output = BertAttOutput(config)
    def forward(self, input_tensor, attention_mask):
        # Self attention attends to itself, thus keys and queries are the same (input_tensor).
        if args.get("output_attention", False):
            self_output, attention_probs = self.self(input_tensor, input_tensor, attention_mask)
            attention_output = self.output(self_output, input_tensor)
            return attention_output, attention_probs
        self_output = self.self(input_tensor, input_tensor, attention_mask)
        attention_output = self.output(self_output, input_tensor)
        return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertSelfattLayer(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask):
if args.get("output_attention", False):
attention_output, attention_weights = self.attention(hidden_states, attention_mask)
else:
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
if args.get("output_attention", False):
return layer_output, attention_weights
return layer_output
"""
---------------------------------------------------------------------------------------
Above modules are copied from BERT (pytorch-transformer) with modifications.
---------------------------------------------------------------------------------------
"""
class BertEmbeddingsWithVisualEmbedding(nn.Module):
"""Construct the embeddings from word, position, token_type embeddings and visual embeddings.
"""
def __init__(self, config):
super(BertEmbeddingsWithVisualEmbedding, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.symbolic_embedding = nn.Embedding(2003, config.hidden_size)
#### Below are for encoding visual features
# Segment and position embedding for image features
self.token_type_embeddings_visual = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.position_embeddings_visual = nn.Embedding(config.max_position_embeddings, config.hidden_size)
feat_dim = VISUAL_CONFIG.visual_feat_dim
pos_dim = VISUAL_CONFIG.visual_pos_dim
# Object feature encoding
self.visn_fc = nn.Linear(feat_dim, config.hidden_size)
self.visn_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
# Box position encoding
self.box_fc = nn.Linear(pos_dim, config.hidden_size)
self.box_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.add_segment_embedding_to_visual = args.get("add_segment_embedding_to_visual", False)
        self.add_segment_embedding_to_visual_tags = args.get("add_segment_embedding_to_visual_tags", False)
        self.add_position_embedding_to_visual_tags = args.get("add_position_embedding_to_visual_tags", False)
        self.tag_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.joint_layer_norm = args.get("joint_layer_norm", False)
        self.use_segment_embedding_for_vision_and_tag = args.get("use_segment_embedding_for_vision_and_tag", False)
        self.use_bert_input_for_tags = args.get('use_bert_input_for_tags', False)
        self.disable_divide_2 = args.get("disable_divide_2", False)
def initialize_visual_position_type_embeddings(self):
        ### This is a bit unorthodox. The better way might be to add an initializer to AllenNLP.
        # This function is used to initialize token_type_embeddings_visual and position_embeddings_visual, just in case.
self.token_type_embeddings_visual.weight = torch.nn.Parameter(deepcopy(self.token_type_embeddings.weight.data), requires_grad = True)
self.position_embeddings_visual.weight = torch.nn.Parameter(deepcopy(self.position_embeddings.weight.data), requires_grad = True)
return
def initialize_symbolic_embeddings(self, tokenized_words):
with torch.no_grad():
all_embeddings = []
for subwords in tokenized_words:
subwords = torch.LongTensor(subwords)
embedding = self.word_embeddings(subwords)
embedding = embedding.mean(dim=0)
all_embeddings.append(embedding)
all_embeddings = torch.stack(all_embeddings, dim=0)
self.symbolic_embedding = nn.Embedding.from_pretrained(deepcopy(all_embeddings), freeze = False)
def forward(self, input_ids, token_type_ids=None, visual_embeddings=None, visual_embeddings_type=None, position_embeddings_visual=None, image_text_alignment=None, confidence=None, position_ids=None, boxes=None, visual_tags=None, visual_tags_box=None, visual_tags_type=None, visual_tags_segment_ids=None):
if input_ids is not None:
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
text_embeddings = words_embeddings + position_embeddings + token_type_embeddings
if not self.joint_layer_norm:
text_embeddings = self.LayerNorm(text_embeddings)
else:
text_embeddings = None
if visual_tags is not None:
if self.use_bert_input_for_tags:
tag_embeddings = self.word_embeddings(visual_tags)
else:
tag_embeddings = self.symbolic_embedding(visual_tags)
if args.get("oscar_style", False):
tag_position_ids = torch.arange(visual_tags.size(1), dtype=torch.long, device=visual_tags.device)
tag_position_ids = tag_position_ids.unsqueeze(0).expand_as(visual_tags)
tag_type_ids = torch.ones_like(visual_tags)
tag_position_embeddings = self.position_embeddings_visual(tag_position_ids)
tag_type_embeddings = self.token_type_embeddings_visual(tag_type_ids)
tag_embeddings = tag_embeddings + tag_position_embeddings + tag_type_embeddings
else:
y = self.box_fc(visual_tags_box)
if not self.joint_layer_norm:
y = self.box_layer_norm(y)
tag_embeddings = self.tag_layer_norm(tag_embeddings)
if not self.disable_divide_2:
tag_embeddings = (tag_embeddings + y) / 2 # + token_type_embeddings
else:
tag_embeddings = tag_embeddings + y
if visual_tags_segment_ids is not None:
assert(self.use_segment_embedding_for_vision_and_tag)
if self.use_segment_embedding_for_vision_and_tag:
if visual_tags_segment_ids is not None:
tag_type_ids = visual_tags_segment_ids
else:
tag_type_ids = torch.zeros_like(visual_tags) # Temporary
tag_type_embeddings = self.token_type_embeddings_visual(tag_type_ids)
tag_embeddings += tag_type_embeddings
else:
tag_embeddings = None
if visual_embeddings is not None:
x = self.visn_fc(visual_embeddings)
#x = self.visn_layer_norm(x)
y = self.box_fc(boxes)
#y = self.box_layer_norm(y)
if not self.joint_layer_norm:
x = self.visn_layer_norm(x)
y = self.box_layer_norm(y)
if not self.disable_divide_2:
v_embeddings = (x + y) / 2
else:
v_embeddings = x + y
#if visual_embeddings_type is not None:
# assert(self.use_segment_embedding_for_vision_and_tag)
if self.use_segment_embedding_for_vision_and_tag:
if visual_embeddings_type is None:
visual_embeddings_type = torch.zeros(*visual_embeddings.size()[:-1], dtype=torch.long).cuda()
token_type_embeddings_visual = self.token_type_embeddings_visual(visual_embeddings_type)
v_embeddings += token_type_embeddings_visual
else:
v_embeddings = None
if args.get("joint_layer_norm", False):
            # Concatenate the streams, then apply a joint LayerNorm:
embeddings = torch.cat([i for i in [text_embeddings, tag_embeddings, v_embeddings] if i is not None] , dim = 1) # concat the visual embeddings after the attentions
embeddings = self.LayerNorm(embeddings)
else:
embeddings = torch.cat([i for i in [text_embeddings, tag_embeddings, v_embeddings] if i is not None], dim=1) # concat the visual embeddings after the attentions
embeddings = self.dropout(embeddings)
return embeddings
def unfreeze_obj_feat(self):
all_modules = [
self.token_type_embeddings_visual,
self.position_embeddings_visual,
# Object feature encoding
self.visn_fc,
self.visn_layer_norm,
# Box position encoding
self.box_fc,
self.box_layer_norm,
self.dropout]
for submodule in all_modules:
for p in submodule.parameters():
p.requires_grad = True
class LXRTXLayer(nn.Module):
def __init__(self, config):
super().__init__()
# The cross-attention Layer
self.visual_attention = BertCrossattLayer(config)
# Self-attention Layers
self.lang_self_att = BertSelfattLayer(config)
self.visn_self_att = BertSelfattLayer(config)
# Intermediate and Output Layers (FFNs)
self.lang_inter = BertIntermediate(config)
self.lang_output = BertOutput(config)
self.visn_inter = BertIntermediate(config)
self.visn_output = BertOutput(config)
def cross_att(self, lang_input, lang_attention_mask, visn_input, visn_attention_mask):
# Cross Attention
lang_att_output = self.visual_attention(lang_input, visn_input, ctx_att_mask=visn_attention_mask)
visn_att_output = self.visual_attention(visn_input, lang_input, ctx_att_mask=lang_attention_mask)
return lang_att_output, visn_att_output
    def self_att(self, lang_input, lang_attention_mask, visn_input, visn_attention_mask):
        # Self Attention
        lang_att_output = self.lang_self_att(lang_input, lang_attention_mask)
        visn_att_output = self.visn_self_att(visn_input, visn_attention_mask)
        if args.get("output_attention", False):
            # BertSelfattLayer returns (output, attention_probs) in this mode;
            # the cross-modality path only consumes the outputs.
            lang_att_output = lang_att_output[0]
            visn_att_output = visn_att_output[0]
        return lang_att_output, visn_att_output
def output_fc(self, lang_input, visn_input):
# FC layers
lang_inter_output = self.lang_inter(lang_input)
visn_inter_output = self.visn_inter(visn_input)
# Layer output
lang_output = self.lang_output(lang_inter_output, lang_input)
visn_output = self.visn_output(visn_inter_output, visn_input)
return lang_output, visn_output
def forward(self, lang_feats, lang_attention_mask,
visn_feats, visn_attention_mask):
lang_att_output = lang_feats
visn_att_output = visn_feats
lang_att_output, visn_att_output = self.cross_att(lang_att_output, lang_attention_mask,
visn_att_output, visn_attention_mask)
lang_att_output, visn_att_output = self.self_att(lang_att_output, lang_attention_mask,
visn_att_output, visn_attention_mask)
lang_output, visn_output = self.output_fc(lang_att_output, visn_att_output)
return lang_output, visn_output
class VisualFeatEncoder(nn.Module):
def __init__(self, config):
super().__init__()
feat_dim = VISUAL_CONFIG.visual_feat_dim
pos_dim = VISUAL_CONFIG.visual_pos_dim
# Object feature encoding
self.visn_fc = nn.Linear(feat_dim, config.hidden_size)
self.visn_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
# Box position encoding
self.box_fc = nn.Linear(pos_dim, config.hidden_size)
self.box_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, visn_input):
# This is when we do not have the box_fc yet
if isinstance(visn_input, tuple) or isinstance(visn_input, list):
feats, boxes = visn_input
x = self.visn_fc(feats)
x = self.visn_layer_norm(x)
y = self.box_fc(boxes)
y = self.box_layer_norm(y)
output = (x + y) / 2
output = self.dropout(output)
return output
else:
assert(0)
x = self.visn_fc(visn_input)
x = self.visn_layer_norm(x)
return x
def _cat_with_none(feat_1, feat_2, dim):
if feat_1 is None:
return feat_2
if feat_2 is None:
return feat_1
return torch.cat((feat_1, feat_2), dim=dim)
def _split_with_none(lang_feats, visn_feats, joint_feats):
if lang_feats is None:
assert(visn_feats.size(1) == joint_feats.size(1))
return None, joint_feats
if visn_feats is None:
assert(lang_feats.size(1) == joint_feats.size(1))
return joint_feats, None
return joint_feats[:, :lang_feats.size(1), :].contiguous(), joint_feats[:, lang_feats.size(1):, :].contiguous()
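# Illustrative contract (hypothetical shapes): with lang_feats of (B, Lt, H) and
# visn_feats of (B, Lv, H), _cat_with_none yields (B, Lt + Lv, H) and
# _split_with_none recovers the two views, tolerating either input being None:
#   joint = _cat_with_none(lang_feats, visn_feats, dim=1)
#   lang, visn = _split_with_none(lang_feats, visn_feats, joint)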
class LXRTEncoder(nn.Module):
def __init__(self, config):
super().__init__()
# Obj-level image embedding layer
self.visn_fc = VisualFeatEncoder(config)
# Number of layers
self.num_l_layers = VISUAL_CONFIG.l_layers
self.num_x_layers = VISUAL_CONFIG.x_layers
self.num_r_layers = VISUAL_CONFIG.r_layers
print("LXRT encoder with %d l_layers, %d x_layers, and %d r_layers." %
(self.num_l_layers, self.num_x_layers, self.num_r_layers))
self.multi_choice = VISUAL_CONFIG.multi_choice
self.visualbert_style = VISUAL_CONFIG.visualbert_style
if self.visualbert_style:
layers = [BertLayer(config) for _ in range(self.num_l_layers)]
self.layer = nn.ModuleList(layers)
if args.get("additional_attention_layer", False):
_config = copy.deepcopy(config)
_config.intermediate_size = 768
_config.num_attention_heads = 1
#layers += [BertLayer(_config)]
self.additional_layer = BertLayer(_config)
print("\n\n!! Has {} layers".format(len(self.layer) + 1))
else:
print("\n\n!! Has {} layers".format(len(self.layer)))
return
        # Layers
        # Using self.layer instead of self.l_layer to support loading BERT weights.
        layers = [BertLayer(config) for _ in range(self.num_l_layers)]
        if args.get("additional_attention_layer", False):
            _config = copy.deepcopy(config)
            _config.intermediate_size = 768
            layers += [BertLayer(_config)]
        self.layer = nn.ModuleList(layers)
        print("\n\n!! Has {} layers".format(len(self.layer)))
self.x_layers = nn.ModuleList(
[LXRTXLayer(config) for _ in range(self.num_x_layers)]
)
self.r_layers = nn.ModuleList(
[BertLayer(config) for _ in range(self.num_r_layers)]
)
self.multi_choice = VISUAL_CONFIG.multi_choice
self.config = config
def forward(self,
lang_feats, lang_attention_mask,
visn_feats, visn_attention_mask=None,
bypass_visual_feat=None, bypass_mask=None,
layer_limit = -1):
# Run visual embedding layer
# Note: Word embedding layer was executed outside this module.
# Keep this design to allow loading BERT weights.
if not args.get("hybrid_embedding", False):
if args.get("symbolic", False):
visn_feats, adj = visn_feats
elif visn_feats[0] is not None:
visn_feats = self.visn_fc(visn_feats)
else:
visn_feats = None
if self.multi_choice != 0:
visn_feats = visn_feats.unsqueeze(1).expand( visn_feats.size(0), self.multi_choice, visn_feats.size(1), visn_feats.size(2))
visn_attention_mask = visn_attention_mask.unsqueeze(1).expand(visn_attention_mask.size(0), self.multi_choice, visn_attention_mask.size(1), visn_attention_mask.size(2), visn_attention_mask.size(3))
#print(visn_feats.size())
visn_feats = visn_feats.reshape((-1, visn_feats.size(2), visn_feats.size(3)))
visn_attention_mask = visn_attention_mask.reshape((-1, visn_attention_mask.size(-3), visn_attention_mask.size(-2), visn_attention_mask.size(-1)))
if self.visualbert_style:
if args.get("bypass_visual_feat", False):
joint_feats = _cat_with_none(lang_feats, visn_feats, dim=1) #torch.cat((lang_feats, visn_feats), dim=1)
joint_mask = _cat_with_none(lang_attention_mask, visn_attention_mask, dim=-1) #torch.cat((lang_attention_mask, visn_attention_mask), dim=-1)
if args.get("include_additional_layer", True):
for layer_module in self.layer[:-1]:
joint_feats = layer_module(joint_feats, joint_mask)
joint_feats = torch.cat((joint_feats, bypass_visual_feat), dim=1)
joint_feats = self.layer[-1](joint_feats, bypass_mask)
return _split_with_none(joint_feats, visn_feats, joint_feats)
else:
for layer_module in self.layer:
joint_feats = layer_module(joint_feats, joint_mask)
return torch.cat((joint_feats, bypass_visual_feat), dim = 1), None
if args.get("seperate_modeling", False):
#assert (args.get("additional_attention_layer", False))
joint_feats = _cat_with_none(lang_feats, visn_feats, dim=1) #torch.cat((lang_feats, visn_feats), dim=1)
joint_mask = _cat_with_none(lang_attention_mask, visn_attention_mask, dim=-1) #torch.cat((lang_attention_mask, visn_attention_mask), dim=-1)
if layer_limit != -1:
for layer_module in self.layer[:layer_limit]:
joint_feats = layer_module(joint_feats, joint_mask)
else:
for layer_module in self.layer:
joint_feats = layer_module(joint_feats, joint_mask)
return _split_with_none(lang_feats, visn_feats, joint_feats) #joint_feats[:, :lang_feats.size(1), :].contiguous(), joint_feats[:, lang_feats.size(1):, :].contiguous()
joint_feats = _cat_with_none(lang_feats, visn_feats, dim=1) #torch.cat((lang_feats, visn_feats), dim=1)
joint_mask = _cat_with_none(lang_attention_mask, visn_attention_mask, dim=-1) #torch.cat((lang_attention_mask, visn_attention_mask), dim=-1)
all_attention_weights = []
for layer_module in self.layer:
if args.get("output_attention", False):
joint_feats, attention_weights = layer_module(joint_feats, joint_mask)
all_attention_weights.append(attention_weights)
else:
joint_feats = layer_module(joint_feats, joint_mask)
if args.get("additional_attention_layer", False):
joint_feats = self.additional_layer(joint_feats, joint_mask)
if args.get("output_attention", False):
return _split_with_none(lang_feats, visn_feats, joint_feats), all_attention_weights
return _split_with_none(lang_feats, visn_feats, joint_feats) #joint_feats[:, :lang_feats.size(1), :].contiguous(), joint_feats[:, lang_feats.size(1):, :].contiguous()
# Run language layers
if lang_feats is not None:
for layer_module in self.layer:
lang_feats = layer_module(lang_feats, lang_attention_mask)
# Run relational layers
for layer_module in self.r_layers:
visn_feats = layer_module(visn_feats, visn_attention_mask)
# Run cross-modality layers
if lang_feats is not None:
for layer_module in self.x_layers:
lang_feats, visn_feats = layer_module(lang_feats, lang_attention_mask,
visn_feats, visn_attention_mask)
return lang_feats, visn_feats
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
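# Weight-tying sketch: the decoder reuses the input embedding matrix as its
# output projection, so only the per-token bias adds parameters. E.g.
# (hypothetical): BertLMPredictionHead(config, model.embeddings.word_embeddings.weight)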
class BertVisualAnswerHead(nn.Module):
def __init__(self, config, num_answers):
super().__init__()
hid_dim = config.hidden_size
self.logit_fc = nn.Sequential(
nn.Linear(hid_dim, hid_dim * 2),
GeLU(),
BertLayerNorm(hid_dim * 2, eps=1e-12),
nn.Linear(hid_dim * 2, num_answers)
)
def forward(self, hidden_states):
return self.logit_fc(hidden_states)
class BertVisualObjHead(nn.Module):
def __init__(self, config, visual_losses):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# Decide the use of visual losses
visual_losses = visual_losses.split(",")
for loss in visual_losses:
assert loss in VISUAL_CONFIG.VISUAL_LOSSES
self.visual_losses = visual_losses
sizes = {key: VISUAL_CONFIG.visual_loss_config[key][0] for key in self.visual_losses}
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder_dict = nn.ModuleDict({
key: nn.Linear(config.hidden_size, sizes[key])
for key in self.visual_losses
})
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
output = {}
for key in self.visual_losses:
output[key] = self.decoder_dict[key](hidden_states)
return output
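# Illustrative output contract (default VISUAL_CONFIG): forward returns
# {'obj': (B, N, 1600), 'attr': (B, N, 400), 'feat': (B, N, 2048)} -- one
# linear decoder per requested visual loss, applied to every visual region.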
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
if args.get("lxmert_style_nlvr", False):
self.seq_relationship_new = nn.Linear(config.hidden_size * 2, 2)
else:
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output, calculate_seq_score = True):
prediction_scores = self.predictions(sequence_output)
if not calculate_seq_score:
return prediction_scores, None
if args.get("lxmert_style_nlvr", False):
seq_relationship_score = self.seq_relationship_new(pooled_output)
else:
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(BertPreTrainedModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None,
from_tf=False, *inputs, **kwargs):
"""
Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `model.chkpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
                state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = pretrained_model_name_or_path
# redirect to the cache, if necessary
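        # NOTE: the args-level setting below overrides the cache_dir argument,
        # and the fallback path is machine-specific.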
cache_dir = args.get("cache_dir", "/local/harold/tmp/")
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path == 'bert-base-uncased':
try:
print("The BERT-weight-downloading query to AWS was time-out;"
"trying to download from UNC servers")
archive_file = "https://nlp.cs.unc.edu/data/bert/bert-base-uncased.tar.gz"
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except EnvironmentError:
print("The weight-downloading still crashed with link: %s, "
"please check your network connection" % archive_file)
return None
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
                        archive_file))
                return None
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info("loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file))
tempdir = None
if os.path.isdir(resolved_archive_file) or from_tf:
serialization_dir = resolved_archive_file
else:
# Extract archive to temp dir
tempdir = tempfile.mkdtemp(prefix="/local/harold/tmp/")
logger.info("extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
archive.extractall(tempdir)
serialization_dir = tempdir
# Load config
config_file = os.path.join(serialization_dir, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
state_dict = torch.load(weights_path, map_location='cpu' if not torch.cuda.is_available() else None)
if tempdir:
# Clean up temp dir
shutil.rmtree(tempdir)
if from_tf:
# Directly load from a TensorFlow checkpoint
weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
return load_tf_weights_in_bert(model, weights_path)
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
start_prefix = 'bert.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
return model
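# Usage sketch: LXRTModel.from_pretrained('bert-base-uncased') resolves the
# archive (download or cache), remaps TF-style 'gamma'/'beta' parameter names
# to 'weight'/'bias', and loads the state dict with per-key diagnostics.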
from param import args
class LXRTModel(BertPreTrainedModel):
"""LXRT Model."""
def __init__(self, config):
super().__init__(config)
if args.get("hybrid_embedding", False):
self.embeddings = BertEmbeddingsWithVisualEmbedding(config)
else:
self.embeddings = BertEmbeddings(config)
self.encoder = LXRTEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def forward(self,
input_ids, token_type_ids=None, attention_mask=None,
visual_feats=None, visual_attention_mask=None, position_embeddings_visual=None,
visual_tags=None, visual_tags_mask=None, visual_tags_box=None, visual_tags_type=None, visual_tags_segment_ids=None,
visual_feats_seg_ids = None,
):
if visual_attention_mask is None and visual_feats[0] is not None:
if args.get("uneven_masks", False):
visual_attention_mask = 1 - (visual_feats[0] == 0.0).all(-1).float().to(next(self.parameters()).device)
else:
visual_attention_mask = torch.ones(visual_feats[0].size(0), visual_feats[0].size(1)).to(next(self.parameters()).device)
if attention_mask is None and input_ids is not None:
attention_mask = torch.ones_like(input_ids)
if visual_tags_mask is None and visual_tags is not None:
            visual_tags_mask = torch.ones_like(visual_tags)
# Process masks
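        # Masks arrive as (batch, seq) with 1 for real tokens and 0 for padding;
        # each is broadcast to (batch, 1, 1, seq) and mapped {1 -> 0.0, 0 -> -10000.0}
        # so that adding it to the raw attention scores suppresses padded
        # positions after the softmax.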
if visual_attention_mask is not None:
extended_visual_attention_mask = visual_attention_mask.unsqueeze(1).unsqueeze(2)
extended_visual_attention_mask = extended_visual_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_visual_attention_mask = (1.0 - extended_visual_attention_mask) * -10000.0
else:
extended_visual_attention_mask = None
if attention_mask is not None:
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
else:
extended_attention_mask = None
if visual_tags_mask is not None:
extended_visual_tags_mask = visual_tags_mask.unsqueeze(1).unsqueeze(2)
extended_visual_tags_mask = extended_visual_tags_mask.to(dtype=next(self.parameters()).dtype)
extended_visual_tags_mask = (1.0 - extended_visual_tags_mask) * -10000.0
else:
extended_visual_tags_mask = None
embedding_output = self.embeddings(
input_ids=input_ids,
token_type_ids=None,
position_ids=None,
visual_embeddings=visual_feats[0],
boxes=visual_feats[1],
visual_embeddings_type=visual_feats_seg_ids,
position_embeddings_visual=None,
image_text_alignment=None,
confidence=None,
visual_tags=visual_tags,
visual_tags_box=visual_tags_box,
visual_tags_type=visual_tags_type,
visual_tags_segment_ids = visual_tags_segment_ids
)
concated_mask = torch.cat([ i for i in [extended_attention_mask, extended_visual_tags_mask, extended_visual_attention_mask] if i is not None], dim=-1)
# self.encoder will not distinguish between visual inputs, visual tag inputs or text inputs
if args.get("output_attention", False):
            (combined_feats, _), attention_weights = self.encoder(
                embedding_output,
                concated_mask,
                visn_feats=None,
                visn_attention_mask=None)
else:
combined_feats, _ = self.encoder(
embedding_output,
concated_mask,
visn_feats=None,
visn_attention_mask=None)
if attention_mask is not None:
lang_feats = combined_feats[:,:attention_mask.size(-1)]
else:
lang_feats = None
if visual_tags_mask is not None:
if attention_mask is None:
tag_feats = combined_feats[:,:visual_tags_mask.size(-1)]
else:
tag_feats = combined_feats[:, attention_mask.size(-1): attention_mask.size(-1) + visual_tags_mask.size(-1)]
else:
tag_feats = None
if visual_attention_mask is not None:
            visn_feats = combined_feats[:, -visual_attention_mask.size(-1):]
else:
visn_feats = None
if lang_feats is not None:
pooled_output = self.pooler(lang_feats)
if args.get("output_attention", False):
return (lang_feats, tag_feats, visn_feats), pooled_output, attention_weights
return (lang_feats, tag_feats, visn_feats), pooled_output
else:
if args.get("output_attention", False):
return (lang_feats, tag_feats, visn_feats), None, attention_weights
return (lang_feats, tag_feats, visn_feats), None
class LXRTPretraining(BertPreTrainedModel):
def __init__(self,
config,
args=None,
task_mask_lm=True,
task_matched=True,
task_obj_predict=True,
visual_losses='',
task_qa=True,
num_answers=2):
super().__init__(config)
# Configuration
self.config = config
self.num_answers = num_answers
self.args = args
# Use of pre-training tasks
self.task_mask_lm = task_mask_lm
self.task_obj_predict = task_obj_predict
self.task_matched = task_matched
self.task_qa = task_qa
# LXRT backbone
self.bert = LXRTModel(config)
# Pre-training heads
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
if self.task_obj_predict:
self.obj_predict_head = BertVisualObjHead(config, visual_losses)
if self.task_qa:
self.answer_head = BertVisualAnswerHead(config, self.num_answers)
if args.get("use_tag_symbolic_embedding", False):
self.symbolic_head = deepcopy(self.cls)
# Weight initialization
self.apply(self.init_bert_weights)
def special_initialize_pretraining_head(self):
self.symbolic_head.predictions.decoder.weight = self.bert.embeddings.symbolic_embedding.weight
self.symbolic_head.predictions.bias = nn.Parameter(torch.zeros(self.symbolic_head.predictions.decoder.weight.size(0)))
def forward(self,
input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
visual_feats=None, pos=None, obj_labels=None,
matched_label=None, ans=None,
visual_tags=None, visual_tags_mask=None, visual_tags_box=None, visual_tags_type=None, visual_tags_objective=None, visual_tags_mismatch=None, visual_tags_segment_ids=None,
visual_feats_seg_ids=None,
return_cross_relationship_score = False
):
(lang_output, tags_output, visn_output), pooled_output = self.bert(
input_ids, token_type_ids, attention_mask,
visual_feats=(visual_feats, pos), visual_feats_seg_ids = visual_feats_seg_ids,
visual_tags=visual_tags, visual_tags_mask=visual_tags_mask, visual_tags_box=visual_tags_box, visual_tags_type = visual_tags_type, visual_tags_segment_ids = visual_tags_segment_ids
)
if input_ids is None:
answer_score = None
cross_relationship_score = None
else:
if args.get('lxmert_style_nlvr', False):
pooled_output = pooled_output.view(pooled_output.size(0) // 2, 2 * pooled_output.size(-1))
lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)
if self.task_qa:
answer_score = self.answer_head(pooled_output)
else:
# This answer_score would not be used anywhere,
# just to keep a constant return function signature.
answer_score = pooled_output[0][0]
total_loss = 0.
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses = ()
losses_dict = {}
if masked_lm_labels is not None and self.task_mask_lm:
masked_lm_loss = loss_fct(
lang_prediction_scores.view(-1, self.config.vocab_size),
masked_lm_labels.view(-1)
)
total_loss += masked_lm_loss
losses += (masked_lm_loss.detach(),)
if visual_feats is not None:
losses_dict["Masked LM"] = masked_lm_loss.detach()
else:
losses_dict["Text Only Masked LM"] = masked_lm_loss.detach()
if matched_label is not None and self.task_matched and cross_relationship_score is not None:
matched_loss = loss_fct(
cross_relationship_score.view(-1, 2),
matched_label.view(-1)
)
total_loss += matched_loss
losses += (matched_loss.detach(),)
losses_dict["Matches"] = matched_loss.detach()
if obj_labels is not None and self.task_obj_predict and not args.get("disable_visual_and_tag_objective", False):
loss_fcts = {
'l2': SmoothL1Loss(reduction='none'),
'ce': CrossEntropyLoss(ignore_index=-1, reduction='none'),
"kl": torch.nn.KLDivLoss(reduction = "batchmean")
}
total_visn_loss = 0.
visn_prediction_scores_dict = self.obj_predict_head(visn_output)
for key in self.args.visual_losses.split(","):
label, mask_conf = obj_labels[key]
if key == "attr" or key == "obj":
label = label.long()
elif key == "feat":
label = label.float()
else:
assert(0)
output_dim, loss_fct_name, label_shape, weight = VISUAL_CONFIG.visual_loss_config[key]
visn_loss_fct = loss_fcts[loss_fct_name]
visn_prediction_scores = visn_prediction_scores_dict[key]
visn_loss = visn_loss_fct(
visn_prediction_scores.view(-1, output_dim),
label.view(*label_shape),
)
if visn_loss.dim() > 1: # Regression Losses
visn_loss = visn_loss.mean(1)
visn_loss = (visn_loss * mask_conf.view(-1)).mean() * weight
total_visn_loss += visn_loss
losses += (visn_loss.detach(),)
losses_dict[key] = visn_loss.detach()
total_loss += total_visn_loss
if ans is not None and self.task_qa and input_ids is not None:
answer_loss = loss_fct(
answer_score.view(-1, self.num_answers),
ans.view(-1)
)
# Since this Github version pre-trains with QA loss from the beginning,
# I exclude "*2" here to match the effect of QA losses.
# Previous: (loss *0) for 6 epochs, (loss *2) for 6 epochs. (Used 10 instead of 6 in EMNLP paper)
# Now : (loss *1) for 12 epochs
#
# * 2 # Multiply by 2 because > half of the data will not have label
total_loss += answer_loss
losses += (answer_loss.detach(),)
losses_dict["qa"] = answer_loss.detach()
if visual_tags_objective is not None and not args.get("disable_visual_and_tag_objective", False):
if args.get("use_bert_input_for_tags", False):
tags_output, _ = self.cls(tags_output, tags_output[:, 0], calculate_seq_score = False)
masked_tag_loss = loss_fct(
tags_output.view(-1, self.config.vocab_size),
visual_tags_objective.view(-1)
)
else:
tags_output, _ = self.symbolic_head(tags_output, tags_output[:, 0])
masked_tag_loss = loss_fct(
tags_output.view(-1, 2003),
visual_tags_objective.view(-1)
)
total_loss += masked_tag_loss
losses_dict["Masked Tags"] = masked_tag_loss.detach()
if visual_tags_mismatch is not None:
matched_loss = loss_fct(
cross_relationship_score.view(-1, 2),
visual_tags_mismatch.view(-1)
)
total_loss += matched_loss
losses += (matched_loss.detach(),)
losses_dict["Tag mismatch"] = matched_loss.detach()
if answer_score is None:
return total_loss, torch.stack(losses).unsqueeze(0), answer_score, losses_dict
        return total_loss, torch.stack(losses).unsqueeze(0) if len(losses) > 0 else (), answer_score.detach(), losses_dict
class LXRTFeatureExtraction(BertPreTrainedModel):
"""
BERT model for classification.
"""
def __init__(self, config, mode='lxr'):
"""
:param config:
:param mode: Number of visual layers
"""
super().__init__(config)
self.config = config
self.bert = LXRTModel(config)
self.mode = mode
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, visual_feats=None,
visual_attention_mask=None, return_both = False, visual_feats_seg_ids = None):
feat_seq, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
visual_feats=visual_feats,
visual_attention_mask=visual_attention_mask,
visual_feats_seg_ids = visual_feats_seg_ids)
if return_both:
return feat_seq, pooled_output
if 'x' == self.mode:
return pooled_output
elif 'x' in self.mode and ('l' in self.mode or 'r' in self.mode):
return feat_seq, pooled_output
elif 'l' in self.mode or 'r' in self.mode:
return feat_seq | 69,048 | 45.124916 | 308 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/lxrt/h5_data.py | import h5py
from copy import deepcopy
import numpy as np
import json
from torch.utils.data import Dataset
import torch
import random
from param import args
from tqdm import tqdm
from torch.utils.data import DataLoader
import gc
from src.tools import sharearray
import os
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
all_ = []
for i in range(0, len(lst), n):
data_index = lst[i:i + n]
        if len(data_index) == n:  # drop any incomplete final batch
all_.append(data_index)
return all_
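# Example: chunks(list(range(7)), 3) -> [[0, 1, 2], [3, 4, 5]]; the trailing
# incomplete chunk [6] is dropped by design.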
class CustomBatchSampler():
# We upsample certain datasets
def __init__(self, datasets, batch_size, upsample_ratios = [1, 1, 1], reduce_to_non_batch_sampler = False):
self.datasets = datasets
self.batch_size = batch_size
self.lengths = [len(i) for i in self.datasets]
self.upsample_ratios = upsample_ratios
self.rotate_index = [0] * len(self.upsample_ratios)
self.reduce_to_non_batch_sampler = reduce_to_non_batch_sampler
_flag = False
for i in self.upsample_ratios:
if i < 1:
_flag = True
self.all_indexes = [torch.randperm(i).tolist() for i in self.lengths]
assert(not args.get("old_sampler", False))
if args.get("gradient_accumulation_steps", None):
self.batch_size = batch_size * args.gradient_accumulation_steps
self.prepare_indexes()
def prepare_indexes(self):
self.all_batched_indexes = []
current_index = 0
for index, i in enumerate(self.lengths):
#if args.get("debug", False):
# random_indexes = list(range(i))
#else:
tmp_indexes = []
if self.upsample_ratios[index] < 1:
sample_num = int(1 / self.upsample_ratios[index])
random_indexes = self.all_indexes[index][self.rotate_index[index]:][::sample_num]
self.rotate_index[index] = self.rotate_index[index] + 1 #% sample_num
if self.rotate_index[index] == sample_num:
self.all_indexes[index] = torch.randperm(i).tolist()
self.rotate_index[index] = 0 # Reset rotate index
random.shuffle(random_indexes)
random_indexes = [j + current_index for j in random_indexes]
random_indexes = chunks(random_indexes, self.batch_size)
#self.all_batched_indexes.extend(random_indexes)
else:
random_indexes = torch.randperm(i).tolist()
random_indexes = [j + current_index for j in random_indexes]
random_indexes = chunks(random_indexes, self.batch_size)
#self.all_batched_indexes.extend(random_indexes)
random.shuffle(random_indexes)
self.all_batched_indexes.append(random_indexes)
if self.upsample_ratios[index] > 1:
for k in range(self.upsample_ratios[index] - 1):
#if args.get("debug", False):
# random_indexes = list(range(i))
#else:
random_indexes = torch.randperm(i).tolist()
random_indexes = [j + current_index for j in random_indexes]
random_indexes = chunks(random_indexes, self.batch_size)
#self.all_batched_indexes.extend(random_indexes)
random.shuffle(random_indexes)
self.all_batched_indexes[index].extend(random_indexes)
current_index += i
        all_flattened_indexes = []
        original_recorder = [len(i) for i in self.all_batched_indexes]
        original_recorder = [i / sum(original_recorder) for i in original_recorder]
        index_recorder = np.array([len(i) - 1 for i in self.all_batched_indexes])
        while np.any(index_recorder >= 0):
            chosen_index = np.random.choice(len(original_recorder), p=original_recorder)
            if index_recorder[chosen_index] >= 0:
                all_flattened_indexes.append(self.all_batched_indexes[chosen_index][index_recorder[chosen_index]])
                index_recorder[chosen_index] -= 1
        self.all_batched_indexes = all_flattened_indexes
if self.reduce_to_non_batch_sampler:
new_ = []
for i in self.all_batched_indexes:
for j in i:
new_.append([j])
self.all_batched_indexes = new_
if args.get("gradient_accumulation_steps", None):
flattened_indexes = []
for indexes in self.all_batched_indexes:
flattened_indexes.extend(indexes)
self.all_batched_indexes = chunks(flattened_indexes, self.batch_size // args.gradient_accumulation_steps)
return current_index
def __iter__(self):
self.prepare_indexes()
return iter(self.all_batched_indexes)
def __len__(self):
return len(self.all_batched_indexes)
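# Usage sketch (not executed; assumes `args` from param.py provides the keys
# the sampler reads, e.g. gradient_accumulation_steps). Because the sampler
# yields lists of indices, it is passed to DataLoader as `batch_sampler`
# rather than through `batch_size`/`shuffle`:
#
#   datasets = [ds_a, ds_b]  # the underlying torch Datasets
#   sampler = CustomBatchSampler(datasets, batch_size=32, upsample_ratios=[1, 2])
#   loader = DataLoader(ConcateDataset(datasets), batch_sampler=sampler)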
class ConcateDataset(Dataset):
def __init__(self, datasets):
self.datasets = datasets
def __getitem__(self, index):
len_of_datasets = [len(i) for i in self.datasets]
for i in range(0, len(len_of_datasets)):
if index < len_of_datasets[i]:
return self.datasets[i][index]
else:
index -= len_of_datasets[i]
def __len__(self):
return sum([len(i) for i in self.datasets])
class ConcateH5():
def __init__(self, list_of_h5):
self.list_of_h5 = list_of_h5
self.len_of_h5 = [len(i) for i in list_of_h5]
self.current_copy_index = None
self.current_copy = None
def __getitem__(self, index):
for i in range(0, len(self.len_of_h5)):
if index < self.len_of_h5[i]:
return self.list_of_h5[i][index]
else:
index -= self.len_of_h5[i]
def __len__(self):
return sum(self.len_of_h5)
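# A minimal, runnable sketch (not part of the original file): indices are
# global across the wrapped containers, so with lengths [3, 2], index 4 reads
# element 1 of the second container.
def _demo_concate_h5():
    c = ConcateH5([[10, 11, 12], [20, 21]])
    assert len(c) == 5 and c[4] == 21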
class ImageFeatureDataset():
def __init__(self, h5_features, h5_boxes, h5_objects_id, h5_objects_conf, h5_attrs_id, h5_attrs_conf, h5_wh, ids_to_index, h5_num_boxes = None, version_3 = False):
self.h5_features = h5_features
self.h5_boxes = h5_boxes
self.h5_objects_id = h5_objects_id
self.h5_objects_conf = h5_objects_conf
self.h5_attrs_id = h5_attrs_id
self.h5_attrs_conf = h5_attrs_conf
self.h5_wh = h5_wh
self.ids_to_index = ids_to_index
self.h5_num_boxes = h5_num_boxes
self.all_indexes = None
self.version_3 = version_3
def __getitem__(self, img_id):
image_index = self.ids_to_index[img_id]
if self.h5_num_boxes is not None:
obj_num = self.h5_num_boxes[image_index]
else:
obj_num = 36
feats = self.h5_features[image_index]
boxes = self.h5_boxes[image_index]
img_h = self.h5_wh[image_index][1]
img_w = self.h5_wh[image_index][0]
# For VCR (version_3), the class labels were not stored; only the full confidence
# distributions were kept, so the labels are recovered via argmax below.
if self.version_3:
obj_confs = np.array(self.h5_objects_conf[image_index][:, 1:])
attr_confs = np.array(self.h5_attrs_conf[image_index][:, 1:])
obj_labels = np.argmax(obj_confs, axis=1)
attr_labels = np.argmax(attr_confs, axis=1)
obj_confs = np.max(obj_confs, axis=1)
attr_confs = np.max(attr_confs, axis = 1)
else:
obj_labels = self.h5_objects_id[image_index]
obj_confs = self.h5_objects_conf[image_index]
attr_labels = self.h5_attrs_id[image_index]
attr_confs = self.h5_attrs_conf[image_index]
return image_index, obj_num, feats, boxes, img_h, img_w, obj_labels, obj_confs, attr_labels, attr_confs
def get_everything_except_features(self, img_id):
image_index = self.ids_to_index[img_id]
obj_num = 36
#feats = self.h5_features[image_index]
boxes = self.h5_boxes[image_index]
img_h = self.h5_wh[image_index][1]
img_w = self.h5_wh[image_index][0]
obj_labels = self.h5_objects_id[image_index]
obj_confs = self.h5_objects_conf[image_index]
attr_labels = self.h5_attrs_id[image_index]
attr_confs = self.h5_attrs_conf[image_index]
return image_index, obj_num, boxes, img_h, img_w, obj_labels, obj_confs, attr_labels, attr_confs
@classmethod
def create(cls,
sources,
Split2ImgFeatPath_h5,
load_custom_h5_version2=False, load_custom_h5_version3=False,
text_only = False, on_memory=False):
current_counter = 0
ids_to_index = {}
h5_features_list = []
h5_boxes_list = []
h5_objects_id_list = []
h5_objects_conf_list = []
h5_attrs_id_list = []
h5_attrs_conf_list = []
h5_wh_list = []
h5_num_boxes_list = []
for split in sources:
if load_custom_h5_version2:
h5_features, h5_boxes, h5_objects_id, h5_objects_conf, h5_attrs_id, h5_attrs_conf, wh_list, h5_num_boxes = cls.load_custom_h5_version2(Split2ImgFeatPath_h5[split], text_only = text_only, on_memory = on_memory)
elif load_custom_h5_version3:
h5_features, h5_boxes, h5_objects_id, h5_objects_conf, h5_attrs_id, h5_attrs_conf, wh_list, h5_num_boxes = cls.load_custom_h5_version3(Split2ImgFeatPath_h5[split], on_memory = on_memory)
else:
h5_features, h5_boxes, h5_objects_id, h5_objects_conf, h5_attrs_id, h5_attrs_conf = cls.load_custom_h5(Split2ImgFeatPath_h5[split], on_memory=on_memory, text_only=text_only)
h5_num_boxes = [36] * len(h5_features)
print(Split2ImgFeatPath_h5[split], len(h5_boxes))
h5_features_list.append(h5_features)
h5_boxes_list.append(h5_boxes)
h5_objects_id_list.append(h5_objects_id)
h5_objects_conf_list.append(h5_objects_conf)
h5_attrs_id_list.append(h5_attrs_id)
h5_attrs_conf_list.append(h5_attrs_conf)
h5_num_boxes_list.append(h5_num_boxes)
if load_custom_h5_version2 or load_custom_h5_version3:
with open(Split2ImgFeatPath_h5[split].replace("h5", "txt").replace('no_features', "image_ids"), "r") as f:
image_ids = f.readlines()
for index, i in enumerate(image_ids):
# Skip images with no detected boxes; this may warrant an extra sanity check.
if h5_num_boxes[index] == 0:
continue
ids_to_index[i.replace("\n", "")] = index + current_counter
current_counter += len(image_ids)
else:
with open(Split2ImgFeatPath_h5[split].replace("h5", "json"), "r") as f:
metadata = json.load(f)
wh_list = []
for index, i in enumerate(metadata):
ids_to_index[i["img_id"]] = index + current_counter
wh_list.append((i['img_w'], i['img_h']))
current_counter += len(metadata)
h5_wh_list.append(wh_list)
print("Created {}".format(sources))
h5_features = ConcateH5(h5_features_list)
h5_boxes = ConcateH5(h5_boxes_list)
h5_objects_id = ConcateH5(h5_objects_id_list)
h5_objects_conf = ConcateH5(h5_objects_conf_list)
h5_attrs_id = ConcateH5(h5_attrs_id_list)
h5_attrs_conf = ConcateH5(h5_attrs_conf_list)
h5_wh = ConcateH5(h5_wh_list)
h5_num_boxes_list = ConcateH5(h5_num_boxes_list)
return cls(h5_features, h5_boxes, h5_objects_id, h5_objects_conf, h5_attrs_id, h5_attrs_conf, h5_wh, ids_to_index, h5_num_boxes = h5_num_boxes_list, version_3 = load_custom_h5_version3)
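# Usage sketch (not part of the original file; the path and the image id are
# illustrative, mirroring how tasks/vqa_data.py calls create()):
#
#   feat_paths = {'train': 'data/mscoco_imgfeat/train2014_obj36.h5'}
#   image_ds = ImageFeatureDataset.create(['train'], feat_paths, on_memory=False)
#   (image_index, obj_num, feats, boxes, img_h, img_w,
#    obj_labels, obj_confs, attr_labels, attr_confs) = image_ds['COCO_train2014_000000458752']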
@staticmethod
def load_custom_h5(h5_file_name, on_memory=False, text_only = False):
h5_file = h5py.File(h5_file_name, "r")
if on_memory:
print("Reading h5 {}".format(h5_file))
h5_features = sharearray.cache(h5_file_name.split("/")[-1], lambda: h5_file['features'])
gc.collect()
else:
h5_features = h5_file['features']
h5_boxes = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "boxes"), np.array(h5_file['boxes']))
h5_objects_id = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "objects_id"), np.array(h5_file['objects_id']))
h5_objects_conf = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "objects_conf"), np.array(h5_file['objects_conf']))
h5_attrs_id = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "attrs_id"), np.array(h5_file['attrs_id']))
h5_attrs_conf = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "attrs_conf"), np.array(h5_file['attrs_conf']))
# Sanity check: the shared-memory cache must match the on-disk data.
for index in range(len(h5_attrs_id)):
assert np.all(h5_attrs_id[index] == np.array(h5_file['attrs_id'][index]))
if on_memory:
h5_file.close()
del h5_file
gc.collect()
return h5_features, h5_boxes, h5_objects_id, h5_objects_conf, h5_attrs_id, h5_attrs_conf
@staticmethod
def load_custom_h5_version2(h5_file_name, on_memory=False, text_only=False): # This version is used for the Conceptual Captions data
if not text_only:
h5_file_feature = h5py.File(h5_file_name.replace("no_features", "features"), "r")
h5_file = h5py.File(h5_file_name, "r")
if on_memory and not text_only: # guard: h5_file_feature is only opened when features are needed
print("Reading h5 {}".format(h5_file_name.replace("no_features", "features")))
h5_features = sharearray.cache(h5_file_name.replace("no_features", "features").split("/")[-1], lambda: h5_file_feature['image_features'])
gc.collect()
else:
if not text_only:
h5_features = h5_file_feature['image_features']
h5_boxes = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "boxes"), lambda: h5_file['boxes'])
h5_num_boxes = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "num_boxes"), lambda: h5_file['num_boxes'])
if not args.get("kl_divergence", False):
h5_objects_id = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "object_ids"), lambda: np.array(h5_file['object_ids'])[:, :, 0]) #deepcopy(np.array(h5_file['object_ids'])[:, :, 0])
h5_objects_conf = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "object_pro"), lambda: np.array(h5_file['object_pro'])[:, :, 0]) #deepcopy(np.array(h5_file['object_pro'])[:, :, 0])
h5_attrs_id = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "attribute_ids"), lambda: np.array(h5_file['attribute_ids'])[:, :, 0]) #deepcopy(np.array(h5_file['attribute_ids'])[:, :, 0])
h5_attrs_conf = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "attribute_pro"), lambda: np.array(h5_file['attribute_pro'])[:, :, 0]) #deepcopy(np.array(h5_file['attribute_pro'])[:, :, 0])
else:
h5_objects_id = deepcopy(np.array(h5_file['object_ids']))
h5_objects_conf = deepcopy(np.array(h5_file['object_pro']))
h5_attrs_id = deepcopy(np.array(h5_file['attribute_ids']))
h5_attrs_conf = deepcopy(np.array(h5_file['attribute_pro']))
gc.collect()
img_h = deepcopy(np.array(h5_file['img_h'])).tolist()
img_w = deepcopy(np.array(h5_file['img_w'])).tolist()
wh_list = []
for i in range(len(img_h)):
wh_list.append((img_w[i], img_h[i]))
h5_file.close()
del h5_file
gc.collect()
if text_only:
h5_features = [0] * len(h5_num_boxes)
return h5_features, h5_boxes, h5_objects_id, h5_objects_conf, h5_attrs_id, h5_attrs_conf, wh_list, h5_num_boxes
@staticmethod
def load_custom_h5_version3(h5_file_name, on_memory=False, keep_top_1=True): # This version keeps full confidence distributions; labels are recovered via argmax downstream (see the version_3 path in __getitem__)
h5_file_feature = h5py.File(h5_file_name.replace("no_features", "features"), "r")
h5_file = h5py.File(h5_file_name, "r")
if on_memory:
print("Reading h5 {}".format(h5_file_name.replace("no_features", "features")))
h5_features = sharearray.cache(h5_file_name.replace("no_features", "features").split("/")[-1], lambda: h5_file_feature['image_features'])
gc.collect()
else:
h5_features = h5_file_feature['image_features']
h5_boxes = deepcopy(np.array(h5_file['boxes']))
h5_num_boxes = deepcopy(np.array(h5_file['num_boxes']))
h5_objects_conf = h5_file['object_pro']
h5_attrs_conf = h5_file['attribute_pro']
img_h = deepcopy(np.array(h5_file['img_h'])).tolist()
img_w = deepcopy(np.array(h5_file['img_w'])).tolist()
wh_list = []
for i in range(len(img_h)):
wh_list.append((img_w[i], img_h[i]))
h5_objects_id = np.zeros(len(wh_list)) # Placeholder; real labels come from argmax over confidences in __getitem__
h5_attrs_id = np.zeros(len(wh_list)) # Placeholder
return h5_features, h5_boxes, h5_objects_id, h5_objects_conf, h5_attrs_id, h5_attrs_conf, wh_list, h5_num_boxes
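# Note on the file layout assumed by version2/version3 loading (derived from
# the string replacements above): each shard is a pair of h5 files, e.g.
#   foo_no_features.h5 -> boxes, num_boxes, object/attribute ids and probs, img_h, img_w
#   foo_features.h5 -> image_features
# plus foo_image_ids.txt holding one image id per line (read by create()).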
| 17,660 | 44.518041 | 225 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/lxrt/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_bert'))
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
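# A minimal sketch (not part of the original file) of the cache-naming scheme:
def _demo_url_to_filename():
    # The mapping is deterministic, so repeated downloads of one URL reuse a
    # single cache entry; the etag hash is appended after a period.
    name = url_to_filename("https://example.com/weights.bin", etag='"abc123"')
    assert name == url_to_filename("https://example.com/weights.bin", etag='"abc123"')
    assert "." in name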
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
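# A minimal sketch (not part of the original file) of the bucket/key split:
def _demo_split_s3_path():
    # The leading slash of the key is stripped.
    assert split_s3_path("s3://my-bucket/models/bert.bin") == ("my-bucket", "models/bert.bin")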
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w', encoding="utf-8") as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
| 8,209 | 32.104839 | 112 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/tasks/vqa_data.py | # coding=utf-8
# Copyleft 2019 project LXRT.
import json
import os
import pickle
import numpy as np
import torch
from torch.utils.data import Dataset
import h5py
from copy import deepcopy
from param import args
from utils import load_obj_tsv
from pretrain.tag_data_utilis import create_tags
from lxrt.tokenization import BertTokenizer
from lxrt.h5_data import ImageFeatureDataset
# Load only part of the dataset for fast sanity checks.
# Note that these are counts of images, not of examples;
# all examples associated with the selected images are used.
TINY_IMG_NUM = 512
FAST_IMG_NUM = 5000
# The path to data and image features.
VQA_DATA_ROOT = 'data/vqa/'
MSCOCO_IMGFEAT_ROOT = 'data/mscoco_imgfeat/'
SPLIT2NAME = {
'train': 'train2014',
'valid': 'val2014',
'minival': 'val2014',
'nominival': 'val2014',
'test': 'test2015',
}
Split2ImgFeatPath = {
'train': 'data/mscoco_imgfeat/train2014_obj36.h5',
'valid': 'data/mscoco_imgfeat/val2014_obj36.h5',
'minival': 'data/mscoco_imgfeat/val2014_obj36.h5',
'nominival': 'data/mscoco_imgfeat/val2014_obj36.h5',
"test": 'data/mscoco_imgfeat/test2015_obj36.h5',
}
class VQADataset:
"""
A VQA data example in json file:
{
"answer_type": "other",
"img_id": "COCO_train2014_000000458752",
"label": {
"net": 1
},
"question_id": 458752000,
"question_type": "what is this",
"sent": "What is this photo taken looking through?"
}
"""
def __init__(self, splits: str):
self.name = splits
self.splits = splits.split(',')
# Loading datasets
self.data = []
for split in self.splits:
self.data.extend(json.load(open("data/vqa/%s.json" % split)))
print("Load %d data from split(s) %s." % (len(self.data), self.name))
# Convert list to dict (for evaluation)
self.id2datum = {
datum['question_id']: datum
for datum in self.data
}
# Answers
self.ans2label = json.load(open("data/vqa/trainval_ans2label.json"))
self.label2ans = json.load(open("data/vqa/trainval_label2ans.json"))
assert len(self.ans2label) == len(self.label2ans)
@property
def num_answers(self):
return len(self.ans2label)
def __len__(self):
return len(self.data)
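# A minimal usage sketch (not part of the original file; assumes the
# data/vqa/*.json files described above exist on disk):
def _demo_vqa_dataset():
    dset = VQADataset("minival")
    first = dset.data[0]
    assert dset.id2datum[first['question_id']] is first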
class ConcateH5():
def __init__(self, list_of_h5):
self.list_of_h5 = list_of_h5
self.len_of_h5 = [len(i) for i in list_of_h5]
def __getitem__(self, index):
for i in range(0, len(self.len_of_h5)):
if index < self.len_of_h5[i]:
return self.list_of_h5[i][index]
else:
index -= self.len_of_h5[i]
def __len__(self):
return sum(self.len_of_h5)
"""
An example in obj36 tsv:
FIELDNAMES = ["img_id", "img_h", "img_w", "objects_id", "objects_conf",
"attrs_id", "attrs_conf", "num_boxes", "boxes", "features"]
FIELDNAMES would be keys in the dict returned by load_obj_tsv.
"""
mapping_rawdataset_name_to_json = {
"train": "train",
"nominival": "val",
"minival": "val"
}
class VQATorchDataset(Dataset):
def __init__(self, dataset: VQADataset, args):
super().__init__()
self.raw_dataset = dataset
if args.tiny:
topk = TINY_IMG_NUM
elif args.fast:
topk = FAST_IMG_NUM
else:
topk = None
self.limit_to_symbolic_split = args.get("limit_to_symbolic_split", False)
if self.limit_to_symbolic_split:
dataDir = "/local/harold/ubert/bottom-up-attention/data/vg/"
coco_ids = set()
self.mapping_cocoid_to_imageid = {}
with open(os.path.join(dataDir, 'image_data.json')) as f:
metadata = json.load(f)
for item in metadata:
if item['coco_id']:
coco_ids.add(int(item['coco_id']))
self.mapping_cocoid_to_imageid[int(item['coco_id'])] = item["image_id"]
from lib.data.vg_gqa import vg_gqa
self.vg_gqa = vg_gqa(None, split = "val" if self.raw_dataset.name == "minival" else "train", transforms=None, num_im=-1)
self.custom_coco_data = args.get("custom_coco_data", False)
self.use_h5_file = args.get("use_h5_file", False)
if self.use_h5_file:
self.image_feature_dataset = ImageFeatureDataset.create(dataset.splits, Split2ImgFeatPath, on_memory = args.get("on_memory", False))
self.ids_to_index = self.image_feature_dataset.ids_to_index
# Screen data
used_data = []
for datum in self.raw_dataset.data:
if datum['img_id'] in self.ids_to_index:
used_data.append(datum)
else:
# Loading detection features to img_data
img_data = []
for split in dataset.splits:
# Minival is 5K images in MS COCO, which is used in evaluating VQA/LXMERT-pre-training.
# It is saved as the top 5K features in val2014_***.tsv
load_topk = 5000 if (split == 'minival' and topk is None) else topk
img_data.extend(load_obj_tsv(
os.path.join(MSCOCO_IMGFEAT_ROOT, '%s_obj36.tsv' % (SPLIT2NAME[split])),
topk=load_topk))
# Convert img list to dict
self.imgid2img = {}
for img_datum in img_data:
self.imgid2img[img_datum['img_id']] = img_datum
used_data = self.raw_dataset.data
used_data = used_data[::args.get("partial_dataset", 1)]
self.data = used_data
# Only keep the data whose image features were successfully loaded.
print("Use %d data in torch dataset" % (len(self.data)))
print()
if args.get("add_tags", False):
self.tokenizer = BertTokenizer.from_pretrained(
"bert-base-uncased",
do_lower_case=True
)
from lxrt.symbolic_vocabulary import SymbolicVocab
self.symbolic_vocab = SymbolicVocab(args.objects_vocab, args.attributes_vocab)
def load_custom_h5(self, h5_file):
h5_features = h5_file['features']
h5_boxes = deepcopy(np.array(h5_file['boxes']))
h5_objects_id = deepcopy(np.array(h5_file['objects_id']))
h5_objects_conf = deepcopy(np.array(h5_file['objects_conf']))
h5_attrs_id = deepcopy(np.array(h5_file['attrs_id']))
h5_attrs_conf = deepcopy(np.array(h5_file['attrs_conf']))
return h5_features, h5_boxes, h5_objects_id, h5_objects_conf, h5_attrs_id, h5_attrs_conf
def __len__(self):
return len(self.data)
def __getitem__(self, item: int):
datum = self.data[item]
img_id = datum['img_id']
ques_id = datum['question_id']
ques = datum['sent']
if self.custom_coco_data:
image_index = self.ids_to_index[img_id]
obj_num = None
feats = self.h5_features[image_index]
boxes = self.h5_boxes[image_index]
img_h = self.h5_wh[image_index][1]
img_w = self.h5_wh[image_index][0]
obj_confs = None
attr_labels = None
attr_confs = None
elif self.use_h5_file:
image_index, obj_num, feats, boxes, img_h, img_w, obj_labels, obj_confs, attr_labels, attr_confs = self.image_feature_dataset[img_id]
else:
# Get image info
img_info = self.imgid2img[img_id]
obj_num = img_info['num_boxes']
feats = img_info['features'].copy()
boxes = img_info['boxes'].copy()
assert obj_num == len(boxes) == len(feats)
img_h, img_w = img_info['img_h'], img_info['img_w']
# Normalize the boxes (to 0 ~ 1)
boxes = boxes.copy()
boxes[:, (0, 2)] /= img_w
boxes[:, (1, 3)] /= img_h
np.testing.assert_array_less(boxes, 1+1e-5)
np.testing.assert_array_less(-boxes, 0+1e-5)
if args.get("add_tags", False):
tags = create_tags(obj_labels=obj_labels, attr_labels=attr_labels, obj_confs=None, attr_confs=None, tokenizer=self.tokenizer, symbolic_vocab = self.symbolic_vocab, visual_tags_box = boxes, use_bert_input=True)
else:
tags = None
# Provide label (target)
if 'label' in datum:
label = datum['label']
target = torch.zeros(self.raw_dataset.num_answers)
for ans, score in label.items():
target[self.raw_dataset.ans2label[ans]] = score
return ques_id, feats, boxes, ques, tags, target
else:
return ques_id, feats, boxes, ques, tags
class VQAEvaluator:
def __init__(self, dataset: VQADataset):
self.dataset = dataset
def evaluate(self, quesid2ans: dict):
score = 0.
for quesid, ans in quesid2ans.items():
datum = self.dataset.id2datum[quesid]
label = datum['label']
if ans in label:
score += label[ans]
return score / len(quesid2ans)
def dump_result(self, quesid2ans: dict, path):
"""
Dump results to a json file, which could be submitted to the VQA online evaluation.
VQA json file submission requirement:
results = [result]
result = {
"question_id": int,
"answer": str
}
:param quesid2ans: dict of quesid --> ans
:param path: The desired path of saved file.
"""
with open(path, 'w') as f:
result = []
for ques_id, ans in quesid2ans.items():
result.append({
'question_id': ques_id,
'answer': ans
})
json.dump(result, f, indent=4, sort_keys=True)
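# A minimal sketch (not part of the original file) with a stand-in dataset:
# VQA accuracy is the sum of the soft label scores of the predicted answers,
# averaged over all questions.
def _demo_vqa_evaluator():
    class _FakeDataset:
        id2datum = {1: {"label": {"net": 1.0}}, 2: {"label": {"cat": 0.3}}}
    evaluator = VQAEvaluator(_FakeDataset())
    assert abs(evaluator.evaluate({1: "net", 2: "dog"}) - 0.5) < 1e-9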
| 10,280 | 34.329897 | 221 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/tasks/vqa_model.py | # coding=utf-8
# Copyleft 2019 project LXRT.
import torch.nn as nn
from param import args
from lxrt.entry import LXRTEncoder, convert_sents_to_features_tensors, convert_tags_to_tensorts, pad_np_arrays
from lxrt.modeling import BertLayerNorm, GeLU
from lxrt.tokenization import BertTokenizer
import numpy as np
# Max length including <bos> and <eos>
MAX_VQA_LENGTH = 20
class VQAModel(nn.Module):
def __init__(self, num_answers):
super().__init__()
# Build LXRT encoder
self.lxrt_encoder = LXRTEncoder(
args,
max_seq_length=MAX_VQA_LENGTH
)
hid_dim = self.lxrt_encoder.dim
# VQA Answer heads
self.logit_fc = nn.Sequential(
nn.Linear(hid_dim, hid_dim * 2),
GeLU(),
BertLayerNorm(hid_dim * 2, eps=1e-12),
nn.Linear(hid_dim * 2, num_answers)
)
self.logit_fc.apply(self.lxrt_encoder.model.init_bert_weights)
self.tokenizer = BertTokenizer.from_pretrained(
"bert-base-uncased",
do_lower_case=True
)
def multi_gpu(self):
self.lxrt_encoder.model.module.bert = nn.DataParallel(self.lxrt_encoder.model.module.bert)
def forward(self, feat, pos, sent, tags):
"""
b -- batch_size, o -- object_number, f -- visual_feature_size
:param feat: (b, o, f)
:param pos: (b, o, 4)
:param sent: (b,) Type -- list of string
:param leng: (b,) Type -- int numpy array
:return: (b, num_answer) The logit of each answers.
"""
#x = self.lxrt_encoder(sent, (feat, pos))
input_ids, input_mask, segment_ids = convert_sents_to_features_tensors(sent, max_seq_length = MAX_VQA_LENGTH, tokenizer=self.tokenizer)
visual_tags, visual_tags_mask, visual_tags_box, visual_tags_type, visual_tags_segment_ids = convert_tags_to_tensorts(tags)
feat = pad_np_arrays(feat, padding_value=0, dtype=np.float32)
pos = pad_np_arrays(pos, padding_value=0, dtype=np.float32)
stuff, pooled_output = self.lxrt_encoder.model.module.bert(
input_ids, segment_ids, input_mask,
visual_feats=(feat, pos),
visual_attention_mask=None,
visual_feats_seg_ids=None,
visual_tags=visual_tags, visual_tags_mask=visual_tags_mask, visual_tags_box=visual_tags_box, visual_tags_type=visual_tags_type, visual_tags_segment_ids=visual_tags_segment_ids,
)
logit = self.logit_fc(pooled_output)
return logit
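# Data-flow note (see tasks/vqa.py): batches are built with
# DataLoader(collate_fn=lambda x: x) and unpacked via zip(*batch), so `feat`,
# `pos`, `sent`, and `tags` arrive as per-example sequences; pad_np_arrays
# above stacks and pads the numpy inputs before they reach the encoder:
#
#   ques_id, feats, boxes, sent, tags, target = zip(*batch)
#   logit = model(feats, boxes, sent, tags)  # (b, num_answers)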
| 2,612 | 34.310811 | 192 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/tasks/vqa.py | # coding=utf-8
# Copyleft 2019 project LXRT.
import os
import collections
import torch
import torch.nn as nn
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import h5py
import pandas as pd
from param import args
from pretrain.qa_answer_table import load_lxmert_qa, load_lxmert_from_sgg_and_lxmert_pretrain, load_lxmert_from_pretrain_noqa
from tasks.vqa_model import VQAModel
from tasks.vqa_data import VQADataset, VQATorchDataset, VQAEvaluator
from utils import load_lxmert_sgg
DataTuple = collections.namedtuple("DataTuple", 'dataset loader evaluator')
def get_data_tuple(splits: str, bs:int, shuffle=False, drop_last=False) -> DataTuple:
dset = VQADataset(splits)
tset = VQATorchDataset(dset, args)
evaluator = VQAEvaluator(dset)
data_loader = DataLoader(
tset, batch_size=bs,
shuffle=shuffle, num_workers=args.num_workers,
drop_last=drop_last, pin_memory=True,
collate_fn=lambda x: x
)
return DataTuple(dataset=dset, loader=data_loader, evaluator=evaluator)
class VQA:
def __init__(self):
# Datasets
self.train_tuple = get_data_tuple(
args.train, bs=args.batch_size, shuffle=True, drop_last=True
)
if args.valid != "":
valid_bsize = args.get("valid_batch_size", 16)
self.valid_tuple = get_data_tuple(
args.valid, bs=valid_bsize,
shuffle=False, drop_last=False
)
else:
self.valid_tuple = None
# Model
self.model = VQAModel(self.train_tuple.dataset.num_answers)
# Load pre-trained weights
if args.load_lxmert is not None:
self.model.lxrt_encoder.load(args.load_lxmert)
if args.get("load_lxmert_pretrain", None) is not None:
load_lxmert_from_pretrain_noqa(args.load_lxmert_pretrain, self.model)
if args.load_lxmert_qa is not None:
load_lxmert_qa(args.load_lxmert_qa, self.model,
label2ans=self.train_tuple.dataset.label2ans)
# GPU options
self.model = self.model.cuda()
if args.multiGPU:
self.model.lxrt_encoder.multi_gpu()
self.model.multi_gpu()
# Loss and Optimizer
self.bce_loss = nn.BCEWithLogitsLoss()
if 'bert' in args.optim:
batch_per_epoch = len(self.train_tuple.loader)
t_total = int(batch_per_epoch * args.epochs)
print("BertAdam Total Iters: %d" % t_total)
from lxrt.optimization import BertAdam
self.optim = BertAdam(list(self.model.parameters()),
lr=args.lr,
warmup=0.1,
t_total=t_total)
else:
self.optim = args.optimizer(self.model.parameters(), args.lr)
# Output Directory
self.output = args.output
os.makedirs(self.output, exist_ok=True)
def train(self, train_tuple, eval_tuple):
dset, loader, evaluator = train_tuple
iter_wrapper = (lambda x: tqdm(x, total=len(loader))) if args.tqdm else (lambda x: x)
best_valid = 0.
train_results = []
report_every = args.get("report_every", 100)
for epoch in range(args.epochs):
quesid2ans = {}
for i, batch in iter_wrapper(enumerate(loader)):
ques_id, feats, boxes, sent, tags, target = zip(*batch)
self.model.train()
self.optim.zero_grad()
target = torch.stack(target).cuda()
logit = self.model(feats, boxes, sent, tags)
assert logit.dim() == target.dim() == 2
loss = self.bce_loss(logit, target)
loss = loss * logit.size(1)  # undo the mean over the answer dimension taken by BCEWithLogitsLoss
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), 5.)
self.optim.step()
train_results.append(pd.Series({"loss":loss.detach().mean().item()}))
score, label = logit.max(1)
for qid, l in zip(ques_id, label.cpu().numpy()):
ans = dset.label2ans[l]
quesid2ans[qid] = ans
if i % report_every == 0 and i > 0:
print("Epoch: {}, Iter: {}/{}".format(epoch, i, len(loader)))
print(" {}\n~~~~~~~~~~~~~~~~~~\n".format(pd.DataFrame(train_results[-report_every:]).mean()))
log_str = "\nEpoch %d: Train %0.2f\n" % (epoch, evaluator.evaluate(quesid2ans) * 100.)
if self.valid_tuple is not None: # Do Validation
valid_score = self.evaluate(eval_tuple)
if valid_score > best_valid and not args.get("special_test", False):
best_valid = valid_score
self.save("BEST")
log_str += "Epoch %d: Valid %0.2f\n" % (epoch, valid_score * 100.) + \
"Epoch %d: Best %0.2f\n" % (epoch, best_valid * 100.)
if epoch >= 5:
self.save("Epoch{}".format(epoch))
print(log_str, end='')
print(args.output)
self.save("LAST")
def predict(self, eval_tuple: DataTuple, dump=None):
"""
Predict the answers to questions in a data split.
:param eval_tuple: The data tuple to be evaluated.
:param dump: The path of saved file to dump results.
:return: A dict of question_id to answer.
"""
self.model.eval()
dset, loader, evaluator = eval_tuple
quesid2ans = {}
for i, batch in enumerate(tqdm(loader)):
unpacked = list(zip(*batch))
ques_id, feats, boxes, sent, tags = unpacked[:5]  # a 6th element (target) may be present but is unused at prediction time
with torch.no_grad():
#target = torch.stack(target).cuda()
logit = self.model(feats, boxes, sent, tags)
score, label = logit.max(1)
for qid, l in zip(ques_id, label.cpu().numpy()):
ans = dset.label2ans[l]
quesid2ans[qid] = ans
if dump is not None:
evaluator.dump_result(quesid2ans, dump)
return quesid2ans
def evaluate(self, eval_tuple: DataTuple, dump=None):
"""Evaluate all data in data_tuple."""
quesid2ans = self.predict(eval_tuple, dump)
return eval_tuple.evaluator.evaluate(quesid2ans)
@staticmethod
def oracle_score(data_tuple):
dset, loader, evaluator = data_tuple
quesid2ans = {}
for i, (ques_id, feats, boxes, sent, target) in enumerate(loader):
_, label = target.max(1)
for qid, l in zip(ques_id, label.cpu().numpy()):
ans = dset.label2ans[l]
quesid2ans[qid.item()] = ans
return evaluator.evaluate(quesid2ans)
def save(self, name):
torch.save(self.model.state_dict(),
os.path.join(self.output, "%s.pth" % name))
def load(self, path):
print("Load model from %s" % path)
state_dict = torch.load("%s.pth" % path)
self.model.load_state_dict(state_dict)
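# Usage sketch: `load` takes the checkpoint path without the ".pth" suffix,
# matching how `save` writes it (the directory below is illustrative):
#
#   vqa.load("snap/vqa/run1/BEST")  # reads snap/vqa/run1/BEST.pth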
if __name__ == "__main__":
# Build Class
vqa = VQA()
# Load VQA model weights
# Note: It is different from loading LXMERT pre-trained weights.
if args.load is not None:
vqa.load(args.load)
# Test or Train
if args.test is not None:
args.fast = args.tiny = False # Always loading all data in test
if 'test' in args.test:
vqa.predict(
get_data_tuple(args.test, bs=64,
shuffle=False, drop_last=False),
dump=os.path.join(args.output, 'test_predict.json')
)
elif 'val' in args.test:
# Since part of the validation data is used in pre-training/fine-tuning,
# only validate on the minival set.
result = vqa.evaluate(
get_data_tuple('minival', bs=64,
shuffle=False, drop_last=False),
dump=os.path.join(args.output, 'minival_predict.json')
)
print(result)
else:
assert False, "No such test option for %s" % args.test
else:
print('Splits in Train data:', vqa.train_tuple.dataset.splits)
if vqa.valid_tuple is not None:
print('Splits in Valid data:', vqa.valid_tuple.dataset.splits)
#print("Valid Oracle: %0.2f" % (vqa.oracle_score(vqa.valid_tuple) * 100))
else:
print("DO NOT USE VALIDATION")
vqa.train(vqa.train_tuple, vqa.valid_tuple)
| 8,707 | 36.86087 | 125 | py |
visualbert | visualbert-master/unsupervised_visualbert/data/vg_gqa_imgfeat/extract_gqa_image.py | # !/usr/bin/env python
# The root of bottom-up-attention repo. Do not need to change if using provided docker file.
BUTD_ROOT = '/opt/butd/'
import os, sys
sys.path.insert(0, BUTD_ROOT + "/tools")
os.environ['GLOG_minloglevel'] = '2'
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from fast_rcnn.test import im_detect, _get_blobs
from fast_rcnn.nms_wrapper import nms
import caffe
import argparse
import pprint
import base64
import numpy as np
import cv2
import csv
from tqdm import tqdm
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ["img_id", "img_h", "img_w", "objects_id", "objects_conf",
"attrs_id", "attrs_conf", "num_boxes", "boxes", "features"]
# Settings for the number of features per image. To re-create pretrained features with 36 features
# per image, set both values to 36.
MIN_BOXES = 36
MAX_BOXES = 36
def load_image_ids(img_root):
pathXid = []
for name in os.listdir(img_root):
idx = name.split(".")[0]
pathXid.append(
(
os.path.join(img_root, name),
idx))
return pathXid
def generate_tsv(prototxt, weights, image_ids, outfile):
# First check if file exists, and if it is complete
# Note: sets are unordered, so they are used here only for membership checks.
wanted_ids = set([image_id[1] for image_id in image_ids])
found_ids = set()
if os.path.exists(outfile):
with open(outfile, "r") as tsvfile:
reader = csv.DictReader(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
for item in reader:
found_ids.add(item['img_id'])
missing = wanted_ids - found_ids
if len(missing) == 0:
print('already completed {:d}'.format(len(image_ids)))
else:
print('missing {:d}/{:d}'.format(len(missing), len(image_ids)))
if len(missing) > 0:
caffe.set_mode_gpu()
caffe.set_device(0)
net = caffe.Net(prototxt, caffe.TEST, weights=weights)
with open(outfile, 'ab') as tsvfile:
writer = csv.DictWriter(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
for im_file, image_id in tqdm(image_ids):
if image_id in missing:
try:
writer.writerow(get_detections_from_im(net, im_file, image_id))
except Exception as e:
print(e)
def get_detections_from_im(net, im_file, image_id, conf_thresh=0.2):
"""
:param net:
:param im_file: full path to an image
:param image_id:
:param conf_thresh:
:return: all information from detection and attr prediction
"""
im = cv2.imread(im_file)
scores, boxes, attr_scores, rel_scores = im_detect(net, im)
# Keep the original boxes, don't worry about the regression bbox outputs
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
blobs, im_scales = _get_blobs(im, None)
cls_boxes = rois[:, 1:5] / im_scales[0]
cls_prob = net.blobs['cls_prob'].data
attr_prob = net.blobs['attr_prob'].data
pool5 = net.blobs['pool5_flat'].data
# Keep only the best detections
max_conf = np.zeros((rois.shape[0]))
for cls_ind in range(1, cls_prob.shape[1]):
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
keep = np.array(nms(dets, cfg.TEST.NMS))
max_conf[keep] = np.where(cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep])
keep_boxes = np.where(max_conf >= conf_thresh)[0]
if len(keep_boxes) < MIN_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MIN_BOXES]
elif len(keep_boxes) > MAX_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MAX_BOXES]
objects = np.argmax(cls_prob[keep_boxes][:, 1:], axis=1)
objects_conf = np.max(cls_prob[keep_boxes][:, 1:], axis=1)
attrs = np.argmax(attr_prob[keep_boxes][:, 1:], axis=1)
attrs_conf = np.max(attr_prob[keep_boxes][:, 1:], axis=1)
return {
"img_id": image_id,
"img_h": np.size(im, 0),
"img_w": np.size(im, 1),
"objects_id": base64.b64encode(objects), # int64
"objects_conf": base64.b64encode(objects_conf), # float32
"attrs_id": base64.b64encode(attrs), # int64
"attrs_conf": base64.b64encode(attrs_conf), # float32
"num_boxes": len(keep_boxes),
"boxes": base64.b64encode(cls_boxes[keep_boxes]), # float32
"features": base64.b64encode(pool5[keep_boxes]) # float32
}
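# A helper sketch (not called by this script): invert the base64 encoding above
# for one TSV row read back with csv.DictReader(..., fieldnames=FIELDNAMES).
# Dtypes follow the comments in get_detections_from_im.
def _decode_row_example(item):
    boxes = np.frombuffer(base64.b64decode(item['boxes']), dtype=np.float32).reshape(-1, 4)
    features = np.frombuffer(base64.b64decode(item['features']), dtype=np.float32).reshape(boxes.shape[0], -1)
    objects_id = np.frombuffer(base64.b64decode(item['objects_id']), dtype=np.int64)
    objects_conf = np.frombuffer(base64.b64decode(item['objects_conf']), dtype=np.float32)
    return boxes, features, objects_id, objects_conf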
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Generate bbox output from a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id(s) to use',
default='0', type=str)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--out', dest='outfile',
help='output filepath',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--imgroot', type=str, default='/workspace/images/')
parser.add_argument('--split', type=str, default='valid')
parser.add_argument('--caffemodel', type=str, default='./resnet101_faster_rcnn_final_iter_320000.caffemodel')
args = parser.parse_args()
return args
if __name__ == '__main__':
# Setup the configuration, normally do not need to touch these:
args = parse_args()
args.cfg_file = BUTD_ROOT + "experiments/cfgs/faster_rcnn_end2end_resnet.yml" # s = 500
args.prototxt = BUTD_ROOT + "models/vg/ResNet-101/faster_rcnn_end2end_final/test.prototxt"
args.outfile = "%s_obj36.tsv" % "vg_gqa"
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
assert cfg.TEST.HAS_RPN
# Load image ids, need modification for new datasets.
image_ids = load_image_ids(args.imgroot)
# Generate TSV files; normally this does not need to be modified.
generate_tsv(args.prototxt, args.caffemodel, image_ids, args.outfile)
| 6,511 | 35.58427 | 113 | py |
visualbert | visualbert-master/unsupervised_visualbert/data/nlvr2_imgfeat/extract_nlvr2_image.py | # !/usr/bin/env python
# The root of bottom-up-attention repo. Do not need to change if using provided docker file.
BUTD_ROOT = '/opt/butd/'
# SPLIT to its folder name under IMG_ROOT
SPLIT2DIR = {
'train': 'train',
'valid': 'dev',
'test': 'test1',
'hidden': 'test2', # TODO: verify that the hidden split maps to test2
}
import os, sys
sys.path.insert(0, BUTD_ROOT + "/tools")
os.environ['GLOG_minloglevel'] = '2'
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from fast_rcnn.test import im_detect, _get_blobs
from fast_rcnn.nms_wrapper import nms
import caffe
import argparse
import pprint
import base64
import numpy as np
import cv2
import csv
from tqdm import tqdm
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ["img_id", "img_h", "img_w", "objects_id", "objects_conf",
"attrs_id", "attrs_conf", "num_boxes", "boxes", "features"]
# Settings for the number of features per image. To re-create pretrained features with 36 features
# per image, set both values to 36.
MIN_BOXES = 36
MAX_BOXES = 36
def load_image_ids(img_root, split_dir):
"""images in the same directory are in the same sequential region,
but with no internal ordering"""
pathXid = []
if split_dir == 'train':
img_root = os.path.join(img_root, split_dir)
for d in os.listdir(img_root):
dir_path = os.path.join(img_root, d)
for name in os.listdir(dir_path):
idx = name.split(".")[0]
pathXid.append(
(
os.path.join(dir_path, name),
idx))
else:
img_root = os.path.join(img_root, split_dir)
for name in os.listdir(img_root):
idx = name.split(".")[0]
pathXid.append(
(
os.path.join(img_root, name),
idx))
return pathXid
def generate_tsv(prototxt, weights, image_ids, outfile):
# First check if file exists, and if it is complete
# Note: sets are unordered, so they are used here only for membership checks.
wanted_ids = set([image_id[1] for image_id in image_ids])
found_ids = set()
if os.path.exists(outfile):
with open(outfile, "r") as tsvfile:
reader = csv.DictReader(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
for item in reader:
found_ids.add(item['img_id'])
missing = wanted_ids - found_ids
if len(missing) == 0:
print('already completed {:d}'.format(len(image_ids)))
else:
print('missing {:d}/{:d}'.format(len(missing), len(image_ids)))
if len(missing) > 0:
caffe.set_mode_gpu()
caffe.set_device(0)
net = caffe.Net(prototxt, caffe.TEST, weights=weights)
with open(outfile, 'ab') as tsvfile:
writer = csv.DictWriter(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
for im_file, image_id in tqdm(image_ids):
if image_id in missing:
try:
writer.writerow(get_detections_from_im(net, im_file, image_id))
except Exception as e:
print(e)
def get_detections_from_im(net, im_file, image_id, conf_thresh=0.2):
"""
:param net:
:param im_file: full path to an image
:param image_id:
:param conf_thresh:
:return: all information from detection and attr prediction
"""
im = cv2.imread(im_file)
scores, boxes, attr_scores, rel_scores = im_detect(net, im)
# Keep the original boxes, don't worry about the regression bbox outputs
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
blobs, im_scales = _get_blobs(im, None)
cls_boxes = rois[:, 1:5] / im_scales[0]
cls_prob = net.blobs['cls_prob'].data
attr_prob = net.blobs['attr_prob'].data
pool5 = net.blobs['pool5_flat'].data
# Keep only the best detections
max_conf = np.zeros((rois.shape[0]))
for cls_ind in range(1, cls_prob.shape[1]):
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
keep = np.array(nms(dets, cfg.TEST.NMS))
max_conf[keep] = np.where(cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep])
keep_boxes = np.where(max_conf >= conf_thresh)[0]
if len(keep_boxes) < MIN_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MIN_BOXES]
elif len(keep_boxes) > MAX_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MAX_BOXES]
objects = np.argmax(cls_prob[keep_boxes][:, 1:], axis=1)
objects_conf = np.max(cls_prob[keep_boxes][:, 1:], axis=1)
attrs = np.argmax(attr_prob[keep_boxes][:, 1:], axis=1)
attrs_conf = np.max(attr_prob[keep_boxes][:, 1:], axis=1)
return {
"img_id": image_id,
"img_h": np.size(im, 0),
"img_w": np.size(im, 1),
"objects_id": base64.b64encode(objects), # int64
"objects_conf": base64.b64encode(objects_conf), # float32
"attrs_id": base64.b64encode(attrs), # int64
"attrs_conf": base64.b64encode(attrs_conf), # float32
"num_boxes": len(keep_boxes),
"boxes": base64.b64encode(cls_boxes[keep_boxes]), # float32
"features": base64.b64encode(pool5[keep_boxes]) # float32
}
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Generate bbox output from a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id(s) to use',
default='0', type=str)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--out', dest='outfile',
help='output filepath',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--imgroot', type=str, default='/workspace/images/')
parser.add_argument('--split', type=str, default='valid')
parser.add_argument('--caffemodel', type=str, default='./resnet101_faster_rcnn_final_iter_320000.caffemodel')
args = parser.parse_args()
return args
if __name__ == '__main__':
# Setup the configuration, normally do not need to touch these:
args = parse_args()
args.cfg_file = BUTD_ROOT + "experiments/cfgs/faster_rcnn_end2end_resnet.yml" # s = 500
args.prototxt = BUTD_ROOT + "models/vg/ResNet-101/faster_rcnn_end2end_final/test.prototxt"
args.outfile = "%s_obj36.tsv" % args.split
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
assert cfg.TEST.HAS_RPN
# Load image ids, need modification for new datasets.
image_ids = load_image_ids(args.imgroot, SPLIT2DIR[args.split])
# Generate TSV files; normally this does not need to be modified.
generate_tsv(args.prototxt, args.caffemodel, image_ids, args.outfile)
| 7,358 | 35.430693 | 113 | py |
visualbert | visualbert-master/unsupervised_visualbert/data/mscoco_imgfeat/extract_coco_image.py | # !/usr/bin/env python
# The root of bottom-up-attention repo. Do not need to change if using provided docker file.
BUTD_ROOT = '/opt/butd/'
# SPLIT to its folder name under IMG_ROOT
SPLIT2DIR = {
'train': 'train2014',
'valid': 'val2014',
'test': 'test2015',
}
import os, sys
sys.path.insert(0, BUTD_ROOT + "/tools")
os.environ['GLOG_minloglevel'] = '2'
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from fast_rcnn.test import im_detect, _get_blobs
from fast_rcnn.nms_wrapper import nms
import caffe
import argparse
import pprint
import base64
import numpy as np
import cv2
import csv
from tqdm import tqdm
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ["img_id", "img_h", "img_w", "objects_id", "objects_conf",
"attrs_id", "attrs_conf", "num_boxes", "boxes", "features"]
# Settings for the number of features per image. To re-create pretrained features with 36 features
# per image, set both values to 36.
MIN_BOXES = 36
MAX_BOXES = 36
def load_image_ids(img_root, split_dir):
"""images in the same directory are in the same split"""
pathXid = []
img_root = os.path.join(img_root, split_dir)
for name in os.listdir(img_root):
idx = name.split(".")[0]
pathXid.append(
(
os.path.join(img_root, name),
idx))
return pathXid
def generate_tsv(prototxt, weights, image_ids, outfile):
# First check if file exists, and if it is complete
# Note: sets are unordered, so they are used here only for membership checks.
wanted_ids = set([image_id[1] for image_id in image_ids])
found_ids = set()
if os.path.exists(outfile):
with open(outfile, "r") as tsvfile:
reader = csv.DictReader(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
for item in reader:
found_ids.add(item['img_id'])
missing = wanted_ids - found_ids
if len(missing) == 0:
print('already completed {:d}'.format(len(image_ids)))
else:
print('missing {:d}/{:d}'.format(len(missing), len(image_ids)))
if len(missing) > 0:
caffe.set_mode_gpu()
caffe.set_device(0)
net = caffe.Net(prototxt, caffe.TEST, weights=weights)
with open(outfile, 'ab') as tsvfile:
writer = csv.DictWriter(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
for im_file, image_id in tqdm(image_ids):
if image_id in missing:
try:
writer.writerow(get_detections_from_im(net, im_file, image_id))
except Exception as e:
print(e)
def get_detections_from_im(net, im_file, image_id, conf_thresh=0.2):
"""
:param net:
:param im_file: full path to an image
:param image_id:
:param conf_thresh:
:return: all information from detection and attr prediction
"""
im = cv2.imread(im_file)
scores, boxes, attr_scores, rel_scores = im_detect(net, im)
# Keep the original boxes, don't worry about the regression bbox outputs
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
blobs, im_scales = _get_blobs(im, None)
cls_boxes = rois[:, 1:5] / im_scales[0]
cls_prob = net.blobs['cls_prob'].data
attr_prob = net.blobs['attr_prob'].data
pool5 = net.blobs['pool5_flat'].data
# Keep only the best detections
max_conf = np.zeros((rois.shape[0]))
for cls_ind in range(1, cls_prob.shape[1]):
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
keep = np.array(nms(dets, cfg.TEST.NMS))
max_conf[keep] = np.where(cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep])
keep_boxes = np.where(max_conf >= conf_thresh)[0]
if len(keep_boxes) < MIN_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MIN_BOXES]
elif len(keep_boxes) > MAX_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MAX_BOXES]
objects = np.argmax(cls_prob[keep_boxes][:, 1:], axis=1)
objects_conf = np.max(cls_prob[keep_boxes][:, 1:], axis=1)
attrs = np.argmax(attr_prob[keep_boxes][:, 1:], axis=1)
attrs_conf = np.max(attr_prob[keep_boxes][:, 1:], axis=1)
return {
"img_id": image_id,
"img_h": np.size(im, 0),
"img_w": np.size(im, 1),
"objects_id": base64.b64encode(objects), # int64
"objects_conf": base64.b64encode(objects_conf), # float32
"attrs_id": base64.b64encode(attrs), # int64
"attrs_conf": base64.b64encode(attrs_conf), # float32
"num_boxes": len(keep_boxes),
"boxes": base64.b64encode(cls_boxes[keep_boxes]), # float32
"features": base64.b64encode(pool5[keep_boxes]) # float32
}
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Generate bbox output from a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id(s) to use',
default='0', type=str)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--out', dest='outfile',
help='output filepath',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--imgroot', type=str, default='/workspace/images/')
parser.add_argument('--split', type=str, default='valid')
parser.add_argument('--caffemodel', type=str, default='./resnet101_faster_rcnn_final_iter_320000.caffemodel')
args = parser.parse_args()
return args
if __name__ == '__main__':
# Setup the configuration, normally do not need to touch these:
args = parse_args()
args.cfg_file = BUTD_ROOT + "experiments/cfgs/faster_rcnn_end2end_resnet.yml" # s = 500
args.prototxt = BUTD_ROOT + "models/vg/ResNet-101/faster_rcnn_end2end_final/test.prototxt"
args.outfile = "%s_obj36.tsv" % args.split
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
assert cfg.TEST.HAS_RPN
# Load image ids, need modification for new datasets.
image_ids = load_image_ids(args.imgroot, SPLIT2DIR[args.split])
# Generate TSV files; normally this does not need to be modified.
generate_tsv(args.prototxt, args.caffemodel, image_ids, args.outfile)
| 6,810 | 35.42246 | 113 | py |
visualbert | visualbert-master/visualbert/models/model_wrapper.py | # Handles model training (optimizer), loading, saving
import argparse
import os
import shutil
from copy import deepcopy
import multiprocessing
import numpy as np
import pandas as pd
import torch
from allennlp.common.params import Params
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.optimizers import Optimizer
from torch.nn import DataParallel
from torch.nn.modules import BatchNorm2d
from tqdm import tqdm
from allennlp.nn.util import device_mapping
from utils.pytorch_misc import time_batch, save_checkpoint, clip_grad_norm, \
restore_checkpoint, print_para, restore_best_checkpoint, load_state_dict_flexible
from visualbert.pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG)
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
from allennlp.models import Model
class ModelWrapper():
def __init__(self, args, train_dataset_length):
self.scheduler = None
self.args = args
self.args.gradient_accumulation_steps = args.get("gradient_accumulation_steps", 1)
self.args.fp16 = args.get("fp16", False)
self.initialize_model(args)
self.initialize_opimizer(args, train_dataset_length)
self.global_step = 0
self.called_time = 0
def train(self):
self.model.train()
def eval(self):
self.model.eval()
def step(self, batch, eval_mode=False):
if eval_mode:
with torch.no_grad():
output_dict = self.model(**batch)
if output_dict['loss'] is not None:
loss = output_dict['loss'].mean()
output_dict['loss'] = loss
return output_dict
self.optimizer.zero_grad()
output_dict = self.model(**batch)
loss = output_dict['loss']
cnn_loss = output_dict.get("cnn_regularization_loss", None)
if cnn_loss is not None and self.model.module.cnn_loss_ratio != 0:
loss = loss + cnn_loss * self.model.module.cnn_loss_ratio
output_dict['cnn_regularization_loss'] = cnn_loss.mean().item()
loss = loss.mean() # This is because on MultiGPU, loss is a tensor of size GPU_NUM
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
if self.args.get("fp16", False):
self.optimizer.backward(loss)
else:
loss.backward()
if (self.called_time + 1) % self.args.gradient_accumulation_steps == 0:
if self.args.fp16:
# modify learning rate with special warm up BERT uses
# if args.fp16 is False, BertAdam is used and handles this automatically
lr_this_step = self.args.learning_rate * self.warmup_linear.get_lr(self.global_step, self.args.warmup_proportion)
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr_this_step
self.optimizer.step()
self.global_step += 1
self.called_time += 1
return output_dict
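# Usage sketch: step() is called once per batch; gradients accumulate and
# optimizer.step() only fires every args.gradient_accumulation_steps calls:
#
#   wrapper = ModelWrapper(args, train_dataset_length=len(train_data))
#   for batch in loader:
#       out = wrapper.step(batch)                 # training update
#       val = wrapper.step(batch, eval_mode=True) # forward only, no grad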
def initialize_opimizer(self, args, train_dataset_length):
param_optimizer = list(self.model.named_parameters())
# Hack to remove the pooler, which is not used;
# otherwise it produces None gradients that break apex.
param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ]
num_train_optimization_steps = int(
train_dataset_length / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
self.num_train_optimization_steps = num_train_optimization_steps
if args.get("fp16", False):
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
self.optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
self.optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
self.warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
else:
self.optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
def initialize_model(self, args):
model = Model.from_params(vocab=None, params=Params(args.model))
if args.get("fp16", False):
model.half()
print("Using FP 16, Model Halfed")
self.model = DataParallel(model).cuda()
def load_state_dict(self, state_dict_to_load):
if isinstance(self.model, DataParallel):
load_state_dict_flexible(self.model, state_dict_to_load["model"])
load_state_dict_flexible(self.optimizer, state_dict_to_load["optimizer"])
def state_dict(self):
if isinstance(self.model, DataParallel):
save_dict = {"model":self.model.module.state_dict(),
"optimizer":self.optimizer.state_dict()}
else:
save_dict = {"model":self.model.state_dict(),
"optimizer":self.optimizer.state_dict()}
return save_dict
def save_checkpoint(self, serialization_dir, epoch, val_metric_per_epoch, is_best = False):
assert(serialization_dir)
model_path = os.path.join(serialization_dir, "model_state_epoch_{}.th".format(epoch))
model_state = self.model.module.state_dict() if isinstance(self.model, DataParallel) else self.model.state_dict()
torch.save(model_state, model_path)
training_state = {'epoch': epoch,
'val_metric_per_epoch': val_metric_per_epoch,
'optimizer': self.optimizer.state_dict()
}
training_path = os.path.join(serialization_dir,
"training_state_epoch_{}.th".format(epoch))
torch.save(training_state, training_path)
if is_best:
print("Best validation performance so far. Copying weights to '{}/best.th'.".format(serialization_dir))
shutil.copyfile(model_path, os.path.join(serialization_dir, "best.th"))
def save_checkpoint_step(self, serialization_dir, step, epoch, is_best = False):
assert(serialization_dir)
model_path = os.path.join(serialization_dir, "model_step_{}_epoch_{}.th".format(step, epoch))
model_state = self.model.module.state_dict() if isinstance(self.model, DataParallel) else self.model.state_dict()
torch.save(model_state, model_path)
training_state = {'step': step,
'epoch': epoch,
'val_metric_per_epoch': None,
'optimizer': self.optimizer.state_dict()
}
training_path = os.path.join(serialization_dir,
"training_step_{}_epoch_{}.th".format(step, epoch))
torch.save(training_state, training_path)
def restore_checkpoint(self, serialization_dir, epoch_to_load):
# Restore from a training dir
return restore_checkpoint(self.model, self.optimizer, serialization_dir, epoch_to_load)
def restore_checkpoint_pretrained(self, restore_bin):
# Restore from a given model path
state_dict = torch.load(restore_bin, map_location=device_mapping(-1))
if isinstance(self.model, DataParallel):
model_to_load = self.model.module
else:
model_to_load = self.model
own_state = model_to_load.state_dict()
for name, param in state_dict.items():
if name not in own_state:
print("Skipped:" + name)
continue
if isinstance(param, torch.nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
own_state[name].copy_(param)
print("Successfully loaded: "+name)
            except Exception:
                print("Partial load failed: " + name)
def freeze_detector(self):
if hasattr(self.model.module, "detector"):
detector = self.model.module.detector
for submodule in detector.backbone.modules():
if isinstance(submodule, BatchNorm2d):
submodule.track_running_stats = False
for p in submodule.parameters():
p.requires_grad = False
else:
print("No detector found.")
@staticmethod
def read_and_insert_args(args, confg):
import commentjson
from attrdict import AttrDict
with open(confg) as f:
config_json = commentjson.load(f)
dict_args = vars(args)
config_json.update(dict_args)
args = AttrDict(config_json)
args.model.bert_model_name = args.bert_model_name
return args
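# --- Illustrative sketch, not part of the original class: the gradient
# accumulation rhythm used in ModelWrapper.step above. `model`, `optimizer`
# and `batches` are hypothetical stand-ins; only the scale/step pattern
# mirrors the code.
def _demo_gradient_accumulation(model, optimizer, batches, accumulation_steps=4):
    for i, (x, y) in enumerate(batches):
        loss = torch.nn.functional.cross_entropy(model(x), y)
        # Scale so the accumulated gradients match one large-batch backward.
        (loss / accumulation_steps).backward()
        if (i + 1) % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()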
| 10,127 | 39.674699 | 134 | py |
visualbert | visualbert-master/visualbert/models/model.py | # Modified from VCR.
from typing import Dict, List, Any
import os
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.parallel
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import TextFieldEmbedder, Seq2SeqEncoder, FeedForward, InputVariationalDropout, TimeDistributed
from allennlp.training.metrics import CategoricalAccuracy, Average
from allennlp.modules.matrix_attention import BilinearMatrixAttention
from allennlp.nn.util import masked_softmax, weighted_sum, replace_masked_values
from allennlp.nn import InitializerApplicator
from pytorch_pretrained_bert.modeling import BertForMultipleChoice, TrainVisualBERTObjective #BertForMultipleChoice, BertForVisualMultipleChoice, BertForVisualPreTraining, BertForPreTraining, BertForVisualQA
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
@Model.register("VisualBERTDetector")
class VisualBERTDetector(Model):
def __init__(self,
vocab: Vocabulary,
class_embs: bool=True,
bert_model_name: str="bert-base-uncased",
cnn_loss_ratio: float=0.0,
special_visual_initialize: bool=False,
text_only: bool=False,
visual_embedding_dim: int=512,
hard_cap_seq_len: int=None,
cut_first: str='text',
embedding_strategy: str='plain',
random_initialize: bool=False,
training_head_type: str="pretraining",
bypass_transformer: bool=False,
pretrained_detector: bool=True,
output_attention_weights: bool=False
):
super(VisualBERTDetector, self).__init__(vocab)
from utils.detector import SimpleDetector
self.detector = SimpleDetector(pretrained=pretrained_detector, average_pool=True, semantic=class_embs, final_dim=512)
##################################################################################################
self.bert = TrainVisualBERTObjective.from_pretrained(
bert_model_name,
cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(-1)),
training_head_type = training_head_type,
visual_embedding_dim = visual_embedding_dim,
hard_cap_seq_len = hard_cap_seq_len,
cut_first = cut_first,
embedding_strategy = embedding_strategy,
bypass_transformer = bypass_transformer,
random_initialize = random_initialize,
output_attention_weights = output_attention_weights)
if special_visual_initialize:
self.bert.bert.embeddings.special_intialize()
self.training_head_type = training_head_type
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
self.cnn_loss_ratio = cnn_loss_ratio
def _collect_obj_reps(self, span_tags, object_reps):
"""
Collect span-level object representations
:param span_tags: [batch_size, ..leading_dims.., L]
:param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
:return:
"""
span_tags_fixed = torch.clamp(span_tags, min=0) # In case there were masked values here
row_id = span_tags_fixed.new_zeros(span_tags_fixed.shape)
row_id_broadcaster = torch.arange(0, row_id.shape[0], step=1, device=row_id.device)[:, None]
        # Add extra dimensions to the row broadcaster so it matches row_id
leading_dims = len(span_tags.shape) - 2
for i in range(leading_dims):
row_id_broadcaster = row_id_broadcaster[..., None]
row_id += row_id_broadcaster
return object_reps[row_id.view(-1), span_tags_fixed.view(-1)].view(*span_tags_fixed.shape, -1)
    # Note: leftover from the original VCR model; relies on self.rnn_input_dropout
    # and self.span_encoder, which are not defined in this class.
    def embed_span(self, span, span_tags, span_mask, object_reps):
"""
:param span: Thing that will get embed and turned into [batch_size, ..leading_dims.., L, word_dim]
:param span_tags: [batch_size, ..leading_dims.., L]
:param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
        :param span_mask: [batch_size, ..leading_dims.., L]
:return:
"""
retrieved_feats = self._collect_obj_reps(span_tags, object_reps)
span_rep = torch.cat((span['bert'], retrieved_feats), -1)
# add recurrent dropout here
if self.rnn_input_dropout:
span_rep = self.rnn_input_dropout(span_rep)
return self.span_encoder(span_rep, span_mask), retrieved_feats
def forward(self,
images: torch.Tensor = None,
objects: torch.LongTensor = None,
segms: torch.Tensor = None,
boxes: torch.Tensor = None,
box_mask: torch.LongTensor = None,
question: Dict[str, torch.Tensor] = None,
question_tags: torch.LongTensor = None,
question_mask: torch.LongTensor = None,
answers: Dict[str, torch.Tensor] = None,
answer_tags: torch.LongTensor = None,
answer_mask: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None,
label: torch.LongTensor = None,
bert_input_ids: torch.LongTensor = None,
bert_input_mask: torch.LongTensor = None,
bert_input_type_ids: torch.LongTensor = None,
masked_lm_labels: torch.LongTensor = None,
is_random_next: torch.LongTensor= None,
image_text_alignment: torch.LongTensor = None,
output_all_encoded_layers = False) -> Dict[str, torch.Tensor]:
        # Trim off boxes that are too long. This matters with DataParallel,
        # which would otherwise pad extra zeros that are not needed.
max_len = int(box_mask.sum(1).max().item())
objects = objects[:, :max_len]
box_mask = box_mask[:, :max_len]
boxes = boxes[:, :max_len]
segms = segms[:, :max_len]
'''for tag_type, the_tags in (('question', question_tags), ('answer', answer_tags)):
if int(the_tags.max()) > max_len:
raise ValueError("Oh no! {}_tags has maximum of {} but objects is of dim {}. Values are\n{}".format(
tag_type, int(the_tags.max()), objects.shape, the_tags
))'''
obj_reps = self.detector(images=images, boxes=boxes, box_mask=box_mask, classes=objects, segms=segms)
#print("obj_reps", obj_reps['obj_reps'].size())
#print("bert_input_ids", bert_input_ids.size())
#print("box_mask", box_mask.size())
if len(bert_input_ids.size()) == 2: # Using complete shuffle mode
obj_reps_expanded = obj_reps['obj_reps']
box_mask_expanded = box_mask
else:
obj_reps_expanded = obj_reps['obj_reps'].unsqueeze(1).expand(box_mask.size(0), bert_input_mask.size(1), box_mask.size(-1), obj_reps['obj_reps'].size(-1))
box_mask_expanded = box_mask.unsqueeze(1).expand(box_mask.size(0), bert_input_mask.size(1), box_mask.size(-1))
#bert_input_mask = torch.cat((bert_input_mask, box_mask_expanded), dim = -1)
output_dict = self.bert(
input_ids = bert_input_ids,
token_type_ids = bert_input_type_ids,
input_mask = bert_input_mask,
visual_embeddings = obj_reps_expanded,
position_embeddings_visual = None,
image_mask = box_mask_expanded,
visual_embeddings_type = None,
image_text_alignment = image_text_alignment,
label = label,
masked_lm_labels = masked_lm_labels,
is_random_next = is_random_next,
output_all_encoded_layers = output_all_encoded_layers)
#class_probabilities = F.softmax(logits, dim=-1)
cnn_loss = obj_reps['cnn_regularization_loss']
if self.cnn_loss_ratio == 0.0:
output_dict["cnn_regularization_loss"] = None
else:
output_dict["cnn_regularization_loss"] = cnn_loss * self.cnn_loss_ratio
# Multi-process safe??
if label is not None and self.training_head_type != "pretraining":
logits = output_dict["logits"]
logits = logits.detach().float()
label = label.float()
self._accuracy(logits, label)
if self.training_head_type == "pretraining":
output_dict["logits"] = None # Because every image may has different number of image features, the lengths of the logits on different GPUs will be different. This will cause DataParallel to throw errors.
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {'accuracy': self._accuracy.get_metric(reset)}
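# --- Illustrative sketch, not part of the original model: the advanced
# indexing used by _collect_obj_reps above, on toy shapes. All values here
# are made up.
def _demo_collect_obj_reps():
    batch_size, num_objs, obj_dim, seq_len = 2, 5, 4, 3
    object_reps = torch.randn(batch_size, num_objs, obj_dim)
    span_tags = torch.tensor([[0, 2, -1], [1, 1, 4]])  # -1 marks padding
    span_tags_fixed = torch.clamp(span_tags, min=0)
    row_id = torch.arange(batch_size)[:, None].expand_as(span_tags_fixed)
    # Each (row, tag) pair selects one object vector per token.
    feats = object_reps[row_id.reshape(-1), span_tags_fixed.reshape(-1)]
    return feats.view(batch_size, seq_len, obj_dim)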
@Model.register("VisualBERTFixedImageEmbedding")
class VisualBERTFixedImageEmbedding(Model):
def __init__(self,
vocab: Vocabulary,
class_embs: bool=True,
bert_model_name: str="bert-base-uncased",
cnn_loss_ratio: float=0.0,
special_visual_initialize: bool=False,
text_only: bool=False,
training_head_type: str='',
visual_embedding_dim: int=512,
hard_cap_seq_len: int=None,
cut_first: str='text',
embedding_strategy: str='plain',
random_initialize: bool=False,
bypass_transformer: bool=False,
output_attention_weights: bool=False
):
super(VisualBERTFixedImageEmbedding, self).__init__(vocab)
self.text_only = text_only
self.training_head_type = training_head_type
self.bert = TrainVisualBERTObjective.from_pretrained(
bert_model_name,
cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(-1)),
training_head_type = training_head_type,
visual_embedding_dim = visual_embedding_dim,
hard_cap_seq_len = hard_cap_seq_len,
cut_first = cut_first,
embedding_strategy = embedding_strategy,
bypass_transformer = bypass_transformer,
random_initialize = random_initialize,
output_attention_weights = output_attention_weights)
if special_visual_initialize:
self.bert.bert.embeddings.special_intialize()
if self.training_head_type == "nlvr" or self.training_head_type == "multichoice":
self._accuracy = CategoricalAccuracy()
if "vqa" in self.training_head_type:
self._accuracy = Average()
if self.training_head_type == "flickr":
self._accuracy = Average()
def forward(self,
#bert text input
bert_input_ids,
bert_input_mask,
bert_input_type_ids,
# image input
image_dim_variable = None,
image_feat_variable = None,
#
image_text_alignment = None,
visual_embeddings_type = None,
# fine-tuning label
label = None,
flickr_position = None, # For flickr we also need to provide the position
                # pretraining labels
masked_lm_labels = None,
is_random_next = None,
output_all_encoded_layers = False
) -> Dict[str, torch.Tensor]:
# image_feat_variable = batch x ( num_choice x ) image_feature_length x dim
# Prepare Mask
if image_feat_variable is not None:
image_mask = torch.arange(image_feat_variable.size(-2)).expand(*image_feat_variable.size()[:-1]).cuda()
if len(image_dim_variable.size()) < len(image_mask.size()):
image_dim_variable = image_dim_variable.unsqueeze(-1)
assert(len(image_dim_variable.size()) == len(image_mask.size()))
image_mask = image_mask < image_dim_variable
image_mask = image_mask.long()
else:
image_mask = None
output_dict = self.bert(
input_ids = bert_input_ids,
token_type_ids = bert_input_type_ids,
input_mask = bert_input_mask,
visual_embeddings = image_feat_variable,
position_embeddings_visual = None,
image_mask = image_mask,
visual_embeddings_type = visual_embeddings_type,
image_text_alignment = image_text_alignment,
label = label,
flickr_position = flickr_position,
masked_lm_labels = masked_lm_labels,
is_random_next = is_random_next,
output_all_encoded_layers = output_all_encoded_layers)
if self.training_head_type == "nlvr" or self.training_head_type == "multichoice":
logits = output_dict["logits"]
self._accuracy(logits, label)
# Multi-process safe??
if "vqa" in self.training_head_type or self.training_head_type == "flickr":
if output_dict["accuracy"] is not None:
self._accuracy(output_dict["accuracy"])
output_dict["cnn_regularization_loss"] = None
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
if self.training_head_type == "nlvr" or self.training_head_type == "multichoice" or "vqa" in self.training_head_type or self.training_head_type == "flickr":
return {'accuracy': self._accuracy.get_metric(reset)}
return {'accuracy': 0.0}
@staticmethod
def compute_score_with_logits(logits, labels):
logits = masked_unk_softmax(logits, 1, 0)
logits = torch.max(logits, 1)[1].data # argmax
one_hots = torch.zeros(*labels.size())
        one_hots = one_hots.cuda() if labels.is_cuda else one_hots
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores
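# --- Illustrative sketch, not part of the original models: the
# length-to-mask trick from VisualBERTFixedImageEmbedding.forward and the
# one-hot scoring from compute_score_with_logits, on CPU with toy data.
def _demo_mask_and_score():
    image_feat = torch.randn(2, 4, 8)       # batch x max_feats x dim
    image_dim = torch.tensor([[3], [2]])    # true number of features
    image_mask = torch.arange(image_feat.size(-2)).expand(2, 4)
    image_mask = (image_mask < image_dim).long()  # [[1,1,1,0],[1,1,0,0]]
    logits = torch.randn(2, 5)
    labels = torch.rand(2, 5)               # soft VQA-style labels
    one_hots = torch.zeros_like(labels)
    one_hots.scatter_(1, logits.argmax(1, keepdim=True), 1)
    scores = one_hots * labels              # credit for the argmax answer
    return image_mask, scores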
class SimpleReportMetric():
def __init__(self):
self.total = 0.0
self.called_time = 0
def __call__(self, number, *args):
if isinstance(number, torch.Tensor):
number = number.item()
self.total += number
self.called_time += 1
    def get_metric(self, reset):
        if self.called_time == 0:
            return 0.0
        metric = self.total / self.called_time
        if reset:
            self.total = 0.0
            self.called_time = 0
        return metric
| 14,578 | 42.912651 | 215 | py |
visualbert | visualbert-master/visualbert/models/train.py | """
Training script. Should be pretty adaptable to whatever.
"""
import argparse
import os
import shutil
from copy import deepcopy
import multiprocessing
import numpy as np
import pandas as pd
import torch
from allennlp.common.params import Params
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.optimizers import Optimizer
from torch.nn import DataParallel
from torch.nn.modules import BatchNorm2d
from tqdm import tqdm
from allennlp.nn.util import device_mapping
from visualbert.utils.pytorch_misc import time_batch, save_checkpoint, clip_grad_norm, \
restore_checkpoint, print_para, restore_best_checkpoint, restore_checkpoint_flexible, load_state_dict_flexible, compute_score_with_logits
from visualbert.dataloaders.vcr import VCR, VCRLoader
try:
from visualbert.dataloaders.coco_dataset import COCODataset
except:
print("Import COCO dataset failed.")
try:
from visualbert.dataloaders.nlvr_dataset import NLVRDataset
except:
print("Import NLVR2 dataset failed.")
try:
from visualbert.dataloaders.vqa_dataset import VQADataset
except:
print("Import VQA dataset failed.")
try:
    from visualbert.dataloaders.flickr_dataset import Flickr30kFeatureDataset
except:
    print("Import Flickr30K dataset failed.")
try:
    # Module path assumed from the naming pattern above; needed by the
    # "wiki" branch of get_dataset_loader below.
    from visualbert.dataloaders.wiki_dataset import WikiDataset
except:
    print("Import Wiki dataset failed.")
from pytorch_pretrained_bert.optimization import BertAdam
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG)
'''import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (40960, rlimit[1]))
print("Setting to 40960")
except:
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))'''
from allennlp.models import Model
from visualbert.models.model_wrapper import ModelWrapper
from visualbert.models import model
#################################
from attrdict import AttrDict
parser = argparse.ArgumentParser(description='train')
parser.add_argument(
'-folder',
dest='folder',
help='folder location',
type=str,
)
parser.add_argument(
'-no_tqdm',
dest='no_tqdm',
action='store_true',
)
parser.add_argument(
'-config',
dest='config',
help='config location',
type=str,
)
args = parser.parse_args()
args = ModelWrapper.read_and_insert_args(args, args.config)
#####################################################
if os.path.exists(args.folder):
create_flag = 0
else:
create_flag = 1
print("Making directories")
os.makedirs(args.folder, exist_ok=True)
import sys
run_log_counter = 0
while(os.path.exists(args.folder + '/run_{}.log'.format(run_log_counter))):
run_log_counter += 1
file_log = open(args.folder + '/run_{}.log'.format(run_log_counter),'w')  # File where the run's logs are kept
file_log.write("")
class Unbuffered:
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
file_log.write(data) # Write the data of stdout here to a text file as well
def flush(self):
pass
sys.stdout = Unbuffered(sys.stdout)
NUM_GPUS = torch.cuda.device_count()
NUM_CPUS = multiprocessing.cpu_count()
if NUM_GPUS == 0:
raise ValueError("you need gpus!")
def _to_gpu(td):
if args.get("fp16", False):
_to_fp16(td)
if NUM_GPUS > 1:
return td
for k in td:
if k != 'metadata':
if td[k] is not None:
td[k] = {k2: v.cuda(non_blocking=True) for k2, v in td[k].items()} if isinstance(td[k], dict) else td[k].cuda(non_blocking=True)
return td
def _to_fp16(td):
for k in td:
if isinstance(td[k], torch.FloatTensor):
td[k] = td[k].to(dtype=torch.float16)
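# --- Illustrative sketch, not part of the original script: the same
# move-to-device logic as _to_gpu above, written device-agnostically so it
# also runs on CPU. The batch layout is a hypothetical example.
def _demo_batch_to_device(batch, device):
    out = {}
    for k, v in batch.items():
        if k == 'metadata' or v is None:
            out[k] = v
        elif isinstance(v, dict):
            out[k] = {k2: v2.to(device, non_blocking=True) for k2, v2 in v.items()}
        else:
            out[k] = v.to(device, non_blocking=True)
    return out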
num_workers = args.get("num_workers", 2)
val_workers = args.get("val_workers", 0)
TEST_DATA_READING = False
if TEST_DATA_READING:
num_workers = 0
print(f"Using {num_workers} workers out of {NUM_CPUS} possible", flush=True)
loader_params = {'batch_size': args.train_batch_size // NUM_GPUS, 'num_gpus':NUM_GPUS, 'num_workers':num_workers}
def get_dataset_loader(args, dataset_name):
    # VCR has its own split helper; the other datasets share a common splits(args) interface.
if dataset_name == "vcr":
train, val, test = VCR.splits(
mode='rationale' if args.rationale else 'answer',
only_use_relevant_dets = args.get('only_use_relevant_dets', True),
do_lower_case = args.do_lower_case,
bert_model_name = args.bert_model_name,
max_seq_length = args.max_seq_length,
pretraining = args.pretraining,
pretraining_include_qa_and_qar = args.pretraining_include_qa_and_qar,
complete_shuffle = args.get("complete_shuffle", False),
use_alignment = args.get('use_alignment', False),
add_all_features = args.add_all_features,
answer_labels_path = args.get("answer_labels_path", None),
vcr_annots_dir = args.vcr_annots_dir,
vcr_image_dir = args.vcr_image_dir
)
elif dataset_name == "coco":
train, val, test = COCODataset.splits(args)
elif dataset_name == "nlvr":
train, val, test = NLVRDataset.splits(args)
elif dataset_name == "vqa":
train, val, test = VQADataset.splits(args)
elif dataset_name == "wiki":
train, val, test = WikiDataset.splits(args)
elif dataset_name == "flickr":
train, val, test = Flickr30kFeatureDataset.splits(args)
else:
assert(0)
loader_params = {'batch_size': args.train_batch_size // NUM_GPUS, 'num_gpus':NUM_GPUS, 'num_workers':num_workers}
train_loader_params = deepcopy(loader_params)
val_loader_params = deepcopy(loader_params)
val_loader_params["num_workers"] = val_workers
test_loader_params = deepcopy(loader_params)
test_loader_params["num_workers"] = val_workers
train_loader = VCRLoader.from_dataset(train, **train_loader_params)
val_loader = VCRLoader.from_dataset(val, **val_loader_params)
test_loader = VCRLoader.from_dataset(test, **test_loader_params)
train_set_size = len(train)
return train_loader, val_loader, test_loader, train_set_size
train_loader, val_loader, test_loader, train_set_size = get_dataset_loader(args, args.dataset)
ARGS_RESET_EVERY = args.get("print_every", 100)
train_model = ModelWrapper(args, train_set_size)
#Loading from pre-trained model
if args.restore_bin:
train_model.restore_checkpoint_pretrained(args.restore_bin)
#Loading from previous checkpoint
if create_flag == 0:
start_epoch, val_metric_per_epoch = train_model.restore_checkpoint(serialization_dir=args.folder, epoch_to_load = args.get("epoch_to_load", None))
if val_metric_per_epoch is None:
val_metric_per_epoch = []
else:
create_flag = 1
start_epoch, val_metric_per_epoch = 0, []
shutil.copy2(args.config, args.folder) # Always copy the config
if args.get("freeze_detector", True):
train_model.freeze_detector()
param_shapes = print_para(train_model.model)
print(args)
print("########### Starting from {}".format(start_epoch))
num_batches = 0
stop_epoch = args.num_train_epochs
save_every = args.get("save_every", None)
for epoch_num in range(start_epoch, stop_epoch):
train_results = []
norms = []
train_model.model.train()
if not args.get("skip_training", False):
for b, (time_per_batch, batch) in enumerate(time_batch(tqdm(train_loader), reset_every=ARGS_RESET_EVERY)):
batch = _to_gpu(batch)
output_dict = train_model.step(batch)
num_batches += 1
train_results.append(pd.Series({'loss': output_dict['loss'].mean().item(),
'crl': output_dict.get("cnn_regularization_loss", 0.0),
'next_sentence_loss': output_dict["next_sentence_loss"].mean().item() if "next_sentence_loss" in output_dict else 0.0,
'masked_lm_loss': output_dict["masked_lm_loss"].mean().item() if "masked_lm_loss" in output_dict else 0.0,
'accuracy': (train_model.model.module).get_metrics(
reset=(b % ARGS_RESET_EVERY) == 0)[
'accuracy'],
'sec_per_batch': time_per_batch,
'hr_per_epoch': len(train_loader) * time_per_batch / 3600,
}))
if b % ARGS_RESET_EVERY == 0 and b > 0:
print("e{:2d}b{:5d}/{:5d}. \nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format(
epoch_num, b, len(train_loader),
pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(),
), flush=True)
if save_every is not None and b % save_every == 0 and b != 0:
train_model.save_checkpoint_step(args.folder, b, epoch_num)
print("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean()))
try:
### This is the eval part
val_probs = []
val_labels = []
val_size = 0.0
val_loss_sum = 0.0
val_acc = 0.0
val_acc_upper = 0.0
val_instance_counter = 0.0
val_next_sentence_loss_sum = 0.0
train_model.eval()
val_counter = 0
############ Different reporting parameters
# for vqa, nlvr, flickr
do_test = args.get("do_test", False) ## This one is for vqa
if do_test:
val_loader = test_loader
val_dataset = val_loader.dataset
vcr_save_result = args.get("vcr_save_result", False) # This one is for vcr
for b, (time_per_batch, batch) in enumerate(time_batch(val_loader if args.no_tqdm else tqdm(val_loader), reset_every=ARGS_RESET_EVERY)):
with torch.no_grad():
batch = _to_gpu(batch)
output_dict = train_model.step(batch, eval_mode = True)
if not args.pretraining:
# Pretty clumsy code
if args.model.training_head_type == "vqa":
val_probs.append(output_dict['logits'].detach().cpu())
if not do_test:
val_labels.append(batch['label'].detach().cpu())
elif args.model.training_head_type == "flickr":
# This is because of multi-GPU
val_acc += (output_dict["accuracy"] * output_dict["entity_num"].float()).sum(-1).item()
val_acc_upper += (output_dict["upperbound_accuracy"] * output_dict["entity_num"].float()).sum(-1).item()
val_instance_counter += output_dict["entity_num"].sum(-1).item()
elif args.model.training_head_type == "multichoice":
val_probs.append(output_dict['logits'].detach().cpu().numpy())
if not do_test:
val_labels.append(batch['label'].detach().cpu().numpy())
elif args.model.training_head_type == "nlvr":
val_probs.append(output_dict['logits'].detach().cpu().numpy())
val_labels.append(batch['label'].detach().cpu().numpy())
else:
val_labels.append(batch['label'].detach().cpu().numpy())
if not do_test:
val_loss_sum += output_dict['loss'].mean().item() * batch['label'].size(0)
val_counter += batch['label'].size(0)
if "next_sentence_loss" in output_dict:
val_next_sentence_loss_sum += output_dict['next_sentence_loss'].mean().item() * batch['label'].size(0)
if not args.pretraining:
if args.model.training_head_type == "vqa":
if do_test:
val_probs = np.concatenate(val_probs, 0)
val_probs = torch.Tensor(val_probs)
val_probs = val_probs.squeeze(1)
val_dataset.generate_test_file(val_probs, os.path.join(args.folder, "result.json"))
print("Finished testing")
assert(0)
else:
val_labels = np.concatenate(val_labels, 0)
val_probs = np.concatenate(val_probs, 0)
val_probs = torch.Tensor(val_probs)
val_labels = torch.Tensor(val_labels)
val_probs = val_probs.squeeze(1)
acc = torch.sum(compute_score_with_logits(val_probs, val_labels)) / val_labels.size(0)
acc = acc.squeeze(-1).item()
elif args.model.training_head_type == "flickr":
acc = val_acc / val_instance_counter
val_acc_upper = val_acc_upper / val_instance_counter
print("Upper bound: {:.5f}".format(val_acc_upper))
elif args.model.training_head_type == "multichoice": #VCR
if not do_test:
val_labels = np.concatenate(val_labels, 0)
val_probs = np.concatenate(val_probs, 0)
if vcr_save_result:
if do_test:
file_name = "test"
else:
file_name = "val"
save_file_name = os.path.join(args.folder, file_name + "_qa.np")
if args.rationale:
save_file_name = os.path.join(args.folder, file_name + "_qar.np")
if do_test:
np.save(save_file_name, val_probs)
else:
np.savez(save_file_name+'z', val_probs=val_probs, val_labels=val_labels)
#np.save(save_file_name, (val_probs, val_labels))
print("Saved result to {}".format(save_file_name))
assert(0)
acc = float(np.mean(val_labels == val_probs.argmax(1)))
elif args.model.training_head_type == "nlvr":
val_labels = np.concatenate(val_labels, 0)
val_probs = np.concatenate(val_probs, 0)
if args.get("report", False):
val_probs = val_probs.argmax(1)
assert(val_probs.shape[0]) == len(val_dataset)
result = []
for index, i in enumerate(val_dataset.items):
label = "True" if val_probs[index] == 1 else "False"
result.append(i["identifier"] + "," + label)
with open(os.path.join(args.folder, "results.csv"), "w") as f:
f.write("\n".join(result))
assert(0)
acc = float(np.mean(val_labels == val_probs.argmax(1)))
if not do_test:
val_loss_avg = val_loss_sum / val_counter
print("Val epoch {} has acc {:.5f} and loss {:.5f}".format(epoch_num, acc, val_loss_avg), flush=True)
else:
print("Val epoch {} has acc {:.5f}".format(epoch_num, acc), flush=True)
assert(0)
val_metric_per_epoch.append(acc)
else:
val_loss_avg = val_loss_sum / val_counter
val_next_sentence_loss_avg = val_next_sentence_loss_sum / val_counter
print("Val epoch {} has loss {:.5f}, next sentence loss {:.5f}".format(epoch_num, val_loss_avg, val_next_sentence_loss_avg), flush=True)
val_metric_per_epoch.append(-val_loss_avg)
if int(np.argmax(val_metric_per_epoch)) < (len(val_metric_per_epoch) - 1 - args.patience):
print("Stopping at epoch {:2d}".format(epoch_num))
break
############### Save model
if not args.get("skip_training", False):
train_model.save_checkpoint(args.folder, epoch_num, val_metric_per_epoch, is_best=int(np.argmax(val_metric_per_epoch)) == (len(val_metric_per_epoch) - 1))
except KeyboardInterrupt:
if not args.get("skip_training", False):
train_model.save_checkpoint(args.folder, epoch_num, None, is_best=False)
print("Something Went Wrong with Evaluation. Stopped.")
assert(0)
except:
if not args.get("skip_training", False):
train_model.save_checkpoint(args.folder, epoch_num, None, is_best=False)
print("Something Went Wrong with Evaluation. Ignored.")
if args.get("skip_training", False):
assert(0)
| 16,973 | 39.901205 | 166 | py |
visualbert | visualbert-master/visualbert/pytorch_pretrained_bert/optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
# Consistent with HuggingFace BERT version 3fc63f126ddf883ba9659f13ec046c3639db7b7e
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
import logging
import abc
import sys
logger = logging.getLogger(__name__)
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta('ABC', (), {})
class _LRSchedule(ABC):
""" Parent of all LRSchedules here. """
warn_t_total = False # is set to True for schedules where progressing beyond t_total steps doesn't make sense
def __init__(self, warmup=0.002, t_total=-1, **kw):
"""
:param warmup: what fraction of t_total steps will be used for linear warmup
:param t_total: how many training steps (updates) are planned
:param kw:
"""
super(_LRSchedule, self).__init__(**kw)
if t_total < 0:
logger.warning("t_total value of {} results in schedule not being applied".format(t_total))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
warmup = max(warmup, 0.)
self.warmup, self.t_total = float(warmup), float(t_total)
self.warned_for_t_total_at_progress = -1
def get_lr(self, step, nowarn=False):
"""
:param step: which of t_total steps we're on
:param nowarn: set to True to suppress warning regarding training beyond specified 't_total' steps
:return: learning rate multiplier for current update
"""
if self.t_total < 0:
return 1.
progress = float(step) / self.t_total
ret = self.get_lr_(progress)
        # warning for exceeding t_total (only active with warmup_linear)
if not nowarn and self.warn_t_total and progress > 1. and progress > self.warned_for_t_total_at_progress:
logger.warning(
"Training beyond specified 't_total'. Learning rate multiplier set to {}. Please set 't_total' of {} correctly."
.format(ret, self.__class__.__name__))
self.warned_for_t_total_at_progress = progress
# end warning
return ret
@abc.abstractmethod
def get_lr_(self, progress):
"""
:param progress: value between 0 and 1 (unless going beyond t_total steps) specifying training progress
:return: learning rate multiplier for current update
"""
return 1.
class ConstantLR(_LRSchedule):
def get_lr_(self, progress):
return 1.
class WarmupCosineSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Decreases learning rate from 1. to 0. over remaining `1 - warmup` steps following a cosine curve.
If `cycles` (default=0.5) is different from default, learning rate follows cosine function after warmup.
"""
warn_t_total = True
def __init__(self, warmup=0.002, t_total=-1, cycles=.5, **kw):
"""
:param warmup: see LRSchedule
:param t_total: see LRSchedule
:param cycles: number of cycles. Default: 0.5, corresponding to cosine decay from 1. at progress==warmup and 0 at progress==1.
:param kw:
"""
super(WarmupCosineSchedule, self).__init__(warmup=warmup, t_total=t_total, **kw)
self.cycles = cycles
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
return 0.5 * (1. + math.cos(math.pi * self.cycles * 2 * progress))
class WarmupCosineWithHardRestartsSchedule(WarmupCosineSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
If `cycles` (default=1.) is different from default, learning rate follows `cycles` times a cosine decaying
learning rate (with hard restarts).
"""
def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
super(WarmupCosineWithHardRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
assert(cycles >= 1.)
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
ret = 0.5 * (1. + math.cos(math.pi * ((self.cycles * progress) % 1)))
return ret
class WarmupCosineWithWarmupRestartsSchedule(WarmupCosineWithHardRestartsSchedule):
"""
All training progress is divided in `cycles` (default=1.) parts of equal length.
Every part follows a schedule with the first `warmup` fraction of the training steps linearly increasing from 0. to 1.,
followed by a learning rate decreasing from 1. to 0. following a cosine curve.
"""
def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
assert(warmup * cycles < 1.)
warmup = warmup * cycles if warmup >= 0 else warmup
super(WarmupCosineWithWarmupRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
def get_lr_(self, progress):
progress = progress * self.cycles % 1.
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
ret = 0.5 * (1. + math.cos(math.pi * progress))
return ret
class WarmupConstantSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Keeps learning rate equal to 1. after warmup.
"""
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
return 1.
class WarmupLinearSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Linearly decreases learning rate from 1. to 0. over remaining `1 - warmup` steps.
"""
warn_t_total = True
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
return max((progress - 1.) / (self.warmup - 1.), 0.)
SCHEDULES = {
None: ConstantLR,
"none": ConstantLR,
"warmup_cosine": WarmupCosineSchedule,
"warmup_constant": WarmupConstantSchedule,
"warmup_linear": WarmupLinearSchedule
}
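# --- Illustrative sketch, not part of the original module: querying the
# schedules above for the lr multiplier at a few steps. The numbers are
# arbitrary; with warmup=0.1 and t_total=100, warmup_linear ramps 0 -> 1
# over the first 10 steps, then decays linearly back to 0.
def _demo_warmup_linear():
    sched = WarmupLinearSchedule(warmup=0.1, t_total=100)
    return [round(sched.get_lr(step), 3) for step in (0, 5, 10, 50, 100)]
    # -> [0.0, 0.5, 1.0, 0.556, 0.0]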
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate of 1. (no warmup regardless of warmup setting). Default: -1
schedule: schedule to use for the warmup (see above).
Can be `'warmup_linear'`, `'warmup_constant'`, `'warmup_cosine'`, `'none'`, `None` or a `_LRSchedule` object (see below).
If `None` or `'none'`, learning rate is always kept constant.
Default : `'warmup_linear'`
        b1: Adam's b1. Default: 0.9
        b2: Adam's b2. Default: 0.999
        e: Adam's epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0, **kwargs):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not isinstance(schedule, _LRSchedule) and schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
# initialize schedule object
if not isinstance(schedule, _LRSchedule):
schedule_type = SCHEDULES[schedule]
schedule = schedule_type(warmup=warmup, t_total=t_total)
else:
if warmup != -1 or t_total != -1:
logger.warning("warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. "
"Please specify custom warmup and t_total in _LRSchedule object.")
defaults = dict(lr=lr, schedule=schedule,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(1 - beta1, grad)
next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
# step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
# No bias correction
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
        return loss
 | 13,112 | 42.134868 | 139 | py |
visualbert | visualbert-master/visualbert/pytorch_pretrained_bert/__main__.py | # coding: utf8
def main():
import sys
try:
from .convert_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ModuleNotFoundError:
print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
if len(sys.argv) != 5:
# pylint: disable=line-too-long
print("Should be used as `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`")
else:
PYTORCH_DUMP_OUTPUT = sys.argv.pop()
TF_CONFIG = sys.argv.pop()
TF_CHECKPOINT = sys.argv.pop()
convert_tf_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
if __name__ == '__main__':
main()
| 932 | 39.565217 | 137 | py |
visualbert | visualbert-master/visualbert/pytorch_pretrained_bert/modeling.py | # coding=utf-8
# Modified by Harold. Added VisualBERT.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
from copy import deepcopy
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
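# --- Illustrative sketch, not part of the original module: round-tripping a
# BertConfig through a plain dict, using only constructor arguments defined
# above.
def _demo_bert_config_roundtrip():
    config = BertConfig(vocab_size_or_config_json_file=30522, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12)
    clone = BertConfig.from_dict(config.to_dict())
    assert clone.hidden_size == config.hidden_size
    return clone.to_json_string()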
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
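# --- Illustrative sketch, not part of the original module: assuming the
# pure-PyTorch fallback above is in use (apex not installed), it matches
# torch.nn.LayerNorm for the same epsilon and affine parameters.
def _demo_layernorm_equivalence():
    x = torch.randn(2, 4, 8)
    ours = BertLayerNorm(8, eps=1e-12)
    ref = nn.LayerNorm(8, eps=1e-12)
    ref.weight.data.copy_(ours.weight.data)
    ref.bias.data.copy_(ours.bias.data)
    return (ours(x) - ref(x)).abs().max()  # ~0 up to float rounding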
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.output_attention_weights = config.output_attention_weights
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
if self.output_attention_weights:
return context_layer, attention_probs
else:
return context_layer
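# --- Illustrative sketch, not part of the original module: the core of
# BertSelfAttention.forward above for a single head, without dropout or the
# multi-head reshaping.
def _demo_scaled_dot_product_attention(q, k, v, attention_mask):
    # attention_mask is additive: 0 for visible positions and a large
    # negative value for masked ones, as precomputed in BertModel.forward.
    scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(q.size(-1))
    probs = nn.Softmax(dim=-1)(scores + attention_mask)
    return torch.matmul(probs, v)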
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
self.output_attention_weights = config.output_attention_weights
def forward(self, input_tensor, attention_mask):
if self.output_attention_weights:
self_output, attention_weights = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output, attention_weights
else:
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
self.output_attention_weights = config.output_attention_weights
def forward(self, hidden_states, attention_mask):
if self.output_attention_weights:
attention_output, attention_weights = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output, attention_weights
else:
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
self.output_attention_weights = config.output_attention_weights
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
if self.output_attention_weights:
attn_data_list = []
all_encoder_layers = []
for layer_module in self.layer:
hidden_states, attention_weights = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
attn_data_list.append(attention_weights)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers, attn_data_list
else:
all_encoder_layers = []
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
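# A minimal sketch (not from the original file) demonstrating the weight tying above:
# the LM head's Linear weight is replaced by the embedding matrix itself, so the decoder
# and the input embeddings share one Parameter. The small sizes are illustrative assumptions.
def _sketch_weight_tying():
    config = BertConfig(vocab_size_or_config_json_file=32, hidden_size=16,
                        num_hidden_layers=1, num_attention_heads=2, intermediate_size=32)
    embedding_weights = nn.Embedding(32, 16).weight
    head = BertLMPredictionHead(config, embedding_weights)
    assert head.decoder.weight is embedding_weights  # same Parameter object, not a copy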
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class PreTrainedBertModel(nn.Module):
""" An abstract class to handle weights initialization and
	a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(PreTrainedBertModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name, state_dict=None, cache_dir=None, random_initialize = False, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
		state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name]
else:
archive_file = pretrained_model_name
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except FileNotFoundError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
archive_file))
return None
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info("loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file))
tempdir = None
if os.path.isdir(resolved_archive_file):
serialization_dir = resolved_archive_file
else:
# Extract archive to temp dir
tempdir = tempfile.mkdtemp()
logger.info("extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
archive.extractall(tempdir)
serialization_dir = tempdir
# Load config
config_file = os.path.join(serialization_dir, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if random_initialize:
return model
if state_dict is None:
weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
state_dict = torch.load(weights_path)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if tempdir:
# Clean up temp dir
shutil.rmtree(tempdir)
return model
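# A standalone sketch (not in the original file) of the gamma/beta renaming performed
# above: old TF-style LayerNorm parameter names are mapped onto the PyTorch names before
# loading. This is a simplified equivalent of the in-place loop in from_pretrained and
# does not preserve the state_dict's _metadata attribute.
def _sketch_rename_tf_layernorm_keys(state_dict):
    return {key.replace('gamma', 'weight').replace('beta', 'bias'): value
            for key, value in state_dict.items()}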
class BertModel(PreTrainedBertModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
			with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
		`encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
		`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
			classifier pretrained on top of the hidden state associated with the first token of the
			input (`[CLS]`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
		# This attention mask is simpler than the triangular masking of causal attention
		# used in OpenAI GPT; we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
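# A minimal sketch (not part of the original file) of the additive attention-mask
# arithmetic used in forward above: attended positions get bias 0.0, padded positions
# get -10000.0, which effectively removes them after the softmax.
def _sketch_extended_attention_mask():
    attention_mask = torch.tensor([[1, 1, 0]])                # last position is padding
    extended = attention_mask[:, None, None, :].float()       # [batch, 1, 1, seq]
    extended = (1.0 - extended) * -10000.0
    assert extended[0, 0, 0].tolist() == [0.0, 0.0, -10000.0]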
class BertForPreTraining(PreTrainedBertModel):
"""BERT model with pre-training heads.
This module comprises the BERT model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
			with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
			with indices selected in [-1, 0, ..., vocab_size - 1]. All labels set to -1 are ignored (masked); the loss
			is only computed for the labels set in [0, ..., vocab_size - 1]
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForPreTraining(config)
masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None):
sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
return total_loss
elif masked_lm_labels is not None and next_sentence_label is None: # If we did not specify the next_sentence_label
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
total_loss = masked_lm_loss
return total_loss
else:
return prediction_scores, seq_relationship_score
class BertForMaskedLM(PreTrainedBertModel):
"""BERT model with the masked language modeling head.
This module comprises the BERT model followed by the masked language modeling head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
			with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
			with indices selected in [-1, 0, ..., vocab_size - 1]. All labels set to -1 are ignored (masked); the loss
			is only computed for the labels set in [0, ..., vocab_size - 1]
Outputs:
if `masked_lm_labels` is not `None`:
Outputs the masked language modeling loss.
if `masked_lm_labels` is `None`:
Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForMaskedLM(config)
masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForMaskedLM, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
prediction_scores = self.cls(sequence_output)
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
return masked_lm_loss
else:
return prediction_scores
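# A minimal sketch (not from the original file) of how masked_lm_labels interact with
# CrossEntropyLoss(ignore_index=-1): only positions carrying a real vocab id contribute
# to the loss. Sizes and the label value are illustrative assumptions.
def _sketch_masked_lm_loss():
    vocab_size = 100
    logits = torch.randn(1, 3, vocab_size)
    labels = torch.tensor([[-1, 42, -1]])  # only position 1 is a masked token
    loss = torch.nn.CrossEntropyLoss(ignore_index=-1)(
        logits.view(-1, vocab_size), labels.view(-1))
    assert loss.dim() == 0  # scalar loss over the single labeled position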
class BertForNextSentencePrediction(PreTrainedBertModel):
"""BERT model with next sentence prediction head.
This module comprises the BERT model followed by the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
			with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
		if `next_sentence_label` is not `None`:
			Outputs the next sentence classification loss.
if `next_sentence_label` is `None`:
Outputs the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForNextSentencePrediction(config)
seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForNextSentencePrediction, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
		seq_relationship_score = self.cls(pooled_output)
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
return next_sentence_loss
else:
return seq_relationship_score
class BertForSequenceClassification(PreTrainedBertModel):
"""BERT model for classification.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
			with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
			with indices selected in [0, ..., num_labels - 1].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForSequenceClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels=2):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
class BertForMultipleChoice(PreTrainedBertModel):
"""BERT model for multiple choice tasks.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_choices`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
			with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
			with indices selected in [0, ..., num_choices - 1].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
			Outputs the classification logits of shape [batch_size, num_choices].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])
token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_choices = 2
model = BertForMultipleChoice(config, num_choices)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_choices=2):
super(BertForMultipleChoice, self).__init__(config)
self.num_choices = num_choices
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
_, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, self.num_choices)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return loss
else:
return reshaped_logits
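# A minimal sketch (not in the original file) of the flatten/score/reshape pattern in
# forward above: choices are folded into the batch dimension, scored with a single
# linear output, then unfolded back to [batch, num_choices] for the cross-entropy loss.
def _sketch_multiple_choice_reshape():
    batch, num_choices, seq_len = 2, 4, 3                     # illustrative sizes
    input_ids = torch.zeros(batch, num_choices, seq_len, dtype=torch.long)
    flat_input_ids = input_ids.view(-1, input_ids.size(-1))   # [8, 3]
    logits = torch.randn(flat_input_ids.size(0), 1)           # one score per choice
    reshaped_logits = logits.view(-1, num_choices)            # [2, 4]
    assert reshaped_logits.shape == (batch, num_choices)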
class BertForTokenClassification(PreTrainedBertModel):
"""BERT model for token-level classification.
This module is composed of the BERT model with a linear layer on top of
the full hidden state of the last layer.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
			with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
			with indices selected in [0, ..., num_labels - 1].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, sequence_length, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForTokenClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels=2):
super(BertForTokenClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
class BertForQuestionAnswering(PreTrainedBertModel):
"""BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
			with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
			Positions are clamped to the length of the sequence and positions outside of the sequence are not taken
into account for computing the loss.
`end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
			Positions are clamped to the length of the sequence and positions outside of the sequence are not taken
into account for computing the loss.
Outputs:
if `start_positions` and `end_positions` are not `None`:
Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
if `start_positions` or `end_positions` is `None`:
Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
position tokens of shape [batch_size, sequence_length].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForQuestionAnswering(config)
start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.bert = BertModel(config)
# TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return total_loss
else:
return start_logits, end_logits
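# A minimal sketch (not part of the original file) of the start/end split in forward
# above: a single [batch, seq, 2] projection is split into two [batch, seq] logit maps,
# one for span starts and one for span ends.
def _sketch_qa_logit_split():
    logits = torch.randn(2, 7, 2)                             # illustrative sizes
    start_logits, end_logits = logits.split(1, dim=-1)
    start_logits, end_logits = start_logits.squeeze(-1), end_logits.squeeze(-1)
    assert start_logits.shape == end_logits.shape == (2, 7)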
#########################
#### Added by Harold ####
#########################
class BertEmbeddingsWithVisualEmbedding(nn.Module):
"""Construct the embeddings from word, position, token_type embeddings and visual embeddings.
"""
def __init__(self, config):
super(BertEmbeddingsWithVisualEmbedding, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
#### Below are specific for encoding visual features
# Segment and position embedding for image features
self.token_type_embeddings_visual = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.position_embeddings_visual = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.projection = nn.Linear(config.visual_embedding_dim, config.hidden_size)
def special_intialize(self, method_type = 0):
		### This is a bit unorthodox. The better way might be to add an initializer to AllenNLP.
		# This function is used to initialize token_type_embeddings_visual and position_embeddings_visual, just in case.
self.token_type_embeddings_visual.weight = torch.nn.Parameter(deepcopy(self.token_type_embeddings.weight.data), requires_grad = True)
self.position_embeddings_visual.weight = torch.nn.Parameter(deepcopy(self.position_embeddings.weight.data), requires_grad = True)
return
def forward(self, input_ids, token_type_ids=None, visual_embeddings=None, visual_embeddings_type=None, position_embeddings_visual=None, image_text_alignment = None, confidence = None):
'''
input_ids = [batch_size, sequence_length]
token_type_ids = [batch_size, sequence_length]
visual_embedding = [batch_size, image_feature_length, image_feature_dim]
image_text_alignment = [batch_size, image_feature_length, alignment_dim]
confidence = [batch_size, image_feature_length] of type LongTensor
'''
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
if visual_embeddings is not None:
visual_embeddings = self.projection(visual_embeddings)
token_type_embeddings_visual = self.token_type_embeddings_visual(visual_embeddings_type)
if image_text_alignment is not None:
# image_text_alignment = Batch x image_length x alignment_number. Each element denotes the position of the word corresponding to the image feature. -1 is the padding value.
image_text_alignment_mask = (image_text_alignment != -1).long()
# Get rid of the -1.
image_text_alignment = image_text_alignment_mask * image_text_alignment
# position_embeddings_visual = Batch x image_length x alignment length x dim
position_embeddings_visual = self.position_embeddings(image_text_alignment) * image_text_alignment_mask.to(dtype=next(self.parameters()).dtype).unsqueeze(-1)
position_embeddings_visual = position_embeddings_visual.sum(2)
				# We want to average along the alignment_number dimension.
image_text_alignment_mask = image_text_alignment_mask.to(dtype=next(self.parameters()).dtype).sum(2)
				image_text_alignment_mask[image_text_alignment_mask==0] = 1 # Avoid divide-by-zero errors
position_embeddings_visual = position_embeddings_visual / image_text_alignment_mask.unsqueeze(-1)
				position_ids_visual = torch.zeros(*visual_embeddings.size()[:-1], dtype = torch.long, device = visual_embeddings.device)
				# When fine-tuning the detector, the image_text_alignment is sometimes padded too long.
if position_embeddings_visual.size(1) != visual_embeddings.size(1):
assert(position_embeddings_visual.size(1) >= visual_embeddings.size(1))
position_embeddings_visual = position_embeddings_visual[:, :visual_embeddings.size(1), :]
position_embeddings_visual = position_embeddings_visual + self.position_embeddings_visual(position_ids_visual)
else:
				position_ids_visual = torch.zeros(*visual_embeddings.size()[:-1], dtype = torch.long, device = visual_embeddings.device)
position_embeddings_visual = self.position_embeddings_visual(position_ids_visual)
v_embeddings = visual_embeddings + position_embeddings_visual + token_type_embeddings_visual
			# Concatenate the two:
			embeddings = torch.cat((embeddings, v_embeddings), dim = 1) # append the visual embeddings after the text embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
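# A minimal sketch (not from the original file) of the image-text alignment averaging
# in forward above: padded alignment slots (-1) are masked out, the word position
# embeddings of the remaining slots are summed, and the sum is divided by the number
# of real alignments. All sizes here are illustrative assumptions.
def _sketch_alignment_average():
    pos_emb = nn.Embedding(8, 2)
    alignment = torch.tensor([[[1, 3, -1]]])                  # [batch, regions, alignments]
    mask = (alignment != -1).long()
    safe = alignment * mask                                   # -1 padding -> dummy index 0
    emb = pos_emb(safe) * mask.float().unsqueeze(-1)          # zero out padded slots
    denom = mask.float().sum(2).clamp(min=1)                  # avoid divide-by-zero
    avg = emb.sum(2) / denom.unsqueeze(-1)                    # mean over real alignments
    assert avg.shape == (1, 1, 2)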
class BertVisualModel(PreTrainedBertModel):
def __init__(self, config):
super(BertVisualModel, self).__init__(config)
self.embeddings = BertEmbeddingsWithVisualEmbedding(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.bypass_transformer = config.bypass_transformer
if self.bypass_transformer:
self.additional_layer = BertLayer(config)
self.output_attention_weights = config.output_attention_weights
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids, attention_mask, visual_embeddings, position_embeddings_visual, visual_embeddings_type, image_text_alignment, confidence, output_all_encoded_layers=True):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
		# This attention mask is simpler than the triangular masking of causal attention
		# used in OpenAI GPT; we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids, visual_embeddings = visual_embeddings, position_embeddings_visual = position_embeddings_visual, visual_embeddings_type = visual_embeddings_type, image_text_alignment = image_text_alignment,
confidence = confidence)
if self.bypass_transformer and visual_embeddings is not None:
assert(not output_all_encoded_layers) # Don't support this for the bypass model
text_length = input_ids.size(1)
text_embedding_output = embedding_output[:, :text_length, :]
visual_part = embedding_output[:, text_length:, :]
text_extended_attention_mask = extended_attention_mask[:, :, :text_length, :text_length]
encoded_layers = self.encoder(text_embedding_output,
text_extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
new_input = torch.cat((sequence_output, visual_part), dim = 1)
final_sequence_output = self.additional_layer(new_input, extended_attention_mask)
pooled_output = self.pooler(final_sequence_output)
return final_sequence_output, pooled_output
if self.output_attention_weights:
encoded_layers, attn_data_list = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output, attn_data_list
else:
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
class TrainVisualBERTObjective(PreTrainedBertModel):
def __init__(self, config, training_head_type, visual_embedding_dim = 512, hard_cap_seq_len = None, cut_first = "text", embedding_strategy = "plain", bypass_transformer = False, output_attention_weights= False):
super(TrainVisualBERTObjective, self).__init__(config)
config.visual_embedding_dim = visual_embedding_dim
config.embedding_strategy = embedding_strategy
config.bypass_transformer = bypass_transformer
config.output_attention_weights = output_attention_weights
self.output_attention_weights = output_attention_weights
self.cut_first = cut_first
self.hard_cap_seq_len = hard_cap_seq_len
self.bert = BertVisualModel(config)
self.training_head_type = training_head_type
if self.training_head_type == "pretraining":
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
elif self.training_head_type == "multichoice":
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.num_choices = 4 # For VCR
elif self.training_head_type == "vqa":
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 3129)
elif self.training_head_type == "vqa_advanced":
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
elif self.training_head_type == "nlvr":
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 2)
elif self.training_head_type == "flickr":
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
self.flickr_attention = FlickrAttention(config)
self.apply(self.init_bert_weights)
def forward(
self,
input_ids,
token_type_ids,
input_mask,
visual_embeddings,
position_embeddings_visual,
image_mask,
image_text_alignment = None,
confidence = None,
visual_embeddings_type=None,
label=None,
flickr_position = None,
masked_lm_labels=None,
		image_lm_labels=None,
is_random_next=None,
output_all_encoded_layers = False):
# We want to convert everything into: batch x sequence_length x (dim).
flat_input_ids = transform_to_batch_sequence(input_ids)
flat_token_type_ids = transform_to_batch_sequence(token_type_ids)
flat_input_mask = transform_to_batch_sequence(input_mask)
flat_image_mask = transform_to_batch_sequence(image_mask)
flat_masked_lm_labels = transform_to_batch_sequence(masked_lm_labels)
flat_position_embeddings_visual = transform_to_batch_sequence(position_embeddings_visual)
flat_confidence = transform_to_batch_sequence(confidence)
flat_image_text_alignment = transform_to_batch_sequence_dim(image_text_alignment)
flat_visual_embeddings = transform_to_batch_sequence_dim(visual_embeddings)
if visual_embeddings_type is not None:
visual_embeddings_type = transform_to_batch_sequence(visual_embeddings_type)
else:
if flat_image_mask is not None:
visual_embeddings_type = torch.zeros_like(flat_image_mask, dtype = torch.long)
else:
visual_embeddings_type = None
if flat_image_mask is not None:
flat_attention_mask = torch.cat((flat_input_mask, flat_image_mask), dim = -1)
			assert(image_lm_labels is None) # Not supported yet
if flat_masked_lm_labels is not None:
assert(flat_masked_lm_labels.size(-1) == flat_input_mask.size(-1))
new_lm_labels = torch.ones_like(flat_attention_mask) * -1
size_masked_lm_labels = flat_masked_lm_labels.size()
assert(len(size_masked_lm_labels) == 2)
new_lm_labels[:size_masked_lm_labels[0], :size_masked_lm_labels[1]] = flat_masked_lm_labels
flat_masked_lm_labels = new_lm_labels
else:
flat_attention_mask = flat_input_mask
if self.output_attention_weights:
sequence_output, pooled_output, attention_weights = self.bert(
flat_input_ids,
flat_token_type_ids,
flat_attention_mask,
visual_embeddings = flat_visual_embeddings,
position_embeddings_visual = flat_position_embeddings_visual,
visual_embeddings_type = visual_embeddings_type,
image_text_alignment = flat_image_text_alignment,
confidence = flat_confidence,
output_all_encoded_layers=output_all_encoded_layers)
output_dict = {}
output_dict["attention_weights"] = attention_weights
			output_dict["loss"] = None
return output_dict
sequence_output, pooled_output = self.bert(
flat_input_ids,
flat_token_type_ids,
flat_attention_mask,
visual_embeddings = flat_visual_embeddings,
position_embeddings_visual = flat_position_embeddings_visual,
visual_embeddings_type = visual_embeddings_type,
image_text_alignment = flat_image_text_alignment,
confidence = flat_confidence,
output_all_encoded_layers=output_all_encoded_layers)
output_dict = {}
if output_all_encoded_layers:
output_dict["sequence_output"] = sequence_output
output_dict["pooled_output"] = pooled_output
output_dict["loss"] = None
return output_dict
if self.training_head_type == "pretraining":
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
output_dict["logits"] = prediction_scores
output_dict["seq_relationship_score"] = seq_relationship_score
output_dict["loss"] = None
if flat_masked_lm_labels is not None and is_random_next is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.contiguous().view(-1, self.config.vocab_size), flat_masked_lm_labels.contiguous().view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.contiguous().view(-1, 2), is_random_next.contiguous().view(-1))
output_dict["next_sentence_loss"] = next_sentence_loss
output_dict["masked_lm_loss"] = masked_lm_loss
output_dict["loss"] = masked_lm_loss + next_sentence_loss
if flat_masked_lm_labels is not None and is_random_next is None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.contiguous().view(-1, self.config.vocab_size), flat_masked_lm_labels.contiguous().view(-1))
#output_dict["next_sentence_loss"] = None
output_dict["masked_lm_loss"] = masked_lm_loss
output_dict["loss"] = masked_lm_loss
return output_dict
elif self.training_head_type == "multichoice":
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.contiguous().view(-1, self.num_choices)
output_dict["logits"] = reshaped_logits
output_dict["loss"] = None
if label is not None:
loss_fct = CrossEntropyLoss()
output_dict["loss"] = loss_fct(reshaped_logits, label.contiguous())
return output_dict
elif self.training_head_type == "vqa":
index_to_gather = flat_input_mask.sum(1) - 2
pooled_output = torch.gather(sequence_output, 1, index_to_gather.unsqueeze(-1).unsqueeze(-1).expand(index_to_gather.size(0), 1, sequence_output.size(-1)))
flat_input_ids = torch.gather(flat_input_ids, 1, index_to_gather.unsqueeze(-1).expand(index_to_gather.size(0), 1))
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.contiguous().view(-1, 3129)
output_dict["logits"] = logits
output_dict["loss"] = None
output_dict["accuracy"] = None
if label is not None:
loss_fct = torch.nn.KLDivLoss(reduction = "batchmean")
log_softmax = torch.nn.LogSoftmax(dim=-1)
reshaped_logits = log_softmax(reshaped_logits)
output_dict["loss"] = loss_fct(reshaped_logits, label.contiguous())
output_dict["accuracy"] = torch.sum(compute_score_with_logits(reshaped_logits, label)) / label.size(0)
return output_dict
elif self.training_head_type == "vqa_advanced":
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
output_dict["logits"] = prediction_scores
output_dict["seq_relationship_score"] = seq_relationship_score
output_dict["loss"] = None
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.contiguous().view(-1, self.config.vocab_size), flat_masked_lm_labels.contiguous().view(-1))
output_dict["masked_lm_loss"] = masked_lm_loss
output_dict["loss"] = masked_lm_loss
			prediction_tokens = torch.max(prediction_scores, -1)[1].view(input_ids.size(0), -1).cpu().numpy() # batch x sequence_length; records the predicted words
lm_labels = flat_masked_lm_labels.view(input_ids.size(0), -1).cpu().numpy()
counter = 0.0
flags = []
for i in range(lm_labels.shape[0]):
flag = True
for j in range(lm_labels.shape[1]):
if lm_labels[i][j] != -1 and prediction_tokens[i][j] != lm_labels[i][j]:
flag = False
break
if flag:
counter += 1
flags.append(flag)
output_dict["accuracy"] = counter / prediction_tokens.shape[0]
return output_dict
elif self.training_head_type == "nlvr":
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.contiguous()
output_dict["logits"] = logits
output_dict["loss"] = None
if label is not None:
loss_fct = CrossEntropyLoss()
output_dict["loss"] = loss_fct(reshaped_logits, label.contiguous())
return output_dict
elif self.training_head_type == "flickr":
if flickr_position is not None:
entities_num = (flickr_position != -1).long().view(-1).sum(-1)
flickr_position_mask = (flickr_position != -1).long()
# Make the -1 become 0
flickr_position = flickr_position * flickr_position_mask
# Selected_positions = batch x selected position x dim
selected_positions = batched_index_select(sequence_output, 1, flickr_position)
# Visual Features = batch x visual_feature_length x dim
				visual_features = sequence_output[:, flat_input_mask.size(1):, :]
assert(visual_features.size(1) == flat_image_mask.size(1))
scores = self.flickr_attention(selected_positions, visual_features, flat_image_mask)
# scores = batch x selected position x visual_feature
# scores = selected_positions.bmm(visual_features.transpose(1,2))
loss_fct = torch.nn.KLDivLoss(reduction = "batchmean")
log_softmax = torch.nn.LogSoftmax(dim=-1)
scores = log_softmax(scores)
label = label.contiguous()
				# label = batch x selected_position x needed position
output_dict["loss"] = loss_fct(scores, label)
acc, upper_acc = compute_score_with_logits_flickr(scores, label)
output_dict["accuracy"] = acc / entities_num
output_dict["upperbound_accuracy"] = upper_acc / entities_num
output_dict["entity_num"] = entities_num
return output_dict
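# A minimal sketch (not in the original file) of the VQA loss used above: soft answer
# scores are compared against log-probabilities with batch-mean KL divergence. The
# 3129-way answer space matches the classifier above; the other values are illustrative.
def _sketch_vqa_kldiv_loss():
    logits = torch.randn(2, 3129)
    soft_labels = torch.softmax(torch.randn(2, 3129), dim=-1)  # stand-in soft VQA scores
    log_probs = torch.nn.LogSoftmax(dim=-1)(logits)
    loss = torch.nn.KLDivLoss(reduction="batchmean")(log_probs, soft_labels)
    assert loss.dim() == 0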
class FlickrAttention(nn.Module):
def __init__(self, config):
super(FlickrAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
		self.num_attention_heads = 1  # config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, query, key, attention_mask):
attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype)
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
attention_mask = (1.0 - attention_mask) * -10000.0
mixed_query_layer = self.query(query)
mixed_key_layer = self.key(key)
# We don't need value layers
#mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
#value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_scores = attention_scores + attention_mask
attention_scores = attention_scores.squeeze(1)
return attention_scores
def compute_score_with_logits_flickr(logits, labels, recall = 1):
	# The recall is changed manually here when evaluating... a bit clumsy.
labels_mask = (labels != 0.0).float()
upper_bound_labels = labels.sum(-1).view(-1).sum(-1)
labels = torch.ones_like(labels) * labels_mask
if recall != 1:
		# Evaluation mode; we can afford to be slower here.
# labels = batch x seq x target length
logits = logits.topk(k=recall, dim = -1)[1].data.cpu().numpy()
counter = 0.0
labels = labels.data.cpu().numpy()
for i in range(logits.shape[0]):
for j in range(logits.shape[1]):
possibles = logits[i][j]
current_label = labels[i][j][possibles]
if current_label.sum(-1) != 0:
counter += 1
		counter = torch.tensor([counter], device = upper_bound_labels.device)
return counter, upper_bound_labels
logits = torch.max(logits, -1)[1].data # argmax
logits = logits.unsqueeze(-1)
scores = torch.gather(input = labels, dim = 2, index = logits)
scores = scores.view(-1).sum(-1)
return scores, upper_bound_labels
def transform_to_batch_sequence(tensor):
if tensor is not None:
if len(tensor.size()) == 2:
return tensor
else:
assert(len(tensor.size()) == 3)
return tensor.contiguous().view(-1, tensor.size(-1))
else:
return None
def transform_to_batch_sequence_dim(tensor):
if tensor is not None:
if len(tensor.size()) == 3:
return tensor
else:
assert(len(tensor.size()) == 4)
return tensor.contiguous().view(-1, tensor.size(-2), tensor.size(-1))
else:
return None
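# A minimal sketch (not part of the original file) of what the two helpers above do:
# any leading choice/option dimension is folded into the batch dimension.
def _sketch_transform_helpers():
    assert transform_to_batch_sequence(torch.zeros(2, 4, 3)).shape == (8, 3)
    assert transform_to_batch_sequence_dim(torch.zeros(2, 4, 3, 5)).shape == (8, 3, 5)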
def masked_unk_softmax(x, dim, mask_idx):
x1 = F.softmax(x, dim=dim)
x1[:, mask_idx] = 0
x1_sum = torch.sum(x1, dim=1, keepdim=True)
y = x1 / x1_sum
return y
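# A minimal sketch (not from the original file) of masked_unk_softmax: the UNK index is
# zeroed after the softmax and the remaining mass is renormalized to sum to one.
def _sketch_masked_unk_softmax():
    y = masked_unk_softmax(torch.randn(2, 5), dim=1, mask_idx=0)
    assert (y[:, 0] == 0).all() and torch.allclose(y.sum(1), torch.ones(2))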
def compute_score_with_logits(logits, labels):
logits = masked_unk_softmax(logits, 1, 0)
logits = torch.max(logits, 1)[1].data # argmax
one_hots = torch.zeros_like(labels)
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores
def batched_index_select(t, dim, inds):
dummy = inds.unsqueeze(2).expand(inds.size(0), inds.size(1), t.size(2))
out = t.gather(dim, dummy) # b x e x f
return out | 84,216 | 48.077506 | 259 | py |
visualbert | visualbert-master/visualbert/pytorch_pretrained_bert/fine_tuning.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import os
import random
from io import open
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from pytorch_pretrained_bert.modeling import BertForPreTraining
from pytorch_pretrained_bert.tokenization import BertTokenizer
#from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear
from torch.utils.data import Dataset
import random
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class BERTDataset(Dataset):
def __init__(self, corpus_path, tokenizer, seq_len, encoding="utf-8", corpus_lines=None, on_memory=True):
self.vocab = tokenizer.vocab
self.tokenizer = tokenizer
self.seq_len = seq_len
self.on_memory = on_memory
self.corpus_lines = corpus_lines # number of non-empty lines in input corpus
self.corpus_path = corpus_path
self.encoding = encoding
self.current_doc = 0 # to avoid random sentence from same doc
# for loading samples directly from file
self.sample_counter = 0 # used to keep track of full epochs on file
self.line_buffer = None # keep second sentence of a pair in memory and use as first sentence in next pair
# for loading samples in memory
self.current_random_doc = 0
self.num_docs = 0
self.sample_to_doc = [] # map sample index to doc and line
# load samples into memory
if on_memory:
self.all_docs = []
doc = []
self.corpus_lines = 0
with open(corpus_path, "r", encoding=encoding) as f:
for line in tqdm(f, desc="Loading Dataset", total=corpus_lines):
line = line.strip()
if line == "":
self.all_docs.append(doc)
doc = []
#remove last added sample because there won't be a subsequent line anymore in the doc
self.sample_to_doc.pop()
else:
#store as one sample
sample = {"doc_id": len(self.all_docs),
"line": len(doc)}
self.sample_to_doc.append(sample)
doc.append(line)
self.corpus_lines = self.corpus_lines + 1
# if last row in file is not empty
if self.all_docs[-1] != doc:
self.all_docs.append(doc)
self.sample_to_doc.pop()
self.num_docs = len(self.all_docs)
# load samples later lazily from disk
else:
if self.corpus_lines is None:
with open(corpus_path, "r", encoding=encoding) as f:
self.corpus_lines = 0
for line in tqdm(f, desc="Loading Dataset", total=corpus_lines):
if line.strip() == "":
self.num_docs += 1
else:
self.corpus_lines += 1
# if doc does not end with empty line
if line.strip() != "":
self.num_docs += 1
self.file = open(corpus_path, "r", encoding=encoding)
self.random_file = open(corpus_path, "r", encoding=encoding)
def __len__(self):
# last line of doc won't be used, because there's no "nextSentence". Additionally, we start counting at 0.
return self.corpus_lines - self.num_docs - 1
def __getitem__(self, item):
cur_id = self.sample_counter
self.sample_counter += 1
if not self.on_memory:
# after one epoch we start again from beginning of file
if cur_id != 0 and (cur_id % len(self) == 0):
self.file.close()
self.file = open(self.corpus_path, "r", encoding=self.encoding)
t1, t2, is_next_label = self.random_sent(item)
# tokenize
tokens_a = self.tokenizer.tokenize(t1)
tokens_b = self.tokenizer.tokenize(t2)
# combine to one sample
cur_example = InputExample(guid=cur_id, tokens_a=tokens_a, tokens_b=tokens_b, is_next=is_next_label)
# transform sample to features
cur_features = convert_example_to_features(cur_example, self.seq_len, self.tokenizer)
cur_tensors = (torch.tensor(cur_features.input_ids),
torch.tensor(cur_features.input_mask),
torch.tensor(cur_features.segment_ids),
torch.tensor(cur_features.lm_label_ids),
torch.tensor(cur_features.is_next))
return cur_tensors
def random_sent(self, index):
"""
Get one sample from corpus consisting of two sentences. With prob. 50% these are two subsequent sentences
from one doc. With 50% the second sentence will be a random one from another doc.
:param index: int, index of sample.
:return: (str, str, int), sentence 1, sentence 2, isNextSentence Label
"""
t1, t2 = self.get_corpus_line(index)
if random.random() > 0.5:
label = 0
else:
t2 = self.get_random_line()
label = 1
assert len(t1) > 0
assert len(t2) > 0
return t1, t2, label
def get_corpus_line(self, item):
"""
        Get one sample from corpus consisting of two subsequent lines from the same doc.
:param item: int, index of sample.
:return: (str, str), two subsequent sentences from corpus
"""
t1 = ""
t2 = ""
assert item < self.corpus_lines
if self.on_memory:
sample = self.sample_to_doc[item]
t1 = self.all_docs[sample["doc_id"]][sample["line"]]
t2 = self.all_docs[sample["doc_id"]][sample["line"]+1]
# used later to avoid random nextSentence from same doc
self.current_doc = sample["doc_id"]
return t1, t2
else:
if self.line_buffer is None:
# read first non-empty line of file
while t1 == "" :
t1 = next(self.file).strip()
t2 = next(self.file).strip()
else:
# use t2 from previous iteration as new t1
t1 = self.line_buffer
t2 = next(self.file).strip()
# skip empty rows that are used for separating documents and keep track of current doc id
while t2 == "" or t1 == "":
t1 = next(self.file).strip()
t2 = next(self.file).strip()
self.current_doc = self.current_doc+1
self.line_buffer = t2
assert t1 != ""
assert t2 != ""
return t1, t2
def get_random_line(self):
"""
Get random line from another document for nextSentence task.
:return: str, content of one line
"""
# Similar to original tf repo: This outer loop should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document we're processing.
for _ in range(10):
if self.on_memory:
rand_doc_idx = random.randint(0, len(self.all_docs)-1)
rand_doc = self.all_docs[rand_doc_idx]
line = rand_doc[random.randrange(len(rand_doc))]
else:
rand_index = random.randint(1, self.corpus_lines if self.corpus_lines < 1000 else 1000)
#pick random line
for _ in range(rand_index):
line = self.get_next_line()
#check if our picked random line is really from another doc like we want it to be
if self.current_random_doc != self.current_doc:
break
return line
def get_next_line(self):
""" Gets next line of random_file and starts over when reaching end of file"""
try:
line = next(self.random_file).strip()
#keep track of which document we are currently looking at to later avoid having the same doc as t1
if line == "":
self.current_random_doc = self.current_random_doc + 1
line = next(self.random_file).strip()
except StopIteration:
self.random_file.close()
self.random_file = open(self.corpus_path, "r", encoding=self.encoding)
line = next(self.random_file).strip()
return line
class InputExample(object):
"""A single training/test example for the language model."""
def __init__(self, guid, tokens_a, tokens_b=None, is_next=None, lm_labels=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
tokens_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
tokens_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.tokens_a = tokens_a
self.tokens_b = tokens_b
self.is_next = is_next # nextSentence
self.lm_labels = lm_labels # masked words for language model
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, is_next, lm_label_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.is_next = is_next
self.lm_label_ids = lm_label_ids
def random_word(tokens, tokenizer, probability = 0.15):
"""
Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
:param tokens: list of str, tokenized sentence.
    :param tokenizer: Tokenizer, object used for tokenization (we need its vocab here)
:return: (list of str, list of int), masked tokens and related labels for LM prediction
"""
output_label = []
for i, token in enumerate(tokens):
prob = random.random()
# mask token with 15% probability
if prob < probability:
prob /= probability
# 80% randomly change token to mask token
if prob < 0.8:
tokens[i] = "[MASK]"
# 10% randomly change token to random token
elif prob < 0.9:
tokens[i] = random.choice(list(tokenizer.vocab.items()))[0]
# -> rest 10% randomly keep current token
# append current token to output (we will predict these later)
try:
output_label.append(tokenizer.vocab[token])
except KeyError:
# For unknown words (should not occur with BPE vocab)
output_label.append(tokenizer.vocab["[UNK]"])
logger.warning("Cannot find token '{}' in vocab. Using [UNK] insetad".format(token))
else:
# no masking token (will be ignored by loss function later)
output_label.append(-1)
return tokens, output_label
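# Illustrative sketch (not part of the original runner): exercises the 80/10/10
# scheme above with a stub tokenizer. Inside the `prob < probability` branch,
# `prob /= probability` rescales prob to be uniform on [0, 1) again, which is
# what makes the nested 0.8 / 0.9 thresholds mean 80% [MASK], 10% random
# replacement, and 10% kept unchanged.
def _demo_random_word():
    class _StubTokenizer:
        vocab = {"[MASK]": 0, "[UNK]": 1, "hello": 2, "world": 3}
    tokens, labels = random_word(["hello", "world"] * 500, _StubTokenizer())
    picked = sum(1 for l in labels if l != -1)  # ~15% of 1000 positions
    masked = sum(1 for t in tokens if t == "[MASK]")  # roughly 80% of the picked ones
    return picked, masked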
def convert_example_to_features(example, max_seq_length, tokenizer):
"""
Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with
IDs, LM labels, input_mask, CLS and SEP tokens etc.
:param example: InputExample, containing sentence input as strings and is_next label
:param max_seq_length: int, maximum length of sequence.
:param tokenizer: Tokenizer
:return: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training)
"""
tokens_a = example.tokens_a
tokens_b = example.tokens_b
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
tokens_a, t1_label = random_word(tokens_a, tokenizer)
tokens_b, t2_label = random_word(tokens_b, tokenizer)
# concatenate lm labels and account for CLS, SEP, SEP
lm_label_ids = ([-1] + t1_label + [-1] + t2_label + [-1])
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
    # since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
assert len(tokens_b) > 0
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
lm_label_ids.append(-1)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(lm_label_ids) == max_seq_length
if example.guid < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("LM label: %s " % (lm_label_ids))
logger.info("Is next sentence label: %s " % (example.is_next))
features = InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
lm_label_ids=lm_label_ids,
is_next=example.is_next)
return features
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--train_file",
default=None,
type=str,
required=True,
help="The input train corpus.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints will be written.")
## Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--on_memory",
action='store_true',
help="Whether to load train samples into memory or use disk")
parser.add_argument("--do_lower_case",
action='store_true',
help="Whether to lower case the input text. True for uncased models, False for cased models.")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumualte before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type = float, default = 0,
help = "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train:
raise ValueError("Training is currently the only implemented execution option. Please set `do_train`.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
#train_examples = None
num_train_optimization_steps = None
if args.do_train:
print("Loading Train Dataset", args.train_file)
train_dataset = BERTDataset(args.train_file, tokenizer, seq_len=args.max_seq_length,
corpus_lines=None, on_memory=args.on_memory)
num_train_optimization_steps = int(
len(train_dataset) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
# Prepare model
model = BertForPreTraining.from_pretrained(args.bert_model)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
global_step = 0
if args.do_train:
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
if args.local_rank == -1:
train_sampler = RandomSampler(train_dataset)
else:
#TODO: check if this works with current data generator from disk that relies on next(file)
# (it doesn't return item back by index)
train_sampler = DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch
loss = model(input_ids, segment_ids, input_mask, lm_label_ids, is_next)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
# modify learning rate with special warm up BERT uses
# if args.fp16 is False, BertAdam is used that handles this automatically
lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
# Save a trained model
logger.info("** ** * Saving fine - tuned model ** ** * ")
    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
if args.do_train:
torch.save(model_to_save.state_dict(), output_model_file)
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
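# Small sketch (illustrative only) of the greedy truncation above: tokens are
# popped one at a time from whichever sequence is currently longer.
def _demo_truncate_seq_pair():
    tokens_a = list("abcdefgh")  # 8 tokens
    tokens_b = list("xyz")       # 3 tokens
    _truncate_seq_pair(tokens_a, tokens_b, max_length=6)
    # Only tokens_a shrinks here: ['a', 'b', 'c'] vs. ['x', 'y', 'z'].
    assert tokens_a == ["a", "b", "c"] and tokens_b == ["x", "y", "z"]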
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
if __name__ == "__main__":
main() | 27,941 | 42.187017 | 139 | py |
visualbert | visualbert-master/visualbert/pytorch_pretrained_bert/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import os
import logging
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Optional, Tuple, Union, IO, Callable, Set
from hashlib import sha256
from functools import wraps
from tqdm import tqdm
import boto3
from botocore.exceptions import ClientError
import requests
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
def url_to_filename(url: str, etag: str = None) -> str:
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
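# Illustrative sketch (not in the original module): the cache filename is
# sha256(url) in hex, with sha256(etag) appended after a period when an ETag is
# available. The URL below is made up.
def _demo_url_to_filename():
    url = "https://example.com/model.bin"
    name = url_to_filename(url, etag='"abc"')
    expected_base = sha256(url.encode("utf-8")).hexdigest()
    assert name.startswith(expected_base + ".")
    return name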
def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]:
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise FileNotFoundError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise FileNotFoundError("file {} not found".format(meta_path))
with open(meta_path) as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise FileNotFoundError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url: str) -> Tuple[str, str]:
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
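# Quick sketch (illustrative only) of the bucket/key split above.
def _demo_split_s3_path():
    bucket, key = split_s3_path("s3://my-bucket/models/bert.bin")
    assert (bucket, key) == ("my-bucket", "models/bert.bin")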
def s3_request(func: Callable):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise FileNotFoundError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url: str) -> Optional[str]:
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url: str, temp_file: IO) -> None:
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename: str) -> Set[str]:
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path: str, dot=True, lower: bool = True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
| 8,021 | 32.425 | 98 | py |
visualbert | visualbert-master/visualbert/dataloaders/vcr.py | # Modifed from R2C
"""
Dataloaders for VCR
"""
import json
import pickle
import os
from collections import defaultdict
import numpy as np
import numpy
import torch
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField, LabelField, SequenceLabelField, ArrayField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import ELMoTokenCharactersIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn.util import get_text_field_mask
from torch.utils.data import Dataset
from dataloaders.box_utils import load_image, resize_image, to_tensor_and_normalize
from dataloaders.mask_utils import make_mask
from dataloaders.bert_field import BertField
import h5py
from copy import deepcopy
from tqdm import tqdm
from .vcr_data_utils import data_iter, data_iter_test, data_iter_item
from .bert_data_utils import InputExample, InputFeatures, get_one_image_feature_npz_screening_parameters, get_image_feat_reader, faster_RCNN_feat_reader, screen_feature
from .bert_field import IntArrayField
from visualbert.pytorch_pretrained_bert.fine_tuning import _truncate_seq_pair, random_word
from visualbert.pytorch_pretrained_bert.tokenization import BertTokenizer
GENDER_NEUTRAL_NAMES = ['Casey', 'Riley', 'Jessie', 'Jackie', 'Avery', 'Jaime', 'Peyton', 'Kerry', 'Jody', 'Kendall',
'Peyton', 'Skyler', 'Frankie', 'Pat', 'Quinn']
# Here's an example jsonl
# {
# "movie": "3015_CHARLIE_ST_CLOUD",
# "objects": ["person", "person", "person", "car"],
# "interesting_scores": [0],
# "answer_likelihood": "possible",
# "img_fn": "lsmdc_3015_CHARLIE_ST_CLOUD/3015_CHARLIE_ST_CLOUD_00.23.57.935-00.24.00.783@0.jpg",
# "metadata_fn": "lsmdc_3015_CHARLIE_ST_CLOUD/3015_CHARLIE_ST_CLOUD_00.23.57.935-00.24.00.783@0.json",
# "answer_orig": "No she does not",
# "question_orig": "Does 3 feel comfortable?",
# "rationale_orig": "She is standing with her arms crossed and looks disturbed",
# "question": ["Does", [2], "feel", "comfortable", "?"],
# "answer_match_iter": [3, 0, 2, 1],
# "answer_sources": [3287, 0, 10184, 2260],
# "answer_choices": [
# ["Yes", "because", "the", "person", "sitting", "next", "to", "her", "is", "smiling", "."],
# ["No", "she", "does", "not", "."],
# ["Yes", ",", "she", "is", "wearing", "something", "with", "thin", "straps", "."],
# ["Yes", ",", "she", "is", "cold", "."]],
# "answer_label": 1,
# "rationale_choices": [
# ["There", "is", "snow", "on", "the", "ground", ",", "and",
# "she", "is", "wearing", "a", "coat", "and", "hate", "."],
# ["She", "is", "standing", "with", "her", "arms", "crossed", "and", "looks", "disturbed", "."],
# ["She", "is", "sitting", "very", "rigidly", "and", "tensely", "on", "the", "edge", "of", "the",
# "bed", ".", "her", "posture", "is", "not", "relaxed", "and", "her", "face", "looks", "serious", "."],
# [[2], "is", "laying", "in", "bed", "but", "not", "sleeping", ".",
# "she", "looks", "sad", "and", "is", "curled", "into", "a", "ball", "."]],
# "rationale_sources": [1921, 0, 9750, 25743],
# "rationale_match_iter": [3, 0, 2, 1],
# "rationale_label": 1,
# "img_id": "train-0",
# "question_number": 0,
# "annot_id": "train-0",
# "match_fold": "train-0",
# "match_index": 0,
# }
class VCR(Dataset):
def __init__(self,
split,
mode,
only_use_relevant_dets=True,
add_image_as_a_box=True,
conditioned_answer_choice=0,
do_lower_case = True,
bert_model_name = "",
max_seq_length = 128,
pretraining = False,
pretraining_include_qa_and_qar = False,
complete_shuffle = False,
use_alignment = False,
add_all_features = False,
answer_labels_path = None,
vcr_annots_dir = None,
vcr_image_dir = None
):
# Should clean this mess when I find the time...
self.split = split
self.mode = mode
self.only_use_relevant_dets = only_use_relevant_dets
self.pretraining_include_qa_and_qar = pretraining_include_qa_and_qar
self.add_all_features = add_all_features
self.use_alignment = use_alignment
self.add_image_as_a_box = add_image_as_a_box
self.conditioned_answer_choice = conditioned_answer_choice
self.vcr_annots_dir = vcr_annots_dir
self.vcr_image_dir = vcr_image_dir
with open(os.path.join(self.vcr_annots_dir, '{}.jsonl'.format(split)), 'r') as f:
self.items = [json.loads(s) for s in f]
        if split not in ('test', 'train', 'val'):
            raise ValueError("split must be in test, train, or val. Supplied {}".format(split))
        if mode not in ('answer', 'rationale'):
            raise ValueError("mode must be answer or rationale. Supplied {}".format(mode))
self.vocab = Vocabulary()
with open(os.path.join(os.path.dirname(self.vcr_annots_dir), 'dataloaders', 'cocoontology.json'), 'r') as f:
coco = json.load(f)
self.coco_objects = ['__background__'] + [x['name'] for k, x in sorted(coco.items(), key=lambda x: int(x[0]))]
self.coco_obj_to_ind = {o: i for i, o in enumerate(self.coco_objects)}
self.do_lower_case = do_lower_case
self.bert_model_name = bert_model_name
self.max_seq_length = max_seq_length
self.tokenizer = BertTokenizer.from_pretrained(self.bert_model_name, do_lower_case=self.do_lower_case)
self.pretraining = pretraining
# This is for pretraining
self.masked_lm_prob = 0.15
self.max_predictions_per_seq = 20
self.complete_shuffle = complete_shuffle
##########
        self.only_qar = self.mode == 'rationale'
if answer_labels_path is not None:
# Only when we are testing rationale...
assert(self.only_qar)
            if answer_labels_path in (0, 1, 2, 3):
                # Condition every item on one fixed answer choice.
                for i in self.items:
                    i["answer_label"] = answer_labels_path
else:
self.answer_labels = np.load(answer_labels_path)
self.answer_labels = self.answer_labels.argmax(1)
if self.split == "test":
assert(self.answer_labels.shape[0] == len(self))
for index, i in enumerate(self.items):
i["answer_label"] = self.answer_labels[index]
else:
self.answer_labels = None
@property
def is_train(self):
return self.split == 'train'
@classmethod
def splits(cls, **kwargs):
""" Helper method to generate splits of the dataset"""
kwargs_copy = {x: y for x, y in kwargs.items()}
if 'mode' not in kwargs:
kwargs_copy['mode'] = 'answer'
train = cls(split='train', **kwargs_copy)
val = cls(split='val', **kwargs_copy)
test = cls(split='test', **kwargs_copy)
return train, val, test
def __len__(self):
if self.complete_shuffle:
if self.pretraining_include_qa_and_qar:
return len(self.items) * 8
else:
return len(self.items) * 4
return len(self.items)
def _get_dets_to_use(self, item, only_use_answer = False, only_use_qar = False): # Need to fix this match
"""
        We might want to use fewer detections, so let's do so.
:param item:
:param question:
:param answer_choices:
:return:
"""
# Load questions and answers
question = item['question']
answer_choices = item['{}_choices'.format(self.mode)]
if self.mode == "answer":
question = item['question']
answer_choices = item['{}_choices'.format(self.mode)]
elif self.mode == "rationale":
question = item['question'] + item['answer_choices'][item['answer_label']]
answer_choices = item['{}_choices'.format(self.mode)]
if self.pretraining_include_qa_and_qar:
answer_choices = item['answer_choices'] + item['rationale_choices']
if self.add_all_features:
question = item['question']
answer_choices = item['answer_choices'] + item['rationale_choices']
if self.only_use_relevant_dets:
dets2use = np.zeros(len(item['objects']), dtype=bool)
people = np.array([x == 'person' for x in item['objects']], dtype=bool)
for sent in answer_choices + [question]:
for possibly_det_list in sent:
if isinstance(possibly_det_list, list):
for tag in possibly_det_list:
if tag >= 0 and tag < len(item['objects']): # sanity check
dets2use[tag] = True
elif possibly_det_list.lower() in ('everyone', 'everyones'):
dets2use |= people
if not dets2use.any():
dets2use |= people
else:
dets2use = np.ones(len(item['objects']), dtype=bool)
# we will use these detections
dets2use = np.where(dets2use)[0]
old_det_to_new_ind = np.zeros(len(item['objects']), dtype=np.int32) - 1
old_det_to_new_ind[dets2use] = np.arange(dets2use.shape[0], dtype=np.int32)
# If we add the image as an extra box then the 0th will be the image.
if self.add_image_as_a_box:
old_det_to_new_ind[dets2use] += 1
old_det_to_new_ind = old_det_to_new_ind.tolist()
return dets2use, old_det_to_new_ind
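    # Worked example (illustrative, not from the original source): with
    # objects == ['person', 'person', 'car'] and only detection 2 referenced,
    # dets2use == [2] and old_det_to_new_ind == [-1, -1, 0]; when
    # add_image_as_a_box is set, the used entry shifts to 1 because slot 0 is
    # reserved for the whole-image box, giving [-1, -1, 1].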
def __getitem__(self, index):
if self.complete_shuffle:
if self.pretraining_include_qa_and_qar:
                which = index % 8
                index = index // 8
else:
                which = index % 4
                index = index // 4
else:
which = None
item = deepcopy(self.items[index])
###################################################################
# Load questions and answers
answer_choices = item['{}_choices'.format(self.mode)]
if self.complete_shuffle and which < 4:
only_use_answer = True
else:
only_use_answer = False
if self.complete_shuffle and which >= 4:
only_use_qar = True
else:
only_use_qar = False
dets2use, old_det_to_new_ind = self._get_dets_to_use(item, only_use_answer = only_use_answer, only_use_qar = only_use_qar)
        # The only_use_qar flag is ambiguous...
instance_dict = {}
if self.split != 'test':
instance_dict['label'] = LabelField(item['{}_label'.format(self.mode)], skip_indexing=True)
instance_dict['metadata'] = MetadataField({'annot_id': item['annot_id'], 'ind': index, 'movie': item['movie'],
'img_fn': item['img_fn'],
'question_number': item['question_number']})
###################################################################
# Load image now and rescale it. Might have to subtract the mean and whatnot here too.
image = load_image(os.path.join(self.vcr_image_dir, item['img_fn']))
#image = self.imagedatas(item['img_fn'])
image, window, img_scale, padding = resize_image(image, random_pad=self.is_train)
image = to_tensor_and_normalize(image)
c, h, w = image.shape
###################################################################
# Load boxes.
with open(os.path.join(self.vcr_image_dir, item['metadata_fn']), 'r') as f:
metadata = json.load(f)
# [nobj, 14, 14]
segms = np.stack([make_mask(mask_size=14, box=metadata['boxes'][i], polygons_list=metadata['segms'][i]) for i in dets2use])
# Chop off the final dimension, that's the confidence
boxes = np.array(metadata['boxes'])[dets2use, :-1]
# Possibly rescale them if necessary
boxes *= img_scale
boxes[:, :2] += np.array(padding[:2])[None]
boxes[:, 2:] += np.array(padding[:2])[None]
obj_labels = [self.coco_obj_to_ind[item['objects'][i]] for i in dets2use.tolist()]
if self.add_image_as_a_box:
boxes = np.row_stack((window, boxes))
segms = np.concatenate((np.ones((1, 14, 14), dtype=np.float32), segms), 0)
obj_labels = [self.coco_obj_to_ind['__background__']] + obj_labels
examples = data_iter_item(item, tokenizer=self.tokenizer,
max_seq_length=self.max_seq_length,
endingonly=False,
include_qar = self.pretraining_include_qa_and_qar,
only_qar = self.only_qar)
self.getitem_bert_part(examples, item, instance_dict, which)
if self.use_alignment: # Alignment between objects and text
######################
            examples_alignment_pack = []
for i in range(len(examples)):
if self.pretraining_include_qa_and_qar:
if i < 4:
raw_text_a = item["question"]
raw_text_b = item['answer_choices'][i]
else:
raw_text_a = item["question"] + item['answer_choices'][item['answer_label']]
raw_text_b = item['rationale_choices'][i - 4]
elif self.only_qar:
raw_text_a = item["question"] + item['answer_choices'][item['answer_label']] # This is the correct alignment right now.
raw_text_b = item['rationale_choices'][i]
else:
raw_text_a = item["question"]
raw_text_b = item['answer_choices'][i]
true_text_a = examples[i][0].text_a
true_text_b = examples[i][0].text_b
text_alignment_a = examples[i][1]
text_alignment_b = examples[i][2]
                examples_alignment_pack.append((raw_text_a, raw_text_b, true_text_a, true_text_b, text_alignment_a, text_alignment_b))
image_box_position = []
if which is not None:
                raw_text_a, raw_text_b, true_text_a, true_text_b, text_alignment_a, text_alignment_b = examples_alignment_pack[which]
box_record = defaultdict(list)
self.get_alignment_original(raw_text_a, text_alignment_a, old_det_to_new_ind, box_record, offset = 1)
self.get_alignment_original(raw_text_b, text_alignment_b, old_det_to_new_ind, box_record, offset = 1 + len(text_alignment_a) + 1)
image_text_alignment = ListField([IntArrayField(np.array(box_record[i]), padding_value = -1) for i in range(len(boxes))])
else:
                for raw_text_a, raw_text_b, true_text_a, true_text_b, text_alignment_a, text_alignment_b in examples_alignment_pack:
box_record = defaultdict(list)
self.get_alignment_original(raw_text_a, text_alignment_a, old_det_to_new_ind, box_record, offset = 1)
self.get_alignment_original(raw_text_b, text_alignment_b, old_det_to_new_ind, box_record, offset = 1 + len(text_alignment_a) + 1)
image_box_position.append(ListField([IntArrayField(np.array(box_record[i]), padding_value = -1) for i in range(len(boxes))]))
image_text_alignment = ListField(image_box_position)
######################
instance_dict["image_text_alignment"] = image_text_alignment
instance_dict['segms'] = ArrayField(segms, padding_value=0)
instance_dict['objects'] = ListField([LabelField(x, skip_indexing=True) for x in obj_labels])
if not np.all((boxes[:, 0] >= 0.) & (boxes[:, 0] < boxes[:, 2])):
import ipdb
ipdb.set_trace()
assert np.all((boxes[:, 1] >= 0.) & (boxes[:, 1] < boxes[:, 3]))
assert np.all((boxes[:, 2] <= w))
assert np.all((boxes[:, 3] <= h))
instance_dict['boxes'] = ArrayField(boxes, padding_value=-1)
instance = Instance(instance_dict)
instance.index_fields(self.vocab)
return image, instance
def get_alignment_original(self, raw_text_mixed, text_alignment, old_det_to_new_ind, box_record, offset):
# raw_text_mixed is the raw text information in VCR dataset
# text_alignment is the result from BERT tokenizer recording the alignment between raw tokens and subword tokens.
counter = 0
for i in raw_text_mixed:
if isinstance(i, list):
for box_index in i:
new_box_index = old_det_to_new_ind[box_index]
assert(new_box_index != -1)
# Need to record which box corresponds to which person.
for i in text_alignment:
if i == counter:
box_record[new_box_index].append(i + offset)
break
counter += 1
else:
counter += 1
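    # Worked trace (illustrative): a detection tag such as [2] occupies one
    # running position in raw_text_mixed; the value recorded for its box is
    # that position plus the offset, where offset == 1 for text_a (the leading
    # [CLS]) and len(text_alignment_a) + 2 for text_b ([CLS] plus the
    # intervening [SEP]).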
def getitem_bert_part(self, examples, item, instance_dict, which = None):
# In examples, each element: InputExample, Alignment for context, algiment for answer
if self.pretraining:
if self.complete_shuffle:
assert(which is not None)
feature = InputFeatures.convert_one_example_to_features_pretraining(
example = examples[which][0],
tokenizer=self.tokenizer,
probability = self.masked_lm_prob)
feature.insert_field_into_dict(instance_dict)
return
features = []
for i in examples:
inputexample_instance = i[0]
example = InputFeatures.convert_one_example_to_features_pretraining(
example = inputexample_instance,
tokenizer=self.tokenizer,
probability = self.masked_lm_prob)
features.append(example)
InputFeatures.convert_list_features_to_allennlp_list_feild(features, instance_dict)
else:
features = InputFeatures.convert_examples_to_features(
examples=[x[0] for x in examples],
tokenizer=self.tokenizer)
InputFeatures.convert_list_features_to_allennlp_list_feild(features, instance_dict)
@staticmethod
def collate_fn(data):
if isinstance(data[0], Instance):
batch = Batch(data)
td = batch.as_tensor_dict()
return td
else:
images, instances = zip(*data)
images = torch.stack(images, 0)
batch = Batch(instances)
td = batch.as_tensor_dict()
if 'question' in td:
td['question_mask'] = get_text_field_mask(td['question'], num_wrapping_dims=1)
td['question_tags'][td['question_mask'] == 0] = -2 # Padding
if "answer" in td:
td['answer_mask'] = get_text_field_mask(td['answers'], num_wrapping_dims=1)
td['answer_tags'][td['answer_mask'] == 0] = -2
td['box_mask'] = torch.all(td['boxes'] >= 0, -1).long()
td['images'] = images
return td
class VCRLoader(torch.utils.data.DataLoader):
"""
Iterates through the data, filtering out None,
but also loads everything as a (cuda) variable
"""
@classmethod
def from_dataset(cls, data, batch_size=3, num_workers=6, num_gpus=3, **kwargs):
loader = cls(
dataset=data,
batch_size=batch_size * num_gpus,
shuffle=data.is_train,
num_workers=num_workers,
collate_fn=data.collate_fn,
drop_last=False,
pin_memory=False,
**kwargs,
)
return loader | 20,515 | 42.191579 | 168 | py |
visualbert | visualbert-master/visualbert/dataloaders/flickr_dataset.py | import os
from torch.utils.data import Dataset
import numpy as np
import random
import json
from collections import defaultdict
from tqdm import tqdm
import numpy
import torch
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField, LabelField, SequenceLabelField, ArrayField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import ELMoTokenCharactersIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn.util import get_text_field_mask
from dataloaders.box_utils import load_image, resize_image, to_tensor_and_normalize
from dataloaders.mask_utils import make_mask
from dataloaders.bert_field import BertField
import h5py
from copy import deepcopy
from torch.utils.data.dataloader import default_collate
from pytorch_pretrained_bert.fine_tuning import _truncate_seq_pair, random_word
from dataloaders.bert_field import IntArrayField
import _pickle as cPickle
import utils
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=FutureWarning)
import h5py
from xml.etree.ElementTree import parse
import itertools
import re
COUNTING_ONLY = False
from .flickr_ban.dataset import _load_flickr30k, _load_flickr30k_our
from .bert_data_utils import *
from .vcr_data_utils import retokenize_with_alignment
class Flickr30kFeatureDataset(Dataset):
def __init__(self, name, args, dictionary = None, data_root='data/flickr30k/', chunk = None, entries = None):
super(Flickr30kFeatureDataset, self).__init__()
self.add_spatial_features = args.add_spatial_features
self.dictionary = dictionary
self.use_visual_genome = args.get("use_visual_genome", True)
if self.use_visual_genome:
self.img_id2idx = cPickle.load(
open(os.path.join(data_root, '%s_imgid2idx.pkl' % name), 'rb'))
h5_path = os.path.join(data_root, '%s.hdf5' % name)
with h5py.File(h5_path, 'r') as hf:
self.features = np.array(hf.get('image_features'))
self.spatials = np.array(hf.get('spatial_features'))
self.bbox = np.array(hf.get('image_bb'))
self.pos_boxes = np.array(hf.get('pos_boxes'))
self.entries = _load_flickr30k(data_root, self.img_id2idx, self.bbox, self.pos_boxes, limit = None, cache_name = name)
else:
self.features_chunk = chunk
self.entries = entries
self.pretraining = args.pretraining
from pytorch_pretrained_bert.tokenization import BertTokenizer
self.do_lower_case = args.do_lower_case
self.bert_model_name = args.bert_model_name
self.max_seq_length = args.max_seq_length
self.tokenizer = BertTokenizer.from_pretrained(self.bert_model_name, do_lower_case=self.do_lower_case)
self.masked_lm_prob = args.get("masked_lm_prob", 0.15)
@classmethod
def splits(cls, args):
data_root = args.data_root
if args.get("use_visual_genome", True):
chunk = None
train_entries = None
val_entries = None
test_entries = None
else:
assert(0)
'''
chunk = torch.load(chunk_data)
image_screening_parameters = args.image_screening_parameters
for image_id in chunk.keys():
image_feat_variable, image_boxes, confidence, image_h, image_w = chunk[image_id]
image_feat_variable, image_boxes, confidence = screen_feature(image_feat_variable, image_boxes, confidence, image_screening_parameters)
chunk[image_id] = (image_feat_variable, image_boxes, confidence, image_h, image_w)
train_ids = cPickle.load(
open(os.path.join(data_root, '%s_imgid2idx.pkl' % "train"), 'rb'))
val_ids = cPickle.load(
open(os.path.join(data_root, '%s_imgid2idx.pkl' % "val"), 'rb'))
test_ids = cPickle.load(
open(os.path.join(data_root, '%s_imgid2idx.pkl' % "test"), 'rb'))
val_ids = list(val_ids.keys())
train_ids = list(train_ids.keys())
test_ids = list(test_ids.keys())
entities_data_path = os.path.join(data_root, args.entries_path)
if not os.path.exists(entities_data_path):
entries = _load_flickr30k_our(data_root, chunk)
with open(entities_data_path, 'wb') as f:
cPickle.dump(entries, f)
else:
entries = cPickle.load(open(entities_data_path, "rb"))
train_entries = [i for i in entries if int(i["image"][:-4]) in train_ids]
val_entries = [i for i in entries if int(i["image"][:-4]) in val_ids]
test_entries = [i for i in entries if int(i["image"][:-4]) in test_ids]
'''
train = cls(name = "train", args = args, data_root = data_root, chunk = chunk, entries = train_entries)
train.is_train = True
val = cls(name = "val", args = args, data_root = data_root, chunk = chunk, entries = val_entries)
val.is_train = False
test = cls(name = "test", args = args, data_root = data_root, chunk = chunk, entries = test_entries)
test.is_train = False
return train, val, test
def tokenize(self, max_length=82):
"""Tokenizes the questions.
This will add q_token in each entry of the dataset.
-1 represent nil, and should be treated as padding_idx in embedding
"""
for entry in self.entries:
tokens = self.dictionary.tokenize(entry['sentence'], False)
tokens = tokens[:max_length]
if len(tokens) < max_length:
                # Note: here we pad at the end of the sentence
padding = [self.dictionary.padding_idx] * (max_length - len(tokens))
tokens = tokens + padding
utils.assert_eq(len(tokens), max_length)
entry['p_token'] = tokens
def tensorize(self, max_box=100, max_entities=16, max_length=82):
self.features = torch.from_numpy(self.features)
self.spatials = torch.from_numpy(self.spatials)
for entry in self.entries:
phrase = torch.from_numpy(np.array(entry['p_token']))
entry['p_token'] = phrase
assert len(entry['target_indices']) == entry['entity_num']
assert len(entry['entity_indices']) == entry['entity_num']
target_tensors = []
for i in range(entry['entity_num']):
target_tensor = torch.zeros(1, max_box)
if len(entry['target_indices'][i]) > 0:
target_idx = torch.from_numpy(np.array(entry['target_indices'][i]))
target_tensor = torch.zeros(max_box).scatter_(0, target_idx, 1).unsqueeze(0)
target_tensors.append(target_tensor)
assert len(target_tensors) <= max_entities, '> %d entities!' % max_entities
for i in range(max_entities - len(target_tensors)):
target_tensor = torch.zeros(1, max_box)
target_tensors.append(target_tensor)
entry['entity_ids'].append(0)
# padding entity_indices with non-overlapping indices
entry['entity_indices'] += [x for x in range(max_length) if x not in entry['entity_indices']]
entry['entity_indices'] = entry['entity_indices'][:max_entities]
entry['target'] = torch.cat(target_tensors, 0)
# entity positions in (e) tensor
entry['e_pos'] = torch.LongTensor(entry['entity_indices'])
entry['e_num'] = torch.LongTensor([entry['entity_num']])
entry['entity_ids'] = torch.LongTensor(entry['entity_ids'])
entry['entity_types'] = torch.LongTensor(entry['entity_types'])
def __getitem__(self, index):
entry = self.entries[index]
sentence = entry['sentence']
e_pos = entry['entity_indices']
e_num = entry['entity_num']
target = entry['target_indices']
entity_ids = entry['entity_ids']
entity_types = entry['entity_types']
#v, b, p, e, n, a, idx, types
if self.use_visual_genome:
features = self.features[self.pos_boxes[entry['image']][0]:self.pos_boxes[entry['image']][1], :]
spatials = self.spatials[self.pos_boxes[entry['image']][0]:self.pos_boxes[entry['image']][1], :]
else:
image_id = entry["image"]
features, cls_boxes, max_conf, image_h, image_w = self.features_chunk[image_id]
if self.add_spatial_features:
features = np.concatenate((features, spatials), axis=1)
else:
spatials = None
sample = {}
image_feat_variable = ArrayField(features)
image_dim_variable = IntArrayField(np.array(len(features)))
sample["image_feat_variable"] = image_feat_variable
sample["image_dim_variable"] = image_dim_variable
tokenized_sentence, alignment = retokenize_with_alignment(sentence.split(" "), self.tokenizer)
e_pos_after_subword = []
current_index = 0
for position in e_pos:
for index, i in enumerate(alignment):
if i == position:
if index == len(alignment) - 1 or alignment[index+1] != i:
                        e_pos_after_subword.append(index + 1)  # +1 to account for the prepended [CLS] token
if len(e_pos_after_subword) != len(e_pos) or len(e_pos_after_subword) != len(target):
assert(0)
# Need to convert target into soft scores:
target_len = features.shape[0]
new_target = []
for i in target:
new_i = [0.0] * target_len
if len(i) != 0:
score = 1.0 / len(i)
for j in i:
new_i[j] = score
new_target.append(new_i)
# target = entity_num x v_feature_size
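        # e.g. (illustrative): with target_len == 4 and target == [[0, 2], []],
        # new_target becomes [[0.5, 0.0, 0.5, 0.0], [0.0, 0.0, 0.0, 0.0]]:
        # a uniform distribution over an entity's gold boxes, all zeros when
        # the entity has no matching box.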
target = ArrayField(np.array(new_target, dtype="float"), padding_value = 0.0)
original_position = IntArrayField(np.array(e_pos_after_subword, dtype="int"), padding_value = -1)
sample["label"] = target # Remember that sometimes that label is empty for certain entities, that's because the boxes we provided do not have a match.
sample["flickr_position"] = original_position
bert_example = InputExample(unique_id = -1, text_a = tokenized_sentence, text_b = None, is_correct = None, max_seq_length = self.max_seq_length)
if self.pretraining:
bert_feature = InputFeatures.convert_one_example_to_features_pretraining(
example = bert_example,
tokenizer=self.tokenizer,
probability = self.masked_lm_prob)
bert_feature.insert_field_into_dict(sample)
else:
bert_feature = InputFeatures.convert_one_example_to_features(
example = bert_example,
tokenizer=self.tokenizer)
bert_feature.insert_field_into_dict(sample)
return Instance(sample)
def __len__(self):
return len(self.entries)
@staticmethod
def collate_fn(data):
batch = Batch(data)
td = batch.as_tensor_dict()
return td
| 11,696 | 40.626335 | 158 | py |
visualbert | visualbert-master/visualbert/dataloaders/box_utils.py | import os
import random
import numpy as np
import scipy
import warnings
from torchvision.datasets.folder import default_loader
from torchvision.transforms import functional
USE_IMAGENET_PRETRAINED = True
##### Image
def load_image(img_fn):
"""Load the specified image and return a [H,W,3] Numpy array.
"""
return default_loader(img_fn)
# # Load image
# image = skimage.io.imread(img_fn)
# # If grayscale. Convert to RGB for consistency.
# if image.ndim != 3:
# image = skimage.color.gray2rgb(image)
# # If has an alpha channel, remove it for consistency
# if image.shape[-1] == 4:
# image = image[..., :3]
# return image
# Let's do 16x9
# Two common resolutions: 16x9 and 16x6 -> go to 16x8 as that's simple.
# Say the height is x and the width 2x; for neural motifs the budget was 576*576 = 331776 pixels,
# so 2x*x = 331776 gives x ~= 408, and the closest height divisible by 4 is 384.
def resize_image(image, desired_width=768, desired_height=384, random_pad=False):
"""Resizes an image keeping the aspect ratio mostly unchanged.
Returns:
image: the resized image
    window: (x1, y1, x2, y2). Padding may be inserted in the returned
        image to reach the desired size; this window gives the
        coordinates of the actual image part of the padded output
        (excluding the padding). The x2, y2 pixels are not included.
scale: The scale factor used to resize the image
padding: Padding added to the image [left, top, right, bottom]
"""
# Default window (x1, y1, x2, y2) and default scale == 1.
w, h = image.size
width_scale = desired_width / w
height_scale = desired_height / h
scale = min(width_scale, height_scale)
# Resize image using bilinear interpolation
if scale != 1:
image = functional.resize(image, (round(h * scale), round(w * scale)))
w, h = image.size
y_pad = desired_height - h
x_pad = desired_width - w
top_pad = random.randint(0, y_pad) if random_pad else y_pad // 2
left_pad = random.randint(0, x_pad) if random_pad else x_pad // 2
padding = (left_pad, top_pad, x_pad - left_pad, y_pad - top_pad)
assert all([x >= 0 for x in padding])
image = functional.pad(image, padding)
window = [left_pad, top_pad, w + left_pad, h + top_pad]
return image, window, scale, padding
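# Small sketch (illustrative only) of the scale/padding arithmetic above: a
# 1600x800 image against the default 768x384 canvas scales by
# min(768/1600, 384/800) == 0.48 and needs no padding.
def _demo_resize_math(w=1600, h=800, desired_width=768, desired_height=384):
    scale = min(desired_width / w, desired_height / h)
    x_pad = desired_width - round(w * scale)
    y_pad = desired_height - round(h * scale)
    return scale, x_pad, y_pad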
if USE_IMAGENET_PRETRAINED:
def to_tensor_and_normalize(image):
return functional.normalize(functional.to_tensor(image), mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
else:
# For COCO pretrained
def to_tensor_and_normalize(image):
tensor255 = functional.to_tensor(image) * 255
return functional.normalize(tensor255, mean=(102.9801, 115.9465, 122.7717), std=(1, 1, 1))
| 2,765 | 35.88 | 119 | py |
visualbert | visualbert-master/visualbert/dataloaders/nlvr_dataset.py | import os
from torch.utils.data import Dataset
import numpy as np
import random
import json
from collections import defaultdict
from tqdm import tqdm
import numpy
import torch
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField, LabelField, SequenceLabelField, ArrayField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import ELMoTokenCharactersIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn.util import get_text_field_mask
from dataloaders.box_utils import load_image, resize_image, to_tensor_and_normalize
from dataloaders.mask_utils import make_mask
from dataloaders.bert_field import BertField
import h5py
from copy import deepcopy
from torch.utils.data.dataloader import default_collate
from pytorch_pretrained_bert.fine_tuning import _truncate_seq_pair, random_word
from dataloaders.bert_field import IntArrayField
import numpy as np
from allennlp.data.fields import ListField
from visualbert.pytorch_pretrained_bert.tokenization import BertTokenizer
from .bert_data_utils import *
class NLVRDataset(Dataset):
def __init__(self, args):
super(NLVRDataset, self).__init__()
self.args = args
self.annots_path = args.annots_path
self.split = args.split
self.text_only = args.get("text_only", False)
self.no_next_sentence = args.get("no_next_sentence", False)
with open(self.annots_path, 'r') as f:
self.items = [json.loads(s) for s in f]
self.image_feat_reader = faster_RCNN_feat_reader()
self.image_screening_parameters = self.args.image_screening_parameters
if args.get("chunk_path", None) is not None:
self.chunk = torch.load(args.chunk_path)
average = 0.0
new_chunk = {}
for image_id in self.chunk.keys():
image_feat_variable, image_boxes, confidence = self.chunk[image_id]
if "npz" not in image_id:
new_chunk[image_id+".npz"] = screen_feature(image_feat_variable, image_boxes,confidence, self.image_screening_parameters)
average += new_chunk[image_id+".npz"][2]
else:
new_chunk[image_id] = screen_feature(image_feat_variable, image_boxes,confidence, self.image_screening_parameters)
average += new_chunk[image_id][2]
self.chunk = new_chunk
print("{} features on average.".format(average/len(self.chunk)))
##########
self.do_lower_case = args.do_lower_case
self.bert_model_name = args.bert_model_name
self.max_seq_length = args.max_seq_length
# 1. Initialize the BERT tokenizer
self.tokenizer = BertTokenizer.from_pretrained(self.bert_model_name, do_lower_case=self.do_lower_case)
self.pretraining = args.pretraining
# This is for pretraining
self.masked_lm_prob = 0.15
self.max_predictions_per_seq = 20
def get_image_features_by_training_index(self, index, which_one):
item = self.items[index]
image_file_name = "{}img{}.png.npz".format(item['identifier'][:-1], which_one)
return self.chunk[image_file_name]
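    # Filename pattern example: an item with identifier "test1-123-0-0" and
    # which_one=0 maps to "test1-123-0-img0.png.npz" (identifier shown is illustrative).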
def __len__(self):
return len(self.items)
def __getitem__(self, index):
item = self.items[index]
sample = {}
if not self.text_only:
image_feat_variable_0, image_boxes_0, image_dim_variable_0 = self.get_image_features_by_training_index(index, 0)
image_feat_variable_1, image_boxes_1, image_dim_variable_1 = self.get_image_features_by_training_index(index, 1)
visual_embeddings_type_0 = np.zeros(image_feat_variable_0.shape[0])
visual_embeddings_type_1 = np.ones(image_feat_variable_1.shape[0])
visual_embeddings_type = numpy.concatenate((visual_embeddings_type_0, visual_embeddings_type_1), axis = 0)
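            # e.g. with 36 regions per image this is [0]*36 + [1]*36, marking
            # which of the two NLVR2 images each region feature came from.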
image_feat_variable = numpy.concatenate((image_feat_variable_0, image_feat_variable_1), axis = 0)
image_dim_variable = image_dim_variable_0 + image_dim_variable_1
image_feat_variable = torch.Tensor(image_feat_variable)
#image_boxes = ArrayField(image_boxes)
image_dim_variable = torch.LongTensor([image_dim_variable])
visual_embeddings_type = torch.LongTensor(visual_embeddings_type)
sample["image_feat_variable"] = image_feat_variable
#sample["image_boxes"] = image_boxes
sample["image_dim_variable"] = image_dim_variable
sample["visual_embeddings_type"] = visual_embeddings_type
caption_a = item["sentence"]
if item.get("label", None) is not None:
sample["label"] = torch.LongTensor([1 if item["label"] == "True" else 0])
else:
sample["label"] = torch.LongTensor([0]) # Pseudo label
if self.pretraining:
subword_tokens_a = self.tokenizer.tokenize(caption_a)
if self.no_next_sentence:
bert_example = InputExample(unique_id = index, text_a = subword_tokens_a, text_b = None, is_correct=None, max_seq_length = self.max_seq_length)
else:
bert_example = InputExample(unique_id = index, text_a = subword_tokens_a, text_b = None, is_correct=1 if item["label"] == "True" else 0, max_seq_length = self.max_seq_length)
bert_feature = InputFeatures.convert_one_example_to_features_pretraining(
example = bert_example,
tokenizer=self.tokenizer,
probability = 0.15)
bert_feature.insert_tensor_into_dict(sample)
else:
subword_tokens_a = self.tokenizer.tokenize(caption_a)
bert_example = InputExample(unique_id = index, text_a = subword_tokens_a, text_b = None, is_correct=1 if item.get("label", None) == "True" else 0, max_seq_length = self.max_seq_length)
bert_feature = InputFeatures.convert_one_example_to_features(
example = bert_example,
tokenizer=self.tokenizer)
bert_feature.insert_tensor_into_dict(sample)
return sample
@classmethod
def splits(cls, args):
data_root = args.data_root
args_copy = deepcopy(args)
args_copy.split = "train"
args_copy.annots_path = os.path.join(data_root, "{}.json".format(args_copy.split))
if args.image_screening_parameters["image_feature_cap"] is not None and args.image_screening_parameters["image_feature_cap"] > 36:
args_copy.chunk_path = os.path.join(data_root, "features_{}_150.th".format(args_copy.split))
else:
args_copy.chunk_path = os.path.join(data_root, "features_chunk_{}.th".format(args_copy.split))
if args.get("do_test", False):
trainset = None
validationset = None
else:
trainset = cls(args_copy)
trainset.is_train = True
args_copy = deepcopy(args)
args_copy.split = "dev"
args_copy.annots_path = os.path.join(data_root, "{}.json".format(args_copy.split))
if args.image_screening_parameters["image_feature_cap"] is not None and args.image_screening_parameters["image_feature_cap"] > 36:
args_copy.chunk_path = os.path.join(data_root, "features_{}_150.th".format(args_copy.split))
else:
args_copy.chunk_path = os.path.join(data_root, "features_chunk_{}.th".format(args_copy.split))
validationset = cls(args_copy)
validationset.is_train = False
args_copy = deepcopy(args)
args_copy.split = "test1"
if args.get("test_on_hidden", False):
args_copy.split = "test2"
args_copy.annots_path = os.path.join(data_root, "{}.json".format(args_copy.split))
if args.image_screening_parameters["image_feature_cap"] is not None and args.image_screening_parameters["image_feature_cap"] > 36:
args_copy.chunk_path = os.path.join(data_root, "features_{}_150.th".format(args_copy.split))
else:
args_copy.chunk_path = os.path.join(data_root, "features_chunk_{}.th".format(args_copy.split))
testset = cls(args_copy)
testset.is_train = False
if args.get("do_test", False):
trainset = testset
validationset = testset
return trainset, validationset, testset
@staticmethod
def collate_fn(data):
if isinstance(data[0], dict):
for index, i in enumerate(data):
if "image_feat_variable" in i:
i["image_feat_variable"] = ArrayTensorField(i["image_feat_variable"])
i["image_dim_variable"] = IntArrayTensorField(i["image_dim_variable"])
i["visual_embeddings_type"] = IntArrayTensorField(i["visual_embeddings_type"])
i["bert_input_ids"] = IntArrayTensorField(i["bert_input_ids"])
i["bert_input_mask"] = IntArrayTensorField(i["bert_input_mask"])
i["bert_input_type_ids"] = IntArrayTensorField(i["bert_input_type_ids"])
if "masked_lm_labels" in i:
i["masked_lm_labels"] = IntArrayTensorField(i["masked_lm_labels"], padding_value = -1)
if "is_random_next" in i:
i["is_random_next"] = IntArrayTensorField(i["is_random_next"])
i['label'] = IntArrayTensorField(i['label'])
data[index] = Instance(i)
batch = Batch(data)
td = batch.as_tensor_dict()
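            # Each "label" was stored as a shape-[1] tensor; squeeze the trailing
            # dim so the batch becomes a flat [batch_size] vector.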
td["label"] = td["label"].squeeze(-1)
return td | 9,904 | 44.645161 | 196 | py |
visualbert | visualbert-master/visualbert/dataloaders/bert_field.py | from typing import Dict, List, Optional
import textwrap
from overrides import overrides
from spacy.tokens import Token as SpacyToken
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.data.fields.sequence_field import SequenceField
from allennlp.data.tokenizers.token import Token
from allennlp.data.token_indexers.token_indexer import TokenIndexer, TokenType
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn import util
import numpy
TokenList = List[TokenType] # pylint: disable=invalid-name
# A generic field for any precomputed per-token embeddings.
class BertField(SequenceField[Dict[str, torch.Tensor]]):
    """
    A field holding a token sequence together with one precomputed embedding per
    token. Within a batch, the embeddings are padded to the length of the
    longest sequence.
    """
def __init__(self, tokens: List[Token], embs: numpy.ndarray, padding_value: int = 0,
token_indexers=None) -> None:
self.tokens = tokens
self.embs = embs
self.padding_value = padding_value
if len(self.tokens) != self.embs.shape[0]:
raise ValueError("The tokens you passed into the BERTField, {} "
"aren't the same size as the embeddings of shape {}".format(self.tokens, self.embs.shape))
assert len(self.tokens) == self.embs.shape[0]
@overrides
def sequence_length(self) -> int:
return len(self.tokens)
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {'num_tokens': self.sequence_length()}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> Dict[str, torch.Tensor]:
num_tokens = padding_lengths['num_tokens']
new_arr = numpy.ones((num_tokens, self.embs.shape[1]),
dtype=numpy.float32) * self.padding_value
new_arr[:self.sequence_length()] = self.embs
tensor = torch.from_numpy(new_arr)
return {'bert': tensor}
@overrides
def empty_field(self):
return BertField([], numpy.array([], dtype="float32"),padding_value=self.padding_value)
@overrides
def batch_tensors(self, tensor_list: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
# pylint: disable=no-self-use
# This is creating a dict of {token_indexer_key: batch_tensor} for each token indexer used
# to index this field.
return util.batch_tensor_dicts(tensor_list)
def __str__(self) -> str:
return f"BertField: {self.tokens} and {self.embs.shape}."
from typing import Dict
import numpy
import torch
from overrides import overrides
from allennlp.data.fields.field import Field
class IntArrayField(Field[numpy.ndarray]):
"""
Modified by Harold.
    AllenNLP's default ArrayField produces float tensors; this variant produces int64 tensors.
"""
def __init__(self, array: numpy.ndarray, padding_value: int = 0) -> None:
self.array = array
self.padding_value = padding_value
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {"dimension_" + str(i): shape
for i, shape in enumerate(self.array.shape)}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> torch.Tensor:
max_shape = [padding_lengths["dimension_{}".format(i)]
for i in range(len(padding_lengths))]
        # Convert explicitly to an ndarray just in case it's a scalar (it would not be an ndarray otherwise)
return_array = numpy.asarray(numpy.ones(max_shape, "int64") * self.padding_value)
# If the tensor has a different shape from the largest tensor, pad dimensions with zeros to
# form the right shaped list of slices for insertion into the final tensor.
slicing_shape = list(self.array.shape)
if len(self.array.shape) < len(max_shape):
slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(self.array.shape))]
slices = tuple([slice(0, x) for x in slicing_shape])
return_array[slices] = self.array
tensor = torch.from_numpy(return_array)
return tensor
@overrides
def empty_field(self): # pylint: disable=no-self-use
# Pass the padding_value, so that any outer field, e.g., `ListField[ArrayField]` uses the
# same padding_value in the padded ArrayFields
return IntArrayField(numpy.array([], dtype="int64"), padding_value=self.padding_value)
def __str__(self) -> str:
return f"ArrayField with shape: {self.array.shape}."
class IntArrayTensorField(Field[numpy.ndarray]):
"""
    Modified by Harold.
    Like IntArrayField, but backed by a torch tensor rather than a numpy array.
    A class representing an array, which could have arbitrary dimensions.
    A batch of these arrays is padded to the max dimension length in the batch
    for each dimension.
"""
def __init__(self, array, padding_value: int = 0) -> None:
self.array = array
self.padding_value = padding_value
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {"dimension_" + str(i): shape
for i, shape in enumerate(self.array.size())}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> torch.Tensor:
max_shape = [padding_lengths["dimension_{}".format(i)]
for i in range(len(padding_lengths))]
        # Convert explicitly to a tensor of the padded shape just in case the input is a scalar
return_array = torch.ones(max_shape, dtype = torch.int64) * self.padding_value
# If the tensor has a different shape from the largest tensor, pad dimensions with zeros to
# form the right shaped list of slices for insertion into the final tensor.
slicing_shape = list(self.array.size())
if len(self.array.size()) < len(max_shape):
slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(self.array.size()))]
slices = tuple([slice(0, x) for x in slicing_shape])
return_array[slices] = self.array
return return_array
@overrides
def empty_field(self): # pylint: disable=no-self-use
# Pass the padding_value, so that any outer field, e.g., `ListField[ArrayField]` uses the
# same padding_value in the padded ArrayFields
return IntArrayTensorField(torch.LongTensor([]), padding_value=self.padding_value)
def __str__(self) -> str:
return f"ArrayField with shape: {self.array.shape}."
class ArrayTensorField(Field[numpy.ndarray]):
def __init__(self, array, padding_value: int = 0) -> None:
self.array = array
self.padding_value = padding_value
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {"dimension_" + str(i): shape
for i, shape in enumerate(self.array.size())}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> torch.Tensor:
max_shape = [padding_lengths["dimension_{}".format(i)]
for i in range(len(padding_lengths))]
        # Convert explicitly to a tensor of the padded shape just in case the input is a scalar
return_array = torch.ones(max_shape, dtype = torch.float) * self.padding_value
# If the tensor has a different shape from the largest tensor, pad dimensions with zeros to
# form the right shaped list of slices for insertion into the final tensor.
slicing_shape = list(self.array.size())
if len(self.array.size()) < len(max_shape):
slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(self.array.size()))]
slices = tuple([slice(0, x) for x in slicing_shape])
return_array[slices] = self.array
return return_array
@overrides
def empty_field(self): # pylint: disable=no-self-use
# Pass the padding_value, so that any outer field, e.g., `ListField[ArrayField]` uses the
# same padding_value in the padded ArrayFields
return ArrayTensorField(torch.Tensor([]), padding_value=self.padding_value)
def __str__(self) -> str:
return f"ArrayField with shape: {self.array.size()}." | 8,295 | 40.273632 | 119 | py |
visualbert | visualbert-master/visualbert/dataloaders/vqa_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
from torch.utils.data import Dataset
import numpy as np
from copy import deepcopy
import torch
from torch.utils.data.dataloader import default_collate
from allennlp.data.instance import Instance
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField, LabelField, SequenceLabelField, ArrayField, MetadataField
from dataloaders.bert_field import IntArrayField
from .bert_data_utils import *
from visualbert.pytorch_pretrained_bert.tokenization import BertTokenizer
imdb_version = 1 # Unused legacy flag from the original imdb loader; the imdb header row is skipped via [1:] below.
import random
import json
def compute_answer_scores(answers, num_of_answers, unk_idx):
scores = np.zeros((num_of_answers), np.float32)
for answer in set(answers):
if answer == unk_idx:
scores[answer] = 0
else:
answer_count = answers.count(answer)
scores[answer] = min(np.float32(answer_count)*0.3, 1)
return scores
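# Worked example: if 3 of the 10 annotators gave the same answer, its soft score
# is min(3 * 0.3, 1) = 0.9; with 4 or more agreeing the score saturates at 1.0.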
def read_in_image_feats(image_dirs, image_readers, image_file_name):
image_feats = []
for i, image_dir in enumerate(image_dirs):
image_feat_path = os.path.join(image_dir, image_file_name)
tmp_image_feat = image_readers[i].read(image_feat_path)
image_feats.append(tmp_image_feat)
return image_feats
class VQADataset(Dataset):
def __init__(self, args, chunk_train = None, chunk_val = None): # Using args is not exactly a very good coding habit...
super(VQADataset, self).__init__()
if isinstance(args.imdb_file, list) or isinstance(args.imdb_file, tuple): # For training dataset, the imdb_file is a list of strs, containing train and val:
imdb = np.load(args.imdb_file[0], allow_pickle = True)[1:]
for i in args.imdb_file[1:]:
imdb_i = np.load(i, allow_pickle = True)[1:]
imdb = np.concatenate((imdb, imdb_i))
else:
if args.imdb_file.endswith('.npy'):
imdb = np.load(args.imdb_file, allow_pickle = True)[1:]
else:
raise TypeError('unknown imdb format.')
self.items = imdb
self.chunk_train = chunk_train
self.chunk_val = chunk_val
self.args = args
self.data_root = args.data_root
self.use_visual_genome = args.use_visual_genome
self.pretraining = args.pretraining
self.include_res152 = args.get('include_res152', False)
self.no_next_sentence = args.get("no_next_sentence", False)
self.false_caption_ratio = args.get("false_caption_ratio", 0.5)
# the answer dict is always loaded, regardless of self.load_answer
self.answer_dict = VocabDict(args.vocab_answer_file)
self.do_lower_case = args["do_lower_case"]
self.bert_model_name = args.bert_model_name
self.max_seq_length = args.max_seq_length
self.pretraining = args.pretraining
self.masked_lm_prob = args.get("masked_lm_prob", 0.15)
self.tokenizer = BertTokenizer.from_pretrained(args["bert_model_name"], do_lower_case=args["do_lower_case"])
self.advanced_vqa = True if args.model.training_head_type == "vqa_advanced" else False
if self.advanced_vqa:
tokenized_list = []
for i in self.answer_dict.word_list:
tokenized_list.append(self.tokenizer.tokenize(i))
max_len = max(len(i) for i in tokenized_list)
for index, i in enumerate(tokenized_list):
if len(i) < max_len:
tokenized_list[index] = i + ["[MASK]"] * (max_len - len(i))
self.tokenized_list = tokenized_list
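            # e.g. if the longest tokenized answer has 3 wordpieces, a one-piece
            # answer becomes [tok, "[MASK]", "[MASK]"], so every answer occupies
            # the same number of prediction slots.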
def __len__(self):
return len(self.items)
def get_image_features_by_training_index(self, index):
if not self.use_visual_genome:
item = self.items[index]
image_file_name = self.items[index]['image_name'] + ".jpg.npz"
            try:
                return self.chunk_train[image_file_name]
            except (KeyError, TypeError):
                # TypeError covers chunk_train being None when only val features were loaded.
                return self.chunk_val[image_file_name]
else:
iminfo = self.items[index]
image_file_name = iminfo['image_name'] + ".npy"
if "train" in image_file_name:
folder = os.path.join(self.data_root, "data/detectron_fix_100/fc6/vqa/train2014")
elif "val" in image_file_name:
folder = os.path.join(self.data_root, "data/detectron_fix_100/fc6/vqa/val2014")
elif "test" in image_file_name:
folder = os.path.join(self.data_root, "data/detectron_fix_100/fc6/vqa/test2015")
detectron_features = np.load(os.path.join(folder, image_file_name))
image_feat_variable = detectron_features
image_dim_variable = image_feat_variable.shape[0]
visual_embeddings_type = None
return image_feat_variable, None, image_dim_variable
def __getitem__(self, index):
iminfo = self.items[index]
image_feat_variable, image_boxes, image_dim_variable = self.get_image_features_by_training_index(index)
sample = {}
image_feat_variable = ArrayField(image_feat_variable)
image_dim_variable = IntArrayField(np.array(image_dim_variable))
sample["image_feat_variable"] = image_feat_variable
sample["image_dim_variable"] = image_dim_variable
answer = None
valid_answers_idx = np.zeros((10), np.int32)
valid_answers_idx.fill(-1)
answer_scores = np.zeros(self.answer_dict.num_vocab, np.float32)
if 'answer' in iminfo:
answer = iminfo['answer']
elif 'valid_answers' in iminfo:
valid_answers = iminfo['valid_answers']
answer = np.random.choice(valid_answers)
valid_answers_idx[:len(valid_answers)] = (
[self.answer_dict.word2idx(ans) for ans in valid_answers])
ans_idx = (
[self.answer_dict.word2idx(ans) for ans in valid_answers])
answer_scores = (compute_answer_scores(ans_idx,
self.answer_dict.num_vocab,
self.answer_dict.UNK_idx))
if answer is not None:
answer_idx = self.answer_dict.word2idx(answer)
if self.advanced_vqa:
new_answer = self.tokenized_list[self.answer_dict.word2idx(answer)]
subword_tokens = self.tokenizer.tokenize(" ".join(iminfo['question_tokens']))
subword_tokens = ["[CLS]"] + subword_tokens + ["?"] # We will use the last word to do predictio
masked_lm_labels = [-1] * len(subword_tokens)
for i in new_answer:
subword_tokens.append("[MASK]")
masked_lm_labels.append(self.tokenizer.vocab[i])
subword_tokens.append("[SEP]")
masked_lm_labels.append(-1)
input_ids = []
for i in subword_tokens:
input_ids.append(self.tokenizer.vocab[i])
bert_feature = InputFeatures(
unique_id = -1,
tokens = subword_tokens,
input_ids = input_ids,
input_mask = [1] * len(input_ids),
input_type_ids = [0] * len(input_ids),
is_correct = 1,
lm_label_ids = masked_lm_labels
)
bert_feature.insert_field_into_dict(sample)
else:
if self.pretraining:
item = iminfo
if self.no_next_sentence:
answer = answer
label = None
subword_tokens_a = self.tokenizer.tokenize(" ".join(item['question_tokens'])) + ["?"]
                    subword_tokens_b = self.tokenizer.tokenize(answer)  # answer is a single string, not a token list
bert_example = InputExample(unique_id = index, text_a = subword_tokens_a + subword_tokens_b, text_b = None, is_correct = None, max_seq_length = self.max_seq_length)
bert_feature = InputFeatures.convert_one_example_to_features_pretraining(
example = bert_example,
tokenizer=self.tokenizer,
probability = 0.15)
else:
assert(0) # Should not use this part
'''if random.random() > self.false_caption_ratio:
answer = answer
label = 1
else:
while(True):
wrong_answer = np.random.choice(self.answer_dict.word_list)
if wrong_answer not in valid_answers:
wrong_answer = answer
label = 0
break
subword_tokens_a = self.tokenizer.tokenize(" ".join(item['question_tokens'])) + ["?"]
subword_tokens_b = self.tokenizer.tokenize(" ".join(answer))
bert_example = InputExample(unique_id = index, text_a = subword_tokens_a, text_b = subword_tokens_b, is_correct = label, max_seq_length = self.max_seq_length)
bert_feature = InputFeatures.convert_one_example_to_features_pretraining(
example = bert_example,
tokenizer=self.tokenizer,
probability = 0.15)'''
bert_feature.insert_field_into_dict(sample)
else:
item = iminfo
subword_tokens = self.tokenizer.tokenize(" ".join(item['question_tokens']))
if self.no_next_sentence:
subword_tokens = subword_tokens + ["?", "[MASK]"] # We will use the last word to do predictio
subwords_b = None
else:
subword_tokens = subword_tokens + ["?"]
subwords_b = ["[MASK]"]
bert_example = InputExample(unique_id = -1, text_a = subword_tokens, text_b = subwords_b,max_seq_length = self.max_seq_length)
bert_feature = InputFeatures.convert_one_example_to_features(bert_example,tokenizer =self.tokenizer)
bert_feature.insert_field_into_dict(sample)
if answer is not None:
sample['label'] = ArrayField(np.array(answer_scores))
return Instance(sample)
@staticmethod
def collate_fn(data):
if isinstance(data[0], Instance):
batch = Batch(data)
td = batch.as_tensor_dict()
return td
@classmethod
def splits(cls, args):
""" Helper method to generate splits of the dataset"""
data_root = args.data_root
if not args.use_visual_genome:
assert(0) # This part should not be used
'''chunk_train = torch.load(os.path.join(data_root, "coco/features_chunk_train.th"))
chunk_val = torch.load(os.path.join(data_root, "coco/features_chunk_val.th"))
print("Processing imges...")
average = 0.0
for image_id in chunk_train.keys():
image_feat_variable, image_boxes, confidence = chunk_train[image_id]
chunk_train[image_id] = screen_feature(image_feat_variable, image_boxes,confidence, args.image_screening_parameters)
average += chunk_train[image_id][2]
print("{} features on average.".format(average/len(chunk_train)))
for image_id in chunk_val.keys():
image_feat_variable, image_boxes, confidence = chunk_val[image_id]
chunk_val[image_id] = screen_feature(image_feat_variable, image_boxes,confidence, args.image_screening_parameters)
average += chunk_val[image_id][2]'''
else:
chunk_train = None
chunk_val = None
args_copy = deepcopy(args)
args_copy.vocab_answer_file = os.path.join(data_root, "data/answers_vqa.txt")
args_copy.imdb_file = [os.path.join(data_root, "data/imdb/imdb_train2014.npy"), os.path.join(data_root, "data/imdb/imdb_val2014.npy")] #imdb_val2train2014, imdb_val2014
train = cls(args_copy, chunk_train = chunk_train, chunk_val = chunk_val)
train.is_train = True
args_copy_1 = deepcopy(args_copy)
args_copy_1.imdb_file = os.path.join(data_root, "data/imdb/imdb_minival2014.npy")
val = cls(args_copy_1, chunk_train = chunk_train, chunk_val = chunk_val)
val.is_train = False
args_copy_2 = deepcopy(args_copy)
args_copy_2.imdb_file = os.path.join(data_root, "data/imdb/imdb_test2015.npy")
test = cls(args_copy_2)
test.is_train = False
return train, val, test
def generate_test_file(self, logits, out_file):
assert(len(self.items) == logits.size(0))
out_list = []
for index, i in enumerate(self.items):
question_id = i["question_id"]
out_list.append(
{
"question_id": question_id,
"answer": self.answer_dict.idx2word(logits[index].argmax(0))
}
)
with open(out_file, "w") as f:
json.dump(out_list, f)
import re
SENTENCE_SPLIT_REGEX = re.compile(r'(\W+)')
def tokenize(sentence):
sentence = sentence.lower()
sentence = (
sentence.replace(',', '').replace('?', '').replace('\'s', ' \'s'))
tokens = SENTENCE_SPLIT_REGEX.split(sentence)
tokens = [t.strip() for t in tokens if len(t.strip()) > 0]
return tokens
def load_str_list(fname):
with open(fname) as f:
lines = f.readlines()
lines = [l.strip() for l in lines]
return lines
class VocabDict:
def __init__(self, vocab_file):
self.word_list = load_str_list(vocab_file)
self.word2idx_dict = {w: n_w for n_w, w in enumerate(self.word_list)}
self.num_vocab = len(self.word_list)
self.UNK_idx = (self.word2idx_dict['<unk>']
if '<unk>' in self.word2idx_dict else None)
def idx2word(self, n_w):
return self.word_list[n_w]
def word2idx(self, w):
if w in self.word2idx_dict:
return self.word2idx_dict[w]
elif self.UNK_idx is not None:
return self.UNK_idx
else:
            raise ValueError('word %s not in dictionary (while dictionary does not contain <unk>)' % w)
def tokenize_and_index(self, sentence):
inds = [self.word2idx(w) for w in tokenize(sentence)]
return inds
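# Minimal VocabDict usage sketch (the path is illustrative):
#   vocab = VocabDict("data/answers_vqa.txt")
#   vocab.word2idx("yes")      # index of "yes"
#   vocab.word2idx("zzzzz")    # falls back to the <unk> index when present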
| 14,744 | 41.615607 | 184 | py |
visualbert | visualbert-master/visualbert/dataloaders/bert_data_utils.py | # Functions to convert raw strings into BERT input features (InputFeatures' class methods),
# plus helpers for reading image features.
# Padding is left to AllenNLP's Field classes.
# Caveat: sequences are padded with zero, with one exception: the masked-LM label padding must be -1.
import os
import random
import json
import copy
from collections import defaultdict
from copy import deepcopy
from tqdm import tqdm
import numpy as np
import numpy
import torch
from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField, LabelField, SequenceLabelField, ArrayField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import ELMoTokenCharactersIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn.util import get_text_field_mask
import h5py
from pytorch_pretrained_bert.fine_tuning import _truncate_seq_pair, random_word
from .box_utils import load_image, resize_image, to_tensor_and_normalize
from .mask_utils import make_mask
from .bert_field import BertField, IntArrayField, IntArrayTensorField, ArrayTensorField
class InputExample(object):
def __init__(self, unique_id=None, text_a=None, text_b=None, is_correct=True, lm_labels=None, max_seq_length = None):
self.unique_id = unique_id
self.text_a = text_a
self.text_b = text_b
self.is_correct = is_correct # This sort of serves as the correct label as well as the is_next label
        # lm_labels is expected to be None here; masking is applied later in
        # convert_one_example_to_features_pretraining.
        assert(lm_labels is None)
        self.lm_labels = lm_labels # masked words for language model
if max_seq_length is not None:
self.perform_truncate(max_seq_length)
def perform_truncate(self, max_seq_length):
        if self.text_b is None:
            # Account for [CLS] and [SEP] with "- 2"
            self.text_a = self.text_a[:max_seq_length - 2]
        else:
            # Account for [CLS], [SEP], [SEP] with "- 3"
            len_total = len(self.text_a) + len(self.text_b) + 3
            if len_total > max_seq_length:
                take_away_from_ctx = min((len_total - max_seq_length + 1) // 2, max(len(self.text_a) - 32, 0))
                # Remove exactly the overflow: whatever the context does not
                # absorb comes out of the answer.
                take_away_from_answer = (len_total - max_seq_length) - take_away_from_ctx
                # Follows VCR: truncate from the front.
                self.text_a = self.text_a[take_away_from_ctx:]
                self.text_b = self.text_b[take_away_from_answer:]
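    # Worked example of the truncation above: with max_seq_length = 128,
    # len(text_a) = 100 and len(text_b) = 40, len_total = 143, so 15 tokens go:
    # take_away_from_ctx = min((15 + 1) // 2, 100 - 32) = 8 and
    # take_away_from_answer = 15 - 8 = 7, leaving 92 + 33 tokens + 3 specials = 128.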
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids, is_correct, lm_label_ids=None):
self.unique_id = unique_id
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.input_type_ids = input_type_ids
self.is_correct = is_correct
self.lm_label_ids = lm_label_ids
        # For compatibility with Huggingface models:
self.segment_ids = input_type_ids
self.is_next = is_correct
    # Convert one sentence_a (+ optional sentence_b) into BERT input features, without LM masking
@classmethod
def convert_one_example_to_features(cls, example, tokenizer):
        # note, this is different because we've already tokenized
tokens_a = example.text_a
# tokens_b = example.text_b
tokens_b = None
if example.text_b:
tokens_b = example.text_b
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
input_type_ids.append(1)
tokens.append("[SEP]")
input_type_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
return cls(
unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids,
is_correct=example.is_correct)
@classmethod
def convert_examples_to_features(cls, examples, tokenizer):
        # Pads each feature only to the longest sequence in this batch, not to a fixed max length
max_len = 0
features = []
for (ex_index, example) in enumerate(examples):
feature = cls.convert_one_example_to_features(example, tokenizer)
if max_len < len(feature.input_ids):
max_len = len(feature.input_ids)
features.append(feature)
for i in features:
# Zero-pad up to the sequence length.
while len(i.input_ids) < max_len:
i.input_ids.append(0)
i.input_mask.append(0)
i.input_type_ids.append(0)
assert len(i.input_ids) == max_len
assert len(i.input_mask) == max_len
assert len(i.input_type_ids) == max_len
return features
@classmethod
def convert_one_example_to_features_pretraining(cls, example, tokenizer, probability):
        ############ Modified by Harold
        # This function leaves padding to AllenNLP's fields, so we do not pad here.
        # But we need to be extra careful about the padding index:
        # not everything is padded with zero (masked-LM labels are padded with -1).
"""
Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with
IDs, LM labels, input_mask, CLS and SEP tokens etc.
:param example: InputExample, containing sentence input as strings and is_next label
        :param tokenizer: Tokenizer
        :param probability: float, per-token masking probability passed to random_word
:return: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training)
"""
tokens_a = example.text_a
tokens_b = None
if example.text_b:
tokens_b = example.text_b
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
#_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
tokens_a, t1_label = random_word(tokens_a, tokenizer, probability)
if tokens_b:
tokens_b, t2_label = random_word(tokens_b, tokenizer, probability)
# concatenate lm labels and account for CLS, SEP, SEP
if tokens_b:
lm_label_ids = ([-1] + t1_label + [-1] + t2_label + [-1])
else:
lm_label_ids = ([-1] + t1_label + [-1])
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
assert len(tokens_b) > 0
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
return cls(unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=segment_ids,
lm_label_ids=lm_label_ids,
is_correct=example.is_correct)
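    # Illustrative output layout after random_word masking (ids are schematic):
    #   tokens:        [CLS]  t1  [MASK]  [SEP]  u1  [SEP]
    #   segment_ids:     0     0     0      0     1    1
    #   lm_label_ids:   -1    -1  id(t2)   -1    -1   -1
    # Only positions that random_word masked or replaced carry a real vocabulary
    # id; every other position is -1 and is ignored by the masked-LM loss.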
def convert_to_allennlp_feild(self):
self.input_ids_field = IntArrayField(np.array(self.input_ids, dtype="int"), padding_value = 0)
self.input_mask_field = IntArrayField(np.array(self.input_mask, dtype="int"))
self.input_type_ids_field = IntArrayField(np.array(self.segment_ids, dtype="int"))
# Padding Value = -1
if self.lm_label_ids is not None:
self.masked_lm_labels_field = IntArrayField(np.array(self.lm_label_ids, dtype="int"), padding_value = -1)
else:
self.masked_lm_labels_field = None
if self.is_next is not None:
self.is_random_next_field = IntArrayField(np.array(self.is_next, dtype="int"))
else:
self.is_random_next_field = None
def convert_to_pytorch_tensor(self):
        # For multi-process efficiency, we use plain torch tensors here instead of AllenNLP fields.
self.input_ids_field = torch.tensor(self.input_ids, dtype=torch.int64)
self.input_mask_field = torch.tensor(self.input_mask, dtype=torch.int64)
self.input_type_ids_field = torch.tensor(self.segment_ids, dtype=torch.int64)
# Padding Value = -1
if self.lm_label_ids is not None:
self.masked_lm_labels_field = torch.tensor(self.lm_label_ids, dtype=torch.int64)
else:
self.masked_lm_labels_field = None
if self.is_next is not None:
self.is_random_next_field = torch.tensor([int(self.is_next)], dtype=torch.int64)
else:
self.is_random_next_field = None
def insert_field_into_dict(self, instance_dict):
self.convert_to_allennlp_feild()
instance_dict["bert_input_ids"] = self.input_ids_field
instance_dict["bert_input_mask"] = self.input_mask_field
instance_dict["bert_input_type_ids"] = self.input_type_ids_field
if self.masked_lm_labels_field is not None:
instance_dict["masked_lm_labels"] = self.masked_lm_labels_field
if self.is_random_next_field is not None:
instance_dict["is_random_next"] = self.is_random_next_field
def insert_tensor_into_dict(self, instance_dict):
self.convert_to_pytorch_tensor()
instance_dict["bert_input_ids"] = self.input_ids_field
instance_dict["bert_input_mask"] = self.input_mask_field
instance_dict["bert_input_type_ids"] = self.input_type_ids_field
if self.masked_lm_labels_field is not None:
instance_dict["masked_lm_labels"] = self.masked_lm_labels_field
if self.is_random_next_field is not None:
instance_dict["is_random_next"] = self.is_random_next_field
@staticmethod
def convert_list_features_to_allennlp_list_feild(list_features, instance_dict):
input_ids_list = []
input_mask_list = []
input_type_ids_list = []
masked_lm_labels_list = []
is_random_next_list = []
# Every element in the list_features is a feature instance
for i in list_features:
i.convert_to_allennlp_feild()
input_ids_list.append(i.input_ids_field)
input_mask_list.append(i.input_mask_field)
input_type_ids_list.append(i.input_type_ids_field)
masked_lm_labels_list.append(i.masked_lm_labels_field)
is_random_next_list.append(i.is_random_next_field)
input_ids_list = ListField(input_ids_list)
input_mask_list = ListField(input_mask_list)
input_type_ids_list = ListField(input_type_ids_list)
if masked_lm_labels_list[0]:
masked_lm_labels_list = ListField(masked_lm_labels_list)
is_random_next_list = ListField(is_random_next_list)
else:
masked_lm_labels_list = None
is_random_next_list = None
instance_dict["bert_input_ids"] = input_ids_list
instance_dict["bert_input_mask"] = input_mask_list
instance_dict["bert_input_type_ids"] = input_type_ids_list
if masked_lm_labels_list:
instance_dict["masked_lm_labels"] = masked_lm_labels_list
instance_dict["is_random_next"] = is_random_next_list
return
class faster_RCNN_feat_reader:
def read(self, image_feat_path):
return np.load(image_feat_path)
class CHW_feat_reader:
def read(self, image_feat_path):
feat = np.load(image_feat_path)
assert (feat.shape[0] == 1), "batch is not 1"
feat = feat.squeeze(0)
return feat
class dim_3_reader:
def read(self, image_feat_path):
tmp = np.load(image_feat_path)
_, _, c_dim = tmp.shape
image_feat = np.reshape(tmp, (-1, c_dim))
return image_feat
class HWC_feat_reader:
def read(self, image_feat_path):
tmp = np.load(image_feat_path)
assert (tmp.shape[0] == 1), "batch is not 1"
_, _, _, c_dim = tmp.shape
image_feat = np.reshape(tmp, (-1, c_dim))
return image_feat
class padded_faster_RCNN_feat_reader:
def __init__(self, max_loc):
self.max_loc = max_loc
def read(self, image_feat_path):
image_feat = np.load(image_feat_path)
image_loc, image_dim = image_feat.shape
tmp_image_feat = np.zeros((self.max_loc, image_dim), dtype=np.float32)
tmp_image_feat[0:image_loc, ] = image_feat
image_feat = tmp_image_feat
return (image_feat, image_loc)
class padded_faster_RCNN_with_bbox_feat_reader:
def __init__(self, max_loc):
self.max_loc = max_loc
def read(self, image_feat_path):
image_feat_bbox = np.load(image_feat_path)
image_boxes = image_feat_bbox.item().get('image_bboxes')
tmp_image_feat = image_feat_bbox.item().get('image_feat')
image_loc, image_dim = tmp_image_feat.shape
tmp_image_feat_2 = np.zeros((self.max_loc, image_dim),
dtype=np.float32)
tmp_image_feat_2[0:image_loc, ] = tmp_image_feat
tmp_image_box = np.zeros((self.max_loc, 4), dtype=np.int32)
tmp_image_box[0:image_loc] = image_boxes
return (tmp_image_feat_2, image_loc, tmp_image_box)
def parse_npz_img_feat(feat):
return feat['x']
def get_image_feat_reader(ndim, channel_first, image_feat, max_loc=None):
if ndim == 2 or ndim == 0:
if max_loc is None:
return faster_RCNN_feat_reader()
else:
if isinstance(image_feat.item(0), dict):
return padded_faster_RCNN_with_bbox_feat_reader(max_loc)
else:
return padded_faster_RCNN_feat_reader(max_loc)
elif ndim == 3 and not channel_first:
return dim_3_reader()
elif ndim == 4 and channel_first:
return CHW_feat_reader()
elif ndim == 4 and not channel_first:
return HWC_feat_reader()
else:
raise TypeError("unkown image feature format")
def compute_answer_scores(answers, num_of_answers, unk_idx):
scores = np.zeros((num_of_answers), np.float32)
for answer in set(answers):
if answer == unk_idx:
scores[answer] = 0
else:
answer_count = answers.count(answer)
scores[answer] = min(np.float32(answer_count)*0.3, 1)
return scores
def read_in_image_feats(image_dirs, image_readers, image_file_name):
image_feats = []
for i, image_dir in enumerate(image_dirs):
image_feat_path = os.path.join(image_dir, image_file_name)
tmp_image_feat = image_readers[i].read(image_feat_path)
image_feats.append(tmp_image_feat)
return image_feats
def get_one_image_feature(path, reader, image_feature_cap):
image_feat = reader.read(path)
image_loc = image_feat[1]
if len(image_feat) == 3:
image_boxes = image_feat[2]
else:
image_boxes = None
returned_feat = image_feat[0]
if image_feature_cap != -1:
if image_feature_cap < image_loc:
returned_feat = returned_feat[:image_feature_cap, :]
if image_boxes is not None:
image_boxes = image_boxes[:image_feature_cap]
image_loc = image_feature_cap
return returned_feat, image_boxes, image_loc
def get_one_image_feature_npz_screening_parameters(path, reader, image_screening_parameters, return_confidence = False):
result = reader.read(path)
image_feat = result["box_features"]
max_conf = result["max_conf"]
cls_boxes = result["cls_boxes"]
confidence_cap = image_screening_parameters.get("confidence_cap", None)
image_feature_cap = image_screening_parameters.get("image_feature_cap", None)
if confidence_cap:
keep_boxes = np.where(max_conf >= confidence_cap)[0]
if keep_boxes.shape[0] == 0:
image_feat = image_feat[:1] # Just keep one feature...
cls_boxes = cls_boxes[:1]
max_conf = max_conf[:1]
else:
image_feat = image_feat[keep_boxes]
cls_boxes = cls_boxes[keep_boxes]
max_conf = max_conf[keep_boxes]
if image_feature_cap:
image_loc = image_feat.shape[0]
if image_feature_cap < image_loc:
image_feat = image_feat[:image_feature_cap, :]
cls_boxes = cls_boxes[:image_feature_cap]
max_conf = max_conf[:image_feature_cap]
image_loc = image_feat.shape[0]
if return_confidence:
return image_feat, cls_boxes, max_conf
else:
return image_feat, cls_boxes, image_loc
def screen_feature(image_feat, cls_boxes, max_conf, image_screening_parameters, mandatory_keep = None):
confidence_cap = image_screening_parameters.get("confidence_cap", None)
image_feature_cap = image_screening_parameters.get("image_feature_cap", None)
min_cap = image_screening_parameters.get("min_cap", 1)
max_cap = image_screening_parameters.get("max_cap", 300)
keep_boxes = np.arange(image_feat.shape[0])
if confidence_cap:
keep_boxes = np.where(max_conf >= confidence_cap)[0]
if keep_boxes.shape[0] < min_cap:
keep_boxes = np.arange(min_cap)
#image_feat = image_feat[:min_cap]
#cls_boxes = cls_boxes[:min_cap]
if image_feature_cap:
if image_feature_cap < keep_boxes.shape[0]:
keep_boxes = np.arange(image_feature_cap)
if max_cap:
if max_cap < keep_boxes.shape[0]:
keep_boxes = np.arange(max_cap)
if mandatory_keep is not None:
keep_boxes = np.union1d(keep_boxes, mandatory_keep)
image_feat = image_feat[keep_boxes]
cls_boxes = cls_boxes[keep_boxes]
image_loc = image_feat.shape[0]
return image_feat, cls_boxes, image_loc
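# A minimal sketch of how screen_feature is typically driven (thresholds and the
# helper name are illustrative; random arrays stand in for detector outputs):
def _screen_feature_demo():
    feat = np.random.randn(150, 2048).astype(np.float32)   # region features
    boxes = np.random.rand(150, 4).astype(np.float32)      # region boxes
    conf = np.random.rand(150).astype(np.float32)          # detection confidences
    params = {"confidence_cap": 0.5, "image_feature_cap": 100}
    return screen_feature(feat, boxes, conf, params)       # at most 100 regions kept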
| 21,319 | 39.378788 | 130 | py |
visualbert | visualbert-master/visualbert/dataloaders/coco_dataset.py | import os
import random
import json
from collections import defaultdict
from copy import deepcopy
from tqdm import tqdm
import numpy as np
import numpy
import torch
from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField, LabelField, SequenceLabelField, ArrayField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import ELMoTokenCharactersIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn.util import get_text_field_mask
from dataloaders.box_utils import load_image, resize_image, to_tensor_and_normalize
from dataloaders.mask_utils import make_mask
from dataloaders.bert_field import BertField, IntArrayField
import h5py
from pytorch_pretrained_bert.fine_tuning import _truncate_seq_pair, random_word
from .bert_data_utils import *
from visualbert.pytorch_pretrained_bert.tokenization import BertTokenizer
from pycocotools.coco import COCO
class COCODataset(Dataset):
def __init__(self, args, visual_genome_chunk = False):
super(COCODataset, self).__init__()
self.args = args
self.coco = COCO(args.annots_path)
self.annots_path = args.annots_path
self.split_name = args.split_name
self.data_root = args.data_root
self.visual_genome_chunk = visual_genome_chunk
self.masks = args.masks
self.image_feature_type = args.image_feature_type
self.text_only = args.get("text_only", False)
self.add_spatial_features = args.get("add_spatial_features", False)
self.expanded = False
########## Loading Annotations
self.items = self.coco.loadAnns(self.coco.getAnnIds())
print("{} of captions in total.".format(len(self.items)))
self.image_feat_reader = faster_RCNN_feat_reader()
if args.get("chunk_path", None) is not None and self.image_feature_type == "nlvr":
print("Loading images...")
self.chunk = torch.load(args.chunk_path)
average = 0.0
counter = 0
new_chunk = {}
for image_id in self.chunk.keys():
image_feat_variable, image_boxes, confidence = self.chunk[image_id]
if ".npz" in image_id:
new_chunk[image_id] = screen_feature(image_feat_variable, image_boxes,confidence, args.image_screening_parameters)
average += new_chunk[image_id][2]
else:
new_chunk[image_id+".npz"] = screen_feature(image_feat_variable, image_boxes,confidence, args.image_screening_parameters)
average += new_chunk[image_id+".npz"][2]
print("{} features on average.".format(average/len(self.chunk)))
self.chunk = new_chunk
self.do_lower_case = args.do_lower_case
self.bert_model_name = args.bert_model_name
self.max_seq_length = args.max_seq_length
self.tokenizer = BertTokenizer.from_pretrained(self.bert_model_name, do_lower_case=self.do_lower_case)
self.pretraining = args.pretraining
self.masked_lm_prob = args.get("masked_lm_prob", 0.15)
with open(os.path.join('./cocoontology.json'), 'r') as f:
coco = json.load(f)
self.coco_objects = ['__background__'] + [x['name'] for k, x in sorted(coco.items(), key=lambda x: int(x[0]))]
self.coco_obj_to_ind = {o: i for i, o in enumerate(self.coco_objects)}
if self.image_feature_type == "r2c":
items = []
counter = 0
            for index, i in enumerate(self.items):
                # self.expanded is only set after construction (see splits()), so
                # this branch is inert here; enumerate keeps `index` well-defined.
                if self.expanded and index >= self.train_size:
                    image_file_name = "COCO_val2014_{:0>12d}.jpg".format(i['image_id'])
                else:
                    image_file_name = "COCO_{}2014_{:0>12d}.jpg".format(self.split_name, i['image_id'])
if isinstance(self.masks[image_file_name], dict):
items.append(i)
else:
                    # For some images the detector produced empty output, so we skip them; this drops only a small number of instances.
counter += 1
print("Discarded {} instances in {}.".format(counter, self.split_name))
self.items = items
def get_image_features_by_training_index(self, index):
item = self.items[index]
if self.args.image_feature_type == "flickr":
v_item = self.visual_genome_chunk[item['image_id']]
image_feat_variable = v_item["features"]
image_boxes = None
image_dim_variable = image_feat_variable.shape[0]
if self.add_spatial_features:
image_w = float(v_item['image_w'])
image_h = float(v_item['image_h'])
bboxes = v_item["boxes"]
box_width = bboxes[:, 2] - bboxes[:, 0]
box_height = bboxes[:, 3] - bboxes[:, 1]
scaled_width = box_width / image_w
scaled_height = box_height / image_h
scaled_x = bboxes[:, 0] / image_w
scaled_y = bboxes[:, 1] / image_h
box_width = box_width[..., np.newaxis]
box_height = box_height[..., np.newaxis]
scaled_width = scaled_width[..., np.newaxis]
scaled_height = scaled_height[..., np.newaxis]
scaled_x = scaled_x[..., np.newaxis]
scaled_y = scaled_y[..., np.newaxis]
spatial_features = np.concatenate(
(scaled_x,
scaled_y,
scaled_x + scaled_width,
scaled_y + scaled_height,
scaled_width,
scaled_height),
axis=1)
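                # Each spatial_features row is [x1/W, y1/H, x2/W, y2/H, w/W, h/H]:
                # normalized corners plus normalized width/height of the box.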
image_feat_variable = np.concatenate((image_feat_variable, spatial_features), axis=1)
return image_feat_variable, image_boxes, image_dim_variable
if self.args.image_feature_type == "vqa_fix_100":
if self.expanded and index >= self.train_size:
image_file_name = "COCO_val2014_{:0>12d}.npy".format(item['image_id'])
else:
image_file_name = "COCO_{}2014_{:0>12d}.npy".format(self.split_name, item['image_id'])
if "train" in image_file_name:
folder = os.path.join(self.data_root, "data/detectron_fix_100/fc6/vqa/train2014")
elif "val" in image_file_name:
folder = os.path.join(self.data_root, "data/detectron_fix_100/fc6/vqa/val2014")
image_feat_variable = np.load(os.path.join(folder, image_file_name))
image_dim_variable = image_feat_variable.shape[0]
return image_feat_variable, None, image_dim_variable
if self.expanded and index >= self.train_size:
image_file_name = "COCO_val2014_{:0>12d}.jpg.npz".format(item['image_id'])
return self.chunk_val[image_file_name]
else:
image_file_name = "COCO_{}2014_{:0>12d}.jpg.npz".format(self.split_name, item['image_id'])
if self.args.get("chunk_path", None) is not None:
return self.chunk[image_file_name]
def __len__(self):
return len(self.items)
def __getitem__(self, index):
if self.image_feature_type == "r2c":
return self.__getitem_detector__(index)
item = self.items[index]
sample = {}
if not self.text_only:
image_feat_variable, image_boxes, image_dim_variable = self.get_image_features_by_training_index(index)
image_feat_variable = ArrayField(image_feat_variable)
image_dim_variable = IntArrayField(np.array(image_dim_variable))
sample["image_feat_variable"] = image_feat_variable
sample["image_dim_variable"] = image_dim_variable
sample["label"] = image_dim_variable
else:
sample["label"] = IntArrayField(np.array([0]))
caption_a = item["caption"]
imageID = item["image_id"]
if self.expanded and index >= self.train_size:
coco = self.coco_val
else:
coco = self.coco
rest_anns = coco.loadAnns([i for i in coco.getAnnIds(imgIds=imageID) if i != item['id']])
if self.args.get("two_sentence", True):
if random.random() > 0.5:
item_b = self.items[random.randint(0, len(self.items) - 1)]
while item_b["image_id"] == imageID:
item_b = self.items[random.randint(0, len(self.items) - 1)]
flag = False
else:
item_b = rest_anns[random.randint(0, len(rest_anns) - 1)]
flag = True
caption_b = item_b["caption"]
subword_tokens_a = self.tokenizer.tokenize(caption_a)
subword_tokens_b = self.tokenizer.tokenize(caption_b)
bert_example = InputExample(unique_id = index, text_a = subword_tokens_a, text_b = subword_tokens_b, is_correct=flag, max_seq_length = self.max_seq_length)
elif not self.args.get("no_next_sentence", False):
if random.random() < self.args.false_caption_ratio:
item_b = self.items[random.randint(0, len(self.items) - 1)]
while item_b["image_id"] == imageID:
item_b = self.items[random.randint(0, len(self.items) - 1)]
flag = False
else:
item_b = item
flag = True
caption_b = item_b["caption"]
subword_tokens_b = self.tokenizer.tokenize(caption_b)
bert_example = InputExample(unique_id = index, text_a = subword_tokens_b, text_b = None, is_correct=flag, max_seq_length = self.max_seq_length)
else:
caption_b = item["caption"]
subword_tokens_b = self.tokenizer.tokenize(caption_b)
bert_example = InputExample(unique_id = index, text_a = subword_tokens_b, text_b = None, is_correct=None, max_seq_length = self.max_seq_length)
bert_feature = InputFeatures.convert_one_example_to_features_pretraining(
example = bert_example,
tokenizer=self.tokenizer,
probability = self.masked_lm_prob)
bert_feature.insert_field_into_dict(sample)
return Instance(sample)
def __getitem_detector__(self, index):
item = self.items[index]
sample = {}
if self.expanded and index >= self.train_size:
image_file_name = "COCO_val2014_{:0>12d}.jpg".format(item['image_id'])
else:
image_file_name = "COCO_{}2014_{:0>12d}.jpg".format(self.split_name, item['image_id'])
image_info = self.masks[image_file_name]
if "train" in image_file_name:
image_file_path = os.path.join(self.data_root, "train2014", image_file_name)
elif "val" in image_file_name:
image_file_path = os.path.join(self.data_root, "val2014", image_file_name)
###################################################################
        # Most of this is adapted from VCR.
        # Load the image now and rescale it; mean subtraction / normalization happens in to_tensor_and_normalize below.
image = load_image(image_file_path)
image, window, img_scale, padding = resize_image(image, random_pad=self.is_train)
image = to_tensor_and_normalize(image)
c, h, w = image.shape
###################################################################
metadata = self.masks[image_file_name] # Get the metadata
# Load boxes.
# We will use all detections
dets2use = np.arange(len(metadata['boxes']))
# [nobj, 14, 14]
segms = np.stack([make_mask(mask_size=14, box=metadata['boxes'][i], polygons_list=metadata['segms'][i]) for i in dets2use])
# Chop off the final dimension, that's the confidence
boxes = np.array(metadata['boxes'])[dets2use, :-1]
# Possibly rescale them if necessary
boxes *= img_scale
boxes[:, :2] += np.array(padding[:2])[None]
boxes[:, 2:] += np.array(padding[:2])[None]
        try:
            metadata['names'] = [i.split(" ")[1][1:-1] for i in metadata["names"]]
        except Exception:
            # Names are already in plain form; this parsing is best-effort.
            pass
obj_labels = [self.coco_obj_to_ind[metadata['names'][i]] for i in dets2use.tolist()]
boxes = np.row_stack((window, boxes))
segms = np.concatenate((np.ones((1, 14, 14), dtype=np.float32), segms), 0)
obj_labels = [self.coco_obj_to_ind['__background__']] + obj_labels
sample['segms'] = ArrayField(segms, padding_value=0)
sample['objects'] = ListField([LabelField(x, skip_indexing=True) for x in obj_labels])
if not np.all((boxes[:, 0] >= 0.) & (boxes[:, 0] < boxes[:, 2])):
import ipdb
ipdb.set_trace()
assert np.all((boxes[:, 1] >= 0.) & (boxes[:, 1] < boxes[:, 3]))
assert np.all((boxes[:, 2] <= w))
assert np.all((boxes[:, 3] <= h))
sample['boxes'] = ArrayField(boxes, padding_value=-1)
caption_a = item["caption"]
imageID = item["image_id"]
sample["label"] = sample['objects'] # This is an useless field. Just so that they know the batch size.
if self.expanded and index >= self.train_size:
coco = self.coco_val
else:
coco = self.coco
rest_anns = coco.loadAnns([i for i in coco.getAnnIds(imgIds=imageID) if i != item['id']])
if self.args.get("two_sentence", True):
if random.random() > 0.5:
item_b = self.items[random.randint(0, len(self.items) - 1)]
while item_b["image_id"] == imageID:
item_b = self.items[random.randint(0, len(self.items) - 1)]
flag = False
else:
item_b = rest_anns[random.randint(0, len(rest_anns) - 1)]
flag = True # is next sentence
caption_b = item_b["caption"]
subword_tokens_a = self.tokenizer.tokenize(caption_a)
subword_tokens_b = self.tokenizer.tokenize(caption_b)
bert_example = InputExample(unique_id = index, text_a = subword_tokens_a, text_b = subword_tokens_b, is_correct=flag, max_seq_length = self.max_seq_length)
elif not self.args.get("no_next_sentence", False):
if random.random() < self.args.false_caption_ratio:
item_b = self.items[random.randint(0, len(self.items) - 1)]
while item_b["image_id"] == imageID:
item_b = self.items[random.randint(0, len(self.items) - 1)]
flag = False
else:
item_b = item
flag = True # is next sentence
caption_b = item_b["caption"]
subword_tokens_b = self.tokenizer.tokenize(caption_b)
bert_example = InputExample(unique_id = index, text_a = subword_tokens_b, text_b = None, is_correct=flag, max_seq_length = self.max_seq_length)
else:
subword_tokens_a = self.tokenizer.tokenize(caption_a)
bert_example = InputExample(unique_id = index, text_a = subword_tokens_a, text_b = None, is_correct=None, max_seq_length = self.max_seq_length)
bert_feature = InputFeatures.convert_one_example_to_features_pretraining(
example = bert_example,
tokenizer=self.tokenizer,
probability = self.masked_lm_prob)
bert_feature.insert_field_into_dict(sample)
return image, Instance(sample)
@classmethod
def splits(cls, args):
data_root = args.data_root
if args.image_feature_type == "r2c":
# For r2c, the masks are pre-computed from a larger detector. Thus, when pre-training on COCO, we follow the same procedure.
masks = torch.load(os.path.join(data_root, "mask_train.th"))
mask_val = torch.load(os.path.join(data_root, "mask_val.th"))
for i in mask_val:
masks[i] = mask_val[i]
else:
masks = None
if args.image_feature_type == "flickr":
import base64
import csv
import sys
import zlib
import time
import mmap
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'image_w','image_h','num_boxes', 'boxes', 'features']
infiles = [
os.path.join(data_root, "trainval/karpathy_test_resnet101_faster_rcnn_genome.tsv"),
os.path.join(data_root, "trainval/karpathy_train_resnet101_faster_rcnn_genome.tsv.0"),
os.path.join(data_root, "trainval/karpathy_train_resnet101_faster_rcnn_genome.tsv.1"),
os.path.join(data_root, "trainval/karpathy_val_resnet101_faster_rcnn_genome.tsv")
]
chunk = {}
chunk_file = os.path.join(data_root, "trainval/resnet101_genome.th")
if not os.path.exists(chunk_file):
print("Loading COCO files for Flickr30K for the first time...")
for infile in infiles:
with open(infile, "r+") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames = FIELDNAMES)
for item in tqdm(reader):
item['image_id'] = int(item['image_id'])
item['image_h'] = float(item['image_h'])
item['image_w'] = float(item['image_w'])
item['num_boxes'] = int(item['num_boxes'])
for field in ['boxes', 'features']:
# Hope the python2/3 b64decode does not mess things up.
item[field] = np.frombuffer(base64.b64decode(item[field]),
dtype=np.float32).reshape((item['num_boxes'],-1))
item["features"] = torch.from_numpy(item["features"])
item["boxes"] = torch.from_numpy(item["boxes"])
chunk[item['image_id']] = item
torch.save(chunk, chunk_file)
else:
chunk = torch.load(chunk_file)
else:
chunk = None
copy_args = deepcopy(args)
copy_args.split_name = "train"
copy_args.annots_path = os.path.join(data_root, "annotations/captions_{}2014.json".format(copy_args.split_name))
if args.image_feature_type == "nlvr":
copy_args.chunk_path = os.path.join(data_root, "coco_features_{}_150.th".format(copy_args.split_name))
copy_args.data_root = data_root
copy_args.masks = masks
trainset = cls(copy_args, chunk)
trainset.is_train = True
copy_args = deepcopy(args)
copy_args.split_name = "val"
copy_args.annots_path = os.path.join(data_root, "annotations/captions_{}2014.json".format(copy_args.split_name))
if args.image_feature_type == "nlvr":
copy_args.chunk_path = os.path.join(data_root, "coco_features_{}_150.th".format(copy_args.split_name))
copy_args.data_root = data_root
copy_args.masks = masks
validationset = cls(copy_args, chunk)
validationset.is_train = False
if args.get("expand_coco", False):
# This is to expand the COCO train
trainset.expanded = True
trainset.train_size = len(trainset.items)
trainset.items.extend(validationset.items)
trainset.coco_val = validationset.coco
if args.image_feature_type != "r2c" and args.image_feature_type != "vqa_fix_100" and args.image_feature_type != "flickr": # For NLVR, we pre-load features so we need to expand the chunk as well
trainset.chunk_val = validationset.chunk
imdb = np.load(os.path.join(data_root, "data/imdb/imdb_minival2014.npy"), allow_pickle = True)[1:]
image_names_mini_val = set([i["image_name"] + ".jpg" for i in imdb])
if args.get("exclude_minival", False):
trainset.items = [i for i in trainset.items if "COCO_val2014_{:0>12d}.jpg".format(i['image_id']) not in image_names_mini_val]
validationset.items = [i for i in validationset.items if "COCO_val2014_{:0>12d}.jpg".format(i['image_id']) in image_names_mini_val]
print("After expanding, train has {} items, val has {} items".format(len(trainset.items), len(validationset.items)))
testset = validationset # Testset will not be used so this is just a placeholder
return trainset, validationset, testset
@staticmethod
def collate_fn(data):
if isinstance(data[0], Instance):
batch = Batch(data)
td = batch.as_tensor_dict()
return td
else:
images, instances = zip(*data)
images = torch.stack(images, 0)
batch = Batch(instances)
td = batch.as_tensor_dict()
td['box_mask'] = torch.all(td['boxes'] >= 0, -1).long()
td['images'] = images
return td
| 21,482 | 45.600868 | 205 | py |
visualbert | visualbert-master/visualbert/dataloaders/flickr_ban/utils.py | # Copied from https://github.com/jnhwkim/ban-vqa
"""
This code is extended from Hengyuan Hu's repository.
https://github.com/hengyuan-hu/bottom-up-attention-vqa
"""
from __future__ import print_function
import errno
import os
import re
import collections
import numpy as np
import operator
import functools
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch._six import string_classes
from torch.utils.data.dataloader import default_collate, numpy_type_map # numpy_type_map is used by trim_collate below; it exists in the older torch versions this code targets
EPS = 1e-7
def assert_eq(real, expected):
assert real == expected, '%s (true) vs %s (expected)' % (real, expected)
def assert_array_eq(real, expected):
assert (np.abs(real-expected) < EPS).all(), \
'%s (true) vs %s (expected)' % (real, expected)
def assert_tensor_eq(real, expected, eps=EPS):
assert (torch.abs(real-expected) < eps).all(), \
'%s (true) vs %s (expected)' % (real, expected)
def load_folder(folder, suffix):
imgs = []
for f in sorted(os.listdir(folder)):
if f.endswith(suffix):
imgs.append(os.path.join(folder, f))
return imgs
def load_imageid(folder):
images = load_folder(folder, 'jpg')
img_ids = set()
for img in images:
img_id = int(img.split('/')[-1].split('.')[0].split('_')[-1])
img_ids.add(img_id)
return img_ids
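# Illustrative example (added; not from the original ban-vqa source): how the
# numeric id is recovered from a COCO-style filename. The filename is hypothetical.
# >>> int('COCO_val2014_000000123456.jpg'.split('/')[-1].split('.')[0].split('_')[-1])
# 123456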
def pil_loader(path):
with open(path, 'rb') as f:
with Image.open(f) as img:
return img.convert('RGB')
def weights_init(m):
"""custom weights initialization."""
cname = m.__class__
if cname == nn.Linear or cname == nn.Conv2d or cname == nn.ConvTranspose2d:
m.weight.data.normal_(0.0, 0.02)
elif cname == nn.BatchNorm2d:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
else:
print('%s is not initialized.' % cname)
def init_net(net, net_file):
if net_file:
net.load_state_dict(torch.load(net_file))
else:
net.apply(weights_init)
def create_dir(path):
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
def print_model(model, logger):
print(model)
nParams = 0
for w in model.parameters():
nParams += functools.reduce(operator.mul, w.size(), 1)
if logger:
logger.write('nParams=\t'+str(nParams))
def save_model(path, model, epoch, optimizer=None):
model_dict = {
'epoch': epoch,
'model_state': model.state_dict()
}
if optimizer is not None:
model_dict['optimizer_state'] = optimizer.state_dict()
torch.save(model_dict, path)
# Select the indices given by `lengths` in the second dimension
# As a result, # of dimensions is shrunk by one
# @param pad(Tensor)
# @param len(list[int])
def rho_select(pad, lengths):
# Index of the last output for each sequence.
idx_ = (lengths-1).view(-1,1).expand(pad.size(0), pad.size(2)).unsqueeze(1)
extracted = pad.gather(1, idx_).squeeze(1)
return extracted
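# Illustrative sketch (added, not in the original source): rho_select picks the
# last valid timestep of each padded sequence. Shapes and values are hypothetical.
# >>> pad = torch.arange(24.).view(2, 3, 4) # [batch, time, feat]
# >>> rho_select(pad, torch.tensor([2, 3])) # last valid step per row
# tensor([[ 4., 5., 6., 7.],
# [20., 21., 22., 23.]])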
def trim_collate(batch):
"Puts each data field into a tensor with outer dimension batch size"
_use_shared_memory = True
error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
elem_type = type(batch[0])
if torch.is_tensor(batch[0]):
out = None
if 1 < batch[0].dim(): # image features
max_num_boxes = max([x.size(0) for x in batch])
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = len(batch) * max_num_boxes * batch[0].size(-1)
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
# warning: F.pad returns Variable!
return torch.stack([F.pad(x, (0,0,0,max_num_boxes-x.size(0))).data for x in batch], 0, out=out)
else:
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if re.search('[SaUO]', elem.dtype.str) is not None:
raise TypeError(error_msg.format(elem.dtype))
return torch.stack([torch.from_numpy(b) for b in batch], 0)
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], int):
return torch.LongTensor(batch)
elif isinstance(batch[0], float):
return torch.DoubleTensor(batch)
elif isinstance(batch[0], string_classes):
return batch
elif isinstance(batch[0], collections.Mapping):
return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], collections.Sequence):
transposed = zip(*batch)
return [trim_collate(samples) for samples in transposed]
raise TypeError((error_msg.format(type(batch[0]))))
class Logger(object):
def __init__(self, output_name):
dirname = os.path.dirname(output_name)
if not os.path.exists(dirname):
os.mkdir(dirname)
self.log_file = open(output_name, 'w')
self.infos = {}
def append(self, key, val):
vals = self.infos.setdefault(key, [])
vals.append(val)
def log(self, extra_msg=''):
msgs = [extra_msg]
for key, vals in self.infos.items(): # dict.items() (iteritems is Python 2 only)
msgs.append('%s %.6f' % (key, np.mean(vals)))
msg = '\n'.join(msgs)
self.log_file.write(msg + '\n')
self.log_file.flush()
self.infos = {}
return msg
def write(self, msg):
self.log_file.write(msg + '\n')
self.log_file.flush()
print(msg)
def create_glove_embedding_init(idx2word, glove_file):
word2emb = {}
with open(glove_file, 'r', encoding='utf-8') as f:
entries = f.readlines()
emb_dim = len(entries[0].split(' ')) - 1
print('embedding dim is %d' % emb_dim)
weights = np.zeros((len(idx2word), emb_dim), dtype=np.float32)
for entry in entries:
vals = entry.split(' ')
word = vals[0]
vals = list(map(float, vals[1:]))
word2emb[word] = np.array(vals)
for idx, word in enumerate(idx2word):
if word not in word2emb:
continue
weights[idx] = word2emb[word]
return weights, word2emb
# Remove Flickr30K Entity annotations in a string
def remove_annotations(s):
return re.sub(r'\[[^ ]+ ','',s).replace(']', '')
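# Illustrative example (added): stripping Flickr30k Entities markup. The sentence
# below is hypothetical.
# >>> remove_annotations('[/EN#40/people A man] sits on [/EN#41/other a bench]')
# 'A man sits on a bench'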
def get_sent_data(file_path):
phrases = []
with open(file_path, 'r', encoding='utf-8') as f:
for sent in f:
str = remove_annotations(sent.strip())
phrases.append(str)
return phrases
# Find position of a given sublist
# return the index of the last token
def find_sublist(arr, sub):
sublen = len(sub)
first = sub[0]
indx = -1
while True:
try:
indx = arr.index(first, indx + 1)
except ValueError:
break
if sub == arr[indx: indx + sublen]:
return indx + sublen - 1
return -1
# Find position of a given sublist
# return the index of the last token as well as the first token
def find_sublist_full(arr, sub):
sublen = len(sub)
first = sub[0]
indx = -1
while True:
try:
indx = arr.index(first, indx + 1)
except ValueError:
break
if sub == arr[indx: indx + sublen]:
return (indx, indx + sublen - 1)
return (-1, -1)
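# Illustrative examples (added) for both sublist finders; the inputs are hypothetical.
# >>> find_sublist(['a', 'big', 'red', 'dog'], ['red', 'dog']) # index of last token
# 3
# >>> find_sublist_full(['a', 'big', 'red', 'dog'], ['red', 'dog']) # (first, last)
# (2, 3)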
def calculate_iou(obj1, obj2):
area1 = calculate_area(obj1)
area2 = calculate_area(obj2)
intersection = get_intersection(obj1, obj2)
area_int = calculate_area(intersection)
return area_int / (area1 + area2 - area_int)
def calculate_area(obj):
return (obj[2] - obj[0]) * (obj[3] - obj[1])
def get_intersection(obj1, obj2):
left = obj1[0] if obj1[0] > obj2[0] else obj2[0]
top = obj1[1] if obj1[1] > obj2[1] else obj2[1]
right = obj1[2] if obj1[2] < obj2[2] else obj2[2]
bottom = obj1[3] if obj1[3] < obj2[3] else obj2[3]
if left > right or top > bottom:
return [0, 0, 0, 0]
return [left, top, right, bottom]
def get_match_index(src_bboxes, dst_bboxes):
indices = set()
for src_bbox in src_bboxes:
for i, dst_bbox in enumerate(dst_bboxes):
iou = calculate_iou(src_bbox, dst_bbox)
if iou >= 0.5:
indices.add(i)
return list(indices)
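# Illustrative examples (added); boxes are hypothetical [x1, y1, x2, y2] lists.
# >>> calculate_iou([0, 0, 10, 10], [5, 5, 15, 15]) # 25 overlap / 175 union
# 0.14285714285714285
# >>> get_match_index([[0, 0, 10, 10]], [[0, 0, 10, 10], [100, 100, 110, 110]])
# [0]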
# Batched index_select
def batched_index_select(t, dim, inds):
dummy = inds.unsqueeze(2).expand(inds.size(0), inds.size(1), t.size(2))
out = t.gather(dim, dummy) # b x e x f
return out
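# Illustrative example (added): gathering per-batch rows along dim 1; values are hypothetical.
# >>> t = torch.arange(12.).view(1, 4, 3) # [b, e, f]
# >>> batched_index_select(t, 1, torch.tensor([[2, 0]]))
# tensor([[[6., 7., 8.],
# [0., 1., 2.]]])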
| 9,306 | 29.817881 | 107 | py |
visualbert | visualbert-master/visualbert/dataloaders/flickr_ban/dataset.py | # Modified from https://github.com/jnhwkim/ban-vqa
"""
This code is modified from Hengyuan Hu's repository.
https://github.com/hengyuan-hu/bottom-up-attention-vqa
"""
from __future__ import print_function
import os
import json
import _pickle as cPickle
import pickle
import numpy as np
from visualbert.dataloaders.flickr_ban import utils
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=FutureWarning)
import h5py
from xml.etree.ElementTree import parse
import torch
from torch.utils.data import Dataset
try:
from tools import compute_softscore
except:
pass
import itertools
import re
from tqdm import tqdm
COUNTING_ONLY = False
# Following Trott et al. (ICLR 2018)
# Interpretable Counting for Visual Question Answering
def is_howmany(q, a, label2ans):
if 'how many' in q.lower() or \
('number of' in q.lower() and 'number of the' not in q.lower()) or \
'amount of' in q.lower() or \
'count of' in q.lower():
if a is None or answer_filter(a, label2ans):
return True
else:
return False
else:
return False
def answer_filter(answers, label2ans, max_num=10):
for ans in answers['labels']:
if label2ans[ans].isdigit() and max_num >= int(label2ans[ans]):
return True
return False
class Dictionary(object):
def __init__(self, word2idx=None, idx2word=None):
if word2idx is None:
word2idx = {}
if idx2word is None:
idx2word = []
self.word2idx = word2idx
self.idx2word = idx2word
@property
def ntoken(self):
return len(self.word2idx)
@property
def padding_idx(self):
return len(self.word2idx)
def tokenize(self, sentence, add_word):
sentence = sentence.lower()
sentence = sentence.replace(',', '').replace('?', '').replace('\'s', ' \'s')
words = sentence.split()
tokens = []
if add_word:
for w in words:
tokens.append(self.add_word(w))
else:
for w in words:
# the least frequent word (`bebe`) as UNK for Visual Genome dataset
tokens.append(self.word2idx.get(w, self.padding_idx-1))
return tokens
def dump_to_file(self, path):
cPickle.dump([self.word2idx, self.idx2word], open(path, 'wb'))
print('dictionary dumped to %s' % path)
@classmethod
def load_from_file(cls, path):
print('loading dictionary from %s' % path)
word2idx, idx2word = cPickle.load(open(path, 'rb'))
d = cls(word2idx, idx2word)
return d
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
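# Illustrative usage (added; not part of the original module) of the Dictionary
# class above. The sentence is hypothetical.
# >>> d = Dictionary()
# >>> d.tokenize('Where is the dog?', add_word=True)
# [0, 1, 2, 3]
# >>> d.padding_idx
# 4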
def _create_entry(img, question, answer):
if None!=answer:
answer.pop('image_id')
answer.pop('question_id')
entry = {
'question_id' : question['question_id'],
'image_id' : question['image_id'],
'image' : img,
'question' : question['question'],
'answer' : answer}
return entry
def _load_dataset(dataroot, name, img_id2val, label2ans):
"""Load entries
img_id2val: dict {img_id -> val} val can be used to retrieve image or features
dataroot: root path of dataset
name: 'train', 'val', 'test-dev2015', 'test2015'
"""
question_path = os.path.join(
dataroot, 'v2_OpenEnded_mscoco_%s_questions.json' % \
(name + '2014' if 'test'!=name[:4] else name))
questions = sorted(json.load(open(question_path))['questions'],
key=lambda x: x['question_id'])
if 'test'!=name[:4]: # train, val
answer_path = os.path.join(dataroot, 'cache', '%s_target.pkl' % name)
answers = cPickle.load(open(answer_path, 'rb'))
answers = sorted(answers, key=lambda x: x['question_id'])
utils.assert_eq(len(questions), len(answers))
entries = []
for question, answer in zip(questions, answers):
utils.assert_eq(question['question_id'], answer['question_id'])
utils.assert_eq(question['image_id'], answer['image_id'])
img_id = question['image_id']
if not COUNTING_ONLY or is_howmany(question['question'], answer, label2ans):
entries.append(_create_entry(img_id2val[img_id], question, answer))
else: # test2015
entries = []
for question in questions:
img_id = question['image_id']
if not COUNTING_ONLY or is_howmany(question['question'], None, None):
entries.append(_create_entry(img_id2val[img_id], question, None))
return entries
def _load_visualgenome(dataroot, name, img_id2val, label2ans, adaptive=True):
"""Load entries
img_id2val: dict {img_id -> val} val can be used to retrieve image or features
dataroot: root path of dataset
name: 'train', 'val'
"""
question_path = os.path.join(dataroot, 'question_answers.json')
image_data_path = os.path.join(dataroot, 'image_data.json')
ans2label_path = os.path.join(dataroot, 'cache', 'trainval_ans2label.pkl')
cache_path = os.path.join(dataroot, 'cache', 'vg_%s%s_target.pkl' % (name, '_adaptive' if adaptive else ''))
if os.path.isfile(cache_path):
entries = cPickle.load(open(cache_path, 'rb'))
else:
entries = []
ans2label = cPickle.load(open(ans2label_path, 'rb'))
vgq = json.load(open(question_path, 'r'))
_vgv = json.load(open(image_data_path, 'r')) #108,077
vgv = {}
for _v in _vgv:
if None != _v['coco_id']:
vgv[_v['id']] = _v['coco_id']
counts = [0, 0, 0, 0] # used image, used question, total question, out-of-split
for vg in vgq:
coco_id = vgv.get(vg['id'], None)
if None != coco_id:
counts[0] += 1
img_idx = img_id2val.get(coco_id, None)
if None == img_idx:
counts[3] += 1
for q in vg['qas']:
counts[2] += 1
_answer = tools.compute_softscore.preprocess_answer(q['answer'])
label = ans2label.get(_answer, None)
if None != label and None != img_idx:
counts[1] += 1
answer = {
'labels': [label],
'scores': [1.]}
entry = {
'question_id' : q['id'],
'image_id' : coco_id,
'image' : img_idx,
'question' : q['question'],
'answer' : answer}
if not COUNTING_ONLY or is_howmany(q['question'], answer, label2ans):
entries.append(entry)
print('Loading VisualGenome %s' % name)
print('\tUsed COCO images: %d/%d (%.4f)' % \
(counts[0], len(_vgv), counts[0]/len(_vgv)))
print('\tOut-of-split COCO images: %d/%d (%.4f)' % \
(counts[3], counts[0], counts[3]/counts[0]))
print('\tUsed VG questions: %d/%d (%.4f)' % \
(counts[1], counts[2], counts[1]/counts[2]))
with open(cache_path, 'wb') as f:
cPickle.dump(entries, f) # dump into the already-open handle (the original opened the file a second time)
return entries
def _find_coco_id(vgv, vgv_id):
for v in vgv:
if v['id']==vgv_id:
return v['coco_id']
return None
def _load_flickr30k(dataroot, img_id2idx, bbox, pos_boxes, limit = None, cache_name = None):
"""Load entries
img_id2idx: dict {img_id -> val} val can be used to retrieve image or features
dataroot: root path of dataset
cache_name: basename of the entry cache file written under dataroot
"""
pattern_phrase = r'\[(.*?)\]'
pattern_no = r'\/EN\#(\d+)'
missing_entity_count = dict()
multibox_entity_count = 0
entries = []
counter = 0
cache_name = os.path.join(dataroot, "{}.cache".format(cache_name))
if os.path.exists(cache_name):
with open(cache_name, "rb") as f:
entries = pickle.load(f)
else:
for image_id, idx in tqdm(img_id2idx.items()):
if limit is not None and counter == limit:
break
counter += 1
phrase_file = os.path.join(dataroot, 'Flickr30kEntities/Sentences/%d.txt' % image_id)
anno_file = os.path.join(dataroot, 'Flickr30kEntities/Annotations/%d.xml' % image_id)
with open(phrase_file, 'r', encoding='utf-8') as f:
sents = [x.strip() for x in f]
# Parse Annotation
root = parse(anno_file).getroot()
obj_elems = root.findall('./object')
pos_box = pos_boxes[idx]
bboxes = bbox[pos_box[0]:pos_box[1]]
target_bboxes = {}
for elem in obj_elems:
if elem.find('bndbox') == None or len(elem.find('bndbox')) == 0:
continue
left = int(elem.findtext('./bndbox/xmin'))
top = int(elem.findtext('./bndbox/ymin'))
right = int(elem.findtext('./bndbox/xmax'))
bottom = int(elem.findtext('./bndbox/ymax'))
assert 0 < left and 0 < top
for name in elem.findall('name'):
entity_id = int(name.text)
assert 0 < entity_id
if not entity_id in target_bboxes:
target_bboxes[entity_id] = []
else:
multibox_entity_count += 1
target_bboxes[entity_id].append([left, top, right, bottom])
# Parse Sentence
for sent_id, sent in enumerate(sents):
sentence = utils.remove_annotations(sent)
entities = re.findall(pattern_phrase, sent)
entity_indices = []
target_indices = []
entity_ids = []
entity_types = []
#print(sentence)
for entity_i, entity in enumerate(entities):
info, phrase = entity.split(' ', 1)
entity_id = int(re.findall(pattern_no, info)[0])
entity_type = info.split('/')[2:]
entity_idx = utils.find_sublist(sentence.split(' '), phrase.split(' '))
#assert 0 <= entity_idx
if not entity_id in target_bboxes:
if entity_id >= 0:
missing_entity_count[entity_type[0]] = missing_entity_count.get(entity_type[0], 0) + 1
continue
assert 0 < entity_id
entity_ids.append(entity_id)
entity_types.append(entity_type)
target_idx = utils.get_match_index(target_bboxes[entity_id], bboxes)
entity_indices.append(entity_idx)
target_indices.append(target_idx)
if 0 == len(entity_ids):
continue
entries.append(
_create_flickr_entry(idx, sentence, entity_indices, target_indices, entity_ids, entity_types))
if 0 < len(missing_entity_count.keys()):
print('missing_entity_count=')
print(missing_entity_count)
print('multibox_entity_count=%d' % multibox_entity_count)
with open(cache_name, "wb") as f:
pickle.dump(entries,f)
return entries
def _load_flickr30k_full_entity(dataroot, img_id2idx, bbox, pos_boxes, limit = None, cache_name = None):
"""Load entries
img_id2idx: dict {img_id -> val} val can be used to retrieve image or features
dataroot: root path of dataset
cache_name: basename of the entry cache file written under dataroot
"""
pattern_phrase = r'\[(.*?)\]'
pattern_no = r'\/EN\#(\d+)'
missing_entity_count = dict()
multibox_entity_count = 0
entries = []
counter = 0
cache_name = os.path.join(dataroot, "{}.cache".format(cache_name))
if os.path.exists(cache_name):
with open(cache_name, "rb") as f:
entries = pickle.load(f)
else:
for image_id, idx in tqdm(img_id2idx.items()):
if limit is not None and counter == limit:
break
counter += 1
phrase_file = os.path.join(dataroot, 'Flickr30kEntities/Sentences/%d.txt' % image_id)
anno_file = os.path.join(dataroot, 'Flickr30kEntities/Annotations/%d.xml' % image_id)
with open(phrase_file, 'r', encoding='utf-8') as f:
sents = [x.strip() for x in f]
# Parse Annotation
root = parse(anno_file).getroot()
obj_elems = root.findall('./object')
pos_box = pos_boxes[idx]
bboxes = bbox[pos_box[0]:pos_box[1]]
target_bboxes = {}
for elem in obj_elems:
if elem.find('bndbox') == None or len(elem.find('bndbox')) == 0:
continue
left = int(elem.findtext('./bndbox/xmin'))
top = int(elem.findtext('./bndbox/ymin'))
right = int(elem.findtext('./bndbox/xmax'))
bottom = int(elem.findtext('./bndbox/ymax'))
assert 0 < left and 0 < top
for name in elem.findall('name'):
entity_id = int(name.text)
assert 0 < entity_id
if not entity_id in target_bboxes:
target_bboxes[entity_id] = []
else:
multibox_entity_count += 1
target_bboxes[entity_id].append([left, top, right, bottom])
# Parse Sentence
for sent_id, sent in enumerate(sents):
sentence = utils.remove_annotations(sent)
entities = re.findall(pattern_phrase, sent)
entity_indices = []
target_indices = []
entity_ids = []
entity_types = []
original_target = []
#print(sentence)
for entity_i, entity in enumerate(entities):
info, phrase = entity.split(' ', 1)
entity_id = int(re.findall(pattern_no, info)[0])
entity_type = info.split('/')[2:]
entity_idx = utils.find_sublist_full(sentence.split(' '), phrase.split(' '))
#assert 0 <= entity_idx
if not entity_id in target_bboxes:
if entity_id >= 0:
missing_entity_count[entity_type[0]] = missing_entity_count.get(entity_type[0], 0) + 1
continue
assert 0 < entity_id
entity_ids.append(entity_id)
entity_types.append(entity_type)
target_idx = utils.get_match_index(target_bboxes[entity_id], bboxes)
entity_indices.append(entity_idx)
target_indices.append(target_idx)
original_target.append(target_bboxes[entity_id])
if 0 == len(entity_ids):
continue
entries.append(
_create_flickr_entry(idx, sentence, entity_indices, target_indices, entity_ids, entity_types, original_target = original_target))
if 0 < len(missing_entity_count.keys()):
print('missing_entity_count=')
print(missing_entity_count)
print('multibox_entity_count=%d' % multibox_entity_count)
with open(cache_name, "wb") as f:
pickle.dump(entries,f)
return entries
def _load_flickr30k_our(dataroot, features, limit = None):
"""Load entries
features: dict {image_id -> item}; item[1] holds the detected bboxes for that image
dataroot: root path of dataset
"""
pattern_phrase = r'\[(.*?)\]'
pattern_no = r'\/EN\#(\d+)'
missing_entity_count = dict()
multibox_entity_count = 0
entries = []
counter = 0
for image_id, item in tqdm(features.items()):
if limit is not None and counter == limit:
break
counter += 1
bboxes = item[1]
image_id_number = int(image_id[:-4])
phrase_file = os.path.join(dataroot, 'Flickr30kEntities/Sentences/%d.txt' % image_id_number)
anno_file = os.path.join(dataroot, 'Flickr30kEntities/Annotations/%d.xml' % image_id_number)
with open(phrase_file, 'r', encoding='utf-8') as f:
sents = [x.strip() for x in f]
# Parse Annotation
root = parse(anno_file).getroot()
obj_elems = root.findall('./object')
target_bboxes = {}
for elem in obj_elems:
if elem.find('bndbox') == None or len(elem.find('bndbox')) == 0:
continue
left = int(elem.findtext('./bndbox/xmin'))
top = int(elem.findtext('./bndbox/ymin'))
right = int(elem.findtext('./bndbox/xmax'))
bottom = int(elem.findtext('./bndbox/ymax'))
assert 0 < left and 0 < top
for name in elem.findall('name'):
entity_id = int(name.text)
assert 0 < entity_id
if not entity_id in target_bboxes:
target_bboxes[entity_id] = []
else:
multibox_entity_count += 1
target_bboxes[entity_id].append([left, top, right, bottom])
# Parse Sentence
for sent_id, sent in enumerate(sents):
sentence = utils.remove_annotations(sent)
entities = re.findall(pattern_phrase, sent)
entity_indices = []
target_indices = []
entity_ids = []
entity_types = []
#print(sentence)
for entity_i, entity in enumerate(entities):
info, phrase = entity.split(' ', 1)
entity_id = int(re.findall(pattern_no, info)[0])
entity_type = info.split('/')[2:]
entity_idx = utils.find_sublist(sentence.split(' '), phrase.split(' '))
assert 0 <= entity_idx
if not entity_id in target_bboxes:
if entity_id >= 0:
missing_entity_count[entity_type[0]] = missing_entity_count.get(entity_type[0], 0) + 1
continue
assert 0 < entity_id
entity_ids.append(entity_id)
entity_types.append(entity_type)
target_idx = utils.get_match_index(target_bboxes[entity_id], bboxes)
entity_indices.append(entity_idx)
target_indices.append(target_idx)
if 0 == len(entity_ids):
continue
entries.append(
_create_flickr_entry(image_id, sentence, entity_indices, target_indices, entity_ids, entity_types))
if 0 < len(missing_entity_count.keys()):
print('missing_entity_count=')
print(missing_entity_count)
print('multibox_entity_count=%d' % multibox_entity_count)
return entries
# idx, sentence, entity_indices, target_indices, entity_ids, entity_types
def _create_flickr_entry(img, sentence, entity_indices, target_indices, entity_ids, entity_types, original_target = None):
type_map = {'people':0,'clothing':1,'bodyparts':2,'animals':3,'vehicles':4,'instruments':5,'scene':6,'other':7}
MAX_TYPE_NUM = 3
for i, entity_type in enumerate(entity_types):
assert MAX_TYPE_NUM >= len(entity_type)
entity_types[i] = list(type_map[x] for x in entity_type)
entity_types[i] += [-1] * (MAX_TYPE_NUM-len(entity_type))
entry = {
'image' : img,
'sentence' : sentence,
'entity_indices' : entity_indices,
'target_indices' : target_indices,
'entity_ids' : entity_ids,
'entity_types' : entity_types,
'entity_num' : len(entity_ids),
"original_target": original_target}
return entry
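# Illustrative example (added): entity types are mapped to ids and padded to
# MAX_TYPE_NUM with -1. All argument values below are hypothetical.
# >>> e = _create_flickr_entry(0, 'a man plays guitar', [2], [[0]], [40], [['people']])
# >>> e['entity_types']
# [[0, -1, -1]]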
def tfidf_from_questions(names, dictionary, dataroot='data', target=['vqa', 'vg', 'cap', 'flickr']):
inds = [[], []] # rows, cols for uncoalesce sparse matrix
df = dict()
N = len(dictionary)
def populate(inds, df, text):
tokens = dictionary.tokenize(text, True)
for t in tokens:
df[t] = df.get(t, 0) + 1
combin = list(itertools.combinations(tokens, 2))
for c in combin:
if c[0] < N:
inds[0].append(c[0]); inds[1].append(c[1])
if c[1] < N:
inds[0].append(c[1]); inds[1].append(c[0])
if 'vqa' in target: # VQA 2.0
for name in names:
assert name in ['train', 'val', 'test-dev2015', 'test2015']
question_path = os.path.join(
dataroot, 'v2_OpenEnded_mscoco_%s_questions.json' % \
(name + '2014' if 'test'!=name[:4] else name))
questions = json.load(open(question_path))['questions']
for question in questions:
populate(inds, df, question['question'])
if 'vg' in target: # Visual Genome
question_path = os.path.join(dataroot, 'question_answers.json')
vgq = json.load(open(question_path, 'r'))
for vg in vgq:
for q in vg['qas']:
populate(inds, df, q['question'])
if 'cap' in target: # MSCOCO Caption
for split in ['train2017', 'val2017']:
captions = json.load(open('data/annotations/captions_%s.json' % split, 'r'))
for caps in captions['annotations']:
populate(inds, df, caps['caption'])
# TF-IDF
vals = [1] * len(inds[1])
for idx, col in enumerate(inds[1]):
assert df[col] >= 1, 'document frequency should be greater than zero!'
vals[idx] /= df[col] # normalize by document frequency, indexing by position (the original `vals[col]` looks like an off-by-variable bug)
# Make stochastic matrix
def normalize(inds, vals):
z = dict()
for row, val in zip(inds[0], vals):
z[row] = z.get(row, 0) + val
for idx, row in enumerate(inds[0]):
vals[idx] /= z[row]
return vals
vals = normalize(inds, vals)
tfidf = torch.sparse.FloatTensor(torch.LongTensor(inds), torch.FloatTensor(vals))
tfidf = tfidf.coalesce()
# Latent word embeddings
emb_dim = 300
glove_file = 'data/glove/glove.6B.%dd.txt' % emb_dim
weights, word2emb = utils.create_glove_embedding_init(dictionary.idx2word[N:], glove_file)
print('tf-idf stochastic matrix (%d x %d) is generated.' % (tfidf.size(0), tfidf.size(1)))
return tfidf, weights
| 23,011 | 35.8192 | 149 | py |
visualbert | visualbert-master/visualbert/utils/pytorch_misc.py | """
Question relevance model
"""
# Make stuff
import os
import re
import shutil
import time
import numpy as np
import pandas as pd
import torch
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.nn.util import device_mapping
from allennlp.training.trainer import move_optimizer_to_cuda
from torch.nn import DataParallel
import torch.nn.functional as F
def time_batch(gen, reset_every=100):
"""
Gets timing info for a batch
:param gen:
:param reset_every: How often we'll reset
:return:
"""
start = time.time()
start_t = 0
for i, item in enumerate(gen):
time_per_batch = (time.time() - start) / (i + 1 - start_t)
yield time_per_batch, item
if i % reset_every == 0:
start = time.time()
start_t = i
class Flattener(torch.nn.Module):
def __init__(self):
"""
Flattens last 3 dimensions to make it only batch size, -1
"""
super(Flattener, self).__init__()
def forward(self, x):
return x.view(x.size(0), -1)
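# Illustrative example (added); the input shape is hypothetical.
# >>> Flattener()(torch.zeros(2, 3, 4, 5)).shape
# torch.Size([2, 60])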
def pad_sequence(sequence, lengths):
"""
:param sequence: [\sum b, .....] sequence
:param lengths: [b1, b2, b3...] that sum to \sum b
:return: [len(lengths), maxlen(b), .....] tensor
"""
output = sequence.new_zeros(len(lengths), max(lengths), *sequence.shape[1:])
start = 0
for i, diff in enumerate(lengths):
if diff > 0:
output[i, :diff] = sequence[start:(start + diff)]
start += diff
return output
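# Illustrative example (added): three sequences of lengths 2, 1 and 3 are packed
# into a [3, 3, 1] zero-padded tensor. Values are hypothetical.
# >>> pad_sequence(torch.arange(6.).view(6, 1), [2, 1, 3]).shape
# torch.Size([3, 3, 1])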
def extra_leading_dim_in_sequence(f, x, mask):
return f(x.view(-1, *x.shape[2:]), mask.view(-1, mask.shape[2])).view(*x.shape[:3], -1)
def clip_grad_norm(named_parameters, max_norm, clip=True, verbose=False):
"""Clips gradient norm of an iterable of parameters.
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
Arguments:
parameters (Iterable[Variable]): an iterable of Variables that will have
gradients normalized
max_norm (float or int): max norm of the gradients
Returns:
Total norm of the parameters (viewed as a single vector).
"""
max_norm = float(max_norm)
parameters = [(n, p) for n, p in named_parameters if p.grad is not None]
total_norm = 0
param_to_norm = {}
param_to_shape = {}
for n, p in parameters:
param_norm = p.grad.data.norm(2)
total_norm += param_norm ** 2
param_to_norm[n] = param_norm
param_to_shape[n] = tuple(p.size())
if np.isnan(param_norm.item()):
raise ValueError("the param {} was null.".format(n))
total_norm = total_norm ** (1. / 2)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef.item() < 1 and clip:
for n, p in parameters:
p.grad.data.mul_(clip_coef)
if verbose:
print('---Total norm {:.3f} clip coef {:.3f}-----------------'.format(total_norm, clip_coef))
for name, norm in sorted(param_to_norm.items(), key=lambda x: -x[1]):
print("{:<60s}: {:.3f}, ({}: {})".format(name, norm, np.prod(param_to_shape[name]), param_to_shape[name]))
print('-------------------------------', flush=True)
return pd.Series({name: norm.item() for name, norm in param_to_norm.items()})
def find_latest_checkpoint(serialization_dir, epoch_to_load = None):
"""
Return the location of the latest model and training state files.
If there isn't a valid checkpoint then return None.
"""
have_checkpoint = (serialization_dir is not None and
any("model_state_epoch_" in x for x in os.listdir(serialization_dir)))
if not have_checkpoint:
return None
serialization_files = os.listdir(serialization_dir)
model_checkpoints = [x for x in serialization_files if "model_state_epoch" in x]
# Get the last checkpoint file. Epochs are specified as either an
# int (for end of epoch files) or with epoch and timestamp for
# within epoch checkpoints, e.g. 5.2018-02-02-15-33-42
found_epochs = [
# pylint: disable=anomalous-backslash-in-string
re.search("model_state_epoch_([0-9\.\-]+)\.th", x).group(1)
for x in model_checkpoints
]
int_epochs = []
for epoch in found_epochs:
pieces = epoch.split('.')
if len(pieces) == 1:
# Just a single epoch without timestamp
int_epochs.append([int(pieces[0]), 0])
else:
# has a timestamp
int_epochs.append([int(pieces[0]), pieces[1]])
last_epoch = sorted(int_epochs, reverse=True)[0]
if epoch_to_load is None:
if last_epoch[1] == 0:
epoch_to_load = str(last_epoch[0])
else:
epoch_to_load = '{0}.{1}'.format(last_epoch[0], last_epoch[1])
model_path = os.path.join(serialization_dir,
"model_state_epoch_{}.th".format(epoch_to_load))
training_state_path = os.path.join(serialization_dir,
"training_state_epoch_{}.th".format(epoch_to_load))
return model_path, training_state_path
def find_latest_checkpoint_step(serialization_dir, epoch_to_load = None):
"""
Return the location of the latest model and training state files.
If there isn't a valid checkpoint then return None.
"""
have_checkpoint = (serialization_dir is not None and
any("model_step_" in x for x in os.listdir(serialization_dir)))
if not have_checkpoint:
return None
serialization_files = os.listdir(serialization_dir)
model_checkpoints = [x for x in serialization_files if "model_step_" in x]
# Get the last checkpoint file. Checkpoints here are named
# "model_step_{step}_epoch_{epoch}.th"; pick the highest epoch,
# breaking ties by the highest step within that epoch.
info = [(x, int(x.split('_')[2]), int(x.split('_')[4].split('.')[0])) for x in model_checkpoints]
max_epoch = -1
max_step = -1
max_index = -1
for index, i in enumerate(info):
if i[2] > max_epoch:
max_epoch = i[2]
max_step = i[1]
max_index = index
elif i[2] == max_epoch:
if i[1] > max_step:
max_step = i[1]
max_index = index
model_path = os.path.join(serialization_dir,
"model_step_{}_epoch_{}.th".format(max_step, max_epoch))
training_state_path = os.path.join(serialization_dir,
"training_step_{}_epoch_{}.th".format(max_step, max_epoch))
return model_path, training_state_path
def save_checkpoint(model, optimizer, serialization_dir, epoch, val_metric_per_epoch, is_best=None,
learning_rate_scheduler=None) -> None:
"""
Saves a checkpoint of the model to self._serialization_dir.
Is a no-op if self._serialization_dir is None.
Parameters
----------
epoch : Union[int, str], required.
The epoch of training. If the checkpoint is saved in the middle
of an epoch, the parameter is a string with the epoch and timestamp.
is_best: bool, optional (default = None)
A flag which causes the model weights at the given epoch to
be copied to a "best.th" file. The value of this flag should
be based on some validation metric computed by your model.
"""
if serialization_dir is not None:
model_path = os.path.join(serialization_dir, "model_state_epoch_{}.th".format(epoch))
model_state = model.module.state_dict() if isinstance(model, DataParallel) else model.state_dict()
torch.save(model_state, model_path)
training_state = {'epoch': epoch,
'val_metric_per_epoch': val_metric_per_epoch,
'optimizer': optimizer.state_dict()
}
if learning_rate_scheduler is not None:
training_state["learning_rate_scheduler"] = \
learning_rate_scheduler.lr_scheduler.state_dict()
training_path = os.path.join(serialization_dir,
"training_state_epoch_{}.th".format(epoch))
torch.save(training_state, training_path)
if is_best:
print("Best validation performance so far. Copying weights to '{}/best.th'.".format(serialization_dir))
shutil.copyfile(model_path, os.path.join(serialization_dir, "best.th"))
def restore_best_checkpoint(model, serialization_dir):
fn = os.path.join(serialization_dir, 'best.th')
assert os.path.exists(fn) # check before loading
model_state = torch.load(fn, map_location=device_mapping(-1))
if isinstance(model, DataParallel):
model.module.load_state_dict(model_state)
else:
model.load_state_dict(model_state)
def restore_checkpoint_flexible(model, fn):
assert os.path.exists(fn) # check before loading
model_state = torch.load(fn, map_location=device_mapping(-1))
if isinstance(model, DataParallel):
load_state_dict_flexible(model.module, model_state)
else:
load_state_dict_flexible(model, model_state)
def load_state_dict_flexible(model, state_dict):
try:
model.load_state_dict(state_dict)
except:
print("Full loading failed!! Try partial loading!!")
own_state = model.state_dict()
for name, param in state_dict.items():
if name not in own_state:
print("Skipped: " + name)
continue
if isinstance(param, torch.nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
own_state[name].copy_(param)
print("Successfully loaded: "+name)
except:
print("Part load failed: " + name)
def restore_checkpoint(model, optimizer, serialization_dir, epoch_to_load = None, learning_rate_scheduler=None):
"""
Restores a model from a serialization_dir to the last saved checkpoint.
This includes an epoch count and optimizer state, which is serialized separately
from model parameters. This function should only be used to continue training -
if you wish to load a model for inference/load parts of a model into a new
computation graph, you should use the native Pytorch functions:
`` model.load_state_dict(torch.load("/path/to/model/weights.th"))``
If ``self._serialization_dir`` does not exist or does not contain any checkpointed weights,
this function will do nothing and return 0.
Returns
-------
epoch: int
The epoch at which to resume training, which should be one after the epoch
in the saved training state.
"""
latest_checkpoint = find_latest_checkpoint(serialization_dir, epoch_to_load)
latest_checkpoint_step = find_latest_checkpoint_step(serialization_dir, epoch_to_load)
if latest_checkpoint is None and latest_checkpoint_step is None:
# No checkpoint to restore, start at 0
return 0, []
if latest_checkpoint is None:
latest_checkpoint = latest_checkpoint_step
model_path, training_state_path = latest_checkpoint
# Load the parameters onto CPU, then transfer to GPU.
# This avoids potential OOM on GPU for large models that
# load parameters onto GPU then make a new GPU copy into the parameter
# buffer. The GPU transfer happens implicitly in load_state_dict.
model_state = torch.load(model_path, map_location=device_mapping(-1))
training_state = torch.load(training_state_path, map_location=device_mapping(-1))
if isinstance(model, DataParallel):
model.module.load_state_dict(model_state)
else:
model.load_state_dict(model_state)
# idk this is always bad luck for me
optimizer.load_state_dict(training_state["optimizer"])
if learning_rate_scheduler is not None and "learning_rate_scheduler" in training_state:
learning_rate_scheduler.lr_scheduler.load_state_dict(
training_state["learning_rate_scheduler"])
move_optimizer_to_cuda(optimizer)
# We didn't used to save `validation_metric_per_epoch`, so we can't assume
# that it's part of the trainer state. If it's not there, an empty list is all
# we can do.
if "val_metric_per_epoch" not in training_state:
print("trainer state `val_metric_per_epoch` not found, using empty list")
val_metric_per_epoch: []
else:
val_metric_per_epoch = training_state["val_metric_per_epoch"]
if isinstance(training_state["epoch"], int):
epoch_to_return = training_state["epoch"] + 1
else:
epoch_to_return = int(training_state["epoch"].split('.')[0]) + 1
print("########### Restroing states... from {}, at epoch {}".format(model_path, epoch_to_return))
if "step" in training_state:
print("########### Restroing states... from {}, at step {}".format(model_path, training_state["step"]))
return epoch_to_return, val_metric_per_epoch
def detokenize(array, vocab):
"""
Given an array of ints, we'll turn this into a string or a list of strings.
:param array: possibly multidimensional numpy array
:return:
"""
if array.ndim > 1:
return [detokenize(x, vocab) for x in array]
tokenized = [vocab.get_token_from_index(v) for v in array]
return ' '.join([x for x in tokenized if x not in (vocab._padding_token, START_SYMBOL, END_SYMBOL)])
def print_para(model):
"""
Prints parameters of a model
:param opt:
:return:
"""
st = {}
total_params = 0
total_params_training = 0
for p_name, p in model.named_parameters():
# if not ('bias' in p_name.split('.')[-1] or 'bn' in p_name.split('.')[-1]):
st[p_name] = ([str(x) for x in p.size()], np.prod(p.size()), p.requires_grad)
total_params += np.prod(p.size())
if p.requires_grad:
total_params_training += np.prod(p.size())
pd.set_option('display.max_columns', None)
shapes_df = pd.DataFrame([(p_name, '[{}]'.format(','.join(size)), prod, p_req_grad)
for p_name, (size, prod, p_req_grad) in sorted(st.items(), key=lambda x: -x[1][1])],
columns=['name', 'shape', 'size', 'requires_grad']).set_index('name')
print('\n {:.1f}M total parameters. {:.1f}M training \n ----- \n {} \n ----'.format(total_params / 1000000.0,
total_params_training / 1000000.0,
shapes_df.to_string()),
flush=True)
return shapes_df
def batch_index_iterator(len_l, batch_size, skip_end=True):
"""
Provides indices that iterate over a list
:param len_l: int representing size of thing that we will
iterate over
:param batch_size: size of each batch
:param skip_end: if true, don't iterate over the last batch
:return: A generator that returns (start, end) tuples
as it goes through all batches
"""
iterate_until = len_l
if skip_end:
iterate_until = (len_l // batch_size) * batch_size
for b_start in range(0, iterate_until, batch_size):
yield (b_start, min(b_start + batch_size, len_l))
def batch_iterator(seq, batch_size, skip_end=True):
for b_start, b_end in batch_index_iterator(len(seq), batch_size, skip_end=skip_end):
yield seq[b_start:b_end]
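# Illustrative examples (added) of the batching helpers; sizes are hypothetical.
# >>> list(batch_index_iterator(10, 4, skip_end=True)) # last partial batch dropped
# [(0, 4), (4, 8)]
# >>> list(batch_index_iterator(10, 4, skip_end=False))
# [(0, 4), (4, 8), (8, 10)]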
def masked_unk_softmax(x, dim, mask_idx):
x1 = F.softmax(x, dim=dim)
x1[:, mask_idx] = 0
x1_sum = torch.sum(x1, dim=1, keepdim=True)
y = x1 / x1_sum
return y
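# Illustrative example (added): the UNK column (index 0) is zeroed and the rest
# renormalized. The logits below are hypothetical.
# >>> masked_unk_softmax(torch.zeros(1, 3), dim=1, mask_idx=0)
# tensor([[0.0000, 0.5000, 0.5000]])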
def compute_score_with_logits(logits, labels):
logits = masked_unk_softmax(logits, 1, 0)
logits = torch.max(logits, 1)[1].data # argmax
one_hots = torch.zeros_like(labels)
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores | 15,975 | 38.156863 | 122 | py |
visualbert | visualbert-master/visualbert/utils/detector.py | """
ok so I lied. it's not a detector, it's the resnet backbone
"""
import torch
import torch.nn as nn
import torch.nn.parallel
from torchvision.models import resnet
from utils.pytorch_misc import Flattener
import torch.utils.model_zoo as model_zoo
#from config_vcr import USE_IMAGENET_PRETRAINED
from utils.pytorch_misc import pad_sequence
from torch.nn import functional as F
USE_IMAGENET_PRETRAINED = True
def _load_resnet(pretrained=True):
# huge thx to https://github.com/ruotianluo/pytorch-faster-rcnn/blob/master/lib/nets/resnet_v1.py
backbone = resnet.resnet50(pretrained=False)
if pretrained:
backbone.load_state_dict(model_zoo.load_url(
'https://s3.us-west-2.amazonaws.com/ai2-rowanz/resnet50-e13db6895d81.th'))
for i in range(2, 4):
getattr(backbone, 'layer%d' % i)[0].conv1.stride = (2, 2)
getattr(backbone, 'layer%d' % i)[0].conv2.stride = (1, 1)
return backbone
def _load_resnet_imagenet(pretrained=True):
# huge thx to https://github.com/ruotianluo/pytorch-faster-rcnn/blob/master/lib/nets/resnet_v1.py
backbone = resnet.resnet50(pretrained=pretrained)
for i in range(2, 4):
getattr(backbone, 'layer%d' % i)[0].conv1.stride = (2, 2)
getattr(backbone, 'layer%d' % i)[0].conv2.stride = (1, 1)
# use stride 1 for the last conv4 layer (same as tf-faster-rcnn)
backbone.layer4[0].conv2.stride = (1, 1)
backbone.layer4[0].downsample[0].stride = (1, 1)
# # Make batchnorm more sensible
# for submodule in backbone.modules():
# if isinstance(submodule, torch.nn.BatchNorm2d):
# submodule.momentum = 0.01
return backbone
class SimpleDetector(nn.Module):
def __init__(self, pretrained=True, average_pool=True, semantic=True, final_dim=1024):
"""
:param average_pool: whether or not to average pool the representations
:param pretrained: Whether we need to load from scratch
:param semantic: Whether or not we want to introduce the mask and the class label early on (default Yes)
"""
super(SimpleDetector, self).__init__()
# huge thx to https://github.com/ruotianluo/pytorch-faster-rcnn/blob/master/lib/nets/resnet_v1.py
backbone = _load_resnet_imagenet(pretrained=pretrained) if USE_IMAGENET_PRETRAINED else _load_resnet(
pretrained=pretrained)
self.backbone = nn.Sequential(
backbone.conv1,
backbone.bn1,
backbone.relu,
backbone.maxpool,
backbone.layer1,
backbone.layer2,
backbone.layer3,
# backbone.layer4
)
from torchvision.layers import ROIAlign
self.roi_align = ROIAlign((7, 7) if USE_IMAGENET_PRETRAINED else (14, 14),
spatial_scale=1 / 16, sampling_ratio=0)
if semantic:
self.mask_dims = 32
self.object_embed = torch.nn.Embedding(num_embeddings=81, embedding_dim=128)
self.mask_upsample = torch.nn.Conv2d(1, self.mask_dims, kernel_size=3,
stride=2 if USE_IMAGENET_PRETRAINED else 1,
padding=1, bias=True)
else:
self.object_embed = None
self.mask_upsample = None
after_roi_align = [backbone.layer4]
self.final_dim = final_dim
if average_pool:
after_roi_align += [nn.AvgPool2d(7, stride=1), Flattener()]
self.after_roi_align = torch.nn.Sequential(*after_roi_align)
self.obj_downsample = torch.nn.Sequential(
torch.nn.Dropout(p=0.1),
torch.nn.Linear(2048 + (128 if semantic else 0), final_dim),
torch.nn.ReLU(inplace=True),
)
self.regularizing_predictor = torch.nn.Linear(2048, 81)
def forward(self,
images: torch.Tensor,
boxes: torch.Tensor,
box_mask: torch.LongTensor,
classes: torch.Tensor = None,
segms: torch.Tensor = None,
):
"""
:param images: [batch_size, 3, im_height, im_width]
:param boxes: [batch_size, max_num_objects, 4] Padded boxes
:param box_mask: [batch_size, max_num_objects] Mask for whether or not each box is OK
:return: object reps [batch_size, max_num_objects, dim]
"""
# [batch_size, 2048, im_height // 32, im_width // 32
img_feats = self.backbone(images)
box_inds = box_mask.nonzero()
assert box_inds.shape[0] > 0
rois = torch.cat((
box_inds[:, 0, None].type(boxes.dtype),
boxes[box_inds[:, 0], box_inds[:, 1]],
), 1)
# Object class and segmentation representations
roi_align_res = self.roi_align(img_feats, rois)
if self.mask_upsample is not None:
assert segms is not None
segms_indexed = segms[box_inds[:, 0], None, box_inds[:, 1]] - 0.5
roi_align_res[:, :self.mask_dims] += self.mask_upsample(segms_indexed)
post_roialign = self.after_roi_align(roi_align_res)
# Add some regularization, encouraging the model to keep giving decent enough predictions
obj_logits = self.regularizing_predictor(post_roialign)
obj_labels = classes[box_inds[:, 0], box_inds[:, 1]]
cnn_regularization = F.cross_entropy(obj_logits, obj_labels, size_average=True)[None]
feats_to_downsample = post_roialign if self.object_embed is None else torch.cat((post_roialign, self.object_embed(obj_labels)), -1)
roi_aligned_feats = self.obj_downsample(feats_to_downsample)
# Reshape into a padded sequence - this is expensive and annoying but easier to implement and debug...
obj_reps = pad_sequence(roi_aligned_feats, box_mask.sum(1).tolist())
return {
'obj_reps_raw': post_roialign,
'obj_reps': obj_reps,
'obj_logits': obj_logits,
'obj_labels': obj_labels,
'cnn_regularization_loss': cnn_regularization
}
| 6,108 | 41.131034 | 139 | py |
visualbert | visualbert-master/visualbert/utils/get_image_features/get_mask.py | #!/usr/bin/env python2
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Perform inference on a single image or all images with a certain extension
(e.g., .jpg) in a folder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import argparse
import cv2 # NOQA (Must import before importing caffe2 due to bug in cv2)
import glob
import logging
import os
import sys
import numpy as np
import base64
import csv
import timeit
import json
import torch
from detectron.utils.io import cache_url
import detectron.utils.c2 as c2_utils
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
from caffe2.python import workspace
import caffe2
from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.utils.timer import Timer
import detectron.core.test_engine as model_engine
import detectron.core.test as infer_engine
import detectron.datasets.dummy_datasets as dummy_datasets
import detectron.utils.c2 as c2_utils
import detectron.utils.logging
import detectron.utils.vis as vis_utils
from detectron.utils.boxes import nms
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
csv.field_size_limit(sys.maxsize)
BOTTOM_UP_FIELDNAMES = ['image_id', 'image_w', 'image_h',
'num_boxes', 'boxes', 'features']
FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes',
'boxes', 'features', 'object']
from get_mask_utils import detect_from_img, get_model
def parse_args():
parser = argparse.ArgumentParser(description='End-to-end inference')
parser.add_argument(
'--cfg',
dest='cfg',
help='cfg model file (/path/to/model_config.yaml)',
default=None,
type=str
)
parser.add_argument(
'--wts',
dest='weights',
help='weights model file (/path/to/model_weights.pkl)',
default=None,
type=str
)
parser.add_argument(
'--output_dir',
dest='output_dir',
help='output dir name',
required=True,
type=str
)
parser.add_argument(
'--image-ext',
dest='image_ext',
help='image file name extension (default: jpg)',
default='jpg',
type=str
)
parser.add_argument(
'--bbox_file',
help="csv file from bottom-up attention model",
default=None
)
parser.add_argument(
'--total_group',
help="the number of group for exracting",
type=int,
default=1
)
parser.add_argument(
'--group_id',
help=" group id for current analysis, used to shard",
type=int,
default=0
)
parser.add_argument(
'--min_bboxes',
help=" min number of bboxes",
type=int,
default=10
)
parser.add_argument(
'--max_bboxes',
help=" min number of bboxes",
type=int,
default=100
)
parser.add_argument(
'--conf_thresh',
help=" confidentce",
type=float,
default=0.2
)
parser.add_argument(
'--total_split',
help=" confidentce",
type=int,
default=1
)
parser.add_argument(
'--one_giant_file',
help=" confidentce",
type=str,
default=None
)
parser.add_argument(
'--current_split',
help=" confidentce",
type=int,
default=0
)
parser.add_argument(
'--feat_name',
help=" the name of the feature to extract, default: gpu_0/fc7",
type=str,
default="gpu_0/fc7"
)
parser.add_argument(
'im_or_folder', help='image or folder of images', default=None
)
parser.add_argument(
'--no_id',
action='store_true'
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def get_detections_from_im(cfg, model, im, image_id, feat_blob_name,
MIN_BOXES, MAX_BOXES, conf_thresh=0.2, bboxes=None):
with c2_utils.NamedCudaScope(0):
scores, cls_boxes, im_scale = infer_engine.im_detect_bbox(model,
im,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
boxes=bboxes)
box_features = workspace.FetchBlob(feat_blob_name)
#print("ss")
#print(workspace.FetchBlob("gpu_0/fc7"))
cls_prob = workspace.FetchBlob("gpu_0/cls_prob")
rois = workspace.FetchBlob("gpu_0/rois")
max_conf = np.zeros((rois.shape[0]))
# unscale back to raw image space
cls_boxes = rois[:, 1:5] / im_scale
for cls_ind in range(1, cls_prob.shape[1]):
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
keep = np.array(nms(dets, cfg.TEST.NMS))
max_conf[keep] = np.where(cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep])
keep_boxes = np.where(max_conf >= conf_thresh)[0]
if len(keep_boxes) < MIN_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MIN_BOXES]
elif len(keep_boxes) > MAX_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MAX_BOXES]
objects = np.argmax(cls_prob[keep_boxes], axis=1)
#print(cls_boxes[keep_boxes])
#print("keep_boxes", keep_boxes)
#print("max_conf", max_conf)
#print("cls_boxes", cls_boxes[0])
#print("im_h", im.shape[0])
return box_features[keep_boxes], max_conf[keep_boxes], cls_boxes[keep_boxes]
#return {
# "image_id": image_id,
# "image_h": np.size(im, 0),
# "image_w": np.size(im, 1),
# 'num_boxes': len(keep_boxes),
# 'boxes': base64.b64encode(cls_boxes[keep_boxes]),
# 'features': base64.b64encode(box_features[keep_boxes]),
# 'object': base64.b64encode(objects)
#}
def extract_bboxes(bottom_up_csv_file):
image_bboxes = {}
with open(bottom_up_csv_file, "r") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t',
fieldnames=BOTTOM_UP_FIELDNAMES)
for item in reader:
item['num_boxes'] = int(item['num_boxes'])
image_id = int(item['image_id'])
image_w = float(item['image_w'])
image_h = float(item['image_h'])
bbox = np.frombuffer(
base64.b64decode(item['boxes']),
dtype=np.float32).reshape((item['num_boxes'], -1))
image_bboxes[image_id] = bbox
return image_bboxes
import os
def recurse_find_image(folder, image_list, image_ext):
files = os.listdir(folder)
files.sort()
for i in files:
path = os.path.join(folder, i)
if os.path.isdir(path):
recurse_find_image(path, image_list, image_ext)
else:
if path.endswith(image_ext):
image_list.append(path)
def main(args):
logger = logging.getLogger(__name__)
model = get_model()
start = timeit.default_timer()
im_list = []
recurse_find_image(args.im_or_folder, im_list, args.image_ext)
print(im_list[:10])
print("There are {} images to cache in total.".format(len(im_list)))
if args.total_split != 1:
im_lists = np.array_split(im_list, args.total_split)
im_list= im_lists[args.current_split]
print("Split {}: There are currently {} images to cache.".format(args.current_split ,len(im_list)))
'''if os.path.isdir(args.im_or_folder):
im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
else:
im_list = [args.im_or_folder]'''
#print("{} images in total.".format(len(im_list)))
# extract bboxes from bottom-up attention model
image_bboxes={}
if args.bbox_file is not None:
image_bboxes = extract_bboxes(args.bbox_file)
count = 0
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
one_giant_file = args.one_giant_file
if one_giant_file is not None:
giant_file = {}
for i, im_name in enumerate(im_list):
im_base_name = os.path.basename(im_name)
if not args.no_id:
image_id = int(im_base_name.split(".")[0].split("_")[-1]) # for COCO
else:
image_id = None
if not args.no_id:
# The id-keyed COCO caching path was disabled in this variant of the script;
# only the filename-keyed (--no_id) path below is supported.
raise NotImplementedError("id-based caching is not supported here; pass --no_id")
else:
bbox = None
im = cv2.imread(im_name)
if im is not None:
outfile = os.path.join(args.output_dir, im_base_name) + ".npz"
lock_folder = outfile + '.lock'
if not os.path.exists(lock_folder) and os.path.exists(outfile):
continue
if not os.path.exists(lock_folder):
os.makedirs(lock_folder)
detection = detect_from_img(model, im)
if one_giant_file is not None:
giant_file[im_base_name] = detection
#np.savez(outfile, box_features=box_features, max_conf=max_conf, cls_boxes=cls_boxes)
os.rmdir(lock_folder)
count += 1
if count % 100 == 0:
end = timeit.default_timer()
epoch_time = end - start
print('Processed {:d} images in {:.1f} s'.format(count, epoch_time))
if one_giant_file is not None:
torch.save(giant_file, one_giant_file)
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
detectron.utils.logging.setup_logging(__name__)
args = parse_args()
if args.group_id >= args.total_group:
exit("sharding group %d is greater than the total group %d" %(args.group_id, args.total_group ))
main(args)
| 12,823 | 31.383838 | 107 | py |
visualbert | visualbert-master/visualbert/utils/get_image_features/get_mask_utils.py | # Modified by Harold. Courtesy of the author of VCR
"""
Detect objects in images, saving boxes and masks to a json.
"""
from collections import defaultdict
import cv2 # NOQA (Must import before importing caffe2 due to bug in cv2)
import logging
import os
import time
from caffe2.python import workspace
from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.utils.io import cache_url
from detectron.utils.logging import setup_logging
from detectron.utils.timer import Timer
import detectron.core.test_engine as infer_engine
import detectron.datasets.dummy_datasets as dummy_datasets
import detectron.utils.c2 as c2_utils
from detectron.utils.vis import convert_from_cls_format, kp_connections, get_class_string
import detectron.utils.keypoints as keypoint_utils
from tqdm import tqdm
from detectron.utils.colormap import colormap
import pycocotools.mask as mask_util
import numpy as np
import json
import pickle as pkl
# Matplotlib requires certain adjustments in some environments
# Must happen before importing matplotlib
import detectron.utils.env as envu
envu.set_up_matplotlib()
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
THRESHOLD = 0.7
workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
logger = logging.getLogger('__main__')
dummy_coco_dataset = dummy_datasets.get_coco_dataset()
def get_model(use_keypoints=False):
"""
Obtain model
:param use_keypoints: whether to use keypoints or mask rcnn
:return:
"""
if use_keypoints:
MODEL_CONFIG = '/home/rowan/tools/Detectron/configs/12_2017_baselines/e2e_keypoint_rcnn_X-101-32x8d-FPN_s1x.yaml'
MODEL_WEIGHTS = 'https://s3-us-west-2.amazonaws.com/detectron/37732318/12_2017_baselines/e2e_keypoint_rcnn_X-101-32x8d-FPN_s1x.yaml.16_55_09.Lx8H5JVu/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl'
else:
MODEL_CONFIG = '/local/harold/vqa/trained_detectron/e2e_mask_rcnn_X-101-64x4d-FPN_1x.yaml'
MODEL_WEIGHTS = '/local/harold/vqa/trained_detectron/e2e_mask_rcnn_X-101-64x4d-FPN_1x.pkl'
merge_cfg_from_file(MODEL_CONFIG)
cfg.NUM_GPUS = 1
cfg.MODEL.KEYPOINTS_ON = use_keypoints
cfg.MODEL.MASK_ON = not use_keypoints
weights_arg = cache_url(MODEL_WEIGHTS, cfg.DOWNLOAD_CACHE)
assert_and_infer_cfg(cache_urls=False)
assert not cfg.MODEL.RPN_ONLY, 'RPN models are not supported'
assert not cfg.TEST.PRECOMPUTED_PROPOSALS, 'Models that require precomputed proposals are not supported'
model = infer_engine.initialize_model_from_cfg(weights_arg)
return model
def detect_from_img(model, im, dets_pkl_fn=None, dets_json_fn=None, debug_img_fn=None):
"""
Detect the boxes and segmentation contours in an image.
:param im: Image
:param dets_pkl_fn: We'll back up the detections to here
:param dets_json_fn: We'll save detections here (above THRESHOLD) for turking
:param debug_img_fn: We'll backup the detections in a nice image, to this file
:return: a dict of detections if successful, otherwise (None, None, None).
"""
# logger.info('Processing {}'.format(img_fn))
# im = cv2.imread(img_fn)
timers = defaultdict(Timer)
t = time.time()
with c2_utils.NamedCudaScope(0):
cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
model, im, None, timers=timers
)
#logger.info('Inference time: {:.3f}s'.format(time.time() - t))
#for k, v in timers.items():
# logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
if not isinstance(cls_boxes, list) or not any([x.size > 0 for x in cls_boxes if hasattr(x, 'size')]):
print("Skip because of other things")
return None, None, None
# Get the mask for visualization. #TODO do keypoints
boxes, segms, keypoints, classes = convert_from_cls_format(
cls_boxes, cls_segms, cls_keyps)
inds = np.where(boxes[:, -1] > THRESHOLD)[0]
if inds.size == 0:
print("Skip because of harsh threshhold")
return None, None, None
if dets_pkl_fn is not None:
with open(dets_pkl_fn, 'wb') as f:
pkl.dump({'boxes': cls_boxes, 'segms': cls_segms, 'keyps': cls_keyps, 'im_shape': im.shape}, f)
boxes = boxes[inds]
segms = [segms[i] for i in inds.tolist()] if segms is not None else None
classes = np.array([classes[i] for i in inds.tolist()])
keypoints = [keypoints[i].tolist() for i in inds.tolist()] if keypoints is not None else None
contours = []
if segms is not None:
masks = mask_util.decode(segms).transpose((2, 0, 1))
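# Note: the two-value unpacking below assumes the OpenCV 2.x / 4.x
# findContours signature; OpenCV 3.x returns (image, contours, hierarchy).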
for mask_slice in masks:
contour, hier = cv2.findContours(
mask_slice.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
contours.append([c.squeeze(1).tolist() for c in contour])
# get the names
obj_names = []
for object_counter, obj_id in enumerate(classes):
obj_names.append('{} ({})'.format(object_counter+1, dummy_coco_dataset.classes[obj_id].replace(' ', '')))
if dets_json_fn is not None:
with open(dets_json_fn, 'w') as f:
json.dump({
'boxes': boxes.tolist(), # [num_boxes, dims]
'segms': contours, # [num_boxes, num_segms, num_points, 2]
'names': obj_names,
'width': int(im.shape[1]),
'height': int(im.shape[0]),
'keyps': keypoints,
}, f)
if debug_img_fn is not None:
vis_one_image(im[:, :, ::-1], debug_img_fn, boxes, contours, obj_names, keypoints,
dpi=200, box_alpha=0.3)
return {'boxes': boxes.tolist(), # [num_boxes, dims]
'segms': contours, # [num_boxes, num_segms, num_points, 2]
'names': obj_names,
'width': int(im.shape[1]),
'height': int(im.shape[0]),
'keyps': keypoints}
#return boxes, obj_names, classes
def vis_one_image(
im, im_name, boxes, segm_contours, obj_names, keypoints=None,
kp_thresh=2, dpi=200, box_alpha=0.0, show_class=True):
"""Visual debugging of detections. We assume that there are detections"""
dataset_keypoints, _ = keypoint_utils.get_keypoints()
color_list = colormap(rgb=True) / 255
kp_lines = kp_connections(dataset_keypoints)
cmap = plt.get_cmap('rainbow')
colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
fig = plt.figure(frameon=False)
fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.axis('off')
fig.add_axes(ax)
ax.imshow(im)
assert boxes is not None
# Display in largest to smallest order to reduce occlusion
areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
sorted_inds = np.argsort(-areas)
for mask_color_id, i in enumerate(sorted_inds):
bbox = boxes[i, :4]
score = boxes[i, -1]
# show box
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1],
fill=False, edgecolor=color_list[mask_color_id % len(color_list)],
linewidth=3, alpha=box_alpha))
if show_class:
# TODO: Make some boxes BIGGER if they are far from other things
y_coord = bbox[1] - 2
fontsize = max(min(bbox[2] - bbox[0], bbox[3] - bbox[1]) / 40, 5)
if fontsize * 2 > y_coord:
y_coord += fontsize * 2 + 2
ax.text(
bbox[0], y_coord,
obj_names[i] + ' {:0.2f}'.format(score).lstrip('0'),
fontsize=fontsize,
family='serif',
bbox=dict(
facecolor=color_list[mask_color_id % len(color_list)],
alpha=0.4, pad=0, edgecolor='none'),
color='white')
# show mask
if len(segm_contours) > 0:
img = np.ones(im.shape)
color_mask = color_list[mask_color_id % len(color_list), 0:3]
w_ratio = .4
for c in range(3):
color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
for c in range(3):
img[:, :, c] = color_mask[c]
for segm_part in segm_contours[i]:
polygon = Polygon(
np.array(segm_part),
fill=True, facecolor=color_mask,
edgecolor='w', linewidth=1.2,
alpha=0.5)
ax.add_patch(polygon)
# show keypoints
if keypoints is not None and len(keypoints) > i:
kps = np.array(keypoints[i])
plt.autoscale(False)
for l in range(len(kp_lines)):
i1 = kp_lines[l][0]
i2 = kp_lines[l][1]
if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
x = [kps[0, i1], kps[0, i2]]
y = [kps[1, i1], kps[1, i2]]
line = plt.plot(x, y)
plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
if kps[2, i1] > kp_thresh:
plt.plot(
kps[0, i1], kps[1, i1], '.', color=colors[l],
markersize=3.0, alpha=0.7)
if kps[2, i2] > kp_thresh:
plt.plot(
kps[0, i2], kps[1, i2], '.', color=colors[l],
markersize=3.0, alpha=0.7)
# add mid shoulder / mid hip for better visualization
mid_shoulder = (
kps[:2, dataset_keypoints.index('right_shoulder')] +
kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
sc_mid_shoulder = np.minimum(
kps[2, dataset_keypoints.index('right_shoulder')],
kps[2, dataset_keypoints.index('left_shoulder')])
mid_hip = (
kps[:2, dataset_keypoints.index('right_hip')] +
kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
sc_mid_hip = np.minimum(
kps[2, dataset_keypoints.index('right_hip')],
kps[2, dataset_keypoints.index('left_hip')])
if (sc_mid_shoulder > kp_thresh and
kps[2, dataset_keypoints.index('nose')] > kp_thresh):
x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
line = plt.plot(x, y)
plt.setp(
line, color=colors[len(kp_lines)], linewidth=1.0, alpha=0.7)
if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
x = [mid_shoulder[0], mid_hip[0]]
y = [mid_shoulder[1], mid_hip[1]]
line = plt.plot(x, y)
plt.setp(
line, color=colors[len(kp_lines) + 1], linewidth=1.0,
alpha=0.7)
ext = im_name.split('.')[-1]
rest_of_the_fn = im_name[:-(len(ext) + 1)]
ext2use = 'png' if ext == 'jpg' else ext
output_name = rest_of_the_fn + '.' + ext2use
fig.savefig(output_name, dpi=dpi)
plt.close('all')
# Convert to JPG manually... ugh
if ext == 'jpg':
assert os.path.exists(output_name)
png_img = cv2.imread(output_name)
cv2.imwrite(rest_of_the_fn + '.' + ext, png_img, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
os.remove(output_name)
def convert_detections(im_file, dets_pkl_fn, dets_json_fn=None, debug_img_fn=None):
"""
Update the format of previously saved detections.
:param im_file: path to the original image (used only for the debug visualization)
:param dets_pkl_fn: We'll load the backed-up detections from here
:param dets_json_fn: We'll save detections here (above THRESHOLD) for turking
:param debug_img_fn: We'll backup the detections in a nice image, to this file
:return: boxes, obj names, classes if successful, otherwise NONE NONE NONE.
"""
with open(dets_pkl_fn, 'rb') as f:
pkl_dict = pkl.load(f)
cls_boxes = pkl_dict['boxes']
cls_segms = pkl_dict['segms']
cls_keyps = pkl_dict['keyps']
im_shape = pkl_dict['im_shape']
if not isinstance(cls_boxes, list) or not any([x.size > 0 for x in cls_boxes if hasattr(x, 'size')]):
return None, None, None
# Get the mask for visualization. #TODO do keypoints
boxes, segms, keypoints, classes = convert_from_cls_format(
cls_boxes, cls_segms, cls_keyps)
inds = np.where(boxes[:, -1] > THRESHOLD)[0]
if inds.size == 0:
return None, None, None
boxes = boxes[inds]
segms = [segms[i] for i in inds.tolist()] if segms is not None else None
classes = np.array([classes[i] for i in inds.tolist()])
keypoints = [keypoints[i].tolist() for i in inds.tolist()] if keypoints is not None else None
contours = []
if segms is not None:
masks = mask_util.decode(segms).transpose((2, 0, 1))
for mask_slice in masks:
contour, hier = cv2.findContours(
mask_slice.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
contours.append([c.squeeze(1).tolist() for c in contour])
# get the names
obj_names = []
for object_counter, obj_id in enumerate(classes):
obj_names.append('{} ({})'.format(object_counter+1, dummy_coco_dataset.classes[obj_id].replace(' ', '')))
if dets_json_fn is not None:
with open(dets_json_fn, 'w') as f:
json.dump({
'boxes': boxes.tolist(), # [num_boxes, dims]
'segms': contours, # [num_boxes, num_segms, num_points, 2]
'names': obj_names,
'width': int(im_shape[1]),
'height': int(im_shape[0]),
'keyps': keypoints,
}, f)
if debug_img_fn is not None:
im = cv2.imread(im_file)
vis_one_image(im[:, :, ::-1], debug_img_fn, boxes, contours, obj_names, keypoints,
dpi=200, box_alpha=0.3)
return boxes, obj_names, classes
if __name__ == "__main__":
import sys
model = get_model()
# Minimal smoke test; assumes an image path is passed as the first command-line argument.
im = cv2.imread(sys.argv[1])
return_dict = detect_from_img(model, im) | 14,915 | 38.989276 | 268 | py |
visualbert | visualbert-master/visualbert/utils/get_image_features/extract_image_features_nlvr.py | # Modified by Harold
#!/usr/bin/env python2
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Perform inference on a single image or all images with a certain extension
(e.g., .jpg) in a folder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import argparse
import cv2 # NOQA (Must import before importing caffe2 due to bug in cv2)
import glob
import logging
import os
import sys
import numpy as np
import base64
import csv
import timeit
import json
import torch
from tqdm import tqdm
from detectron.utils.io import cache_url
import detectron.utils.c2 as c2_utils
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
from caffe2.python import workspace
import caffe2
from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.utils.timer import Timer
import detectron.core.test_engine as model_engine
import detectron.core.test as infer_engine
import detectron.datasets.dummy_datasets as dummy_datasets
import detectron.utils.c2 as c2_utils
import detectron.utils.logging
import detectron.utils.vis as vis_utils
from detectron.utils.boxes import nms
csv.field_size_limit(sys.maxsize)
BOTTOM_UP_FIELDNAMES = ['image_id', 'image_w', 'image_h',
'num_boxes', 'boxes', 'features']
FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes',
'boxes', 'features', 'object']
def parse_args():
parser = argparse.ArgumentParser(description='End-to-end inference')
parser.add_argument(
'--cfg',
dest='cfg',
help='cfg model file (/path/to/model_config.yaml)',
default=None,
type=str
)
parser.add_argument(
'--wts',
dest='weights',
help='weights model file (/path/to/model_weights.pkl)',
default=None,
type=str
)
parser.add_argument(
'--output_dir',
dest='output_dir',
help='output dir name',
required=True,
type=str
)
parser.add_argument(
'--image-ext',
dest='image_ext',
help='image file name extension (default: jpg)',
default='jpg',
type=str
)
parser.add_argument(
'--bbox_file',
help="csv file from bottom-up attention model",
default=None
)
parser.add_argument(
'--total_group',
help="the number of group for exracting",
type=int,
default=1
)
parser.add_argument(
'--group_id',
help=" group id for current analysis, used to shard",
type=int,
default=0
)
parser.add_argument(
'--min_bboxes',
help=" min number of bboxes",
type=int,
default=10
)
parser.add_argument(
'--max_bboxes',
help=" min number of bboxes",
type=int,
default=100
)
parser.add_argument(
'--conf_thresh',
help=" confidentce",
type=float,
default=0.2
)
parser.add_argument(
'--total_split',
help=" confidentce",
type=int,
default=1
)
parser.add_argument(
'--one_giant_file',
help=" confidentce",
type=str,
default=None
)
parser.add_argument(
'--current_split',
help=" confidentce",
type=int,
default=0
)
parser.add_argument(
'--feat_name',
help=" the name of the feature to extract, default: gpu_0/fc7",
type=str,
default="gpu_0/fc7"
)
parser.add_argument(
'im_or_folder', help='image or folder of images', default=None
)
parser.add_argument(
'--no_id',
action='store_true'
)
parser.add_argument(
'--existing',
type=str,
default=None
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
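# Example invocation (all paths below are placeholders):
# python extract_image_features_nlvr.py --cfg model_cfg.yaml --wts model_weights.pkl \
# --output_dir features/ --image-ext png --no_id /path/to/images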
def get_detections_from_im(cfg, model, im, image_id, feat_blob_name,
MIN_BOXES, MAX_BOXES, conf_thresh=0.2, bboxes=None):
with c2_utils.NamedCudaScope(0):
scores, cls_boxes, im_scale = infer_engine.im_detect_bbox(model,
im,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
boxes=bboxes)
box_features = workspace.FetchBlob(feat_blob_name)
cls_prob = workspace.FetchBlob("gpu_0/cls_prob")
rois = workspace.FetchBlob("gpu_0/rois")
max_conf = np.zeros((rois.shape[0]))
# unscale back to raw image space
cls_boxes = rois[:, 1:5] / im_scale
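# For every foreground class, run NMS and record, per RoI, the highest
# class score that survives; boxes are then selected by thresholding this
# pooled confidence (the bottom-up-attention box-selection heuristic).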
for cls_ind in range(1, cls_prob.shape[1]):
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
keep = np.array(nms(dets, cfg.TEST.NMS))
max_conf[keep] = np.where(cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep])
keep_boxes = np.where(max_conf >= conf_thresh)[0]
if len(keep_boxes) < MIN_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MIN_BOXES]
elif len(keep_boxes) > MAX_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MAX_BOXES]
objects = np.argmax(cls_prob[keep_boxes], axis=1)
return box_features[keep_boxes], max_conf[keep_boxes], cls_boxes[keep_boxes]
#return {
# "image_id": image_id,
# "image_h": np.size(im, 0),
# "image_w": np.size(im, 1),
# 'num_boxes': len(keep_boxes),
# 'boxes': base64.b64encode(cls_boxes[keep_boxes]),
# 'features': base64.b64encode(box_features[keep_boxes]),
# 'object': base64.b64encode(objects)
#}
def extract_bboxes(bottom_up_csv_file):
image_bboxes = {}
with open(bottom_up_csv_file, "r") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t',
fieldnames=BOTTOM_UP_FIELDNAMES)
for item in reader:
item['num_boxes'] = int(item['num_boxes'])
image_id = int(item['image_id'])
image_w = float(item['image_w'])
image_h = float(item['image_h'])
bbox = np.frombuffer(
base64.b64decode(item['boxes']),
dtype=np.float32).reshape((item['num_boxes'], -1))
image_bboxes[image_id] = bbox
return image_bboxes
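# The TSV 'boxes' field holds the base64 encoding of a float32 array,
# usually of shape (num_boxes, 4); a round-trip sketch of the convention
# assumed above:
#   encoded = base64.b64encode(boxes.astype(np.float32).tobytes())
#   decoded = np.frombuffer(base64.b64decode(encoded), dtype=np.float32).reshape(num_boxes, -1)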
def recurse_find_image(folder, image_list, image_ext):
files = os.listdir(folder)
files.sort()
for i in files:
path = os.path.join(folder, i)
if os.path.isdir(path):
recurse_find_image(path, image_list, image_ext)
else:
if path.endswith(image_ext):
image_list.append(path)
def main(args):
logger = logging.getLogger(__name__)
merge_cfg_from_file(args.cfg)
cfg.NUM_GPUS = 1
args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
assert_and_infer_cfg(cache_urls=False)
model = model_engine.initialize_model_from_cfg(args.weights)
start = timeit.default_timer()
im_list = []
recurse_find_image(args.im_or_folder, im_list, args.image_ext)
print("There are {} images to cache in total.".format(len(im_list)))
if args.total_split != 1:
im_lists = np.array_split(im_list, args.total_split)
im_list= im_lists[args.current_split]
print("Split {}: There are currently {} images to cache.".format(args.current_split ,len(im_list)))
# extract bboxes from bottom-up attention model
image_bboxes={}
if args.bbox_file is not None:
image_bboxes = extract_bboxes(args.bbox_file)
count = 0
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
one_giant_file = args.one_giant_file
if one_giant_file is not None:
giant_file = {}
if args.existing is not None:
giant_file = torch.load(args.existing)
print("Loaded {}".format(args.existing))
for i, im_name in enumerate(tqdm(im_list)):
im_base_name = os.path.basename(im_name)
if not args.no_id:
image_id = int(im_base_name.split(".")[0].split("_")[-1]) # for COCO
else:
image_id = None
bbox = None
if args.existing:
if im_base_name in giant_file:
continue
else:
print("Missing {}...".format(im_base_name))
im = cv2.imread(im_name)
if im is not None:
outfile = os.path.join(args.output_dir, im_base_name) + ".npz"
box_features, max_conf, cls_boxes = get_detections_from_im(cfg, model, im,
image_id,args.feat_name,
args.min_bboxes,
args.max_bboxes,
bboxes=bbox)
if one_giant_file is not None:
box_features = torch.Tensor(box_features)
cls_boxes = torch.Tensor(cls_boxes)
max_conf = torch.Tensor(max_conf)
giant_file[im_base_name] = (box_features, cls_boxes, max_conf)
else:
np.savez(outfile, box_features=box_features, max_conf=max_conf, cls_boxes=cls_boxes)
else:
print("Reading {} falied!".format(im_base_name))
if one_giant_file is not None:
print(len(giant_file))
torch.save(giant_file, one_giant_file)
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
detectron.utils.logging.setup_logging(__name__)
args = parse_args()
if args.group_id >= args.total_group:
exit("sharding group %d is greater than the total group %d" %(args.group_id, args.total_group ))
main(args)
| 11,369 | 31.485714 | 107 | py |
skccm | skccm-master/docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# skccm documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 24 16:48:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from unittest.mock import MagicMock
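# Autodoc imports the package at build time; mocking its heavy scientific
# dependencies below lets the documentation build on machines (such as
# Read the Docs) where those packages are not installed.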
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ['numpy', 'numba','sklearn', 'scipy','pandas']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'skccm'
copyright = '2017, Nick Cortale'
author = 'Nick Cortale'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'skccmdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'skccm.tex', 'skccm Documentation',
'Nick Cortale', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'skccm', 'skccm Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'skccm', 'skccm Documentation',
author, 'skccm', 'One line description of project.',
'Miscellaneous'),
]
| 5,151 | 28.953488 | 79 | py |
EcoSVM | EcoSVM-master/SIcode/python_scripts/MNIST/EcoSVM_MNIST.py | #Owen Howell, July 20, 2019
#olh20@bu.edu, https://owenhowell20.github.io
#This code runs Eco_SVM on MNIST dataset
#Note: This code takes significant computational time (approximately one day or more); for the plots in the paper, each realization was run in parallel
#Note: The memory requirements are also large for the full dataset. When running on a personal computer, please subsample the data
#Many thanks to https://www.bu.edu/tech/support/research/ for their advice on optimization
#Import standard python packages
import numpy as np
import matplotlib.pyplot as plt
import sys
#QP is done with CVXOPT packages
from cvxopt import matrix, solvers
import numpy as np
solvers.options['show_progress'] = False
#A global error threshold, any small number
thresh = 1e-3
#Note: For each realization, the slack hyperparameter C and the RBF hyperparameter gamma should be tuned to minimize out-of-sample error
#C is approximately five for most realizations
C = 5.321
#gamma follows scikit-learn's 'auto' convention: 1 / n_features
gamma = 1/(28*28)
#defining an RBF kernel function with tunable width parameter gamma
def kernel(x,y):
return np.exp( - gamma * np.dot( ( x - y ) , np.transpose(x - y) ) )
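# Minimal sanity-check sketch for the kernel (not called anywhere): an RBF
# kernel satisfies k(x, x) = 1 and is symmetric.
def _kernel_sanity_check():
    x = np.random.randn(28 * 28)
    y = np.random.randn(28 * 28)
    assert np.isclose(kernel(x, x), 1.0)
    assert np.isclose(kernel(x, y), kernel(y, x))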
#Initialize the EcoSVM: compute the support vectors for the first N_start points
#Inputs are the datapoints and data labels (the slack C and the kernel are module-level globals)
#Returns the set of active datapoints, active datapoint labels, support vector values and an active index value
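# The QP solved below is the standard soft-margin kernel SVM dual:
#   maximize   sum_i a_i - (1/2) sum_ij a_i a_j y_i y_j K(x_i, x_j)
#   subject to 0 <= a_i <= C  and  sum_i a_i y_i = 0
# (CVXOPT minimizes, hence Q_ij = y_i y_j K(x_i, x_j) and p = -1 below.)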
def EcoSVM_initialize(xvals,yvals ):
N_start = len(yvals)
#Function to generate the initial kernel matrix
def initial_kernel_matrix():
#Compute the initial kernel matrix
Qmat = np.zeros([N_start,N_start])
for i in range(N_start):
for j in range(N_start):
#using the RBF kernel defined above
s = kernel(xvals[i,:],xvals[j,:])
Qmat[i,j] = s*yvals[i]*yvals[j]
return Qmat
Qmat = initial_kernel_matrix()
#Convert to CVXOPT format
Q = matrix(Qmat)
p = - np.ones(N_start)
p = matrix(p)
G = np.zeros([2*N_start,N_start])
for i in range(N_start):
G[i,i] = -1
for i in range(N_start):
G[i+N_start,i] = +1
G = matrix(G)
h = np.zeros([2*N_start])
for i in range(N_start,2*N_start):
h[i] = C
h = matrix(h)
A = np.zeros(N_start)
for i in range(N_start):
A[i] = yvals[i]
A = matrix(A,(1,N_start),'d')
b = matrix(0.0)
sol = solvers.qp(Q, p, G, h, A, b)
#the initial values of the solution
#KKT values a_{i}
KKT = np.array( sol['x'] )
#only care about non-zero values
for i in range(N_start):
if (KKT[i] < thresh) :
KKT[i] = 0.0
#Only need to keep non-zero KKT values, also known as support vectors
#Find initial support vector values and support vector indices
support_vects_inds = np.array( np.ndarray.nonzero(KKT)[0] )
support_vects = KKT[support_vects_inds]
#the set of active datapoints (taken from the function arguments)
active_data_x = xvals[support_vects_inds,:]
active_data_y = yvals[support_vects_inds]
#Check that there is at least one active support vector
num_active = 0
for i in range(len(support_vects_inds)):
if ( (support_vects[i] - C)**2 > thresh ):
num_active = num_active + 1
if ( num_active == 0 ):
print("No active support vector found. Make sure that there are both +1 and -1 examples. Increase the number of intial points. Increase the slack.")
quit()
#Find the active index
test_vals = (support_vects - C/2.0 )**2
index_val = np.argmin(test_vals)
return active_data_x, active_data_y, support_vects, index_val
#Run the EcoSVM algorithm on a single new point
#Inputs are datapoint X, data label Y, the active datapoints and labels, the support vector multipliers, the active index and the dataset dimension
def point_Run_EcoSVM( X, Y , active_data_x , active_data_y , support_vects , index_val , dimension ):
numsupportvects = len(active_data_y)
#Find the active index
test_vals = (support_vects - C/2.0 )**2
index_val = np.argmin(test_vals)
s = 0
for i in range(numsupportvects):
s = s + active_data_y[i]*Y*( kernel( active_data_x[i,:], active_data_x[index_val,:] ) - kernel( active_data_x[i,:], X ) )
#Compute the invasion condition
inv = 1 - Y*active_data_y[index_val] + s
if (inv>=0):
#The new species can invade. Recompute the steady state using QP
Qp = np.zeros([numsupportvects+1,numsupportvects+1])
for i in range(numsupportvects):
for j in range(numsupportvects):
s = kernel(active_data_x[i,:],active_data_x[j,:])
Qp[i,j] = s*active_data_y[i]*active_data_y[j]
for i in range(numsupportvects):
s = kernel(active_data_x[i,:], X)
Qp[i,numsupportvects] = s*active_data_y[i]*Y
Qp[numsupportvects,i] = s*active_data_y[i]*Y
s = kernel(X,X)
Qp[numsupportvects,numsupportvects] = s*Y * Y
Qp = matrix(Qp)
p = - np.ones(numsupportvects+1)
p = matrix(p)
G = np.zeros([2*numsupportvects+2,numsupportvects+1])
for i in range(numsupportvects+1):
G[i,i] = -1
for i in range(numsupportvects+1):
G[i+numsupportvects+1,i] = +1
G = matrix(G)
h = np.zeros([2*numsupportvects+2])
for i in range(numsupportvects+1,2*numsupportvects+2):
h[i] = C
h = matrix(h)
A = np.zeros(numsupportvects+1)
for i in range(numsupportvects):
A[i] = active_data_y[i]
A[numsupportvects] = Y
A = matrix(A,(1,numsupportvects+1),'d')
b = matrix(0.0)
#Call QP function
sol = solvers.qp(Qp, p, G, h, A, b)
#QP solution as array, all KKT values
KKT = np.array( sol['x'] )
#Get the new support vector indices and values
#only care about non-zero support vectors
countnew = 0
for i in range(len(KKT)):
if (KKT[i] < thresh):
KKT[i] = 0
countnew = countnew + 1
countnew = len(KKT) - countnew
#the set of new support vectors and support vector indices
new_active_data_x = np.zeros([countnew, dimension])
new_active_data_y = np.zeros([countnew])
newsuppvects = np.zeros([countnew])
auxcount = 0
auxcount2 = 0
for i in range(len(KKT)-1):
if (KKT[i] > thresh):
new_active_data_x[auxcount,:] = active_data_x[auxcount2,:]
new_active_data_y[auxcount] = active_data_y[auxcount2]
auxcount2 = auxcount2 + 1
newsuppvects[auxcount] = KKT[i]
auxcount = auxcount + 1
if (KKT[i]<thresh):
auxcount2 = auxcount2 + 1
if (KKT[len(KKT)-1] > thresh):
new_active_data_x[auxcount,:] = X
new_active_data_y[auxcount] = Y
newsuppvects[auxcount] = KKT[len(KKT)-1]
auxcount = auxcount + 1
#New support vector values and indices
support_vects = newsuppvects
active_data_y = new_active_data_y
active_data_x = new_active_data_x
return active_data_x, active_data_y, support_vects, index_val
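# In the ecological analogy behind EcoSVM, datapoints are species and the
# support vectors are the species that coexist at the steady state; the
# invasion condition above asks whether the new "species" would grow if
# introduced with an infinitesimal Lagrange multiplier.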
#Run the EcoSVM algorithm
#Inputs are the datapoints and labels, plus the initial active datapoints, labels, support vector values and active index
#Returns the final active datapoints, labels, support vector values and active index, along with per-step test accuracy and support-vector counts
def Run_EcoSVM( xvals, yvals, active_data_x, active_data_y, support_vects, index_val ):
N = len(yvals)
test_accuracy = np.zeros([ N - N_start])
number_active = np.zeros([ N - N_start])
#the dataset dimension
dimension = len(xvals[0,:])
#Run the EcoSVM algorithm over all points
for point in range(N_start,N):
#compute the b value
b = b_value(active_data_x,active_data_y,support_vects)
#Compute performance errors
EcoSVMerror = SVM_error(test_xvals,test_yvals, active_data_x, active_data_y, support_vects,b)
test_accuracy[point - N_start] = 1 - EcoSVMerror
print("Running test accuracy:", 1 - EcoSVMerror)
count_active = 0
for i in range(len(active_data_y)):
if ( support_vects[i] > thresh and ( support_vects[i] - C )**2 > thresh**2 ):
count_active = count_active + 1
number_active[ point - N_start ] = count_active
X = xvals[point ,:]
Y = yvals[point ]
#Run the EcoSVM algorithm on a single point
active_data_x, active_data_y, support_vects, index_val = point_Run_EcoSVM( X , Y , active_data_x, active_data_y , support_vects , index_val , dimension )
return active_data_x, active_data_y , support_vects , index_val , test_accuracy, number_active
#Run a batch SVM on all data
#input is all training data and training labels (the slack C is a module-level global)
#output is the set of active datapoints and data labels and support vector values
def batchSVM( xvals , yvals ):
#the number of datapoints
N = len(yvals)
#the full kernel matrix for batch SVM
Qfull = np.zeros([N,N])
for i in range(N):
for j in range(N):
#using the RBF kernel defined above
s = kernel(xvals[i,:],xvals[j,:])
Qfull[i,j] = s*yvals[i]*yvals[j]
#The full batch SVM solution with QP
#Convert into CVXOPT format
Qf = matrix(Qfull)
pf = - np.ones(N)
pf = matrix(pf)
Gf = np.zeros([2*N,N])
for i in range(N):
Gf[i,i] = -1
for i in range(N):
Gf[N+i,i] = +1
Gf = matrix(Gf)
hf = np.zeros([2*N])
for i in range(N,2*N):
hf[i] = C
hf = matrix(hf)
Af = np.zeros(N)
for i in range(N):
Af[i] = yvals[i]
Af = matrix(Af,(1,N),'d')
bf = matrix(0.0)
sol = solvers.qp(Qf, pf, Gf, hf, Af, bf)
evars = np.array( sol['x'] )
#only care about non-zero support vectors
for i in range(N):
if (evars[i] < thresh):
evars[i] = 0.0
#Find support vectors and support vector indices for Batch SVM
supvectsindsfull = np.array( np.ndarray.nonzero(evars)[0] )
supvectsfull = evars[supvectsindsfull]
active_data_x = xvals[ supvectsindsfull , :]
active_data_y = yvals[supvectsindsfull ]
return active_data_x, active_data_y, supvectsfull
#Compute the B value for an SVM
#Inputs are the active datapoints, their labels and the support vector multipliers
def b_value(active_data_x, active_data_y,supportvectors ):
s = 0
bp = 0
for i in range(len(supportvectors)):
bp = bp + 1
s = s + active_data_y[i]
for j in range(len(supportvectors)):
s = s - supportvectors[j] * active_data_y[j] * kernel( active_data_x[i,:] , active_data_x[j,:] )
b = 0
if (bp!=0):
b = 1/float(bp) * s
return b
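# The bias follows from the KKT conditions: any margin support vector x_i
# satisfies y_i * (sum_j a_j y_j K(x_j, x_i) + b) = 1, which gives
# b = y_i - sum_j a_j y_j K(x_j, x_i); averaging this over the support
# vectors, as above, yields a more numerically stable estimate.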
#the SVM prediction function
#Inputs are the datapoint x to predict on, the active datapoints and labels, the support vector multipliers and the bias b
#Output is the real-valued decision function; its sign gives the predicted label (+1 or -1)
def pred(x , active_data_x, active_data_y , supportvectors , b):
s = 0
for i in range(len(supportvectors)):
s = s + active_data_y[i] * supportvectors[i] * kernel(x , active_data_x[i,:] )
s = s + b
return s
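# Minimal usage sketch (assumes a trained state from one of the solvers
# above; x_new is any datapoint of the same dimension):
# b = b_value(active_data_x, active_data_y, support_vects)
# label = np.sign(pred(x_new, active_data_x, active_data_y, support_vects, b))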
#Function to compute test error
#Inputs are the test data and labels, the active datapoints and labels, the support vector multipliers and the bias
#Returns test error
def SVM_error( test_xvals , test_yvals , active_data_x, active_data_y , support_vects, b):
#the number of test points
N_test = len( test_yvals )
#Count the misclassified points (the function returns the fraction)
error = 0
for i in range(N_test):
if ( test_yvals[i] != np.sign( pred( test_xvals[i] , active_data_x, active_data_y, support_vects, b ) ) ):
error = error + 1
return error/N_test
#The MNIST dataset, https://ci.nii.ac.jp/naid/10027939599/en/ for more information
from keras.datasets import mnist
#each image is 28x28; the flattened dimension of each image
dimension = 28 * 28
#Function to get MNIST dataset
def getMNIST():
(all_xvals, all_yvals), (all_test_xvals , all_test_yvals) = mnist.load_data()
#reshape data into usable form
all_xvals = np.reshape(all_xvals,(60000,28*28))
all_test_xvals = np.reshape(all_test_xvals,(10000,28*28))
#Count the number of digits:
countA = 0
countB = 0
for i in range( len(all_yvals) ):
if ( all_yvals[i] == 1 ):
countA = countA + 1
if ( all_yvals[i] == 4 ):
countB = countB + 1
xvals = np.zeros([countA+countB, 28*28])
yvals = np.zeros([countA + countB])
count = 0
for i in range( len(all_yvals) ):
if ( all_yvals[i] == 1 ):
xvals[count,:] = all_xvals[i,:]
yvals[count] = +1
count = count + 1
if ( all_yvals[i] == 4 ):
xvals[count,:] = all_xvals[i,:]
yvals[count] = -1
count = count + 1
countA_test = 0
countB_test = 0
for i in range( len(all_test_yvals) ):
if ( all_test_yvals[i] == 1 ):
countA_test = countA_test + 1
if ( all_test_yvals[i] == 4 ):
countB_test = countB_test + 1
test_xvals = np.zeros([countA_test+countB_test, 28*28])
test_yvals = np.zeros([countA_test + countB_test])
count = 0
for i in range( len(all_test_yvals) ):
if ( all_test_yvals[i] == 1 ):
test_xvals[count,:] = all_test_xvals[i,:]
test_yvals[count] = +1
count = count + 1
if ( all_test_yvals[i] == 4 ):
test_xvals[count,:] = all_test_xvals[i,:]
test_yvals[count] = -1
count = count + 1
#Essential to standardize the data: many MNIST pixels are constant, so the raw data matrix has rank less than its dimension
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(xvals)
xvals = scaler.transform( xvals )
test_xvals = scaler.transform( test_xvals )
return xvals, yvals, test_xvals, test_yvals
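# A more idiomatic selection of the two digit classes with boolean masks
# (equivalent sketch, not used above; the digit values are assumptions
# matching the hard-coded 1-vs-4 task):
def _select_digits(x, y, pos=1, neg=4):
    mask = (y == pos) | (y == neg)
    labels = np.where(y[mask] == pos, 1.0, -1.0)
    return x[mask], labels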
xvals, yvals, test_xvals, test_yvals = getMNIST()
#Shuffle the order of the training values
from sklearn.utils import shuffle
xvals , yvals = shuffle( xvals , yvals)
#the labels
yvals = np.array(yvals)
test_yvals = np.array(test_yvals)
#Total number of training points
N = len(yvals)
#Total number of test points
N_test = len(test_yvals)
#Initial number of points used to compute the steady state; can be user entered
#This should be much greater than the dataset dimension, especially if the dataset is highly non-linear
#For the MNIST dataset with an RBF kernel the problem is essentially linearly separable, so a small number of initial points is fine
N_start = 5
#the initial datapoints and labels
initial_xvals = xvals[0:N_start,:]
initial_yvals = yvals[0:N_start]
#Subsample so the script runs in a reasonable time
#For the results in the paper, please use the whole dataset
Ntrun = 200
xvals = xvals[0:Ntrun,:]
yvals = yvals[0:Ntrun]
#Get the initial set of active datapoints, active datapoint labels, support vector values and the active index
initial_active_data_x, initial_active_data_y, initial_support_vects , initial_index = EcoSVM_initialize(initial_xvals,initial_yvals)
#Run the EcoSVM algorithm on the dataset
active_data_x, active_data_y , support_vects, index_val, test_accuracy, number_active = Run_EcoSVM( xvals, yvals, initial_active_data_x, initial_active_data_y, initial_support_vects , initial_index)
#Get the full batch solution to compare
batch_data_x, batch_data_y, batch_support_vects = batchSVM( xvals,yvals)
batch_number_active = len(batch_data_y)
#compute the batch b value
bfull = b_value(batch_data_x,batch_data_y,batch_support_vects)
#compute the batch test error
batcherror = SVM_error(test_xvals,test_yvals, batch_data_x, batch_data_y, batch_support_vects,bfull)
#average batch error and average number of batch support vectors (a single realization here)
#make accuracy plots vs time
import os
os.environ["PATH"] += ':/usr/local/texlive/2015/bin/x86_64-darwin'
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.tick_params(labelsize=16)
fontsize = 22
plt.plot( test_accuracy , linewidth=2, color = 'k')
plt.axhline(y = 1 - batcherror,linestyle='--',linewidth=6)
plt.ylim(0.5,1.1)
plt.ylabel("$A(T) $",fontsize = fontsize + 2)
plt.xlabel("$ T $" ,fontsize=fontsize + 2)
plt.grid()
plt.tick_params(labelsize=fontsize + 2)
plt.tight_layout()
plt.show()
#plt.savefig("./graphs/mnistacc")
plt.clf()
plt.plot( number_active , linewidth=2 , color = 'k')
plt.axhline(y = batch_number_active,linestyle='--',linewidth=6)
plt.ylabel("$N(T) $",fontsize = fontsize + 2)
plt.xlabel("$ T $" ,fontsize=fontsize + 2)
plt.tick_params(labelsize=fontsize + 2)
plt.grid()
plt.tight_layout()
plt.show()
#plt.savefig("./graphs/mnistnum")
plt.clf()
| 15,622 | 22.671212 | 195 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/main_1d.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
import torch.multiprocessing as mp
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from collections import namedtuple
from common.shared_optim import SharedAdam, SharedRMSprop
from Harlow_1D.train import train, train_stacked
from models.a3c_lstm_simple import A3C_LSTM, A3C_StackedLSTM
if __name__ == "__main__":
mp.set_start_method("spawn")
os.environ['OMP_NUM_THREADS'] = '1'
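# "spawn" is required to share CUDA tensors across worker processes, and
# capping the OMP thread count avoids oversubscription with many workers.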
parser = argparse.ArgumentParser(description='Parameters')
parser.add_argument('-c', '--config', type=str, default="Harlow_1D/config.yaml", help='path of config file')
args = parser.parse_args()
with open(args.config, 'r', encoding="utf-8") as fin:
config = yaml.load(fin, Loader=yaml.FullLoader)
n_seeds = 8
base_seed = config["seed"]
base_run_title = config["run-title"]
for seed_idx in range(1, n_seeds + 1):
config["run-title"] = base_run_title + f"_{seed_idx}"
config["seed"] = base_seed * seed_idx
exp_path = os.path.join(config["save-path"], config["run-title"])
if not os.path.isdir(exp_path):
os.mkdir(exp_path)
out_path = os.path.join(exp_path, os.path.basename(args.config))
with open(out_path, 'w') as fout:
yaml.dump(config, fout)
############## Start Here ##############
print(f"> Running {config['run-title']} {config['mode']} using {config['optimizer']}")
if config["mode"] == "vanilla":
shared_model = A3C_LSTM(
config["task"]["input-dim"],
config["agent"]["mem-units"],
config["task"]["num-actions"],
config["agent"]["cell-type"]
)
elif config["mode"] == "stacked":
shared_model = A3C_StackedLSTM(
config["task"]["input-dim"],
config["agent"]["mem-units"],
config["task"]["num-actions"],
device=config["device"]
)
else:
raise ValueError(config["mode"])
shared_model.share_memory()
shared_model.to(config['device'])
print(shared_model)
optim_class = SharedAdam if config["optimizer"] == "adam" else SharedRMSprop
optimizer = optim_class(shared_model.parameters(), lr=config["agent"]["lr"])
optimizer.share_memory()
processes = []
T.manual_seed(config["seed"])
np.random.seed(config["seed"])
T.random.manual_seed(config["seed"])
if config["resume"]:
filepath = os.path.join(
config["save-path"],
config["load-title"],
f"{config['load-title']}_{config['start-episode']}.pt"
)
print(f"> Loading Checkpoint {filepath}")
shared_model.load_state_dict(T.load(filepath)["state_dict"])
train_target = train_stacked if config["mode"] == "stacked" else train
for rank in range(config["agent"]["n-workers"]):
p = mp.Process(target=train_target, args=(
config,
shared_model,
optimizer,
rank,
))
p.start()
processes += [p]
for p in processes:
p.join()
| 3,414 | 30.915888 | 113 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/vis_simple.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from datetime import datetime
from collections import namedtuple
from Harlow_Simple.harlow import HarlowSimple
from models.a3c_lstm_simple import A3C_LSTM
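# Meta-RL convention used throughout this repo: at every step the agent
# receives its previous one-hot action and previous reward alongside the
# current observation, so the LSTM can adapt to the task within an episode.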
def run_episode(agent, env, device="cpu"):
agent.eval()
done = False
state = env.reset()
p_action, p_reward = [0,0,0], 0
ht, ct = agent.get_init_states(device)
while not done:
logit, _, (ht, ct) = agent(
T.tensor([state]).float().to(device), (
T.tensor([p_action]).float().to(device),
T.tensor([[p_reward]]).float().to(device)),
(ht, ct)
)
action = T.argmax(F.softmax(logit, dim=-1), -1)
state, reward, done, _ = env.step(action)
p_action = np.eye(env.n_actions)[action]
p_reward = reward
env.reset()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Parameters')
parser.add_argument('-c', '--config', type=str, default="Harlow_Simple/config.yaml", help='path of config file')
args = parser.parse_args()
with open(args.config, 'r', encoding="utf-8") as fin:
config = yaml.load(fin, Loader=yaml.FullLoader)
load_path = config["load-path"]
save_path = os.path.join(config["save-path"], config["run-title"], config["run-title"]+"_{epi:04d}.gif")
agent = A3C_LSTM(
config["task"]["input-dim"],
config["agent"]["mem-units"],
config["task"]["num-actions"],
)
agent.load_state_dict(T.load(load_path)["state_dict"])
env = HarlowSimple(visualize=True, save_interval=1, save_path=save_path)
run_episode(agent, env)
| 1,850 | 25.826087 | 117 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/main_psychlab.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
import torch.multiprocessing as mp
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from collections import namedtuple
from common.shared_optim import SharedAdam, SharedRMSprop
from Harlow_PsychLab.train import train, train_stacked
from Harlow_PsychLab.harlow import HarlowWrapper
from models.a3c_lstm import A3C_LSTM, A3C_StackedLSTM
from models.a3c_conv_lstm import A3C_ConvLSTM, A3C_ConvStackedLSTM
from models.resnet_lstm import ResNet_LSTM
if __name__ == "__main__":
mp.set_start_method("spawn")
os.environ['OMP_NUM_THREADS'] = '1'
parser = argparse.ArgumentParser(description='Parameters')
parser.add_argument('-c', '--config', type=str,
default="/home/bkhmsi/Documents/Projects/lab/Meta-RL-Harlow/Harlow_PsychLab/config.yaml",
help='path of config file')
parser.add_argument('--length', type=int, default=3600,
help='Number of steps to run the agent')
parser.add_argument('--width', type=int, default=512,
help='Horizontal size of the observations')
parser.add_argument('--height', type=int, default=512,
help='Vertical size of the observations')
parser.add_argument('--fps', type=int, default=60,
help='Number of frames per second')
parser.add_argument('--runfiles_path', type=str, default=None,
help='Set the runfiles path to find DeepMind Lab data')
parser.add_argument('--level_script', type=str,
default='contributed/psychlab/harlow',
help='The environment level script to load')
parser.add_argument('--record', type=str, default=None,
help='Record the run to a demo file')
parser.add_argument('--demo', type=str, default=None,
help='Play back a recorded demo file')
parser.add_argument('--demofiles', type=str, default=None,
help='Directory for demo files')
parser.add_argument('--video', type=str, default=None,
help='Record the demo run as a video')
args = parser.parse_args()
with open(args.config, 'r', encoding="utf-8") as fin:
config = yaml.load(fin, Loader=yaml.FullLoader)
task_config = {
'fps': str(args.fps),
'width': str(args.width),
'height': str(args.height)
}
if args.record:
task_config['record'] = args.record
if args.demo:
task_config['demo'] = args.demo
if args.demofiles:
task_config['demofiles'] = args.demofiles
if args.video:
task_config['video'] = args.video
n_seeds = 1
base_seed = config["seed"]
base_run_title = config["run-title"]
for seed_idx in range(1, n_seeds + 1):
config["run-title"] = base_run_title + f"_{seed_idx}"
config["seed"] = base_seed * seed_idx
exp_path = os.path.join(config["save-path"], config["run-title"])
if not os.path.isdir(exp_path):
os.mkdir(exp_path)
out_path = os.path.join(exp_path, os.path.basename(args.config))
with open(out_path, 'w') as fout:
yaml.dump(config, fout)
############## Start Here ##############
print(f"> Running {config['run-title']} {config['mode']} using {config['optimizer']}")
if config["mode"] == "resnet":
shared_model = ResNet_LSTM(config["agent"], config["task"]["num-actions"])
elif config["mode"] == "conv-stacked":
shared_model = A3C_ConvStackedLSTM(config["agent"], config["task"]["num-actions"])
elif config["mode"] == "stacked":
shared_model = A3C_StackedLSTM(config["agent"], config["task"]["num-actions"])
elif config["mode"] == "conv-vanilla":
shared_model = A3C_ConvLSTM(config["agent"], config["task"]["num-actions"])
elif config["mode"] == "vanilla":
shared_model = A3C_LSTM(config["agent"], config["task"]["num-actions"])
else:
raise ValueError(config["mode"])
print(shared_model)
shared_model.share_memory()
shared_model.to(config['device'])
optim_class = SharedAdam if config["optimizer"] == "adam" else SharedRMSprop
optimizer = optim_class(shared_model.parameters(), lr=config["agent"]["lr"])
optimizer.share_memory()
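# Hogwild-style A3C: every worker process reads and updates this shared
# model through an optimizer whose state tensors also live in shared memory.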
processes = []
update_counter = 0
T.manual_seed(config["seed"])
np.random.seed(config["seed"])
T.random.manual_seed(config["seed"])
if config["copy-encoder"]:
filepath = os.path.join(
config["save-path"],
config["load-title"],
f"{config['load-title']}_{config['start-episode']}.pt"
)
print(f"> Copying Encoder from {filepath}")
pretrained_dict = T.load(filepath, map_location=T.device(config["device"]))["state_dict"]
load_dict = shared_model.state_dict()
for k, v in pretrained_dict.items():
if k in "encoder": load_dict[k] = v
shared_model.load_state_dict(load_dict)
if config["resume"]:
filepath = os.path.join(
config["save-path"],
config["load-title"],
f"{config['load-title']}_{config['start-episode']:04d}.pt"
)
print(f"> Loading Checkpoint {filepath}")
model_data = T.load(filepath, map_location=T.device(config["device"]))
update_counter = model_data["update_counter"]
pretrained_dict = model_data["state_dict"]
shared_model.load_state_dict(pretrained_dict)
train_target = train_stacked if "stacked" in config["mode"] else train
for rank in range(config["agent"]["n-workers"]):
p = mp.Process(target=train_target, args=(
config,
shared_model,
optimizer,
rank,
task_config,
update_counter,
))
p.start()
processes += [p]
for p in processes:
p.join()
| 6,521 | 37.591716 | 114 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/run_episode.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.utils.tensorboard import SummaryWriter
import deepmind_lab as lab
from tqdm import tqdm
from collections import namedtuple
from common.shared_optim import SharedAdam, SharedRMSprop
from Harlow_PsychLab.train import train, train_stacked
from Harlow_PsychLab.harlow import HarlowWrapper
from models.a3c_lstm import A3C_LSTM, A3C_StackedLSTM
from models.a3c_conv_lstm import A3C_ConvLSTM, A3C_ConvStackedLSTM
if __name__ == "__main__":
mp.set_start_method("spawn")
os.environ['OMP_NUM_THREADS'] = '1'
parser = argparse.ArgumentParser(description='Parameters')
parser.add_argument('-c', '--config', type=str,
default="/home/bkhmsi/Documents/Projects/lab/Meta-RL-Harlow/Harlow_PsychLab/config.yaml",
help='path of config file')
parser.add_argument('--length', type=int, default=3600,
help='Number of steps to run the agent')
parser.add_argument('--width', type=int, default=84,
help='Horizontal size of the observations')
parser.add_argument('--height', type=int, default=84,
help='Vertical size of the observations')
parser.add_argument('--fps', type=int, default=60,
help='Number of frames per second')
parser.add_argument('--runfiles_path', type=str, default=None,
help='Set the runfiles path to find DeepMind Lab data')
parser.add_argument('--level_script', type=str,
default='contributed/psychlab/harlow',
help='The environment level script to load')
parser.add_argument('--record', type=str, default=None,
help='Record the run to a demo file')
parser.add_argument('--demo', type=str, default=None,
help='Play back a recorded demo file')
parser.add_argument('--demofiles', type=str, default=None,
help='Directory for demo files')
parser.add_argument('--video', type=str, default=None,
help='Record the demo run as a video')
args = parser.parse_args()
with open(args.config, 'r', encoding="utf-8") as fin:
config = yaml.load(fin, Loader=yaml.FullLoader)
task_config = {
'fps': str(args.fps),
'width': str(args.width),
'height': str(args.height)
}
if args.record:
task_config['record'] = args.record
if args.demo:
task_config['demo'] = args.demo
if args.demofiles:
task_config['demofiles'] = args.demofiles
if args.video:
task_config['video'] = args.video
n_seeds = 1
device = config["device"]
############## Start Here ##############
print(f"> Running {config['run-title']} {config['mode']}")
if config["mode"] == "conv-stacked":
agent = A3C_ConvStackedLSTM(config["agent"], config["task"]["num-actions"])
elif config["mode"] == "stacked":
agent = A3C_StackedLSTM(config["agent"], config["task"]["num-actions"])
elif config["mode"] == "conv-vanilla":
agent = A3C_ConvLSTM(config["agent"], config["task"]["num-actions"])
elif config["mode"] == "vanilla":
agent = A3C_LSTM(config["agent"], config["task"]["num-actions"])
else:
raise ValueError(config["mode"])
filepath = os.path.join(
config["save-path"],
config["load-title"],
f"{config['load-title']}_{config['start-episode']:04d}.pt"
)
print(f"> Loading Checkpoint {filepath}")
agent.load_state_dict(T.load(filepath, map_location=T.device(config["device"]))["state_dict"])
lab_env = lab.Lab("contributed/psychlab/harlow", ['RGB_INTERLEAVED'], config=task_config)
env = HarlowWrapper(lab_env, config, 0)
print(agent)
agent.to(config['device'])
agent.eval()
with T.no_grad():
done = False
state = env.reset()
p_action, p_reward = [0]*config["task"]["num-actions"], 0
episode_reward = 0
ht1, ct1 = agent.get_init_states(1, device)
ht2, ct2 = agent.get_init_states(2, device)
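# The stacked variant keeps two LSTM states (one per layer); as in training,
# the previous one-hot action and previous reward are fed back as inputs at
# every step.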
while not done:
logit, value, (ht1, ct1), (ht2, ct2) = agent(
T.tensor([state]).to(device), (
T.tensor([p_action]).float().to(device),
T.tensor([[p_reward]]).float().to(device)),
(ht1, ct1), (ht2, ct2)
)
logit = logit.squeeze(0)
prob = F.softmax(logit, dim=-1)
action = prob.multinomial(num_samples=1).detach()
state, reward, done, _ = env.step(int(action))
if reward == 0.2 and config["save-featmaps"]:
state, _, _, _ = env.step(0)
state, _, _, _ = env.step(1)
layer = 9
path = f"/home/bkhmsi/Documents/Projects/lab/Meta-RL-Harlow/featmaps_{config['start-episode']:04d}_{layer}.npy"
agent.save_featmaps(T.tensor([state]), path, layer)
print("> Feature Maps Saved")
exit()
episode_reward += reward
p_action = np.eye(env.num_actions)[int(action)]
p_reward = reward
env.save_frames(f"/home/bkhmsi/Documents/Projects/lab/Meta-RL-Harlow/sample_{config['start-episode']:04d}.gif")
print(f"Episode Reward: {episode_reward}")
| 5,563 | 35.605263 | 127 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/main_psychlab_single.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.utils.tensorboard import SummaryWriter
import deepmind_lab as lab
from tqdm import tqdm
from collections import namedtuple
from common.shared_optim import SharedAdam, SharedRMSprop
from Harlow_PsychLab.train import train, train_stacked
from Harlow_PsychLab.harlow import HarlowWrapper
from models.a3c_lstm import A3C_LSTM, A3C_StackedLSTM
from models.a3c_conv_lstm import A3C_ConvLSTM, A3C_ConvStackedLSTM
from models.densenet_lstm import DenseNet_StackedLSTM
if __name__ == "__main__":
mp.set_start_method("spawn")
os.environ['OMP_NUM_THREADS'] = '1'
    parser = argparse.ArgumentParser(description='Parameters')
parser.add_argument('-c', '--config', type=str,
default="/home/bkhmsi/Documents/Projects/lab/Meta-RL-Harlow/Harlow_PsychLab/config.yaml",
help='path of config file')
parser.add_argument('--length', type=int, default=3600,
help='Number of steps to run the agent')
parser.add_argument('--width', type=int, default=84,
help='Horizontal size of the observations')
parser.add_argument('--height', type=int, default=84,
help='Vertical size of the observations')
parser.add_argument('--fps', type=int, default=60,
help='Number of frames per second')
parser.add_argument('--runfiles_path', type=str, default=None,
help='Set the runfiles path to find DeepMind Lab data')
parser.add_argument('--level_script', type=str,
default='contributed/psychlab/harlow',
help='The environment level script to load')
parser.add_argument('--record', type=str, default=None,
help='Record the run to a demo file')
parser.add_argument('--demo', type=str, default=None,
help='Play back a recorded demo file')
parser.add_argument('--demofiles', type=str, default=None,
help='Directory for demo files')
parser.add_argument('--video', type=str, default=None,
help='Record the demo run as a video')
args = parser.parse_args()
with open(args.config, 'r', encoding="utf-8") as fin:
config = yaml.load(fin, Loader=yaml.FullLoader)
task_config = {
'fps': str(args.fps),
'width': str(args.width),
'height': str(args.height)
}
if args.record:
task_config['record'] = args.record
if args.demo:
task_config['demo'] = args.demo
if args.demofiles:
task_config['demofiles'] = args.demofiles
if args.video:
task_config['video'] = args.video
n_seeds = 1
device = config["device"]
base_seed = config["seed"]
base_run_title = config["run-title"]
for seed_idx in range(1, n_seeds + 1):
config["run-title"] = base_run_title + f"_{seed_idx}"
config["seed"] = base_seed * seed_idx
exp_path = os.path.join(config["save-path"], config["run-title"])
if not os.path.isdir(exp_path):
os.mkdir(exp_path)
out_path = os.path.join(exp_path, os.path.basename(args.config))
with open(out_path, 'w') as fout:
yaml.dump(config, fout)
############## Start Here ##############
print(f"> Running {config['run-title']} {config['mode']} using {config['optimizer']}")
params = (config["agent"], config["task"]["num-actions"])
if config["mode"] == "densenet-stacked":
agent = DenseNet_StackedLSTM(*params)
elif config["mode"] == "conv-stacked":
agent = A3C_ConvStackedLSTM(*params)
elif config["mode"] == "stacked":
agent = A3C_StackedLSTM(*params)
elif config["mode"] == "conv-vanilla":
agent = A3C_ConvLSTM(*params)
elif config["mode"] == "vanilla":
agent = A3C_LSTM(*params)
else:
raise ValueError(config["mode"])
print(agent)
agent.to(config['device'])
optim_class = T.optim.RMSprop if config["optimizer"] == "rmsprop" else T.optim.AdamW
optimizer = optim_class(agent.parameters(), lr=config["agent"]["lr"])
T.manual_seed(config["seed"])
np.random.seed(config["seed"])
T.random.manual_seed(config["seed"])
update_counter = 0
if config["copy-encoder"]:
filepath = os.path.join(
config["save-path"],
config["load-title"],
f"{config['load-title']}_{config['start-episode']}.pt"
)
print(f"> Copying Encoder from {filepath}")
pretrained_dict = T.load(filepath, map_location=T.device(config["device"]))["state_dict"]
load_dict = {}
for k, v in pretrained_dict.items():
            load_dict[k] = v if "encoder" in k else agent.state_dict()[k]  # avoids eval(); keeps this model's own non-encoder weights
agent.load_state_dict(load_dict)
if config["resume"]:
filepath = os.path.join(
config["save-path"],
config["load-title"],
f"{config['load-title']}_{config['start-episode']:04d}.pt"
)
print(f"> Loading Checkpoint {filepath}")
model_data = T.load(filepath, map_location=T.device(config["device"]))
update_counter = model_data["update_counter"]
agent.load_state_dict(model_data["state_dict"])
if config["freeze-encoder"]:
print("> Freezing Encoder")
for param in agent.encoder.parameters():
param.requires_grad = False
lab_env = lab.Lab("contributed/psychlab/harlow", ['RGB_INTERLEAVED'], config=task_config)
env = HarlowWrapper(lab_env, config, 0)
agent.train()
### hyper-parameters ###
gamma = config["agent"]["gamma"]
gae_lambda = config["agent"]["gae-lambda"]
val_coeff = config["agent"]["value-loss-weight"]
entropy_coeff = config["agent"]["entropy-weight"]
n_step_update = config["agent"]["n-step-update"]
writer = SummaryWriter(log_dir=os.path.join(config["log-path"], config["run-title"]))
save_path = os.path.join(config["save-path"], config["run-title"], config["run-title"]+"_{epi:04d}")
save_interval = config["save-interval"]
done = True
state = env.reset()
p_action, p_reward = [0]*config["task"]["num-actions"], 0
episode_reward = 0
total_rewards = []
while True:
if done:
ht, ct = agent.get_init_states(device)
else:
ht, ct = ht.detach(), ct.detach()
values = []
log_probs = []
rewards = []
entropies = []
for _ in range(n_step_update):
logit, value, (ht, ct) = agent(
T.tensor([state]).to(device), (
T.tensor([p_action]).float().to(device),
T.tensor([[p_reward]]).float().to(device)),
(ht, ct)
)
logit = logit.squeeze(0)
prob = F.softmax(logit, dim=-1)
log_prob = F.log_softmax(logit, dim=-1)
entropy = -(log_prob * prob).sum(1, keepdim=True)
entropies += [entropy]
action = prob.multinomial(num_samples=1).detach()
log_prob = log_prob.gather(1, action)
state, reward, done, _ = env.step(int(action))
# if reward == 1:
# env.snapshot()
# exit()
episode_reward += reward
p_action = np.eye(env.num_actions)[int(action)]
p_reward = reward
log_probs += [log_prob]
values += [value]
rewards += [reward]
if done:
state = env.reset()
total_rewards += [episode_reward]
avg_reward_100 = np.array(total_rewards[-100:]).mean()
writer.add_scalar("perf/reward_t", episode_reward, env.episode_num)
writer.add_scalar("perf/avg_reward_100", avg_reward_100, env.episode_num)
episode_reward = 0
if env.episode_num % save_interval == 0:
T.save({
"state_dict": agent.state_dict(),
"avg_reward_100": avg_reward_100,
"update_counter": update_counter
}, save_path.format(epi=env.episode_num) + ".pt")
break
R = T.zeros(1, 1).to(device)
if not done:
_, value, _ = agent(
T.tensor([state]).to(device), (
T.tensor([p_action]).float().to(device),
T.tensor([[p_reward]]).float().to(device)),
(ht, ct)
)
R = value.detach()
values += [R]
policy_loss = 0
value_loss = 0
gae = T.zeros(1, 1).to(device)
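        # walk the rollout backwards: R accumulates the bootstrapped n-step
        # return, while gae accumulates GAE(lambda):
        #   delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
        #   gae_t   = delta_t + gamma * lambda * gae_{t+1}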
for i in reversed(range(len(rewards))):
R = gamma * R + rewards[i]
advantage = R - values[i]
value_loss = value_loss + 0.5 * advantage.pow(2)
# Generalized Advantage Estimation
delta_t = rewards[i] + gamma * values[i + 1] - values[i]
gae = gae * gamma * gae_lambda + delta_t
policy_loss = policy_loss - \
log_probs[i] * gae.detach() - entropy_coeff * entropies[i]
loss = policy_loss + val_coeff * value_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
update_counter += 1
writer.add_scalar("losses/total_loss", loss.item(), update_counter)
# while True:
# if done:
# ht1, ct1 = agent.get_init_states(1, device)
# ht2, ct2 = agent.get_init_states(2, device)
# else:
# ht1, ct1 = ht1.detach(), ct1.detach()
# ht2, ct2 = ht2.detach(), ct2.detach()
# values = []
# log_probs = []
# rewards = []
# entropies = []
# for _ in range(n_step_update):
# logit, value, (ht1, ct1), (ht2, ct2) = agent(
# T.tensor([state]).float().to(device), (
# T.tensor([p_action]).float().to(device),
# T.tensor([[p_reward]]).float().to(device)),
# (ht1, ct1), (ht2, ct2)
# )
# logit = logit.squeeze(0)
# prob = F.softmax(logit, dim=-1)
# log_prob = F.log_softmax(logit, dim=-1)
# entropy = -(log_prob * prob).sum(1, keepdim=True)
# entropies += [entropy]
# action = prob.multinomial(num_samples=1).detach()
# log_prob = log_prob.gather(1, action)
# state, reward, done, _ = env.step(int(action))
# episode_reward += reward
# p_action = np.eye(env.num_actions)[int(action)]
# p_reward = reward
# log_probs += [log_prob]
# values += [value]
# rewards += [reward]
# if done:
# state = env.reset()
# total_rewards += [episode_reward]
# avg_reward_100 = np.array(total_rewards[-100:]).mean()
# writer.add_scalar("perf/reward_t", episode_reward, env.episode_num)
# writer.add_scalar("perf/avg_reward_100", avg_reward_100, env.episode_num)
# episode_reward = 0
# if env.episode_num % save_interval == 0:
# T.save({
# "state_dict": agent.state_dict(),
# "avg_reward_100": avg_reward_100,
# "update_counter": update_counter
# }, save_path.format(epi=env.episode_num) + ".pt")
# break
# R = T.zeros(1, 1).to(device)
# if not done:
# _, value, _, _ = agent(
# T.tensor([state]).float().to(device), (
# T.tensor([p_action]).float().to(device),
# T.tensor([[p_reward]]).float().to(device)),
# (ht1, ct1), (ht2, ct2)
# )
# R = value.detach()
# values += [R]
# policy_loss = 0
# value_loss = 0
# gae = T.zeros(1, 1).to(device)
# for i in reversed(range(len(rewards))):
# R = gamma * R + rewards[i]
# advantage = R - values[i]
# value_loss = value_loss + 0.5 * advantage.pow(2)
# # Generalized Advantage Estimation
# delta_t = rewards[i] + gamma * values[i + 1] - values[i]
# gae = gae * gamma * gae_lambda + delta_t
# policy_loss = policy_loss - \
# log_probs[i] * gae.detach() - entropy_coeff * entropies[i]
# loss = policy_loss + val_coeff * value_loss
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
# update_counter += 1
# writer.add_scalar("losses/total_loss", loss.item(), update_counter)
| 14,014 | 36.573727 | 114 | py |
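For reference, a minimal self-contained sketch of the GAE(lambda) recursion used by both training loops above, run on a made-up 3-step rollout (all numbers are illustrative):
import torch as T
gamma, gae_lambda = 0.9, 0.95
rewards = [0.0, 0.0, 1.0]                       # toy rollout
values = [T.tensor([[0.1]]), T.tensor([[0.2]]),
          T.tensor([[0.5]]), T.tensor([[0.0]])] # V(s_0)..V(s_3); last entry is the bootstrap value
gae = T.zeros(1, 1)
for i in reversed(range(len(rewards))):
    delta_t = rewards[i] + gamma * values[i + 1] - values[i]
    gae = gae * gamma * gae_lambda + delta_t
print(gae)  # GAE advantage estimate for the first step of the rollout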
Meta-RL-Harlow | Meta-RL-Harlow-master/pretrain/evaluate.py | import torch as T
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
from utils import get_test_loader
model_urls = {
'cifar10': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar10-d875770b.pth',
'cifar100': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar100-3a55a987.pth',
}
class CIFAR(nn.Module):
def __init__(self, features, n_channel, num_classes):
super(CIFAR, self).__init__()
assert isinstance(features, nn.Sequential), type(features)
self.features = features
self.classifier = nn.Sequential(
nn.Linear(n_channel, num_classes)
)
print(self.features)
print(self.classifier)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for i, v in enumerate(cfg):
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
padding = v[1] if isinstance(v, tuple) else 1
out_channels = v[0] if isinstance(v, tuple) else v
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=padding)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(out_channels, affine=False), nn.ReLU()]
else:
layers += [conv2d, nn.ReLU()]
in_channels = out_channels
return nn.Sequential(*layers)
def cifar10(n_channel, pretrained=None):
cfg = [n_channel, n_channel, 'M', 2*n_channel, 2*n_channel, 'M', 4*n_channel, 4*n_channel, 'M', (8*n_channel, 0), 'M']
layers = make_layers(cfg, batch_norm=True)
model = CIFAR(layers, n_channel=8*n_channel, num_classes=10)
if pretrained is not None:
m = model_zoo.load_url(model_urls['cifar10'])
state_dict = m.state_dict() if isinstance(m, nn.Module) else m
assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
model.load_state_dict(state_dict)
return model
def cifar100(n_channel, pretrained=None):
cfg = [n_channel, n_channel, 'M', 2*n_channel, 2*n_channel, 'M', 4*n_channel, 4*n_channel, 'M', (8*n_channel, 0), 'M']
layers = make_layers(cfg, batch_norm=True)
model = CIFAR(layers, n_channel=8*n_channel, num_classes=100)
if pretrained is not None:
m = model_zoo.load_url(model_urls['cifar100'])
state_dict = m.state_dict() if isinstance(m, nn.Module) else m
assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
model.load_state_dict(state_dict)
return model
if __name__ == "__main__":
device = 'cuda'
model = cifar100(128, pretrained=True).to(device)
model.eval()
test_loader = get_test_loader("./data", num_classes=100, batch_size=32)
total, correct = 0, 0
for data in test_loader:
images, labels = data[0].to(device), data[1].to(device)
outputs = model(images)
_, predicted = T.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
acc = correct / total
print(f"Testing Accuracy: {acc*100:.4f}%")
| 3,243 | 36.287356 | 122 | py |
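A quick shape check for make_layers above (a sketch assuming it is imported from this evaluate.py module; the cfg here is a toy example, not one the repo uses). Plain ints use padding 1, a tuple entry means (out_channels, padding), and 'M' halves the spatial resolution:
import torch
from evaluate import make_layers
layers = make_layers([16, 'M', (32, 0)], batch_norm=True)
x = torch.zeros(1, 3, 32, 32)
print(layers(x).shape)  # torch.Size([1, 32, 14, 14])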
Meta-RL-Harlow | Meta-RL-Harlow-master/pretrain/utils.py | """
Create train, valid, test iterators for CIFAR-10 [1].
Easily extended to MNIST, CIFAR-100 and Imagenet.
[1]: https://discuss.pytorch.org/t/feedback-on-pytorch-for-kaggle-competitions/2252/4
"""
import torch
import imageio
import numpy as np
import matplotlib.pyplot as plt
from torchvision import datasets
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler
def pad_image(image):
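    """Place `image` at a random offset inside a black 84x84 HWC canvas."""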
imsize = 84
full_image = np.zeros((imsize,imsize,3))
image = np.array(image)
rand_x = np.random.randint(imsize-image.shape[0])
rand_y = np.random.randint(imsize-image.shape[1])
full_image[
rand_x:rand_x+image.shape[0],
rand_y:rand_y+image.shape[1], :
] = image
return full_image
def get_train_valid_loader(data_dir,
num_classes,
batch_size,
val_batch_size,
augment,
random_seed,
valid_size=0.1,
shuffle=True,
num_workers=4,
pin_memory=False):
"""
Utility function for loading and returning train and valid
multi-process iterators over the CIFAR-10 dataset.
If using CUDA, num_workers should be set to 1 and pin_memory to True.
Params
------
- data_dir: path directory to the dataset.
- batch_size: how many samples per batch to load.
- augment: whether to apply the data augmentation scheme
mentioned in the paper. Only applied on the train split.
- random_seed: fix seed for reproducibility.
- valid_size: percentage split of the training set used for
the validation set. Should be a float in the range [0, 1].
- shuffle: whether to shuffle the train/validation indices.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
Returns
-------
- train_loader: training set iterator.
- valid_loader: validation set iterator.
"""
error_msg = "[!] valid_size should be in the range [0, 1]."
assert ((valid_size >= 0) and (valid_size <= 1)), error_msg
# define transforms
valid_transform = transforms.Compose([
# transforms.Lambda(lambda x: pad_image(x)),
transforms.ToTensor()
])
train_transform = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.2),
# transforms.Lambda(lambda x: pad_image(x)),
transforms.ToTensor()
])
dataset = datasets.CIFAR10 if num_classes == 10 else datasets.CIFAR100
# load the dataset
train_dataset = dataset(
root=data_dir,
train=True,
download=True,
transform=train_transform,
)
valid_dataset = dataset(
root=data_dir,
train=True,
download=True,
transform=valid_transform,
)
num_train = len(train_dataset)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
if shuffle:
np.random.seed(random_seed)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
sampler=train_sampler,
num_workers=num_workers,
pin_memory=pin_memory,
)
valid_loader = torch.utils.data.DataLoader(
valid_dataset,
batch_size=val_batch_size,
sampler=valid_sampler,
num_workers=num_workers,
pin_memory=pin_memory,
)
return (train_loader, valid_loader)
def get_test_loader(data_dir,
num_classes,
batch_size,
shuffle=True,
num_workers=4,
pin_memory=False):
"""
Utility function for loading and returning a multi-process
test iterator over the CIFAR-10 dataset.
If using CUDA, num_workers should be set to 1 and pin_memory to True.
Params
------
- data_dir: path directory to the dataset.
- batch_size: how many samples per batch to load.
- shuffle: whether to shuffle the dataset after every epoch.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
Returns
-------
- data_loader: test set iterator.
"""
# define transform
transform = transforms.Compose([
# transforms.Lambda(lambda x: pad_image(x)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
dataset = datasets.CIFAR10 if num_classes == 10 else datasets.CIFAR100
dataset = dataset(
root=data_dir,
train=False,
download=True,
transform=transform,
)
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=pin_memory,
)
return data_loader
if __name__ == "__main__":
    loader = get_test_loader("./data", num_classes=100, batch_size=1)
    image, _ = next(iter(loader))
    # pad_image expects an HWC array; undo the (-1, 1) normalization for display
    image_hwc = np.moveaxis(image[0].numpy(), 0, -1) * 0.5 + 0.5
    transformed_image = pad_image(image_hwc)
    plt.imshow(transformed_image)
    plt.show() | 5,546 | 29.646409 | 85 | py |
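A minimal sketch of wiring the loaders above into a training script (the path and hyper-parameters are placeholders):
train_loader, valid_loader = get_train_valid_loader(
    data_dir="./data", num_classes=10, batch_size=64, val_batch_size=256,
    augment=False, random_seed=42, valid_size=0.1, shuffle=True,
)
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # torch.Size([64, 3, 32, 32]) torch.Size([64])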
Meta-RL-Harlow | Meta-RL-Harlow-master/pretrain/train.py | import os
import yaml
import argparse
import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from tqdm import tqdm
from copy import deepcopy
from torch.utils.tensorboard import SummaryWriter
from utils import get_train_valid_loader, get_test_loader
class ConvNet(nn.Module):
def __init__(self, num_classes):
super(ConvNet, self).__init__()
self.encoder = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=(8, 8), stride=(4, 4)),  # 32x32 CIFAR input -> (16, 7, 7)
            nn.Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2)), # -> (32, 2, 2)
        )
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(128, 256),  # 32 * 2 * 2 = 128 flattened features
            nn.ReLU(),
            nn.Linear(256, num_classes),
            # no final Softmax: nn.CrossEntropyLoss below expects raw logits
)
def forward(self, image):
return self.classifier(self.encoder(image))
class Trainer:
def __init__(self, config):
self.device = config["device"]
self.model = ConvNet(num_classes=config["num-classes"])
self.model.to(self.device)
if config["resume"]:
print("> Loading Checkpoint")
self.model.load_state_dict(T.load(config["load-path"]))
self.train_loader, self.val_loader = get_train_valid_loader(
config["data-path"],
config["num-classes"],
config["batch-size"],
config["val-batch-size"],
config["augment"],
config["seed"],
config["valid-size"],
config["shuffle"],
config["num-workers"]
)
self.test_loader = get_test_loader(
config["data-path"],
config["num-classes"],
config["batch-size"],
config["shuffle"],
config["num-workers"],
config["pin-memory"]
)
self.criterion = nn.CrossEntropyLoss()
self.optim = T.optim.AdamW(self.model.parameters(), lr=config["lr-init"], weight_decay=config["weight-decay"])
self.writer = SummaryWriter(log_dir=os.path.join("logs", config["run-title"]))
self.reduce_lr = T.optim.lr_scheduler.ReduceLROnPlateau(self.optim, factor=config["lr-factor"], patience=config["lr-patience"], min_lr=config["lr-min"])
self.stopping_patience = config["stopping-patience"]
self.stopping_delta = config["stopping-delta"]
self.filepath = os.path.join(config["save-path"], config["run-title"], config["run-title"]+".pt")
def train_epoch(self, epoch, pbar):
self.model.train()
train_loss = np.zeros(len(self.train_loader))
for i, (inputs, labels) in enumerate(self.train_loader):
inputs = inputs.float().to(self.device)
labels = labels.to(self.device)
self.optim.zero_grad()
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
loss.backward()
self.optim.step()
train_loss[i] = loss.item()
            pbar.set_description(f"Epoch {epoch} | Loss: {train_loss[:i+1].sum()/(i+1):.4f} | ({i+1}/{len(self.train_loader)})")
val_loss = self.validate_epoch()
return train_loss.mean(), val_loss
def validate_epoch(self):
self.model.eval()
val_loss = np.zeros(len(self.val_loader))
for i, (inputs, labels) in enumerate(self.val_loader):
inputs = inputs.float().to(self.device)
labels = labels.to(self.device)
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
val_loss[i] = loss.item()
return val_loss.mean()
def evaluate(self, load_path):
total, correct = 0, 0
self.model.load_state_dict(T.load(load_path, map_location=T.device(self.device)))
self.model.eval()
for data in self.test_loader:
images, labels = data[0].to(self.device), data[1].to(self.device)
outputs = self.model(images)
_, predicted = T.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
return correct / total
def train(self, epochs):
stopping_counter = 0
best_val_loss = np.inf
progress = tqdm(range(epochs))
for epoch in progress:
########## Training ##########
train_loss, val_loss = self.train_epoch(epoch, progress)
self.writer.add_scalar("loss/train", train_loss, epoch)
self.writer.add_scalar("loss/val", val_loss, epoch)
progress.write(f"Epoch {epoch}/{epochs}\t| Train Loss {train_loss:.5f} | Val Loss {val_loss:.5f}")
            if val_loss < best_val_loss and abs(val_loss - best_val_loss) > self.stopping_delta:
stopping_counter = 0
best_val_loss = val_loss
T.save(self.model.state_dict(), self.filepath)
else:
stopping_counter += 1
if stopping_counter > self.stopping_patience:
break
self.writer.close()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Parameters')
parser.add_argument('-c', '--config', type=str, default="config.yaml", help='path of config file')
args = parser.parse_args()
with open(args.config, 'r', encoding="utf-8") as file:
config = yaml.load(file, Loader=yaml.FullLoader)
exp_path = os.path.join(config["save-path"], config["run-title"])
if not os.path.isdir(exp_path): os.mkdir(exp_path)
trainer = Trainer(config)
if config["train"]:
print("> Training")
trainer.train(config["epochs"])
if config["test"]:
print("> Testing")
acc = trainer.evaluate(config["load-path"])
print(f"Testing Accuracy: {acc*100:.4f}%")
| 5,955 | 32.088889 | 160 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/common/shared_optim.py | import math
import torch as T
import torch.optim as optim
class SharedAdam(optim.Adam):
"""Implements Adam algorithm with shared states.
"""
def __init__(self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0):
super(SharedAdam, self).__init__(params, lr, betas, eps, weight_decay)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = T.zeros(1)
state['exp_avg'] = p.data.new().resize_as_(p.data).zero_()
state['exp_avg_sq'] = p.data.new().resize_as_(p.data).zero_()
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'].share_memory_()
state['exp_avg'].share_memory_()
state['exp_avg_sq'].share_memory_()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
if group['weight_decay'] != 0:
                    grad = grad.add(p.data, alpha=group['weight_decay'])
# Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step'].item()
bias_correction2 = 1 - beta2 ** state['step'].item()
step_size = group['lr'] * math.sqrt(
bias_correction2) / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
return loss
# Non-centered RMSprop update with shared statistics (without momentum)
class SharedRMSprop(optim.RMSprop):
"""Implements RMSprop algorithm with shared states.
"""
def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0):
super(SharedRMSprop, self).__init__(params, lr=lr, alpha=alpha, eps=eps, weight_decay=weight_decay, momentum=0, centered=False)
# State initialisation (must be done before step, else will not be shared between threads)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = p.data.new().resize_(1).zero_()
state['square_avg'] = p.data.new().resize_as_(p.data).zero_()
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'].share_memory_()
state['square_avg'].share_memory_()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
square_avg = state['square_avg']
alpha = group['alpha']
state['step'] += 1
if group['weight_decay'] != 0:
                    grad = grad.add(p.data, alpha=group['weight_decay'])
# g = αg + (1 - α)Δθ^2
                square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha)
# θ ← θ - ηΔθ/√(g + ε)
avg = square_avg.sqrt().add_(group['eps'])
                p.data.addcdiv_(grad, avg, value=-group['lr'])
return loss | 4,680 | 35.286822 | 135 | py |
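A minimal sketch of how these shared optimizers are typically used for A3C with torch.multiprocessing; the construction of shared_model, config, and task_config is elided here, and train refers to the worker function defined in Harlow_PsychLab/train.py:
import torch.multiprocessing as mp
shared_model.share_memory()                 # parameters visible to all workers
optimizer = SharedAdam(shared_model.parameters(), lr=1e-4)
optimizer.share_memory()                    # Adam statistics shared as well
processes = []
for rank in range(4):
    p = mp.Process(target=train, args=(config, shared_model, optimizer, rank, task_config, 0))
    p.start()
    processes.append(p)
for p in processes:
    p.join()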
Meta-RL-Harlow | Meta-RL-Harlow-master/Harlow_PsychLab/train.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from datetime import datetime
from collections import namedtuple
import deepmind_lab as lab
from Harlow_PsychLab.harlow import HarlowWrapper
from models.a3c_lstm import A3C_LSTM, A3C_StackedLSTM
from models.a3c_conv_lstm import A3C_ConvLSTM, A3C_ConvStackedLSTM
from models.resnet_lstm import ResNet_LSTM
def ensure_shared_grads(model, shared_model):
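    """Copy local worker gradients into the shared A3C model. On CPU the
    `_grad` tensors live in shared memory once assigned, so if the shared
    model already has gradients wired up we can return early."""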
for param, shared_param in zip(model.parameters(),
shared_model.parameters()):
if shared_param.grad is not None:
return
shared_param._grad = param.grad
def train(config,
shared_model,
optimizer,
rank,
task_config,
counter,
):
T.manual_seed(config["seed"] + rank)
np.random.seed(config["seed"] + rank)
T.random.manual_seed(config["seed"] + rank)
device = config["device"]
lab_env = lab.Lab("contributed/psychlab/harlow", ['RGB_INTERLEAVED'], config=task_config)
env = HarlowWrapper(lab_env, config, rank)
if config["mode"] == "resnet":
agent = ResNet_LSTM(config["agent"], env.num_actions)
elif config["mode"] == "conv-vanilla":
agent = A3C_ConvLSTM(config["agent"], env.num_actions)
elif config["mode"] == "vanilla":
agent = A3C_LSTM(config["agent"], env.num_actions)
else:
raise ValueError(config["mode"])
agent.to(device)
agent.train()
### hyper-parameters ###
gamma = config["agent"]["gamma"]
gae_lambda = config["agent"]["gae-lambda"]
val_coeff = config["agent"]["value-loss-weight"]
entropy_coeff = config["agent"]["entropy-weight"]
n_step_update = config["agent"]["n-step-update"]
writer = SummaryWriter(log_dir=os.path.join(config["log-path"], config["run-title"] + f"_{rank}"))
save_path = os.path.join(config["save-path"], config["run-title"], config["run-title"]+"_{epi:04d}")
save_interval = config["save-interval"]
done = True
state = env.reset()
p_action, p_reward = [0]*config["task"]["num-actions"], 0
print('='*50)
print(f"Starting Worker {rank}")
print('='*50)
episode_reward = 0
update_counter = counter
total_rewards = []
while True:
agent.load_state_dict(shared_model.state_dict())
if done:
rnn_state = agent.get_init_states(device)
else:
if config["agent"]["cell-type"] == "lstm":
rnn_state = rnn_state[0].detach(), rnn_state[1].detach()
else:
rnn_state = rnn_state.detach()
values = []
log_probs = []
rewards = []
entropies = []
for _ in range(n_step_update):
logit, value, rnn_state = agent(
T.tensor([state]).to(device), (
T.tensor([p_action]).float().to(device),
T.tensor([[p_reward]]).float().to(device)),
rnn_state
)
logit = logit.squeeze(0)
prob = F.softmax(logit, dim=-1)
log_prob = F.log_softmax(logit, dim=-1)
entropy = -(log_prob * prob).sum(1, keepdim=True)
entropies += [entropy]
action = prob.multinomial(num_samples=1).detach()
log_prob = log_prob.gather(1, action)
state, reward, done, _ = env.step(int(action))
# if done:
# env.save_frames(os.path.join(config["save-path"], "frames.gif"))
# exit()
episode_reward += reward
p_action = np.eye(env.num_actions)[int(action)]
p_reward = reward
log_probs += [log_prob]
values += [value]
rewards += [reward]
if done:
state = env.reset()
total_rewards += [episode_reward]
avg_reward_100 = np.array(total_rewards[-100:]).mean()
writer.add_scalar("perf/reward_t", episode_reward, env.episode_num)
writer.add_scalar("perf/avg_reward_100", avg_reward_100, env.episode_num)
episode_reward = 0
if env.episode_num % save_interval == 0:
T.save({
"state_dict": shared_model.state_dict(),
"avg_reward_100": avg_reward_100,
"update_counter": update_counter
}, save_path.format(epi=env.episode_num) + ".pt")
break
R = T.zeros(1, 1).to(device)
if not done:
_, value, _ = agent(
T.tensor([state]).to(device), (
T.tensor([p_action]).float().to(device),
T.tensor([[p_reward]]).float().to(device)),
rnn_state
)
R = value.detach()
values += [R]
policy_loss = 0
value_loss = 0
gae = T.zeros(1, 1).to(device)
for i in reversed(range(len(rewards))):
R = gamma * R + rewards[i]
advantage = R - values[i]
value_loss = value_loss + 0.5 * advantage.pow(2)
# Generalized Advantage Estimation
delta_t = rewards[i] + gamma * values[i + 1] - values[i]
gae = gae * gamma * gae_lambda + delta_t
policy_loss = policy_loss - \
log_probs[i] * gae.detach() - entropy_coeff * entropies[i]
loss = policy_loss + val_coeff * value_loss
optimizer.zero_grad()
loss.backward()
ensure_shared_grads(agent, shared_model)
optimizer.step()
update_counter += 1
writer.add_scalar("losses/total_loss", loss.item(), update_counter)
def train_stacked(config,
shared_model,
optimizer,
rank,
task_config,
counter,
):
T.manual_seed(config["seed"] + rank)
np.random.seed(config["seed"] + rank)
T.random.manual_seed(config["seed"] + rank)
device = config["device"]
lab_env = lab.Lab("contributed/psychlab/harlow", ['RGB_INTERLEAVED'], config=task_config)
env = HarlowWrapper(lab_env, config, rank)
if config["mode"] == "conv-stacked":
agent = A3C_ConvStackedLSTM(config["agent"], env.num_actions)
elif config["mode"] == "stacked":
agent = A3C_StackedLSTM(config["agent"], env.num_actions)
else:
raise ValueError(config["mode"])
agent.to(device)
agent.train()
### hyper-parameters ###
gamma = config["agent"]["gamma"]
gae_lambda = config["agent"]["gae-lambda"]
val_coeff = config["agent"]["value-loss-weight"]
entropy_coeff = config["agent"]["entropy-weight"]
n_step_update = config["agent"]["n-step-update"]
writer = SummaryWriter(log_dir=os.path.join(config["log-path"], config["run-title"] + f"_{rank}"))
save_path = os.path.join(config["save-path"], config["run-title"], config["run-title"]+"_{epi:04d}")
save_interval = config["save-interval"]
done = True
state = env.reset()
p_action, p_reward = [0]*config["task"]["num-actions"], 0
print('='*50)
print(f"Starting Worker {rank}")
print('='*50)
episode_reward = 0
update_counter = 0
total_rewards = []
while True:
agent.load_state_dict(shared_model.state_dict())
if done:
ht1, ct1 = agent.get_init_states(1, device)
ht2, ct2 = agent.get_init_states(2, device)
else:
ht1, ct1 = ht1.detach(), ct1.detach()
ht2, ct2 = ht2.detach(), ct2.detach()
values = []
log_probs = []
rewards = []
entropies = []
for _ in range(n_step_update):
logit, value, (ht1, ct1), (ht2, ct2) = agent(
T.tensor([state]).to(device), (
T.tensor([p_action]).float().to(device),
T.tensor([[p_reward]]).float().to(device)),
(ht1, ct1), (ht2, ct2)
)
logit = logit.squeeze(0)
prob = F.softmax(logit, dim=-1)
log_prob = F.log_softmax(logit, dim=-1)
entropy = -(log_prob * prob).sum(1, keepdim=True)
entropies += [entropy]
action = prob.multinomial(num_samples=1).detach()
log_prob = log_prob.gather(1, action)
state, reward, done, _ = env.step(int(action))
episode_reward += reward
p_action = np.eye(env.num_actions)[int(action)]
p_reward = reward
log_probs += [log_prob]
values += [value]
rewards += [reward]
if done:
state = env.reset()
total_rewards += [episode_reward]
avg_reward_100 = np.array(total_rewards[-100:]).mean()
writer.add_scalar("perf/reward_t", episode_reward, env.episode_num)
writer.add_scalar("perf/avg_reward_100", avg_reward_100, env.episode_num)
episode_reward = 0
if env.episode_num % save_interval == 0:
T.save({
"state_dict": shared_model.state_dict(),
"avg_reward_100": avg_reward_100,
"update_counter": update_counter
}, save_path.format(epi=env.episode_num) + ".pt")
break
R = T.zeros(1, 1).to(device)
if not done:
_, value, _, _ = agent(
T.tensor([state]).to(device), (
T.tensor([p_action]).float().to(device),
T.tensor([[p_reward]]).float().to(device)),
(ht1, ct1), (ht2, ct2)
)
R = value.detach()
values += [R]
policy_loss = 0
value_loss = 0
gae = T.zeros(1, 1).to(device)
for i in reversed(range(len(rewards))):
R = gamma * R + rewards[i]
advantage = R - values[i]
value_loss = value_loss + 0.5 * advantage.pow(2)
# Generalized Advantage Estimation
delta_t = rewards[i] + gamma * values[i + 1] - values[i]
gae = gae * gamma * gae_lambda + delta_t
policy_loss = policy_loss - \
log_probs[i] * gae.detach() - entropy_coeff * entropies[i]
loss = policy_loss + val_coeff * value_loss
optimizer.zero_grad()
loss.backward()
ensure_shared_grads(agent, shared_model)
optimizer.step()
update_counter += 1
writer.add_scalar("losses/total_loss", loss.item(), update_counter) | 10,760 | 31.315315 | 104 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/models/dnd.py | import torch as T
import torch.nn.functional as F
# constants
ALL_KERNELS = ['cosine', 'l1', 'l2']
ALL_POLICIES = ['1NN']
class DND:
"""The differentiable neural dictionary (DND) class. This enables episodic
recall in a neural network.
notes:
- a memory is a row vector
Parameters
----------
dict_len : int
        the maximal length of the dictionary
memory_dim : int
        the dim or len of memory i; we assume memory_i is a row vector
kernel : str
the metric for memory search
Attributes
----------
encoding_off : bool
if True, stop forming memories
    retrieval_off : bool
        if True, stop retrieving memories
    reset_memory : func
if called, clear the dictionary
check_config : func
check the class config
"""
def __init__(self, dict_len, key_dim, memory_dim, kernel='l2'):
# params
self.dict_len = dict_len
self.kernel = kernel
self.key_dim = key_dim
self.memory_dim = memory_dim
# dynamic state
self.encoding_off = False
self.retrieval_off = False
# allocate space for memories
self.reset_memory()
# check everything
self.check_config()
def reset_memory(self):
self.pointer = 0
self.overflow = False
self.keys = T.empty(self.dict_len, self.key_dim)
self.vals = T.empty(self.dict_len, self.memory_dim)
def check_config(self):
assert self.dict_len > 0
assert self.kernel in ALL_KERNELS
def inject_memories(self, input_keys, input_vals):
"""Inject pre-defined keys and values
Parameters
----------
input_keys : list
a list of memory keys
input_vals : list
a list of memory content
"""
assert len(input_keys) == len(input_vals)
for k, v in zip(input_keys, input_vals):
self.save_memory(k, v)
def save_memory(self,
memory_key,
memory_val,
replace_similar=False,
threshold=0
):
"""Save an episodic memory to the dictionary
Parameters
----------
memory_key : a row vector
        a DND key, used for memory search
memory_val : a row vector
a DND value, representing the memory content
"""
if self.encoding_off:
return
        # add the new memory to the dictionary; using .data detaches the
        # tensors from the autograd graph, so stored memories carry no gradients
replaced = False
        n_stored = self.dict_len if self.overflow else self.pointer
        if replace_similar and n_stored > 0:
            similarities = compute_similarities(memory_key, self.keys[:n_stored], self.kernel)
closest_idx = T.argmax(similarities)
if similarities[closest_idx] > threshold:
self.keys[closest_idx] = T.squeeze(memory_key.data)
self.vals[closest_idx] = T.squeeze(memory_val.data)
replaced = True
if not replace_similar or not replaced:
self.keys[self.pointer] = T.squeeze(memory_key.data)
self.vals[self.pointer] = T.squeeze(memory_val.data)
self.pointer += 1
if self.pointer >= self.dict_len:
self.pointer = 0
self.overflow = True
def get_memory(self, query_key, threshold=-1):
"""Perform a 1-NN search over dnd
Parameters
----------
query_key : a row vector
            a DND key, used for memory search
Returns
-------
a row vector
a DND value, representing the memory content
"""
# if no memory, return the zero vector
if (self.pointer == 0 and not self.overflow) or self.retrieval_off:
return _empty_memory(self.memory_dim)
        # compute similarity(query, memory_i), over all stored memories
        # (after overflow the buffer is full, so search every slot)
        n_stored = self.dict_len if self.overflow else self.pointer
        similarities = compute_similarities(query_key, self.keys[:n_stored], self.kernel)
# get the best-match memory
best_memory_val = self._get_memory(similarities, threshold)
return best_memory_val
def _get_memory(self, similarities, threshold, policy='1NN'):
"""get the episodic memory according to some policy
e.g. if the policy is 1nn, return the best matching memory
e.g. the policy can be based on the rational model
Parameters
----------
similarities : a vector of len #memories
the similarity between query vs. key_i, for all i
policy : str
the retrieval policy
Returns
-------
a row vector
a DND value, representing the memory content
"""
best_memory_val = None
        if policy == '1NN':
best_memory_id = T.argmax(similarities)
if threshold <= 0 or similarities[best_memory_id] > threshold:
best_memory_val = self.vals[best_memory_id].unsqueeze(0)
else:
best_memory_val = _empty_memory(self.memory_dim)
else:
raise ValueError(f'unrecog recall policy: {policy}')
return best_memory_val
"""helpers"""
def compute_similarities(query_key, key_list, metric):
"""Compute the similarity between query vs. key_i for all i
i.e. compute q M, w/ q: 1 x key_dim, M: key_dim x #keys
Parameters
----------
    query_key : a vector
        the query key, of len key_dim
    key_list : list
        the stored memory keys
    metric : str
        one of 'cosine', 'l1', 'l2'
Returns
-------
a row vector w/ len #memories
the similarity between query vs. key_i, for all i
"""
# reshape query to 1 x key_dim
q = query_key.data.view(1, -1)
# compute similarities
if metric == 'cosine':
similarities = F.cosine_similarity(q.float(), key_list.float())
elif metric == 'l1':
similarities = - F.pairwise_distance(q, key_list, p=1)
elif metric == 'l2':
similarities = - F.pairwise_distance(q, key_list, p=2)
else:
raise ValueError(f'unrecog metric: {metric}')
return similarities
def _empty_memory(memory_dim):
"""Get a empty memory, assuming the memory is a row vector
"""
return T.zeros(1, memory_dim)
| 6,304 | 29.756098 | 98 | py |
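A minimal usage sketch of the DND above (the dimensions are arbitrary):
import torch as T
dnd = DND(dict_len=100, key_dim=8, memory_dim=16, kernel='l2')
key, val = T.randn(1, 8), T.randn(1, 16)
dnd.save_memory(key, val)           # write one (key, value) pair
recalled = dnd.get_memory(key)      # 1-NN recall; returns a [1, 16] row vector
print(T.allclose(recalled, val))    # True: the stored value is recovered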
Meta-RL-Harlow | Meta-RL-Harlow-master/models/ep_lstm.py | from typing import (
Tuple,
List,
Optional,
Dict,
Callable,
Union,
cast,
)
from collections import namedtuple
from abc import ABC, abstractmethod
from dataclasses import dataclass
import numpy as np
import torch as T
from torch import nn
from torch.nn import functional as F
from torch import Tensor
from models.ep_lstm_cell import EpLSTMCell
@dataclass
class EpLSTMCell_Builder:
hidden_size : int
vertical_dropout : float = 0.0
recurrent_dropout : float = 0.0
recurrent_dropout_mode : str = 'gal_tied'
input_kernel_initialization : str = 'xavier_uniform'
recurrent_activation : str = 'sigmoid'
tied_forget_gate : bool = False
def make(self, input_size: int):
return EpLSTMCell(input_size, self)
def make_scripted(self, *p, **ks):
return T.jit.script(self.make(*p, **ks))
class EpLSTM_Layer(nn.Module):
    def reorder_inputs(self, inputs: Union[List[T.Tensor], T.Tensor]):
        #^ inputs : [t b i]
        if self.direction == 'backward':
            if isinstance(inputs, T.Tensor):
                # tensors do not support negative-step slicing; flip instead
                return T.flip(inputs, dims=[0])
            return inputs[::-1]
        return inputs
def __init__(
self,
cell: EpLSTMCell,
direction='forward',
batch_first=False,
):
super().__init__()
if isinstance(batch_first, bool):
batch_first = (batch_first, batch_first)
self.batch_first = batch_first
self.direction = direction
self.cell_: EpLSTMCell = cell
@T.jit.ignore
def forward(self, inputs, state_t0):
x_t, m_t = inputs
if self.batch_first[0]:
#^ x_t : [b t i]
x_t = x_t.transpose(1, 0)
#^ x_t : [t b i]
# x_t = x_t.unbind(0)
if state_t0 is None:
state_t0 = self.cell_.get_init_state(x_t)
x_t = self.reorder_inputs(x_t)
sequence, state = self.cell_.loop(x_t, m_t, state_t0)
#^ sequence : t * [b h]
sequence = self.reorder_inputs(sequence)
sequence = T.stack(sequence)
#^ sequence : [t b h]
if self.batch_first[1]:
sequence = sequence.transpose(1, 0)
#^ sequence : [b t h]
return sequence, state
class EpLSTM(nn.Module):
def __init__(
self,
input_size : int,
num_layers : int,
batch_first : bool = False,
scripted : bool = True,
*args, **kargs,
):
super().__init__()
self._cell_builder = EpLSTMCell_Builder(*args, **kargs)
Dh = self._cell_builder.hidden_size
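        # the first layer consumes the raw input features; every subsequent
        # layer consumes the previous layer's hidden state of size Dh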
def make(isize: int):
# cell = self._cell_builder.make_scripted(isize)
cell = self._cell_builder.make(isize)
            return EpLSTM_Layer(cell, batch_first=batch_first)  # 2nd positional arg is `direction`, not a size
rnns = [
make(input_size),
*[
make(Dh)
for _ in range(num_layers - 1)
],
]
self.rnn = nn.Sequential(*rnns)
self.input_size = input_size
self.hidden_size = self._cell_builder.hidden_size
self.num_layers = num_layers
def __repr__(self):
return (
f'${self.__class__.__name__}'
+ '('
+ f'in={self.input_size}, '
+ f'hid={self.hidden_size}, '
            + f'layers={self.num_layers}'
            + '; '
            + str(self._cell_builder)
            + ')'
        )
    def forward(self, inputs, state_t0=None):
        # thread the memory signal m_t into every layer; only the sequence
        # output x_t propagates between layers
        x_t, m_t = inputs
        for rnn in self.rnn:
            x_t, state = rnn((x_t, m_t), state_t0)
        return x_t, state
def reset_parameters(self):
for rnn in self.rnn:
rnn.cell_.reset_parameters_() | 3,780 | 26.398551 | 70 | py |