# GTA-RL-master/test.py
import os
import argparse
import numpy as np
import torch
import time
from matplotlib import pyplot as plt
import matplotlib.cm as cm
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
from matplotlib.lines import Line2D
from utils import load_model
from problems.tsp.tsp_gurobi import *
from problems.tsp.tsp_aco import *
# Build a dynamic TSP instance: node coordinates follow a clipped random walk
def get(num_nodes, threshold):
stack = []
init = np.random.uniform(0, 1, (num_nodes, 2))
for i in range(num_nodes):
stack.append(init)
init = np.clip(init + np.random.uniform(-threshold, threshold, (num_nodes, 2)), 0, 1)
np_stack = np.array(stack)
return np_stack
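# Hedged sketch (not part of the original repo): a quick visual sanity check for
# get(); it plots each node's random-walk trajectory so the effect of `threshold`
# (the per-step drift intensity) can be eyeballed. Assumes only numpy/matplotlib,
# which are already imported above.
def preview_dynamic_instance(num_nodes=10, threshold=0.05):
    xy = get(num_nodes, threshold)  # shape (num_nodes, num_nodes, 2): one snapshot per step
    for i in range(num_nodes):
        plt.plot(xy[:, i, 0], xy[:, i, 1], marker='.', alpha=0.6)  # trajectory of node i
    plt.title('Dynamic TSP instance: node trajectories over time')
    plt.show()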
def make_oracle(model, xy, temperature=1.0):
num_nodes = len(xy)
if len(xy.shape) == 3:
xyt = torch.tensor(xy[0]).float()[None] # Add batch dimension
else:
xyt = torch.tensor(xy).float()[None]
with torch.no_grad(): # Inference only
embeddings, _ = model.embedder(model._init_embed(xyt))
# Compute keys, values for the glimpse and keys for the logits once as they can be reused in every step
fixed = model._precompute(embeddings)
def oracle(tour):
with torch.no_grad(): # Inference only
# Input tour with 0 based indices
# Output vector with probabilities for locations not in tour
tour = torch.tensor(tour).long()
if len(tour) == 0:
step_context = model.W_placeholder
else:
step_context = torch.cat((embeddings[0, tour[0]], embeddings[0, tour[-1]]), -1)
# Compute query = context node embedding, add batch and step dimensions (both 1)
query = fixed.context_node_projected + model.project_step_context(step_context[None, None, :])
# Create the mask and convert to bool depending on PyTorch version
mask = torch.zeros(num_nodes, dtype=torch.uint8) > 0
mask[tour] = 1
mask = mask[None, None, :] # Add batch and step dimension
log_p, _ = model._one_to_many_logits(query, fixed.glimpse_key, fixed.glimpse_val, fixed.logit_key, mask)
p = torch.softmax(log_p / temperature, -1)[0, 0]
assert (p[tour] == 0).all()
assert (p.sum() - 1).abs() < 1e-5
# assert np.allclose(p.sum().item(), 1)
return p.numpy()
return oracle
def make_dynamic_oracle(model, xy, temperature=1.0):
num_nodes = len(xy)
xyt = torch.tensor(xy).float()[None] # Add batch dimension
with torch.no_grad(): # Inference only
embeddings_all = model._init_embed(xyt)
embeddings_all, _ = model.embedder(embeddings_all)
# Compute keys, values for the glimpse and keys for the logits once as they can be reused in every step
def oracle(tour):
with torch.no_grad(): # Inference only
# Input tour with 0 based indices
# Output vector with probabilities for locations not in tour
embeddings = embeddings_all[:, len(tour), :, :]
fixed = model._precompute(embeddings)
tour = torch.tensor(tour).long()
if len(tour) == 0:
step_context = model.W_placeholder
else:
step_context = torch.cat((embeddings[0, tour[0]], embeddings[0, tour[-1]]), -1)
# Compute query = context node embedding, add batch and step dimensions (both 1)
query = fixed.context_node_projected + model.project_step_context(step_context[None, None, :])
# Create the mask and convert to bool depending on PyTorch version
mask = torch.zeros(num_nodes, dtype=torch.uint8) > 0
mask[tour] = 1
mask = mask[None, None, :] # Add batch and step dimension
log_p, _ = model._one_to_many_logits(query, fixed.glimpse_key, fixed.glimpse_val, fixed.logit_key, mask)
p = torch.softmax(log_p / temperature, -1)[0, 0]
assert (p[tour] == 0).all()
assert (p.sum() - 1).abs() < 1e-5
# assert np.allclose(p.sum().item(), 1)
return p.numpy()
return oracle
def find_tour(xy, model, test_dynamic):
model.eval()
oracle = make_dynamic_oracle(model, xy) if test_dynamic else make_oracle(model, xy)
sample = False
tour = []
tour_p = []
while (len(tour) < len(xy)):
p = oracle(tour)
if sample:
# Sample from p via the Gumbel-Max trick
g = -np.log(-np.log(np.random.rand(*p.shape)))
i = np.argmax(np.log(p) + g)
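# Gumbel-Max: with g_i = -log(-log(U_i)), U_i ~ Uniform(0, 1), the index
# argmax_i(log p_i + g_i) is distributed exactly as Categorical(p), so this
# draws a sample from p without an explicit normalization or cumsum.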
# i = np.random.multinomial(1, p)
else:
# Greedy
i = np.argmax(p)
tour.append(i)
tour_p.append(p)
return tour_p, tour
# Code inspired by Google OR Tools plot:
# https://github.com/google/or-tools/blob/fb12c5ded7423d524fc6c95656a9bdc290a81d4d/examples/python/cvrptw_plot.py
def plot_tsp(xy, tour, ax1, total_xy=None, title=""):
"""
Plot the TSP tour on matplotlib axis ax1.
"""
ax1.set_xlim(0, 1)
ax1.set_ylim(0, 1)
xs, ys = xy[tour].transpose()
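# np.roll(..., -1) pairs each node with its successor in the tour; the
# wrap-around pairs the last node with the first, closing the tour.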
dx = np.roll(xs, -1) - xs
dy = np.roll(ys, -1) - ys
d = np.sqrt(dx * dx + dy * dy)
lengths = d.cumsum()
# Scatter nodes
# Starting node
ax1.scatter([xs[0]], [ys[0]], s=100, color='red')
if total_xy is not None:
t_steps, nodes, coords = total_xy.shape
colors = cm.rainbow(np.linspace(0, 1, nodes))
for i in range(nodes):
ax1.scatter(total_xy[:, i, 0], total_xy[:, i, 1], s=20, color=colors[i])
ax1.scatter(xy[i][0], xy[i][1], s=100, color=colors[i])
else:
ax1.scatter(xs, ys, s=100, color='blue')
# Arcs
qv = ax1.quiver(
xs, ys, dx, dy,
scale_units='xy',
angles='xy',
scale=1,
)
ax1.set_title('Algorithm {}, {} nodes, total length {:.2f}'.format(title, len(tour), lengths[-1]))
def plot_tsp_with_data(xy, tour, title=""):
fig, ax = plt.subplots(figsize=(10, 10))
if len(xy.shape) == 3:
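# Dynamic instance: recover each node's coordinates at the time step it is
# visited by pairing (step, node), sorting by node id, and indexing
# xy[step, node], so that coords[i] holds node i's position when it is visited.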
ordered = np.array([np.arange(len(tour)), tour]).T
ordered = ordered[ordered[:, 1].argsort()]
coords = xy[ordered[:, 0], ordered[:, 1]]
plot_tsp(coords, tour, ax, xy, title)
else:
plot_tsp(xy, tour, ax)
plt.show()
def run_test(opts):
if opts.dynamic:
xy = get(opts.graph_size, opts.intensity)
else:
xy = np.random.uniform(0, 1, (opts.graph_size, 2))
model, _ = load_model(path=opts.load_path)
tour_p, tour = find_tour(xy, model, not opts.baseline)
### Plotting
fig, ax = plt.subplots(figsize=(10, 10))
if len(xy.shape) == 3:
ordered = np.array([np.arange(len(tour)), tour]).T
ordered = ordered[ordered[:, 1].argsort()]
coords = xy[ordered[:, 0], ordered[:, 1]]
plot_tsp(coords, tour, ax, xy, "RL: ")
else:
plot_tsp(xy, tour, ax)
print("RL: ", tour)
if opts.use_gurobi:
fig1, ax1 = plt.subplots(figsize=(10, 10))
start_time = time.time()
tour_length, tour_gb = solve_dynamic_euclidian_tsp(xy)
print("Gurobi Time: ", time.time() - start_time)
if len(xy.shape) == 3:
ordered = np.array([np.arange(len(tour_gb)), tour_gb]).T
ordered = ordered[ordered[:, 1].argsort()]
coords = xy[ordered[:, 0], ordered[:, 1]]
plot_tsp(coords, tour_gb, ax1, xy, "LP: ")
else:
plot_tsp(xy, tour_gb, ax1)
print("LP: ", tour_gb)
plt.show()
np.random.seed(1234)
def test_gurobi(filename, dynamic, time_limit=60):
data = load_from_path(filename)
results = solve_all_gurobi(data, dynamic, time_limit)
lengths = np.array(results)[:, 0]
print("Results: ", np.mean(lengths))
return results
def test_aco(filename):
data = load_from_path(filename)
results = solve_all_aco(data)
lengths = np.array(results)[:, 0]
print("Results: ", np.mean(lengths))
return results
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dynamic", action='store_true', help="Solve the Dynamic TSP")
parser.add_argument('--graph_size', type=int, default=20, help="The size of the problem graph")
parser.add_argument('--intensity', type=float, default=0.1, help="How much the dynamic nodes change over time")
parser.add_argument("--baseline", help="Name of the method to evaluate, 'aco', 'gurobi'")
parser.add_argument("--static", action='store_true', help="Use static baseline")
parser.add_argument('--load_path', help='Path to load model parameters and optimizer state from')
parser.add_argument('--load_data', help='Path to load dataset')
parser.add_argument('--gurobi_time', type=int, default=30, help="Time limit for Gurobi Solver")
parser.add_argument('--problem', type=str, default='tsp', help="Problem to solve")
opts = parser.parse_args(args=None)
opts.baseline = 'aco'
opts.dynamic = True
opts.gurobi_time = 20
if not opts.static:
opts.load_path = 'outputs/order/dynamic_tsp_20/run_4'
else:
opts.load_path = 'pretrained/tsp_20/'
if opts.dynamic:
opts.load_data = 'data/dynamic_tsp/dynamic_tsp20_validation_seed4321.pkl'
else:
opts.load_data = 'data/tsp/tsp20_test_seed1234.pkl'
if opts.baseline == 'gurobi':
test_gurobi(opts.load_data, opts.dynamic, opts.gurobi_time)
elif opts.baseline == 'aco':
test_aco(opts.load_data)
else:
run_test(opts)
# %%
# GTA-RL-master/run.py
#!/usr/bin/env python
import os
import json
import pprint as pp
import torch
import torch.optim as optim
from tensorboard_logger import Logger as TbLogger
from nets.critic_network import CriticNetwork
from options import get_options
from train import train_epoch, validate, get_inner_model
from reinforce_baselines import NoBaseline, ExponentialBaseline, CriticBaseline, RolloutBaseline, WarmupBaseline
from nets.attention_model import AttentionModel
from nets.st_attention_model import StAttentionModel
from nets.pointer_network import PointerNetwork, CriticNetworkLSTM
from utils import torch_load_cpu, load_problem
def run(opts):
# Pretty print the run args
pp.pprint(vars(opts))
# Set the random seed
torch.manual_seed(opts.seed)
# Optionally configure tensorboard
tb_logger = None
if not opts.no_tensorboard:
tb_logger = TbLogger(os.path.join(opts.log_dir, "{}_{}".format(opts.problem, opts.graph_size), opts.run_name))
os.makedirs(opts.save_dir)
# Save arguments so exact configuration can always be found
with open(os.path.join(opts.save_dir, "args.json"), 'w') as f:
json.dump(vars(opts), f, indent=True)
# Set the device
opts.device = torch.device("cuda:0" if opts.use_cuda else "cpu")
# Figure out what's the problem
problem = load_problem(opts.problem)
# Load data from load_path
load_data = {}
assert opts.load_path is None or opts.resume is None, "Only one of load path and resume can be given"
load_path = opts.load_path if opts.load_path is not None else opts.resume
if load_path is not None:
print(' [*] Loading data from {}'.format(load_path))
load_data = torch_load_cpu(load_path)
# Initialize model
model_class = {
'attention': AttentionModel,
'pointer': PointerNetwork,
'st_attention': StAttentionModel
}.get(opts.model, None)
assert model_class is not None, "Unknown model: {}".format(opts.model)
model = model_class(
opts.embedding_dim,
opts.hidden_dim,
problem,
n_encode_layers=opts.n_encode_layers,
mask_inner=True,
mask_logits=True,
normalization=opts.normalization,
tanh_clipping=opts.tanh_clipping,
checkpoint_encoder=opts.checkpoint_encoder,
shrink_size=opts.shrink_size,
use_single_time=opts.use_single_time
).to(opts.device)
if opts.use_cuda and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# Overwrite model parameters by parameters to load
model_ = get_inner_model(model)
model_.load_state_dict({**model_.state_dict(), **load_data.get('model', {})})
dynamic = False
if opts.model == 'st_attention':
dynamic = True
# Initialize baseline
if opts.baseline == 'exponential':
baseline = ExponentialBaseline(opts.exp_beta)
elif opts.baseline == 'critic' or opts.baseline == 'critic_lstm':
assert problem.NAME == 'tsp', "Critic only supported for TSP"
baseline = CriticBaseline(
(
CriticNetworkLSTM(
2,
opts.embedding_dim,
opts.hidden_dim,
opts.n_encode_layers,
opts.tanh_clipping
)
if opts.baseline == 'critic_lstm'
else
CriticNetwork(
2,
opts.embedding_dim,
opts.hidden_dim,
opts.n_encode_layers,
opts.normalization,
dynamic
)
).to(opts.device)
)
elif opts.baseline == 'rollout':
baseline = RolloutBaseline(model, problem, opts)
else:
assert opts.baseline is None, "Unknown baseline: {}".format(opts.baseline)
baseline = NoBaseline()
if opts.bl_warmup_epochs > 0:
baseline = WarmupBaseline(baseline, opts.bl_warmup_epochs, warmup_exp_beta=opts.exp_beta)
# Load baseline from data, make sure script is called with same type of baseline
if 'baseline' in load_data:
baseline.load_state_dict(load_data['baseline'])
# Initialize optimizer
optimizer = optim.Adam(
[{'params': model.parameters(), 'lr': opts.lr_model}]
+ (
[{'params': baseline.get_learnable_parameters(), 'lr': opts.lr_critic}]
if len(baseline.get_learnable_parameters()) > 0
else []
)
)
# Load optimizer state
if 'optimizer' in load_data:
optimizer.load_state_dict(load_data['optimizer'])
for state in optimizer.state.values():
for k, v in state.items():
# if isinstance(v, torch.Tensor):
if torch.is_tensor(v):
state[k] = v.to(opts.device)
# Initialize learning rate scheduler, decay by lr_decay once per epoch!
lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: opts.lr_decay ** epoch)
#optimizer.param_groups[0]['lr'] = opts.lr_model
# Start the actual training loop
val_dataset = problem.make_dataset(
size=opts.graph_size, num_samples=opts.val_size, filename=opts.val_dataset, distribution=opts.data_distribution)
if opts.resume:
epoch_resume = int(os.path.splitext(os.path.split(opts.resume)[-1])[0].split("-")[1])
torch.set_rng_state(load_data['rng_state'])
if opts.use_cuda:
torch.cuda.set_rng_state_all(load_data['cuda_rng_state'])
# Set the random states
# Dumping of state was done before epoch callback, so do that now (model is loaded)
baseline.epoch_callback(model, epoch_resume)
print("Resuming after {}".format(epoch_resume))
opts.epoch_start = epoch_resume + 1
if opts.eval_only:
validate(model, val_dataset, opts)
else:
for epoch in range(opts.epoch_start, opts.epoch_start + opts.n_epochs):
train_epoch(
model,
optimizer,
baseline,
lr_scheduler,
epoch,
val_dataset,
problem,
tb_logger,
opts
)
if __name__ == "__main__":
run(get_options())
# GTA-RL-master/options.py
import os
import time
import argparse
import torch
from utils.paths import find_next_path_id, createNextFileName
def get_options(args=None):
parser = argparse.ArgumentParser(
description="Attention based model for solving the Travelling Salesman Problem with Reinforcement Learning")
# Data
parser.add_argument('--problem', default='dynamic_tsp', help="The problem to solve, default 'dynamic_tsp'")
parser.add_argument('--graph_size', type=int, default=100, help="The size of the problem graph")
parser.add_argument('--batch_size', type=int, default=8, help='Number of instances per batch during training')
parser.add_argument('--epoch_size', type=int, default=12800, help='Number of instances per epoch during training')
parser.add_argument('--val_size', type=int, default=100,
help='Number of instances used for reporting validation performance')
parser.add_argument('--val_dataset', type=str, default=None, help='Dataset file to use for validation')
parser.add_argument('--use_single_time', action='store_true', help='Use a single time step data rather than the entire time')
# Model
parser.add_argument('--model', default='st_attention', help="Model: 'attention', 'pointer' or 'st_attention' (default)")
parser.add_argument('--embedding_dim', type=int, default=128, help='Dimension of input embedding')
parser.add_argument('--hidden_dim', type=int, default=128, help='Dimension of hidden layers in Enc/Dec')
parser.add_argument('--n_encode_layers', type=int, default=3,
help='Number of layers in the encoder/critic network')
parser.add_argument('--tanh_clipping', type=float, default=10.,
help='Clip the parameters to within +- this value using tanh. '
'Set to 0 to not perform any clipping.')
parser.add_argument('--normalization', default='batch', help="Normalization type, 'batch' (default) or 'instance'")
parser.add_argument('--temporal_decoder', action='store_true', default=True, help="Use temporal decoder")
# Training
parser.add_argument('--lr_model', type=float, default=1e-4, help="Set the learning rate for the actor network")
parser.add_argument('--lr_critic', type=float, default=1e-4, help="Set the learning rate for the critic network")
parser.add_argument('--lr_decay', type=float, default=1., help='Learning rate decay per epoch')
parser.add_argument('--eval_only', action='store_true', help='Set this value to only evaluate model')
parser.add_argument('--n_epochs', type=int, default=50, help='The number of epochs to train')
parser.add_argument('--seed', type=int, default=1234, help='Random seed to use')
parser.add_argument('--max_grad_norm', type=float, default=1.0,
help='Maximum L2 norm for gradient clipping, default 1.0 (0 to disable clipping)')
parser.add_argument('--no_cuda', action='store_true', help='Disable CUDA')
parser.add_argument('--exp_beta', type=float, default=0.8,
help='Exponential moving average baseline decay (default 0.8)')
parser.add_argument('--baseline', default=None,
help="Baseline to use: 'rollout', 'critic' or 'exponential'. Defaults to no baseline.")
parser.add_argument('--bl_alpha', type=float, default=0.05,
help='Significance in the t-test for updating rollout baseline')
parser.add_argument('--bl_warmup_epochs', type=int, default=None,
help='Number of epochs to warmup the baseline, default None means 1 for rollout (exponential '
'used for warmup phase), 0 otherwise. Can only be used with rollout baseline.')
parser.add_argument('--eval_batch_size', type=int, default=64,
help="Batch size to use during (baseline) evaluation")
parser.add_argument('--checkpoint_encoder', action='store_true',
help='Set to decrease memory usage by checkpointing encoder')
parser.add_argument('--shrink_size', type=int, default=None,
help='Shrink the batch size if at least this many instances in the batch are finished'
' to save memory (default None means no shrinking)')
parser.add_argument('--data_distribution', type=str, default=None,
help='Data distribution to use during training, defaults and options depend on problem.')
# Misc
parser.add_argument('--log_step', type=int, default=50, help='Log info every log_step steps')
parser.add_argument('--log_dir', default='logs/icde', help='Directory to write TensorBoard information to')
parser.add_argument('--run_name', default='run', help='Name to identify the run')
parser.add_argument('--output_dir', default='outputs/icde', help='Directory to write output models to')
parser.add_argument('--epoch_start', type=int, default=0,
help='Start at epoch # (relevant for learning rate decay)')
parser.add_argument('--checkpoint_epochs', type=int, default=1,
help='Save checkpoint every n epochs (default 1), 0 to save no checkpoints')
parser.add_argument('--load_path', help='Path to load model parameters and optimizer state from')
parser.add_argument('--resume', help='Resume from previous checkpoint file')
parser.add_argument('--no_tensorboard', action='store_true', help='Disable logging TensorBoard files')
parser.add_argument('--no_progress_bar', action='store_true', help='Disable progress bar')
opts = parser.parse_args(args)
opts.val_dataset = "./data/dynamic_tsp/dynamic_tsp50_validation_seed4321.pkl"
#opts.resume = "outputs/icde/dynamic_cvrp_20/run_7/epoch-49.pt"
opts.use_single_time = True
opts.use_cuda = torch.cuda.is_available() and not opts.no_cuda
dir, next_id = createNextFileName(os.path.join(opts.output_dir,"{}_{}".format(opts.problem, opts.graph_size)), opts.run_name)
opts.run_name = "{}_{}".format(opts.run_name, next_id)
opts.save_dir = os.path.join(
opts.output_dir,
"{}_{}".format(opts.problem, opts.graph_size),
opts.run_name
)
opts.baseline = 'rollout'
if opts.bl_warmup_epochs is None:
opts.bl_warmup_epochs = 1 if opts.baseline == 'rollout' else 0
assert (opts.bl_warmup_epochs == 0) or (opts.baseline == 'rollout')
assert opts.epoch_size % opts.batch_size == 0, "Epoch size must be integer multiple of batch size!"
return opts
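# Note: the hard-coded assignments above (val_dataset, use_single_time, baseline)
# override whatever is passed on the command line. A hypothetical invocation,
# assuming those overrides are removed first:
#   python run.py --problem dynamic_tsp --graph_size 20 --model st_attention \
#       --baseline rollout --n_epochs 50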
# GTA-RL-master/eval.py
import math
import torch
import os
import argparse
import numpy as np
import itertools
from tqdm import tqdm
from utils import load_model, move_to
from utils.data_utils import save_dataset
from torch.utils.data import DataLoader
import time
from datetime import timedelta
from utils.functions import parse_softmax_temperature
mp = torch.multiprocessing.get_context('spawn')
def get_best(sequences, cost, ids=None, batch_size=None):
"""
Ids contains [0, 0, 0, 1, 1, 2, ..., n, n, n] if 3 solutions found for 0th instance, 2 for 1st, etc
:param sequences:
:param lengths:
:param ids:
:return: list with n sequences and list with n lengths of solutions
"""
if ids is None:
idx = cost.argmin()
return sequences[idx:idx+1, ...], cost[idx:idx+1, ...]
splits = np.hstack([0, np.where(ids[:-1] != ids[1:])[0] + 1])
mincosts = np.minimum.reduceat(cost, splits)
group_lengths = np.diff(np.hstack([splits, len(ids)]))
all_argmin = np.flatnonzero(np.repeat(mincosts, group_lengths) == cost)
result = np.full(len(group_lengths) if batch_size is None else batch_size, -1, dtype=int)
result[ids[all_argmin[::-1]]] = all_argmin[::-1]
return [sequences[i] if i >= 0 else None for i in result], [cost[i] if i >= 0 else math.inf for i in result]
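# Worked example (illustrative): with ids = [0, 0, 1, 1, 1] and
# cost = [3, 1, 5, 2, 4], splits = [0, 2], so minimum.reduceat yields the
# per-instance minima [1, 2]; all_argmin then picks the flat indices [1, 3]
# where the cost equals its group minimum, and result maps them back per instance.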
def eval_dataset_mp(args):
(dataset_path, width, softmax_temp, opts, i, num_processes) = args
model, _ = load_model(opts.model)
val_size = opts.val_size // num_processes
dataset = model.problem.make_dataset(filename=dataset_path, num_samples=val_size, offset=opts.offset + val_size * i)
device = torch.device("cuda:{}".format(i))
return _eval_dataset(model, dataset, width, softmax_temp, opts, device)
def eval_dataset(dataset_path, width, softmax_temp, opts):
# Even with multiprocessing, we load the model here since it contains the name where to write results
model, _ = load_model(opts.model)
use_cuda = torch.cuda.is_available() and not opts.no_cuda
if opts.multiprocessing:
assert use_cuda, "Can only do multiprocessing with cuda"
num_processes = torch.cuda.device_count()
assert opts.val_size % num_processes == 0
with mp.Pool(num_processes) as pool:
results = list(itertools.chain.from_iterable(pool.map(
eval_dataset_mp,
[(dataset_path, width, softmax_temp, opts, i, num_processes) for i in range(num_processes)]
)))
else:
device = torch.device("cuda:0" if use_cuda else "cpu")
dataset = model.problem.make_dataset(filename=dataset_path, num_samples=opts.val_size, offset=opts.offset)
results = _eval_dataset(model, dataset, width, softmax_temp, opts, device)
# This is parallelism, even if we use multiprocessing (we report as if we did not use multiprocessing, e.g. 1 GPU)
parallelism = opts.eval_batch_size
costs, tours, durations = zip(*results) # Not really costs since they should be negative
print("Average cost: {} +- {}".format(np.mean(costs), 2 * np.std(costs) / np.sqrt(len(costs))))
print("Average serial duration: {} +- {}".format(
np.mean(durations), 2 * np.std(durations) / np.sqrt(len(durations))))
print("Average parallel duration: {}".format(np.mean(durations) / parallelism))
print("Calculated total duration: {}".format(timedelta(seconds=int(np.sum(durations) / parallelism))))
dataset_basename, ext = os.path.splitext(os.path.split(dataset_path)[-1])
model_name = "_".join(os.path.normpath(os.path.splitext(opts.model)[0]).split(os.sep)[-2:])
if opts.o is None:
results_dir = os.path.join(opts.results_dir, model.problem.NAME, dataset_basename)
os.makedirs(results_dir, exist_ok=True)
out_file = os.path.join(results_dir, "{}-{}-{}{}-t{}-{}-{}{}".format(
dataset_basename, model_name,
opts.decode_strategy,
width if opts.decode_strategy != 'greedy' else '',
softmax_temp, opts.offset, opts.offset + len(costs), ext
))
else:
out_file = opts.o
assert opts.f or not os.path.isfile(
out_file), "File already exists! Try running with -f option to overwrite."
save_dataset((results, parallelism), out_file)
return costs, tours, durations
def _eval_dataset(model, dataset, width, softmax_temp, opts, device):
model.to(device)
model.eval()
model.set_decode_type(
"greedy" if opts.decode_strategy in ('bs', 'greedy') else "sampling",
temp=softmax_temp)
dataloader = DataLoader(dataset, batch_size=opts.eval_batch_size)
results = []
for batch in tqdm(dataloader, disable=opts.no_progress_bar):
batch = move_to(batch, device)
start = time.time()
with torch.no_grad():
if opts.decode_strategy in ('sample', 'greedy'):
if opts.decode_strategy == 'greedy':
assert width == 0, "Do not set width when using greedy"
assert opts.eval_batch_size <= opts.max_calc_batch_size, \
"eval_batch_size should be smaller than calc batch size"
batch_rep = 1
iter_rep = 1
elif width * opts.eval_batch_size > opts.max_calc_batch_size:
assert opts.eval_batch_size == 1
assert width % opts.max_calc_batch_size == 0
batch_rep = opts.max_calc_batch_size
iter_rep = width // opts.max_calc_batch_size
else:
batch_rep = width
iter_rep = 1
assert batch_rep > 0
# This returns (batch_size, iter_rep shape)
sequences, costs = model.sample_many(batch, batch_rep=batch_rep, iter_rep=iter_rep)
batch_size = len(costs)
ids = torch.arange(batch_size, dtype=torch.int64, device=costs.device)
else:
assert opts.decode_strategy == 'bs'
cum_log_p, sequences, costs, ids, batch_size = model.beam_search(
batch, beam_size=width,
compress_mask=opts.compress_mask,
max_calc_batch_size=opts.max_calc_batch_size
)
if sequences is None:
sequences = [None] * batch_size
costs = [math.inf] * batch_size
else:
sequences, costs = get_best(
sequences.cpu().numpy(), costs.cpu().numpy(),
ids.cpu().numpy() if ids is not None else None,
batch_size
)
duration = time.time() - start
for seq, cost in zip(sequences, costs):
if model.problem.NAME == "tsp":
seq = seq.tolist() # No need to trim as all are same length
elif model.problem.NAME in ("cvrp", "sdvrp"):
seq = np.trim_zeros(seq).tolist() + [0] # Add depot
elif model.problem.NAME in ("op", "pctsp"):
seq = np.trim_zeros(seq) # We have the convention to exclude the depot
else:
assert False, "Unkown problem: {}".format(model.problem.NAME)
# Note VRP only
results.append((cost, seq, duration))
return results
from test import plot_tsp_with_data, solve_dynamic_euclidian_tsp
import pickle
if __name__ == "__main__":
parser = argparse.ArgumentParser()
#parser.add_argument("--datasets", type=str, default=None, help="Filename of the dataset(s) to evaluate")
parser.add_argument("datasets", nargs='+', help="Filename of the dataset(s) to evaluate")
parser.add_argument("-f", action='store_true', help="Set true to overwrite")
parser.add_argument("-o", default=None, help="Name of the results file to write")
parser.add_argument('--val_size', type=int, default=100,
help='Number of instances used for reporting validation performance')
parser.add_argument('--offset', type=int, default=0,
help='Offset where to start in dataset (default 0)')
parser.add_argument('--eval_batch_size', type=int, default=10,
help="Batch size to use during (baseline) evaluation")
# parser.add_argument('--decode_type', type=str, default='greedy',
# help='Decode type, greedy or sampling')
parser.add_argument('--width', type=int, nargs='+',
help='Sizes of beam to use for beam search (or number of samples for sampling), '
'0 to disable (default), -1 for infinite')
parser.add_argument('--decode_strategy', type=str,
help='Beam search (bs), Sampling (sample) or Greedy (greedy)')
parser.add_argument('--softmax_temperature', type=parse_softmax_temperature, default=1,
help="Softmax temperature (sampling or bs)")
parser.add_argument('--model', type=str)
parser.add_argument('--no_cuda', action='store_true', help='Disable CUDA')
parser.add_argument('--no_progress_bar', action='store_true', help='Disable progress bar')
parser.add_argument('--compress_mask', action='store_true', help='Compress mask into long')
parser.add_argument('--max_calc_batch_size', type=int, default=10000, help='Size for subbatches')
parser.add_argument('--results_dir', default='results', help="Name of results directory")
parser.add_argument('--multiprocessing', action='store_true',
help='Use multiprocessing to parallelize over multiple GPUs')
# plot
parser.add_argument('--plot', action='store_true', help='Use to plot the computed solution')
parser.add_argument('--plot_index', type=int, default=0, help='Index of the batch to plot')
parser.add_argument("--use_gurobi", action='store_true', help="Use gurobi optimizer to solve the TSP")
opts = parser.parse_args()
opts.f = True
#dynamic = True
#opts.plot = False
#opts.use_gurobi = False
#
#if dynamic:
# opts.datasets = ["data/dynamic_tsp/dynamic_tsp10_validation_seed4321.pkl"]
# opts.model = "pretrained/dynamic_tsp_10/GTA-RL/"
#else:
# opts.datasets = ["data/dynamic_tsp/dynamic_tsp50_validation_seed4321.pkl"]
# opts.model = "outputs/icde/dynamic_tsp_20/run_12/"
#
#opts.decode_strategy = "bs"
#opts.width = [5]
#opts.datasets = ["data/dynamic_tsp/dynamic_tsp10_validation_seed4321.pkl"]
print("Print: ", opts.datasets)
assert opts.o is None or (len(opts.datasets) == 1 and len(opts.width) <= 1), \
"Cannot specify result filename with more than one dataset or more than one width"
widths = opts.width if opts.width is not None else [0]
outputs = []
for width in widths:
for dataset_path in opts.datasets:
outputs.append(eval_dataset(dataset_path, width, opts.softmax_temperature, opts))
dataset = opts.datasets[0]
if opts.plot:
with open(dataset, 'rb') as f:
data = pickle.load(f)
plot_tsp_with_data(data[opts.plot_index], outputs[0][1][opts.plot_index], "RL")
if opts.use_gurobi:
with open(dataset, 'rb') as f:
data = pickle.load(f)
tour_length, tour_gb = solve_dynamic_euclidian_tsp(data[opts.plot_index])
plot_tsp_with_data(data[opts.plot_index], tour_gb, "GB")
# GTA-RL-master/train.py
import os
import time
from tqdm import tqdm
import torch
import math
from torch.utils.data import DataLoader
from torch.nn import DataParallel
from nets.attention_model import set_decode_type
from utils.log_utils import log_values
from utils import move_to
def get_inner_model(model):
return model.module if isinstance(model, DataParallel) else model
def validate(model, dataset, opts):
# Validate
print('Validating...')
cost = rollout(model, dataset, opts)
avg_cost = cost.mean()
print('Validation overall avg_cost: {} +- {}'.format(
avg_cost, torch.std(cost) / math.sqrt(len(cost))))
return avg_cost
def rollout(model, dataset, opts):
# Put in greedy evaluation mode!
set_decode_type(model, "greedy")
model.eval()
def eval_model_bat(bat):
with torch.no_grad():
cost, _ = model(move_to(bat, opts.device))
return cost.data.cpu()
return torch.cat([
eval_model_bat(bat)
for bat
in tqdm(DataLoader(dataset, batch_size=opts.eval_batch_size), disable=opts.no_progress_bar)
], 0)
def clip_grad_norms(param_groups, max_norm=math.inf):
"""
Clips the norms for all param groups to max_norm and returns gradient norms before clipping
:param optimizer:
:param max_norm:
:param gradient_norms_log:
:return: grad_norms, clipped_grad_norms: list with (clipped) gradient norms per group
"""
grad_norms = [
torch.nn.utils.clip_grad_norm_(
group['params'],
max_norm if max_norm > 0 else math.inf, # Inf so no clipping but still call to calc
norm_type=2
)
for group in param_groups
]
grad_norms_clipped = [min(g_norm, max_norm) for g_norm in grad_norms] if max_norm > 0 else grad_norms
return grad_norms, grad_norms_clipped
def train_epoch(model, optimizer, baseline, lr_scheduler, epoch, val_dataset, problem, tb_logger, opts):
print("Start train epoch {}, lr={} for run {}".format(epoch, optimizer.param_groups[0]['lr'], opts.run_name))
step = epoch * (opts.epoch_size // opts.batch_size)
start_time = time.time()
if not opts.no_tensorboard:
tb_logger.log_value('learnrate_pg0', optimizer.param_groups[0]['lr'], step)
# Generate new training data for each epoch
training_dataset = baseline.wrap_dataset(problem.make_dataset(
size=opts.graph_size, num_samples=opts.epoch_size, distribution=opts.data_distribution))
training_dataloader = DataLoader(training_dataset, batch_size=opts.batch_size, num_workers=0)
# Put model in train mode!
model.train()
set_decode_type(model, "sampling")
for batch_id, batch in enumerate(tqdm(training_dataloader, disable=opts.no_progress_bar)):
train_batch(
model,
optimizer,
baseline,
epoch,
batch_id,
step,
batch,
tb_logger,
opts
)
step += 1
epoch_duration = time.time() - start_time
print("Finished epoch {}, took {} s".format(epoch, time.strftime('%H:%M:%S', time.gmtime(epoch_duration))))
if (opts.checkpoint_epochs != 0 and epoch % opts.checkpoint_epochs == 0) or epoch == opts.n_epochs - 1:
print('Saving model and state...')
torch.save(
{
'model': get_inner_model(model).state_dict(),
'optimizer': optimizer.state_dict(),
'rng_state': torch.get_rng_state(),
'cuda_rng_state': torch.cuda.get_rng_state_all(),
'baseline': baseline.state_dict()
},
os.path.join(opts.save_dir, 'epoch-{}.pt'.format(epoch))
)
avg_reward = validate(model, val_dataset, opts)
if not opts.no_tensorboard:
tb_logger.log_value('val_avg_reward', avg_reward, epoch)
baseline.epoch_callback(model, epoch)
# lr_scheduler should be called at end of epoch
lr_scheduler.step()
def train_batch(
model,
optimizer,
baseline,
epoch,
batch_id,
step,
batch,
tb_logger,
opts
):
x, bl_val = baseline.unwrap_batch(batch)
x = move_to(x, opts.device)
bl_val = move_to(bl_val, opts.device) if bl_val is not None else None
# Evaluate model, get costs and log probabilities
cost, log_likelihood = model(x)
# Evaluate baseline, get baseline loss if any (only for critic)
bl_val, bl_loss = baseline.eval(x, cost) if bl_val is None else (bl_val, 0)
# Calculate loss
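# REINFORCE with baseline: the gradient of the expected cost is estimated as
# E[(cost - b) * grad log p(pi)]; subtracting the baseline value b reduces
# variance without biasing the estimator.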
reinforce_loss = ((cost - bl_val) * log_likelihood).mean()
loss = reinforce_loss + bl_loss
# Perform backward pass and optimization step
optimizer.zero_grad()
loss.backward()
# Clip gradient norms and get (clipped) gradient norms for logging
grad_norms = clip_grad_norms(optimizer.param_groups, opts.max_grad_norm)
optimizer.step()
# Logging
if step % int(opts.log_step) == 0:
log_values(cost, grad_norms, epoch, batch_id, step,
log_likelihood, reinforce_loss, bl_loss, tb_logger, opts)
# GTA-RL-master/nets/pointer_network.py
import torch
import torch.nn as nn
from torch.autograd import Variable
import math
import numpy as np
class Encoder(nn.Module):
"""Maps a graph represented as an input sequence
to a hidden vector"""
def __init__(self, input_dim, hidden_dim):
super(Encoder, self).__init__()
self.hidden_dim = hidden_dim
self.lstm = nn.LSTM(input_dim, hidden_dim)
self.init_hx, self.init_cx = self.init_hidden(hidden_dim)
def forward(self, x, hidden):
output, hidden = self.lstm(x, hidden)
return output, hidden
def init_hidden(self, hidden_dim):
"""Trainable initial hidden state"""
std = 1. / math.sqrt(hidden_dim)
enc_init_hx = nn.Parameter(torch.FloatTensor(hidden_dim))
enc_init_hx.data.uniform_(-std, std)
enc_init_cx = nn.Parameter(torch.FloatTensor(hidden_dim))
enc_init_cx.data.uniform_(-std, std)
return enc_init_hx, enc_init_cx
class Attention(nn.Module):
"""A generic attention module for a decoder in seq2seq"""
def __init__(self, dim, use_tanh=False, C=10):
super(Attention, self).__init__()
self.use_tanh = use_tanh
self.project_query = nn.Linear(dim, dim)
self.project_ref = nn.Conv1d(dim, dim, 1, 1)
self.C = C # tanh exploration
self.tanh = nn.Tanh()
self.v = nn.Parameter(torch.FloatTensor(dim))
self.v.data.uniform_(-(1. / math.sqrt(dim)), 1. / math.sqrt(dim))
def forward(self, query, ref):
"""
Args:
query: is the hidden state of the decoder at the current
time step. batch x dim
ref: the set of hidden states from the encoder.
sourceL x batch x hidden_dim
"""
# ref is now [batch_size x hidden_dim x sourceL]
ref = ref.permute(1, 2, 0)
q = self.project_query(query).unsqueeze(2) # batch x dim x 1
e = self.project_ref(ref) # batch_size x hidden_dim x sourceL
# expand the query by sourceL
# batch x dim x sourceL
expanded_q = q.repeat(1, 1, e.size(2))
# batch x 1 x hidden_dim
v_view = self.v.unsqueeze(0).expand(
expanded_q.size(0), len(self.v)).unsqueeze(1)
# [batch_size x 1 x hidden_dim] * [batch_size x hidden_dim x sourceL]
u = torch.bmm(v_view, self.tanh(expanded_q + e)).squeeze(1)
if self.use_tanh:
logits = self.C * self.tanh(u)
else:
logits = u
return e, logits
class Decoder(nn.Module):
def __init__(self,
embedding_dim,
hidden_dim,
tanh_exploration,
use_tanh,
n_glimpses=1,
mask_glimpses=True,
mask_logits=True):
super(Decoder, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.n_glimpses = n_glimpses
self.mask_glimpses = mask_glimpses
self.mask_logits = mask_logits
self.use_tanh = use_tanh
self.tanh_exploration = tanh_exploration
self.decode_type = None # Needs to be set explicitly before use
self.lstm = nn.LSTMCell(embedding_dim, hidden_dim)
self.pointer = Attention(hidden_dim, use_tanh=use_tanh, C=tanh_exploration)
self.glimpse = Attention(hidden_dim, use_tanh=False)
self.sm = nn.Softmax(dim=1)
def update_mask(self, mask, selected):
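# Marks the newly selected node as visited: scatter_ along dim 1 sets, per row,
# the column given by `selected` to True on a copy of the mask.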
return mask.clone().scatter_(1, selected.unsqueeze(-1), True)
def recurrence(self, x, h_in, prev_mask, prev_idxs, step, context):
logit_mask = self.update_mask(prev_mask, prev_idxs) if prev_idxs is not None else prev_mask
logits, h_out = self.calc_logits(x, h_in, logit_mask, context, self.mask_glimpses, self.mask_logits)
# Calculate log_softmax for better numerical stability
log_p = torch.log_softmax(logits, dim=1)
probs = log_p.exp()
if not self.mask_logits:
# If self.mask_logits, this would be redundant, otherwise we must mask to make sure we don't resample
# Note that as a result the vector of probs may not sum to one (this is OK for .multinomial sampling)
# But practically by not masking the logits, a model is learned over all sequences (also infeasible)
# while only during sampling feasibility is enforced (a.k.a. by setting to 0. here)
probs[logit_mask] = 0.
# For consistency we should also mask out in log_p, but the values set to 0 will not be sampled and
# Therefore not be used by the reinforce estimator
return h_out, log_p, probs, logit_mask
def calc_logits(self, x, h_in, logit_mask, context, mask_glimpses=None, mask_logits=None):
if mask_glimpses is None:
mask_glimpses = self.mask_glimpses
if mask_logits is None:
mask_logits = self.mask_logits
hy, cy = self.lstm(x, h_in)
g_l, h_out = hy, (hy, cy)
for i in range(self.n_glimpses):
ref, logits = self.glimpse(g_l, context)
# For the glimpses, only mask before softmax so we have always an L1 norm 1 readout vector
if mask_glimpses:
logits[logit_mask] = -np.inf
# [batch_size x h_dim x sourceL] * [batch_size x sourceL x 1] =
# [batch_size x h_dim x 1]
g_l = torch.bmm(ref, self.sm(logits).unsqueeze(2)).squeeze(2)
_, logits = self.pointer(g_l, context)
# Masking before softmax makes probs sum to one
if mask_logits:
logits[logit_mask] = -np.inf
return logits, h_out
def forward(self, decoder_input, embedded_inputs, hidden, context, eval_tours=None):
"""
Args:
decoder_input: The initial input to the decoder
size is [batch_size x embedding_dim]. Trainable parameter.
embedded_inputs: [sourceL x batch_size x embedding_dim]
hidden: the prev hidden state, size is [batch_size x hidden_dim].
Initially this is set to (enc_h[-1], enc_c[-1])
context: encoder outputs, [sourceL x batch_size x hidden_dim]
"""
batch_size = context.size(1)
outputs = []
selections = []
steps = range(embedded_inputs.size(0))
idxs = None
mask = Variable(
embedded_inputs.data.new().byte().new(embedded_inputs.size(1), embedded_inputs.size(0)).zero_(),
requires_grad=False
)
for i in steps:
hidden, log_p, probs, mask = self.recurrence(decoder_input, hidden, mask, idxs, i, context)
# select the next inputs for the decoder [batch_size x hidden_dim]
idxs = self.decode(
probs,
mask
) if eval_tours is None else eval_tours[:, i]
idxs = idxs.detach() # Otherwise pytorch complains it wants a reward; todo: implement this more properly?
# Gather input embedding of selected
decoder_input = torch.gather(
embedded_inputs,
0,
idxs.contiguous().view(1, batch_size, 1).expand(1, batch_size, *embedded_inputs.size()[2:])
).squeeze(0)
# use outs to point to next object
outputs.append(log_p)
selections.append(idxs)
return (torch.stack(outputs, 1), torch.stack(selections, 1)), hidden
def decode(self, probs, mask):
if self.decode_type == "greedy":
_, idxs = probs.max(1)
assert not mask.gather(1, idxs.unsqueeze(-1)).data.any(), \
"Decode greedy: infeasible action has maximum probability"
elif self.decode_type == "sampling":
idxs = probs.multinomial(1).squeeze(1)
# Check if sampling went OK, can go wrong due to bug on GPU
while mask.gather(1, idxs.unsqueeze(-1)).data.any():
print(' [!] resampling due to race condition')
idxs = probs.multinomial(1).squeeze(1)
else:
assert False, "Unknown decode type"
return idxs
class CriticNetworkLSTM(nn.Module):
"""Useful as a baseline in REINFORCE updates"""
def __init__(self,
embedding_dim,
hidden_dim,
n_process_block_iters,
tanh_exploration,
use_tanh):
super(CriticNetworkLSTM, self).__init__()
self.hidden_dim = hidden_dim
self.n_process_block_iters = n_process_block_iters
self.encoder = Encoder(embedding_dim, hidden_dim)
self.process_block = Attention(hidden_dim, use_tanh=use_tanh, C=tanh_exploration)
self.sm = nn.Softmax(dim=1)
self.decoder = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, 1)
)
def forward(self, inputs):
"""
Args:
inputs: [embedding_dim x batch_size x sourceL] of embedded inputs
"""
inputs = inputs.transpose(0, 1).contiguous()
encoder_hx = self.encoder.init_hx.unsqueeze(0).repeat(inputs.size(1), 1).unsqueeze(0)
encoder_cx = self.encoder.init_cx.unsqueeze(0).repeat(inputs.size(1), 1).unsqueeze(0)
# encoder forward pass
enc_outputs, (enc_h_t, enc_c_t) = self.encoder(inputs, (encoder_hx, encoder_cx))
# grab the hidden state and process it via the process block
process_block_state = enc_h_t[-1]
for i in range(self.n_process_block_iters):
ref, logits = self.process_block(process_block_state, enc_outputs)
process_block_state = torch.bmm(ref, self.sm(logits).unsqueeze(2)).squeeze(2)
# produce the final scalar output
out = self.decoder(process_block_state)
return out
class PointerNetwork(nn.Module):
def __init__(self,
embedding_dim,
hidden_dim,
problem,
n_encode_layers=None,
tanh_clipping=10.,
mask_inner=True,
mask_logits=True,
normalization=None,
**kwargs):
super(PointerNetwork, self).__init__()
self.problem = problem
assert problem.NAME == "tsp", "Pointer Network only supported for TSP"
self.input_dim = 2
self.encoder = Encoder(
embedding_dim,
hidden_dim)
self.decoder = Decoder(
embedding_dim,
hidden_dim,
tanh_exploration=tanh_clipping,
use_tanh=tanh_clipping > 0,
n_glimpses=1,
mask_glimpses=mask_inner,
mask_logits=mask_logits
)
# Trainable initial hidden states
std = 1. / math.sqrt(embedding_dim)
self.decoder_in_0 = nn.Parameter(torch.FloatTensor(embedding_dim))
self.decoder_in_0.data.uniform_(-std, std)
self.embedding = nn.Parameter(torch.FloatTensor(self.input_dim, embedding_dim))
self.embedding.data.uniform_(-std, std)
def set_decode_type(self, decode_type):
self.decoder.decode_type = decode_type
def forward(self, inputs, eval_tours=None, return_pi=False):
batch_size, graph_size, input_dim = inputs.size()
embedded_inputs = torch.mm(
inputs.transpose(0, 1).contiguous().view(-1, input_dim),
self.embedding
).view(graph_size, batch_size, -1)
# query the actor net for the input indices
# making up the output, and the pointer attn
_log_p, pi = self._inner(embedded_inputs, eval_tours)
cost, mask = self.problem.get_costs(inputs, pi)
# Log likelihood is calculated within the model since returning it per action does not work well with
# DataParallel since sequences can be of different lengths
ll = self._calc_log_likelihood(_log_p, pi, mask)
if return_pi:
return cost, ll, pi
return cost, ll
def _calc_log_likelihood(self, _log_p, a, mask):
# Get log_p corresponding to selected actions
log_p = _log_p.gather(2, a.unsqueeze(-1)).squeeze(-1)
# Optional: mask out actions irrelevant to objective so they do not get reinforced
if mask is not None:
log_p[mask] = 0
assert (log_p > -1000).data.all(), "Logprobs should not be -inf, check sampling procedure!"
# Calculate log_likelihood
return log_p.sum(1)
def _inner(self, inputs, eval_tours=None):
encoder_hx = encoder_cx = Variable(
torch.zeros(1, inputs.size(1), self.encoder.hidden_dim, out=inputs.data.new()),
requires_grad=False
)
# encoder forward pass
enc_h, (enc_h_t, enc_c_t) = self.encoder(inputs, (encoder_hx, encoder_cx))
dec_init_state = (enc_h_t[-1], enc_c_t[-1])
# repeat decoder_in_0 across batch
decoder_input = self.decoder_in_0.unsqueeze(0).repeat(inputs.size(1), 1)
(pointer_probs, input_idxs), dec_hidden_t = self.decoder(decoder_input,
inputs,
dec_init_state,
enc_h,
eval_tours)
return pointer_probs, input_idxs
# GTA-RL-master/nets/st_attention_model.py
import torch
from torch import nn
from torch.utils.checkpoint import checkpoint
import math
from typing import NamedTuple
from utils.tensor_functions import compute_in_batches
from nets.graph_encoder import GraphAttentionEncoder
from torch.nn import DataParallel
from utils.beam_search import CachedLookup
from utils.functions import sample_many
def set_decode_type(model, decode_type):
if isinstance(model, DataParallel):
model = model.module
model.set_decode_type(decode_type)
class AttentionModelFixed(NamedTuple):
"""
Context for AttentionModel decoder that is fixed during decoding so can be precomputed/cached
This class allows for efficient indexing of multiple Tensors at once
"""
node_embeddings: torch.Tensor
context_node_projected: torch.Tensor
glimpse_key: torch.Tensor
glimpse_val: torch.Tensor
logit_key: torch.Tensor
def __getitem__(self, key):
assert torch.is_tensor(key) or isinstance(key, slice)
return AttentionModelFixed(
node_embeddings=self.node_embeddings[key],
context_node_projected=self.context_node_projected[key],
glimpse_key=self.glimpse_key[:, key], # dim 0 are the heads
glimpse_val=self.glimpse_val[:, key], # dim 0 are the heads
logit_key=self.logit_key[key]
)
class StAttentionModel(nn.Module):
def __init__(self,
embedding_dim,
hidden_dim,
problem,
n_encode_layers=2,
tanh_clipping=10.,
mask_inner=True,
mask_logits=True,
normalization='batch',
n_heads=8,
checkpoint_encoder=False,
shrink_size=None,
use_single_time=False,
sum_encoder=False):
super(StAttentionModel, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.n_encode_layers = n_encode_layers
self.decode_type = None
self.temp = 1.0
self.allow_partial = problem.NAME == 'sdvrp'
self.is_vrp = problem.NAME == 'cvrp' or problem.NAME == 'sdvrp'
self.is_orienteering = problem.NAME == 'op'
self.is_pctsp = problem.NAME == 'pctsp'
self.tanh_clipping = tanh_clipping
self.mask_inner = mask_inner
self.mask_logits = mask_logits
self.problem = problem
self.n_heads = n_heads
self.checkpoint_encoder = checkpoint_encoder
self.shrink_size = shrink_size
self.use_single_time = use_single_time
self.sum_encoder = sum_encoder
# Problem specific context parameters (placeholder and step context dimension)
if self.is_vrp or self.is_orienteering or self.is_pctsp:
# Embedding of last node + remaining_capacity / remaining length / remaining prize to collect
step_context_dim = embedding_dim + 1
if self.is_pctsp:
node_dim = 4 # x, y, expected_prize, penalty
else:
node_dim = 3 # x, y, demand / prize
# Special embedding projection for depot node
self.init_embed_depot = nn.Linear(2, embedding_dim)
if self.is_vrp and self.allow_partial: # Need to include the demand if split delivery allowed
self.project_node_step = nn.Linear(1, 3 * embedding_dim, bias=False)
else: # TSP
assert problem.NAME == "tsp", "Unsupported problem: {}".format(problem.NAME)
step_context_dim = 2 * embedding_dim # Embedding of first and last node
node_dim = 2 # x, y
# Learned input symbols for first action
self.W_placeholder = nn.Parameter(torch.Tensor(2 * embedding_dim))
self.W_placeholder.data.uniform_(-1, 1) # Placeholder should be in range of activations
self.init_embed = nn.Linear(node_dim, embedding_dim)
self.embedder = GraphAttentionEncoder(
n_heads=n_heads,
embed_dim=embedding_dim,
n_layers=self.n_encode_layers,
normalization=normalization,
st_attention=True,
is_vrp=self.is_vrp
)
# For each node we compute (glimpse key, glimpse value, logit key) so 3 * embedding_dim
self.project_node_embeddings = nn.Linear(embedding_dim, 3 * embedding_dim, bias=False)
self.project_fixed_context = nn.Linear(embedding_dim, embedding_dim, bias=False)
self.project_step_context = nn.Linear(step_context_dim, embedding_dim, bias=False)
assert embedding_dim % n_heads == 0
# Note n_heads * val_dim == embedding_dim so input to project_out is embedding_dim
self.project_out = nn.Linear(embedding_dim, embedding_dim, bias=False)
def set_decode_type(self, decode_type, temp=None):
self.decode_type = decode_type
if temp is not None: # Do not change temperature if not provided
self.temp = temp
def forward(self, input, return_pi=False):
"""
:param input: (batch_size, graph_size, node_dim) input node features or dictionary with multiple tensors
:param return_pi: whether to return the output sequences, this is optional as it is not compatible with
using DataParallel as the results may be of different lengths on different GPUs
:return:
"""
original_input = input
if self.checkpoint_encoder and self.training: # Only checkpoint if we need gradients
embeddings, _ = checkpoint(self.embedder, self._init_embed(input))
else:
if not self.use_single_time:
embeddings, _ = self.embedder(self._init_embed(input))
else:
embeddings = self._init_embed(input)
#if len(input.size()) == 4:
# embeddings = embeddings[:, 0, :, :]
# input = input[:, 0, :, :]
_log_p, pi = self._inner(input, embeddings)
cost, mask = self.problem.get_costs(original_input, pi)
# Log likelihood is calculated within the model since returning it per action does not work well with
# DataParallel since sequences can be of different lengths
ll = self._calc_log_likelihood(_log_p, pi, mask)
if return_pi:
return cost, ll, pi
return cost, ll
def beam_search(self, *args, **kwargs):
return self.problem.beam_search(*args, **kwargs, model=self, dynamic=True)
def precompute_fixed(self, input):
embeddings, _ = self.embedder(self._init_embed(input))
# Use a CachedLookup such that if we repeatedly index this object with the same index we only need to do
# the lookup once... this is the case if all elements in the batch have maximum batch size
return CachedLookup(self._precompute(embeddings))
def propose_expansions(self, beam, fixed, expand_size=None, normalize=False, max_calc_batch_size=4096):
# First dim = batch_size * cur_beam_size
log_p_topk, ind_topk = compute_in_batches(
lambda b: self._get_log_p_topk(fixed[b.ids], b.state, k=expand_size, normalize=normalize),
max_calc_batch_size, beam, n=beam.size()
)
assert log_p_topk.size(1) == 1, "Can only have single step"
# This will broadcast, calculate log_p (score) of expansions
score_expand = beam.score[:, None] + log_p_topk[:, 0, :]
# We flatten the action as we need to filter and this cannot be done in 2d
flat_action = ind_topk.view(-1)
flat_score = score_expand.view(-1)
flat_feas = flat_score > -1e10 # != -math.inf triggers
# Parent is row idx of ind_topk, can be found by enumerating elements and dividing by number of columns
flat_parent = torch.arange(flat_action.size(-1), out=flat_action.new()) // ind_topk.size(-1)
# Filter infeasible
feas_ind_2d = torch.nonzero(flat_feas)
if len(feas_ind_2d) == 0:
# Too bad, no feasible expansions at all :(
return None, None, None
feas_ind = feas_ind_2d[:, 0]
return flat_parent[feas_ind], flat_action[feas_ind], flat_score[feas_ind]
def _calc_log_likelihood(self, _log_p, a, mask):
# Get log_p corresponding to selected actions
log_p = _log_p.gather(2, a.unsqueeze(-1)).squeeze(-1)
# Optional: mask out actions irrelevant to objective so they do not get reinforced
if mask is not None:
log_p[mask] = 0
assert (log_p > -1000).data.all(), "Logprobs should not be -inf, check sampling procedure!"
# Calculate log_likelihood
return log_p.sum(1)
def _init_embed(self, input):
if self.is_vrp or self.is_orienteering or self.is_pctsp:
_, time, _, _ = input['loc'].size()
if self.is_vrp:
features = ('demand', )
elif self.is_orienteering:
features = ('prize', )
else:
assert self.is_pctsp
features = ('deterministic_prize', 'penalty')
return torch.cat(
(
self.init_embed_depot(input['depot'])[:, None, None, :].expand(-1, time, 1, -1),
self.init_embed(torch.cat((
input['loc'],
input['demand'][:, None, :, None].expand(-1, time, -1, -1)
), -1))
),
2
)
# TSP
return self.init_embed(input)
def _inner(self, input, embeddings):
outputs = []
sequences = []
if self.sum_encoder:
g = embeddings.mean(dim=1)
state = self.problem.make_state(g)
fixed = self._precompute(g)
else:
state = self.problem.make_state(input=input, index=0)
# Compute keys, values for the glimpse and keys for the logits once as they can be reused in every step
#fixed = self._precompute(embeddings[:, 0, :, :])
batch_size = state.ids.size(0)
# Perform decoding steps
i = 0
current_emb = 0
while not (self.shrink_size is None and state.all_finished()):
if self.use_single_time:
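# Re-encode lazily in windows of 20 time steps: one encoder pass covers the
# next 20 steps and the cached embeddings are reused until the window runs out.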
if i % 20 == 0:
current_emb, _ = self.embedder(embeddings[:, i:i+20, :, :])
fixed = self._precompute(current_emb[:, 0, :, :])
state = state.update_state(input=input, index=i)
else:
fixed = self._precompute(current_emb[:, i % 20, :, :])
state = state.update_state(input=input, index=i)
elif self.sum_encoder:
pass
else:
fixed = self._precompute(embeddings[:, i, :, :])
state = state.update_state(input=input, index=i)
if self.shrink_size is not None:
unfinished = torch.nonzero(state.get_finished() == 0)
if len(unfinished) == 0:
break
unfinished = unfinished[:, 0]
# Check if we can shrink by at least shrink_size and if this leaves at least 16
# (otherwise batch norm will not work well and it is inefficient anyway)
if 16 <= len(unfinished) <= state.ids.size(0) - self.shrink_size:
# Filter states
state = state[unfinished]
fixed = fixed[unfinished]
log_p, mask = self._get_log_p(fixed, state)
# Select the indices of the next nodes in the sequences, result (batch_size) long
selected = self._select_node(log_p.exp()[:, 0, :], mask[:, 0, :]) # Squeeze out steps dimension
state = state.update(selected)
# Now make log_p, selected desired output size by 'unshrinking'
if self.shrink_size is not None and state.ids.size(0) < batch_size:
log_p_, selected_ = log_p, selected
log_p = log_p_.new_zeros(batch_size, *log_p_.size()[1:])
selected = selected_.new_zeros(batch_size)
log_p[state.ids[:, 0]] = log_p_
selected[state.ids[:, 0]] = selected_
# Collect output of step
outputs.append(log_p[:, 0, :])
sequences.append(selected)
i += 1
#state = self.problem.make_state(loc=input, index=i)
# Collected lists, return Tensor
return torch.stack(outputs, 1), torch.stack(sequences, 1)
def sample_many(self, input, batch_rep=1, iter_rep=1):
"""
:param input: (batch_size, graph_size, node_dim) input node features
:return:
"""
# Bit ugly but we need to pass the embeddings as well.
# Making a tuple will not work with the problem.get_cost function
return sample_many(
lambda input: self._inner(*input), # Need to unpack tuple into arguments
lambda input, pi: self.problem.get_costs(input[0], pi), # Don't need embeddings as input to get_costs
(input, self.embedder(self._init_embed(input))[0]), # Pack input with embeddings (additional input)
batch_rep, iter_rep
)
def _select_node(self, probs, mask):
assert (probs == probs).all(), "Probs should not contain any nans"
if self.decode_type == "greedy":
_, selected = probs.max(1)
assert not mask.gather(1, selected.unsqueeze(
-1)).data.any(), "Decode greedy: infeasible action has maximum probability"
elif self.decode_type == "sampling":
selected = probs.multinomial(1).squeeze(1)
# Check if sampling went OK, can go wrong due to bug on GPU
# See https://discuss.pytorch.org/t/bad-behavior-of-multinomial-function/10232
while mask.gather(1, selected.unsqueeze(-1)).data.any():
print('Sampled bad values, resampling!')
selected = probs.multinomial(1).squeeze(1)
else:
assert False, "Unknown decode type"
return selected
def _precompute(self, embeddings, num_steps=1):
# The fixed context projection of the graph embedding is calculated only once for efficiency
graph_embed = embeddings.mean(1)
# fixed context = (batch_size, 1, embed_dim) to make broadcastable with parallel timesteps
fixed_context = self.project_fixed_context(graph_embed)[:, None, :]
# The projection of the node embeddings for the attention is calculated once up front
glimpse_key_fixed, glimpse_val_fixed, logit_key_fixed = \
self.project_node_embeddings(embeddings[:, None, :, :]).chunk(3, dim=-1)
# No need to rearrange key for logit as there is a single head
fixed_attention_node_data = (
self._make_heads(glimpse_key_fixed, num_steps),
self._make_heads(glimpse_val_fixed, num_steps),
logit_key_fixed.contiguous()
)
return AttentionModelFixed(embeddings, fixed_context, *fixed_attention_node_data)
def _get_log_p_topk(self, fixed, state, k=None, normalize=True):
log_p, _ = self._get_log_p(fixed, state, normalize=normalize)
# Return topk
if k is not None and k < log_p.size(-1):
return log_p.topk(k, -1)
# Return all, note different from torch.topk this does not give error if less than k elements along dim
return (
log_p,
torch.arange(log_p.size(-1), device=log_p.device, dtype=torch.int64).repeat(log_p.size(0), 1)[:, None, :]
)
def _get_log_p(self, fixed, state, normalize=True):
# Compute query = context node embedding
query = fixed.context_node_projected + \
self.project_step_context(self._get_parallel_step_context(fixed.node_embeddings, state))
# Compute keys and values for the nodes
glimpse_K, glimpse_V, logit_K = self._get_attention_node_data(fixed, state)
# Compute the mask
mask = state.get_mask()
# Compute logits (unnormalized log_p)
log_p, glimpse = self._one_to_many_logits(query, glimpse_K, glimpse_V, logit_K, mask)
if normalize:
log_p = torch.log_softmax(log_p / self.temp, dim=-1)
assert not torch.isnan(log_p).any()
return log_p, mask
def _get_parallel_step_context(self, embeddings, state, from_depot=False):
"""
Returns the context per step, optionally for multiple steps at once (for efficient evaluation of the model)
:param embeddings: (batch_size, graph_size, embed_dim)
:param prev_a: (batch_size, num_steps)
:param first_a: Only used when num_steps = 1, action of first step or None if first step
:return: (batch_size, num_steps, context_dim)
"""
current_node = state.get_current_node()
batch_size, num_steps = current_node.size()
if self.is_vrp:
# Embedding of previous node + remaining capacity
if from_depot:
# 1st dimension is node idx, but we do not squeeze it since we want to insert step dimension
# i.e. we actually want embeddings[:, 0, :][:, None, :] which is equivalent
return torch.cat(
(
embeddings[:, 0:1, :].expand(batch_size, num_steps, embeddings.size(-1)),
# used capacity is 0 after visiting depot
self.problem.VEHICLE_CAPACITY - torch.zeros_like(state.used_capacity[:, :, None])
),
-1
)
else:
return torch.cat(
(
torch.gather(
embeddings,
1,
current_node.contiguous()
.view(batch_size, num_steps, 1)
.expand(batch_size, num_steps, embeddings.size(-1))
).view(batch_size, num_steps, embeddings.size(-1)),
self.problem.VEHICLE_CAPACITY - state.used_capacity[:, :, None]
),
-1
)
elif self.is_orienteering or self.is_pctsp:
return torch.cat(
(
torch.gather(
embeddings,
1,
current_node.contiguous()
.view(batch_size, num_steps, 1)
.expand(batch_size, num_steps, embeddings.size(-1))
).view(batch_size, num_steps, embeddings.size(-1)),
(
state.get_remaining_length()[:, :, None]
if self.is_orienteering
else state.get_remaining_prize_to_collect()[:, :, None]
)
),
-1
)
else: # TSP
if num_steps == 1: # We need to special-case when there is only 1 step; it may or may not be the first
if state.i.item() == 0:
# First and only step, ignore prev_a (this is a placeholder)
return self.W_placeholder[None, None, :].expand(batch_size, 1, self.W_placeholder.size(-1))
else:
return embeddings.gather(
1,
torch.cat((state.first_a, current_node), 1)[:, :, None].expand(batch_size, 2, embeddings.size(-1))
).view(batch_size, 1, -1)
# More than one step, assume always starting with first
embeddings_per_step = embeddings.gather(
1,
current_node[:, 1:, None].expand(batch_size, num_steps - 1, embeddings.size(-1))
)
return torch.cat((
# First step placeholder, cat in dim 1 (time steps)
self.W_placeholder[None, None, :].expand(batch_size, 1, self.W_placeholder.size(-1)),
# Second step, concatenate embedding of first with embedding of current/previous (in dim 2, context dim)
torch.cat((
embeddings_per_step[:, 0:1, :].expand(batch_size, num_steps - 1, embeddings.size(-1)),
embeddings_per_step
), 2)
), 1)
def _one_to_many_logits(self, query, glimpse_K, glimpse_V, logit_K, mask):
batch_size, num_steps, embed_dim = query.size()
key_size = val_size = embed_dim // self.n_heads
# Compute the glimpse, rearrange dimensions so the dimensions are (n_heads, batch_size, num_steps, 1, key_size)
glimpse_Q = query.view(batch_size, num_steps, self.n_heads, 1, key_size).permute(2, 0, 1, 3, 4)
# Batch matrix multiplication to compute compatibilities (n_heads, batch_size, num_steps, graph_size)
compatibility = torch.matmul(glimpse_Q, glimpse_K.transpose(-2, -1)) / math.sqrt(glimpse_Q.size(-1))
if self.mask_inner:
assert self.mask_logits, "Cannot mask inner without masking logits"
compatibility[mask[None, :, :, None, :].expand_as(compatibility)] = -math.inf
# Batch matrix multiplication to compute heads (n_heads, batch_size, num_steps, val_size)
heads = torch.matmul(torch.softmax(compatibility, dim=-1), glimpse_V)
# Project to get glimpse/updated context node embedding (batch_size, num_steps, embedding_dim)
glimpse = self.project_out(
heads.permute(1, 2, 3, 0, 4).contiguous().view(-1, num_steps, 1, self.n_heads * val_size))
# Now projecting the glimpse is not needed since this can be absorbed into project_out
# final_Q = self.project_glimpse(glimpse)
final_Q = glimpse
# Batch matrix multiplication to compute logits (batch_size, num_steps, graph_size)
# logits = 'compatibility'
logits = torch.matmul(final_Q, logit_K.transpose(-2, -1)).squeeze(-2) / math.sqrt(final_Q.size(-1))
# From the logits compute the probabilities by clipping, masking and softmax
if self.tanh_clipping > 0:
logits = torch.tanh(logits) * self.tanh_clipping
if self.mask_logits:
logits[mask] = -math.inf
return logits, glimpse.squeeze(-2)
def _get_attention_node_data(self, fixed, state):
if self.is_vrp and self.allow_partial:
# Need to provide information of how much each node has already been served
# Clone demands as they are needed by the backprop whereas they are updated later
glimpse_key_step, glimpse_val_step, logit_key_step = \
self.project_node_step(state.demands_with_depot[:, :, :, None].clone()).chunk(3, dim=-1)
# Projection of concatenation is equivalent to addition of projections but this is more efficient
return (
fixed.glimpse_key + self._make_heads(glimpse_key_step),
fixed.glimpse_val + self._make_heads(glimpse_val_step),
fixed.logit_key + logit_key_step,
)
# TSP or VRP without split delivery
return fixed.glimpse_key, fixed.glimpse_val, fixed.logit_key
def _make_heads(self, v, num_steps=None):
assert num_steps is None or v.size(1) == 1 or v.size(1) == num_steps
return (
v.contiguous().view(v.size(0), v.size(1), v.size(2), self.n_heads, -1)
.expand(v.size(0), v.size(1) if num_steps is None else num_steps, v.size(2), self.n_heads, -1)
.permute(3, 0, 1, 2, 4) # (n_heads, batch_size, num_steps, graph_size, head_dim)
)
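# A minimal shape sketch of _make_heads (all sizes are assumptions): with
# embed_dim = 128 and n_heads = 8 each head gets head_dim = 16, and a
# (batch_size, 1, graph_size, embed_dim) tensor is rearranged into
# (n_heads, batch_size, num_steps, graph_size, head_dim).
def _example_make_heads_shapes():
v = torch.randn(64, 1, 20, 128) # (batch_size, 1, graph_size, embed_dim)
heads = v.view(64, 1, 20, 8, 16).permute(3, 0, 1, 2, 4)
assert heads.shape == (8, 64, 1, 20, 16)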
| 23,874 | 41.940647 | 122 | py |
GTA-RL | GTA-RL-master/nets/attention_model.py | import torch
from torch import nn
from torch.utils.checkpoint import checkpoint
import math
from typing import NamedTuple
from utils.tensor_functions import compute_in_batches
from nets.graph_encoder import GraphAttentionEncoder
from torch.nn import DataParallel
from utils.beam_search import CachedLookup
from utils.functions import sample_many
def set_decode_type(model, decode_type):
if isinstance(model, DataParallel):
model = model.module
model.set_decode_type(decode_type)
class AttentionModelFixed(NamedTuple):
"""
Context for AttentionModel decoder that is fixed during decoding so can be precomputed/cached
This class allows for efficient indexing of multiple Tensors at once
"""
node_embeddings: torch.Tensor
context_node_projected: torch.Tensor
glimpse_key: torch.Tensor
glimpse_val: torch.Tensor
logit_key: torch.Tensor
def __getitem__(self, key):
assert torch.is_tensor(key) or isinstance(key, slice)
return AttentionModelFixed(
node_embeddings=self.node_embeddings[key],
context_node_projected=self.context_node_projected[key],
glimpse_key=self.glimpse_key[:, key], # dim 0 are the heads
glimpse_val=self.glimpse_val[:, key], # dim 0 are the heads
logit_key=self.logit_key[key]
)
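# Illustrative sketch (the index below is an assumption): glimpse_key and
# glimpse_val carry the head dimension first, so selecting a subset of the batch
# happens on dim 1 for those fields and on dim 0 for the others, e.g. when
# shrinking the batch to the unfinished instances:
# unfinished = torch.tensor([0, 2, 5])
# smaller_fixed = fixed[unfinished] # same fields, batch reduced to 3 instances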
class AttentionModel(nn.Module):
def __init__(self,
embedding_dim,
hidden_dim,
problem,
n_encode_layers=2,
tanh_clipping=10.,
mask_inner=True,
mask_logits=True,
normalization='batch',
n_heads=8,
checkpoint_encoder=False,
shrink_size=None,
use_single_time=False):
super(AttentionModel, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.n_encode_layers = n_encode_layers
self.decode_type = None
self.temp = 1.0
self.allow_partial = problem.NAME == 'sdvrp'
self.is_vrp = problem.NAME == 'cvrp' or problem.NAME == 'sdvrp'
self.is_orienteering = problem.NAME == 'op'
self.is_pctsp = problem.NAME == 'pctsp'
self.is_dynamic_tsp = problem.NAME == 'dynamic_tsp'
self.tanh_clipping = tanh_clipping
self.mask_inner = mask_inner
self.mask_logits = mask_logits
self.problem = problem
self.n_heads = n_heads
self.checkpoint_encoder = checkpoint_encoder
self.shrink_size = shrink_size
self.use_single_time = use_single_time
# Problem specific context parameters (placeholder and step context dimension)
if self.is_vrp or self.is_orienteering or self.is_pctsp:
# Embedding of last node + remaining_capacity / remaining length / remaining prize to collect
step_context_dim = embedding_dim + 1
if self.is_pctsp:
node_dim = 4 # x, y, expected_prize, penalty
else:
node_dim = 3 # x, y, demand / prize
# Special embedding projection for depot node
self.init_embed_depot = nn.Linear(2, embedding_dim)
if self.is_vrp and self.allow_partial: # Need to include the demand if split delivery allowed
self.project_node_step = nn.Linear(1, 3 * embedding_dim, bias=False)
else: # TSP
assert problem.NAME == "tsp", "Unsupported problem: {}".format(problem.NAME)
step_context_dim = 2 * embedding_dim # Embedding of first and last node
node_dim = 2 # x, y
# Learned input symbols for first action
self.W_placeholder = nn.Parameter(torch.Tensor(2 * embedding_dim))
self.W_placeholder.data.uniform_(-1, 1) # Placeholder should be in range of activations
self.init_embed = nn.Linear(node_dim, embedding_dim)
self.embedder = GraphAttentionEncoder(
n_heads=n_heads,
embed_dim=embedding_dim,
n_layers=self.n_encode_layers,
normalization=normalization
)
# For each node we compute (glimpse key, glimpse value, logit key) so 3 * embedding_dim
self.project_node_embeddings = nn.Linear(embedding_dim, 3 * embedding_dim, bias=False)
self.project_fixed_context = nn.Linear(embedding_dim, embedding_dim, bias=False)
self.project_step_context = nn.Linear(step_context_dim, embedding_dim, bias=False)
assert embedding_dim % n_heads == 0
# Note n_heads * val_dim == embedding_dim so input to project_out is embedding_dim
self.project_out = nn.Linear(embedding_dim, embedding_dim, bias=False)
def set_decode_type(self, decode_type, temp=None):
self.decode_type = decode_type
if temp is not None: # Do not change temperature if not provided
self.temp = temp
def forward(self, input, return_pi=False):
"""
:param input: (batch_size, graph_size, node_dim) input node features or dictionary with multiple tensors
:param return_pi: whether to return the output sequences, this is optional as it is not compatible with
using DataParallel as the results may be of different lengths on different GPUs
:return:
"""
original_input = input
input = self.prepare_input(input)
#if len(input.size()) == 4:
# input = input[:, 0, :, :]
if self.checkpoint_encoder and self.training: # Only checkpoint if we need gradients
embeddings, _ = checkpoint(self.embedder, self._init_embed(input))
else:
embeddings, _ = self.embedder(self._init_embed(input))
_log_p, pi = self._inner(input, embeddings)
cost, mask = self.problem.get_costs(original_input, pi)
# Log likelihood is calculated within the model because returning it per action does not work well with
# DataParallel, since sequences can be of different lengths on different GPUs
ll = self._calc_log_likelihood(_log_p, pi, mask)
if return_pi:
return cost, ll, pi
return cost, ll
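# A hedged usage sketch (the problem instance, sizes and decode type below are
# assumptions, not part of this module):
def _example_forward_usage(model):
model.set_decode_type("greedy")
coords = torch.rand(512, 20, 2) # (batch_size, graph_size, node_dim) for TSP
cost, log_likelihood, pi = model(coords, return_pi=True)
return cost, log_likelihood, pi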
def beam_search(self, *args, **kwargs):
return self.problem.beam_search(*args, **kwargs, model=self)
def precompute_fixed(self, input):
embeddings, _ = self.embedder(self._init_embed(input))
# Use a CachedLookup such that if we repeatedly index this object with the same index we only need to do
# the lookup once... this is the case if all elements in the batch have maximum batch size
return CachedLookup(self._precompute(embeddings))
def propose_expansions(self, beam, fixed, expand_size=None, normalize=False, max_calc_batch_size=4096):
# First dim = batch_size * cur_beam_size
log_p_topk, ind_topk = compute_in_batches(
lambda b: self._get_log_p_topk(fixed[b.ids], b.state, k=expand_size, normalize=normalize),
max_calc_batch_size, beam, n=beam.size()
)
assert log_p_topk.size(1) == 1, "Can only have single step"
# This will broadcast, calculate log_p (score) of expansions
score_expand = beam.score[:, None] + log_p_topk[:, 0, :]
# We flatten the action as we need to filter and this cannot be done in 2d
flat_action = ind_topk.view(-1)
flat_score = score_expand.view(-1)
flat_feas = flat_score > -1e10 # feasible above a large negative threshold; comparing != -math.inf directly is unreliable
# Parent is row idx of ind_topk, can be found by enumerating elements and dividing by number of columns
flat_parent = torch.arange(flat_action.size(-1), out=flat_action.new()) // ind_topk.size(-1)
# Filter infeasible
feas_ind_2d = torch.nonzero(flat_feas)
if len(feas_ind_2d) == 0:
# Too bad, no feasible expansions at all :(
return None, None, None
feas_ind = feas_ind_2d[:, 0]
return flat_parent[feas_ind], flat_action[feas_ind], flat_score[feas_ind]
def _calc_log_likelihood(self, _log_p, a, mask):
# Get log_p corresponding to selected actions
log_p = _log_p.gather(2, a.unsqueeze(-1)).squeeze(-1)
# Optional: mask out actions irrelevant to objective so they do not get reinforced
if mask is not None:
log_p[mask] = 0
assert (log_p > -1000).data.all(), "Logprobs should not be -inf, check sampling procedure!"
# Calculate log_likelihood
return log_p.sum(1)
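# A small numeric sketch of the gather above (values are made up): per step,
# gather picks the log-probability of the action that was actually taken, and
# summing over steps gives the log-likelihood of the whole sequence.
def _example_log_likelihood():
_log_p = torch.tensor([[[0.7, 0.2, 0.1], [0.1, 0.5, 0.4]]]).log()
a = torch.tensor([[0, 1]]) # actions: node 0, then node 1
log_p = _log_p.gather(2, a.unsqueeze(-1)).squeeze(-1) # log 0.7, log 0.5
return log_p.sum(1) # log(0.7 * 0.5)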
def prepare_input(self, input):
if self.is_vrp or self.is_orienteering or self.is_pctsp:
if len(input['loc'].size()) == 4:
data = {
'loc' : input['loc'][:, 0, :, :],
'demand' : input['demand'],
'depot' : input['depot']
}
else: data = input
else: # TSP
if len(input.size()) == 4:
data = input[:, 0, :, :]
else: data = input
return data
def _init_embed(self, input):
if self.is_vrp or self.is_orienteering or self.is_pctsp:
if self.is_vrp:
features = ('demand', )
elif self.is_orienteering:
features = ('prize', )
else:
assert self.is_pctsp
features = ('deterministic_prize', 'penalty')
return torch.cat(
(
self.init_embed_depot(input['depot'])[:, None, :],
self.init_embed(torch.cat((
input['loc'],
*(input[feat][:, :, None] for feat in features)
), -1))
),
1
)
# TSP
return self.init_embed(input)
def _inner(self, input, embeddings):
outputs = []
sequences = []
state = self.problem.make_state(input)
# Compute keys, values for the glimpse and keys for the logits once as they can be reused in every step
fixed = self._precompute(embeddings)
batch_size = state.ids.size(0)
# Perform decoding steps
i = 0
while not (self.shrink_size is None and state.all_finished()):
if self.shrink_size is not None:
unfinished = torch.nonzero(state.get_finished() == 0)
if len(unfinished) == 0:
break
unfinished = unfinished[:, 0]
# Check if we can shrink by at least shrink_size and if this leaves at least 16
# (otherwise batch norm will not work well and it is inefficient anyway)
if 16 <= len(unfinished) <= state.ids.size(0) - self.shrink_size:
# Filter states
state = state[unfinished]
fixed = fixed[unfinished]
log_p, mask = self._get_log_p(fixed, state)
# Select the indices of the next nodes in the sequences, result (batch_size) long
selected = self._select_node(log_p.exp()[:, 0, :], mask[:, 0, :]) # Squeeze out steps dimension
state = state.update(selected)
# Now make log_p, selected desired output size by 'unshrinking'
if self.shrink_size is not None and state.ids.size(0) < batch_size:
log_p_, selected_ = log_p, selected
log_p = log_p_.new_zeros(batch_size, *log_p_.size()[1:])
selected = selected_.new_zeros(batch_size)
log_p[state.ids[:, 0]] = log_p_
selected[state.ids[:, 0]] = selected_
# Collect output of step
outputs.append(log_p[:, 0, :])
sequences.append(selected)
i += 1
# Collected lists, return Tensor
return torch.stack(outputs, 1), torch.stack(sequences, 1)
def sample_many(self, input, batch_rep=1, iter_rep=1):
"""
:param input: (batch_size, graph_size, node_dim) input node features
:return:
"""
# Bit ugly but we need to pass the embeddings as well.
# Making a tuple will not work with the problem.get_cost function
input = self.prepare_input(input)
return sample_many(
lambda input: self._inner(*input), # Need to unpack tuple into arguments
lambda input, pi: self.problem.get_costs(input[0], pi), # Don't need embeddings as input to get_costs
(input, self.embedder(self._init_embed(input))[0]), # Pack input with embeddings (additional input)
batch_rep, iter_rep
)
def _select_node(self, probs, mask):
assert (probs == probs).all(), "Probs should not contain any nans"
if self.decode_type == "greedy":
_, selected = probs.max(1)
assert not mask.gather(1, selected.unsqueeze(
-1)).data.any(), "Decode greedy: infeasible action has maximum probability"
elif self.decode_type == "sampling":
selected = probs.multinomial(1).squeeze(1)
# Check if sampling went OK, can go wrong due to bug on GPU
# See https://discuss.pytorch.org/t/bad-behavior-of-multinomial-function/10232
while mask.gather(1, selected.unsqueeze(-1)).data.any():
print('Sampled bad values, resampling!')
selected = probs.multinomial(1).squeeze(1)
else:
assert False, "Unknown decode type"
return selected
def _precompute(self, embeddings, num_steps=1):
# The fixed context projection of the graph embedding is calculated only once for efficiency
graph_embed = embeddings.mean(1)
# fixed context = (batch_size, 1, embed_dim) to make broadcastable with parallel timesteps
fixed_context = self.project_fixed_context(graph_embed)[:, None, :]
# The projection of the node embeddings for the attention is calculated once up front
glimpse_key_fixed, glimpse_val_fixed, logit_key_fixed = \
self.project_node_embeddings(embeddings[:, None, :, :]).chunk(3, dim=-1)
# No need to rearrange key for logit as there is a single head
fixed_attention_node_data = (
self._make_heads(glimpse_key_fixed, num_steps),
self._make_heads(glimpse_val_fixed, num_steps),
logit_key_fixed.contiguous()
)
return AttentionModelFixed(embeddings, fixed_context, *fixed_attention_node_data)
def _get_log_p_topk(self, fixed, state, k=None, normalize=True):
log_p, _ = self._get_log_p(fixed, state, normalize=normalize)
# Return topk
if k is not None and k < log_p.size(-1):
return log_p.topk(k, -1)
# Return all; note that unlike torch.topk this does not raise an error if there are fewer than k elements along the dim
return (
log_p,
torch.arange(log_p.size(-1), device=log_p.device, dtype=torch.int64).repeat(log_p.size(0), 1)[:, None, :]
)
def _get_log_p(self, fixed, state, normalize=True):
# Compute query = context node embedding
query = fixed.context_node_projected + \
self.project_step_context(self._get_parallel_step_context(fixed.node_embeddings, state))
# Compute keys and values for the nodes
glimpse_K, glimpse_V, logit_K = self._get_attention_node_data(fixed, state)
# Compute the mask
mask = state.get_mask()
# Compute logits (unnormalized log_p)
log_p, glimpse = self._one_to_many_logits(query, glimpse_K, glimpse_V, logit_K, mask)
if normalize:
log_p = torch.log_softmax(log_p / self.temp, dim=-1)
assert not torch.isnan(log_p).any()
return log_p, mask
def _get_parallel_step_context(self, embeddings, state, from_depot=False):
"""
Returns the context per step, optionally for multiple steps at once (for efficient evaluation of the model)
:param embeddings: (batch_size, graph_size, embed_dim)
:param prev_a: (batch_size, num_steps)
:param first_a: Only used when num_steps = 1, action of first step or None if first step
:return: (batch_size, num_steps, context_dim)
"""
current_node = state.get_current_node()
batch_size, num_steps = current_node.size()
if self.is_vrp:
# Embedding of previous node + remaining capacity
if from_depot:
# 1st dimension is node idx, but we do not squeeze it since we want to insert step dimension
# i.e. we actually want embeddings[:, 0, :][:, None, :] which is equivalent
return torch.cat(
(
embeddings[:, 0:1, :].expand(batch_size, num_steps, embeddings.size(-1)),
# used capacity is 0 after visiting depot
self.problem.VEHICLE_CAPACITY - torch.zeros_like(state.used_capacity[:, :, None])
),
-1
)
else:
return torch.cat(
(
torch.gather(
embeddings,
1,
current_node.contiguous()
.view(batch_size, num_steps, 1)
.expand(batch_size, num_steps, embeddings.size(-1))
).view(batch_size, num_steps, embeddings.size(-1)),
self.problem.VEHICLE_CAPACITY - state.used_capacity[:, :, None]
),
-1
)
elif self.is_orienteering or self.is_pctsp:
return torch.cat(
(
torch.gather(
embeddings,
1,
current_node.contiguous()
.view(batch_size, num_steps, 1)
.expand(batch_size, num_steps, embeddings.size(-1))
).view(batch_size, num_steps, embeddings.size(-1)),
(
state.get_remaining_length()[:, :, None]
if self.is_orienteering
else state.get_remaining_prize_to_collect()[:, :, None]
)
),
-1
)
else: # TSP
if num_steps == 1: # We need to special-case when there is only 1 step; it may or may not be the first
if state.i.item() == 0:
# First and only step, ignore prev_a (this is a placeholder)
return self.W_placeholder[None, None, :].expand(batch_size, 1, self.W_placeholder.size(-1))
else:
return embeddings.gather(
1,
torch.cat((state.first_a, current_node), 1)[:, :, None].expand(batch_size, 2, embeddings.size(-1))
).view(batch_size, 1, -1)
# More than one step, assume always starting with first
embeddings_per_step = embeddings.gather(
1,
current_node[:, 1:, None].expand(batch_size, num_steps - 1, embeddings.size(-1))
)
return torch.cat((
# First step placeholder, cat in dim 1 (time steps)
self.W_placeholder[None, None, :].expand(batch_size, 1, self.W_placeholder.size(-1)),
# Second step, concatenate embedding of first with embedding of current/previous (in dim 2, context dim)
torch.cat((
embeddings_per_step[:, 0:1, :].expand(batch_size, num_steps - 1, embeddings.size(-1)),
embeddings_per_step
), 2)
), 1)
def _one_to_many_logits(self, query, glimpse_K, glimpse_V, logit_K, mask):
batch_size, num_steps, embed_dim = query.size()
key_size = val_size = embed_dim // self.n_heads
# Compute the glimpse, rearrange dimensions so the dimensions are (n_heads, batch_size, num_steps, 1, key_size)
glimpse_Q = query.view(batch_size, num_steps, self.n_heads, 1, key_size).permute(2, 0, 1, 3, 4)
# Batch matrix multiplication to compute compatibilities (n_heads, batch_size, num_steps, graph_size)
compatibility = torch.matmul(glimpse_Q, glimpse_K.transpose(-2, -1)) / math.sqrt(glimpse_Q.size(-1))
if self.mask_inner:
assert self.mask_logits, "Cannot mask inner without masking logits"
compatibility[mask[None, :, :, None, :].expand_as(compatibility)] = -math.inf
# Batch matrix multiplication to compute heads (n_heads, batch_size, num_steps, val_size)
heads = torch.matmul(torch.softmax(compatibility, dim=-1), glimpse_V)
# Project to get glimpse/updated context node embedding (batch_size, num_steps, embedding_dim)
glimpse = self.project_out(
heads.permute(1, 2, 3, 0, 4).contiguous().view(-1, num_steps, 1, self.n_heads * val_size))
# Now projecting the glimpse is not needed since this can be absorbed into project_out
# final_Q = self.project_glimpse(glimpse)
final_Q = glimpse
# Batch matrix multiplication to compute logits (batch_size, num_steps, graph_size)
# logits = 'compatibility'
logits = torch.matmul(final_Q, logit_K.transpose(-2, -1)).squeeze(-2) / math.sqrt(final_Q.size(-1))
# From the logits compute the probabilities by clipping, masking and softmax
if self.tanh_clipping > 0:
logits = torch.tanh(logits) * self.tanh_clipping
if self.mask_logits:
logits[mask] = -math.inf
return logits, glimpse.squeeze(-2)
def _get_attention_node_data(self, fixed, state):
if self.is_vrp and self.allow_partial:
# Need to provide information of how much each node has already been served
# Clone demands as they are needed by the backprop whereas they are updated later
glimpse_key_step, glimpse_val_step, logit_key_step = \
self.project_node_step(state.demands_with_depot[:, :, :, None].clone()).chunk(3, dim=-1)
# Projection of concatenation is equivalent to addition of projections but this is more efficient
return (
fixed.glimpse_key + self._make_heads(glimpse_key_step),
fixed.glimpse_val + self._make_heads(glimpse_val_step),
fixed.logit_key + logit_key_step,
)
# TSP or VRP without split delivery
return fixed.glimpse_key, fixed.glimpse_val, fixed.logit_key
def _make_heads(self, v, num_steps=None):
assert num_steps is None or v.size(1) == 1 or v.size(1) == num_steps
return (
v.contiguous().view(v.size(0), v.size(1), v.size(2), self.n_heads, -1)
.expand(v.size(0), v.size(1) if num_steps is None else num_steps, v.size(2), self.n_heads, -1)
.permute(3, 0, 1, 2, 4) # (n_heads, batch_size, num_steps, graph_size, head_dim)
)
| 23,184 | 41.855823 | 122 | py |
GTA-RL | GTA-RL-master/nets/graph_encoder.py | import torch
import numpy as np
from torch import nn
import math
class SkipConnection(nn.Module):
def __init__(self, module):
super(SkipConnection, self).__init__()
self.module = module
def forward(self, input):
return input + self.module(input)
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=100):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:,:x.size(1),:]
return self.dropout(x)
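# A minimal sketch (sizes assumed): every time index t receives the same fixed
# sinusoidal vector pe[0, t], so downstream attention layers can tell time steps
# apart; PositionalDecoding below subtracts the identical table.
def _example_positional_encoding():
pe = PositionalEncoding(d_model=128, dropout=0.0)
x = torch.zeros(4, 20, 128) # (batch, time, d_model)
return pe(x) # row t equals pe.pe[0, t] for every batch element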
class PositionalDecoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=100):
super(PositionalDecoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x - self.pe[:,:x.size(1),:]
return self.dropout(x)
class StMultiHeadAttention(nn.Module):
def __init__(
self,
n_heads,
input_dim,
embed_dim,
val_dim=None,
key_dim=None,
is_vrp=False
):
super(StMultiHeadAttention, self).__init__()
self.n_heads = n_heads
self.is_vrp = is_vrp
self.spatial_attention = MultiHeadAttention(n_heads, input_dim, embed_dim, val_dim, key_dim)
self.temporal_attention = MultiHeadAttention(n_heads, input_dim, embed_dim, val_dim, key_dim)
#self.positional_encode = PositionalEncoding(input_dim, dropout=0.0)
#self.positional_decode = PositionalDecoding(input_dim, dropout=0.0)
self.fixed_emb = torch.nn.Linear(input_dim, input_dim)
self.fushed = torch.nn.Linear(2*input_dim, input_dim)
def forward(self, q, h=None, mask=None):
if h is None:
h = q # compute self-attention
# h should be (batch_size, time, graph_size, input_dim)
batch_size, time, graph_size, input_dim = q.size()
shape_spatial = (batch_size, time, graph_size, input_dim)
shape_temporal = (batch_size, graph_size, time, input_dim)
spatial = q.contiguous().view(batch_size*time, graph_size, input_dim)
if self.is_vrp:
temporal = q[:,:,1:,:].transpose(1, 2).contiguous().view(batch_size * (graph_size-1), time, input_dim)
shape_temporal = (batch_size, graph_size-1, time, input_dim)
fixed_out = self.fixed_emb(q[:,:,0,:])
else:
temporal = q.transpose(1,2).contiguous().view(batch_size*graph_size, time, input_dim)
spatial_out = self.spatial_attention(spatial).view(shape_spatial)
temporal_out = self.temporal_attention(temporal).view(shape_temporal).transpose(1, 2)
if self.is_vrp:
temporal_out = torch.cat((fixed_out[:,:,None,:], temporal_out), 2)
emb_cat = self.fushed(torch.cat((spatial_out, temporal_out), dim=-1))
#emb_cat = torch.add(spatial_out, temporal_out)
fusion = torch.sigmoid(emb_cat) # TODO: implement a proper fusion mechanism
return fusion
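# A shape sketch of the two attention streams above (sizes are assumptions): the
# spatial stream treats the graph_size nodes of each (batch, time) pair as
# tokens, the temporal stream treats the time slices of each node as tokens, and
# the two outputs are concatenated and fused back to input_dim.
def _example_st_reshape():
q = torch.randn(8, 10, 20, 128) # (batch, time, graph, dim)
spatial = q.contiguous().view(8 * 10, 20, 128) # nodes as tokens
temporal = q.transpose(1, 2).contiguous().view(8 * 20, 10, 128) # time steps as tokens
return spatial, temporal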
class MultiHeadAttention(nn.Module):
def __init__(
self,
n_heads,
input_dim,
embed_dim,
val_dim=None,
key_dim=None
):
super(MultiHeadAttention, self).__init__()
if val_dim is None:
val_dim = embed_dim // n_heads
if key_dim is None:
key_dim = val_dim
self.n_heads = n_heads
self.input_dim = input_dim
self.embed_dim = embed_dim
self.val_dim = val_dim
self.key_dim = key_dim
self.norm_factor = 1 / math.sqrt(key_dim) # See Attention is all you need
self.W_query = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim))
self.W_key = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim))
self.W_val = nn.Parameter(torch.Tensor(n_heads, input_dim, val_dim))
self.W_out = nn.Parameter(torch.Tensor(n_heads, val_dim, embed_dim))
self.init_parameters()
def init_parameters(self):
for param in self.parameters():
stdv = 1. / math.sqrt(param.size(-1))
param.data.uniform_(-stdv, stdv)
def forward(self, q, h=None, mask=None):
"""
:param q: queries (batch_size, n_query, input_dim)
:param h: data (batch_size, graph_size, input_dim)
:param mask: mask (batch_size, n_query, graph_size) or viewable as that (i.e. can be 2 dim if n_query == 1)
Mask should contain 1 if attention is not possible (i.e. mask is negative adjacency)
:return:
"""
if h is None:
h = q # compute self-attention
# h should be (batch_size, time, graph_size, input_dim)
if len(q.size()) == 4:
batch_size, time, graph_size, input_dim = q.size()
# h should be (batch_size, graph_size, input_dim)
else:
batch_size, graph_size, input_dim = q.size()
n_query = q.size(1)
assert q.size(0) == batch_size
#assert q.size(2) == input_dim
assert input_dim == self.input_dim, "Wrong embedding dimension of input"
hflat = h.contiguous().view(-1, input_dim)
qflat = q.contiguous().view(-1, input_dim)
# last dimension can be different for keys and values
shp = (self.n_heads, batch_size, graph_size, -1)
shp_q = (self.n_heads, batch_size, n_query, -1)
# Calculate queries, (n_heads, n_query, graph_size, key/val_size)
Q = torch.matmul(qflat, self.W_query).view(shp_q)
# Calculate keys and values (n_heads, batch_size, graph_size, key/val_size)
K = torch.matmul(hflat, self.W_key).view(shp)
V = torch.matmul(hflat, self.W_val).view(shp)
# Calculate compatibility (n_heads, batch_size, n_query, graph_size)
compatibility = self.norm_factor * torch.matmul(Q, K.transpose(2, 3))
# Optionally apply mask to prevent attention
if mask is not None:
mask = mask.view(1, batch_size, n_query, graph_size).expand_as(compatibility)
compatibility[mask] = -np.inf
attn = torch.softmax(compatibility, dim=-1)
# If there are nodes with no neighbours then softmax returns nan so we fix them to 0
if mask is not None:
attnc = attn.clone()
attnc[mask] = 0
attn = attnc
heads = torch.matmul(attn, V)
out = torch.mm(
heads.permute(1, 2, 0, 3).contiguous().view(-1, self.n_heads * self.val_dim),
self.W_out.view(-1, self.embed_dim)
).view(batch_size, n_query, self.embed_dim)
# Alternative:
# headst = heads.transpose(0, 1) # swap the dimensions for batch and heads to align it for the matmul
# # proj_h = torch.einsum('bhni,hij->bhnj', headst, self.W_out)
# projected_heads = torch.matmul(headst, self.W_out)
# out = torch.sum(projected_heads, dim=1) # sum across heads
# Or:
# out = torch.einsum('hbni,hij->bnj', heads, self.W_out)
return out
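# A minimal self-attention sketch (sizes are assumptions):
def _example_mha():
mha = MultiHeadAttention(n_heads=8, input_dim=128, embed_dim=128)
h = torch.randn(4, 20, 128) # (batch_size, graph_size, input_dim)
return mha(h) # self-attention output of shape (4, 20, 128)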
class MultiHeadConv2DAttention(nn.Module):
def __init__(
self,
n_heads,
input_dim,
embed_dim,
elem_dim=20,
val_dim=None,
key_dim=None,
):
super(MultiHeadConv2DAttention, self).__init__()
if val_dim is None:
val_dim = embed_dim // n_heads
if key_dim is None:
key_dim = val_dim
self.n_heads = n_heads
self.input_dim = input_dim
self.embed_dim = embed_dim
self.val_dim = val_dim
self.key_dim = key_dim
self.elem_dim = elem_dim
self.norm_factor = 1 / math.sqrt(key_dim) # See Attention is all you need
self.W_query = nn.Parameter(torch.Tensor(n_heads, input_dim*elem_dim, key_dim*elem_dim))
self.W_key = nn.Parameter(torch.Tensor(n_heads, input_dim*elem_dim, key_dim*elem_dim))
self.W_val = nn.Parameter(torch.Tensor(n_heads, input_dim*elem_dim, val_dim*elem_dim))
self.W_out = nn.Parameter(torch.Tensor(n_heads, val_dim*elem_dim, embed_dim*elem_dim))
self.init_parameters()
def init_parameters(self):
for param in self.parameters():
stdv = 1. / math.sqrt(param.size(-1))
param.data.uniform_(-stdv, stdv)
def forward(self, q, h=None, mask=None):
"""
:param q: queries (batch_size, n_query, input_dim)
:param h: data (batch_size, graph_size, input_dim)
:param mask: mask (batch_size, n_query, graph_size) or viewable as that (i.e. can be 2 dim if n_query == 1)
Mask should contain 1 if attention is not possible (i.e. mask is negative adjacency)
:return:
"""
if h is None:
h = q # compute self-attention
# h should be (batch_size, time, graph_size, input_dim)
#if len(h.size()) == 4:
batch_size, time, graph_size, input_dim = h.size()
# h should be (batch_size, graph_size, input_dim)
#else:
# batch_size, graph_size, input_dim = h.size()
n_query = q.size(2)
assert q.size(0) == batch_size
assert q.size(3) == input_dim
assert input_dim == self.input_dim, "Wrong embedding dimension of input"
hflat = h.contiguous().view(batch_size*time, graph_size*input_dim)
qflat = q.contiguous().view(batch_size*time, graph_size*input_dim)
# last dimension can be different for keys and values
shp = (self.n_heads, batch_size, time, -1)
shp_q = (self.n_heads, batch_size, time, -1)
# Calculate queries, (n_heads, n_query, graph_size, key/val_size)
Q = torch.matmul(qflat, self.W_query).view(shp_q)
# Calculate keys and values (n_heads, batch_size, graph_size, key/val_size)
K = torch.matmul(hflat, self.W_key).view(shp)
V = torch.matmul(hflat, self.W_val).view(shp)
# Calculate compatibility (n_heads, batch_size, n_query, graph_size)
compatibility = self.norm_factor * torch.matmul(Q, K.transpose(2, 3))
# Optionally apply mask to prevent attention
if mask is not None:
mask = mask.view(1, batch_size, n_query, graph_size).expand_as(compatibility)
compatibility[mask] = -np.inf
attn = torch.softmax(compatibility, dim=-1)
# If there are nodes with no neighbours then softmax returns nan so we fix them to 0
if mask is not None:
attnc = attn.clone()
attnc[mask] = 0
attn = attnc
heads = torch.matmul(attn, V)
out = torch.mm(
heads.permute(1, 2, 0, 3).contiguous().view(-1, self.n_heads * self.val_dim * self.elem_dim),
self.W_out.view(-1, self.embed_dim * self.elem_dim)
).view(batch_size, n_query, self.elem_dim, self.embed_dim)
# Alternative:
# headst = heads.transpose(0, 1) # swap the dimensions for batch and heads to align it for the matmul
# # proj_h = torch.einsum('bhni,hij->bhnj', headst, self.W_out)
# projected_heads = torch.matmul(headst, self.W_out)
# out = torch.sum(projected_heads, dim=1) # sum across heads
# Or:
# out = torch.einsum('hbni,hij->bnj', heads, self.W_out)
return out
class Normalization(nn.Module):
def __init__(self, embed_dim, normalization='batch'):
super(Normalization, self).__init__()
normalizer_class = {
'batch': nn.BatchNorm1d,
'instance': nn.InstanceNorm1d
}.get(normalization, None)
self.normalizer = normalizer_class(embed_dim, affine=True) if normalizer_class is not None else None
# Normalization by default initializes affine parameters with bias 0 and weight unif(0,1) which is too large!
# self.init_parameters()
def init_parameters(self):
for name, param in self.named_parameters():
stdv = 1. / math.sqrt(param.size(-1))
param.data.uniform_(-stdv, stdv)
def forward(self, input):
if isinstance(self.normalizer, nn.BatchNorm1d):
return self.normalizer(input.view(-1, input.size(-1))).view(*input.size())
elif isinstance(self.normalizer, nn.InstanceNorm1d):
return self.normalizer(input.permute(0, 2, 1)).permute(0, 2, 1)
else:
assert self.normalizer is None, "Unknown normalizer type"
return input
class MultiHeadAttentionLayer(nn.Sequential):
def __init__(
self,
n_heads,
embed_dim,
feed_forward_hidden=512,
normalization='batch',
st_attention=False,
is_vrp=False
):
super(MultiHeadAttentionLayer, self).__init__(
SkipConnection(
StMultiHeadAttention(
n_heads,
input_dim=embed_dim,
embed_dim=embed_dim,
is_vrp=is_vrp
) if st_attention else
MultiHeadAttention(
n_heads,
input_dim=embed_dim,
embed_dim=embed_dim
)
),
Normalization(embed_dim, normalization),
SkipConnection(
nn.Sequential(
nn.Linear(embed_dim, feed_forward_hidden),
nn.ReLU(),
nn.Linear(feed_forward_hidden, embed_dim)
) if feed_forward_hidden > 0 else nn.Linear(embed_dim, embed_dim)
),
Normalization(embed_dim, normalization)
)
class StMultiHeadAttentionLayer(nn.Sequential):
def __init__(
self,
n_heads,
embed_dim,
feed_forward_hidden=512,
normalization='batch',
):
super(StMultiHeadAttentionLayer, self).__init__(
SkipConnection(
StMultiHeadAttention(
n_heads,
input_dim=embed_dim,
embed_dim=embed_dim
)
),
Normalization(embed_dim, normalization),
SkipConnection(
nn.Sequential(
nn.Linear(embed_dim, feed_forward_hidden),
nn.ReLU(),
nn.Linear(feed_forward_hidden, embed_dim)
) if feed_forward_hidden > 0 else nn.Linear(embed_dim, embed_dim)
),
Normalization(embed_dim, normalization)
)
class GraphAttentionEncoder(nn.Module):
def __init__(
self,
n_heads,
embed_dim,
n_layers,
node_dim=None,
normalization='batch',
feed_forward_hidden=512,
st_attention=False,
is_vrp=False,
):
super(GraphAttentionEncoder, self).__init__()
# To map input to embedding space
self.init_embed = nn.Linear(node_dim, embed_dim) if node_dim is not None else None
self.is_st_attention = st_attention
self.positional_encode = PositionalEncoding(d_model=embed_dim, dropout=0.0)
self.layers = nn.Sequential(*(
MultiHeadAttentionLayer(n_heads, embed_dim, feed_forward_hidden, normalization, st_attention, is_vrp)
for _ in range(n_layers)
))
def forward(self, x, mask=None):
assert mask is None, "TODO mask not yet supported!"
# Batch multiply to get initial embeddings of nodes
h = self.init_embed(x.view(-1, x.size(-1))).view(*x.size()[:-1], -1) if self.init_embed is not None else x
#if self.is_st_attention:
# batch_size, time, graph_size, input_dim = h.size()
# shape_temporal = (batch_size, graph_size, time, input_dim)
#
# h = h.transpose(1, 2).contiguous().view(batch_size * graph_size, time, input_dim)
# h = self.positional_encode(h).view(shape_temporal).transpose(1, 2).contiguous()
h = self.layers(h)
return (
h, # (batch_size, graph_size, embed_dim)
h.mean(dim=1), # average to get embedding of graph, (batch_size, embed_dim)
)
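# A hedged usage sketch (all sizes are assumptions; node_dim=2 makes the encoder
# project raw 2D coordinates into the embedding space first):
def _example_encoder():
enc = GraphAttentionEncoder(n_heads=8, embed_dim=128, n_layers=2, node_dim=2)
x = torch.rand(4, 20, 2) # (batch_size, graph_size, node_dim)
h, graph_emb = enc(x) # (4, 20, 128) and (4, 128)
return h, graph_emb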
| 16,939 | 34.738397 | 117 | py |
GTA-RL | GTA-RL-master/nets/critic_network.py | from torch import nn
from nets.graph_encoder import GraphAttentionEncoder
class CriticNetwork(nn.Module):
def __init__(
self,
input_dim,
embedding_dim,
hidden_dim,
n_layers,
encoder_normalization,
st_attention
):
super(CriticNetwork, self).__init__()
self.hidden_dim = hidden_dim
self.encoder = GraphAttentionEncoder(
node_dim=input_dim,
n_heads=8,
embed_dim=embedding_dim,
n_layers=n_layers,
normalization=encoder_normalization,
st_attention=st_attention
)
self.value_head = nn.Sequential(
nn.Linear(embedding_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, 1)
)
def forward(self, inputs):
"""
:param inputs: (batch_size, graph_size, input_dim)
:return:
"""
_, graph_embeddings = self.encoder(inputs)
return self.value_head(graph_embeddings)
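# A hedged usage sketch (all dimensions are assumptions): the critic maps a
# batch of instances to one scalar baseline value per instance.
def _example_critic():
import torch
critic = CriticNetwork(input_dim=2, embedding_dim=128, hidden_dim=128,
n_layers=2, encoder_normalization='batch', st_attention=False)
return critic(torch.rand(32, 20, 2)) # (32, 1) baseline values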
| 1,026 | 22.883721 | 58 | py |
GTA-RL | GTA-RL-master/problems/pctsp/state_pctsp.py | import torch
from typing import NamedTuple
from utils.boolmask import mask_long2bool, mask_long_scatter
import torch.nn.functional as F
class StatePCTSP(NamedTuple):
# Fixed input
coords: torch.Tensor # Depot + loc
expected_prize: torch.Tensor
real_prize: torch.Tensor
penalty: torch.Tensor
# If this state contains multiple copies (i.e. beam search) for the same instance, then for memory efficiency
# the coords and prizes tensors are not kept multiple times, so we need to use the ids to index the correct rows.
ids: torch.Tensor # Keeps track of original fixed data index of rows
# State
prev_a: torch.Tensor
visited_: torch.Tensor # Keeps track of nodes that have been visited
lengths: torch.Tensor
cur_total_prize: torch.Tensor
cur_total_penalty: torch.Tensor
cur_coord: torch.Tensor
i: torch.Tensor # Keeps track of step
@property
def visited(self):
if self.visited_.dtype == torch.uint8:
return self.visited_
else:
return mask_long2bool(self.visited_, n=self.coords.size(-2))
@property
def dist(self):
return (self.coords[:, :, None, :] - self.coords[:, None, :, :]).norm(p=2, dim=-1)
def __getitem__(self, key):
assert torch.is_tensor(key) or isinstance(key, slice) # If tensor, idx all tensors by this tensor:
return self._replace(
ids=self.ids[key],
prev_a=self.prev_a[key],
visited_=self.visited_[key],
lengths=self.lengths[key],
cur_total_prize=self.cur_total_prize[key],
cur_total_penalty=self.cur_total_penalty[key],
cur_coord=self.cur_coord[key],
)
# Warning: cannot override len of NamedTuple, len should be number of fields, not batch size
# def __len__(self):
# return len(self.used_capacity)
@staticmethod
def initialize(input, visited_dtype=torch.uint8, stochastic=False):
depot = input['depot']
loc = input['loc']
# For both the deterministic and the stochastic variant, the model sees only the deterministic (expected) prize
expected_prize = input['deterministic_prize']
# This is the prize that is actually obtained at each node
real_prize = input['stochastic_prize' if stochastic else 'deterministic_prize']
penalty = input['penalty']
batch_size, n_loc, _ = loc.size()
coords = torch.cat((depot[:, None, :], loc), -2)
# For prize, prepend 0 (corresponding to depot) so we can gather efficiently
real_prize_with_depot = torch.cat((torch.zeros_like(real_prize[:, :1]), real_prize), -1)
penalty_with_depot = F.pad(penalty, (1, 0), mode='constant', value=0)
return StatePCTSP(
coords=coords,
expected_prize=expected_prize,
real_prize=real_prize_with_depot,
penalty=penalty_with_depot,
ids=torch.arange(batch_size, dtype=torch.int64, device=loc.device)[:, None], # Add steps dimension
prev_a=torch.zeros(batch_size, 1, dtype=torch.long, device=loc.device),
visited_=( # Visited as a mask is easier to understand; stored as long it is more memory efficient
# Keep visited_ with depot so we can scatter efficiently (if there is an action for depot)
torch.zeros(
batch_size, 1, n_loc + 1,
dtype=torch.uint8, device=loc.device
)
if visited_dtype == torch.uint8
else torch.zeros(batch_size, 1, (n_loc + 63) // 64, dtype=torch.int64, device=loc.device) # Ceil
),
lengths=torch.zeros(batch_size, 1, device=loc.device),
cur_total_prize=torch.zeros(batch_size, 1, device=loc.device),
cur_total_penalty=penalty.sum(-1)[:, None], # Sum penalties (all when nothing is visited), add step dim
cur_coord=input['depot'][:, None, :], # Add step dimension
i=torch.zeros(1, dtype=torch.int64, device=loc.device) # Vector with length num_steps
)
def get_remaining_prize_to_collect(self):
# returns the remaining prize to collect, or 0 if already collected the minimum (1.0)
return torch.clamp(1 - self.cur_total_prize, min=0)
def get_final_cost(self):
assert self.all_finished()
# assert self.visited_.
# We are at the depot so no need to add remaining distance
return self.lengths + self.cur_total_penalty
def update(self, selected):
assert self.i.size(0) == 1, "Can only update if state represents single step"
# Update the state
selected = selected[:, None] # Add dimension for step
prev_a = selected
# Add the length
cur_coord = self.coords[self.ids, selected]
lengths = self.lengths + (cur_coord - self.cur_coord).norm(p=2, dim=-1) # (batch_dim, 1)
# Add current total prize
cur_total_prize = self.cur_total_prize + self.real_prize[self.ids, selected]
cur_total_penalty = self.cur_total_penalty + self.penalty[self.ids, selected]
if self.visited_.dtype == torch.uint8:
# Note: here we do not subtract one as we have to scatter so the first column allows scattering depot
# Add one dimension since we write a single value
visited_ = self.visited_.scatter(-1, prev_a[:, :, None], 1)
else:
# This works: with check_unset=False it is allowed to mark the depot visited a second time
visited_ = mask_long_scatter(self.visited_, prev_a, check_unset=False)
return self._replace(
prev_a=prev_a, visited_=visited_,
lengths=lengths, cur_total_prize=cur_total_prize, cur_total_penalty=cur_total_penalty, cur_coord=cur_coord,
i=self.i + 1
)
def all_finished(self):
# All must have returned to the depot (and taken at least 1 step, since prev_a == 0 at the start as well)
# This is more efficient than checking the mask
return self.i.item() > 0 and (self.prev_a == 0).all()
# return self.visited[:, :, 0].all() # If we have visited the depot we're done
def get_current_node(self):
"""
Returns the current node where 0 is depot, 1...n are nodes
:return: (batch_size, num_steps) tensor with current nodes
"""
return self.prev_a
def get_mask(self):
"""
Gets a (batch_size, n_loc + 1) mask with the feasible actions (0 = depot), depends on already visited and
remaining capacity. 0 = feasible, 1 = infeasible
Forbids to visit depot twice in a row, unless all nodes have been visited
:return:
"""
# Note: this always allows going to the depot, but that should always be suboptimal so it is OK
# A node cannot be visited if it was already visited, or if the depot has been visited (after which nothing can be visited anymore)
visited_ = self.visited
mask = (
visited_ | visited_[:, :, 0:1]
)
# Cannot visit depot if not yet collected 1 total prize and there are unvisited nodes
mask[:, :, 0] = (self.cur_total_prize < 1.) & (visited_[:, :, 1:].int().sum(-1) < visited_[:, :, 1:].size(-1))
return mask > 0 # Hacky way to return bool or uint8 depending on pytorch version
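# Illustrative reading of the mask (values are made up): with n_loc = 3, total
# prize 0.4 < 1 collected and only the second location visited, one row of
# get_mask() would read [True, False, True, False]: the depot is masked because
# the prize constraint is not met while unvisited nodes remain, the visited
# location is masked, and the two unvisited locations stay feasible.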
def construct_solutions(self, actions):
return actions
| 7,409 | 43.107143 | 119 | py |
GTA-RL | GTA-RL-master/problems/pctsp/problem_pctsp.py | from torch.utils.data import Dataset
import torch
import os
import pickle
from problems.pctsp.state_pctsp import StatePCTSP
from utils.beam_search import beam_search
class PCTSP(object):
NAME = 'pctsp' # Prize Collecting TSP, without depot, with penalties
@staticmethod
def _get_costs(dataset, pi, stochastic=False):
if pi.size(-1) == 1: # In case all tours directly return to depot, prevent further problems
assert (pi == 0).all(), "If all length 1 tours, they should be zero"
# Return
return torch.zeros(pi.size(0), dtype=torch.float, device=pi.device), None
# Check that tours are valid, i.e. contain 0 to n - 1
sorted_pi = pi.data.sort(1)[0]
# Make sure each node visited once at most (except for depot)
assert ((sorted_pi[:, 1:] == 0) | (sorted_pi[:, 1:] > sorted_pi[:, :-1])).all(), "Duplicates"
prize = dataset['stochastic_prize'] if stochastic else dataset['deterministic_prize']
prize_with_depot = torch.cat(
(
torch.zeros_like(prize[:, :1]),
prize
),
1
)
p = prize_with_depot.gather(1, pi)
# Either the prize constraint must be satisfied or all nodes must have been visited
assert (
(p.sum(-1) >= 1 - 1e-5) |
(sorted_pi.size(-1) - (sorted_pi == 0).int().sum(-1) == dataset['loc'].size(-2))
).all(), "Total prize does not satisfy min total prize"
penalty_with_depot = torch.cat(
(
torch.zeros_like(dataset['penalty'][:, :1]),
dataset['penalty']
),
1
)
pen = penalty_with_depot.gather(1, pi)
# Gather dataset in order of tour
loc_with_depot = torch.cat((dataset['depot'][:, None, :], dataset['loc']), 1)
d = loc_with_depot.gather(1, pi[..., None].expand(*pi.size(), loc_with_depot.size(-1)))
length = (
(d[:, 1:] - d[:, :-1]).norm(p=2, dim=-1).sum(1) # Prevent error if len 1 seq
+ (d[:, 0] - dataset['depot']).norm(p=2, dim=-1) # Depot to first
+ (d[:, -1] - dataset['depot']).norm(p=2, dim=-1) # Last to depot, will be 0 if depot is last
)
# The objective is the tour length plus the incurred penalty cost
# Incurred penalty cost is the total penalty cost minus the saved penalties of the visited nodes
return length + dataset['penalty'].sum(-1) - pen.sum(-1), None
@staticmethod
def make_dataset(*args, **kwargs):
return PCTSPDataset(*args, **kwargs)
@staticmethod
def beam_search(input, beam_size, expand_size=None,
compress_mask=False, model=None, max_calc_batch_size=4096):
assert model is not None, "Provide model"
fixed = model.precompute_fixed(input)
def propose_expansions(beam):
return model.propose_expansions(
beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
)
# With beam search we always consider the deterministic case
state = PCTSPDet.make_state(
input, visited_dtype=torch.int64 if compress_mask else torch.uint8
)
return beam_search(state, beam_size, propose_expansions)
class PCTSPDet(PCTSP):
@staticmethod
def get_costs(dataset, pi):
return PCTSP._get_costs(dataset, pi, stochastic=False)
@staticmethod
def make_state(*args, **kwargs):
return StatePCTSP.initialize(*args, **kwargs, stochastic=False)
class PCTSPStoch(PCTSP):
# Stochastic variant of PCTSP, the real (stochastic) prize is only revealed when node is visited
@staticmethod
def get_costs(dataset, pi):
return PCTSP._get_costs(dataset, pi, stochastic=True)
@staticmethod
def make_state(*args, **kwargs):
return StatePCTSP.initialize(*args, **kwargs, stochastic=True)
def generate_instance(size, penalty_factor=3):
depot = torch.rand(2)
loc = torch.rand(size, 2)
# For the penalty to make sense it should be not too large (in which case all nodes will be visited) nor too small
# so we want the objective term to be approximately equal to the length of the tour, which we estimate with half
# of the nodes by half of the tour length (which is very rough but similar to op)
# This means that the sum of penalties for all nodes will be approximately equal to the tour length (on average)
# The expected total (uniform) penalty of half of the nodes (since approx half will be visited by the constraint)
# is (n / 2) / 2 = n / 4 so divide by this means multiply by 4 / n,
# However instead of 4 we use penalty_factor (3 works well) so we can make them larger or smaller
MAX_LENGTHS = {
20: 2.,
50: 3.,
100: 4.
}
penalty_max = MAX_LENGTHS[size] * (penalty_factor) / float(size)
penalty = torch.rand(size) * penalty_max
# Take uniform prizes
# Now expectation is 0.5 so expected total prize is n / 2, we want to force to visit approximately half of the nodes
# so the constraint will be that total prize >= (n / 2) / 2 = n / 4
# equivalently, we divide all prizes by n / 4 and the total prize should be >= 1
deterministic_prize = torch.rand(size) * 4 / float(size)
# In the deterministic setting, the stochastic_prize is not used and the deterministic prize is known
# In the stochastic setting, the deterministic prize is the expected prize and is known up front but the
# stochastic prize is only revealed once the node is visited
# Stochastic prize is between (0, 2 * expected_prize) such that E(stochastic prize) = E(deterministic_prize)
stochastic_prize = torch.rand(size) * deterministic_prize * 2
return {
'depot': depot,
'loc': loc,
'penalty': penalty,
'deterministic_prize': deterministic_prize,
'stochastic_prize': stochastic_prize
}
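# A small numeric check of the scaling above (n = 20 is an assumed size):
# E[deterministic_prize_i] = 0.5 * 4 / n = 2 / n, so visiting roughly half of
# the n nodes gives an expected total prize of (n / 2) * (2 / n) = 1, matching
# the ">= 1" prize constraint used elsewhere in this problem.
def _example_instance_scaling():
inst = generate_instance(20)
return inst['deterministic_prize'].sum() # roughly 2 in expectation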
class PCTSPDataset(Dataset):
def __init__(self, filename=None, size=50, num_samples=1000000, offset=0, distribution=None):
super(PCTSPDataset, self).__init__()
self.data_set = []
if filename is not None:
assert os.path.splitext(filename)[1] == '.pkl'
with open(filename, 'rb') as f:
data = pickle.load(f)
self.data = [
{
'depot': torch.FloatTensor(depot),
'loc': torch.FloatTensor(loc),
'penalty': torch.FloatTensor(penalty),
'deterministic_prize': torch.FloatTensor(deterministic_prize),
'stochastic_prize': torch.tensor(stochastic_prize)
}
for depot, loc, penalty, deterministic_prize, stochastic_prize in (data[offset:offset+num_samples])
]
else:
self.data = [
generate_instance(size)
for i in range(num_samples)
]
self.size = len(self.data)
def __len__(self):
return self.size
def __getitem__(self, idx):
return self.data[idx]
| 7,214 | 38 | 120 | py |
GTA-RL | GTA-RL-master/problems/tsp/problem_tsp.py | from torch.utils.data import Dataset
import torch
import os
import pickle
from problems.tsp.state_tsp import StateTSP
from utils.beam_search import beam_search
class TSP(object):
NAME = 'tsp'
@staticmethod
def get_costs(dataset, pi):
# Check that tours are valid, i.e. contain 0 to n - 1
assert (
torch.arange(pi.size(1), out=pi.data.new()).view(1, -1).expand_as(pi) ==
pi.data.sort(1)[0]
).all(), "Invalid tour"
# Gather dataset in order of tour
if len(dataset.size()) == 4:
d = dataset.gather(2, pi.unsqueeze(-1).unsqueeze(1).expand_as(dataset)).diagonal(dim1=1, dim2=2)
d = d.permute(0,2,1)
#dataset = dataset[:,0,:,:]
#d1 = dataset.gather(1, pi.unsqueeze(-1).expand_as(dataset))
else:
d = dataset.gather(1, pi.unsqueeze(-1).expand_as(dataset))
# Length is the distance (L2-norm of the difference) between each location and its predecessor, plus from the last back to the first
return (d[:, 1:] - d[:, :-1]).norm(p=2, dim=2).sum(1) + (d[:, 0] - d[:, -1]).norm(p=2, dim=1), None
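# A shape sketch of the dynamic (4D) branch above (sizes are assumptions): with
# coordinates (batch, time, graph, 2) and time == graph_size, the node visited
# at step t must be read from time slice t; gathering on the node dimension and
# taking the (time, step) diagonal achieves exactly that.
def _example_dynamic_gather():
dataset = torch.rand(4, 20, 20, 2) # (batch, time, graph, xy)
pi = torch.stack([torch.randperm(20) for _ in range(4)]) # one tour per instance
g = dataset.gather(2, pi.unsqueeze(-1).unsqueeze(1).expand_as(dataset))
return g.diagonal(dim1=1, dim2=2).permute(0, 2, 1) # (4, 20, 2): node pi[b, t] at time t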
@staticmethod
def make_dataset(*args, **kwargs):
return TSPDataset(*args, **kwargs)
@staticmethod
def make_state(*args, **kwargs):
return StateTSP.initialize(*args, **kwargs)
@staticmethod
def beam_search(input, beam_size, expand_size=None,
compress_mask=False, model=None, max_calc_batch_size=4096, dynamic=False):
assert model is not None, "Provide model"
if dynamic:
def propose_expansions(beam, fixed):
return model.propose_expansions(
beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
)
return beam_search(dynamic, TSP, input, model, beam_size, propose_expansions)
else:
state = TSP.make_state(
input, visited_dtype=torch.int64 if compress_mask else torch.uint8
)
fixed = model.precompute_fixed(input)
def propose_expansions(beam):
return model.propose_expansions(
beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
)
return beam_search(dynamic, state, beam_size, propose_expansions)
class TSPDataset(Dataset):
def __init__(self, filename=None, size=50, num_samples=1000000, offset=0, distribution=None, is_dynamic=False):
super(TSPDataset, self).__init__()
self.data_set = []
if filename is not None:
assert os.path.splitext(filename)[1] == '.pkl'
with open(filename, 'rb') as f:
data = pickle.load(f)
self.data = [torch.FloatTensor(row) for row in (data[offset:offset+num_samples])]
else:
# Sample points randomly in [0, 1] square
if is_dynamic:
self.data = [self.get_dynamic_data(size, 0.1) for i in range(num_samples)]
else:
self.data = [torch.FloatTensor(size, 2).uniform_(0, 1) for i in range(num_samples)]
self.size = len(self.data)
def __len__(self):
return self.size
def __getitem__(self, idx):
return self.data[idx]
def as_tensor(self):
return torch.stack(self.data, dim=0)
def get_dynamic_data(self, size, strength=0.01):
total_nodes = []
next = torch.FloatTensor(size, 2).uniform_(0, 1) # Create initial coordinates
for i in range(size):
total_nodes.append(next)
next = torch.clip(next + torch.FloatTensor(size, 2).uniform_(-strength, strength),
0, 1) # Perturb the previous coordinates and keep them within [0, 1]
return torch.stack(total_nodes, dim=0)
class DTSP(TSP):
@staticmethod
def make_dataset(*args, **kwargs):
kwargs['is_dynamic'] = True
return TSPDataset(*args, **kwargs)
@staticmethod
def make_state(*args, **kwargs):
return StateTSP.initialize(*args, **kwargs)
| 4,123 | 32.803279 | 115 | py |
GTA-RL | GTA-RL-master/problems/tsp/tsp_baseline.py | import argparse
import numpy as np
import os
import time
from datetime import timedelta
from scipy.spatial import distance_matrix
from utils import run_all_in_pool
from utils.data_utils import check_extension, load_dataset, save_dataset
from subprocess import check_call, check_output, CalledProcessError
from problems.vrp.vrp_baseline import get_lkh_executable
import torch
from tqdm import tqdm
import re
def solve_gurobi(directory, name, loc, disable_cache=False, timeout=None, gap=None):
# Lazy import so we do not need to have gurobi installed to run this script
from problems.tsp.tsp_gurobi import solve_euclidian_tsp as solve_euclidian_tsp_gurobi
try:
problem_filename = os.path.join(directory, "{}.gurobi{}{}.pkl".format(
name, "" if timeout is None else "t{}".format(timeout), "" if gap is None else "gap{}".format(gap)))
if os.path.isfile(problem_filename) and not disable_cache:
(cost, tour, duration) = load_dataset(problem_filename)
else:
start = time.time()
cost, tour = solve_euclidian_tsp_gurobi(loc, threads=1, timeout=timeout, gap=gap)
duration = time.time() - start # Measure clock time
save_dataset((cost, tour, duration), problem_filename)
# Verify the cost reported by the solver by recomputing the tour length
total_cost = calc_tsp_length(loc, tour)
assert abs(total_cost - cost) <= 1e-5, "Cost is incorrect"
return total_cost, tour, duration
except Exception as e:
# Sometimes the solver cannot find a feasible solution
# By letting it fail we do not get complete results, but we can retry via the caching mechanism
print("Exception occured")
print(e)
return None
def solve_concorde_log(executable, directory, name, loc, disable_cache=False):
problem_filename = os.path.join(directory, "{}.tsp".format(name))
tour_filename = os.path.join(directory, "{}.tour".format(name))
output_filename = os.path.join(directory, "{}.concorde.pkl".format(name))
log_filename = os.path.join(directory, "{}.log".format(name))
# if True:
try:
# May have already been run
if os.path.isfile(output_filename) and not disable_cache:
tour, duration = load_dataset(output_filename)
else:
write_tsplib(problem_filename, loc, name=name)
with open(log_filename, 'w') as f:
start = time.time()
try:
# Concorde is weird, will leave traces of solution in current directory so call from target dir
check_call([executable, '-s', '1234', '-x', '-o',
os.path.abspath(tour_filename), os.path.abspath(problem_filename)],
stdout=f, stderr=f, cwd=directory)
except CalledProcessError as e:
# Somehow Concorde returns 255
assert e.returncode == 255
duration = time.time() - start
tour = read_concorde_tour(tour_filename)
save_dataset((tour, duration), output_filename)
return calc_tsp_length(loc, tour), tour, duration
except Exception as e:
print("Exception occured")
print(e)
return None
def solve_lkh_log(executable, directory, name, loc, runs=1, disable_cache=False):
problem_filename = os.path.join(directory, "{}.lkh{}.vrp".format(name, runs))
tour_filename = os.path.join(directory, "{}.lkh{}.tour".format(name, runs))
output_filename = os.path.join(directory, "{}.lkh{}.pkl".format(name, runs))
param_filename = os.path.join(directory, "{}.lkh{}.par".format(name, runs))
log_filename = os.path.join(directory, "{}.lkh{}.log".format(name, runs))
try:
# May have already been run
if os.path.isfile(output_filename) and not disable_cache:
tour, duration = load_dataset(output_filename)
else:
write_tsplib(problem_filename, loc, name=name)
params = {"PROBLEM_FILE": problem_filename, "OUTPUT_TOUR_FILE": tour_filename, "RUNS": runs, "SEED": 1234}
write_lkh_par(param_filename, params)
with open(log_filename, 'w') as f:
start = time.time()
check_call([executable, param_filename], stdout=f, stderr=f)
duration = time.time() - start
tour = read_tsplib(tour_filename)
save_dataset((tour, duration), output_filename)
return calc_tsp_length(loc, tour), tour, duration
except Exception as e:
print("Exception occured")
print(e)
return None
def write_lkh_par(filename, parameters):
default_parameters = { # Use none to include as flag instead of kv
"MAX_TRIALS": 10000,
"RUNS": 10,
"TRACE_LEVEL": 1,
"SEED": 0
}
with open(filename, 'w') as f:
for k, v in {**default_parameters, **parameters}.items():
if v is None:
f.write("{}\n".format(k))
else:
f.write("{} = {}\n".format(k, v))
def write_tsplib(filename, loc, name="problem"):
with open(filename, 'w') as f:
f.write("\n".join([
"{} : {}".format(k, v)
for k, v in (
("NAME", name),
("TYPE", "TSP"),
("DIMENSION", len(loc)),
("EDGE_WEIGHT_TYPE", "EUC_2D"),
)
]))
f.write("\n")
f.write("NODE_COORD_SECTION\n")
f.write("\n".join([
"{}\t{}\t{}".format(i + 1, int(x * 10000000 + 0.5), int(y * 10000000 + 0.5)) # tsplib does not take floats
for i, (x, y) in enumerate(loc)
]))
f.write("\n")
f.write("EOF\n")
def read_concorde_tour(filename):
with open(filename, 'r') as f:
n = None
tour = []
for line in f:
if n is None:
n = int(line)
else:
tour.extend([int(node) for node in line.rstrip().split(" ")])
assert len(tour) == n, "Unexpected tour length"
return tour
def read_tsplib(filename):
with open(filename, 'r') as f:
tour = []
dimension = 0
started = False
for line in f:
if started:
loc = int(line)
if loc == -1:
break
tour.append(loc)
if line.startswith("DIMENSION"):
dimension = int(line.split(" ")[-1])
if line.startswith("TOUR_SECTION"):
started = True
assert len(tour) == dimension
    tour = np.array(tour).astype(int) - 1  # Subtract 1 as TSPLIB nodes are 1-indexed
return tour.tolist()
def calc_tsp_length(loc, tour):
assert len(np.unique(tour)) == len(tour), "Tour cannot contain duplicates"
assert len(tour) == len(loc)
sorted_locs = np.array(loc)[np.concatenate((tour, [tour[0]]))]
return np.linalg.norm(sorted_locs[1:] - sorted_locs[:-1], axis=-1).sum()
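# Worked example (sketch, helper name is hypothetical): visiting the unit
# square's corners in order gives a closed tour of length 4; calc_tsp_length
# implicitly returns to the first node.
def _calc_tsp_length_example():
    loc = [(0, 0), (0, 1), (1, 1), (1, 0)]
    assert abs(calc_tsp_length(loc, [0, 1, 2, 3]) - 4.0) < 1e-9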
def _calc_insert_cost(D, prv, nxt, ins):
"""
Calculates insertion costs of inserting ins between prv and nxt
:param D: distance matrix
:param prv: node before inserted node, can be vector
:param nxt: node after inserted node, can be vector
:param ins: node to insert
:return:
"""
return (
D[prv, ins]
+ D[ins, nxt]
- D[prv, nxt]
)
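# Worked example (sketch, helper name is hypothetical): inserting node 2 = (0.5, 0.5)
# into the edge (0, 1) of the segment (0,0)-(1,0) costs
# d(0,2) + d(2,1) - d(0,1) = 2*sqrt(0.5) - 1, roughly 0.414.
def _insert_cost_example():
    loc = np.array([[0.0, 0.0], [1.0, 0.0], [0.5, 0.5]])
    D = distance_matrix(loc, loc)
    assert abs(_calc_insert_cost(D, 0, 1, 2) - (2 * 0.5 ** 0.5 - 1.0)) < 1e-9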
def _calc_dynamic_insert_cost(D, prv, nxt, ins, time):
    """
    Calculates insertion costs of inserting ins between prv and nxt when the
    distance matrix changes over time (one matrix per construction step)
    :param D: list of distance matrices, one per time step
    :param prv: node before inserted node, can be vector
    :param nxt: node after inserted node, can be vector
    :param ins: node to insert
    :param time: vector of time steps at which the affected edges are traversed
    :return:
    """
    if time[0] == len(D) - 1:
        time_after = 0
    else:
        time_after = time[0] + 1
    return (
        D[time[0]][prv, ins]
        + D[time_after][ins, nxt]
        - D[time[0]][prv, nxt]
    )
def run_insertion(loc, method):
n = len(loc)
D = distance_matrix(loc, loc)
mask = np.zeros(n, dtype=bool)
tour = [] # np.empty((0, ), dtype=int)
for i in range(n):
feas = mask == 0
feas_ind = np.flatnonzero(mask == 0)
if method == 'random':
# Order of instance is random so do in order for deterministic results
a = i
elif method == 'nearest':
if i == 0:
a = 0 # order does not matter so first is random
else:
a = feas_ind[D[np.ix_(feas, ~feas)].min(1).argmin()] # node nearest to any in tour
elif method == 'cheapest':
assert False, "Not yet implemented" # try all and find cheapest insertion cost
elif method == 'farthest':
if i == 0:
a = D.max(1).argmax() # Node with farthest distance to any other node
else:
                a = feas_ind[D[np.ix_(feas, ~feas)].min(1).argmax()]  # node whose nearest tour node is farthest
mask[a] = True
if len(tour) == 0:
tour = [a]
else:
# Find index with least insert cost
ind_insert = np.argmin(
_calc_insert_cost(
D,
tour,
np.roll(tour, -1),
a
)
)
tour.insert(ind_insert + 1, a)
cost = D[tour, np.roll(tour, -1)].sum()
return cost, tour
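# Usage sketch (assumption: intended for small instances, since insertion is
# O(n^2); helper name is hypothetical):
def _run_insertion_example():
    np.random.seed(0)
    cost, tour = run_insertion(np.random.uniform(size=(10, 2)), 'farthest')
    assert sorted(tour) == list(range(10))  # every node visited exactly once
    print("farthest insertion cost:", cost)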
def run_dynamic_insertion(loc, method):
    n = len(loc[0])
    mask = np.zeros(n, dtype=bool)
    tour = []  # np.empty((0, ), dtype=int)
    # Precompute one distance matrix per time step
    D = []
    for i, val in enumerate(loc):
        D.append(distance_matrix(loc[i], loc[i]))
for i in range(n):
feas = mask == 0
feas_ind = np.flatnonzero(mask == 0)
if method == 'random':
# Order of instance is random so do in order for deterministic results
a = i
elif method == 'nearest':
if i == 0:
a = 0 # order does not matter so first is random
else:
                a = feas_ind[D[i][np.ix_(feas, ~feas)].min(1).argmin()]  # node nearest to any in tour
elif method == 'cheapest':
assert False, "Not yet implemented" # try all and find cheapest insertion cost
elif method == 'farthest':
if i == 0:
a = D[i].max(1).argmax() # Node with farthest distance to any other node
else:
                a = feas_ind[D[i][np.ix_(feas, ~feas)].min(1).argmax()]  # node whose nearest tour node is farthest
mask[a] = True
if len(tour) == 0:
tour = [a]
else:
# Find index with least insert cost
index = [j for j in range(len(tour))]
ind_insert = np.argmin(
_calc_dynamic_insert_cost(
D,
tour,
np.roll(tour, -1),
a,
index
)
)
tour.insert(ind_insert + 1, a)
cost = 0
for i in range(n-1):
cost += D[i][tour[i]][tour[i+1]]
cost += D[n-1][tour[n-1]][tour[0]]
return cost, tour
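# Usage sketch for the dynamic variant (assumption, helper name is hypothetical):
# loc is (time, nodes, 2) with at least `nodes` snapshots, and the cost of the
# i-th tour leg is taken from the distance matrix of snapshot i.
def _run_dynamic_insertion_example():
    rng = np.random.RandomState(0)
    snaps = [rng.uniform(size=(10, 2))]
    for _ in range(9):  # one snapshot per construction step
        snaps.append(np.clip(snaps[-1] + rng.uniform(-0.05, 0.05, size=(10, 2)), 0, 1))
    cost, tour = run_dynamic_insertion(np.stack(snaps), 'farthest')
    assert sorted(tour) == list(range(10))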
def solve_insertion(directory, name, loc, method='random'):
start = time.time()
    cost, tour = run_dynamic_insertion(loc, method)  # dynamic variant: loc is (time, nodes, 2)
duration = time.time() - start
return cost, tour, duration
def calc_batch_pdist(dataset):
if len(dataset.size()) == 4:
diff = (dataset[:, :, :, None, :] - dataset[:, :, None, :, :])
return torch.matmul(diff[:, :, :, :, None, :], diff[:, :, :, :, :, None]).squeeze(-1).squeeze(-1).sqrt()
else:
diff = (dataset[:, :, None, :] - dataset[:, None, :, :])
return torch.matmul(diff[:, :, :, None, :], diff[:, :, :, :, None]).squeeze(-1).squeeze(-1).sqrt()
def nearest_neighbour(dataset, start='first'):
dist = calc_batch_pdist(dataset)
batch_size, graph_size, _ = dataset.size()
total_dist = dataset.new(batch_size).zero_()
if not isinstance(start, torch.Tensor):
if start == 'random':
start = dataset.new().long().new(batch_size).zero_().random_(0, graph_size)
elif start == 'first':
start = dataset.new().long().new(batch_size).zero_()
elif start == 'center':
_, start = dist.mean(2).min(1) # Minimum total distance to others
else:
assert False, "Unknown start: {}".format(start)
current = start
dist_to_startnode = torch.gather(dist, 2, current.view(-1, 1, 1).expand(batch_size, graph_size, 1)).squeeze(2)
tour = [current]
for i in range(graph_size - 1):
        # Mask out current node as an option
dist.scatter_(2, current.view(-1, 1, 1).expand(batch_size, graph_size, 1), np.inf)
nn_dist = torch.gather(dist, 1, current.view(-1, 1, 1).expand(batch_size, 1, graph_size)).squeeze(1)
min_nn_dist, current = nn_dist.min(1)
total_dist += min_nn_dist
tour.append(current)
total_dist += torch.gather(dist_to_startnode, 1, current.view(-1, 1)).squeeze(1)
return total_dist, torch.stack(tour, dim=1)
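# Usage sketch (assumption, helper name is hypothetical): a batch of static
# instances shaped (batch, nodes, 2); returns one greedy tour length and node
# ordering per instance.
def _nearest_neighbour_example():
    torch.manual_seed(0)
    lengths, tours = nearest_neighbour(torch.rand(4, 10, 2), start='first')
    assert tours.size() == (4, 10)
    print("NN tour lengths:", lengths.tolist())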
def dynamic_nearest_neighbour(dataset, start='center'):
dist_all = calc_batch_pdist(dataset)
batch_size, time, graph_size, _ = dataset.size()
total_dist = dataset.new(batch_size).zero_()
dist = dist_all[:, 0, :, :]
if not isinstance(start, torch.Tensor):
if start == 'random':
start = dataset[:, 0, :, :].new().long().new(batch_size).zero_().random_(0, graph_size)
elif start == 'first':
start = dataset[:, 0, :, :].new().long().new(batch_size).zero_()
elif start == 'center':
_, start = dist.mean(2).min(1) # Minimum total distance to others
else:
assert False, "Unknown start: {}".format(start)
current = start
dist_to_startnode = torch.gather(dist, 2, current.view(-1, 1, 1).expand(batch_size, graph_size, 1)).squeeze(2)
tour = [current]
for i in range(graph_size - 1):
        # Mask out current node as an option
dist_all.scatter_(3, current.view(-1, 1, 1, 1).expand(batch_size, time, graph_size, 1), np.inf)
dist = dist_all[:, i, :, :]
nn_dist = torch.gather(dist, 1, current.view(-1, 1, 1).expand(batch_size, 1, graph_size)).squeeze(1)
min_nn_dist, current = nn_dist.min(1)
total_dist += min_nn_dist
tour.append(current)
total_dist += torch.gather(dist_to_startnode, 1, current.view(-1, 1)).squeeze(1)
return total_dist, torch.stack(tour, dim=1)
def solve_all_nn(dataset_path, eval_batch_size=1024, no_cuda=False, dataset_n=None, progress_bar_mininterval=0.1):
import torch
from torch.utils.data import DataLoader
from problems import TSP
from utils import move_to
dataloader = DataLoader(
TSP.make_dataset(filename=dataset_path, num_samples=dataset_n if dataset_n is not None else 1000000),
batch_size=eval_batch_size
)
device = torch.device("cuda:0" if torch.cuda.is_available() and not no_cuda else "cpu")
results = []
for batch in tqdm(dataloader, mininterval=progress_bar_mininterval):
start = time.time()
batch = move_to(batch, device)
lengths, tours = dynamic_nearest_neighbour(batch)
lengths_check, _ = TSP.get_costs(batch, tours)
#assert (torch.abs(lengths - lengths_check.data) < 1e-5).all()
duration = time.time() - start
results.extend(
[(cost.item(), np.trim_zeros(pi.cpu().numpy(), 'b'), duration) for cost, pi in zip(lengths_check, tours)])
return results, eval_batch_size
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--method",
help="Name of the method to evaluate, 'nn', 'gurobi' or '(nearest|random|farthest)_insertion'")
parser.add_argument("--datasets", nargs='+', help="Filename of the dataset(s) to evaluate")
parser.add_argument("-f", action='store_true', help="Set true to overwrite")
parser.add_argument("-o", default=None, help="Name of the results file to write")
parser.add_argument("--cpus", type=int, help="Number of CPUs to use, defaults to all cores")
parser.add_argument('--no_cuda', action='store_true', help='Disable CUDA (only for Tsiligirides)')
parser.add_argument('--disable_cache', action='store_true', help='Disable caching')
parser.add_argument('--max_calc_batch_size', type=int, default=1000, help='Size for subbatches')
parser.add_argument('--progress_bar_mininterval', type=float, default=0.1, help='Minimum interval')
parser.add_argument('-n', type=int, help="Number of instances to process")
parser.add_argument('--offset', type=int, help="Offset where to start processing")
parser.add_argument('--results_dir', default='results', help="Name of results directory")
opts = parser.parse_args()
    # Debug overrides: hardcode method and dataset (remove to respect the CLI arguments above)
    opts.method = 'nn'
    opts.datasets = ["../../data/dynamic_tsp/dynamic_tsp20_validation_seed4321.pkl"]
    # opts.datasets = ["../../data/tsp/tsp20_test_seed1234.pkl"]
    opts.f = True
assert opts.o is None or len(opts.datasets) == 1, "Cannot specify result filename with more than one dataset"
for dataset_path in opts.datasets:
assert os.path.isfile(check_extension(dataset_path)), "File does not exist!"
dataset_basename, ext = os.path.splitext(os.path.split(dataset_path)[-1])
if opts.o is None:
results_dir = os.path.join(opts.results_dir, "tsp", dataset_basename)
os.makedirs(results_dir, exist_ok=True)
out_file = os.path.join(results_dir, "{}{}{}-{}{}".format(
dataset_basename,
"offs{}".format(opts.offset) if opts.offset is not None else "",
"n{}".format(opts.n) if opts.n is not None else "",
opts.method, ext
))
else:
out_file = opts.o
assert opts.f or not os.path.isfile(
out_file), "File already exists! Try running with -f option to overwrite."
match = re.match(r'^([a-z_]+)(\d*)$', opts.method)
assert match
method = match[1]
runs = 1 if match[2] == '' else int(match[2])
if method == "nn":
assert opts.offset is None, "Offset not supported for nearest neighbor"
eval_batch_size = opts.max_calc_batch_size
results, parallelism = solve_all_nn(
dataset_path, eval_batch_size, opts.no_cuda, opts.n,
opts.progress_bar_mininterval
)
elif method in ("gurobi", "gurobigap", "gurobit", "concorde", "lkh") or method[-9:] == 'insertion':
target_dir = os.path.join(results_dir, "{}-{}".format(
dataset_basename,
opts.method
))
assert opts.f or not os.path.isdir(target_dir), \
"Target dir already exists! Try running with -f option to overwrite."
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
# TSP contains single loc array rather than tuple
dataset = [(instance, ) for instance in load_dataset(dataset_path)]
if method == "concorde":
use_multiprocessing = False
executable = os.path.abspath(os.path.join('problems', 'tsp', 'concorde', 'concorde', 'TSP', 'concorde'))
def run_func(args):
return solve_concorde_log(executable, *args, disable_cache=opts.disable_cache)
elif method == "lkh":
use_multiprocessing = False
executable = get_lkh_executable()
def run_func(args):
return solve_lkh_log(executable, *args, runs=runs, disable_cache=opts.disable_cache)
elif method[:6] == "gurobi":
use_multiprocessing = True # We run one thread per instance
def run_func(args):
return solve_gurobi(*args, disable_cache=opts.disable_cache,
timeout=runs if method[6:] == "t" else None,
gap=float(runs) if method[6:] == "gap" else None)
else:
assert method[-9:] == "insertion"
use_multiprocessing = True
def run_func(args):
return solve_insertion(*args, opts.method.split("_")[0])
results, parallelism = run_all_in_pool(
run_func,
target_dir, dataset, opts, use_multiprocessing=use_multiprocessing
)
else:
assert False, "Unknown method: {}".format(opts.method)
        costs, tours, durations = zip(*results)
print("Average cost: {} +- {}".format(np.mean(costs), 2 * np.std(costs) / np.sqrt(len(costs))))
print("Average serial duration: {} +- {}".format(
np.mean(durations), 2 * np.std(durations) / np.sqrt(len(durations))))
print("Average parallel duration: {}".format(np.mean(durations) / parallelism))
print("Calculated total duration: {}".format(timedelta(seconds=int(np.sum(durations) / parallelism))))
save_dataset((results, parallelism), out_file)
| 21,577 | 36.789842 | 120 | py |
GTA-RL | GTA-RL-master/problems/tsp/state_tsp.py | import torch
from typing import NamedTuple
from utils.boolmask import mask_long2bool, mask_long_scatter
class StateTSP(NamedTuple):
# Fixed input
loc: torch.Tensor
dist: torch.Tensor
# If this state contains multiple copies (i.e. beam search) for the same instance, then for memory efficiency
# the loc and dist tensors are not kept multiple times, so we need to use the ids to index the correct rows.
ids: torch.Tensor # Keeps track of original fixed data index of rows
# State
first_a: torch.Tensor
prev_a: torch.Tensor
visited_: torch.Tensor # Keeps track of nodes that have been visited
lengths: torch.Tensor
cur_coord: torch.Tensor
i: torch.Tensor # Keeps track of step
@property
def visited(self):
if self.visited_.dtype == torch.uint8:
return self.visited_
else:
return mask_long2bool(self.visited_, n=self.loc.size(-2))
def __getitem__(self, key):
assert torch.is_tensor(key) or isinstance(key, slice) # If tensor, idx all tensors by this tensor:
return self._replace(
ids=self.ids[key],
first_a=self.first_a[key],
prev_a=self.prev_a[key],
visited_=self.visited_[key],
lengths=self.lengths[key],
cur_coord=self.cur_coord[key] if self.cur_coord is not None else None,
)
@staticmethod
def initialize(input, visited_dtype=torch.uint8, index=-1):
if index != -1:
loc = input[:, index, :, :]
else:
loc = input
batch_size, n_loc, _ = loc.size()
prev_a = torch.zeros(batch_size, 1, dtype=torch.long, device=loc.device)
return StateTSP(
loc=loc,
dist=(loc[:, :, None, :] - loc[:, None, :, :]).norm(p=2, dim=-1),
ids=torch.arange(batch_size, dtype=torch.int64, device=loc.device)[:, None], # Add steps dimension
first_a=prev_a,
prev_a=prev_a,
# Keep visited with depot so we can scatter efficiently (if there is an action for depot)
visited_=( # Visited as mask is easier to understand, as long more memory efficient
torch.zeros(
batch_size, 1, n_loc,
dtype=torch.uint8, device=loc.device
)
if visited_dtype == torch.uint8
else torch.zeros(batch_size, 1, (n_loc + 63) // 64, dtype=torch.int64, device=loc.device) # Ceil
),
lengths=torch.zeros(batch_size, 1, device=loc.device),
cur_coord=None,
i=torch.zeros(1, dtype=torch.int64, device=loc.device) # Vector with length num_steps
)
def update_state(self, input, index=-1):
if index != -1:
loc = input[:, index, :, :]
else:
loc = input
return self._replace(loc=loc,
dist=(loc[:, :, None, :] - loc[:, None, :, :]).norm(p=2, dim=-1))
def get_final_cost(self):
assert self.all_finished()
return self.lengths + (self.loc[self.ids, self.first_a, :] - self.cur_coord).norm(p=2, dim=-1)
def update(self, selected):
# Update the state
prev_a = selected[:, None] # Add dimension for step
# Add the length
# cur_coord = self.loc.gather(
# 1,
# selected[:, None, None].expand(selected.size(0), 1, self.loc.size(-1))
# )[:, 0, :]
cur_coord = self.loc[self.ids, prev_a]
lengths = self.lengths
if self.cur_coord is not None: # Don't add length for first action (selection of start node)
lengths = self.lengths + (cur_coord - self.cur_coord).norm(p=2, dim=-1) # (batch_dim, 1)
# Update should only be called with just 1 parallel step, in which case we can check this way if we should update
first_a = prev_a if self.i.item() == 0 else self.first_a
if self.visited_.dtype == torch.uint8:
# Add one dimension since we write a single value
visited_ = self.visited_.scatter(-1, prev_a[:, :, None], 1)
else:
visited_ = mask_long_scatter(self.visited_, prev_a)
return self._replace(first_a=first_a, prev_a=prev_a, visited_=visited_,
lengths=lengths, cur_coord=cur_coord, i=self.i + 1)
def all_finished(self):
# Exactly n steps
return self.i.item() >= self.loc.size(-2)
def get_current_node(self):
return self.prev_a
def get_mask(self):
return self.visited > 0 # Hacky way to return bool or uint8 depending on pytorch version
def get_nn(self, k=None):
# Insert step dimension
# Nodes already visited get inf so they do not make it
if k is None:
k = self.loc.size(-2) - self.i.item() # Number of remaining
return (self.dist[self.ids, :, :] + self.visited.float()[:, :, None, :] * 1e6).topk(k, dim=-1, largest=False)[1]
def get_nn_current(self, k=None):
assert False, "Currently not implemented, look into which neighbours to use in step 0?"
# Note: if this is called in step 0, it will have k nearest neighbours to node 0, which may not be desired
# so it is probably better to use k = None in the first iteration
if k is None:
k = self.loc.size(-2)
k = min(k, self.loc.size(-2) - self.i.item()) # Number of remaining
return (
self.dist[
self.ids,
self.prev_a
] +
self.visited.float() * 1e6
).topk(k, dim=-1, largest=False)[1]
def construct_solutions(self, actions):
return actions
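# Illustrative sketch (assumption, helper name is hypothetical): rolling a
# StateTSP forward by visiting nodes in index order. After n updates the state
# reports finished, and get_final_cost adds the closing leg back to the first
# visited node.
def _state_tsp_example():
    torch.manual_seed(0)
    state = StateTSP.initialize(torch.rand(2, 5, 2))
    for a in range(5):
        state = state.update(torch.full((2,), a, dtype=torch.long))
    assert state.all_finished()
    print(state.get_final_cost())  # (batch, 1) tour lengths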
| 5,751 | 37.346667 | 121 | py |
GTA-RL | GTA-RL-master/problems/vrp/problem_vrp.py | from torch.utils.data import Dataset
import torch
import os
import pickle
from problems.vrp.state_cvrp import StateCVRP
from problems.vrp.state_sdvrp import StateSDVRP
from utils.beam_search import beam_search
class CVRP(object):
NAME = 'cvrp' # Capacitated Vehicle Routing Problem
VEHICLE_CAPACITY = 1.0 # (w.l.o.g. vehicle capacity is 1, demands should be scaled)
@staticmethod
def get_costs(dataset, pi):
batch_size, graph_size = dataset['demand'].size()
# Check that tours are valid, i.e. contain 0 to n -1
sorted_pi = pi.data.sort(1)[0]
# Sorting it should give all zeros at front and then 1...n
assert (
torch.arange(1, graph_size + 1, out=pi.data.new()).view(1, -1).expand(batch_size, graph_size) ==
sorted_pi[:, -graph_size:]
).all() and (sorted_pi[:, :-graph_size] == 0).all(), "Invalid tour"
# Visiting depot resets capacity so we add demand = -capacity (we make sure it does not become negative)
demand_with_depot = torch.cat(
(
torch.full_like(dataset['demand'][:, :1], -CVRP.VEHICLE_CAPACITY),
dataset['demand']
),
1
)
d = demand_with_depot.gather(1, pi)
used_cap = torch.zeros_like(dataset['demand'][:, 0])
for i in range(pi.size(1)):
used_cap += d[:, i] # This will reset/make capacity negative if i == 0, e.g. depot visited
# Cannot use less than 0
used_cap[used_cap < 0] = 0
assert (used_cap <= CVRP.VEHICLE_CAPACITY + 1e-5).all(), "Used more than capacity"
# Gather dataset in order of tour
        if len(dataset['loc'].size()) == 4:
            # Dynamic case: loc is (batch, time, nodes, 2); for step t of the tour,
            # take the coordinates of the t-th visited node at time snapshot t
            batch, time, nodes, coords = dataset['loc'].size()
            loc_with_depot = torch.cat((dataset['depot'][:, None, None, :].expand((batch, time, 1, coords)), dataset['loc']), 2)
            pi_ordered = pi[:, None, :, None].expand((-1, time, -1, coords))
            d = loc_with_depot.gather(2, pi_ordered).diagonal(dim1=1, dim2=2)
            d = d.permute(0, 2, 1)  # (batch, steps, coords)
else:
loc_with_depot = torch.cat((dataset['depot'][:, None, :], dataset['loc']), 1)
d = loc_with_depot.gather(1, pi[..., None].expand(*pi.size(), loc_with_depot.size(-1)))
# Length is distance (L2-norm of difference) of each next location to its prev and of first and last to depot
return (
(d[:, 1:] - d[:, :-1]).norm(p=2, dim=2).sum(1)
+ (d[:, 0] - dataset['depot']).norm(p=2, dim=1) # Depot to first
+ (d[:, -1] - dataset['depot']).norm(p=2, dim=1) # Last to depot, will be 0 if depot is last
), None
@staticmethod
def make_dataset(*args, **kwargs):
return VRPDataset(*args, **kwargs)
@staticmethod
def make_state(*args, **kwargs):
return StateCVRP.initialize(*args, **kwargs)
@staticmethod
def beam_search(input, beam_size, expand_size=None,
compress_mask=False, model=None, max_calc_batch_size=4096, dynamic=False):
assert model is not None, "Provide model"
if dynamic:
def propose_expansions(beam, fixed):
return model.propose_expansions(
beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
)
return beam_search(dynamic, CVRP, input, model, beam_size, propose_expansions)
else:
fixed = model.precompute_fixed(input)
def propose_expansions(beam):
return model.propose_expansions(
beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
)
state = CVRP.make_state(
input, visited_dtype=torch.int64 if compress_mask else torch.uint8
)
return beam_search(state, beam_size, propose_expansions)
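# Illustrative sketch (assumption, helper name is hypothetical): a valid tour
# for get_costs is a flat index sequence over 0..n (0 = depot) visiting every
# customer exactly once; returning to the depot resets the used capacity.
def _cvrp_cost_example():
    torch.manual_seed(0)
    data = {
        'loc': torch.rand(1, 3, 2),
        'demand': torch.full((1, 3), 0.6),
        'depot': torch.rand(1, 2),
    }
    pi = torch.tensor([[1, 0, 2, 0, 3]])  # serve 1, restock, serve 2, restock, serve 3
    print(CVRP.get_costs(data, pi)[0])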
class SDVRP(object):
NAME = 'sdvrp' # Split Delivery Vehicle Routing Problem
VEHICLE_CAPACITY = 1.0 # (w.l.o.g. vehicle capacity is 1, demands should be scaled)
@staticmethod
def get_costs(dataset, pi):
batch_size, graph_size = dataset['demand'].size()
# Each node can be visited multiple times, but we always deliver as much demand as possible
# We check that at the end all demand has been satisfied
demands = torch.cat(
(
torch.full_like(dataset['demand'][:, :1], -SDVRP.VEHICLE_CAPACITY),
dataset['demand']
),
1
)
rng = torch.arange(batch_size, out=demands.data.new().long())
used_cap = torch.zeros_like(dataset['demand'][:, 0])
a_prev = None
for a in pi.transpose(0, 1):
assert a_prev is None or (demands[((a_prev == 0) & (a == 0)), :] == 0).all(), \
"Cannot visit depot twice if any nonzero demand"
d = torch.min(demands[rng, a], SDVRP.VEHICLE_CAPACITY - used_cap)
demands[rng, a] -= d
used_cap += d
used_cap[a == 0] = 0
a_prev = a
assert (demands == 0).all(), "All demand must be satisfied"
# Gather dataset in order of tour
loc_with_depot = torch.cat((dataset['depot'][:, None, :], dataset['loc']), 1)
d = loc_with_depot.gather(1, pi[..., None].expand(*pi.size(), loc_with_depot.size(-1)))
# Length is distance (L2-norm of difference) of each next location to its prev and of first and last to depot
return (
(d[:, 1:] - d[:, :-1]).norm(p=2, dim=2).sum(1)
+ (d[:, 0] - dataset['depot']).norm(p=2, dim=1) # Depot to first
+ (d[:, -1] - dataset['depot']).norm(p=2, dim=1) # Last to depot, will be 0 if depot is last
), None
@staticmethod
def make_dataset(*args, **kwargs):
return VRPDataset(*args, **kwargs)
@staticmethod
def make_state(*args, **kwargs):
return StateSDVRP.initialize(*args, **kwargs)
@staticmethod
def beam_search(input, beam_size, expand_size=None,
compress_mask=False, model=None, max_calc_batch_size=4096):
assert model is not None, "Provide model"
assert not compress_mask, "SDVRP does not support compression of the mask"
fixed = model.precompute_fixed(input)
def propose_expansions(beam):
return model.propose_expansions(
beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
)
state = SDVRP.make_state(input)
return beam_search(state, beam_size, propose_expansions)
def make_instance(args):
depot, loc, demand, capacity, *args = args
grid_size = 1
if len(args) > 0:
depot_types, customer_types, grid_size = args
return {
'loc': torch.tensor(loc, dtype=torch.float) / grid_size,
'demand': torch.tensor(demand, dtype=torch.float) / capacity,
'depot': torch.tensor(depot, dtype=torch.float) / grid_size
}
class VRPDataset(Dataset):
    def __init__(self, filename=None, size=50, num_samples=1000000, offset=0, distribution=None, is_dynamic=False):
super(VRPDataset, self).__init__()
self.data_set = []
if filename is not None:
assert os.path.splitext(filename)[1] == '.pkl'
with open(filename, 'rb') as f:
data = pickle.load(f)
self.data = [make_instance(args) for args in data[offset:offset+num_samples]]
else:
# From VRP with RL paper https://arxiv.org/abs/1802.04240
CAPACITIES = {
10: 20.,
20: 30.,
40: 35.,
50: 40.,
100: 50.
}
if is_dynamic:
self.data = [
{
'loc': self.get_dynamic_data(size, strength=0.1),
# Uniform 1 - 9, scaled by capacities
'demand': (torch.FloatTensor(size).uniform_(0, 9).int() + 1).float() / CAPACITIES[size],
'depot': torch.FloatTensor(2).uniform_(0, 1)
}
for i in range(num_samples)
]
else:
self.data = [
{
'loc': torch.FloatTensor(size, 2).uniform_(0, 1),
# Uniform 1 - 9, scaled by capacities
'demand': (torch.FloatTensor(size).uniform_(0, 9).int() + 1).float() / CAPACITIES[size],
'depot': torch.FloatTensor(2).uniform_(0, 1)
}
for i in range(num_samples)
]
self.size = len(self.data)
def __len__(self):
return self.size
def __getitem__(self, idx):
return self.data[idx]
    def get_dynamic_data(self, size, strength=0.01):
        total_nodes = []
        nxt = torch.FloatTensor(size, 2).uniform_(0, 1)  # Create initial coordinates
        for i in range(size * 2):  # a CVRP route can take up to 2n steps including depot returns
            total_nodes.append(nxt)
            # Perturb the previous coordinates by at most `strength` and clip to the unit square
            nxt = torch.clip(torch.add(nxt, torch.FloatTensor(size, 2).uniform_(-strength, strength)), 0, 1)
        return torch.stack(total_nodes, dim=0)
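# Illustrative sketch (assumption, helper name is hypothetical): one dynamic
# CVRP sample carries 2*size coordinate snapshots in 'loc' while 'demand' and
# 'depot' stay static.
def _dynamic_vrp_sample_example():
    sample = VRPDataset(size=20, num_samples=1, is_dynamic=True)[0]
    assert sample['loc'].shape == (40, 20, 2)  # (time steps, nodes, xy)
    assert sample['demand'].shape == (20,)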
class DCVRP(CVRP):
@staticmethod
def make_dataset(*args, **kwargs):
kwargs['is_dynamic'] = True
return VRPDataset(*args, **kwargs)
| 9,547 | 37.345382 | 128 | py |
GTA-RL | GTA-RL-master/problems/vrp/state_sdvrp.py | import torch
from typing import NamedTuple
class StateSDVRP(NamedTuple):
# Fixed input
coords: torch.Tensor
demand: torch.Tensor
# If this state contains multiple copies (i.e. beam search) for the same instance, then for memory efficiency
# the coords and demands tensors are not kept multiple times, so we need to use the ids to index the correct rows.
ids: torch.Tensor # Keeps track of original fixed data index of rows
# State
prev_a: torch.Tensor
used_capacity: torch.Tensor
demands_with_depot: torch.Tensor # Keeps track of remaining demands
lengths: torch.Tensor
cur_coord: torch.Tensor
i: torch.Tensor # Keeps track of step
VEHICLE_CAPACITY = 1.0 # Hardcoded
def __getitem__(self, key):
assert torch.is_tensor(key) or isinstance(key, slice) # If tensor, idx all tensors by this tensor:
return self._replace(
ids=self.ids[key],
prev_a=self.prev_a[key],
used_capacity=self.used_capacity[key],
demands_with_depot=self.demands_with_depot[key],
lengths=self.lengths[key],
cur_coord=self.cur_coord[key],
)
@staticmethod
def initialize(input):
depot = input['depot']
loc = input['loc']
demand = input['demand']
batch_size, n_loc, _ = loc.size()
return StateSDVRP(
coords=torch.cat((depot[:, None, :], loc), -2),
demand=demand,
ids=torch.arange(batch_size, dtype=torch.int64, device=loc.device)[:, None], # Add steps dimension
prev_a=torch.zeros(batch_size, 1, dtype=torch.long, device=loc.device),
used_capacity=demand.new_zeros(batch_size, 1),
demands_with_depot=torch.cat((
demand.new_zeros(batch_size, 1),
demand[:, :]
), 1)[:, None, :],
lengths=torch.zeros(batch_size, 1, device=loc.device),
cur_coord=input['depot'][:, None, :], # Add step dimension
i=torch.zeros(1, dtype=torch.int64, device=loc.device) # Vector with length num_steps
)
def get_final_cost(self):
assert self.all_finished()
return self.lengths + (self.coords[self.ids, 0, :] - self.cur_coord).norm(p=2, dim=-1)
def update(self, selected):
assert self.i.size(0) == 1, "Can only update if state represents single step"
# Update the state
selected = selected[:, None] # Add dimension for step
prev_a = selected
# Add the length
cur_coord = self.coords[self.ids, selected]
lengths = self.lengths + (cur_coord - self.cur_coord).norm(p=2, dim=-1) # (batch_dim, 1)
        # Demand of the selected node (the depot row has demand 0)
selected_demand = self.demands_with_depot.gather(-1, prev_a[:, :, None])[:, :, 0]
delivered_demand = torch.min(selected_demand, self.VEHICLE_CAPACITY - self.used_capacity)
# Increase capacity if depot is not visited, otherwise set to 0
#used_capacity = torch.where(selected == 0, 0, self.used_capacity + delivered_demand)
used_capacity = (self.used_capacity + delivered_demand) * (prev_a != 0).float()
# Add one dimension since we write a single value
demands_with_depot = self.demands_with_depot.scatter(
-1,
prev_a[:, :, None],
self.demands_with_depot.gather(-1, prev_a[:, :, None]) - delivered_demand[:, :, None]
)
return self._replace(
prev_a=prev_a, used_capacity=used_capacity, demands_with_depot=demands_with_depot,
lengths=lengths, cur_coord=cur_coord, i=self.i + 1
)
def all_finished(self):
return self.i.item() >= self.demands_with_depot.size(-1) and not (self.demands_with_depot > 0).any()
def get_current_node(self):
return self.prev_a
def get_mask(self):
"""
Gets a (batch_size, n_loc + 1) mask with the feasible actions (0 = depot), depends on already visited and
remaining capacity. 0 = feasible, 1 = infeasible
Forbids to visit depot twice in a row, unless all nodes have been visited
:return:
"""
# Nodes that cannot be visited are already visited or too much demand to be served now
mask_loc = (self.demands_with_depot[:, :, 1:] == 0) | (self.used_capacity[:, :, None] >= self.VEHICLE_CAPACITY)
# Cannot visit the depot if just visited and still unserved nodes
mask_depot = (self.prev_a == 0) & ((mask_loc == 0).int().sum(-1) > 0)
return torch.cat((mask_depot[:, :, None], mask_loc), -1)
def construct_solutions(self, actions):
return actions
| 4,821 | 39.183333 | 119 | py |
GTA-RL | GTA-RL-master/problems/vrp/state_cvrp.py | import torch
from typing import NamedTuple
from utils.boolmask import mask_long2bool, mask_long_scatter
class StateCVRP(NamedTuple):
# Fixed input
coords: torch.Tensor # Depot + loc
demand: torch.Tensor
# If this state contains multiple copies (i.e. beam search) for the same instance, then for memory efficiency
# the coords and demands tensors are not kept multiple times, so we need to use the ids to index the correct rows.
ids: torch.Tensor # Keeps track of original fixed data index of rows
# State
prev_a: torch.Tensor
used_capacity: torch.Tensor
visited_: torch.Tensor # Keeps track of nodes that have been visited
lengths: torch.Tensor
cur_coord: torch.Tensor
i: torch.Tensor # Keeps track of step
VEHICLE_CAPACITY = 1.0 # Hardcoded
@property
def visited(self):
if self.visited_.dtype == torch.uint8:
return self.visited_
else:
return mask_long2bool(self.visited_, n=self.demand.size(-1))
@property
def dist(self):
return (self.coords[:, :, None, :] - self.coords[:, None, :, :]).norm(p=2, dim=-1)
def __getitem__(self, key):
assert torch.is_tensor(key) or isinstance(key, slice) # If tensor, idx all tensors by this tensor:
return self._replace(
ids=self.ids[key],
prev_a=self.prev_a[key],
used_capacity=self.used_capacity[key],
visited_=self.visited_[key],
lengths=self.lengths[key],
cur_coord=self.cur_coord[key],
)
# Warning: cannot override len of NamedTuple, len should be number of fields, not batch size
# def __len__(self):
# return len(self.used_capacity)
@staticmethod
def initialize(input, visited_dtype=torch.uint8, index=-1):
depot = input['depot']
if index != -1:
            loc = input['loc'][:, index, :, :]
else:
loc = input['loc']
demand = input['demand']
batch_size, n_loc, _ = loc.size()
return StateCVRP(
coords=torch.cat((depot[:, None, :], loc), -2),
demand=demand,
ids=torch.arange(batch_size, dtype=torch.int64, device=loc.device)[:, None], # Add steps dimension
prev_a=torch.zeros(batch_size, 1, dtype=torch.long, device=loc.device),
used_capacity=demand.new_zeros(batch_size, 1),
visited_=( # Visited as mask is easier to understand, as long more memory efficient
# Keep visited_ with depot so we can scatter efficiently
torch.zeros(
batch_size, 1, n_loc + 1,
dtype=torch.uint8, device=loc.device
)
if visited_dtype == torch.uint8
else torch.zeros(batch_size, 1, (n_loc + 63) // 64, dtype=torch.int64, device=loc.device) # Ceil
),
lengths=torch.zeros(batch_size, 1, device=loc.device),
cur_coord=input['depot'][:, None, :], # Add step dimension
i=torch.zeros(1, dtype=torch.int64, device=loc.device) # Vector with length num_steps
)
def get_final_cost(self):
assert self.all_finished()
return self.lengths + (self.coords[self.ids, 0, :] - self.cur_coord).norm(p=2, dim=-1)
def update_state(self, input, index=-1):
depot = input['depot']
if index != -1:
            loc = input['loc'][:, index, :, :]
else:
loc = input['loc']
return self._replace(coords=torch.cat((depot[:, None, :], loc), -2))
def update(self, selected):
assert self.i.size(0) == 1, "Can only update if state represents single step"
# Update the state
selected = selected[:, None] # Add dimension for step
prev_a = selected
n_loc = self.demand.size(-1) # Excludes depot
# Add the length
cur_coord = self.coords[self.ids, selected]
# cur_coord = self.coords.gather(
# 1,
# selected[:, None].expand(selected.size(0), 1, self.coords.size(-1))
# )[:, 0, :]
lengths = self.lengths + (cur_coord - self.cur_coord).norm(p=2, dim=-1) # (batch_dim, 1)
        # Demand of the selected node; clamp maps the depot (index 0) onto node 0,
        # but used_capacity is zeroed below for depot visits anyway
        selected_demand = self.demand[self.ids, torch.clamp(prev_a - 1, 0, n_loc - 1)]
# Increase capacity if depot is not visited, otherwise set to 0
#used_capacity = torch.where(selected == 0, 0, self.used_capacity + selected_demand)
used_capacity = (self.used_capacity + selected_demand) * (prev_a != 0).float()
if self.visited_.dtype == torch.uint8:
# Note: here we do not subtract one as we have to scatter so the first column allows scattering depot
# Add one dimension since we write a single value
visited_ = self.visited_.scatter(-1, prev_a[:, :, None], 1)
else:
# This works, will not set anything if prev_a -1 == -1 (depot)
visited_ = mask_long_scatter(self.visited_, prev_a - 1)
return self._replace(
prev_a=prev_a, used_capacity=used_capacity, visited_=visited_,
lengths=lengths, cur_coord=cur_coord, i=self.i + 1
)
def all_finished(self):
return self.i.item() >= self.demand.size(-1) and self.visited.all()
def get_finished(self):
return self.visited.sum(-1) == self.visited.size(-1)
def get_current_node(self):
return self.prev_a
def get_mask(self):
"""
Gets a (batch_size, n_loc + 1) mask with the feasible actions (0 = depot), depends on already visited and
remaining capacity. 0 = feasible, 1 = infeasible
Forbids to visit depot twice in a row, unless all nodes have been visited
:return:
"""
if self.visited_.dtype == torch.uint8:
visited_loc = self.visited_[:, :, 1:]
else:
visited_loc = mask_long2bool(self.visited_, n=self.demand.size(-1))
# For demand steps_dim is inserted by indexing with id, for used_capacity insert node dim for broadcasting
exceeds_cap = (self.demand[self.ids, :] + self.used_capacity[:, :, None] > self.VEHICLE_CAPACITY)
# Nodes that cannot be visited are already visited or too much demand to be served now
mask_loc = visited_loc.to(exceeds_cap.dtype) | exceeds_cap
# Cannot visit the depot if just visited and still unserved nodes
mask_depot = (self.prev_a == 0) & ((mask_loc == 0).int().sum(-1) > 0)
return torch.cat((mask_depot[:, :, None], mask_loc), -1)
def construct_solutions(self, actions):
return actions
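# Illustrative sketch (assumption, helper name is hypothetical): after serving
# one node whose demand fills the vehicle, every remaining customer is masked
# infeasible and only the depot (which resets capacity) stays available.
def _cvrp_mask_example():
    torch.manual_seed(0)
    state = StateCVRP.initialize({
        'depot': torch.rand(1, 2),
        'loc': torch.rand(1, 4, 2),
        'demand': torch.full((1, 4), 0.6),
    })
    state = state.update(torch.tensor([1]))  # serve node 1, using 0.6 capacity
    mask = state.get_mask()
    assert bool(mask[0, 0, 1])         # node 1 already visited
    assert bool(mask[0, 0, 2:].all())  # remaining 0.6 demands no longer fit
    assert not bool(mask[0, 0, 0])     # depot feasible: restock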
| 6,807 | 39.766467 | 118 | py |
GTA-RL | GTA-RL-master/problems/op/op_baseline.py | import argparse
import os
import numpy as np
from utils import run_all_in_pool
from utils.data_utils import check_extension, load_dataset, save_dataset
from subprocess import check_call, check_output
import tempfile
import time
from datetime import timedelta
from problems.op.opga.opevo import run_alg as run_opga_alg
from tqdm import tqdm
import re
MAX_LENGTH_TOL = 1e-5
# Run install_compass.sh to install
def solve_compass(executable, depot, loc, demand, capacity):
with tempfile.TemporaryDirectory() as tempdir:
problem_filename = os.path.join(tempdir, "problem.oplib")
output_filename = os.path.join(tempdir, "output.tour")
param_filename = os.path.join(tempdir, "params.par")
starttime = time.time()
write_oplib(problem_filename, depot, loc, demand, capacity)
params = {"PROBLEM_FILE": problem_filename, "OUTPUT_TOUR_FILE": output_filename}
write_compass_par(param_filename, params)
output = check_output([executable, param_filename])
result = read_oplib(output_filename, n=len(demand))
duration = time.time() - starttime
return result, output, duration
def solve_compass_log(executable, directory, name, depot, loc, prize, max_length, disable_cache=False):
problem_filename = os.path.join(directory, "{}.oplib".format(name))
tour_filename = os.path.join(directory, "{}.tour".format(name))
output_filename = os.path.join(directory, "{}.compass.pkl".format(name))
log_filename = os.path.join(directory, "{}.log".format(name))
try:
# May have already been run
if os.path.isfile(output_filename) and not disable_cache:
tour, duration = load_dataset(output_filename)
else:
write_oplib(problem_filename, depot, loc, prize, max_length, name=name)
with open(log_filename, 'w') as f:
start = time.time()
check_call([executable, '--op', '--op-ea4op', problem_filename, '-o', tour_filename],
stdout=f, stderr=f)
duration = time.time() - start
tour = read_oplib(tour_filename, n=len(prize))
if not calc_op_length(depot, loc, tour) <= max_length:
print("Warning: length exceeds max length:", calc_op_length(depot, loc, tour), max_length)
assert calc_op_length(depot, loc, tour) <= max_length + MAX_LENGTH_TOL, "Tour exceeds max_length!"
save_dataset((tour, duration), output_filename)
return -calc_op_total(prize, tour), tour, duration
except Exception as e:
print("Exception occured")
print(e)
return None
def calc_op_total(prize, tour):
    # Subtract 1 since prize values are 0-indexed while tour nodes are 1-indexed (depot is 0)
assert (np.array(tour) > 0).all(), "Depot cannot be in tour"
assert len(np.unique(tour)) == len(tour), "Tour cannot contain duplicates"
return np.array(prize)[np.array(tour) - 1].sum()
def calc_op_length(depot, loc, tour):
assert len(np.unique(tour)) == len(tour), "Tour cannot contain duplicates"
loc_with_depot = np.vstack((np.array(depot)[None, :], np.array(loc)))
sorted_locs = loc_with_depot[np.concatenate(([0], tour, [0]))]
return np.linalg.norm(sorted_locs[1:] - sorted_locs[:-1], axis=-1).sum()
def write_compass_par(filename, parameters):
default_parameters = { # Use none to include as flag instead of kv
"SPECIAL": None,
"MAX_TRIALS": 10000,
"RUNS": 10,
"TRACE_LEVEL": 1,
"SEED": 0
}
with open(filename, 'w') as f:
for k, v in {**default_parameters, **parameters}.items():
if v is None:
f.write("{}\n".format(k))
else:
f.write("{} = {}\n".format(k, v))
def read_oplib(filename, n):
with open(filename, 'r') as f:
tour = []
dimension = 0
started = False
for line in f:
if started:
loc = int(line)
if loc == -1:
break
tour.append(loc)
if line.startswith("DIMENSION"):
dimension = int(line.split(" ")[-1])
if line.startswith("NODE_SEQUENCE_SECTION"):
started = True
assert len(tour) > 0, "Unexpected length"
tour = np.array(tour).astype(int) - 1 # Subtract 1 as depot is 1 and should be 0
assert tour[0] == 0 # Tour should start with depot
assert tour[-1] != 0 # Tour should not end with depot
return tour[1:].tolist()
def write_oplib(filename, depot, loc, prize, max_length, name="problem"):
with open(filename, 'w') as f:
f.write("\n".join([
"{} : {}".format(k, v)
for k, v in (
("NAME", name),
("TYPE", "OP"),
("DIMENSION", len(loc) + 1),
("COST_LIMIT", int(max_length * 10000000 + 0.5)),
("EDGE_WEIGHT_TYPE", "EUC_2D"),
)
]))
f.write("\n")
f.write("NODE_COORD_SECTION\n")
f.write("\n".join([
"{}\t{}\t{}".format(i + 1, int(x * 10000000 + 0.5), int(y * 10000000 + 0.5)) # oplib does not take floats
#"{}\t{}\t{}".format(i + 1, x, y)
for i, (x, y) in enumerate([depot] + loc)
]))
f.write("\n")
f.write("NODE_SCORE_SECTION\n")
f.write("\n".join([
"{}\t{}".format(i + 1, d)
for i, d in enumerate([0] + prize)
]))
f.write("\n")
f.write("DEPOT_SECTION\n")
f.write("1\n")
f.write("-1\n")
f.write("EOF\n")
def solve_opga(directory, name, depot, loc, prize, max_length, disable_cache=False):
problem_filename = os.path.join(directory, "{}.opga.pkl".format(name))
if os.path.isfile(problem_filename) and not disable_cache:
(prize, tour, duration) = load_dataset(problem_filename)
else:
# 0 = start, 1 = end so add depot twice
start = time.time()
prize, tour, duration = run_opga_alg(
[(*pos, p) for p, pos in zip([0, 0] + prize, [depot, depot] + loc)],
max_length, return_sol=True, verbose=False
)
duration = time.time() - start # Measure clock time
save_dataset((prize, tour, duration), problem_filename)
# First and last node are depot(s), so first node is 2 but should be 1 (as depot is 0) so subtract 1
assert tour[0][3] == 0
assert tour[-1][3] == 1
return -prize, [i - 1 for x, y, p, i, t in tour[1:-1]], duration
def solve_gurobi(directory, name, depot, loc, prize, max_length, disable_cache=False, timeout=None, gap=None):
# Lazy import so we do not need to have gurobi installed to run this script
from problems.op.op_gurobi import solve_euclidian_op as solve_euclidian_op_gurobi
try:
problem_filename = os.path.join(directory, "{}.gurobi{}{}.pkl".format(
name, "" if timeout is None else "t{}".format(timeout), "" if gap is None else "gap{}".format(gap)))
if os.path.isfile(problem_filename) and not disable_cache:
(cost, tour, duration) = load_dataset(problem_filename)
else:
# 0 = start, 1 = end so add depot twice
start = time.time()
cost, tour = solve_euclidian_op_gurobi(
depot, loc, prize, max_length, threads=1, timeout=timeout, gap=gap
)
duration = time.time() - start # Measure clock time
save_dataset((cost, tour, duration), problem_filename)
# First and last node are depot(s), so first node is 2 but should be 1 (as depot is 0) so subtract 1
assert tour[0] == 0
tour = tour[1:]
assert calc_op_length(depot, loc, tour) <= max_length + MAX_LENGTH_TOL, "Tour exceeds max_length!"
total_cost = -calc_op_total(prize, tour)
assert abs(total_cost - cost) <= 1e-4, "Cost is incorrect"
return total_cost, tour, duration
    except Exception as e:
        # For some reason the solver sometimes cannot find a feasible solution.
        # By letting it fail we do not get complete results, but we can retry via the caching mechanism.
        print("Exception occurred")
        print(e)
        return None
def solve_ortools(directory, name, depot, loc, prize, max_length, sec_local_search=0, disable_cache=False):
# Lazy import so we do not require ortools by default
from problems.op.op_ortools import solve_op_ortools
try:
problem_filename = os.path.join(directory, "{}.ortools{}.pkl".format(name, sec_local_search))
if os.path.isfile(problem_filename) and not disable_cache:
objval, tour, duration = load_dataset(problem_filename)
else:
# 0 = start, 1 = end so add depot twice
start = time.time()
objval, tour = solve_op_ortools(depot, loc, prize, max_length, sec_local_search=sec_local_search)
duration = time.time() - start
save_dataset((objval, tour, duration), problem_filename)
assert tour[0] == 0, "Tour must start with depot"
tour = tour[1:]
assert calc_op_length(depot, loc, tour) <= max_length + MAX_LENGTH_TOL, "Tour exceeds max_length!"
assert abs(-calc_op_total(prize, tour) - objval) <= 1e-5, "Cost is incorrect"
return -calc_op_total(prize, tour), tour, duration
    except Exception as e:
        # For some reason, sometimes OR-Tools cannot find a feasible solution.
        # By letting it fail we do not get complete results, but we can retry via the caching mechanism.
        print("Exception occurred")
        print(e)
        return None
def run_all_tsiligirides(
dataset_path, sample, num_samples, eval_batch_size, max_calc_batch_size, no_cuda=False, dataset_n=None,
progress_bar_mininterval=0.1, seed=1234):
import torch
from torch.utils.data import DataLoader
from utils import move_to, sample_many
from problems.op.tsiligirides import op_tsiligirides
from problems.op.problem_op import OP
torch.manual_seed(seed)
dataloader = DataLoader(
OP.make_dataset(filename=dataset_path, num_samples=dataset_n if dataset_n is not None else 1000000),
batch_size=eval_batch_size
)
device = torch.device("cuda:0" if torch.cuda.is_available() and not no_cuda else "cpu")
results = []
for batch in tqdm(dataloader, mininterval=progress_bar_mininterval):
start = time.time()
batch = move_to(batch, device)
with torch.no_grad():
if num_samples * eval_batch_size > max_calc_batch_size:
assert eval_batch_size == 1
assert num_samples % max_calc_batch_size == 0
batch_rep = max_calc_batch_size
iter_rep = num_samples // max_calc_batch_size
else:
batch_rep = num_samples
iter_rep = 1
sequences, costs = sample_many(
lambda inp: (None, op_tsiligirides(inp, sample)),
OP.get_costs,
batch, batch_rep=batch_rep, iter_rep=iter_rep)
duration = time.time() - start
results.extend(
[(cost.item(), np.trim_zeros(pi.cpu().numpy(),'b'), duration) for cost, pi in zip(costs, sequences)])
return results, eval_batch_size
if __name__ == "__main__":
executable = os.path.abspath(os.path.join('problems', 'op', 'compass', 'compass'))
parser = argparse.ArgumentParser()
parser.add_argument("method", help="Name of the method to evaluate, 'compass', 'opga' or 'tsili'")
parser.add_argument("datasets", nargs='+', help="Filename of the dataset(s) to evaluate")
parser.add_argument("-f", action='store_true', help="Set true to overwrite")
parser.add_argument("-o", default=None, help="Name of the results file to write")
parser.add_argument("--cpus", type=int, help="Number of CPUs to use, defaults to all cores")
parser.add_argument('--no_cuda', action='store_true', help='Disable CUDA (only for Tsiligirides)')
parser.add_argument('--disable_cache', action='store_true', help='Disable caching')
parser.add_argument('--max_calc_batch_size', type=int, default=1000, help='Size for subbatches')
parser.add_argument('--progress_bar_mininterval', type=float, default=0.1, help='Minimum interval')
parser.add_argument('-n', type=int, help="Number of instances to process")
parser.add_argument('--offset', type=int, help="Offset where to start processing")
parser.add_argument('--results_dir', default='results', help="Name of results directory")
opts = parser.parse_args()
assert opts.o is None or len(opts.datasets) == 1, "Cannot specify result filename with more than one dataset"
for dataset_path in opts.datasets:
assert os.path.isfile(check_extension(dataset_path)), "File does not exist!"
dataset_basename, ext = os.path.splitext(os.path.split(dataset_path)[-1])
if opts.o is None:
results_dir = os.path.join(opts.results_dir, "op", dataset_basename)
os.makedirs(results_dir, exist_ok=True)
out_file = os.path.join(results_dir, "{}{}{}-{}{}".format(
dataset_basename,
"offs{}".format(opts.offset) if opts.offset is not None else "",
"n{}".format(opts.n) if opts.n is not None else "",
opts.method, ext
))
else:
out_file = opts.o
assert opts.f or not os.path.isfile(
out_file), "File already exists! Try running with -f option to overwrite."
match = re.match(r'^([a-z]+)(\d*)$', opts.method)
assert match
method = match[1]
runs = 1 if match[2] == '' else int(match[2])
if method == "tsili" or method == "tsiligreedy":
assert opts.offset is None, "Offset not supported for Tsiligirides"
if method == "tsiligreedy":
sample = False
num_samples = 1
else:
sample = True
num_samples = runs
eval_batch_size = max(1, opts.max_calc_batch_size // num_samples)
results, parallelism = run_all_tsiligirides(
dataset_path, sample, num_samples, eval_batch_size, opts.max_calc_batch_size, opts.no_cuda, opts.n,
opts.progress_bar_mininterval
)
elif method in ("compass", "opga", "gurobi", "gurobigap", "gurobit", "ortools"):
target_dir = os.path.join(results_dir, "{}-{}".format(
dataset_basename,
opts.method
))
assert opts.f or not os.path.isdir(target_dir), \
"Target dir already exists! Try running with -f option to overwrite."
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
dataset = load_dataset(dataset_path)
if method[:6] == "gurobi":
use_multiprocessing = True # We run one thread per instance
def run_func(args):
return solve_gurobi(*args, disable_cache=opts.disable_cache,
timeout=runs if method[6:] == "t" else None,
gap=float(runs) if method[6:] == "gap" else None)
elif method == "compass":
use_multiprocessing = False
def run_func(args):
return solve_compass_log(executable, *args, disable_cache=opts.disable_cache)
elif method == "opga":
use_multiprocessing = True
def run_func(args):
return solve_opga(*args, disable_cache=opts.disable_cache)
else:
assert method == "ortools"
use_multiprocessing = True
def run_func(args):
return solve_ortools(*args, sec_local_search=runs, disable_cache=opts.disable_cache)
results, parallelism = run_all_in_pool(
run_func,
target_dir, dataset, opts, use_multiprocessing=use_multiprocessing
)
else:
assert False, "Unknown method: {}".format(opts.method)
costs, tours, durations = zip(*results) # Not really costs since they should be negative
print("Average cost: {} +- {}".format(np.mean(costs), 2 * np.std(costs) / np.sqrt(len(costs))))
print("Average serial duration: {} +- {}".format(
np.mean(durations), 2 * np.std(durations) / np.sqrt(len(durations))))
print("Average parallel duration: {}".format(np.mean(durations) / parallelism))
print("Calculated total duration: {}".format(timedelta(seconds=int(np.sum(durations) / parallelism))))
save_dataset((results, parallelism), out_file)
| 16,891 | 41.764557 | 118 | py |
GTA-RL | GTA-RL-master/problems/op/problem_op.py | from torch.utils.data import Dataset
import torch
import os
import pickle
from problems.op.state_op import StateOP
from utils.beam_search import beam_search
class OP(object):
NAME = 'op' # Orienteering problem
@staticmethod
def get_costs(dataset, pi):
if pi.size(-1) == 1: # In case all tours directly return to depot, prevent further problems
assert (pi == 0).all(), "If all length 1 tours, they should be zero"
# Return
return torch.zeros(pi.size(0), dtype=torch.float, device=pi.device), None
# Check that tours are valid, i.e. contain 0 to n -1
sorted_pi = pi.data.sort(1)[0]
# Make sure each node visited once at most (except for depot)
assert ((sorted_pi[:, 1:] == 0) | (sorted_pi[:, 1:] > sorted_pi[:, :-1])).all(), "Duplicates"
prize_with_depot = torch.cat(
(
torch.zeros_like(dataset['prize'][:, :1]),
dataset['prize']
),
1
)
p = prize_with_depot.gather(1, pi)
# Gather dataset in order of tour
loc_with_depot = torch.cat((dataset['depot'][:, None, :], dataset['loc']), 1)
d = loc_with_depot.gather(1, pi[..., None].expand(*pi.size(), loc_with_depot.size(-1)))
length = (
(d[:, 1:] - d[:, :-1]).norm(p=2, dim=-1).sum(1) # Prevent error if len 1 seq
+ (d[:, 0] - dataset['depot']).norm(p=2, dim=-1) # Depot to first
+ (d[:, -1] - dataset['depot']).norm(p=2, dim=-1) # Last to depot, will be 0 if depot is last
)
assert (length <= dataset['max_length'] + 1e-5).all(), \
"Max length exceeded by {}".format((length - dataset['max_length']).max())
# We want to maximize total prize but code minimizes so return negative
return -p.sum(-1), None
@staticmethod
def make_dataset(*args, **kwargs):
return OPDataset(*args, **kwargs)
@staticmethod
def make_state(*args, **kwargs):
return StateOP.initialize(*args, **kwargs)
@staticmethod
def beam_search(input, beam_size, expand_size=None,
compress_mask=False, model=None, max_calc_batch_size=4096):
assert model is not None, "Provide model"
fixed = model.precompute_fixed(input)
def propose_expansions(beam):
return model.propose_expansions(
beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
)
state = OP.make_state(
input, visited_dtype=torch.int64 if compress_mask else torch.uint8
)
return beam_search(state, beam_size, propose_expansions)
def generate_instance(size, prize_type):
# Details see paper
MAX_LENGTHS = {
20: 2.,
50: 3.,
100: 4.
}
loc = torch.FloatTensor(size, 2).uniform_(0, 1)
depot = torch.FloatTensor(2).uniform_(0, 1)
# Methods taken from Fischetti et al. 1998
if prize_type == 'const':
prize = torch.ones(size)
elif prize_type == 'unif':
prize = (1 + torch.randint(0, 100, size=(size, ))) / 100.
else: # Based on distance to depot
assert prize_type == 'dist'
prize_ = (depot[None, :] - loc).norm(p=2, dim=-1)
prize = (1 + (prize_ / prize_.max(dim=-1, keepdim=True)[0] * 99).int()).float() / 100.
return {
'loc': loc,
# Uniform 1 - 9, scaled by capacities
'prize': prize,
'depot': depot,
'max_length': torch.tensor(MAX_LENGTHS[size])
}
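# Illustrative sketch (helper name is hypothetical): the three prize schemes
# above (Fischetti et al., 1998) -- 'const' gives every node prize 1, 'unif'
# draws from {0.01, ..., 1.00}, and 'dist' makes far-from-depot nodes worth more.
def _op_instance_example():
    torch.manual_seed(0)
    inst = generate_instance(20, 'dist')
    assert inst['prize'].min() >= 0.01 and inst['prize'].max() <= 1.0
    assert inst['max_length'].item() == 2.0  # length budget for size 20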
class OPDataset(Dataset):
def __init__(self, filename=None, size=50, num_samples=1000000, offset=0, distribution='const'):
super(OPDataset, self).__init__()
assert distribution is not None, "Data distribution must be specified for OP"
# Currently the distribution can only vary in the type of the prize
prize_type = distribution
self.data_set = []
if filename is not None:
assert os.path.splitext(filename)[1] == '.pkl'
with open(filename, 'rb') as f:
data = pickle.load(f)
self.data = [
{
'loc': torch.FloatTensor(loc),
'prize': torch.FloatTensor(prize),
'depot': torch.FloatTensor(depot),
'max_length': torch.tensor(max_length)
}
for depot, loc, prize, max_length in (data[offset:offset+num_samples])
]
else:
self.data = [
generate_instance(size, prize_type)
for i in range(num_samples)
]
self.size = len(self.data)
def __len__(self):
return self.size
def __getitem__(self, idx):
return self.data[idx]
| 4,855 | 33.197183 | 106 | py |
GTA-RL | GTA-RL-master/problems/op/tsiligirides.py | import torch
from problems.op.state_op import StateOP
def op_tsiligirides(batch, sample=False, power=4.0):
state = StateOP.initialize(batch)
all_a = []
while not state.all_finished():
# Compute scores
mask = state.get_mask()
p = (
(mask[..., 1:] == 0).float() *
state.prize[state.ids, 1:] /
((state.coords[state.ids, 1:, :] - state.cur_coord[:, :, None, :]).norm(p=2, dim=-1) + 1e-6)
) ** power
bestp, besta = p.topk(4, dim=-1)
bestmask = mask[..., 1:].gather(-1, besta)
# If no feasible actions, must go to depot
# mask == 0 means feasible, so if mask == 0 sums to 0 there are no feasible and
# all corresponding ps should be 0, so we need to add a column with a 1 that corresponds
# to selecting the end destination
to_depot = ((bestmask == 0).sum(-1, keepdim=True) == 0).float()
        # bestp should be zero if we have to go to the depot, but due to numerical issues it may not be exactly zero
p_ = torch.cat((to_depot, bestp), -1)
pnorm = p_ / p_.sum(-1, keepdim=True)
if sample:
a = pnorm[:, 0, :].multinomial(1) # Sample action
else:
# greedy
a = pnorm[:, 0, :].max(-1)[1].unsqueeze(-1) # Add 'sampling dimension'
# a == 0 means depot, otherwise subtract one
final_a = torch.cat((torch.zeros_like(besta[..., 0:1]), besta + 1), -1)[:, 0, :].gather(-1, a)
selected = final_a[..., 0] # Squeeze unnecessary sampling dimension
state = state.update(selected)
all_a.append(selected)
return torch.stack(all_a, -1)
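# Usage sketch (assumption, helper name is hypothetical): batch uses the OP dict
# layout from problem_op.py; greedy mode (sample=False) repeatedly picks the best
# of the top-4 prize/distance candidates until the length budget forces a return
# to the depot.
def _tsiligirides_example():
    torch.manual_seed(0)
    batch = {
        'loc': torch.rand(2, 10, 2),
        'prize': torch.rand(2, 10),
        'depot': torch.rand(2, 2),
        'max_length': torch.full((2,), 2.0),
    }
    print(op_tsiligirides(batch, sample=False))  # (batch, steps) node indices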
| 1,672 | 37.906977 | 108 | py |
GTA-RL | GTA-RL-master/problems/op/state_op.py | import torch
from typing import NamedTuple
from utils.boolmask import mask_long2bool, mask_long_scatter
import torch.nn.functional as F
class StateOP(NamedTuple):
# Fixed input
coords: torch.Tensor # Depot + loc
prize: torch.Tensor
# Max length is not a single value, but one for each node indicating max length tour should have when arriving
# at this node, so this is max_length - d(depot, node)
max_length: torch.Tensor
# If this state contains multiple copies (i.e. beam search) for the same instance, then for memory efficiency
# the coords and prizes tensors are not kept multiple times, so we need to use the ids to index the correct rows.
ids: torch.Tensor # Keeps track of original fixed data index of rows
# State
prev_a: torch.Tensor
visited_: torch.Tensor # Keeps track of nodes that have been visited
lengths: torch.Tensor
cur_coord: torch.Tensor
cur_total_prize: torch.Tensor
i: torch.Tensor # Keeps track of step
@property
def visited(self):
if self.visited_.dtype == torch.uint8:
return self.visited_
else:
return mask_long2bool(self.visited_, n=self.coords.size(-2))
@property
def dist(self):
return (self.coords[:, :, None, :] - self.coords[:, None, :, :]).norm(p=2, dim=-1)
def __getitem__(self, key):
assert torch.is_tensor(key) or isinstance(key, slice) # If tensor, idx all tensors by this tensor:
return self._replace(
ids=self.ids[key],
prev_a=self.prev_a[key],
visited_=self.visited_[key],
lengths=self.lengths[key],
cur_coord=self.cur_coord[key],
cur_total_prize=self.cur_total_prize[key],
)
# Warning: cannot override len of NamedTuple, len should be number of fields, not batch size
# def __len__(self):
# return len(self.used_capacity)
@staticmethod
def initialize(input, visited_dtype=torch.uint8):
depot = input['depot']
loc = input['loc']
prize = input['prize']
max_length = input['max_length']
batch_size, n_loc, _ = loc.size()
coords = torch.cat((depot[:, None, :], loc), -2)
return StateOP(
coords=coords,
prize=F.pad(prize, (1, 0), mode='constant', value=0), # add 0 for depot
# max_length is max length allowed when arriving at node, so subtract distance to return to depot
            # Additionally, subtract an epsilon margin for numerical stability
max_length=max_length[:, None] - (depot[:, None, :] - coords).norm(p=2, dim=-1) - 1e-6,
ids=torch.arange(batch_size, dtype=torch.int64, device=loc.device)[:, None], # Add steps dimension
prev_a=torch.zeros(batch_size, 1, dtype=torch.long, device=loc.device),
visited_=( # Visited as mask is easier to understand, as long more memory efficient
# Keep visited_ with depot so we can scatter efficiently (if there is an action for depot)
torch.zeros(
batch_size, 1, n_loc + 1,
dtype=torch.uint8, device=loc.device
)
if visited_dtype == torch.uint8
else torch.zeros(batch_size, 1, (n_loc + 1 + 63) // 64, dtype=torch.int64, device=loc.device) # Ceil
),
lengths=torch.zeros(batch_size, 1, device=loc.device),
cur_coord=input['depot'][:, None, :], # Add step dimension
cur_total_prize=torch.zeros(batch_size, 1, device=loc.device),
i=torch.zeros(1, dtype=torch.int64, device=loc.device) # Vector with length num_steps
)
def get_remaining_length(self):
# max_length[:, 0] is max length arriving at depot so original max_length
return self.max_length[self.ids, 0] - self.lengths
def get_final_cost(self):
assert self.all_finished()
# The cost is the negative of the collected prize since we want to maximize collected prize
return -self.cur_total_prize
def update(self, selected):
assert self.i.size(0) == 1, "Can only update if state represents single step"
# Update the state
selected = selected[:, None] # Add dimension for step
prev_a = selected
# Add the length
cur_coord = self.coords[self.ids, selected]
lengths = self.lengths + (cur_coord - self.cur_coord).norm(p=2, dim=-1) # (batch_dim, 1)
# Add the collected prize
cur_total_prize = self.cur_total_prize + self.prize[self.ids, selected]
if self.visited_.dtype == torch.uint8:
# Note: here we do not subtract one as we have to scatter so the first column allows scattering depot
# Add one dimension since we write a single value
visited_ = self.visited_.scatter(-1, prev_a[:, :, None], 1)
else:
            # This works: with check_unset=False it is allowed to mark the depot visited a second time
visited_ = mask_long_scatter(self.visited_, prev_a, check_unset=False)
return self._replace(
prev_a=prev_a, visited_=visited_,
lengths=lengths, cur_coord=cur_coord, cur_total_prize=cur_total_prize, i=self.i + 1
)
def all_finished(self):
# All must be returned to depot (and at least 1 step since at start also prev_a == 0)
# This is more efficient than checking the mask
return self.i.item() > 0 and (self.prev_a == 0).all()
# return self.visited[:, :, 0].all() # If we have visited the depot we're done
def get_current_node(self):
"""
Returns the current node where 0 is depot, 1...n are nodes
:return: (batch_size, num_steps) tensor with current nodes
"""
return self.prev_a
def get_mask(self):
"""
Gets a (batch_size, n_loc + 1) mask with the feasible actions (0 = depot), depends on already visited and
remaining capacity. 0 = feasible, 1 = infeasible
Forbids to visit depot twice in a row, unless all nodes have been visited
:return:
"""
exceeds_length = (
self.lengths[:, :, None] + (self.coords[self.ids, :, :] - self.cur_coord[:, :, None, :]).norm(p=2, dim=-1)
> self.max_length[self.ids, :]
)
        # Note: this always allows going to the depot, but that should always be suboptimal so it is ok
# Cannot visit if already visited or if length that would be upon arrival is too large to return to depot
# If the depot has already been visited then we cannot visit anymore
visited_ = self.visited.to(exceeds_length.dtype)
mask = visited_ | visited_[:, :, 0:1] | exceeds_length
# Depot can always be visited
# (so we do not hardcode knowledge that this is strictly suboptimal if other options are available)
mask[:, :, 0] = 0
return mask
def construct_solutions(self, actions):
return actions
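# Illustrative sketch (not part of the original file): the mask returned by
# get_mask uses 0 = feasible and 1 = infeasible, with the depot always feasible.
# '_demo_op_mask' is a hypothetical helper name.
def _demo_op_mask(batch_size=2, n_loc=10):
    input = {
        "depot": torch.rand(batch_size, 2),
        "loc": torch.rand(batch_size, n_loc, 2),
        "prize": torch.rand(batch_size, n_loc),
        "max_length": torch.full((batch_size,), 2.0),
    }
    state = StateOP.initialize(input)
    mask = state.get_mask()  # (batch_size, 1, n_loc + 1)
    assert (mask[:, :, 0] == 0).all()  # depot entry is always feasible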
| 7,026 | 42.91875 | 118 | py |
GTA-RL | GTA-RL-master/utils/tensor_functions.py | import torch
def compute_in_batches(f, calc_batch_size, *args, n=None):
"""
Computes memory heavy function f(*args) in batches
:param n: the total number of elements, optional if it cannot be determined as args[0].size(0)
:param f: The function that is computed, should take only tensors as arguments and return tensor or tuple of tensors
:param calc_batch_size: The batch size to use when computing this function
:param args: Tensor arguments with equally sized first batch dimension
:return: f(*args), this should be one or multiple tensors with equally sized first batch dimension
"""
if n is None:
n = args[0].size(0)
n_batches = (n + calc_batch_size - 1) // calc_batch_size # ceil
if n_batches == 1:
return f(*args)
# Run all batches
# all_res = [f(*batch_args) for batch_args in zip(*[torch.chunk(arg, n_batches) for arg in args])]
# We do not use torch.chunk such that it also works for other classes that support slicing
all_res = [f(*(arg[i * calc_batch_size:(i + 1) * calc_batch_size] for arg in args)) for i in range(n_batches)]
# Allow for functions that return None
def safe_cat(chunks, dim=0):
if chunks[0] is None:
assert all(chunk is None for chunk in chunks)
return None
return torch.cat(chunks, dim)
# Depending on whether the function returned a tuple we need to concatenate each element or only the result
if isinstance(all_res[0], tuple):
return tuple(safe_cat(res_chunks, 0) for res_chunks in zip(*all_res))
return safe_cat(all_res, 0)
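# Illustrative sketch (not part of the original file): computing a pairwise
# distance matrix in row chunks to bound peak memory; '_demo_batched_dist' is a
# hypothetical helper name.
def _demo_batched_dist(x, y, calc_batch_size=1000):
    # Each call of f receives a chunk of x (sliced on the batch dimension)
    # and compares it against the full y
    return compute_in_batches(
        lambda xb: (xb[:, None, :] - y[None, :, :]).norm(p=2, dim=-1),
        calc_batch_size,
        x,
    )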
| 1,608 | 44.971429 | 120 | py |
GTA-RL | GTA-RL-master/utils/monkey_patch.py | import torch
from itertools import chain
from collections import defaultdict
from collections.abc import Iterable  # Iterable was removed from collections in Python 3.10
from copy import deepcopy
def load_state_dict(self, state_dict):
"""Loads the optimizer state.
Arguments:
state_dict (dict): optimizer state. Should be an object returned
from a call to :meth:`state_dict`.
"""
# deepcopy, to be consistent with module API
state_dict = deepcopy(state_dict)
# Validate the state_dict
groups = self.param_groups
saved_groups = state_dict['param_groups']
if len(groups) != len(saved_groups):
raise ValueError("loaded state dict has a different number of "
"parameter groups")
param_lens = (len(g['params']) for g in groups)
saved_lens = (len(g['params']) for g in saved_groups)
if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
raise ValueError("loaded state dict contains a parameter group "
"that doesn't match the size of optimizer's group")
# Update the state
id_map = {old_id: p for old_id, p in
zip(chain(*(g['params'] for g in saved_groups)),
chain(*(g['params'] for g in groups)))}
def cast(param, value):
"""Make a deep copy of value, casting all tensors to device of param."""
if torch.is_tensor(value):
# Floating-point types are a bit special here. They are the only ones
# that are assumed to always match the type of params.
if any(tp in type(param.data).__name__ for tp in {'Half', 'Float', 'Double'}):
value = value.type_as(param.data)
value = value.to(param.device)
return value
elif isinstance(value, dict):
return {k: cast(param, v) for k, v in value.items()}
elif isinstance(value, Iterable):
return type(value)(cast(param, v) for v in value)
else:
return value
# Copy state assigned to params (and cast tensors to appropriate types).
# State that is not assigned to params is copied as is (needed for
# backward compatibility).
state = defaultdict(dict)
for k, v in state_dict['state'].items():
if k in id_map:
param = id_map[k]
state[param] = cast(param, v)
else:
state[k] = v
# Update parameter groups, setting their 'params' value
def update_group(group, new_group):
new_group['params'] = group['params']
return new_group
param_groups = [
update_group(g, ng) for g, ng in zip(groups, saved_groups)]
self.__setstate__({'state': state, 'param_groups': param_groups})
torch.optim.Optimizer.load_state_dict = load_state_dict | 2,734 | 38.071429 | 90 | py |
GTA-RL | GTA-RL-master/utils/functions.py | import warnings
import torch
import numpy as np
import os
import json
from tqdm import tqdm
from multiprocessing.dummy import Pool as ThreadPool
from multiprocessing import Pool
import torch.nn.functional as F
def load_problem(name):
from problems import TSP, DTSP, CVRP, DCVRP, SDVRP, OP, PCTSPDet, PCTSPStoch
problem = {
'tsp': TSP,
'dynamic_tsp' : DTSP,
'cvrp': CVRP,
'dynamic_cvrp' : DCVRP,
'sdvrp': SDVRP,
'op': OP,
'pctsp_det': PCTSPDet,
'pctsp_stoch': PCTSPStoch,
}.get(name, None)
assert problem is not None, "Currently unsupported problem: {}!".format(name)
return problem
def torch_load_cpu(load_path):
return torch.load(load_path, map_location=lambda storage, loc: storage) # Load on CPU
def move_to(var, device):
if isinstance(var, dict):
return {k: move_to(v, device) for k, v in var.items()}
return var.to(device)
def _load_model_file(load_path, model):
"""Loads the model with parameters from the file and returns optimizer state dict if it is in the file"""
# Load the model parameters from a saved state
load_optimizer_state_dict = None
print(' [*] Loading model from {}'.format(load_path))
load_data = torch.load(
os.path.join(
os.getcwd(),
load_path
), map_location=lambda storage, loc: storage)
if isinstance(load_data, dict):
load_optimizer_state_dict = load_data.get('optimizer', None)
load_model_state_dict = load_data.get('model', load_data)
else:
load_model_state_dict = load_data.state_dict()
state_dict = model.state_dict()
state_dict.update(load_model_state_dict)
model.load_state_dict(state_dict)
return model, load_optimizer_state_dict
def load_args(filename):
with open(filename, 'r') as f:
args = json.load(f)
# Backwards compatibility
if 'data_distribution' not in args:
args['data_distribution'] = None
probl, *dist = args['problem'].split("_")
if probl == "op":
args['problem'] = probl
args['data_distribution'] = dist[0]
return args
def load_model(path, epoch=None):
from nets.attention_model import AttentionModel
from nets.st_attention_model import StAttentionModel
from nets.pointer_network import PointerNetwork
if os.path.isfile(path):
model_filename = path
path = os.path.dirname(model_filename)
elif os.path.isdir(path):
if epoch is None:
epoch = max(
int(os.path.splitext(filename)[0].split("-")[1])
for filename in os.listdir(path)
if os.path.splitext(filename)[1] == '.pt'
)
model_filename = os.path.join(path, 'epoch-{}.pt'.format(epoch))
else:
assert False, "{} is not a valid directory or file".format(path)
args = load_args(os.path.join(path, 'args.json'))
problem = load_problem(args['problem'])
model_class = {
'attention': AttentionModel,
'st_attention': StAttentionModel,
'pointer': PointerNetwork
}.get(args.get('model', 'attention'), None)
    assert model_class is not None, "Unknown model: {}".format(args.get('model'))
model = model_class(
args['embedding_dim'],
args['hidden_dim'],
problem,
n_encode_layers=args['n_encode_layers'],
mask_inner=True,
mask_logits=True,
normalization=args['normalization'],
tanh_clipping=args['tanh_clipping'],
checkpoint_encoder=args.get('checkpoint_encoder', False),
shrink_size=args.get('shrink_size', None)
)
# Overwrite model parameters by parameters to load
load_data = torch_load_cpu(model_filename)
model.load_state_dict({**model.state_dict(), **load_data.get('model', {})})
model, *_ = _load_model_file(model_filename, model)
model.eval() # Put in eval mode
return model, args
def parse_softmax_temperature(raw_temp):
# Load from file
if os.path.isfile(raw_temp):
return np.loadtxt(raw_temp)[-1, 0]
return float(raw_temp)
def run_all_in_pool(func, directory, dataset, opts, use_multiprocessing=True):
# # Test
# res = func((directory, 'test', *dataset[0]))
# return [res]
num_cpus = os.cpu_count() if opts.cpus is None else opts.cpus
w = len(str(len(dataset) - 1))
offset = getattr(opts, 'offset', None)
if offset is None:
offset = 0
ds = dataset[offset:(offset + opts.n if opts.n is not None else len(dataset))]
pool_cls = (Pool if use_multiprocessing and num_cpus > 1 else ThreadPool)
with pool_cls(num_cpus) as pool:
results = list(tqdm(pool.imap(
func,
[
(
directory,
str(i + offset).zfill(w),
*problem
)
for i, problem in enumerate(ds)
]
), total=len(ds), mininterval=opts.progress_bar_mininterval))
failed = [str(i + offset) for i, res in enumerate(results) if res is None]
assert len(failed) == 0, "Some instances failed: {}".format(" ".join(failed))
return results, num_cpus
def do_batch_rep(v, n):
if isinstance(v, dict):
return {k: do_batch_rep(v_, n) for k, v_ in v.items()}
elif isinstance(v, list):
return [do_batch_rep(v_, n) for v_ in v]
elif isinstance(v, tuple):
return tuple(do_batch_rep(v_, n) for v_ in v)
return v[None, ...].expand(n, *v.size()).contiguous().view(-1, *v.size()[1:])
def sample_many(inner_func, get_cost_func, input, batch_rep=1, iter_rep=1):
"""
:param input: (batch_size, graph_size, node_dim) input node features
:return:
"""
input = do_batch_rep(input, batch_rep)
costs = []
pis = []
for i in range(iter_rep):
_log_p, pi = inner_func(input)
# pi.view(-1, batch_rep, pi.size(-1))
cost, mask = get_cost_func(input, pi)
costs.append(cost.view(batch_rep, -1).t())
pis.append(pi.view(batch_rep, -1, pi.size(-1)).transpose(0, 1))
max_length = max(pi.size(-1) for pi in pis)
# (batch_size * batch_rep, iter_rep, max_length) => (batch_size, batch_rep * iter_rep, max_length)
pis = torch.cat(
[F.pad(pi, (0, max_length - pi.size(-1))) for pi in pis],
1
) # .view(embeddings.size(0), batch_rep * iter_rep, max_length)
costs = torch.cat(costs, 1)
# (batch_size)
mincosts, argmincosts = costs.min(-1)
# (batch_size, minlength)
minpis = pis[torch.arange(pis.size(0), out=argmincosts.new()), argmincosts]
return minpis, mincosts
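# Illustrative sketch (not part of the original file): a toy inner function that
# emits random tours, showing how sample_many keeps the cheapest of
# batch_rep * iter_rep rollouts per instance. Names prefixed with '_toy' are
# hypothetical.
def _toy_inner(inp):
    bs, n, _ = inp.size()
    pi = torch.stack([torch.randperm(n) for _ in range(bs)])
    return None, pi  # (log_p, pi); log_p is not used by sample_many
def _toy_cost(inp, pi):
    d = inp.gather(1, pi[..., None].expand_as(inp))  # coordinates in visit order
    return (d - d.roll(1, dims=1)).norm(p=2, dim=-1).sum(-1), None  # closed tour length
# minpis, mincosts = sample_many(_toy_inner, _toy_cost, torch.rand(4, 10, 2), batch_rep=16)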
| 6,679 | 30.214953 | 109 | py |
GTA-RL | GTA-RL-master/utils/boolmask.py | import torch
import torch.nn.functional as F
def _pad_mask(mask):
# By taking -size % 8, we get 0 if exactly divisible by 8
# and required padding otherwise (i.e. -1 % 8 = 7 pad)
pad = -mask.size(-1) % 8
if pad != 0:
mask = F.pad(mask, [0, pad])
return mask, mask.size(-1) // 8
def _mask_bool2byte(mask):
assert mask.dtype == torch.uint8
# assert (mask <= 1).all() # Precondition, disabled for efficiency
mask, d = _pad_mask(mask)
return (mask.view(*mask.size()[:-1], d, 8) << torch.arange(8, out=mask.new())).sum(-1, dtype=torch.uint8)
def _mask_byte2long(mask):
assert mask.dtype == torch.uint8
mask, d = _pad_mask(mask)
# Note this corresponds to a temporary factor 8
# memory overhead by converting to long before summing
# Alternatively, aggregate using for loop
return (mask.view(*mask.size()[:-1], d, 8).long() << (torch.arange(8, dtype=torch.int64, device=mask.device) * 8)).sum(-1)
def mask_bool2long(mask):
assert mask.dtype == torch.uint8
return _mask_byte2long(_mask_bool2byte(mask))
def _mask_long2byte(mask, n=None):
if n is None:
n = 8 * mask.size(-1)
return (mask[..., None] >> (torch.arange(8, out=mask.new()) * 8))[..., :n].to(torch.uint8).view(*mask.size()[:-1], -1)[..., :n]
def _mask_byte2bool(mask, n=None):
if n is None:
n = 8 * mask.size(-1)
return (mask[..., None] & (mask.new_ones(8) << torch.arange(8, out=mask.new()) * 1)).view(*mask.size()[:-1], -1)[..., :n] > 0
def mask_long2bool(mask, n=None):
assert mask.dtype == torch.int64
return _mask_byte2bool(_mask_long2byte(mask), n=n)
def mask_long_scatter(mask, values, check_unset=True):
"""
Sets values in mask in dimension -1 with arbitrary batch dimensions
If values contains -1, nothing is set
Note: does not work for setting multiple values at once (like normal scatter)
"""
assert mask.size()[:-1] == values.size()
rng = torch.arange(mask.size(-1), out=mask.new())
    values_ = values[..., None]  # Need to broadcast up to mask dim
# This indicates in which value of the mask a bit should be set
where = (values_ >= (rng * 64)) & (values_ < ((rng + 1) * 64))
# Optional: check that bit is not already set
assert not (check_unset and ((mask & (where.long() << (values_ % 64))) > 0).any())
# Set bit by shifting a 1 to the correct position
# (% not strictly necessary as bitshift is cyclic)
# since where is 0 if no value needs to be set, the bitshift has no effect
return mask | (where.long() << (values_ % 64))
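# Illustrative round trip (not part of the original file): pack a boolean mask
# into int64 words and recover it; '_demo_boolmask' is a hypothetical helper name.
def _demo_boolmask():
    mask = (torch.rand(2, 100) > 0.5).to(torch.uint8)
    packed = mask_bool2long(mask)  # (2, 2) int64, 64 bits per word
    assert (mask_long2bool(packed, n=100).to(torch.uint8) == mask).all()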
| 2,588 | 36.521739 | 131 | py |
GTA-RL | GTA-RL-master/utils/lexsort.py | import torch
import numpy as np
def torch_lexsort(keys, dim=-1):
if keys[0].is_cuda:
return _torch_lexsort_cuda(keys, dim)
else:
# Use numpy lex sort
return torch.from_numpy(np.lexsort([k.numpy() for k in keys], axis=dim))
def _torch_lexsort_cuda(keys, dim=-1):
"""
Function calculates a lexicographical sort order on GPU, similar to np.lexsort
Relies heavily on undocumented behavior of torch.sort, namely that when sorting more than
2048 entries in the sorting dim, it performs a sort using Thrust and it uses a stable sort
https://github.com/pytorch/pytorch/blob/695fd981924bd805704ecb5ccd67de17c56d7308/aten/src/THC/generic/THCTensorSort.cu#L330
"""
MIN_NUMEL_STABLE_SORT = 2049 # Minimum number of elements for stable sort
# Swap axis such that sort dim is last and reshape all other dims to a single (batch) dimension
reordered_keys = tuple(key.transpose(dim, -1).contiguous() for key in keys)
    flat_keys = tuple(key.view(-1) for key in reordered_keys)
d = keys[0].size(dim) # Sort dimension size
numel = flat_keys[0].numel()
batch_size = numel // d
batch_key = torch.arange(batch_size, dtype=torch.int64, device=keys[0].device)[:, None].repeat(1, d).view(-1)
flat_keys = flat_keys + (batch_key,)
    # We rely on the undocumented behavior that the sort is stable provided that at least
    # MIN_NUMEL_STABLE_SORT elements are sorted, so replicate the data if there are fewer
if numel < MIN_NUMEL_STABLE_SORT:
n_rep = (MIN_NUMEL_STABLE_SORT + numel - 1) // numel # Ceil
rep_key = torch.arange(n_rep, dtype=torch.int64, device=keys[0].device)[:, None].repeat(1, numel).view(-1)
flat_keys = tuple(k.repeat(n_rep) for k in flat_keys) + (rep_key,)
idx = None # Identity sorting initially
for k in flat_keys:
if idx is None:
_, idx = k.sort(-1)
else:
# Order data according to idx and then apply
# found ordering to current idx (so permutation of permutation)
# such that we can order the next key according to the current sorting order
_, idx_ = k[idx].sort(-1)
idx = idx[idx_]
# In the end gather only numel and strip of extra sort key
if numel < MIN_NUMEL_STABLE_SORT:
idx = idx[:numel]
# Get only numel (if we have replicated), swap axis back and shape results
return idx[:numel].view(*reordered_keys[0].size()).transpose(dim, -1) % d
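# Illustrative sketch (not part of the original file): np.lexsort semantics,
# i.e. the last key is the primary sort key; '_demo_lexsort' is hypothetical.
def _demo_lexsort():
    primary = torch.tensor([1, 0, 0, 1])
    secondary = torch.tensor([3, 2, 1, 0])
    idx = torch_lexsort((secondary, primary))  # sort by primary, break ties by secondary
    return idx  # expected: tensor([2, 1, 3, 0])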
| 2,382 | 41.553571 | 127 | py |
GTA-RL | GTA-RL-master/utils/beam_search.py | import time
import torch
from typing import NamedTuple
from utils.lexsort import torch_lexsort
def beam_search(dynamic, *args, **kwargs):
if dynamic:
beams, final_state = _dynamic_beam_search(*args, **kwargs)
else:
beams, final_state = _beam_search(*args, **kwargs)
return get_beam_search_results(beams, final_state)
def get_beam_search_results(beams, final_state):
beam = beams[-1] # Final beam
if final_state is None:
return None, None, None, None, beam.batch_size
# First state has no actions/parents and should be omitted when backtracking
actions = [beam.action for beam in beams[1:]]
parents = [beam.parent for beam in beams[1:]]
solutions = final_state.construct_solutions(backtrack(parents, actions))
return beam.score, solutions, final_state.get_final_cost()[:, 0], final_state.ids.view(-1), beam.batch_size
def _dynamic_beam_search(problem, input, model, beam_size, propose_expansions=None,
keep_states=False):
i = 0
state = problem.make_state(input, index=i)
beam = BatchBeam.initialize(state)
embeddings, _ = model.embedder(model._init_embed(input))
# Initial state
beams = [beam if keep_states else beam.clear_state()]
# Perform decoding steps
while not beam.all_finished():
beam = beam.update_state(input, i)
fixed = model._precompute(embeddings[:, i, :, :])
# Use the model to propose and score expansions
parent, action, score = beam.propose_expansions() if propose_expansions is None else propose_expansions(beam, fixed)
if parent is None:
return beams, None
# Expand and update the state according to the selected actions
beam = beam.expand(parent, action, score=score)
# Get topk
beam = beam.topk(beam_size)
# Collect output of step
beams.append(beam if keep_states else beam.clear_state())
i += 1
# Return the final state separately since beams may not keep state
return beams, beam.state
def _beam_search(state, beam_size, propose_expansions=None,
keep_states=False):
beam = BatchBeam.initialize(state)
# Initial state
beams = [beam if keep_states else beam.clear_state()]
# Perform decoding steps
while not beam.all_finished():
# Use the model to propose and score expansions
parent, action, score = beam.propose_expansions() if propose_expansions is None else propose_expansions(beam)
if parent is None:
return beams, None
# Expand and update the state according to the selected actions
beam = beam.expand(parent, action, score=score)
# Get topk
beam = beam.topk(beam_size)
# Collect output of step
beams.append(beam if keep_states else beam.clear_state())
# Return the final state separately since beams may not keep state
return beams, beam.state
class BatchBeam(NamedTuple):
"""
Class that keeps track of a beam for beam search in batch mode.
Since the beam size of different entries in the batch may vary, the tensors are not (batch_size, beam_size, ...)
but rather (sum_i beam_size_i, ...), i.e. flattened. This makes some operations a bit cumbersome.
"""
score: torch.Tensor # Current heuristic score of each entry in beam (used to select most promising)
state: None # To track the state
parent: torch.Tensor
action: torch.Tensor
batch_size: int # Can be used for optimizations if batch_size = 1
device: None # Track on which device
# Indicates for each row to which batch it belongs (0, 0, 0, 1, 1, 2, ...), managed by state
@property
def ids(self):
return self.state.ids.view(-1) # Need to flat as state has steps dimension
def __getitem__(self, key):
assert torch.is_tensor(key) or isinstance(key, slice) # If tensor, idx all tensors by this tensor:
return self._replace(
# ids=self.ids[key],
score=self.score[key] if self.score is not None else None,
state=self.state[key],
parent=self.parent[key] if self.parent is not None else None,
action=self.action[key] if self.action is not None else None
)
# Do not use __len__ since this is used by namedtuple internally and should be number of fields
# def __len__(self):
# return len(self.ids)
@staticmethod
def initialize(state):
batch_size = len(state.ids)
device = state.ids.device
return BatchBeam(
score=torch.zeros(batch_size, dtype=torch.float, device=device),
state=state,
parent=None,
action=None,
batch_size=batch_size,
device=device
)
def propose_expansions(self):
mask = self.state.get_mask()
# Mask always contains a feasible action
expansions = torch.nonzero(mask[:, 0, :] == 0)
parent, action = torch.unbind(expansions, -1)
return parent, action, None
def expand(self, parent, action, score=None):
return self._replace(
score=score, # The score is cleared upon expanding as it is no longer valid, or it must be provided
state=self.state[parent].update(action), # Pass ids since we replicated state
parent=parent,
action=action
)
def update_state(self, input, index):
new_state = self.state.update_state(input=input, index=index)
return self._replace(state=new_state)
def topk(self, k):
idx_topk = segment_topk_idx(self.score, k, self.ids)
return self[idx_topk]
def all_finished(self):
return self.state.all_finished()
def cpu(self):
return self.to(torch.device('cpu'))
def to(self, device):
if device == self.device:
return self
return self._replace(
score=self.score.to(device) if self.score is not None else None,
state=self.state.to(device),
parent=self.parent.to(device) if self.parent is not None else None,
action=self.action.to(device) if self.action is not None else None
)
def clear_state(self):
return self._replace(state=None)
def size(self):
return self.state.ids.size(0)
def segment_topk_idx(x, k, ids):
"""
Finds the topk per segment of data x given segment ids (0, 0, 0, 1, 1, 2, ...).
    Note that there may be fewer than k elements in a segment, so the length of the returned index can vary.
x[result], ids[result] gives the sorted elements per segment as well as corresponding segment ids after sorting.
:param x:
:param k:
:param ids:
:return:
"""
assert x.dim() == 1
assert ids.dim() == 1
# Since we may have varying beam size per batch entry we cannot reshape to (batch_size, beam_size)
# And use default topk along dim -1, so we have to be creative
# Now we have to get the topk per segment which is really annoying :(
# we use lexsort on (ids, score), create array with offset per id
# offsets[ids] then gives offsets repeated and only keep for which arange(len) < offsets + k
splits_ = torch.nonzero(ids[1:] - ids[:-1])
if len(splits_) == 0: # Only one group
_, idx_topk = x.topk(min(k, x.size(0)))
return idx_topk
splits = torch.cat((ids.new_tensor([0]), splits_[:, 0] + 1))
# Make a new array in which we store for each id the offset (start) of the group
# This way ids does not need to be increasing or adjacent, as long as each group is a single range
group_offsets = splits.new_zeros((splits.max() + 1,))
group_offsets[ids[splits]] = splits
offsets = group_offsets[ids] # Look up offsets based on ids, effectively repeating for the repetitions per id
# We want topk so need to sort x descending so sort -x (be careful with unsigned data type!)
idx_sorted = torch_lexsort((-(x if x.dtype != torch.uint8 else x.int()).detach(), ids))
# This will filter first k per group (example k = 2)
# ids = [0, 0, 0, 1, 1, 1, 1, 2]
# splits = [0, 3, 7]
# offsets = [0, 0, 0, 3, 3, 3, 3, 7]
# offs+2 = [2, 2, 2, 5, 5, 5, 5, 9]
# arange = [0, 1, 2, 3, 4, 5, 6, 7]
# filter = [1, 1, 0, 1, 1, 0, 0, 1]
# Use filter to get only topk of sorting idx
return idx_sorted[torch.arange(ids.size(0), out=ids.new()) < offsets + k]
def backtrack(parents, actions):
# Now backtrack to find aligned action sequences in reversed order
cur_parent = parents[-1]
reversed_aligned_sequences = [actions[-1]]
for parent, sequence in reversed(list(zip(parents[:-1], actions[:-1]))):
reversed_aligned_sequences.append(sequence.gather(-1, cur_parent))
cur_parent = parent.gather(-1, cur_parent)
return torch.stack(list(reversed(reversed_aligned_sequences)), -1)
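# Illustrative worked example (not part of the original file): with a beam of
# width 2 and two steps, row 0 of the final beam was expanded from row 1 of the
# previous beam, so backtracking picks up that row's earlier action:
# parents = [torch.tensor([0, 0]), torch.tensor([1, 0])]
# actions = [torch.tensor([10, 20]), torch.tensor([30, 40])]
# backtrack(parents, actions)  # -> tensor([[20, 30], [10, 40]])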
class CachedLookup(object):
def __init__(self, data):
self.orig = data
self.key = None
self.current = None
def __getitem__(self, key):
assert not isinstance(key, slice), "CachedLookup does not support slicing, " \
"you can slice the result of an index operation instead"
assert torch.is_tensor(key) # If tensor, idx all tensors by this tensor:
if self.key is None:
self.key = key
self.current = self.orig[key]
elif len(key) != len(self.key) or (key != self.key).any():
self.key = key
self.current = self.orig[key]
return self.current
| 9,624 | 35.877395 | 124 | py |
just-ask | just-ask-main/main_howtovqa.py | import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from transformers import get_cosine_schedule_with_warmup
import numpy as np
import random
import os
import pickle
import logging
from args import get_args
from model.multimodal_transformer import MMT_VideoQA
from loss import Contrastive_Loss
from data.howtovqa_loader import HowToVQA_Dataset, howtovqa_collate_fn
from data.webvidvqa_loader import WebVidVQA_Dataset
from train.train_howtovqa import train_howtovqa, eval_howtovqa
from transformers import DistilBertTokenizer
# args, logging
args = get_args()
assert args.checkpoint_dir
if not (os.path.isdir(args.save_dir)):
os.mkdir(os.path.join(args.save_dir))
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s"
)
logFormatter = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
rootLogger = logging.getLogger()
fileHandler = logging.FileHandler(os.path.join(args.save_dir, "stdout.log"), "w+")
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
logging.info(args)
# set random seeds
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
# Model
model = MMT_VideoQA(
feature_dim=args.feature_dim,
word_dim=args.word_dim,
N=args.n_layers,
d_model=args.embd_dim,
d_ff=args.ff_dim,
h=args.n_heads,
dropout=args.dropout,
T=args.max_feats,
Q=args.qmax_words,
baseline=args.baseline,
)
model = nn.DataParallel(model)
if args.pretrain_path != "":
model.load_state_dict(torch.load(args.pretrain_path))
logging.info(f"Loaded checkpoint {args.pretrain_path}")
model.cuda()
logging.info("Using {} GPUs".format(torch.cuda.device_count()))
logging.info(
f"Nb of trainable params: {sum(p.numel() for p in model.parameters() if p.requires_grad)}"
)
# Load captions, dataloaders
bert_tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
if args.dataset == "howtovqa":
with open(args.caption_path, "rb") as caption_file:
caption = pickle.load(caption_file)
logging.info("Pickle loaded")
trainset = HowToVQA_Dataset(
csv_path=args.train_csv_path,
caption=caption,
features_path=args.features_path,
qmax_words=args.qmax_words,
amax_words=args.amax_words,
train=True,
n_pair=args.n_pair,
bert_tokenizer=bert_tokenizer,
max_feats=args.max_feats,
)
train_loader = DataLoader(
trainset,
batch_size=args.batch_size,
num_workers=args.num_thread_reader,
shuffle=True,
drop_last=True,
collate_fn=howtovqa_collate_fn,
)
valset = HowToVQA_Dataset(
csv_path=args.val_csv_path,
caption=caption,
features_path=args.features_path,
qmax_words=args.qmax_words,
amax_words=args.amax_words,
train=False,
n_pair=args.n_pair,
bert_tokenizer=bert_tokenizer,
max_feats=args.max_feats,
)
val_loader = DataLoader(
valset,
batch_size=args.batch_size_val,
num_workers=args.num_thread_reader,
shuffle=False,
drop_last=False,
collate_fn=howtovqa_collate_fn,
)
elif args.dataset == "webvidvqa":
with open(args.webvidvqa_caption_path, "rb") as caption_file:
caption = pickle.load(caption_file)
logging.info("Pickle loaded")
trainset = WebVidVQA_Dataset(
csv_path=args.train_csv_path,
caption=caption,
features_path=args.features_path,
qmax_words=args.qmax_words,
amax_words=args.amax_words,
train=True,
n_pair=1,
bert_tokenizer=bert_tokenizer,
max_feats=args.max_feats,
feature_dim=args.feature_dim,
)
train_loader = DataLoader(
trainset,
batch_size=args.batch_size,
num_workers=args.num_thread_reader,
shuffle=True,
drop_last=True,
collate_fn=howtovqa_collate_fn,
)
valset = WebVidVQA_Dataset(
csv_path=args.val_csv_path,
caption=caption,
features_path=args.features_path,
qmax_words=args.qmax_words,
amax_words=args.amax_words,
train=False,
n_pair=1,
bert_tokenizer=bert_tokenizer,
max_feats=args.max_feats,
feature_dim=args.feature_dim,
)
val_loader = DataLoader(
valset,
batch_size=args.batch_size_val * args.n_pair,
num_workers=args.num_thread_reader,
shuffle=False,
drop_last=False,
collate_fn=howtovqa_collate_fn,
)
logging.info("number of train videos: {}".format(len(train_loader.dataset)))
logging.info("number of val videos: {}".format(len(val_loader.dataset)))
# Loss, Optimizer, Scheduler
criterion = Contrastive_Loss()
criterion.cuda()
params_for_optimization = list(p for p in model.parameters() if p.requires_grad)
optimizer = optim.Adam(
params_for_optimization,
lr=args.lr,
)
scheduler = get_cosine_schedule_with_warmup(
optimizer, 0, len(train_loader) * args.epochs
)
# Train
for epoch in range(args.epochs):
eval_howtovqa(model, val_loader, args)
train_howtovqa(model, train_loader, optimizer, criterion, scheduler, epoch, args)
torch.save(model.state_dict(), os.path.join(args.save_dir, f"e{epoch}.pth"))
eval_howtovqa(model, val_loader, args)
| 5,378 | 29.050279 | 94 | py |
just-ask | just-ask-main/eval_videoqa_cm.py | import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataloader import default_collate
import numpy as np
import random
import os
import logging
import collections
import pandas as pd
from transformers import DistilBertTokenizer
from args import get_args
from model.multimodal_transformer import MMT_VideoQA
from util import compute_a2v, tokenize, get_mask, compute_aggreeings
from tqdm import tqdm
class VideoQADataset(Dataset):
def __init__(
self,
csv_path,
features,
qmax_words=20,
bert_tokenizer=None,
a2id=None,
ivqa=False,
max_feats=20,
tmp_sample=0,
id2a=None,
mc=0,
):
self.data = pd.read_csv(csv_path)
self.features = features
self.qmax_words = qmax_words
self.a2id = a2id
self.bert_tokenizer = bert_tokenizer
self.ivqa = ivqa
self.max_feats = max_feats
self.tmp_sample = tmp_sample
self.id2a = id2a
self.mc = mc
def __len__(self):
return len(self.data)
def __getitem__(self, index):
vid_id = self.data["video_id"].values[index]
video = self.features[vid_id]
if len(video) < self.max_feats or not self.tmp_sample:
video = video[: self.max_feats]
vid_duration = len(video)
if len(video) < self.max_feats:
video = torch.cat(
[video, torch.zeros(self.max_feats - len(video), video.shape[1])]
)
else:
sampled = []
for j in range(self.max_feats):
sampled.append(video[(j * len(video)) // self.max_feats])
video = torch.stack(sampled)
vid_duration = len(video)
if self.ivqa:
answer_txt = collections.Counter(
[
self.data["answer1"].values[index],
self.data["answer2"].values[index],
self.data["answer3"].values[index],
self.data["answer4"].values[index],
self.data["answer5"].values[index],
]
)
answer_id = torch.zeros(len(self.a2id))
for x in answer_txt:
if x in self.a2id:
answer_id[self.a2id[x]] = answer_txt[x]
answer_txt = ", ".join(
[str(x) + "(" + str(answer_txt[x]) + ")" for x in answer_txt]
)
elif self.mc:
answer_id = int(self.data["answer"][index])
answer_txt = [self.data["a" + str(i + 1)][index] for i in range(self.mc)]
question_txt = self.data["question"][index]
qa_txt = [
question_txt + " " + x for x in answer_txt
] # concatenate question with each possible answer
question_embd = tokenize(
qa_txt,
self.bert_tokenizer,
add_special_tokens=True,
max_length=self.qmax_words,
dynamic_padding=True,
truncation=True,
)
else:
answer_txt = self.data["answer"].values[index]
answer_id = self.a2id.get(
answer_txt, -1
) # put an answer_id -1 if not in top answers, that will be considered wrong during evaluation
if not self.mc:
question_txt = self.data["question"][index]
qa_txt = [
question_txt + " " + self.id2a[i] for i in range(len(self.id2a))
] # concatenate question with each possible answer
question_embd = tokenize(
qa_txt,
self.bert_tokenizer,
add_special_tokens=True,
max_length=self.qmax_words,
dynamic_padding=True,
truncation=True,
)
return {
"video_id": vid_id,
"video": video,
"video_len": vid_duration,
"question": question_embd,
"answer_id": answer_id,
}
def videoqa_collate_fn(batch):
"""
:param batch: [dataset[i] for i in N]
    :return: tensorized batch with the question and the answer candidates padded to the max length of the batch
"""
bs = len(batch)
que = [batch[i]["question"] for i in range(bs)]
maxquelen = max([x.shape[-1] for x in que])
nans = que[0].shape[0]
question = torch.zeros(bs, nans, maxquelen).long()
for i, tensor in enumerate(que):
n, l = tensor.shape
question[i, :, :l] = tensor
return {
"video_id": default_collate([batch[i]["video_id"] for i in range(bs)]),
"video": default_collate([batch[i]["video"] for i in range(bs)]),
"video_len": default_collate([batch[i]["video_len"] for i in range(bs)]),
"question": question,
"answer_id": default_collate([batch[i]["answer_id"] for i in range(bs)]),
}
def eval(model, val_loader, args, test=False):
model.eval()
count = 0
metrics, counts = collections.defaultdict(int), collections.defaultdict(int)
vid2ans = {}
with torch.no_grad():
for i, batch in tqdm(enumerate(val_loader)):
answer_id, video, question = (
batch["answer_id"].squeeze(),
batch["video"].cuda(),
batch["question"].cuda(),
)
video_len = batch["video_len"]
question_mask = (question > 0).float()
video_mask = get_mask(video_len, video.size(1)).cuda()
count += answer_id.size(0)
predicts = model(
video,
question,
text_mask=question_mask,
video_mask=video_mask,
mode="vqacm",
)
predicts = predicts.view(answer_id.size(0), -1)
if not args.mc:
topk = torch.topk(predicts, dim=1, k=10).indices.cpu()
if args.dataset != "ivqa":
answer_id_expanded = answer_id.view(-1, 1).expand_as(topk)
else:
answer_id = (answer_id / 2).clamp(max=1)
answer_id_expanded = answer_id
metrics = compute_aggreeings(
topk,
answer_id_expanded,
[1, 10],
["acc", "acc10"],
metrics,
ivqa=(args.dataset == "ivqa"),
)
else:
predicted = torch.max(predicts, dim=1).indices.cpu()
metrics["acc"] += (predicted == answer_id).sum().item()
"""video_id = batch["video_id"]
top1 = topk[:, 0]
for k in range(len(video_id)):
vid2ans[video_id[k]] = id2a[top1[k].item()]"""
step = "val" if not test else "test"
for k in metrics:
v = metrics[k] / count
logging.info(f"{step} {k}: {v:.2%}")
# pickle.dump(vid2ans, open(os.path.join(args.save_dir, "preds.pkl"), 'wb'))
return metrics["acc"] / count
# args, logging
args = get_args()
if not (os.path.isdir(args.save_dir)):
os.mkdir(os.path.join(args.save_dir))
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s"
)
logFormatter = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
rootLogger = logging.getLogger()
fileHandler = logging.FileHandler(os.path.join(args.save_dir, "stdout.log"), "w+")
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
logging.info(args)
# set random seeds
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
# get answer embeddings
bert_tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
a2id, id2a, a2v = None, None, None
if not args.mc:
a2id, id2a, a2v = compute_a2v(
vocab_path=args.vocab_path,
bert_tokenizer=bert_tokenizer,
amax_words=args.amax_words,
)
logging.info(f"Length of Answer Vocabulary: {len(a2id)}")
# Model
model = MMT_VideoQA(
feature_dim=args.feature_dim,
word_dim=args.word_dim,
N=args.n_layers,
d_model=args.embd_dim,
d_ff=args.ff_dim,
h=args.n_heads,
dropout=args.dropout,
T=args.max_feats,
Q=args.qmax_words,
baseline=args.baseline,
)
model.cuda()
logging.info("Using {} GPUs".format(torch.cuda.device_count()))
# Load pretrain path
model = nn.DataParallel(model)
if args.pretrain_path != "":
model.load_state_dict(torch.load(args.pretrain_path))
logging.info(
f"Nb of trainable params:{sum(p.numel() for p in model.parameters() if p.requires_grad)}"
)
# Dataloaders
features = torch.load(args.features_path)
test_dataset = VideoQADataset(
csv_path=args.test_csv_path,
features=features,
qmax_words=args.qmax_words,
bert_tokenizer=bert_tokenizer,
a2id=a2id,
ivqa=(args.dataset == "ivqa"),
max_feats=args.max_feats,
id2a=id2a,
mc=args.mc,
)
test_loader = DataLoader(
test_dataset,
batch_size=torch.cuda.device_count(),
num_workers=args.num_thread_reader,
shuffle=False,
drop_last=False,
collate_fn=videoqa_collate_fn,
)
logging.info("number of test instances: {}".format(len(test_loader.dataset)))
# Zero-shot VideoQA with cross-modal matching module
eval(model, test_loader, args, test=True)
| 9,394 | 31.735192 | 108 | py |
just-ask | just-ask-main/main_htm.py | import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import numpy as np
import torch.optim as optim
from args import get_args
import random
import os
import pickle
from torch.optim.lr_scheduler import StepLR
import logging
from transformers import DistilBertTokenizer
from data.howto_loader import HowTo_Dataset, howto_collate_fn
from data.videotext_loader import (
VideoText_Dataset,
Youcook_Dataset,
videotext_collate_fn,
)
from model.multimodal_transformer import MMT_VideoQA
from train.train_htm import train_mlmcm, eval_mlm, eval_retrieval
# args, logging
args = get_args()
assert args.checkpoint_dir
assert args.dataset == "howto100m"
if not (os.path.isdir(args.save_dir)):
os.mkdir(args.save_dir)
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s"
)
logFormatter = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
rootLogger = logging.getLogger()
fileHandler = logging.FileHandler(os.path.join(args.save_dir, "stdout.log"), "w+")
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
logging.info(args)
# set random seeds
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
# Model
model = MMT_VideoQA(
feature_dim=args.feature_dim,
word_dim=args.word_dim,
N=args.n_layers,
d_model=args.embd_dim,
d_ff=args.ff_dim,
h=args.n_heads,
dropout=args.dropout,
T=args.max_feats,
Q=args.qmax_words,
baseline=args.baseline,
n_negs=args.n_negs,
)
model = nn.DataParallel(model)
model.cuda()
# Load captions, dataloaders
caption = pickle.load(open(args.caption_path, "rb"))
logging.info("Pickle loaded")
bert_tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
dataset = HowTo_Dataset(
csv_path=args.train_csv_path,
caption=caption,
features_path=args.features_path,
min_time=args.min_time,
max_time=args.max_feats,
max_words=args.qmax_words,
min_words=args.min_words,
n_pair=args.n_pair,
bert_tokenizer=bert_tokenizer,
)
dataset_size = len(dataset)
dataloader = DataLoader(
dataset,
batch_size=args.batch_size,
num_workers=args.num_thread_reader,
shuffle=True,
batch_sampler=None,
drop_last=True,
collate_fn=howto_collate_fn,
)
youcook_dataset = Youcook_Dataset(
data=args.youcook_val_path,
max_words=args.qmax_words,
bert_tokenizer=bert_tokenizer,
max_feats=args.max_feats,
)
youcook_loader = DataLoader(
youcook_dataset,
batch_size=args.batch_size_val,
num_workers=args.num_thread_reader,
shuffle=False,
collate_fn=videotext_collate_fn,
)
msrvtt_dataset = VideoText_Dataset(
csv_path=args.msrvtt_test_csv_path,
features_path=args.msrvtt_test_features_path,
max_words=args.qmax_words,
bert_tokenizer=bert_tokenizer,
max_feats=args.max_feats,
)
msrvtt_loader = DataLoader(
msrvtt_dataset,
batch_size=args.batch_size_val,
num_workers=args.num_thread_reader,
shuffle=False,
drop_last=False,
collate_fn=videotext_collate_fn,
)
# Optimizer, Scheduler
params_for_optimization = list(p for p in model.parameters() if p.requires_grad)
optimizer = optim.Adam(params_for_optimization, lr=args.lr)
scheduler = StepLR(optimizer, step_size=len(dataloader), gamma=args.lr_decay)
# Train
for epoch in range(args.epochs):
eval_mlm(model, youcook_loader, "YouCook2", epoch)
eval_retrieval(model, youcook_loader, "YouCook2", epoch)
eval_mlm(model, msrvtt_loader, "MSR-VTT", epoch)
eval_retrieval(model, msrvtt_loader, "MSR-VTT", epoch)
train_mlmcm(model, optimizer, dataloader, scheduler, epoch, args)
torch.save(model.state_dict(), os.path.join(args.save_dir, f"e{epoch}.pth"))
eval_mlm(model, youcook_loader, "YouCook2", args.epochs)
eval_retrieval(model, youcook_loader, "YouCook2", args.epochs)
eval_mlm(model, msrvtt_loader, "MSR-VTT", args.epochs)
eval_retrieval(model, msrvtt_loader, "MSR-VTT", args.epochs)
| 3,978 | 28.474074 | 82 | py |
just-ask | just-ask-main/eval_videoqa.py | import torch
import torch.nn as nn
import numpy as np
import random
import collections
from args import get_args
from model.multimodal_transformer import MMT_VideoQA
from util import (
compute_a2v,
get_mask,
compute_aggreeings,
get_types,
get_most_common,
compute_word_stats,
)
from data.videoqa_loader import get_videoqa_loaders
from transformers import DistilBertTokenizer
def eval(model, val_loader, a2v, args, types, most_common, splits, total):
count = 0
metrics, counts, metrics_word, counts_word = (
collections.defaultdict(int),
collections.defaultdict(int),
collections.defaultdict(int),
collections.defaultdict(int),
)
with torch.no_grad():
for i, batch in enumerate(val_loader):
answer_id, video, question = (
batch["answer_id"],
batch["video"].cuda(),
batch["question"].cuda(),
)
video_len = batch["video_len"]
type = batch["type"]
question_mask = (question > 0).float()
video_mask = get_mask(video_len, video.size(1)).cuda()
count += answer_id.size(0)
predicts = model(
video, question, text_mask=question_mask, video_mask=video_mask
)
topk = torch.topk(predicts, dim=1, k=10).indices.cpu()
if args.dataset != "ivqa":
answer_id_expanded = answer_id.view(-1, 1).expand_as(topk)
else:
answer_id = (answer_id / 2).clamp(max=1)
answer_id_expanded = answer_id
for x, y in types.items(): # compute per type VideoQA stats
counts[x] += sum(type == y).item()
metrics = compute_aggreeings(
topk[type == y],
answer_id_expanded[type == y],
[1, 10],
[x + "/acc", x + "/acc10"],
metrics,
ivqa=(args.dataset == "ivqa"),
)
# compute per word VideoQA stats
metrics_word, counts_word = compute_word_stats(
topk,
answer_id.cpu(),
a2id,
a2v,
most_common,
metrics_word,
counts_word,
ivqa=(args.dataset == "ivqa"),
top10=True,
)
for k in range(1, len(splits)): # compute per splits VideoQA stats
agreeings_splitk = sum(
metrics_word["acc_" + w[0]]
for it, w in enumerate(most_common)
if it >= splits[k - 1] and it < splits[k]
)
agreeings10_splitk = sum(
metrics_word["acc10_" + w[0]]
for it, w in enumerate(most_common)
if it >= splits[k - 1] and it < splits[k]
)
counts_splitk = sum(
counts_word[w[0]]
for it, w in enumerate(most_common)
if it >= splits[k - 1] and it < splits[k]
)
print(
f"split {k}: {counts_splitk / total: .4f}, {agreeings_splitk / counts_splitk:.2%}, {agreeings10_splitk / counts_splitk:.2%}"
)
for x in types: # deduce from types stats the global stats
metrics["acc"] += metrics[x + "/acc"]
metrics["acc10"] += metrics[x + "/acc10"]
for k in metrics:
if "/" in k:
v = metrics[k] / counts[k.split("/")[0]]
print(f"test {k}: {v:.2%}")
else:
v = metrics[k] / count
print(f"test {k}: {v:.2%}")
return metrics["acc"] / count
# args
args = get_args()
assert args.pretrain_path
# set random seeds
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
# get answer embeddings
bert_tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
a2id, id2a, a2v = None, None, None
if not args.mc:
a2id, id2a, a2v = compute_a2v(
vocab_path=args.vocab_path,
bert_tokenizer=bert_tokenizer,
amax_words=args.amax_words,
)
# Model
model = MMT_VideoQA(
feature_dim=args.feature_dim,
word_dim=args.word_dim,
N=args.n_layers,
d_model=args.embd_dim,
d_ff=args.ff_dim,
h=args.n_heads,
dropout=args.dropout,
T=args.max_feats,
Q=args.qmax_words,
baseline=args.baseline,
)
model.cuda()
# Load pretrain path
model = nn.DataParallel(model)
model.load_state_dict(torch.load(args.pretrain_path))
model.eval()
with torch.no_grad():
model.module._compute_answer_embedding(a2v)
# Dataloaders
features = torch.load(args.features_path)
_, _, _, _, test_dataset, test_loader = get_videoqa_loaders(
args, features, a2id, bert_tokenizer
)
types = get_types(args.dataset)
most_common, splits, total = get_most_common(test_loader, ivqa=(args.dataset == "ivqa"))
eval(model, test_loader, a2v, args, types, most_common, splits, total)
| 4,923 | 29.395062 | 136 | py |
just-ask | just-ask-main/loss.py | import torch
import torch.nn.functional as F
class Contrastive_Loss(torch.nn.Module):
def __init__(self):
super(Contrastive_Loss, self).__init__()
self.ce_loss = torch.nn.CrossEntropyLoss()
def forward(self, x, target):
return self.ce_loss(x, target)
class LogSoftmax(torch.nn.Module):
def __init__(self, dim):
super(LogSoftmax, self).__init__()
self.dim = dim
def forward(self, x, a):
nll = -F.log_softmax(x, self.dim, _stacklevel=5)
return (nll * a / a.sum(1, keepdim=True).clamp(min=1)).sum(dim=1).mean()
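# Illustrative sketch (not part of the original file): for iVQA, 'a' holds
# per-answer annotator counts, so the loss is the expected NLL under the
# normalized counts. The values below are made up.
# logits = torch.randn(2, 5)  # (batch, answer vocabulary)
# counts = torch.tensor([[0., 3., 2., 0., 0.], [5., 0., 0., 0., 0.]])
# loss = LogSoftmax(dim=1)(logits, counts)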
class NCELoss(torch.nn.Module):
def __init__(self, batch_size=4096):
super(NCELoss, self).__init__()
self.ce_loss = torch.nn.CrossEntropyLoss()
def forward(self, x):
batch_size = len(x)
target = torch.arange(batch_size).cuda()
x = torch.cat((x, x.t()), dim=1)
return self.ce_loss(x, target)
| 945 | 26.823529 | 80 | py |
just-ask | just-ask-main/demo_videoqa.py | import torch
import torch.nn as nn
import numpy as np
import random
from transformers import DistilBertTokenizer
from args import get_args
from model.multimodal_transformer import MMT_VideoQA
from util import compute_a2v, get_mask
import ffmpeg
from extract.s3dg import S3D
from extract.preprocessing import Preprocessing
from global_parameters import S3D_PATH
# args
args = get_args()
assert args.pretrain_path
assert args.question_example
assert args.video_example
# set random seeds
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
# get answer embeddings
bert_tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
a2id, id2a, a2v = compute_a2v(
vocab_path=args.vocab_path,
bert_tokenizer=bert_tokenizer,
amax_words=args.amax_words,
)
print(f"Length of Answer Vocabulary: {len(a2id)}")
# Model
model = MMT_VideoQA(
feature_dim=args.feature_dim,
word_dim=args.word_dim,
N=args.n_layers,
d_model=args.embd_dim,
d_ff=args.ff_dim,
h=args.n_heads,
dropout=args.dropout,
T=args.max_feats,
Q=args.qmax_words,
baseline=args.baseline,
)
model.cuda()
# Load pretrain path
model = nn.DataParallel(model)
model.load_state_dict(torch.load(args.pretrain_path))
model.eval()
model.module._compute_answer_embedding(a2v)
question_txt = args.question_example
video_path = args.video_example
# Tokenize Question
question = torch.tensor(
model.module.bert.bert_tokenizer.encode(
question_txt,
add_special_tokens=True,
padding="max_length",
max_length=args.qmax_words,
truncation=True,
),
dtype=torch.long,
)
question = question.cuda().unsqueeze(0)
question_mask = question > 0
# Video Extractor
video_extractor = S3D(512, space_to_depth=True, embd=1, feature_map=0)
video_extractor.load_state_dict(torch.load(S3D_PATH))
video_extractor.eval()
video_extractor = torch.nn.DataParallel(video_extractor)
video_extractor = video_extractor.cuda()
preprocess = Preprocessing(num_frames=16)
with torch.no_grad():
# Extract Video Feature
probe = ffmpeg.probe(video_path)
video_stream = next(
(stream for stream in probe["streams"] if stream["codec_type"] == "video"), None
)
width = int(video_stream["width"])
height = int(video_stream["height"])
num, denum = video_stream["avg_frame_rate"].split("/")
frame_rate = int(num) / int(denum)
if height >= width:
h, w = int(height * 224 / width), 224
else:
h, w = 224, int(width * 224 / height)
assert frame_rate >= 1
cmd = ffmpeg.input(video_path).filter("fps", fps=16).filter("scale", w, h)
x = int((w - 224) / 2.0)
y = int((h - 224) / 2.0)
cmd = cmd.crop(x, y, 224, 224)
out, _ = cmd.output("pipe:", format="rawvideo", pix_fmt="rgb24").run(
capture_stdout=True, quiet=True
)
h, w = 224, 224
video = np.frombuffer(out, np.uint8).reshape([-1, h, w, 3])
video = torch.from_numpy(video.astype("float32"))
video = video.permute(0, 3, 1, 2)
video = video.squeeze().cuda()
video = preprocess(video)
video = video_extractor(video)
# Pad Video
if len(video) < args.max_feats:
video = video[: args.max_feats]
video_len = len(video)
if len(video) < args.max_feats:
video = torch.cat(
[video, torch.zeros(args.max_feats - len(video), video.shape[1]).cuda()]
)
else:
sampled = []
for j in range(args.max_feats):
sampled.append(video[(j * len(video)) // args.max_feats])
video = torch.stack(sampled)
video_len = len(video)
video_len = torch.Tensor([video_len])
video = video.unsqueeze(0)
video_mask = get_mask(video_len, video.size(1)).cuda()
# Get Predictions
predicts = model(
video, question=question, text_mask=question_mask, video_mask=video_mask
)
topk = torch.topk(predicts, dim=1, k=5)
topk_txt = [[id2a[x.item()] for x in y] for y in topk.indices.cpu()]
topk_scores = [[f"{x:.2f}".format() for x in y] for y in topk.values.cpu()]
topk_all = [
[x + "(" + y + ")" for x, y in zip(a, b)] for a, b in zip(topk_txt, topk_scores)
]
print(f"Top 5 answers and scores: {topk_all[0]}")
| 4,273 | 28.888112 | 88 | py |
just-ask | just-ask-main/main_videoqa.py | import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import random
import os
import logging
from transformers import get_cosine_schedule_with_warmup, DistilBertTokenizer
from args import get_args
from model.multimodal_transformer import MMT_VideoQA
from loss import LogSoftmax
from util import compute_a2v
from train.train_videoqa import train, eval
from data.videoqa_loader import get_videoqa_loaders
# args, logging
args = get_args()
if not (os.path.isdir(args.save_dir)):
os.mkdir(os.path.join(args.save_dir))
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s"
)
logFormatter = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
rootLogger = logging.getLogger()
fileHandler = logging.FileHandler(os.path.join(args.save_dir, "stdout.log"), "w+")
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
logging.info(args)
# set random seeds
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
# get answer embeddings
bert_tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
a2id, id2a, a2v = None, None, None
if not args.mc:
a2id, id2a, a2v = compute_a2v(
vocab_path=args.vocab_path,
bert_tokenizer=bert_tokenizer,
amax_words=args.amax_words,
)
logging.info(f"Length of Answer Vocabulary: {len(a2id)}")
# Model
model = MMT_VideoQA(
feature_dim=args.feature_dim,
word_dim=args.word_dim,
N=args.n_layers,
d_model=args.embd_dim,
d_ff=args.ff_dim,
h=args.n_heads,
dropout=args.dropout,
T=args.max_feats,
Q=args.qmax_words,
baseline=args.baseline,
probe=args.probe
)
model.cuda()
logging.info("Using {} GPUs".format(torch.cuda.device_count()))
# Load pretrain path
model = nn.DataParallel(model)
if args.pretrain_path != "":
model.load_state_dict(torch.load(args.pretrain_path))
logging.info(f"Loaded checkpoint {args.pretrain_path}")
logging.info(
f"Nb of trainable params:{sum(p.numel() for p in model.parameters() if p.requires_grad)}"
)
# Dataloaders
features = torch.load(args.features_path)
(
train_dataset,
train_loader,
val_dataset,
val_loader,
test_dataset,
test_loader,
) = get_videoqa_loaders(args, features, a2id, bert_tokenizer)
logging.info("number of train instances: {}".format(len(train_loader.dataset)))
logging.info("number of val instances: {}".format(len(val_loader.dataset)))
logging.info("number of test instances: {}".format(len(test_loader.dataset)))
# Loss + Optimizer
if args.dataset == "ivqa":
criterion = LogSoftmax(dim=1)
else:
criterion = nn.CrossEntropyLoss()
params_for_optimization = list(p for p in model.parameters() if p.requires_grad)
optimizer = optim.Adam(
params_for_optimization, lr=args.lr, weight_decay=args.weight_decay
)
criterion.cuda()
# Training
if not args.test:
scheduler = get_cosine_schedule_with_warmup(
optimizer, 0, len(train_loader) * args.epochs
)
logging.info(
f"Set cosine schedule with {len(train_loader) * args.epochs} iterations"
)
eval(model, test_loader, a2v, args, test=True) # zero-shot VideoQA
best_val_acc = -float("inf")
best_epoch = 0
for epoch in range(args.epochs):
train(model, train_loader, a2v, optimizer, criterion, scheduler, epoch, args)
val_acc = eval(model, val_loader, a2v, args, test=False)
if val_acc > best_val_acc:
best_val_acc = val_acc
best_epoch = epoch
torch.save(
model.state_dict(), os.path.join(args.save_dir, "best_model.pth")
)
logging.info(f"Best val model at epoch {best_epoch + 1}")
model.load_state_dict(
torch.load(
os.path.join(args.checkpoint_predir, args.checkpoint_dir, "best_model.pth")
)
)
# Evaluate on test set
eval(model, test_loader, a2v, args, test=True)
| 3,932 | 29.488372 | 93 | py |
just-ask | just-ask-main/util.py | import re
import torch
import torch.nn.functional as F
import json
import collections
import numpy as np
def tokenize(
seq,
tokenizer,
add_special_tokens=True,
max_length=10,
dynamic_padding=True,
truncation=True,
):
"""
:param seq: sequence of sequences of text
:param tokenizer: bert_tokenizer
:return: torch tensor padded up to length max_length of bert tokens
"""
tokens = tokenizer.batch_encode_plus(
seq,
add_special_tokens=add_special_tokens,
max_length=max_length,
padding="longest" if dynamic_padding else "max_length",
truncation=truncation,
)["input_ids"]
return torch.tensor(tokens, dtype=torch.long)
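# Minimal usage sketch for tokenize (hypothetical strings; the helper below is
# only defined for illustration and never called on import):
def _example_tokenize(bert_tokenizer):
    ids = tokenize(["red car", "dog"], bert_tokenizer, max_length=8)
    return ids  # LongTensor of shape (2, padded_length)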
def compute_aggreeings(topk, answers, thresholds, names, metrics, ivqa=False):
""" Updates metrics dictionary by computing aggreeings for different thresholds """
if not ivqa:
for i, x in enumerate(thresholds):
agreeingsx = (topk[:, :x] == answers[:, :x]).sum().item()
metrics[names[i]] += agreeingsx
else:
for i, x in enumerate(thresholds):
predicted = F.one_hot(topk[:, :x], num_classes=answers.shape[-1]).sum(1)
metrics[names[i]] += (predicted * answers).max(1)[0].sum().item()
return metrics
class AverageMeter:
""" Computes and stores the average and current value for training stats """
def __init__(self):
self.reset()
def reset(self):
""" Reset all statistics """
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
""" Update statistics """
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def get_mask(lengths, max_length):
""" Computes a batch of padding masks given batched lengths """
mask = 1 * (
torch.arange(max_length).unsqueeze(1).to(lengths.device) < lengths
).transpose(0, 1)
return mask
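# Usage sketch for get_mask on a toy batch (hypothetical lengths; defined for
# illustration only, never called on import):
def _example_get_mask():
    lengths = torch.tensor([2, 4])  # two videos with 2 and 4 valid features
    mask = get_mask(lengths, max_length=5)
    # mask -> tensor([[1, 1, 0, 0, 0],
    #                 [1, 1, 1, 1, 0]])
    return mask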
def compute_a2v(vocab_path, bert_tokenizer, amax_words):
""" Precomputes GloVe answer embeddings for all answers in the vocabulary """
a2id = json.load(open(vocab_path, "r"))
id2a = {v: k for k, v in a2id.items()}
a2v = tokenize(
list(a2id.keys()),
bert_tokenizer,
add_special_tokens=True,
max_length=amax_words,
dynamic_padding=True,
truncation=True,
)
if torch.cuda.is_available():
        a2v = a2v.cuda()  # LongTensor of shape (vocabulary_size, max_answer_length)
return a2id, id2a, a2v
def mask_tokens(inputs, tokenizer, mlm_probability):
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
if tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
# We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
probability_matrix = torch.full(labels.shape, mlm_probability)
special_tokens_mask = [
tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
for val in labels.tolist()
]
probability_matrix.masked_fill_(
torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0
)
if tokenizer._pad_token is not None:
padding_mask = labels.eq(tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = (
torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
)
inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = (
torch.bernoulli(torch.full(labels.shape, 0.5)).bool()
& masked_indices
& ~indices_replaced
)
random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
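# Sketch of the masking on a toy batch (hypothetical tokenizer supplied by the
# caller; defined for illustration only):
def _example_mask_tokens(bert_tokenizer):
    inputs = tokenize(["what is the man doing"], bert_tokenizer, max_length=10)
    masked, labels = mask_tokens(inputs, bert_tokenizer, mlm_probability=0.15)
    # labels hold the original ids at the ~15% sampled positions and -100
    # elsewhere, so the MLM loss only covers masked tokens
    return masked, labels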
def get_types(dataset):
""" Type2Id mapping for VideoQA datasets """
if dataset == "tgif":
return {"what": 0, "how": 1, "color": 2, "where": 3}
elif dataset == "activitynet":
return {
"motion": 0,
"spatial": 1,
"temporal": 2,
"yesno": 3,
"color": 4,
"object": 5,
"location": 6,
"number": 7,
"other": 8,
}
elif dataset == "msvd" or dataset == "msrvtt":
return {"what": 0, "how": 1, "color": 2, "where": 3, "who": 4, "when": 5}
elif dataset == "ivqa":
return {"scenes": 0}
else:
raise NotImplementedError
def get_most_common(loader, ivqa=False, n=4):
""" Outputs most common answers and splits in n parts the answers depending on their frequency"""
if ivqa:
ans = []
for a1, a2, a3, a4, a5 in zip(
list(loader.dataset.data["answer1"]),
list(loader.dataset.data["answer2"]),
list(loader.dataset.data["answer3"]),
list(loader.dataset.data["answer4"]),
list(loader.dataset.data["answer5"]),
):
counteri = collections.Counter([a1, a2, a3, a4, a5])
for w in counteri:
if (
counteri[w] >= 2
                ):  # an answer counts as correct if at least two annotators gave it
ans.append(w)
else:
ans = list(loader.dataset.data["answer"])
most_common = collections.Counter(ans).most_common()
total = sum(x[1] for x in most_common)
splits = [0] * (n + 1)
j = 0
for i in range(n):
cur_total = 0
while j < len(most_common) and cur_total < total / n:
cur_total += most_common[j][1]
j += 1
splits[i + 1] = j
return most_common, splits, total
def compute_word_stats(
topk, answers, a2id, a2v, most_common, metrics, counts, ivqa, top10=False
):
""" Similar as compute_agreeings, computes agreeings and counts for most common words """
if not ivqa:
for word, cword in most_common:
if word not in a2id:
counts[word] = cword
continue
predicted = topk[:, 0]
metrics[f"acc_{word}"] += (
(predicted[answers == a2id[word]] == a2id[word]).sum().item()
)
if top10:
predicted10 = topk[:, :10]
metrics[f"acc10_{word}"] += (
(predicted10[answers == a2id[word]] == a2id[word]).sum().item()
)
counts[word] += (answers == a2id[word]).sum().item()
else:
for word, cword in most_common:
if word not in a2id:
counts[word] = cword
continue
predicted = F.one_hot(topk[:, 0], num_classes=len(a2v))
ans_word = answers[:, a2id[word]]
metrics[f"acc_{word}"] += (
(predicted[:, a2id[word]][ans_word == 1] * ans_word[ans_word == 1])
.sum()
.item()
)
if top10:
predicted10 = F.one_hot(topk[:, :10], num_classes=len(a2v)).sum(1)
metrics[f"acc10_{word}"] += (
(
predicted10[:, a2id[word]][ans_word == 1]
* ans_word[ans_word == 1]
)
.sum()
.item()
)
counts[word] += (ans_word == 1).sum().item()
return metrics, counts
def compute_metrics(x):
sx = np.sort(-x, axis=1)
d = np.diag(-x)
d = d[:, np.newaxis]
ind = sx - d
ind = np.where(ind == 0)
ind = ind[1]
metrics = {}
metrics["R1"] = float(np.sum(ind == 0)) / len(ind)
metrics["R10"] = float(np.sum(ind < 10)) / len(ind)
metrics["R100"] = float(np.sum(ind < 100)) / len(ind)
metrics["MR"] = np.median(ind) + 1
return metrics
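# Toy retrieval example: rows are queries and the ground-truth item sits on
# the diagonal of the similarity matrix (hypothetical scores):
def _example_compute_metrics():
    x = np.array([[0.9, 0.1], [0.8, 0.2]])  # query 1 ranks its match second
    return compute_metrics(x)  # -> {"R1": 0.5, "R10": 1.0, "R100": 1.0, ...}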
def print_computed_metrics(metrics):
r1 = metrics["R1"]
r10 = metrics["R10"]
r100 = metrics["R100"]
mr = metrics["MR"]
return "R@1: {:.4f} - R@10: {:.4f} - R@100: {:.4f} - Median R: {}".format(
r1, r10, r100, mr
)
| 8,758 | 32.559387 | 161 | py |
just-ask | just-ask-main/videoqageneration/generate_questions_webvid.py | import pickle
import os
from tqdm import tqdm
from torch.utils.data import DataLoader, Dataset
import torch
import math
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import argparse
import sys
import pandas as pd
sys.path.insert(0, os.getcwd())
from global_parameters import TRANSFORMERS_PATH, qas_dir, WEBVID_PATH
class Question_Generation_Dataset(Dataset):
def __init__(self, caption, ext_answers, tokenizer):
self.data = caption # dictionary mapping vid_id to text
self.answers = ext_answers # dictionary mapping vid_id to list of answers
self.video_ids = list(ext_answers.keys())
self.tokenizer = tokenizer
def _prepare_inputs_for_qg_from_answers_hl(self, text, answers):
# prepare inputs for answer-aware question generation
inputs = []
for a in answers:
try:
start = text.index(a)
except ValueError: # substring not found
start = text.index(
a.capitalize()
                )  # the ~2% of examples where the substring is still not found were removed beforehand
text_hl = f"{text[:start]} <hl> {a} <hl> {text[start + len(a):]}"
input = f"generate question: {text_hl}"
inputs.append(input)
return inputs
def _tokenize(
self,
inputs,
padding=True,
truncation=True,
add_special_tokens=True,
max_length=512,
):
# batch tokenizer
inputs = self.tokenizer(
inputs,
max_length=max_length,
add_special_tokens=add_special_tokens,
truncation=truncation,
padding="max_length" if padding else False,
return_tensors="pt",
)
return inputs
def __getitem__(self, index):
vid = self.video_ids[index]
text = self.data[vid]
answer = self.answers[vid]
qg_inputs = self._prepare_inputs_for_qg_from_answers_hl(text, answer)
inputs = self._tokenize(qg_inputs, padding=True, truncation=True)
return {
"text": text,
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"answers": answer,
"video_id": vid,
}
def __len__(self):
return len(self.video_ids)
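# Illustration of the answer-highlight input format built above for the QG
# model (hypothetical caption and answer; defined for documentation only):
def _example_qg_input():
    text, answer = "a dog runs on the beach", "the beach"
    start = text.index(answer)
    text_hl = f"{text[:start]} <hl> {answer} <hl> {text[start + len(answer):]}"
    return f"generate question: {text_hl}"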
def qgen_collate_fn(batch):
"""
:param batch: [dataset[i] for i in N]
:return: tensorized batch with the question and the ans candidates padded to the max length of the batch
"""
text = [x["text"] for x in batch]
input_ids = torch.cat([x["input_ids"] for x in batch], 0)
attention_mask = torch.cat([x["attention_mask"] for x in batch], 0)
answers = [x["answers"] for x in batch]
video_id = [x["video_id"] for x in batch]
return {
"text": text,
"input_ids": input_ids,
"attention_mask": attention_mask,
"answers": answers,
"video_id": video_id,
}
parser = argparse.ArgumentParser("")
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--n_workers", type=int, default=4)
parser.add_argument("--max_length", type=int, default=32)
parser.add_argument("--num_beams", type=int, default=4)
args = parser.parse_args()
# Load captions
train_videos = pd.read_csv(os.path.join(WEBVID_PATH, "results_2M_train.csv"))
val_videos = pd.read_csv(os.path.join(WEBVID_PATH, "results_2M_val.csv"))
print("Descriptions loaded")
videos = {}
for _, row in train_videos.iterrows():
videos[row["videoid"]] = row["name"]
for _, row in val_videos.iterrows():
videos[row["videoid"]] = row["name"]
done = os.listdir(qas_dir)
doneset = set(x[:-4] for x in done)
videos = {x: y for x, y in videos.items() if str(x) not in doneset}
print("Descriptions prepared")
print(len(videos))
# Load answers
ext_answers = pickle.load(
open(os.path.join(WEBVID_PATH, "webvid_answers.pkl"), "rb")
)
ext_answers = {int(x): y for x, y in ext_answers.items() if str(x) not in doneset}
videos = {x: y for x, y in videos.items() if x in ext_answers}
print(len(videos))
print(len(ext_answers))
# Answer-aware question generation transformer model
tokenizer = AutoTokenizer.from_pretrained(
"valhalla/t5-base-qg-hl", cache_dir=TRANSFORMERS_PATH
)
model = AutoModelForSeq2SeqLM.from_pretrained(
"valhalla/t5-base-qg-hl", cache_dir=TRANSFORMERS_PATH
)
model.cuda()
print("Models loaded")
# Dataloader
dataset = Question_Generation_Dataset(
caption=videos, ext_answers=ext_answers, tokenizer=tokenizer
)
dataloader = DataLoader(
dataset,
batch_size=args.batch_size,
num_workers=args.n_workers,
shuffle=True,
collate_fn=qgen_collate_fn,
)
print("Dataloaders prepared")
for i, batch in tqdm(enumerate(dataloader)):
text, input_ids, attention_mask, answers, video_id = (
batch["text"],
batch["input_ids"].squeeze(1).cuda(),
batch["attention_mask"].squeeze(1).cuda(),
batch["answers"],
batch["video_id"],
)
# Verify if the video has already been processed
todo_batch_list = [
j
for j in range(len(video_id))
if not os.path.exists(os.path.join(qas_dir, str(video_id[j]) + ".pkl"))
]
if not len(todo_batch_list):
continue
text = [text[j] for j in todo_batch_list]
todo_batch_list_a = []
idx = 0
todo_batch_set = set(todo_batch_list)
for j, ans in enumerate(answers):
if j in todo_batch_list:
todo_batch_list_a.extend([idx + k for k in range(len(answers[j]))])
idx += len(answers[j])
todo_batch = torch.Tensor(todo_batch_list_a).long().cuda()
input_ids = torch.index_select(input_ids, 0, todo_batch)
attention_mask = torch.index_select(attention_mask, 0, todo_batch)
answers = [answers[j] for j in todo_batch_list]
# Batch inference
n_iter = int(math.ceil(len(input_ids) / float(args.batch_size)))
outs = torch.zeros(len(input_ids), args.max_length).long()
with torch.no_grad():
for k in range(n_iter):
batch_outputs = (
model.generate(
input_ids=input_ids[
k * args.batch_size : (k + 1) * args.batch_size
],
attention_mask=attention_mask[
k * args.batch_size : (k + 1) * args.batch_size
],
max_length=args.max_length,
num_beams=args.num_beams,
)
.detach()
.cpu()
)
outs[
k * args.batch_size : (k + 1) * args.batch_size, : batch_outputs.size(1)
] = batch_outputs
# Decoding
questions = [tokenizer.decode(ids, skip_special_tokens=True) for ids in outs]
# Save
video_id = [video_id[j] for j in todo_batch_list]
qidx = 0
for j, vid in enumerate(video_id):
if not os.path.exists(os.path.join(qas_dir, str(vid) + ".pkl")):
pickle.dump(
{
"text": text[j],
"question": questions[qidx : qidx + len(answers[j])],
"answer": answers[j],
},
open(os.path.join(qas_dir, str(vid) + ".pkl"), "wb"),
)
qidx += len(answers[j]) | 7,313 | 32.39726 | 108 | py |
just-ask | just-ask-main/videoqageneration/extract_answers_webvid.py | import pickle
import os
from tqdm import tqdm
from torch.utils.data import DataLoader, Dataset
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import argparse
import torch
import sys
import pandas as pd
sys.path.insert(0, os.getcwd())  # make repository-root imports resolvable
from global_parameters import answers_dir, QG_REPO_DIR, WEBVID_PATH, TRANSFORMERS_PATH
sys.path.insert(0, os.path.join(QG_REPO_DIR, "question_generation"))
from pipelines import pipeline
class Answer_Extraction_Dataset(Dataset):
def __init__(self, caption, tokenizer):
        self.data = caption  # dictionary mapping vid_id to text
self.video_ids = list(caption.keys())
self.tokenizer = tokenizer
def _prepare_inputs_for_ans_extraction(self, sent):
# prepare inputs for answer extraction
sent = str(sent).strip()
sent = " ".join(sent.split())
source_text = "extract answers:"
sent = "<hl> %s <hl>" % sent
source_text = "%s %s" % (source_text, sent)
source_text = source_text.strip()
return source_text
def _tokenize(
self,
inputs,
padding=True,
truncation=True,
add_special_tokens=True,
max_length=512,
):
# batch tokenizer
inputs = self.tokenizer(
inputs,
max_length=max_length,
add_special_tokens=add_special_tokens,
truncation=truncation,
padding="max_length" if padding else False,
return_tensors="pt",
)
return inputs
def __getitem__(self, index):
video_id = self.video_ids[index]
text = self.data[video_id]
source_text = self._prepare_inputs_for_ans_extraction(text)
inputs = self._tokenize([source_text])
return {
"text": text,
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"video_id": video_id,
}
def __len__(self):
return len(self.data)
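# Illustration of the highlighted prompt fed to the answer-extraction model
# (hypothetical caption; defined for documentation only):
def _example_ans_extraction_prompt():
    sent = " ".join("A chef slices tomatoes on a board".split())
    return f"extract answers: <hl> {sent} <hl>"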
parser = argparse.ArgumentParser("")
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--n_workers", type=int, default=4)
parser.add_argument("--max_length", type=int, default=32)
args = parser.parse_args()
# Load captions
train_videos = pd.read_csv(os.path.join(WEBVID_PATH, "results_10M_train.csv"))
val_videos = pd.read_csv(os.path.join(WEBVID_PATH, "results_2M_val.csv"))
print("Descriptions loaded")
videos = {}
for _, row in train_videos.iterrows():
videos[row["videoid"]] = row["name"]
for _, row in val_videos.iterrows():
videos[row["videoid"]] = row["name"]
done = os.listdir(answers_dir)
doneset = set(x[:-4] for x in done)
videos = {x: y for x, y in videos.items() if str(x) not in doneset}
print("Descriptions prepared")
print(len(videos))
# Answer extraction transformer model
ans_tokenizer = AutoTokenizer.from_pretrained(
"valhalla/t5-small-qa-qg-hl", cache_dir=TRANSFORMERS_PATH
)
ans_model = AutoModelForSeq2SeqLM.from_pretrained(
"valhalla/t5-small-qa-qg-hl", cache_dir=TRANSFORMERS_PATH
)
ans_model.cuda()
print("Models loaded")
# Dataloader - shuffle so that this script can be parallelized across an arbitrary number of GPUs
dataset = Answer_Extraction_Dataset(caption=videos, tokenizer=ans_tokenizer)
dataloader = DataLoader(
dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=True
)
print("Dataloaders prepared")
# Inference
for i, batch in tqdm(enumerate(dataloader)):
text, input_ids, attention_mask, video_id = (
batch["text"],
batch["input_ids"].squeeze(1).cuda(),
batch["attention_mask"].squeeze(1).cuda(),
batch["video_id"],
)
# Verify if the video has already been processed
todo_batch_list = [
j
for j in range(len(video_id))
if not os.path.exists(
os.path.join(answers_dir, str(video_id[j].item()) + ".pkl")
)
]
if not len(todo_batch_list):
continue
text = [text[j] for j in todo_batch_list]
todo_batch = torch.Tensor(todo_batch_list).long().cuda()
input_ids = torch.index_select(input_ids, 0, todo_batch)
attention_mask = torch.index_select(attention_mask, 0, todo_batch)
# Batch inference
with torch.no_grad():
outs = (
ans_model.generate(
input_ids=input_ids,
attention_mask=attention_mask,
max_length=args.max_length,
)
.detach()
.cpu()
)
# Decoding
dec = [ans_tokenizer.decode(ids, skip_special_tokens=False) for ids in outs]
answers = [item.split("<sep>") for item in dec]
answers = [i[:-1] for i in answers]
answers = [
list(set([y.strip() for y in x if len(y.strip())])) for x in answers
] # remove duplicates
# Save
for j, idx in enumerate(todo_batch_list):
if not os.path.exists(
os.path.join(answers_dir, str(video_id[idx].item()) + ".pkl")
):
pickle.dump(
answers[j],
open(
os.path.join(answers_dir, str(video_id[idx].item()) + ".pkl"), "wb"
),
)
| 5,208 | 30.957055 | 96 | py |
just-ask | just-ask-main/videoqageneration/extract_answers.py | import pickle
import os
from tqdm import tqdm
from torch.utils.data import DataLoader, Dataset
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import argparse
import math
import torch
import sys
from global_parameters import answers_dir, QG_REPO_DIR, HOWTO_PATH, TRANSFORMERS_PATH
sys.path.insert(0, os.path.join(QG_REPO_DIR, "question_generation"))
from pipelines import pipeline
class Answer_Extraction_Dataset(Dataset):
def __init__(self, caption, tokenizer):
        self.data = caption  # dictionary mapping vid_id to lists of text sentences
self.video_ids = list(caption.keys())
self.tokenizer = tokenizer
def _prepare_inputs_for_ans_extraction(self, sents):
# prepare inputs for answer extraction
inputs = []
for sent in sents:
sent = str(sent).strip()
sent = " ".join(sent.split())
source_text = "extract answers:"
sent = "<hl> %s <hl>" % sent
source_text = "%s %s" % (source_text, sent)
source_text = source_text.strip()
inputs.append(source_text)
return inputs
def _tokenize(
self,
inputs,
padding=True,
truncation=True,
add_special_tokens=True,
max_length=512,
):
# batch tokenizer
inputs = self.tokenizer(
inputs,
max_length=max_length,
add_special_tokens=add_special_tokens,
truncation=truncation,
padding="max_length" if padding else False,
return_tensors="pt",
)
return inputs
def __getitem__(self, index):
video_id = self.video_ids[index]
text = self.data[video_id]["text"]
inputs = self._prepare_inputs_for_ans_extraction(text)
inputs = self._tokenize(inputs)
return {
"text": text,
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"video_id": video_id,
}
def __len__(self):
return len(self.data)
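# Sketch of the decoding step used below: the model emits candidate answers
# separated by "<sep>" tokens (hypothetical generated string):
def _example_decode_answers():
    dec = "a board<sep> tomatoes<sep></s>"
    parts = dec.split("<sep>")[:-1]  # drop the fragment after the last <sep>
    return list(set(y.strip() for y in parts if len(y.strip())))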
parser = argparse.ArgumentParser("")
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--n_workers", type=int, default=4)
parser.add_argument("--max_length", type=int, default=32)
args = parser.parse_args()
# Load infered sentences from speech transcripts
videos = pickle.load(
open(
os.path.join(
HOWTO_PATH, "caption_howto100m_sw_nointersec_norepeat_punctuated.pickle"
),
"rb",
)
)
done = os.listdir(answers_dir)
doneset = set(x[:11] for x in done)
videos = {x: y for x, y in videos.items() if x not in doneset}
# Answer extraction transformer model
ans_tokenizer = AutoTokenizer.from_pretrained(
"valhalla/t5-small-qa-qg-hl", cache_dir=TRANSFORMERS_PATH
)
ans_model = AutoModelForSeq2SeqLM.from_pretrained(
"valhalla/t5-small-qa-qg-hl", cache_dir=TRANSFORMERS_PATH
)
ans_model.cuda()
# Dataloader - shuffle so that this script can be parallelized across an arbitrary number of GPUs
dataset = Answer_Extraction_Dataset(caption=videos, tokenizer=ans_tokenizer)
dataloader = DataLoader(dataset, batch_size=1, num_workers=args.n_workers, shuffle=True)
# Inference
for i, batch in tqdm(enumerate(dataloader)):
text, input_ids, attention_mask, video_id = (
batch["text"],
batch["input_ids"].squeeze(0).cuda(),
batch["attention_mask"].squeeze(0).cuda(),
batch["video_id"][0],
)
# Verify if the video has already been processed
if os.path.exists(os.path.join(answers_dir, video_id + ".pkl")):
continue
# Batch inference
n_iter = int(math.ceil(len(input_ids) / float(args.batch_size)))
outs = torch.zeros(len(input_ids), args.max_length).long()
with torch.no_grad():
for k in range(n_iter):
batch_outputs = (
ans_model.generate(
input_ids=input_ids[
k * args.batch_size : (k + 1) * args.batch_size
],
attention_mask=attention_mask[
k * args.batch_size : (k + 1) * args.batch_size
],
max_length=args.max_length,
)
.detach()
.cpu()
)
outs[
k * args.batch_size : (k + 1) * args.batch_size, : batch_outputs.size(1)
] = batch_outputs
# Decoding
dec = [ans_tokenizer.decode(ids, skip_special_tokens=False) for ids in outs]
answers = [item.split("<sep>") for item in dec]
answers = [i[:-1] for i in answers]
answers = [
list(set([y.strip() for y in x if len(y.strip())])) for x in answers
] # remove duplicates
answers = [
[x for x in y if x in text[l] or x.capitalize() in text[l]]
for l, y in enumerate(answers)
] # remove answers that we cannot find back in the original sentence
# Save
if os.path.exists(os.path.join(answers_dir, video_id + ".pkl")):
continue
pickle.dump(answers, open(os.path.join(answers_dir, video_id + ".pkl"), "wb"))
| 5,134 | 31.916667 | 96 | py |
just-ask | just-ask-main/videoqageneration/generate_questions.py | import pickle
import os
from tqdm import tqdm
from torch.utils.data import DataLoader, Dataset
import torch
import math
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import argparse
from global_parameters import TRANSFORMERS_PATH, qas_dir, HOWTO_PATH
class Question_Generation_Dataset(Dataset):
def __init__(self, caption, ext_answers, tokenizer):
self.data = caption # dictionary mapping vid_id to lists of text, start, end
self.answers = ext_answers # dictionary mapping vid_id to list of answers, index corresponding to the original text
self.video_ids = list(ext_answers.keys())
self.tokenizer = tokenizer
def _prepare_inputs_for_qg_from_answers_hl(self, text, answers):
# prepare inputs for answer-aware question generation
inputs = []
for (sent, a) in zip(text, answers):
try:
start = sent.index(a)
except ValueError: # substring not found
                start = sent.index(a.capitalize())  # look in the sentence itself, not the list of sentences
text_hl = f"{sent[:start]} <hl> {a} <hl> {sent[start + len(a):]}"
input = f"generate question: {text_hl}"
inputs.append(input)
return inputs
def _tokenize(
self,
inputs,
padding=True,
truncation=True,
add_special_tokens=True,
max_length=512,
):
# batch tokenizer
inputs = self.tokenizer(
inputs,
max_length=max_length,
add_special_tokens=add_special_tokens,
truncation=truncation,
padding="max_length" if padding else False,
return_tensors="pt",
)
return inputs
def __getitem__(self, index):
vid = self.video_ids[index]
text = self.data[vid]["text"]
answer = self.answers[vid]["answer"]
indices = self.answers[vid]["idx"]
text = [text[i] for i in indices]
qg_inputs = self._prepare_inputs_for_qg_from_answers_hl(text, answer)
inputs = self._tokenize(qg_inputs, padding=True, truncation=True)
start = torch.tensor([self.data[vid]["start"][i] for i in indices])
end = torch.tensor([self.data[vid]["end"][i] for i in indices])
return {
"text": text,
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"answers": answer,
"video_id": vid,
"start": start,
"end": end,
}
def __len__(self):
return len(self.video_ids)
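# Sketch of the post-filter applied below: QA pairs whose answer appears
# verbatim in the generated question are discarded (hypothetical pairs):
def _example_filter_qa():
    questions = ["what does the chef slice?", "who slices tomatoes?"]
    answers = ["tomatoes", "tomatoes"]
    keep = [k for k in range(len(questions)) if answers[k] not in questions[k]]
    return keep  # -> [0]: the second pair leaks its answer into the question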
parser = argparse.ArgumentParser("")
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--n_workers", type=int, default=4)
parser.add_argument("--max_length", type=int, default=32)
parser.add_argument("--num_beams", type=int, default=4)
args = parser.parse_args()
# Load speech transcripts and extracted answers
videos = pickle.load(
open(
os.path.join(
HOWTO_PATH, "caption_howto100m_sw_nointersec_norepeat_punctuated.pickle"
),
"rb",
)
)
ext_answers = pickle.load(
open(
os.path.join(HOWTO_PATH, "howtovqa_answers.pickle"),
"rb",
)
)
done = os.listdir(qas_dir)
doneset = set(x[:11] for x in done)
videos = {x: y for x, y in videos.items() if x not in doneset}
ext_answers = {x: y for x, y in ext_answers.items() if x not in doneset}
# Answer-aware question generation transformer model
tokenizer = AutoTokenizer.from_pretrained(
"valhalla/t5-base-qg-hl", cache_dir=TRANSFORMERS_PATH
)
model = AutoModelForSeq2SeqLM.from_pretrained(
"valhalla/t5-base-qg-hl", cache_dir=TRANSFORMERS_PATH
)
model.cuda()
# Dataloader
dataset = Question_Generation_Dataset(
caption=videos, ext_answers=ext_answers, tokenizer=tokenizer
)
dataloader = DataLoader(dataset, batch_size=1, num_workers=args.n_workers, shuffle=True)
for i, batch in tqdm(enumerate(dataloader)):
text, input_ids, attention_mask, answers, video_id, start, end = (
batch["text"],
batch["input_ids"].squeeze(0).cuda(),
batch["attention_mask"].squeeze(0).cuda(),
batch["answers"],
batch["video_id"][0],
batch["start"].squeeze(0),
batch["end"].squeeze(0),
)
# Verify if the video has already been processed
if os.path.exists(os.path.join(qas_dir, video_id + ".pkl")):
continue
# Batch inference
n_iter = int(math.ceil(len(input_ids) / float(args.batch_size)))
outs = torch.zeros(len(input_ids), args.max_length).long()
with torch.no_grad():
for k in range(n_iter):
batch_outputs = (
model.generate(
input_ids=input_ids[
k * args.batch_size : (k + 1) * args.batch_size
],
attention_mask=attention_mask[
k * args.batch_size : (k + 1) * args.batch_size
],
max_length=args.max_length,
num_beams=args.num_beams,
)
.detach()
.cpu()
)
outs[
k * args.batch_size : (k + 1) * args.batch_size, : batch_outputs.size(1)
] = batch_outputs
# Decoding
questions = [tokenizer.decode(ids, skip_special_tokens=True) for ids in outs]
answers = [answers[l][0] for l in range(len(answers))]
text = [text[l][0] for l in range(len(answers))]
# Remove qa pairs for which the answer is fully in the question
idx = [k for k in range(len(questions)) if answers[k] not in questions[k]]
questions = [questions[k] for k in idx]
answers = [answers[k] for k in idx]
text = [text[k] for k in idx]
start = [start[k].item() for k in idx]
end = [end[k].item() for k in idx]
# Save
if os.path.exists(os.path.join(qas_dir, video_id + ".pkl")):
continue
pickle.dump(
{
"text": text,
"question": questions,
"answer": answers,
"start": start,
"end": end,
},
open(os.path.join(qas_dir, video_id + ".pkl"), "wb"),
)
| 6,150 | 32.612022 | 124 | py |
just-ask | just-ask-main/preproc/preproc_how2qa.py | from tqdm import tqdm
import pandas as pd
import os
import numpy as np
import torch
from global_parameters import HOW2QA_PATH, HOWTO_FEATURES_PATH
train_csv = pd.read_csv(os.path.join(HOW2QA_PATH, "how2QA_train_release.csv"))
train_csv.columns = ["vid_id", "timesteps", "a2", "a3", "a4", "question", "a1"]
print(len(train_csv)) # 35404
val_csv = pd.read_csv(os.path.join(HOW2QA_PATH, "how2QA_val_release.csv"))
val_csv.columns = ["vid_id", "timesteps", "a2", "a3", "a4", "question", "a1"]
print(len(val_csv)) # 2851
count = {}
missing_videos = []
missing_features = []
features = {}
def process(df):
idx = [[1, 2, 3, 4] for _ in range(len(df))]
for i in range(len(idx)):
np.random.shuffle(idx[i])
ids, a1, a2, a3, a4, answer, question, starts, ends = (
[],
[],
[],
[],
[],
[],
[],
[],
[],
)
for i, row in tqdm(df.iterrows()):
feat_path = HOWTO_FEATURES_PATH + row["vid_id"] + ".mp4" + ".npy"
if not os.path.exists(feat_path):
feat_path = HOWTO_FEATURES_PATH + row["vid_id"] + ".webm" + ".npy"
if not os.path.exists(feat_path):
missing_videos.append(row["vid_id"])
continue
start = int(float(row["timesteps"].split(":")[0][1:]))
end = int(float(row["timesteps"].split(":")[1][:-1]))
feature = torch.from_numpy(np.load(feat_path))
feature = feature[start : end + 1]
if len(feature) != end - start + 1:
            missing_features.append((row["vid_id"], start, end))  # columns were renamed to "vid_id" above
continue
starts.append(start)
ends.append(end)
id = count.get(row["vid_id"], 0)
ids.append(row["vid_id"] + "_" + str(id))
features[row["vid_id"] + "_" + str(id)] = feature.float()
count[row["vid_id"]] = count.get(row["vid_id"], 0) + 1
a1.append(row["a" + str(idx[i][0])])
a2.append(row["a" + str(idx[i][1])])
a3.append(row["a" + str(idx[i][2])])
a4.append(row["a" + str(idx[i][3])])
answer.append(idx[i].index(1))
question.append(row["question"])
return question, answer, ids, a1, a2, a3, a4, starts, ends
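# Sketch of the label construction above: idx[i] is a random permutation of
# [1, 2, 3, 4] and the correct candidate is always "a1", so its position in
# the shuffled order becomes the answer label (hypothetical permutation):
def _example_answer_label():
    perm = [3, 1, 4, 2]  # shuffled candidate order for one question
    return perm.index(1)  # -> 1: "a1" is presented as the second candidate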
question, answer, ids, a1, a2, a3, a4, starts, ends = process(train_csv)
train_df = pd.DataFrame(
{
"question": question,
"answer": answer,
"video_id": ids,
"a1": a1,
"a2": a2,
"a3": a3,
"a4": a4,
"start": starts,
"end": ends,
},
columns=["question", "answer", "video_id", "a1", "a2", "a3", "a4", "start", "end"],
)
print(len(train_df)) # 35070, about 0.9% missing videos or features
question, answer, ids, a1, a2, a3, a4, starts, ends = process(val_csv)
val_df = pd.DataFrame(
{
"question": question,
"answer": answer,
"video_id": ids,
"a1": a1,
"a2": a2,
"a3": a3,
"a4": a4,
"start": starts,
"end": ends,
},
columns=["question", "answer", "video_id", "a1", "a2", "a3", "a4", "start", "end"],
)
print(len(val_df)) # 2833, about 0.6% missing videos or features
print(missing_videos) # 8e_jI7rLB04
print(len(missing_features)) # 239
train_df.to_csv(os.path.join(HOW2QA_PATH, "train.csv"), index=False)
val_df.to_csv(os.path.join(HOW2QA_PATH, "val.csv"), index=False)
val_df.to_csv(
os.path.join(HOW2QA_PATH, "test.csv"), index=False
) # evaluation on the public val
torch.save(features, os.path.join(HOW2QA_PATH, "s3d.pth")) | 3,490 | 31.626168 | 87 | py |
just-ask | just-ask-main/train/train_howtovqa.py | import torch
import torch.nn as nn
import logging
import collections
import numpy as np
from util import compute_aggreeings, AverageMeter, get_mask, mask_tokens
def eval_howtovqa(model, val_loader, args):
model.eval()
metrics = collections.defaultdict(int)
count = 0
with torch.no_grad():
for i, batch in enumerate(val_loader):
answer, video, question = (
batch["answer"].cuda(),
batch["video"].cuda(),
batch["question"].cuda(),
)
video_len = batch["video_len"].squeeze()
video = video.squeeze()
video_mask = get_mask(video_len, video.size(1)).cuda()
question_mask = (question > 0).float()
count += answer.size(0)
atxt_unique, ans_idx, ans_inv = np.unique(
batch["atxt"], return_index=True, return_inverse=True
) # only keep unique answers
answer = answer[ans_idx]
fusion_proj, answer_proj = model(
video,
question=question,
answer=answer,
text_mask=question_mask,
video_mask=video_mask,
)
predicts = fusion_proj @ (answer_proj.t())
topk = torch.topk(predicts, dim=1, k=10).indices.cpu()
answer_id_expanded = torch.from_numpy(ans_inv).view(-1, 1).expand_as(topk)
metrics = compute_aggreeings(
topk,
answer_id_expanded,
[1, 10],
["rec", "rec10"],
metrics,
)
if args.mlm_prob:
inputs, labels = mask_tokens(
question.cpu(),
model.module.bert.bert_tokenizer,
mlm_probability=0.15,
)
mlm_loss = model(
video,
question=inputs.cuda(),
labels=labels.cuda(),
text_mask=question_mask,
video_mask=video_mask,
mode="mlm",
)
mlm_loss = mlm_loss.mean()
metrics["mlm_loss"] += mlm_loss * answer.size(0)
for k in metrics:
v = metrics[k] / count
if "mlm" in k:
logging.info(f"val {k}: {v:.4f}")
else:
logging.info(f"val {k}: {v:.2%}")
return metrics["rec"] / count
def train_howtovqa(model, train_loader, optimizer, criterion, scheduler, epoch, args):
model.train()
running_vqa_loss, running_mlm_loss = AverageMeter(), AverageMeter()
for i, batch in enumerate(train_loader):
answer, video, question = (
batch["answer"].cuda(),
batch["video"].cuda(),
batch["question"].cuda(),
)
video_len = batch["video_len"]
video_mask = get_mask(video_len, video.size(1)).cuda()
question_mask = (question > 0).float()
N = answer.size(0)
atxt_unique, ans_idx, ans_inv = np.unique(
batch["atxt"], return_index=True, return_inverse=True
) # only keep unique answers
answer = answer[ans_idx]
fusion_proj, answer_proj = model(
video,
question=question,
answer=answer,
text_mask=question_mask,
video_mask=video_mask,
)
predicts = fusion_proj @ (answer_proj.t())
target = torch.from_numpy(ans_inv).cuda()
vqa_loss = criterion(predicts, target)
if args.mlm_prob:
inputs, labels = mask_tokens(
question.cpu(), model.module.bert.bert_tokenizer, mlm_probability=0.15
)
mlm_loss = model(
video,
question=inputs.cuda(),
labels=labels.cuda(),
text_mask=question_mask,
video_mask=video_mask,
mode="mlm",
)
mlm_loss = mlm_loss.mean()
loss = mlm_loss + vqa_loss
else:
loss = vqa_loss
optimizer.zero_grad()
loss.backward()
if args.clip:
nn.utils.clip_grad_norm_(model.parameters(), max_norm=args.clip)
optimizer.step()
scheduler.step()
running_vqa_loss.update(vqa_loss.detach().cpu().item(), N)
if args.mlm_prob:
running_mlm_loss.update(mlm_loss.detach().cpu().item(), N)
if (i + 1) % (len(train_loader) // args.freq_display) == 0:
if args.mlm_prob:
logging.info(
f"Epoch {epoch + 1}, Epoch status: {float(i + 1) / len(train_loader):.4f}, Training VQA loss: {running_vqa_loss.avg:.4f}, "
f"Training MLM loss: {running_mlm_loss.avg:.4f}"
)
else:
logging.info(
f"Epoch {epoch + 1}, Epoch status: {float(i + 1) / len(train_loader):.4f}, Training loss: {running_vqa_loss.avg:.4f}"
)
running_vqa_loss.reset()
running_mlm_loss.reset()
| 5,094 | 34.381944 | 143 | py |
just-ask | just-ask-main/train/train_htm.py | import torch
import logging
import math
from tqdm import tqdm
from util import (
mask_tokens,
get_mask,
AverageMeter,
compute_metrics,
print_computed_metrics,
)
def train_mlmcm(model, optimizer, dataloader, scheduler, epoch, args):
model.train()
running_mlm_loss, running_cm_loss = AverageMeter(), AverageMeter()
for i, batch in enumerate(dataloader):
tokens = batch["text"]
text_mask = 1 * (tokens != 0).cuda()
video = batch["video"].cuda()
video_len = batch["video_len"].cuda()
video_mask = get_mask(video_len, video.size(1)).cuda()
inputs, labels = mask_tokens(
tokens, model.module.bert.bert_tokenizer, mlm_probability=args.mlm_prob
)
inputs, labels, tokens = inputs.cuda(), labels.cuda(), tokens.cuda()
mlm_loss = model(
video,
question=inputs,
labels=labels,
text_mask=text_mask,
video_mask=video_mask,
mode="mlm",
)
mlm_loss = mlm_loss.mean()
cm_loss = model(
video,
question=tokens,
text_mask=text_mask,
video_mask=video_mask,
mode="cm",
)
cm_loss = cm_loss.mean()
loss = mlm_loss + cm_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
running_mlm_loss.update(mlm_loss.detach().cpu().item())
running_cm_loss.update(cm_loss.detach().cpu().item())
if (i + 1) % (len(dataloader) // args.freq_display) == 0:
logging.info(
"Epoch %d, Epoch status: %.4f, Training MLM loss: %.4f, Training CM loss: %.4f"
% (
epoch,
float(i + 1) / len(dataloader),
running_mlm_loss.avg,
running_cm_loss.avg,
)
)
running_mlm_loss.reset()
running_cm_loss.reset()
def eval_mlm(model, eval_dataloader, dataset_name, epoch):
model.eval()
loss = AverageMeter()
with torch.no_grad():
for i_batch, data in enumerate(eval_dataloader):
tokens = data["text"]
inputs, labels = mask_tokens(
tokens, model.module.bert.bert_tokenizer, mlm_probability=0.15
)
inputs, labels, tokens = inputs.cuda(), labels.cuda(), tokens.cuda()
text_mask = 1 * (tokens != 0)
video = data["video"].cuda()
video_len = data["video_len"].cuda()
video_mask = get_mask(video_len, video.size(1)).cuda()
mlm_loss = model(
video,
inputs,
labels=labels,
text_mask=text_mask,
video_mask=video_mask,
mode="mlm",
)
mlm_loss = mlm_loss.mean()
loss.update(mlm_loss, len(tokens))
logging.info(f"Epoch {epoch}, Val {dataset_name} MLM loss: {loss.avg:.4f}")
def eval_retrieval(model, eval_dataloader, dataset_name, epoch):
model.eval()
with torch.no_grad():
for i_batch, data in enumerate(eval_dataloader):
assert i_batch == 0 # evaluation done in one batch
tokens = data["text"].cuda()
text_mask = 1 * (tokens != 0)
video = data["video"].cuda()
video_len = data["video_len"].cuda()
video_mask = get_mask(video_len, video.size(1)).cuda()
m = torch.zeros(len(tokens), len(video)).cuda()
n_gpus = torch.cuda.device_count()
video_rep = video.repeat(
n_gpus, 1, 1
) # repeat so that on each gpu there are all videos
video_mask_rep = video_mask.repeat(n_gpus, 1)
for j in tqdm(
                range(math.ceil(len(tokens) / n_gpus))  # true division: with // the ceil is a no-op and trailing texts are dropped
): # one text passed with all videos on each gpu
tokens_one = tokens[j * n_gpus : (j + 1) * n_gpus]
text_mask_one = text_mask[j * n_gpus : (j + 1) * n_gpus]
output = model(
video_rep,
tokens_one,
text_mask=text_mask_one,
video_mask=video_mask_rep,
mode="retrieval",
)
m[j * n_gpus : (j + 1) * n_gpus] = output.view(n_gpus, -1)
m = m.detach().cpu().numpy()
metrics = compute_metrics(m)
logging.info(
f"Epoch {epoch}, Val {dataset_name} Text-Video Retrieval: "
+ print_computed_metrics(metrics)
)
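# Sketch of the chunked similarity fill above: each forward pass scores n_gpus
# texts, one per GPU, against all videos, yielding n_gpus rows of the
# text-video matrix at a time (hypothetical sizes, random stand-in scores):
def _example_chunked_fill(n_texts=8, n_videos=8, n_gpus=2):
    m = torch.zeros(n_texts, n_videos)
    for j in range(math.ceil(n_texts / n_gpus)):
        output = torch.randn(n_gpus * n_videos)  # stand-in for model scores
        m[j * n_gpus : (j + 1) * n_gpus] = output.view(n_gpus, -1)
    return m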
| 4,622 | 34.022727 | 95 | py |
just-ask | just-ask-main/train/train_videoqa.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
import collections
from util import compute_aggreeings, AverageMeter, get_mask, mask_tokens
def eval(model, val_loader, a2v, args, test=False):
model.eval()
count = 0
metrics, counts = collections.defaultdict(int), collections.defaultdict(int)
with torch.no_grad():
if not args.mc:
model.module._compute_answer_embedding(a2v)
for i, batch in enumerate(val_loader):
answer_id, answer, video, question = (
batch["answer_id"],
batch["answer"],
batch["video"].cuda(),
batch["question"].cuda(),
)
video_len = batch["video_len"]
question_mask = (question > 0).float()
video_mask = get_mask(video_len, video.size(1)).cuda()
count += answer_id.size(0)
if not args.mc:
predicts = model(
video,
question,
text_mask=question_mask,
video_mask=video_mask,
)
topk = torch.topk(predicts, dim=1, k=10).indices.cpu()
if args.dataset != "ivqa":
answer_id_expanded = answer_id.view(-1, 1).expand_as(topk)
else:
answer_id = (answer_id / 2).clamp(max=1)
answer_id_expanded = answer_id
metrics = compute_aggreeings(
topk,
answer_id_expanded,
[1, 10],
["acc", "acc10"],
metrics,
ivqa=(args.dataset == "ivqa"),
)
else:
fusion_proj, answer_proj = model(
video,
question,
text_mask=question_mask,
video_mask=video_mask,
answer=answer.cuda(),
)
fusion_proj = fusion_proj.unsqueeze(2)
predicts = torch.bmm(answer_proj, fusion_proj).squeeze()
predicted = torch.max(predicts, dim=1).indices.cpu()
metrics["acc"] += (predicted == answer_id).sum().item()
step = "val" if not test else "test"
for k in metrics:
v = metrics[k] / count
logging.info(f"{step} {k}: {v:.2%}")
return metrics["acc"] / count
def train(model, train_loader, a2v, optimizer, criterion, scheduler, epoch, args):
model.train()
running_vqa_loss, running_acc, running_mlm_loss = (
AverageMeter(),
AverageMeter(),
AverageMeter(),
)
for i, batch in enumerate(train_loader):
answer_id, answer, video, question = (
batch["answer_id"],
batch["answer"],
batch["video"].cuda(),
batch["question"].cuda(),
)
video_len = batch["video_len"]
question_mask = (question > 0).float()
video_mask = (
get_mask(video_len, video.size(1)).cuda() if args.max_feats > 0 else None
)
N = answer_id.size(0)
if not args.mc:
model.module._compute_answer_embedding(a2v)
predicts = model(
video,
question,
text_mask=question_mask,
video_mask=video_mask,
)
else:
fusion_proj, answer_proj = model(
video,
question,
text_mask=question_mask,
video_mask=video_mask,
answer=answer.cuda(),
)
fusion_proj = fusion_proj.unsqueeze(2)
predicts = torch.bmm(answer_proj, fusion_proj).squeeze()
if args.dataset == "ivqa":
a = (answer_id / 2).clamp(max=1).cuda()
vqa_loss = criterion(predicts, a)
predicted = torch.max(predicts, dim=1).indices.cpu()
predicted = F.one_hot(predicted, num_classes=len(a2v))
running_acc.update((predicted * a.cpu()).sum().item() / N, N)
else:
vqa_loss = criterion(predicts, answer_id.cuda())
predicted = torch.max(predicts, dim=1).indices.cpu()
running_acc.update((predicted == answer_id).sum().item() / N, N)
if args.mlm_prob:
inputs = batch["question"]
inputs, labels = mask_tokens(
inputs, model.module.bert.bert_tokenizer, mlm_probability=0.15
)
mlm_loss = model(
video,
question=inputs.cuda(),
labels=labels.cuda(),
text_mask=question_mask,
video_mask=video_mask,
mode="mlm",
)
mlm_loss = mlm_loss.mean()
loss = mlm_loss + vqa_loss
else:
loss = vqa_loss
optimizer.zero_grad()
loss.backward()
if args.clip:
nn.utils.clip_grad_norm_(model.parameters(), max_norm=args.clip)
optimizer.step()
scheduler.step()
running_vqa_loss.update(vqa_loss.detach().cpu().item(), N)
if args.mlm_prob:
running_mlm_loss.update(mlm_loss.detach().cpu().item(), N)
if (i + 1) % (len(train_loader) // args.freq_display) == 0:
if args.mlm_prob:
logging.info(
f"Epoch {epoch + 1}, Epoch status: {float(i + 1) / len(train_loader):.4f}, Training VideoQA loss: "
f"{running_vqa_loss.avg:.4f}, Training acc: {running_acc.avg:.2%}, Training MLM loss: {running_mlm_loss.avg:.4f}"
)
else:
logging.info(
f"Epoch {epoch + 1}, Epoch status: {float(i + 1) / len(train_loader):.4f}, Training VideoQA loss: "
f"{running_vqa_loss.avg:.4f}, Training acc: {running_acc.avg:.2%}"
)
running_acc.reset()
running_vqa_loss.reset()
running_mlm_loss.reset()
| 6,057 | 36.165644 | 133 | py |
just-ask | just-ask-main/misc/server_videoqa.py | #!/usr/bin/env python
import os
import json
import torch
import torch.nn.functional as F
import pickle
import random
import urllib
import urllib.request
import cherrypy
from transformers import DistilBertTokenizer
from model.multimodal_transformer import MMT_VideoQA
from util import compute_a2v, get_mask
from args import get_args
from global_parameters import (
SERVER_HTML_PATH,
SERVER_FEATURE_PATH,
)  # paths to be defined in global_parameters.py
class Server(object):
def __init__(
self,
vqa_model,
vqa_model2,
model_ckpt,
model_ckpt2,
video_features_path,
a2v,
id2a,
T,
Q,
default_data,
max_videos,
):
"""
:param vqa_model: first model used for the demo
:param vqa_model2: second model used for the demo
:param model_ckpt: path to weights for the first model
:param model_ckpt2: path to weights for the second model
:param video_features_path: path to the features corresponding to the videos used in the demo
:param a2v: map answer to tokens for all answers in a given answer dictionary
:param id2a: map index to answer
:param T: maximum number of video features
:param Q: maximum number of tokens in the question
:param default_data: map video_id to question, start, end
:param max_videos: maximum number of videos in the demo
"""
self.video_features = torch.load(video_features_path)
# load weights for the first model on CPU
self.vqa_model = vqa_model
weights = torch.load(model_ckpt, map_location=torch.device("cpu"))
weights = {x.split("module.")[1]: weights[x] for x in weights}
self.vqa_model.load_state_dict(weights)
self.vqa_model.eval()
self.vqa_model._compute_answer_embedding(a2v)
# load weights for the second model on CPU
self.vqa_model2 = vqa_model2
weights2 = torch.load(model_ckpt2, map_location=torch.device("cpu"))
weights2 = {x.split("module.")[1]: weights2[x] for x in weights2}
self.vqa_model2.load_state_dict(weights2)
self.vqa_model2.eval()
self.vqa_model2._compute_answer_embedding(a2v)
self.all_video_ids = list(self.video_features.keys())[:max_videos]
self.id2a = id2a
self.T = T
self.Q = Q
self.default_data = default_data
self.max_videos = max_videos
@cherrypy.expose
def index(self):
index_html = '<head><link rel="icon" href="https://antoyang.github.io/img/favicon.ico" type="image/x-icon"/>'
index_html += '<link href="https://antoyang.github.io/css/bootstrap.min.css" rel="stylesheet"></head>'
index_html += "<center><h1> <a href='https://antoyang.github.io/just-ask.html'> Just Ask </a> VideoQA Demo </h1></center>"
index_html += "<center><h2> Choose a video for which you want to ask a question </h2></center>"
index_html += "<center><h3> Default question, start and end timestamps are from the iVQA test set annotations. Nothing is pre-computed for these videos. </h3></center><br>"
index_html += '<div class="container">' # grid of videos
for i, vid in enumerate(self.all_video_ids):
url = "https://www.youtube.com/oembed"
params = {
"format": "json",
"url": "https://www.youtube.com/watch?v=%s" % vid,
}
query_string = urllib.parse.urlencode(params)
url = url + "?" + query_string
try:
with urllib.request.urlopen(
url
) as response: # get thumbnail and title from YouTube
response_text = response.read()
data = json.loads(response_text.decode())
# pprint.pprint(data)
title = data["title"]
thumbnail_url = data["thumbnail_url"]
except: # if the video is deleted: json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
title = "Unavailable Video"
thumbnail_url = "https://images.drivereasy.com/wp-content/uploads/2017/10/this-video-is-not-available-1.jpg"
if i % 4 == 0: # 4 videos per row
index_html += '<div class="row">'
index_html += '<div class="col-md-3 col-sm-12"><center><a href="vqa?video_id={}"><img src={} height="180" width="240"></img></a><br>'.format(
vid, thumbnail_url
)
index_html += '<a href="vqa?video_id={}">{}</a></center></div>'.format(
vid, title
)
if (i % 4 == 3) or (
i == min(len(self.all_video_ids), self.max_videos) - 1
): # end of row
index_html += "</div><br><br>"
index_html += "</div>"
index_html += "<center><a href='reload' class='btn btn-primary btn-lg active'>More videos!</a></center><br>"
index_html += "<center><h2> Built by <a href='https://antoyang.github.io/'> Antoine Yang </a> </h2> </center><br>"
return index_html
@cherrypy.expose
def vqa(self, video_id, start=0, end=5, question="", model="finetuned"):
if video_id not in self.video_features:
return (
f'Video {video_id} is not available, <a href="/">go back to index</a>.'
)
html_path = SERVER_HTML_PATH
with open(html_path, "r") as f:
html = f.read()
if not str(start).isdigit():
return 'Start time (in seconds) must be a positive integer, <a href="/">go back to index</a>.'
if not str(end).isdigit():
return 'End time (in seconds) must be a positive integer, <a href="/">go back to index</a>.'
if not question: # put default data
flag = False
start = self.default_data[video_id]["start"]
end = self.default_data[video_id]["end"]
question = self.default_data[video_id]["question"]
else:
flag = True # a question is asked
html = html.format(video_id, start, end, video_id, start, end, question)
feature = self.video_features[video_id][int(start) : int(end) + 1]
if len(feature) == 0:
return f'Features are not available for video {video_id} between start {start} seconds and {end} seconds, <a href="/">go back to index</a>.'
if flag:
# prepare padded features and tokens, masks
video_len = torch.Tensor([len(feature)])
if len(feature) < self.vqa_model.T:
feature = torch.cat(
[
feature,
torch.zeros(self.vqa_model.T - len(feature), feature.size(1)),
],
dim=0,
)
else:
sampled = []
for j in range(self.vqa_model.T):
sampled.append(feature[(j * len(feature)) // self.vqa_model.T])
feature = torch.stack(sampled)
feature = feature.unsqueeze(0)
            video_mask = get_mask(video_len, self.vqa_model.T)  # mask over the T padded video features
tokens = torch.tensor(
self.vqa_model.bert.bert_tokenizer.encode(
question,
add_special_tokens=True,
padding="max_length",
max_length=self.vqa_model.Q,
truncation=True,
),
dtype=torch.long,
).unsqueeze(0)
question_mask = tokens > 0
with torch.no_grad(): # forward
if (
model == "zeroshot"
): # assumes that the first model is the zeroshot one
predicts = self.vqa_model(
feature,
question=tokens,
video_mask=video_mask,
text_mask=question_mask,
)
elif model == "finetuned":
predicts = self.vqa_model2(
feature,
question=tokens,
video_mask=video_mask,
text_mask=question_mask,
)
else:
raise NotImplementedError
predicts = F.softmax(predicts, dim=1)
topk = torch.topk(predicts, dim=1, k=5) # top 5 answers
topk_txt = [
[self.id2a[x.item()] for x in y] for y in topk.indices.cpu()
]
topk_scores = [[x * 100 for x in y] for y in topk.values.cpu()]
progress_bar = ""
for i in range(5): # plot answer logits with a nice progress bar
progress_bar += f'<div class="row"><div class="col-md-3" style="height: 5%;"><h3 style="color: #428bca !important;" class="center">{topk_txt[0][i]}</h3></div>'
progress_bar += f'<div class="col-md-9" style="height: 5%;"><div class="progress" style="margin-top: 20px !important;"><div class="progress-bar" style="color: black; width: {topk_scores[0][i]}%;" width: {topk_scores[0][i]}%;" role="progressbar" aria-valuenow="{topk_scores[0][i]}" aria-valuemin="0" aria-valuemax="1">{topk_scores[0][i]:.2f}%</div></div></div></div>'
html += '<div class="col-sm-offset-2 col-sm-8"> <b> Question input </b>: {} <br> <b> <br> Top 5 answers ({} model) </b>: {} </div></div>'.format(
question, model, progress_bar
)
return html + "</div><br><br></body></html>"
@cherrypy.expose
    def reload(self):  # same as index, after randomizing the displayed videos
self.all_video_ids = random.sample(
list(self.video_features.keys()), self.max_videos
)
index_html = '<head><link rel="icon" href="https://antoyang.github.io/img/favicon.ico" type="image/x-icon"/>'
index_html += '<link href="https://antoyang.github.io/css/bootstrap.min.css" rel="stylesheet"></head>' # https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/css/bootstrap.min.css
index_html += "<center><h1> <a href='https://antoyang.github.io/just-ask.html'> Just Ask </a> VideoQA Demo </h1></center>"
index_html += "<center><h2> Choose a video for which you want to ask a question </h2></center>"
index_html += "<center><h3> Default question, start and end timestamps are from the iVQA test set annotations. Nothing is pre-computed for these videos. </h3></center><br>"
index_html += '<div class="container">'
for i, vid in enumerate(self.all_video_ids):
url = "https://www.youtube.com/oembed"
params = {
"format": "json",
"url": "https://www.youtube.com/watch?v=%s" % vid,
}
query_string = urllib.parse.urlencode(params)
url = url + "?" + query_string
try:
with urllib.request.urlopen(url) as response:
response_text = response.read()
data = json.loads(response_text.decode())
title = data["title"]
thumbnail_url = data["thumbnail_url"]
except:
title = "Unavailable Video"
thumbnail_url = "https://images.drivereasy.com/wp-content/uploads/2017/10/this-video-is-not-available-1.jpg"
if i % 4 == 0:
index_html += '<div class="row">'
index_html += '<div class="col-md-3 col-sm-12"><center><a href="vqa?video_id={}"><img src={} height="180" width="240"></img></a><br>'.format(
vid, thumbnail_url
)
index_html += '<a href="vqa?video_id={}">{}</a></center></div>'.format(
vid, title
)
if (i % 4 == 3) or (i == min(len(self.all_video_ids), self.max_videos) - 1):
index_html += "</div><br><br>"
index_html += "</div>"
index_html += "<center><a href='reload' class='btn btn-primary btn-lg active'>More videos!</a></center><br>"
index_html += "<center><h2> Built by <a href='https://antoyang.github.io/'> Antoine Yang </a> </h2> </center><br>"
return index_html
def run():
args = get_args()
port = args.port
cherrypy.config.update({"server.socket_port": port})
cherrypy.config.update({"server.socket_host": "0.0.0.0"})
conf = {
"/": {
"tools.sessions.on": True,
"tools.staticdir.root": os.path.abspath(os.getcwd()),
},
"/js": {"tools.staticdir.on": True, "tools.staticdir.dir": "./js"},
}
dir_map = {
"activitynet": "ActivityNet-QA",
"msrvtt": "MSRVTT-QA",
"msvd": "MSVD-QA",
"ivqa": "iVQA",
}
feature_path = os.path.join(
SERVER_FEATURE_PATH, dir_map[args.dataset], "full_s3d_features_test.pth"
) # path to S3D features extracted for the full video duration
default_data = pickle.load(
open(
os.path.join(
SERVER_FEATURE_PATH, dir_map[args.dataset], "default_test.pkl"
),
"rb",
)
) # dictionary mapping video_id to question, start and end extracted from the dataset
bert_tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
a2id, id2a, a2v = compute_a2v(
vocab_path=args.vocab_path,
bert_tokenizer=bert_tokenizer,
amax_words=args.amax_words,
)
a2v = a2v.cpu()
vqa_model = MMT_VideoQA(
feature_dim=args.feature_dim,
word_dim=args.word_dim,
N=args.n_layers,
d_model=args.embd_dim,
d_ff=args.ff_dim,
h=args.n_heads,
dropout=args.dropout,
T=args.max_feats,
Q=args.qmax_words,
baseline=args.baseline,
)
vqa_model2 = MMT_VideoQA(
feature_dim=args.feature_dim,
word_dim=args.word_dim,
N=args.n_layers,
d_model=args.embd_dim,
d_ff=args.ff_dim,
h=args.n_heads,
dropout=args.dropout,
T=args.max_feats,
Q=args.qmax_words,
baseline=args.baseline,
)
print(f"http server is running at port {port}")
cherrypy.quickstart(
Server(
vqa_model,
vqa_model2,
args.pretrain_path,
args.pretrain_path2,
feature_path,
a2v,
id2a,
args.max_feats,
args.qmax_words,
default_data,
args.nb_examples,
),
"/",
conf,
)
if __name__ == "__main__":
run()
| 14,705 | 42 | 382 | py |
just-ask | just-ask-main/extract/video_loader.py | import torch as th
from torch.utils.data import Dataset
import pandas as pd
import os
import numpy as np
import ffmpeg
class VideoLoader(Dataset):
"""Pytorch video loader."""
def __init__(
self,
csv,
framerate=1,
size=112,
centercrop=False,
):
self.csv = pd.read_csv(csv)
self.centercrop = centercrop
self.size = size
self.framerate = framerate
def __len__(self):
return len(self.csv)
def _get_video_dim(self, video_path):
probe = ffmpeg.probe(video_path)
video_stream = next(
(stream for stream in probe["streams"] if stream["codec_type"] == "video"),
None,
)
width = int(video_stream["width"])
height = int(video_stream["height"])
num, denum = video_stream["avg_frame_rate"].split("/")
frame_rate = int(num) / int(denum)
return height, width, frame_rate
def _get_output_dim(self, h, w):
if isinstance(self.size, tuple) and len(self.size) == 2:
return self.size
elif h >= w:
return int(h * self.size / w), self.size
else:
return self.size, int(w * self.size / h)
def __getitem__(self, idx):
video_path = self.csv["video_path"].values[idx]
output_file = self.csv["feature_path"].values[idx]
if not (os.path.isfile(output_file)) and os.path.isfile(video_path):
print("Decoding video: {}".format(video_path))
try:
h, w, fr = self._get_video_dim(video_path)
except:
print("ffprobe failed at: {}".format(video_path))
return {
"video": th.zeros(1),
"input": video_path,
"output": output_file,
}
if fr < 1:
print("Corrupted Frame Rate: {}".format(video_path))
return {
"video": th.zeros(1),
"input": video_path,
"output": output_file,
}
height, width = self._get_output_dim(h, w)
try:
cmd = (
ffmpeg.input(video_path)
.filter("fps", fps=self.framerate)
.filter("scale", width, height)
)
if self.centercrop:
x = int((width - self.size) / 2.0)
y = int((height - self.size) / 2.0)
cmd = cmd.crop(x, y, self.size, self.size)
out, _ = cmd.output("pipe:", format="rawvideo", pix_fmt="rgb24").run(
capture_stdout=True, quiet=True
)
except:
print("ffmpeg error at: {}".format(video_path))
return {
"video": th.zeros(1),
"input": video_path,
"output": output_file,
}
if self.centercrop and isinstance(self.size, int):
height, width = self.size, self.size
video = np.frombuffer(out, np.uint8).reshape([-1, height, width, 3])
video = th.from_numpy(video.astype("float32"))
video = video.permute(0, 3, 1, 2)
else:
video = th.zeros(1)
return {"video": video, "input": video_path, "output": output_file}
| 3,410 | 33.11 | 87 | py |
just-ask | just-ask-main/extract/preprocessing.py | import torch as th
class Normalize(object):
def __init__(self, mean, std):
self.mean = th.FloatTensor(mean).view(1, 3, 1, 1)
self.std = th.FloatTensor(std).view(1, 3, 1, 1)
def __call__(self, tensor):
tensor = (tensor - self.mean) / (self.std + 1e-8)
return tensor
class Preprocessing(object):
def __init__(self, num_frames=16):
self.norm = Normalize(mean=[110.6, 103.2, 96.3], std=[1.0, 1.0, 1.0])
self.num_frames = num_frames
def _zero_pad(self, tensor, size):
n = size - len(tensor) % size
if n == size:
return tensor
else:
z = th.zeros(n, tensor.shape[1], tensor.shape[2], tensor.shape[3])
return th.cat((tensor, z), 0)
def __call__(self, tensor):
tensor = tensor[: len(tensor) - len(tensor) % self.num_frames]
tensor = tensor / 255.0
tensor = tensor.view(
-1, self.num_frames, tensor.shape[1], tensor.shape[2], tensor.shape[3]
)
tensor = tensor.transpose(1, 2)
return tensor
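# Sketch of the clip regrouping performed in __call__: T frames are trimmed to
# a multiple of num_frames and reshaped into S3D clips of shape
# (n_clips, 3, num_frames, H, W) (hypothetical sizes):
def _example_clip_shape(num_frames=16):
    frames = th.zeros(35, 3, 112, 112)  # 35 decoded frames
    frames = frames[: len(frames) - len(frames) % num_frames]  # keep 32
    clips = frames.view(-1, num_frames, 3, 112, 112).transpose(1, 2)
    return clips.shape  # -> torch.Size([2, 3, 16, 112, 112])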
| 1,075 | 29.742857 | 82 | py |
just-ask | just-ask-main/extract/s3dg.py | """Contains the definition for Gated Separable 3D network (S3D-G).
"""
import torch as th
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
import re
from global_parameters import S3D_DICT_PATH
class InceptionBlock(nn.Module):
def __init__(
self,
input_dim,
num_outputs_0_0a,
num_outputs_1_0a,
num_outputs_1_0b,
num_outputs_2_0a,
num_outputs_2_0b,
num_outputs_3_0b,
gating=True,
):
super(InceptionBlock, self).__init__()
self.conv_b0 = STConv3D(input_dim, num_outputs_0_0a, [1, 1, 1])
self.conv_b1_a = STConv3D(input_dim, num_outputs_1_0a, [1, 1, 1])
self.conv_b1_b = STConv3D(
num_outputs_1_0a, num_outputs_1_0b, [3, 3, 3], padding=1, separable=True
)
self.conv_b2_a = STConv3D(input_dim, num_outputs_2_0a, [1, 1, 1])
self.conv_b2_b = STConv3D(
num_outputs_2_0a, num_outputs_2_0b, [3, 3, 3], padding=1, separable=True
)
self.maxpool_b3 = th.nn.MaxPool3d((3, 3, 3), stride=1, padding=1)
self.conv_b3_b = STConv3D(input_dim, num_outputs_3_0b, [1, 1, 1])
self.gating = gating
self.output_dim = (
num_outputs_0_0a + num_outputs_1_0b + num_outputs_2_0b + num_outputs_3_0b
)
if gating:
self.gating_b0 = SelfGating(num_outputs_0_0a)
self.gating_b1 = SelfGating(num_outputs_1_0b)
self.gating_b2 = SelfGating(num_outputs_2_0b)
self.gating_b3 = SelfGating(num_outputs_3_0b)
def forward(self, input):
"""Inception block"""
b0 = self.conv_b0(input)
b1 = self.conv_b1_a(input)
b1 = self.conv_b1_b(b1)
b2 = self.conv_b2_a(input)
b2 = self.conv_b2_b(b2)
b3 = self.maxpool_b3(input)
b3 = self.conv_b3_b(b3)
if self.gating:
b0 = self.gating_b0(b0)
b1 = self.gating_b1(b1)
b2 = self.gating_b2(b2)
b3 = self.gating_b3(b3)
return th.cat((b0, b1, b2, b3), dim=1)
class SelfGating(nn.Module):
def __init__(self, input_dim):
super(SelfGating, self).__init__()
self.fc = nn.Linear(input_dim, input_dim)
def forward(self, input_tensor):
"""Feature gating as used in S3D-G."""
spatiotemporal_average = th.mean(input_tensor, dim=[2, 3, 4])
weights = self.fc(spatiotemporal_average)
weights = th.sigmoid(weights)
return weights[:, :, None, None, None] * input_tensor
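# Shape sketch for SelfGating (illustrative): for input of shape [B, C, T, H, W],
# the mean over (T, H, W) gives [B, C]; the linear layer plus sigmoid produce
# per-channel weights in (0, 1) that rescale the feature map by broadcasting:
#
#   gate = SelfGating(64)
#   y = gate(th.randn(2, 64, 8, 14, 14))  # -> same shape, [2, 64, 8, 14, 14]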
class STConv3D(nn.Module):
def __init__(
self, input_dim, output_dim, kernel_size, stride=1, padding=0, separable=False
):
super(STConv3D, self).__init__()
self.separable = separable
self.relu = nn.ReLU(inplace=True)
assert len(kernel_size) == 3
if separable and kernel_size[0] != 1:
spatial_kernel_size = [1, kernel_size[1], kernel_size[2]]
temporal_kernel_size = [kernel_size[0], 1, 1]
if isinstance(stride, list) and len(stride) == 3:
spatial_stride = [1, stride[1], stride[2]]
temporal_stride = [stride[0], 1, 1]
else:
spatial_stride = [1, stride, stride]
temporal_stride = [stride, 1, 1]
if isinstance(padding, list) and len(padding) == 3:
spatial_padding = [0, padding[1], padding[2]]
temporal_padding = [padding[0], 0, 0]
else:
spatial_padding = [0, padding, padding]
temporal_padding = [padding, 0, 0]
if separable:
self.conv1 = nn.Conv3d(
input_dim,
output_dim,
kernel_size=spatial_kernel_size,
stride=spatial_stride,
padding=spatial_padding,
bias=False,
)
self.bn1 = nn.BatchNorm3d(output_dim)
self.conv2 = nn.Conv3d(
output_dim,
output_dim,
kernel_size=temporal_kernel_size,
stride=temporal_stride,
padding=temporal_padding,
bias=False,
)
self.bn2 = nn.BatchNorm3d(output_dim)
else:
self.conv1 = nn.Conv3d(
input_dim,
output_dim,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=False,
)
self.bn1 = nn.BatchNorm3d(output_dim)
def forward(self, input):
out = self.relu(self.bn1(self.conv1(input)))
if self.separable:
out = self.relu(self.bn2(self.conv2(out)))
return out
def get_padding_shape(filter_shape, stride):
def _pad_top_bottom(filter_dim, stride_val):
pad_along = max(filter_dim - stride_val, 0)
pad_top = pad_along // 2
pad_bottom = pad_along - pad_top
return pad_top, pad_bottom
padding_shape = []
for filter_dim, stride_val in zip(filter_shape, stride):
pad_top, pad_bottom = _pad_top_bottom(filter_dim, stride_val)
padding_shape.append(pad_top)
padding_shape.append(pad_bottom)
depth_top = padding_shape.pop(0)
depth_bottom = padding_shape.pop(0)
padding_shape.append(depth_top)
padding_shape.append(depth_bottom)
return tuple(padding_shape)
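# Illustrative example of the TF-style "SAME" padding helper above:
#
#   get_padding_shape((1, 3, 3), (1, 2, 2))  # -> (0, 1, 0, 1, 0, 0)
#
# The two depth pads are rotated to the end of the tuple so it can be fed directly
# to nn.ConstantPad3d, whose padding argument starts with the last dimension.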
class MaxPool3dTFPadding(th.nn.Module):
def __init__(self, kernel_size, stride=None, padding="SAME"):
super(MaxPool3dTFPadding, self).__init__()
if padding == "SAME":
padding_shape = get_padding_shape(kernel_size, stride)
self.padding_shape = padding_shape
self.pad = th.nn.ConstantPad3d(padding_shape, 0)
self.pool = th.nn.MaxPool3d(kernel_size, stride, ceil_mode=True)
def forward(self, inp):
inp = self.pad(inp)
out = self.pool(inp)
return out
class Sentence_Embedding(nn.Module):
def __init__(
self,
embd_dim,
num_embeddings=66250,
word_embedding_dim=300,
token_to_word_path=S3D_DICT_PATH,
max_words=16,
output_dim=2048,
):
super(Sentence_Embedding, self).__init__()
self.word_embd = nn.Embedding(num_embeddings, word_embedding_dim)
self.fc1 = nn.Linear(word_embedding_dim, output_dim)
self.fc2 = nn.Linear(output_dim, embd_dim)
self.word_to_token = {}
self.max_words = max_words
token_to_word = np.load(token_to_word_path)
for i, t in enumerate(token_to_word):
self.word_to_token[t] = i + 1
def _zero_pad_tensor_token(self, tensor, size):
if len(tensor) >= size:
return tensor[:size]
else:
zero = th.zeros(size - len(tensor)).long()
return th.cat((tensor, zero), dim=0)
def _split_text(self, sentence):
w = re.findall(r"[\w']+", str(sentence))
return w
def _words_to_token(self, words):
words = [
self.word_to_token[word] for word in words if word in self.word_to_token
]
if words:
we = self._zero_pad_tensor_token(th.LongTensor(words), self.max_words)
return we
else:
return th.zeros(self.max_words).long()
def _words_to_ids(self, x):
split_x = [self._words_to_token(self._split_text(sent)) for sent in x]
return th.stack(split_x, dim=0)
def forward(self, x):
x = self._words_to_ids(x)
x = self.word_embd(x)
x = F.relu(self.fc1(x))
x = th.max(x, dim=1)[0]
x = self.fc2(x)
return x
class S3D(nn.Module):
def __init__(
self,
num_classes=400,
gating=True,
space_to_depth=False,
embd=False,
feature_map=False,
):
super(S3D, self).__init__()
self.num_classes = num_classes
        self.use_gating = gating  # kept separate: `self.gating` is reassigned to a module below
self.space_to_depth = space_to_depth
if space_to_depth:
self.conv1 = STConv3D(
24, 64, [2, 4, 4], stride=1, padding=(1, 2, 2), separable=False
)
else:
self.conv1 = STConv3D(
3, 64, [3, 7, 7], stride=2, padding=(1, 3, 3), separable=False
)
self.conv_2b = STConv3D(64, 64, [1, 1, 1], separable=False)
self.conv_2c = STConv3D(64, 192, [3, 3, 3], padding=1, separable=True)
self.gating = SelfGating(192)
self.maxpool_2a = MaxPool3dTFPadding(
kernel_size=(1, 3, 3), stride=(1, 2, 2), padding="SAME"
)
self.maxpool_3a = MaxPool3dTFPadding(
kernel_size=(1, 3, 3), stride=(1, 2, 2), padding="SAME"
)
self.mixed_3b = InceptionBlock(192, 64, 96, 128, 16, 32, 32)
self.mixed_3c = InceptionBlock(
self.mixed_3b.output_dim, 128, 128, 192, 32, 96, 64
)
self.maxpool_4a = MaxPool3dTFPadding(
kernel_size=(3, 3, 3), stride=(2, 2, 2), padding="SAME"
)
self.mixed_4b = InceptionBlock(
self.mixed_3c.output_dim, 192, 96, 208, 16, 48, 64
)
self.mixed_4c = InceptionBlock(
self.mixed_4b.output_dim, 160, 112, 224, 24, 64, 64
)
self.mixed_4d = InceptionBlock(
self.mixed_4c.output_dim, 128, 128, 256, 24, 64, 64
)
self.mixed_4e = InceptionBlock(
self.mixed_4d.output_dim, 112, 144, 288, 32, 64, 64
)
self.mixed_4f = InceptionBlock(
self.mixed_4e.output_dim, 256, 160, 320, 32, 128, 128
)
        self.maxpool_5a = MaxPool3dTFPadding(
kernel_size=(2, 2, 2), stride=(2, 2, 2), padding="SAME"
)
self.mixed_5b = InceptionBlock(
self.mixed_4f.output_dim, 256, 160, 320, 32, 128, 128
)
self.mixed_5c = InceptionBlock(
self.mixed_5b.output_dim, 384, 192, 384, 48, 128, 128
)
self.fc = nn.Linear(self.mixed_5c.output_dim, num_classes)
self.text_module = Sentence_Embedding(512)
self.mixed_5c_embd = embd
self.feature_map = feature_map
def _space_to_depth(self, input):
B, C, T, H, W = input.shape
input = input.view(B, C, T // 2, 2, H // 2, 2, W // 2, 2)
input = input.permute(0, 3, 5, 7, 1, 2, 4, 6)
input = input.contiguous().view(B, 8 * C, T // 2, H // 2, W // 2)
return input
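    # Shape sketch (illustrative): _space_to_depth folds each 2x2x2 spatio-temporal
    # block into channels, e.g. [B, 3, 32, 224, 224] -> [B, 24, 16, 112, 112],
    # which is why the space_to_depth stem above expects 24 input channels.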
def forward(self, inputs):
"""Defines the I3D/S3DG base architecture."""
# out = {}
if self.space_to_depth:
inputs = self._space_to_depth(inputs)
# 'Conv2d_1a_7x7'
net = self.conv1(inputs)
if self.space_to_depth:
net = net[:, :, 1:, 1:, 1:]
# out['Conv2d_1a_7x7'] = net
# 'MaxPool_2a_3x3'
net = self.maxpool_2a(net)
# out['MaxPool_2a_3x3'] = net
#'Conv2d_2b_1x1'
net = self.conv_2b(net)
# out['Conv2d_2b_1x1'] = net
# 'Conv2d_2c_3x3'
net = self.conv_2c(net)
# out['Conv2d_2c_3x3'] = net
        if self.use_gating:
net = self.gating(net)
# out['gating_1'] = net
# 'MaxPool_3a_3x3'
net = self.maxpool_3a(net)
# out['MaxPool_3a_3x3'] = net
# end_point = 'Mixed_3b'
net = self.mixed_3b(net)
# out['Mixed_3b'] = net
# end_point = 'Mixed_3c'
net = self.mixed_3c(net)
# out['Mixed_3c'] = net
# end_point = 'MaxPool_4a_3x3'
net = self.maxpool_4a(net)
# out['MaxPool_4a_3x3'] = net
# end_point = 'Mixed_4b'
net = self.mixed_4b(net)
# out['Mixed_4b'] = net
# end_point = 'Mixed_4c'
net = self.mixed_4c(net)
# out['Mixed_4c'] = net
# end_point = 'Mixed_4d'
net = self.mixed_4d(net)
# out['Mixed_4d'] = net
# end_point = 'Mixed_4e'
net = self.mixed_4e(net)
# out['Mixed_4e'] = net
# end_point = 'Mixed_4f'
net = self.mixed_4f(net)
# out['Mixed_4f'] = net
# end_point = 'MaxPool_5a_2x2'
net = self.maxpool_5a(net)
# out['MaxPool_5a_2x2'] = net
# end_point = 'Mixed_5b'
net = self.mixed_5b(net)
# out['Mixed_5b'] = net
# end_point = 'Mixed_5c'
net = self.mixed_5c(net)
# out['Mixed_5c'] = net
# out['Avgpool'] = net
if self.mixed_5c_embd:
if not self.feature_map:
net = th.mean(net, dim=[2, 3, 4])
return net
else:
if not self.feature_map:
net = th.mean(net, dim=[2, 3, 4])
else:
net = net.permute(0, 2, 3, 4, 1)
net = self.fc(net)
# out['final'] = net
return net
| 12,861 | 33.856369 | 86 | py |
just-ask | just-ask-main/extract/extract.py | import torch as th
import math
import numpy as np
import torch.nn.functional as F
from tqdm import tqdm
import argparse
from extract.video_loader import VideoLoader
from torch.utils.data import DataLoader
from extract.s3dg import S3D
from extract.preprocessing import Preprocessing
from extract.random_sequence_shuffler import RandomSequenceSampler
from global_parameters import S3D_PATH
parser = argparse.ArgumentParser(description="Easy video feature extractor")
parser.add_argument(
"--csv",
type=str,
help="input csv with columns video_path (input video) and feature_path (output path to feature)",
)
parser.add_argument(
"--batch_size", type=int, default=32, help="batch size for extraction"
)
parser.add_argument(
"--half_precision",
type=int,
default=0,
help="whether to output half precision float or not",
)
parser.add_argument(
"--num_decoding_thread",
type=int,
default=0,
help="number of parallel threads for video decoding",
)
parser.add_argument(
"--l2_normalize",
type=int,
default=0,
help="whether to l2 normalize the output feature",
)
parser.add_argument("--fps", type=int, default=16, help="framerate")
parser.add_argument(
"--model_path", type=str, default=S3D_PATH, help="path to s3d model checkpoint"
)
parser.add_argument("--mixed_5c", type=int, default=1, help="mixed_5c feature")
parser.add_argument(
"--feature_dim", type=int, default=1024, help="output video feature dimension"
)
parser.add_argument("--cudnn_benchmark", type=int, default=0, help="cudnn benchmark")
args = parser.parse_args()
if args.cudnn_benchmark:
th.backends.cudnn.benchmark = True
dataset = VideoLoader(
args.csv,
framerate=args.fps,
size=224,
centercrop=True,
)
n_dataset = len(dataset)
sampler = RandomSequenceSampler(n_dataset, 10)
loader = DataLoader(
dataset,
batch_size=1,
shuffle=False,
num_workers=args.num_decoding_thread,
sampler=sampler if n_dataset > 10 else None,
)
preprocess = Preprocessing(num_frames=args.fps)
model = S3D(512, space_to_depth=True, embd=args.mixed_5c, feature_map=0)
model_data = th.load(args.model_path)
model.load_state_dict(model_data)
model.eval()
model = th.nn.DataParallel(model)
model = model.cuda()
with th.no_grad():
for k, data in enumerate(loader):
input_file = data["input"][0]
output_file = data["output"][0]
if len(data["video"].shape) > 3:
print(
"Computing features of video {}/{}: {}".format(
k + 1, n_dataset, input_file
)
)
video = data["video"].squeeze()
if len(video.shape) == 4:
video = preprocess(video)
n_chunk = len(video)
features = th.cuda.FloatTensor(n_chunk, args.feature_dim).fill_(0)
n_iter = int(math.ceil(n_chunk / float(args.batch_size)))
for i in tqdm(range(n_iter)):
min_ind = i * args.batch_size
max_ind = (i + 1) * args.batch_size
video_batch = video[min_ind:max_ind].cuda()
batch_features = model(video_batch)
if args.l2_normalize:
batch_features = F.normalize(batch_features, dim=1)
features[min_ind:max_ind] = batch_features
features = features.cpu().numpy()
if args.half_precision:
features = features.astype("float16")
np.save(output_file, features)
else:
print("Video {} already processed.".format(input_file))
| 3,644 | 32.440367 | 101 | py |
just-ask | just-ask-main/extract/random_sequence_shuffler.py | from torch.utils.data.sampler import Sampler
import numpy as np
class RandomSequenceSampler(Sampler):
def __init__(self, n_sample, seq_len):
self.n_sample = n_sample
self.seq_len = seq_len
def _pad_ind(self, ind):
zeros = np.zeros(self.seq_len - self.n_sample % self.seq_len)
ind = np.concatenate((ind, zeros))
return ind
def __iter__(self):
idx = np.arange(self.n_sample)
if self.n_sample % self.seq_len != 0:
idx = self._pad_ind(idx)
idx = np.reshape(idx, (-1, self.seq_len))
np.random.shuffle(idx)
idx = np.reshape(idx, (-1))
return iter(idx.astype(int))
def __len__(self):
return self.n_sample + (self.seq_len - self.n_sample % self.seq_len)
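# Illustrative run: with n_sample=7 and seq_len=3, the indices [0..6] are padded with
# two zeros, reshaped to [[0,1,2],[3,4,5],[6,0,0]], the rows are shuffled, and the
# result is flattened, e.g. [3, 4, 5, 6, 0, 0, 0, 1, 2] (the padded zeros duplicate
# index 0). Consecutive dataset rows therefore stay adjacent within each sequence.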
| 775 | 28.846154 | 76 | py |
just-ask | just-ask-main/extract/merge_features.py | import numpy as np
import argparse
import os
import torch
from tqdm import tqdm
import pandas as pd
from global_parameters import MSVD_PATH, HOW2QA_PATH
parser = argparse.ArgumentParser(description="Feature merger")
parser.add_argument("--folder", type=str, required=True, help="folder of features")
parser.add_argument(
"--output_path", type=str, required=True, help="output path for features"
)
parser.add_argument(
"--dataset",
type=str,
help="dataset",
required=True,
choices=["ivqa", "msrvtt", "msvd", "activitynet"],
)
parser.add_argument(
"--pad",
type=int,
help="set as diff of 0 to trunc and pad up to a certain nb of seconds",
default=0,
)
args = parser.parse_args()
files = os.listdir(args.folder)
files = [x for x in files if x[-4:] == ".npy"]
# Get mapping from feature file name to dataset video_id
if args.dataset == "msrvtt":
mapping = {x: int(x.split(".")[0][5:]) for x in files}
elif args.dataset == "msvd":
f = open(os.path.join(MSVD_PATH, "youtube_mapping.txt"))
mapping = {}
for line in f.readlines():
l = line.split(" ")
idx = l[1].split("\n")[0][3:]
mapping[l[0] + ".avi.npy"] = int(idx)
elif args.dataset in ["ivqa", "activitynet"]:
mapping = {x: x[:11] for x in files}
else:
raise NotImplementedError
features = {}
for i in tqdm(range(len(files))):
x = files[i]
feat = torch.from_numpy(np.load(os.path.join(args.folder, x)))
if args.pad and len(feat) < args.pad:
feat = torch.cat([feat, torch.zeros(args.pad - len(feat), feat.shape[1])])
elif args.pad:
feat = feat[: args.pad]
features[mapping[x]] = feat.float()
torch.save(features, args.output_path)
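# The saved artifact is a single dict mapping each video_id to a float feature tensor
# (illustrative usage):
#
#   feats = torch.load(args.output_path)
#   feats[some_video_id].shape  # -> (num_clips, feature_dim); truncated or
#                               #    zero-padded to --pad rows when --pad > 0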
| 1,710 | 26.596774 | 83 | py |
just-ask | just-ask-main/data/howto_loader.py | import torch
from torch.utils.data import Dataset
import pandas as pd
import os
import numpy as np
from util import tokenize
class HowTo_Dataset(Dataset):
def __init__(
self,
csv_path,
caption,
features_path,
min_time=10,
max_time=20,
min_words=10,
max_words=20,
n_pair=32,
bert_tokenizer=None,
):
"""
        :param csv_path: path to a csv with video_id and video_path columns
:param caption: caption: dictionary mapping video_id to a dictionary mapping start, end, text to corresponding lists
:param features_path: path to the directory of features
:param min_time: minimum duration of a clip in seconds
:param max_time: maximum duration of a clip in seconds
:param min_words: minimum number of words in a clip
:param max_words: maximum number of words in a clip
:param n_pair: number of clips to sample from each video
:param bert_tokenizer: BERT tokenizer
"""
self.csv = pd.read_csv(csv_path)
self.caption = caption
self.feature_path = features_path
self.min_time = min_time
self.max_time = max_time
self.min_words = min_words
self.max_words = max_words
self.n_pair = n_pair
self.bert_tokenizer = bert_tokenizer
def __len__(self):
return len(self.csv)
def _get_text(self, caption, n_pair_max):
n_caption = len(caption["start"])
k = min(n_pair_max, n_caption)
starts = np.zeros(k)
ends = np.zeros(k)
text = [""] * k
r_ind = np.random.choice(range(n_caption), k, replace=False)
for i in range(k):
ind = r_ind[i]
text[i], starts[i], ends[i] = self._get_single_text(caption, ind)
text_embds = tokenize(
text,
self.bert_tokenizer,
add_special_tokens=True,
max_length=self.max_words,
dynamic_padding=True,
truncation=True,
)
return text_embds, text, starts, ends
def _get_single_text(self, caption, ind):
start, end = ind, ind
words = str(caption["text"][ind])
n_words = len(words.split(" "))
diff = caption["end"][end] - caption["start"][start]
while n_words < self.min_words or diff < self.min_time:
if start > 0 and end < len(caption["end"]) - 1:
next_words = str(caption["text"][end + 1])
prev_words = str(caption["text"][start - 1])
d1 = caption["end"][end + 1] - caption["start"][start]
d2 = caption["end"][end] - caption["start"][start - 1]
if (self.min_time > 0 and d2 <= d1) or (
self.min_time == 0 and len(next_words) <= len(prev_words)
):
start -= 1
words = prev_words + " " + words
else:
end += 1
words = words + " " + next_words
elif start > 0:
prev_words = str(caption["text"][start - 1])
words = prev_words + " " + words
start -= 1
elif end < len(caption["end"]) - 1:
next_words = str(caption["text"][end + 1])
words = words + " " + next_words
end += 1
else:
break
diff = caption["end"][end] - caption["start"][start]
n_words = len(words.split(" "))
return words, caption["start"][start], caption["end"][end]
def _get_video(self, vid_path, s, e):
feature_path = os.path.join(self.feature_path, vid_path)
video = torch.from_numpy(np.load(feature_path)).float()
video_len = torch.ones(len(s))
output = torch.zeros(len(s), self.max_time, video.shape[-1])
for i in range(len(s)):
start = int(s[i])
end = int(e[i]) + 1
slice = video[start:end]
assert len(slice) >= 1
if len(slice) < self.max_time:
video_len[i] = len(slice)
output[i] = torch.cat(
[slice, torch.zeros(self.max_time - len(slice), slice.size(1))],
dim=0,
)
else:
video_len[i] = self.max_time
output[i] = slice[: self.max_time]
return output, video_len
def __getitem__(self, idx):
video_id = self.csv["video_id"].values[idx]
vid_path = self.csv["video_path"].values[idx]
text, caption, starts, ends = self._get_text(
self.caption[video_id], self.n_pair
)
video, video_len = self._get_video(vid_path, starts, ends)
return {
"video": video,
"video_len": video_len,
"text": text,
}
def howto_collate_fn(batch):
"""
:param batch: [dataset[i] for i in N]
:return: tensorized batch with the text padded to the max length of the batch
"""
bs = len(batch)
video = torch.cat([batch[i]["video"] for i in range(bs)], 0)
video_len = torch.cat([batch[i]["video_len"] for i in range(bs)], 0)
text = [batch[i]["text"] for i in range(bs)]
maxlen = max([x.shape[1] for x in text])
text_padded = torch.zeros(sum(x.shape[0] for x in text), maxlen).long()
idx = 0
for i, tensor in enumerate(text):
n, l = tensor.shape
text_padded[idx : idx + n, :l] = tensor
idx += n
return {
"video": video,
"video_len": video_len,
"text": text_padded,
}
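# Illustrative batch shapes after collation: with batch size B and up to n_pair clips
# sampled per video, "video" is [<= B * n_pair, max_time, feature_dim], "video_len"
# has one entry per clip, and "text" is padded to the longest tokenized caption in
# the batch.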
| 5,636 | 34.012422 | 124 | py |
just-ask | just-ask-main/data/webvidvqa_loader.py | import torch
from torch.utils.data import Dataset
import pandas as pd
import os
import numpy as np
from torch.utils.data.dataloader import default_collate
from util import tokenize
class WebVidVQA_Dataset(Dataset):
def __init__(
self,
csv_path,
caption,
features_path,
qmax_words=20,
amax_words=20,
train=True,
n_pair=32,
max_feats=20,
bert_tokenizer=None,
feature_dim=1024,
):
"""
Difference with HowToVQA dataloader: here all QAs from a video correspond to the whole video (no start and end timestamps)
:param csv_path: path to a csv with video_id and video_path columns
:param caption: dictionary mapping video_id to a dictionary mapping start, end, question and answer to corresponding lists
:param features_path: path to the directory of features
:param qmax_words: maximum number of words in the question
:param amax_words: maximum number of words in the answer
:param train: whether to train or validate
:param n_pair: number of clips to sample from each video
:param max_feats: maximum number of video features
:param bert_tokenizer: BERT tokenizer
:param feature_dim: dimension of the visual features
"""
self.data = pd.read_csv(csv_path)
self.caption = caption
self.feature_path = features_path
self.qmax_words = qmax_words
self.amax_words = amax_words
self.train = train
self.n_pair = n_pair
self.max_feats = max_feats
self.bert_tokenizer = bert_tokenizer
self.feature_dim = feature_dim
def __len__(self):
return len(self.data)
def _get_text(self, caption, n_pair_max, train=True):
n_caption = len(caption["question"])
n_pair_max = min(n_caption, n_pair_max)
atxt = [""] * n_pair_max
qtxt = [""] * n_pair_max
r_ind = (
np.random.choice(range(n_caption), n_pair_max, replace=False)
if train
else np.arange(n_pair_max)
) # sample clips
for i in range(n_pair_max):
ind = r_ind[i]
if isinstance(caption["question"][ind], list):
idx = (
np.random.randint(len(caption["question"][ind]))
if self.train
else 0
)
atxt[i], qtxt[i] = (
str(caption["answer"][ind]),
str(caption["question"][ind][idx]),
)
else:
atxt[i], qtxt[i] = (
str(caption["answer"][ind]),
str(caption["question"][ind]),
)
question = tokenize(
qtxt,
self.bert_tokenizer,
add_special_tokens=True,
max_length=self.qmax_words,
dynamic_padding=True,
truncation=True,
)
answer = tokenize(
atxt,
self.bert_tokenizer,
add_special_tokens=True,
max_length=self.amax_words,
dynamic_padding=True,
truncation=True,
)
return atxt, answer, qtxt, question
def _get_video(self, vid_path):
feature_path = os.path.join(self.feature_path, vid_path)
try:
video = torch.from_numpy(np.load(feature_path)).float()
except FileNotFoundError:
            video = torch.zeros(1, self.feature_dim)  # missing feature file: fall back to a single zero vector
if len(video) > self.max_feats:
video_len = self.max_feats
sampled = []
for j in range(self.max_feats):
sampled.append(video[(j * len(video)) // self.max_feats])
padded_video = torch.stack(sampled)
else:
padded_video = video
video_len = len(video)
feature = padded_video.unsqueeze(0)
        video_len = np.array([video_len], dtype=np.int64)  # np.int was removed in NumPy >= 1.24
return feature, video_len
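    # Illustrative: a 100-row feature with max_feats=20 is subsampled at indices
    # (j * 100) // 20 for j in 0..19 (every 5th row); shorter videos are kept whole,
    # and video_len records the number of features actually present.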
def __getitem__(self, idx):
video_id = str(self.data["video_id"].values[idx])
if "feature_path" in self.data:
video_path = self.data["feature_path"].values[idx]
else:
video_path = video_id + ".mp4.npy"
atxt, answer, qtxt, question = self._get_text(
self.caption[video_id], self.n_pair, train=self.train
)
video, video_len = self._get_video(video_path)
return {
"video_id": video_id,
"video_path": video_path,
"atxt": atxt,
"qtxt": qtxt,
"start": np.array(
[0], dtype=np.int
), # for compatibility with HowToVQA collate function
"end": np.array(
[0], dtype=np.int
), # for compatibility with HowToVQA collate function
"video": video,
"video_len": video_len,
"answer": answer,
"question": question,
}
| 5,059 | 32.959732 | 130 | py |
just-ask | just-ask-main/data/videotext_loader.py | import torch as th
from torch.utils.data import Dataset
import pandas as pd
import pickle
class VideoText_Dataset(Dataset):
def __init__(
self,
csv_path,
features_path,
max_words=30,
bert_tokenizer=None,
max_feats=20,
):
"""
Args:
"""
self.data = pd.read_csv(csv_path)
self.features = th.load(features_path)
self.max_words = max_words
self.bert_tokenizer = bert_tokenizer
self.max_feats = max_feats
def __len__(self):
return len(self.data)
def _bert_tokenizer(self, text):
tokens = th.tensor(
self.bert_tokenizer.encode(
text,
add_special_tokens=True,
padding="max_length",
max_length=self.max_words,
truncation=True,
),
dtype=th.long,
)
return tokens
def __getitem__(self, idx):
text = self.data["sentence"].values[idx]
text_embd = th.tensor(
self.bert_tokenizer.encode(
text,
add_special_tokens=True,
padding="longest",
max_length=self.max_words,
truncation=True,
),
dtype=th.long,
)
video_id = self.data["video_id"].values[idx]
video = self.features[video_id]
        if len(video) < self.max_feats:
            vid_duration = len(video)
            video = th.cat(  # zero-pad up to max_feats
                [video, th.zeros(self.max_feats - len(video), video.shape[1])]
            )
else:
sampled = []
for j in range(self.max_feats):
sampled.append(video[(j * len(video)) // self.max_feats])
video = th.stack(sampled)
vid_duration = len(video)
return {"video": video, "video_len": vid_duration, "text": text_embd}
class Youcook_Dataset(Dataset):
"""Youcook dataset loader."""
def __init__(
self,
data,
max_words=30,
bert_tokenizer=None,
max_feats=20,
):
"""
Args:
"""
self.data = pickle.load(open(data, "rb"))
self.max_words = max_words
self.bert_tokenizer = bert_tokenizer
self.max_feats = max_feats
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
text = self.data[idx]["caption"]
text_embd = th.tensor(
self.bert_tokenizer.encode(
text,
add_special_tokens=True,
padding="longest",
max_length=self.max_words,
truncation=True,
),
dtype=th.long,
)
video = th.from_numpy(self.data[idx]["feature"]).float()
        if len(video) <= self.max_feats:
            vid_duration = len(video)
            if len(video) < self.max_feats:  # zero-pad up to max_feats
                video = th.cat(
                    [video, th.zeros(self.max_feats - len(video), video.shape[1])]
                )
else:
sampled = []
for j in range(self.max_feats):
sampled.append(video[(j * len(video)) // self.max_feats])
video = th.stack(sampled)
vid_duration = len(video)
return {"video": video, "video_len": vid_duration, "text": text_embd}
def videotext_collate_fn(batch):
"""
:param batch: [dataset[i] for i in N]
:return: tensorized batch with the text padded to the max length of the batch
"""
bs = len(batch)
video = th.stack([batch[i]["video"] for i in range(bs)], 0)
video_len = th.tensor([batch[i]["video_len"] for i in range(bs)], dtype=th.long)
text = [batch[i]["text"] for i in range(bs)]
maxlen = max([len(x) for x in text])
text_padded = th.zeros(bs, maxlen).long()
for i, tensor in enumerate(text):
l = len(tensor)
text_padded[i, :l] = tensor
return {
"video": video,
"video_len": video_len,
"text": text_padded,
}
| 4,213 | 28.263889 | 84 | py |
just-ask | just-ask-main/data/videoqa_loader.py | import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataloader import default_collate
import pandas as pd
import collections
from util import tokenize
class VideoQADataset(Dataset):
def __init__(
self,
csv_path,
features,
qmax_words=20,
amax_words=5,
bert_tokenizer=None,
a2id=None,
ivqa=False,
max_feats=20,
mc=0,
):
"""
:param csv_path: path to a csv containing columns video_id, question, answer
:param features: dictionary mapping video_id to torch tensor of features
:param qmax_words: maximum number of words for a question
:param amax_words: maximum number of words for an answer
:param bert_tokenizer: BERT tokenizer
:param a2id: answer to index mapping
:param ivqa: whether to use iVQA or not
:param max_feats: maximum frames to sample from a video
"""
self.data = pd.read_csv(csv_path)
self.features = features
self.qmax_words = qmax_words
self.amax_words = amax_words
self.a2id = a2id
self.bert_tokenizer = bert_tokenizer
self.ivqa = ivqa
self.max_feats = max_feats
self.mc = mc
def __len__(self):
return len(self.data)
def __getitem__(self, index):
vid_id = self.data["video_id"].values[index]
video = self.features[vid_id]
        if len(video) < self.max_feats:
            vid_duration = len(video)
            video = torch.cat(  # zero-pad up to max_feats
                [video, torch.zeros(self.max_feats - len(video), video.shape[1])]
            )
else:
sampled = []
for j in range(self.max_feats):
sampled.append(video[(j * len(video)) // self.max_feats])
video = torch.stack(sampled)
vid_duration = len(video)
        type, answer, answer_len = 0, 0, 0  # placeholders, overwritten below depending on the dataset mode
if self.ivqa:
answer_txt = collections.Counter(
[
self.data["answer1"].values[index],
self.data["answer2"].values[index],
self.data["answer3"].values[index],
self.data["answer4"].values[index],
self.data["answer5"].values[index],
]
)
answer_id = torch.zeros(len(self.a2id))
for x in answer_txt:
if x in self.a2id:
answer_id[self.a2id[x]] = answer_txt[x]
answer_txt = ", ".join(
[str(x) + "(" + str(answer_txt[x]) + ")" for x in answer_txt]
)
elif self.mc:
answer_id = int(self.data["answer"][index])
answer_txt = [self.data["a" + str(i + 1)][index] for i in range(self.mc)]
answer = tokenize(
answer_txt,
self.bert_tokenizer,
add_special_tokens=True,
max_length=self.amax_words,
dynamic_padding=True,
truncation=True,
)
else:
answer_txt = self.data["answer"].values[index]
answer_id = self.a2id.get(
answer_txt, -1
) # put an answer_id -1 if not in top answers, that will be considered wrong during evaluation
if not self.mc:
type = self.data["type"].values[index]
question_txt = self.data["question"][index]
question_embd = torch.tensor(
self.bert_tokenizer.encode(
question_txt,
add_special_tokens=True,
padding="longest",
max_length=self.qmax_words,
truncation=True,
),
dtype=torch.long,
)
return {
"video_id": vid_id,
"video": video,
"video_len": vid_duration,
"question": question_embd,
"question_txt": question_txt,
"type": type,
"answer_id": answer_id,
"answer_txt": answer_txt,
"answer": answer,
}
def videoqa_collate_fn(batch):
"""
:param batch: [dataset[i] for i in N]
:return: tensorized batch with the question and the ans candidates padded to the max length of the batch
"""
qmax_len = max(len(batch[i]["question"]) for i in range(len(batch)))
for i in range(len(batch)):
if len(batch[i]["question"]) < qmax_len:
batch[i]["question"] = torch.cat(
[
batch[i]["question"],
torch.zeros(qmax_len - len(batch[i]["question"]), dtype=torch.long),
],
0,
)
if not isinstance(batch[0]["answer"], int):
amax_len = max(x["answer"].size(1) for x in batch)
for i in range(len(batch)):
if batch[i]["answer"].size(1) < amax_len:
batch[i]["answer"] = torch.cat(
[
batch[i]["answer"],
torch.zeros(
(
batch[i]["answer"].size(0),
amax_len - batch[i]["answer"].size(1),
),
dtype=torch.long,
),
],
1,
)
return default_collate(batch)
def get_videoqa_loaders(args, features, a2id, bert_tokenizer):
train_dataset = VideoQADataset(
csv_path=args.train_csv_path,
features=features,
qmax_words=args.qmax_words,
amax_words=args.amax_words,
bert_tokenizer=bert_tokenizer,
a2id=a2id,
ivqa=(args.dataset == "ivqa"),
max_feats=args.max_feats,
mc=args.mc,
)
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
num_workers=args.num_thread_reader,
shuffle=True,
drop_last=True,
collate_fn=videoqa_collate_fn,
)
test_dataset = VideoQADataset(
csv_path=args.test_csv_path,
features=features,
qmax_words=args.qmax_words,
amax_words=args.amax_words,
bert_tokenizer=bert_tokenizer,
a2id=a2id,
ivqa=(args.dataset == "ivqa"),
max_feats=args.max_feats,
mc=args.mc,
)
test_loader = DataLoader(
test_dataset,
batch_size=args.batch_size_val,
num_workers=args.num_thread_reader,
shuffle=False,
drop_last=False,
collate_fn=videoqa_collate_fn,
)
val_dataset = VideoQADataset(
csv_path=args.val_csv_path,
features=features,
qmax_words=args.qmax_words,
amax_words=args.amax_words,
bert_tokenizer=bert_tokenizer,
a2id=a2id,
ivqa=(args.dataset == "ivqa"),
max_feats=args.max_feats,
mc=args.mc,
)
val_loader = DataLoader(
val_dataset,
batch_size=args.batch_size_val,
num_workers=args.num_thread_reader,
shuffle=False,
collate_fn=videoqa_collate_fn,
)
return (
train_dataset,
train_loader,
val_dataset,
val_loader,
test_dataset,
test_loader,
)
| 7,370 | 30.909091 | 108 | py |
just-ask | just-ask-main/data/howtovqa_loader.py | import torch
from torch.utils.data import Dataset
import pandas as pd
import os
import numpy as np
from torch.utils.data.dataloader import default_collate
from util import tokenize
class HowToVQA_Dataset(Dataset):
def __init__(
self,
csv_path,
caption,
features_path,
qmax_words=20,
amax_words=20,
train=True,
n_pair=32,
max_feats=20,
bert_tokenizer=None,
):
"""
:param csv_path: path to a csv with video_id and video_path columns
:param caption: dictionary mapping video_id to a dictionary mapping start, end, question and answer to corresponding lists
:param features_path: path to the directory of features
:param qmax_words: maximum number of words in the question
:param amax_words: maximum number of words in the answer
:param train: whether to train or validate
:param n_pair: number of clips to sample from each video
:param max_feats: maximum number of video features
:param bert_tokenizer: BERT tokenizer
"""
self.data = pd.read_csv(csv_path)
self.caption = caption
self.feature_path = features_path
self.qmax_words = qmax_words
self.amax_words = amax_words
self.train = train
self.n_pair = n_pair
self.max_feats = max_feats
self.bert_tokenizer = bert_tokenizer
def __len__(self):
return len(self.data)
def _get_text(self, caption, n_pair_max, train=True):
n_caption = len(caption["start"])
n_pair_max = min(n_caption, n_pair_max)
start = np.zeros(n_pair_max)
end = np.zeros(n_pair_max)
atxt = [""] * n_pair_max
qtxt = [""] * n_pair_max
r_ind = (
np.random.choice(range(n_caption), n_pair_max, replace=False)
if train
else np.arange(n_pair_max)
) # sample clips
for i in range(n_pair_max):
ind = r_ind[i]
atxt[i], qtxt[i], start[i], end[i] = (
str(caption["answer"][ind]),
str(caption["question"][ind]),
caption["start"][ind],
caption["end"][ind],
)
question = tokenize(
qtxt,
self.bert_tokenizer,
add_special_tokens=True,
max_length=self.qmax_words,
dynamic_padding=True,
truncation=True,
)
answer = tokenize(
atxt,
self.bert_tokenizer,
add_special_tokens=True,
max_length=self.amax_words,
dynamic_padding=True,
truncation=True,
)
return start, end, atxt, answer, qtxt, question
def _get_video(self, vid_path, start, end):
feature_path = os.path.join(self.feature_path, vid_path)
video = torch.from_numpy(np.load(feature_path)).float()
video_len = np.zeros(len(start))
feature = torch.zeros(len(start), self.max_feats, video.shape[-1])
for i in range(len(start)):
s = int(start[i])
e = int(end[i]) + 1
slice = video[s:e]
video_len[i] = min(self.max_feats, len(slice))
if len(slice) < self.max_feats:
padded_slice = torch.cat(
[slice, torch.zeros(self.max_feats - len(slice), slice.shape[1])]
)
else:
padded_slice = slice[: self.max_feats]
feature[i] = padded_slice
return feature, video_len
def __getitem__(self, idx):
video_id = self.data["video_id"].values[idx]
video_path = self.data["video_path"].values[idx]
start, end, atxt, answer, qtxt, question = self._get_text(
self.caption[video_id], self.n_pair, train=self.train
)
video, video_len = self._get_video(video_path, start, end)
return {
"video_id": video_id,
"video_path": video_path,
"atxt": atxt,
"qtxt": qtxt,
"start": start,
"end": end,
"video": video,
"video_len": video_len,
"answer": answer,
"question": question,
}
def howtovqa_collate_fn(batch):
"""
:param batch: [dataset[i] for i in N]
:return: tensorized batch with the question and the ans candidates padded to the max length of the batch
"""
bs = len(batch)
video_id = default_collate([batch[i]["video_id"] for i in range(bs)])
video_path = default_collate([batch[i]["video_path"] for i in range(bs)])
atxt = [batch[i]["atxt"] for i in range(bs)]
atxt = [x for y in atxt for x in y]
qtxt = [batch[i]["qtxt"] for i in range(bs)]
qtxt = [x for y in qtxt for x in y]
start = torch.cat([torch.from_numpy(batch[i]["start"]) for i in range(bs)], 0)
end = torch.cat([torch.from_numpy(batch[i]["end"]) for i in range(bs)], 0)
video = torch.cat([batch[i]["video"] for i in range(bs)], 0)
video_len = torch.cat(
[torch.from_numpy(batch[i]["video_len"]) for i in range(bs)], 0
)
ans = [batch[i]["answer"] for i in range(bs)]
maxalen = max([x.shape[1] for x in ans])
answer = torch.zeros(sum(x.shape[0] for x in ans), maxalen).long()
idx = 0
for i, tensor in enumerate(ans):
n, l = tensor.shape
answer[idx : idx + n, :l] = tensor
idx += n
que = [batch[i]["question"] for i in range(bs)]
maxquelen = max([x.shape[1] for x in que])
question = torch.zeros(sum(x.shape[0] for x in que), maxquelen).long()
idx = 0
for i, tensor in enumerate(que):
n, l = tensor.shape
question[idx : idx + n, :l] = tensor
idx += n
return {
"video_id": video_id,
"video_path": video_path,
"atxt": atxt,
"qtxt": qtxt,
"start": start,
"end": end,
"video": video,
"video_len": video_len,
"answer": answer,
"question": question,
}
| 6,044 | 32.39779 | 130 | py |
just-ask | just-ask-main/model/multimodal_transformer.py | from transformers.activations import gelu
import torch.nn as nn
import numpy as np
import torch
import math
from model.language_model import Bert, AModel
import copy
from transformers.modeling_outputs import BaseModelOutput
from transformers import DistilBertConfig
def create_sinusoidal_embeddings(n_pos, dim, out):
with torch.no_grad():
position_enc = np.array(
[
[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)]
for pos in range(n_pos)
]
)
out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
out.detach_()
out.requires_grad = False
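# A minimal sketch of the fixed table built above (illustrative size):
#
#   emb = nn.Embedding(512, 768)
#   create_sinusoidal_embeddings(512, 768, emb.weight)
#   # emb.weight[pos, 2j]     == sin(pos / 10000 ** (2j / 768))
#   # emb.weight[pos, 2j + 1] == cos(pos / 10000 ** (2j / 768))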
class MultiHeadSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.n_heads = config.n_heads
self.dim = config.dim
self.dropout = nn.Dropout(p=config.attention_dropout)
assert self.dim % self.n_heads == 0
self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
self.pruned_heads = set()
def forward(self, query, key, value, mask, head_mask=None, output_attentions=False):
"""
Parameters
----------
query: torch.tensor(bs, seq_length, dim)
key: torch.tensor(bs, seq_length, dim)
value: torch.tensor(bs, seq_length, dim)
mask: torch.tensor(bs, seq_length)
Outputs
-------
        context: torch.tensor(bs, seq_length, dim)
            Contextualized layer
        weights: torch.tensor(bs, n_heads, seq_length, seq_length)
            Attention weights. Optional: only returned if `output_attentions=True`
"""
bs, q_length, dim = query.size()
k_length = key.size(1)
# assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)
# assert key.size() == value.size()
dim_per_head = self.dim // self.n_heads
mask_reshp = (bs, 1, 1, k_length)
def shape(x):
""" separate heads """
return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
def unshape(x):
""" group heads """
return (
x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
)
q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)
v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)
q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head)
scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, q_length, k_length)
mask = (
(mask == 0).view(mask_reshp).expand_as(scores)
) # (bs, n_heads, q_length, k_length)
scores.masked_fill_(mask, -float("inf")) # (bs, n_heads, q_length, k_length)
weights = nn.Softmax(dim=-1)(scores) # (bs, n_heads, q_length, k_length)
weights = self.dropout(weights) # (bs, n_heads, q_length, k_length)
# Mask heads if we want to
if head_mask is not None:
weights = weights * head_mask
context = torch.matmul(weights, v) # (bs, n_heads, q_length, dim_per_head)
context = unshape(context) # (bs, q_length, dim)
context = self.out_lin(context) # (bs, q_length, dim)
if output_attentions:
return (context, weights)
else:
return (context,)
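# Shape sketch (illustrative): with bs=2, seq_length=40, dim=512 and n_heads=8, each
# head attends in dim_per_head=64; `scores` is [2, 8, 40, 40], masked positions are
# set to -inf before the softmax, and the returned context is [2, 40, 512].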
class FFN(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = nn.Dropout(p=config.dropout)
self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim)
self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim)
assert config.activation in [
"relu",
"gelu",
], "activation ({}) must be in ['relu', 'gelu']".format(config.activation)
self.activation = gelu if config.activation == "gelu" else nn.ReLU()
def forward(self, input):
x = self.lin1(input)
x = self.activation(x)
x = self.lin2(x)
x = self.dropout(x)
return x
class TransformerBlock(nn.Module):
def __init__(self, config):
super().__init__()
assert config.dim % config.n_heads == 0
self.attention = MultiHeadSelfAttention(config)
self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
self.ffn = FFN(config)
self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
def forward(self, x, attn_mask=None, head_mask=None, output_attentions=False):
"""
Parameters
----------
x: torch.tensor(bs, seq_length, dim)
attn_mask: torch.tensor(bs, seq_length)
Outputs
-------
        sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length)
            The attention weights. Optional: only returned if `output_attentions=True`
ffn_output: torch.tensor(bs, seq_length, dim)
The output of the transformer block contextualization.
"""
# Self-Attention
sa_output = self.attention(
query=x,
key=x,
value=x,
mask=attn_mask,
head_mask=head_mask,
output_attentions=output_attentions,
)
if output_attentions:
(
sa_output,
sa_weights,
) = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
        else:  # the attention returns a 1-tuple when `output_attentions` is False
assert type(sa_output) == tuple
sa_output = sa_output[0]
sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)
# Feed Forward Network
ffn_output = self.ffn(sa_output) # (bs, seq_length, dim)
ffn_output = self.output_layer_norm(
ffn_output + sa_output
) # (bs, seq_length, dim)
output = (ffn_output,)
if output_attentions:
output = (sa_weights,) + output
return output
class Transformer(nn.Module):
def __init__(self, config):
super().__init__()
self.n_layers = config.n_layers
layer = TransformerBlock(config)
self.layer = nn.ModuleList(
[copy.deepcopy(layer) for _ in range(config.n_layers)]
)
def forward(
self,
x,
attn_mask=None,
head_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=None,
):
"""
Parameters
----------
x: torch.tensor(bs, seq_length, dim)
Input sequence embedded.
attn_mask: torch.tensor(bs, seq_length)
Attention mask on the sequence.
Outputs
-------
hidden_state: torch.tensor(bs, seq_length, dim)
Sequence of hiddens states in the last (top) layer
all_hidden_states: Tuple[torch.tensor(bs, seq_length, dim)]
Tuple of length n_layers with the hidden states from each layer.
Optional: only if output_hidden_states=True
all_attentions: Tuple[torch.tensor(bs, n_heads, seq_length, seq_length)]
Tuple of length n_layers with the attention weights from each layer
Optional: only if output_attentions=True
"""
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_state = x
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
if head_mask is not None:
layer_outputs = layer_module(
x=hidden_state,
attn_mask=attn_mask,
head_mask=head_mask[i],
output_attentions=output_attentions,
)
else:
layer_outputs = layer_module(
x=hidden_state,
attn_mask=attn_mask,
head_mask=None,
output_attentions=output_attentions,
)
hidden_state = layer_outputs[-1]
if output_attentions:
assert len(layer_outputs) == 2
attentions = layer_outputs[0]
all_attentions = all_attentions + (attentions,)
else:
assert len(layer_outputs) == 1
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
if not return_dict:
return tuple(
v
for v in [hidden_state, all_hidden_states, all_attentions]
if v is not None
)
return BaseModelOutput(
last_hidden_state=hidden_state,
hidden_states=all_hidden_states,
attentions=all_attentions,
)
class Embeddings(nn.Module):
def __init__(
self, d_model, language_len, vision_len, dropout, sinusoidal_pos_embds
):
super().__init__()
max_position_embeddings = language_len + vision_len
self.position_embeddings = nn.Embedding(max_position_embeddings, d_model)
if sinusoidal_pos_embds:
create_sinusoidal_embeddings(
n_pos=max_position_embeddings,
dim=d_model,
out=self.position_embeddings.weight,
)
self.modality_embedding = nn.Embedding(2, d_model)
self.language_len = language_len
self.vision_len = vision_len
self.LayerNorm = nn.LayerNorm(d_model, eps=1e-12)
self.dropout = nn.Dropout(dropout)
def forward(self, embeddings):
seq_length = embeddings.size(1)
position_ids = torch.arange(
seq_length, dtype=torch.long, device=embeddings.device
) # (max_seq_length)
position_ids = position_ids.unsqueeze(0).expand_as(
embeddings[:, :, 0]
) # (bs, max_seq_length)
position_embeddings = self.position_embeddings(
position_ids
) # (bs, max_seq_length, dim)
modality_embeddings = self.modality_embedding(
torch.tensor(
[0] * self.language_len + [1] * self.vision_len, dtype=torch.long
).to(embeddings.device)
)
embeddings = (
embeddings + position_embeddings + modality_embeddings
) # (bs, max_seq_length, dim)
embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim)
embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim)
return embeddings
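# Illustrative layout: with the default Q=20 question tokens and T=20 video features,
# the concatenated sequence has length 40; positions 0..19 get modality embedding 0
# (text) and positions 20..39 get modality embedding 1 (video), added on top of the
# sinusoidal position embeddings.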
class MMT_VideoQA(nn.Module):
def __init__(
self,
feature_dim=1024,
word_dim=768,
N=2,
h=8,
d_model=512,
d_ff=2048,
dropout=0.1,
Q=20,
T=20,
vocab_size=30522,
baseline="",
n_negs=1,
probe=False
):
"""
:param feature_dim: dimension of the input video features
:param word_dim: dimension of the input question features
:param N: number of transformer layers
:param h: number of transformer heads
:param d_model: dimension for the transformer and final embedding
:param d_ff: hidden dimension in the transformer
:param dropout: dropout rate in the transformer
:param Q: maximum number of tokens in the question
:param T: maximum number of video features
:param vocab_size: size of the vocabulary for the masked language modeling head
:param baseline: set as "qa" not to use the video
:param n_negs: number of negatives sampled for cross-modal matching
:param probe: whether or not to freeze all parameters but the heads
"""
super(MMT_VideoQA, self).__init__()
# video modules
self.linear_video = nn.Linear(feature_dim, d_model)
self.norm_video = nn.LayerNorm(d_model, eps=1e-12)
# question post bert modules
self.linear_question = nn.Linear(word_dim, d_model)
self.norm_question = nn.LayerNorm(d_model, eps=1e-12)
# positional and modality encoding
self.position = Embeddings(d_model, Q, T, dropout, True)
# video and question fusion modules
self.config = DistilBertConfig.from_pretrained(
"distilbert-base-uncased",
n_layers=N,
dim=d_model,
dropout=dropout,
hidden_dim=d_ff,
attention_dropout=dropout,
n_heads=h,
)
self.mmt = Transformer(self.config)
self.vqproj = nn.Sequential(nn.Dropout(dropout), nn.Linear(d_model, d_model))
# parameters
self.baseline = baseline
self.Q = Q
self.T = T
self.n_negs = n_negs
# masked language modeling head
self.vocab_transform = nn.Linear(d_model, d_model)
self.vocab_norm = nn.LayerNorm(normalized_shape=d_model, eps=1e-12)
self.vocab_projector = nn.Linear(d_model, vocab_size)
self.mlm_loss_fct = nn.CrossEntropyLoss()
# cross-modal matching head
self.crossmodal_matching = nn.Linear(d_model, 1)
self.cm_loss_fct = nn.BCELoss()
# weight initialization
self.apply(self._init_weights)
self.answer_embeddings = None
# pretrained DistilBERT language model
self.bert = Bert()
# answer modules
self.amodel = AModel(out_dim=d_model, sentence_dim=2048)
if probe: # freeze all layers but the heads
for n, p in self.named_parameters():
if "vqproj" not in n and (
("amodel" not in n) or ("linear_text" not in n)
):
p.requires_grad_(False)
else:
print(n)
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, nn.Embedding):
if module.weight.requires_grad:
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def _compute_answer_embedding(self, a2v):
self.answer_embeddings = self.get_answer_embedding(a2v)
def get_answer_embedding(self, answer):
answer = self.amodel(answer)
return answer
def get_video_embedding(self, video):
video = self.linear_video(video)
video = gelu(video)
video = self.norm_video(video)
return video
def get_question_embedding(self, question):
question = self.linear_question(question)
question = gelu(question)
question = self.norm_question(question)
return question
def forward(
self,
video,
question=None,
labels=None,
answer=None,
video_mask=None,
text_mask=None,
mode="vqa",
):
"""
:param video: [bs, T, feature_dim]
:param question: [bs, Q]
:param labels: [bs, Q] used for masked language modeling
:param answer: [batch_size, amax_words, 300] used for contrastive loss training, otherwise precomputed at the vocabulary level
:param video_mask: [bs, T]
:param text_mask: [bs, Q]
"""
if mode == "vqa":
question = self.bert(question)
if question.shape[1] < self.Q:
question = torch.cat(
[
question,
torch.zeros(
question.shape[0],
self.Q - question.shape[1],
question.shape[2],
).cuda(),
],
1,
)
text_mask = torch.cat(
[
text_mask,
torch.zeros(
text_mask.shape[0], self.Q - text_mask.shape[1]
).cuda(),
],
1,
)
if self.baseline == "qa":
question_proj = self.get_question_embedding(question)
vq_cat = torch.cat(
[
question_proj,
torch.zeros(
question_proj.size(0), self.T, question_proj.size(-1)
).cuda(),
],
dim=1,
)
vq = self.position(vq_cat)
mask = torch.cat(
[text_mask, torch.zeros(question_proj.size(0), self.T).cuda()],
dim=1,
)
attended_vq = self.mmt(x=vq, attn_mask=mask)[0]
fusion_proj = self.vqproj(attended_vq[:, 0, :])
else:
video_proj = self.get_video_embedding(video)
question_proj = self.get_question_embedding(question)
vq_cat = torch.cat([question_proj, video_proj], dim=1)
mask = torch.cat([text_mask, video_mask], dim=1)
vq = self.position(vq_cat)
attended_vq = self.mmt(x=vq, attn_mask=mask)[0]
fusion_proj = self.vqproj(attended_vq[:, 0, :])
answer_proj = (
self.get_answer_embedding(answer)
if answer is not None
else self.answer_embeddings
)
if question is not None and answer_proj.device != question.device:
answer_proj = answer_proj.to(question.device)
if answer is not None:
return fusion_proj, answer_proj
else:
return fusion_proj @ answer_proj.t()
elif mode == "mlm":
if text_mask.shape[1] < self.Q:
text_mask = torch.cat(
[
text_mask,
torch.zeros(
text_mask.shape[0], self.Q - text_mask.shape[1]
).cuda(),
],
1,
)
labels = torch.cat(
[
labels,
-100
* torch.ones(labels.shape[0], self.Q - labels.shape[1])
.long()
.cuda(),
],
1,
)
text = self.bert(question)
if text.shape[1] < self.Q:
text = torch.cat(
[
text,
torch.zeros(
text.shape[0], self.Q - text.shape[1], text.shape[2]
).cuda(),
],
1,
)
text_proj = self.get_question_embedding(text)
if not self.baseline == "qa":
mask = torch.cat([text_mask, video_mask], dim=1)
video_proj = self.get_video_embedding(video)
vq_cat = torch.cat([text_proj, video_proj], dim=1)
else:
mask = torch.cat(
[text_mask, torch.zeros(text_proj.size(0), self.T).cuda()],
dim=1,
)
vq_cat = torch.cat(
[
text_proj,
torch.zeros(
text_proj.size(0), self.T, text_proj.size(-1)
).cuda(),
],
dim=1,
)
vq = self.position(vq_cat)
attended_vq = self.mmt(x=vq, attn_mask=mask)[0]
prediction_logits = self.vocab_transform(attended_vq[:, : self.Q, :])
prediction_logits = gelu(prediction_logits)
prediction_logits = self.vocab_norm(prediction_logits)
prediction_logits = self.vocab_projector(prediction_logits)
mlm_loss = self.mlm_loss_fct(
prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1)
)
return mlm_loss
elif mode == "cm":
batch_size = len(video)
video_proj = self.get_video_embedding(video)
text = self.bert(question)
if text.shape[1] < self.Q:
text = torch.cat(
[
text,
torch.zeros(
text.shape[0], self.Q - text.shape[1], text.shape[2]
).cuda(),
],
1,
)
text_mask = torch.cat(
[
text_mask,
torch.zeros(
text_mask.shape[0], self.Q - text_mask.shape[1]
).cuda(),
],
1,
)
text_proj = self.get_question_embedding(text)
positives_vt = torch.cat([text_proj, video_proj], dim=1)
positives_vtembd = self.position(positives_vt)
mask = torch.cat([text_mask, video_mask], dim=1)
positives_attended_vt = self.mmt(x=positives_vtembd, attn_mask=mask)[0]
positives_scores = torch.sigmoid(
self.crossmodal_matching(positives_attended_vt[:, 0, :])
).squeeze()
positives_loss = self.cm_loss_fct(
positives_scores, torch.ones(batch_size).cuda()
)
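            # negatives: shift each index by a random nonzero offset so the sampled
            # index always points to a *different* element of the batch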
rd_idx = (
np.random.choice(
np.arange(1, batch_size), size=batch_size, replace=True
)
+ np.arange(batch_size)
) % batch_size
video_proj_negs = video_proj[rd_idx]
video_negs_mask = video_mask[rd_idx]
video_negatives_vt = torch.cat([text_proj, video_proj_negs], dim=1)
video_negatives_vtembd = self.position(video_negatives_vt)
mask_vnegs = torch.cat([text_mask, video_negs_mask], dim=1)
video_negatives_attended_vt = self.mmt(
x=video_negatives_vtembd, attn_mask=mask_vnegs
)[0]
video_negatives_scores = torch.sigmoid(
self.crossmodal_matching(video_negatives_attended_vt[:, 0, :])
).squeeze()
video_negatives_loss = self.cm_loss_fct(
video_negatives_scores, torch.zeros(batch_size).cuda()
)
rd_idx_txt = (
np.random.choice(
np.arange(1, batch_size), size=batch_size, replace=True
)
+ np.arange(batch_size)
) % batch_size
text_proj_negs = text_proj[rd_idx_txt]
text_negs_mask = text_mask[rd_idx_txt]
text_negatives_vt = torch.cat([text_proj_negs, video_proj], dim=1)
text_negatives_vtembd = self.position(text_negatives_vt)
mask_tnegs = torch.cat([text_negs_mask, video_mask], dim=1)
text_negatives_attended_vt = self.mmt(
x=text_negatives_vtembd, attn_mask=mask_tnegs
)[0]
text_negatives_scores = torch.sigmoid(
self.crossmodal_matching(text_negatives_attended_vt[:, 0, :])
).squeeze()
text_negatives_loss = self.cm_loss_fct(
text_negatives_scores, torch.zeros(batch_size).cuda()
)
cm_loss = positives_loss + video_negatives_loss + text_negatives_loss
return cm_loss
elif mode == "retrieval":
text = self.bert(question)
if text.shape[1] < self.Q:
text = torch.cat(
[
text,
torch.zeros(
text.shape[0], self.Q - text.shape[1], text.shape[2]
).cuda(),
],
1,
)
text_mask = torch.cat(
[
text_mask,
torch.zeros(
text_mask.shape[0], self.Q - text_mask.shape[1]
).cuda(),
],
1,
)
text_proj = self.get_question_embedding(text)
text_proj_rep = text_proj.repeat(len(video), 1, 1)
text_mask_rep = text_mask.repeat(len(video), 1)
video_proj = self.get_video_embedding(video)
vt = torch.cat([text_proj_rep, video_proj], dim=1)
mask = torch.cat([text_mask_rep, video_mask], dim=1)
attended_vt = self.mmt(x=vt, attn_mask=mask)[0]
scores = torch.sigmoid(
self.crossmodal_matching(attended_vt[:, 0, :])
).squeeze()
return scores
elif mode == "vqacm":
text = self.bert(question.squeeze())
text_mask = text_mask.squeeze()
if text.shape[1] < self.Q:
text = torch.cat(
[
text,
torch.zeros(
text.shape[0], self.Q - text.shape[1], text.shape[2]
).cuda(),
],
1,
)
text_mask = torch.cat(
[
text_mask,
torch.zeros(
text_mask.shape[0], self.Q - text_mask.shape[1]
).cuda(),
],
1,
)
text_proj = self.get_question_embedding(text)
video_proj = self.get_video_embedding(video)
video_proj_rep = video_proj.repeat(len(text), 1, 1)
video_mask_rep = video_mask.repeat(len(text), 1)
vt = torch.cat([text_proj, video_proj_rep], dim=1)
mask = torch.cat([text_mask, video_mask_rep], dim=1)
attended_vt = self.mmt(x=vt, attn_mask=mask)[0]
scores = torch.sigmoid(
self.crossmodal_matching(attended_vt[:, 0, :])
).squeeze()
return scores
| 27,052 | 36.366022 | 134 | py |
just-ask | just-ask-main/model/language_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import DistilBertTokenizer, DistilBertModel
class Bert(nn.Module):
""" Finetuned DistilBERT module """
def __init__(self):
super(Bert, self).__init__()
self.bert_tokenizer = DistilBertTokenizer.from_pretrained(
"distilbert-base-uncased"
)
self.bert = DistilBertModel.from_pretrained("distilbert-base-uncased")
self.cls_token = self.bert_tokenizer.cls_token_id
self.sep_token = self.bert_tokenizer.sep_token_id
def forward(self, tokens):
attention_mask = (tokens > 0).float()
embds = self.bert(tokens, attention_mask=attention_mask)[0]
return embds
class Sentence_Maxpool(nn.Module):
""" Utilitary for the answer module """
def __init__(self, word_dimension, output_dim, relu=True):
super(Sentence_Maxpool, self).__init__()
self.fc = nn.Linear(word_dimension, output_dim)
self.out_dim = output_dim
self.relu = relu
def forward(self, x_in):
x = self.fc(x_in)
x = torch.max(x, dim=1)[0]
if self.relu:
x = F.relu(x)
return x
class AModel(nn.Module):
"""
Answer embedding module
"""
    def __init__(self, out_dim=512, sentence_dim=2048):  # note: sentence_dim is unused in this module
super(AModel, self).__init__()
self.bert = Bert()
self.linear_text = nn.Linear(768, out_dim)
def forward(self, answer):
if len(answer.shape) == 3:
bs, nans, lans = answer.shape
answer = answer.view(bs * nans, lans)
answer = self.bert(answer)
answer = answer[:, 0, :]
answer = answer.view(bs, nans, 768)
else:
answer = self.bert(answer)
answer = answer[:, 0, :]
answer = self.linear_text(answer)
return answer
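# Usage sketch (illustrative): answer candidates arrive tokenized as [n_answers, L]
# (or [bs, n_answers, L] for multiple choice); AModel returns one out_dim-sized
# embedding per answer, which the VQA head scores against the fused video-question
# representation with a dot product (see MMT_VideoQA.forward, mode "vqa").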
| 1,880 | 28.390625 | 78 | py |
FastFusionNet | FastFusionNet-master/prepro.py | # Origin: https://github.com/taolei87/sru/blob/master/DrQA/prepro.py
# Modified by Felix Wu
import torch
import re
import json
import spacy
# import msgpack
import unicodedata
import numpy as np
import pandas as pd
import argparse
import collections
import multiprocessing
from concurrent.futures import ProcessPoolExecutor
from tqdm import tqdm
import logging
from os.path import join
from qa.utils import str2bool
from qa.general_utils import normalize_text, build_embedding, load_glove_vocab, feature_gen, token2id, process_jsonlines
parser = argparse.ArgumentParser(
description='Preprocessing data files, about 10 minutes to run.'
)
parser.add_argument('--wv_file', default='data/glove/glove.840B.300d.txt',
help='path to word vector file.')
parser.add_argument('--wv_dim', type=int, default=300,
help='word vector dimension.')
parser.add_argument('--wv_cased', type=str2bool, default=True,
help='treat the words as cased or not.')
parser.add_argument('--sort_all', type=str2bool, default=True,
help='sort the vocabulary by frequencies of all words. '
'Otherwise consider question words first.')
parser.add_argument('--sample_size', type=int, default=100,
help='size of sample data (for debugging).')
parser.add_argument('--threads', type=int, default=multiprocessing.cpu_count(),
help='number of threads for preprocessing.')
parser.add_argument('--batch_size', type=int, default=64,
help='batch size for multiprocess tokenizing and tagging.')
parser.add_argument('--data_dir', type=str, default='data/squad',
help='data directory.')
parser.add_argument('--pre_proc', default='fusion', help='[sru/fusion/qanet]')
parser.add_argument('--train', default='data/squad/train-v1.1.json')
parser.add_argument('--dev', default='data/squad/dev-v1.1.json')
parser.add_argument('--suffix', type=str, default='fusion',
help='suffix of the output file')
args = parser.parse_args()
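# Illustrative invocation (added sketch; paths are the defaults above):
#   python prepro.py --wv_file data/glove/glove.840B.300d.txt --suffix fusion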
trn_file = args.train
dev_file = args.dev
wv_file = args.wv_file
wv_dim = args.wv_dim
if args.pre_proc == 'sru':
from qa.general_utils import pre_proc_sru as pre_proc
elif args.pre_proc == 'fusion':
from qa.general_utils import pre_proc_fusion as pre_proc
elif args.pre_proc == 'qanet':
from qa.general_utils import pre_proc_qanet as pre_proc
else:
raise ValueError('args.pre_proc={}'.format(args.pre_proc))
args.spacy_version = spacy.__version__
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG,
datefmt='%m/%d/%Y %I:%M:%S')
log = logging.getLogger(__name__)
log.info(vars(args))
log.info('start data preparing...')
wv_vocab = load_glove_vocab(wv_file, wv_dim)
log.info('glove loaded.')
def flatten_json(file, proc_func):
'''A multi-processing wrapper for loading SQuAD data file.'''
with open(file) as f:
data = json.load(f)['data']
with ProcessPoolExecutor(max_workers=args.threads) as executor:
rows = executor.map(proc_func, data)
rows = sum(rows, [])
return rows
def proc_train(article):
'''Flatten each article in training data.'''
rows = []
for paragraph in article['paragraphs']:
context = paragraph['context']
for qa in paragraph['qas']:
id_, question, answers = qa['id'], qa['question'], qa['answers']
answer_starts = [a['answer_start'] for a in answers]
answers = [a['text'] for a in answers]
answer_ends = [answer_start + len(answer) for answer_start, answer in zip(answer_starts, answers)]
rows.append((id_, context, question, answers, answer_starts, answer_ends))
return rows
def proc_dev(article):
'''Flatten each article in dev data'''
rows = []
for paragraph in article['paragraphs']:
context = paragraph['context']
for qa in paragraph['qas']:
id_, question, answers = qa['id'], qa['question'], qa['answers']
answers = [a['text'] for a in answers]
rows.append((id_, context, question, answers))
return rows
train = flatten_json(trn_file, proc_train)
train = pd.DataFrame(train,
columns=['id', 'context', 'question', 'answers',
'answer_starts', 'answer_ends'])
dev = flatten_json(dev_file, proc_dev)
dev = pd.DataFrame(dev,
columns=['id', 'context', 'question', 'answers'])
##debug
# train = train[:100]
# dev = dev[:100]
log.info('json data flattened.')
nlp = spacy.load('en', parser=False, tagger=False, entity=False)
# indices = [28, 40, 86]
# for i in indices:
# print(train.context[i])
context_iter = (pre_proc(c) for c in train.context)
context_tokens = [[w.text for w in doc] for doc in nlp.pipe(
context_iter, batch_size=args.batch_size, n_threads=args.threads)]
log.info('got initial tokens.')
def get_answer_index(context, context_token, answer_starts, answer_ends):
'''
Get exact indices of the answer in the tokens of the passage,
according to the start and end position of the answer.
Args:
context (str): the context passage
context_token (list): list of tokens (str) in the context passage
answer_starts (list): the start position of the answer in the passage
answer_ends (list): the end position of the answer in the passage
Returns:
(int, int): start index and end index of answer
'''
p_str = 0
p_token = 0
start_tokens, end_tokens = [], []
for answer_start, answer_end in zip(answer_starts, answer_ends):
while p_str < len(context):
if re.match(r'\s', context[p_str]):
p_str += 1
continue
token = context_token[p_token]
token_len = len(token)
if context[p_str:p_str + token_len] != token:
return (None, None)
if p_str == answer_start:
t_start = p_token
p_str += token_len
if p_str == answer_end:
try:
start_tokens.append(t_start)
end_tokens.append(p_token)
except UnboundLocalError as e:
pass
finally:
break
p_token += 1
if len(start_tokens) == 0:
return (None, None)
else:
return (start_tokens, end_tokens)
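# Worked example for get_answer_index (added sketch): character offsets are
# walked token by token, so the answer 'cat' at chars [4, 7) in 'The cat sat.'
# maps to token index 1 for both start and end.
def _demo_get_answer_index():
    return get_answer_index('The cat sat.', ['The', 'cat', 'sat', '.'], [4], [7])
    # -> ([1], [1])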
train['answer_start_tokens'], train['answer_end_tokens'] = \
zip(*[get_answer_index(a, b, c, d) for a, b, c, d in
zip(train.context, context_tokens,
train.answer_starts, train.answer_ends)])
initial_len = len(train)
train.dropna(inplace=True)
log.info('drop {} inconsistent samples.'.format(initial_len - len(train)))
log.info('answer pointer generated.')
ntrain = len(train)
ndev = len(dev)
questions = list(train.question) + list(dev.question)
contexts = list(train.context) + list(dev.context)
nlp = spacy.load('en', parser=False)
context_text = [pre_proc(c) for c in contexts]
question_text = [pre_proc(q) for q in questions]
question_docs = [doc for doc in nlp.pipe(
iter(question_text), batch_size=args.batch_size, n_threads=args.threads)]
context_docs = [doc for doc in nlp.pipe(
iter(context_text), batch_size=args.batch_size, n_threads=args.threads)]
log.info('parsed')
# get tokens
if args.wv_cased:
question_tokens = [[normalize_text(w.text) for w in doc] for doc in question_docs]
context_tokens = [[normalize_text(w.text) for w in doc] for doc in context_docs]
else:
question_tokens = [[normalize_text(w.text).lower() for w in doc] for doc in question_docs]
context_tokens = [[normalize_text(w.text).lower() for w in doc] for doc in context_docs]
def get_spans(tokens, text):
pos = 0
spans = []
for token in tokens:
start = pos + text[pos:].find(token)
spans.append([start, start+len(token)])
pos = start + len(token)
for (s, e), token in zip(spans, tokens):
assert text[s:e] == token, '{}, {}\ntext: {}\n token: {}'.format(s, e, text, token)
return spans
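# Illustrative use of get_spans (added sketch): spans are character
# [start, end) offsets of each token in the raw text.
def _demo_get_spans():
    return get_spans(['The', 'cat'], 'The cat')  # [[0, 3], [4, 7]]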
context_token_span = [get_spans([w.text for w in doc], text) for doc, text in zip(context_docs, contexts)]
question_token_span = [get_spans([w.text for w in doc], text) for doc, text in zip(question_docs, questions)]
# context_token_span = [[(w.idx, w.idx + len(w.text)) for w in doc] for doc in context_docs]
# context_sentence_lens = [[len(sent) for sent in doc.sents] for doc in context_docs]
context_sentence_lens = [[] for doc in context_docs]
log.info('tokens generated')
# get features
context_tags = [[w.tag_ for w in doc] for doc in context_docs]
context_ents = [[w.ent_type_ for w in doc] for doc in context_docs]
context_features = []
for question, context in zip(question_docs, context_docs):
question_word = {w.text for w in question}
question_lower = {w.text.lower() for w in question}
question_lemma = {w.lemma_ if w.lemma_ != '-PRON-' else w.text.lower() for w in question}
match_origin = [w.text in question_word for w in context]
match_lower = [w.text.lower() in question_lower for w in context]
match_lemma = [(w.lemma_ if w.lemma_ != '-PRON-' else w.text.lower()) in question_lemma for w in context]
context_features.append(list(zip(match_origin, match_lower, match_lemma)))
log.info('features generated')
def build_train_vocab(questions, contexts, wv_vocab): # vocabulary will also be sorted accordingly
if args.sort_all:
counter = collections.Counter(w for doc in questions + contexts for w in doc)
vocab = sorted([t for t in counter if t in wv_vocab], key=counter.get, reverse=True)
else:
counter_q = collections.Counter(w for doc in questions for w in doc)
counter_c = collections.Counter(w for doc in contexts for w in doc)
counter = counter_c + counter_q
vocab = sorted([t for t in counter_q if t in wv_vocab], key=counter_q.get, reverse=True)
vocab += sorted([t for t in counter_c.keys() - counter_q.keys() if t in wv_vocab],
key=counter.get, reverse=True)
total = sum(counter.values())
matched = sum(counter[t] for t in vocab)
log.info('vocab {1}/{0} OOV {2}/{3} ({4:.4f}%)'.format(
len(counter), len(vocab), (total - matched), total, (total - matched) / total * 100))
vocab.insert(0, "<PAD>")
vocab.insert(1, "<UNK>")
return vocab
def build_eval_vocab(questions, contexts, train_vocab, wv_vocab): # most vocabulary comes from tr_vocab
existing_vocab = set(train_vocab)
new_vocab = list(set([w for doc in questions + contexts for w in doc if w not in existing_vocab and w in wv_vocab]))
vocab = train_vocab + new_vocab
log.info('train vocab {0}, total vocab {1}'.format(len(train_vocab), len(vocab)))
return vocab
def build_full_vocab(questions, contexts, eval_vocab):
existing_vocab = set(eval_vocab)
new_vocab = list(set([w for doc in questions + contexts for w in doc if w not in existing_vocab]))
vocab = eval_vocab + new_vocab
log.info('eval vocab {0}, total vocab {1}'.format(len(eval_vocab), len(vocab)))
return vocab
# vocab
train_vocab = build_train_vocab(question_tokens[:ntrain], context_tokens[:ntrain], wv_vocab)
eval_vocab = build_eval_vocab(question_tokens[ntrain:], context_tokens[ntrain:], train_vocab, wv_vocab)
# train_context_ids = token2id(context_tokens[:ntrain], train_vocab, unk_id=1)
# train_question_ids = token2id(question_tokens[:ntrain], train_vocab, unk_id=1)
# full_vocab = set(w for doc in context_tokens + question_tokens for w in doc) | set(eval_vocab)
# tokens
question_ids = token2id(question_tokens, eval_vocab, unk_id=1)
context_ids = token2id(context_tokens, eval_vocab, unk_id=1)
# term frequency in document
context_tf = []
for doc in context_tokens:
counter_ = collections.Counter(w.lower() for w in doc)
total = sum(counter_.values())
context_tf.append([counter_[w.lower()] / total for w in doc])
context_features = [[list(w) + [tf] for w, tf in zip(doc, tfs)] for doc, tfs in
zip(context_features, context_tf)]
# tags
vocab_tag = list(nlp.tagger.tag_names)
context_tag_ids = token2id(context_tags, vocab_tag)
log.info('Found {} POS tags.'.format(len(vocab_tag)))
# entities, build dict on the fly
vocab_ent = [''] + nlp.entity.cfg[u'actions']['1']
context_ent_ids = token2id(context_ents, vocab_ent)
log.info('Found {} entity tags: {}'.format(len(vocab_ent), vocab_ent))
log.info('vocab built.')
embedding = build_embedding(wv_file, eval_vocab, wv_dim)
log.info('got embedding matrix.')
# train.to_csv('data/squad/train.csv', index=False)
# dev.to_csv('data/squad/dev.csv', index=False)
train_feature_names = ['id', 'context_word_ids', 'context_features', 'context_pos_ids', 'context_ent_ids',
'question_word_ids',
'context', 'context_token_span', 'context_sentence_lens',
'question', 'question_token_span',
'answers', 'answer_spans']
dev_feature_names = ['id', 'context_word_ids', 'context_features', 'context_pos_ids', 'context_ent_ids',
'question_word_ids',
'context', 'context_token_span', 'context_sentence_lens',
'question', 'question_token_span',
'answers']
meta = {
'prepro_args': dict(vars(args)),
'vocab': eval_vocab,
'train_vocab_size': len(train_vocab),
'vocab_tag': vocab_tag,
'vocab_ent': vocab_ent,
'embedding': embedding.tolist(),
'train_feature_names': train_feature_names,
'dev_feature_names': dev_feature_names,
'args': dict(vars(args)),
# 'full_vocab': full_vocab,
}
train_new = list(zip(
train['id'].tolist(),
context_ids[:ntrain],
context_features[:ntrain],
context_tag_ids[:ntrain],
context_ent_ids[:ntrain],
question_ids[:ntrain],
contexts[:ntrain], # context_text[:ntrain],
context_token_span[:ntrain],
context_sentence_lens[:ntrain],
questions[:ntrain], # question_text[:ntrain],
question_token_span[:ntrain],
train.answers.tolist(),
[list(zip(s, e)) for s, e in zip(train['answer_start_tokens'], train['answer_end_tokens'])],
))
dev_new = list(zip(
dev['id'].tolist(),
context_ids[ntrain:],
context_features[ntrain:],
context_tag_ids[ntrain:],
context_ent_ids[ntrain:],
question_ids[ntrain:],
contexts[ntrain:], # context_text[ntrain:],
context_token_span[ntrain:],
context_sentence_lens[ntrain:],
questions[ntrain:], # question_text[ntrain:],
question_token_span[ntrain:],
dev['answers'].tolist(),
))
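# sanity check: each stored token span must recover the first gold answer text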
for i, d in enumerate(train_new):
if not d[6][d[7][d[12][0][0]][0]:d[7][d[12][0][1]][1]] == d[11][0]:
print(i, d[6][d[7][d[12][0][0]][0]:d[7][d[12][0][1]][1]], d[11][0])
print(len(train_new), len(dev_new))
result = {
'train': train_new,
'dev': dev_new
}
torch.save({'data': result, 'meta': meta}, join(args.data_dir, 'data{}.pth'.format('' if args.suffix == '' else '-' + args.suffix)))
if args.sample_size:
sample = {
'train': train_new[:args.sample_size],
'dev': dev_new[:args.sample_size]
}
# with open(join(args.data_dir, 'sample-{}.msgpack'.format(args.sample_size)), 'wb') as f:
# msgpack.dump(sample, f)
torch.save({'data': sample, 'meta': meta}, join(args.data_dir, 'sample-{}{}.pth'.format(args.sample_size, '' if args.suffix == '' else '-' + args.suffix)))
log.info('saved to disk.')
# with open('data/squad/meta.msgpack', 'wb') as f:
# msgpack.dump(meta, f)
# result = {
# 'trn_question_ids': question_ids[:len(train)],
# 'dev_question_ids': question_ids[len(train):],
# 'trn_context_ids': context_ids[:len(train)],
# 'dev_context_ids': context_ids[len(train):],
# 'trn_context_features': context_features[:len(train)],
# 'dev_context_features': context_features[len(train):],
# 'trn_context_tags': context_tag_ids[:len(train)],
# 'dev_context_tags': context_tag_ids[len(train):],
# 'trn_context_ents': context_ent_ids[:len(train)],
# 'dev_context_ents': context_ent_ids[len(train):],
# 'trn_context_text': context_text[:len(train)],
# 'dev_context_text': context_text[len(train):],
# 'trn_context_spans': context_token_span[:len(train)],
# 'dev_context_spans': context_token_span[len(train):]
# }
# with open('data/squad/data.msgpack', 'wb') as f:
# msgpack.dump(result, f)
# if args.sample_size:
# sample_size = args.sample_size
# sample = {
# 'trn_question_ids': result['trn_question_ids'][:sample_size],
# 'dev_question_ids': result['dev_question_ids'][:sample_size],
# 'trn_context_ids': result['trn_context_ids'][:sample_size],
# 'dev_context_ids': result['dev_context_ids'][:sample_size],
# 'trn_context_features': result['trn_context_features'][:sample_size],
# 'dev_context_features': result['dev_context_features'][:sample_size],
# 'trn_context_tags': result['trn_context_tags'][:sample_size],
# 'dev_context_tags': result['dev_context_tags'][:sample_size],
# 'trn_context_ents': result['trn_context_ents'][:sample_size],
# 'dev_context_ents': result['dev_context_ents'][:sample_size],
# 'trn_context_text': result['trn_context_text'][:sample_size],
# 'dev_context_text': result['dev_context_text'][:sample_size],
# 'trn_context_spans': result['trn_context_spans'][:sample_size],
# 'dev_context_spans': result['dev_context_spans'][:sample_size]
# }
# with open('data/squad/sample.msgpack', 'wb') as f:
# msgpack.dump(sample, f)
# log.info('saved to disk.')
| 17,786 | 39.151242 | 159 | py |
FastFusionNet | FastFusionNet-master/eval.py | import re
import os
import sys
import time
import json
import random
import logging
import argparse
import torch
from shutil import copyfile
from datetime import datetime
from collections import Counter
from qa.model import DocReaderModel
from qa.utils import *
parser = argparse.ArgumentParser(
description='Eval a QA model.'
)
# system
parser.add_argument('--save_dir', default='save/debug',
help='path to store saved models.')
parser.add_argument('--seed', type=int, default=123,
help='random seed for data shuffling, dropout, etc.')
parser.add_argument("--cuda", type=str2bool, nargs='?',
const=True, default=torch.cuda.is_available(),
help='whether to use GPU acceleration.')
parser.add_argument("--debug", action='store_true',
help='debug mode')
# eval
parser.add_argument('-bs', '--eval_batch_size', type=int, default=1,
help='batch size for evaluation (default: 1)')
parser.add_argument('-rs', '--resume', default='best_model.pt',
help='previous model file name (in `save_dir`). '
'e.g. "checkpoint_epoch_11.pt"')
parser.add_argument('--max_eval_len', type=int, default=0,
help='max len for evaluation (default: 0, i.e. unlimited)')
args = parser.parse_args()
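# Illustrative invocation (added sketch; flags as defined above):
#   python eval.py --save_dir save/fastfusionnet -rs best_model.pt -bs 32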
# set random seed
random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# setup logger
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter(fmt='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S')
ch.setFormatter(formatter)
log.addHandler(ch)
def main():
log.info('[program starts.]')
log.info('[loading previous model...]')
checkpoint = torch.load(os.path.join(args.save_dir, args.resume))
checkpoint['config'].update(vars(args))
opt = checkpoint['config']
log.info('[loading data...]')
train, dev, train_y, dev_y, embedding, opt, meta = load_data(opt, log)
log.info('[Data loaded.]')
log.info('train_size: {}, dev_size: {}'.format(len(train), len(dev)))
state_dict = checkpoint['state_dict']
model = DocReaderModel(opt, embedding, state_dict)
epoch_0 = checkpoint['epoch'] + 1
for i in range(checkpoint['epoch']):
random.shuffle(list(range(len(train)))) # synchronize random seed
log.info('opt: {}'.format(opt))
if args.cuda:
model.cuda()
log.info('model:\n{}'.format(model.network))
batches = BatchGen(opt, dev, batch_size=opt['eval_batch_size'], evaluation=True, max_len=args.max_eval_len, gpu=args.cuda, with_cids=False)
predictions = []
start = time.perf_counter()
for batch in batches:
predictions.extend(model.predict(batch))
torch.cuda.synchronize()
eval_time = time.perf_counter() - start
em, f1 = score(predictions, dev_y)
log.info("[dev EM: {} F1: {} eval_time: {:.2f} s eval_time per example: {:.3f} ms]".format(em, f1, eval_time, eval_time * 1000. / len(dev)))
if __name__ == '__main__':
main()
| 3,164 | 31.96875 | 144 | py |
FastFusionNet | FastFusionNet-master/train.py | import re
import os
import sys
import time
import json
import random
import logging
import argparse
import torch
from shutil import copyfile
from datetime import datetime
from collections import Counter
from tensorboardX import SummaryWriter
from qa.model import DocReaderModel
from qa.utils import *
parser = argparse.ArgumentParser(
description='Train a QA model.'
)
# system
parser.add_argument('--log_file', default='output.log',
help='path for log file.')
parser.add_argument('--log_per_updates', type=int, default=50,
help='log model loss per x updates (mini-batches).')
parser.add_argument('--data_suffix', default='fusion',
help='suffix of the preprocessed data file.')
parser.add_argument('--save_dir', default='save/debug',
help='path to store saved models.')
parser.add_argument('--save_last_only', action='store_true',
help='only save the final models.')
parser.add_argument('--MTLSTM_path', default='data/glove/MT-LSTM.pth',
help='path to pretrained CoVe.')
parser.add_argument('--eval_per_epoch', type=int, default=1,
help='perform evaluation per x epochs.')
parser.add_argument('--seed', type=int, default=123,
help='random seed for data shuffling, dropout, etc.')
parser.add_argument("--cuda", type=str2bool, nargs='?',
const=True, default=torch.cuda.is_available(),
help='whether to use GPU acceleration.')
parser.add_argument("--debug", action='store_true',
help='debug mode')
parser.add_argument('--profile', type=str, default='', help='profile file name')
parser.add_argument('--profile_std', action='store_true')
# training
parser.add_argument('-e', '--epochs', type=int, default=40)
parser.add_argument('-bs', '--batch_size', type=int, default=32)
parser.add_argument('-rs', '--resume', default='',
help='previous model file name (in `save_dir`). '
'e.g. "checkpoint_epoch_11.pt"')
parser.add_argument('-ro', '--resume_options', action='store_true',
help='use previous model options, ignore the cli and defaults.')
parser.add_argument('-rlr', '--reduce_lr', type=float, default=0.,
help='reduce initial (resumed) learning rate by this factor.')
parser.add_argument('--decay_every', type=int, default=0,
help='reduce the learning rate every this many epochs.')
parser.add_argument('--lr_decay_rate', type=float, default=0.5,
help='learning rate decay rate')
parser.add_argument('-op', '--optimizer', default='adamax',
help='supported optimizer: adamax, sgd, adam')
parser.add_argument('-gc', '--grad_clipping', type=float, default=10)
parser.add_argument('-wd', '--weight_decay', type=float, default=0)
parser.add_argument('-lr', '--learning_rate', type=float, default=0.002,
help='only applied to SGD.')
parser.add_argument('-mm', '--momentum', type=float, default=0,
help='only applied to SGD.')
parser.add_argument('-tp', '--tune_partial', type=int, default=1000,
help='finetune top-x embeddings.')
parser.add_argument('--fix_embeddings', action='store_true',
help='if true, `tune_partial` will be ignored.')
parser.add_argument('--rnn_padding', action='store_true',
help='perform rnn padding (much slower but more accurate).')
parser.add_argument('--max_train_len', type=int, default=0, help='max len for training')
parser.add_argument('--max_eval_len', type=int, default=0, help='max len for evaluation')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1')
parser.add_argument('--beta2', type=float, default=0.999, help='beta2')
parser.add_argument('--warm_start', action='store_true')
parser.add_argument('--ema_decay', type=float, default=1.,
help='Exponential moving average decay rate for trainable variables')
parser.add_argument('--valid_size', type=int, default=0)
parser.add_argument('--train_eval_size', type=int, default=3200, help='first n training data are used for evaluation')
parser.add_argument('--save_checkpoints', action='store_true')
# model
parser.add_argument('-m', '--model_type', default='drqa')
parser.add_argument('--question_merge', default='self_attn')
parser.add_argument('--doc_layers', type=int, default=3)
parser.add_argument('--question_layers', type=int, default=3)
parser.add_argument('--fusion_reading_layers', type=int, default=2)
parser.add_argument('--fusion_understanding_layers', type=int, default=1)
parser.add_argument('--fusion_final_layers', type=int, default=1)
parser.add_argument('--fusion_self_boost_times', type=int, default=1)
parser.add_argument('--fusion_gldr_layers', type=int, default=3)
parser.add_argument('--fusion_gldr_dilation_base', type=int, default=2)
parser.add_argument('--hidden_size', type=int, default=128)
parser.add_argument('--num_features', type=int, default=4)
parser.add_argument('--match', type=str2bool, nargs='?', const=True, default=True,
help='match features.')
parser.add_argument('--tf', type=str2bool, nargs='?', const=True, default=True,
help='term frequency features.')
parser.add_argument('--pos', type=str2bool, nargs='?', const=True, default=True,
help='use pos tags as a feature.')
parser.add_argument('--ner', type=str2bool, nargs='?', const=True, default=True,
help='use named entity tags as a feature.')
parser.add_argument('--use_word_emb', type=str2bool, nargs='?', const=True, default=True)
parser.add_argument('--use_qemb', type=str2bool, nargs='?', const=True, default=True)
parser.add_argument('--use_demb', type=str2bool, nargs='?', const=True, default=False)
parser.add_argument('--concat_rnn_layers', type=str2bool, nargs='?',
const=True, default=True)
parser.add_argument('--word_dropout_c', type=float, default=0.0)
parser.add_argument('--word_dropout_q', type=float, default=0.0)
parser.add_argument('--dropout_emb', type=float, default=0.4)
parser.add_argument('--dropout_rnn', type=float, default=0.4)
parser.add_argument('--dropout_rnn_output', type=str2bool, nargs='?',
const=True, default=True)
parser.add_argument('--variational_dropout', type=str2bool, nargs='?',
const=True, default=True)
parser.add_argument('--depth_drop', type=float, default=0., help='max dropout rate of stochastic depth')
parser.add_argument('--max_len', type=int, default=15)
parser.add_argument('--rnn_type', default='lstm',
help='supported types: rnn, gru, lstm')
parser.add_argument('--pos_dim', type=int, default=12)
parser.add_argument('--ner_dim', type=int, default=8)
parser.add_argument('--use_feat_emb', type=str2bool, nargs='?', const=True, default=True)
parser.add_argument('--end_gru', action='store_true')
parser.add_argument('--use_cove', action='store_true')
parser.add_argument('--use_max_char_emb', action='store_true')
parser.add_argument('--max_char_emb_size', type=int, default=200)
parser.add_argument('--max_char_emb_max_len', type=int, default=16)
parser.add_argument('--residual', action='store_true')
parser.add_argument('--squeeze_excitation', type=int, default=0, help='squeeze excitation reduction ratio')
parser.add_argument('--sentence_level', action='store_true')
args = parser.parse_args()
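# Illustrative invocation (added sketch; flags as defined above):
#   python train.py --save_dir save/fusionnet -m fusionnet -e 40 -bs 32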
if not args.match:
args.num_features -= 3
if not args.tf:
args.num_features -= 1
if args.fix_embeddings:
args.tune_partial = 0
# set model dir
save_dir = args.save_dir
os.makedirs(save_dir, exist_ok=True)
save_dir = os.path.abspath(save_dir)
# set random seed
random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# setup logger
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
fh = logging.FileHandler(os.path.join(save_dir, args.log_file))
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter(fmt='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
log.addHandler(fh)
log.addHandler(ch)
writer = SummaryWriter(save_dir)
def main():
log.info('[program starts.]')
train, dev, train_y, dev_y, embedding, opt, meta = load_data(vars(args), log)
# hold out original dev set
log.info('[Data loaded.]')
log.info('train_size: {}, dev_size: {}'.format(len(train), len(dev)))
if args.resume:
log.info('[loading previous model...]')
checkpoint = torch.load(os.path.join(save_dir, args.resume))
if args.resume_options:
opt.update(checkpoint['config'])
state_dict = checkpoint['state_dict']
model = DocReaderModel(opt, embedding, state_dict)
epoch_0 = checkpoint['epoch'] + 1
for i in range(checkpoint['epoch']):
random.shuffle(list(range(len(train)))) # synchronize random seed
if args.reduce_lr:
lr_decay(model.optimizer, lr_decay=args.reduce_lr)
else:
model = DocReaderModel(opt, embedding)
epoch_0 = 1
log.info('opt: {}'.format(opt))
if args.cuda:
model.cuda()
log.info('model:\n{}'.format(model.network))
opt['with_cids'] = False
if args.profile:
from pycallgraph import PyCallGraph, Config
from pycallgraph.output import GraphvizOutput
log.info('starts profiling')
with PyCallGraph(output=GraphvizOutput(output_file=args.profile), config=Config(include_stdlib=args.profile_std)):
batches = BatchGen(opt, dev, batch_size=args.batch_size, max_len=args.max_eval_len, evaluation=True, gpu=args.cuda, with_cids=opt['with_cids'])
predictions = []
for batch in batches:
predictions.extend(model.predict(batch))
print(len(dev_y), len(predictions))
dev_em, dev_f1 = score(predictions, dev_y)
log.info("[dev EM: {} F1: {}]".format(dev_em, dev_f1))
log.info('finished profiling')
return
if args.resume:
if not 'best_val_score' in checkpoint:
batches = BatchGen(opt, dev, batch_size=args.batch_size, max_len=args.max_eval_len, evaluation=True, gpu=args.cuda, with_cids=opt['with_cids'])
predictions = []
for batch in batches:
predictions.extend(model.predict(batch))
em, f1 = score(predictions, dev_y)
log.info("[dev EM: {} F1: {}]".format(em, f1))
best_val_score = f1
else:
best_val_score = checkpoint['best_val_score']
else:
best_val_score = 0.
log.info('best score is set to {:.2f}'.format(best_val_score))
with open(os.path.join(save_dir, 'opt.json'), 'w') as f:
json.dump(opt, f, indent=2)
with open(os.path.join(save_dir, 'model_str.txt'), 'w') as f:
print('model:\n{}\n\noptimizer:{}'.format(model.network, model.optimizer), file=f)
dawn_log = os.path.join(args.save_dir, 'dawn_train.tsv')
with open(dawn_log, 'w') as f:
print('epoch\thours\tf1Score', file=f)
all_train_time = 0.
for epoch in range(epoch_0, args.epochs):
log.warning('Epoch {}'.format(epoch))
# train
batches = BatchGen(opt, train, batch_size=args.batch_size, max_len=args.max_train_len, gpu=args.cuda, with_cids=opt['with_cids'])
start = datetime.now()
num_train_batches = len(batches)
for i, batch in enumerate(batches):
model.update(batch)
if model.updates % args.log_per_updates == 0:
log.info('epoch [{0:2}] updates[{1:6}] train loss[{2:.5f}] remaining[{3}]'.format(
epoch, model.updates, model.train_loss.avg,
str((datetime.now() - start) / (i + 1) * (len(batches) - i - 1)).split('.')[0]))
writer.add_scalar('train_loss_avg_iter', model.train_loss.avg, model.updates)
writer.add_scalar('train_loss_iter', model.train_loss.val, model.updates)
train_time = datetime.now() - start
all_train_time += train_time.total_seconds()
# eval
if epoch % args.eval_per_epoch == 0:
train_batches = BatchGen(opt, train[:args.train_eval_size], batch_size=args.batch_size, evaluation=True, max_len=args.max_eval_len, gpu=args.cuda, with_cids=opt['with_cids'])
predictions = []
for batch in train_batches:
predictions.extend(model.predict(batch))
train_em, train_f1 = score(predictions, train_y[:args.train_eval_size])
dev_batches = BatchGen(opt, dev, batch_size=args.batch_size, evaluation=True, max_len=args.max_eval_len, gpu=args.cuda, with_cids=opt['with_cids'])
predictions = []
start = datetime.now()
for batch in dev_batches:
predictions.extend(model.predict(batch))
dev_em, dev_f1 = score(predictions, dev_y)
eval_time = datetime.now() - start
is_best = best_val_score < dev_f1
if is_best:
best_val_score = dev_f1
log.warning("Epoch {} train loss: {:.5f} EM: {:.2f} F1: {:.2f} dev EM: {:.2f} F1: {:.2f} (best: {:.2f}) train: {:.2f} s eval: {:.2f} s".format(epoch, model.train_loss.avg, train_em, train_f1, dev_em, dev_f1, best_val_score, train_time.total_seconds(), eval_time.total_seconds()))
with open(dawn_log, 'a') as f:
print('{}\t{}\t{}'.format(epoch, all_train_time / 3600., dev_f1), file=f)
writer.add_scalar('train_loss_avg_epoch', model.train_loss.avg, epoch)
writer.add_scalar('time_train', train_time.total_seconds(), epoch)
writer.add_scalar('time_eval', eval_time.total_seconds(), epoch)
writer.add_scalar('time_per_epoch_train', train_time.total_seconds() / num_train_batches, epoch)
writer.add_scalar('time_per_epoch_eval', eval_time.total_seconds() / len(dev_batches), epoch)
writer.add_scalar('EM_train', train_em, epoch)
writer.add_scalar('F1_train', train_f1, epoch)
writer.add_scalar('EM_dev', dev_em, epoch)
writer.add_scalar('F1_dev', dev_f1, epoch)
# save
if not args.save_last_only or epoch == epoch_0 + args.epochs - 1:
prev_model_file = os.path.join(save_dir, 'checkpoint_epoch_{}.pt'.format(epoch-1))
model_file = os.path.join(save_dir, 'checkpoint_epoch_{}.pt'.format(epoch))
model.save(model_file, epoch, best_val_score)
if is_best:
copyfile(
model_file,
os.path.join(save_dir, 'best_model.pt'))
log.info('[new best model saved.]')
if os.path.exists(prev_model_file) and not args.save_checkpoints:
os.remove(prev_model_file)
if args.decay_every > 0 and epoch % args.decay_every == 0:
lr_decay(model.optimizer, args.lr_decay_rate)
def lr_decay(optimizer, lr_decay):
for param_group in optimizer.param_groups:
param_group['lr'] *= lr_decay
log.info('[learning rate reduced by {}]'.format(lr_decay))
return optimizer
if __name__ == '__main__':
main()
| 15,435 | 47.388715 | 291 | py |
FastFusionNet | FastFusionNet-master/qa/utils.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Modified by Felix Wu
from typing import Union, List, Dict, Any, Callable
import os
import re
import sys
import argparse
import torch
import string
import numpy as np
import unicodedata
import random
# from spacy.lang.en.stop_words import STOP_WORDS
import spacy
from spacy.tokens import Doc
from collections import Counter
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer
from . import layers
from torch.autograd import Variable
nlp = spacy.load('en')
STOP_WORDS = nlp.Defaults.stop_words
# vocab_tag = [''] + list(nlp.tagger.labels)
# vocab_ent = ['', 'PERSON', 'NORP', 'FAC', 'ORG', 'GPE', 'LOC', 'PRODUCT', 'EVENT',
# 'WORK_OF_ART', 'LAW', 'LANGUAGE', 'DATE', 'TIME', 'PERCENT', 'MONEY',
# 'QUANTITY', 'ORDINAL', 'CARDINAL']
# inv_vocab_tag = {w: i for i, w in enumerate(vocab_tag)}
# inv_vocab_ent = {w: i for i, w in enumerate(vocab_ent)}
# Modification: remove unused functions and imports, add a boolean parser.
# Origin: https://github.com/facebookresearch/ParlAI/tree/master/parlai/agents/drqa
# ------------------------------------------------------------------------------
# General logging utilities.
# ------------------------------------------------------------------------------
class AverageMeter(object):
"""Computes and stores the average and current value."""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
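# Illustrative use of AverageMeter (added sketch): updates are weighted by
# the number of samples n, so avg is the running per-sample mean.
def _demo_average_meter():
    meter = AverageMeter()
    meter.update(0.5, n=32)
    meter.update(0.3, n=32)
    return meter.avg  # 0.4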
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
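# Illustrative argparse wiring for str2bool (added sketch): the same pattern
# train.py uses, accepting e.g. `--cuda yes` or a bare `--cuda`.
def _demo_str2bool():
    p = argparse.ArgumentParser()
    p.add_argument('--cuda', type=str2bool, nargs='?', const=True, default=False)
    return p.parse_args(['--cuda', 'yes']).cuda  # True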
class EMA(object):
'''exponential moving average from https://discuss.pytorch.org/t/how-to-apply-exponential-moving-average-decay-for-variables/10856/4'''
def __init__(self, mu):
self.mu = mu
self.shadow = {}
def register(self, name, val):
self.shadow[name] = val.clone().cuda()
def __call__(self, name, x):
assert name in self.shadow
new_average = self.mu * x + (1.0 - self.mu) * self.shadow[name]
self.shadow[name] = new_average.clone()
return new_average
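# Illustrative EMA update (added sketch; needs a GPU because register()
# keeps the shadow on CUDA): with mu=0.9 the blend is 0.9 * x + 0.1 * shadow.
def _demo_ema():
    ema = EMA(0.9)
    ema.register('w', torch.ones(3))
    return ema('w', torch.zeros(3).cuda())  # tensor of 0.1s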
def load_data(opt, log=None):
if opt['debug']:
data_path = 'data/squad/sample-100{}.pth'
else:
data_path = 'data/squad/data{}.pth'
data_path = data_path.format('' if opt['data_suffix'] == '' else '-' + opt['data_suffix'])
if log:
log.info('loading data from {}'.format(data_path))
saved = torch.load(data_path)
if log:
log.info('done')
# with open('data/squad/meta.msgpack', 'rb') as f:
# meta = msgpack.load(f, encoding='utf8')
meta = saved['meta']
embedding = torch.Tensor(meta['embedding'])
opt['pretrained_words'] = True
opt['vocab_size'] = embedding.size(0)
opt['embedding_dim'] = embedding.size(1)
if not opt['fix_embeddings']:
embedding[1] = torch.normal(torch.zeros(opt['embedding_dim']), 1.)
opt['pos_size'] = len(meta['vocab_tag']) if opt['use_feat_emb'] else 0
opt['ner_size'] = len(meta['vocab_ent']) if opt['use_feat_emb'] else 0
# global id2w, w2id, full_vocab, w2cids
id2w = meta['vocab']
w2id = {w:i for i, w in enumerate(id2w)}
# with open(args.data_file, 'rb') as f:
# data = msgpack.load(f, encoding='utf8')
data = saved['data']
if 'full_vocab' in meta:
full_vocab = meta['full_vocab']
else:
if log:
log.info('getting full_vocab')
full_vocab = set(d[6][s:e] for d in data['train'] + data['dev'] for s, e in d[7]) | set(meta['vocab'])
saved['meta']['full_vocab'] = full_vocab
torch.save(saved, data_path)
if opt['valid_size'] > 0:
perm_idx = np.random.RandomState(4444).permutation(len(data['train']))
train = [data['train'][i] for i in perm_idx[:-opt['valid_size']]]
dev = [data['train'][i] for i in perm_idx[-opt['valid_size']:]]
else:
train = data['train']
dev = data['dev']
if log:
log.info('sorting dev')
dev.sort(key=lambda x: len(x[1]), reverse=True)
# if log:
# log.info('getting w2cids')
# w2cids = {w: torch.IntTensor(ELMoCharacterMapper.convert_word_to_char_ids(w)) for w in full_vocab}
# meta['w2cids'] = w2cids
train_y = [x[-2] for x in train]
if opt['valid_size'] > 0:
dev_y = [x[-2] for x in dev]
else:
dev_y = [x[-1] for x in dev]
if log:
log.info('generating cids')
train = [d[:9] + (None, None) + d[9:] for d in train]
dev = [d[:9] + (None, None) + d[9:] for d in dev]
if log:
log.info('done')
return train, dev, train_y, dev_y, embedding, opt, meta
def build_embedding(embed_file, targ_vocab, wv_dim):
vocab_size = len(targ_vocab)
emb = np.random.uniform(-1, 1, (vocab_size, wv_dim))
emb[0] = 0 # <PAD> should be all 0 (using broadcast)
w2id = {w: i for i, w in enumerate(targ_vocab)}
with open(embed_file, encoding="utf8") as f:
for line in f:
elems = line.split()
token = normalize_text(''.join(elems[0:-wv_dim]))
if token in w2id:
emb[w2id[token]] = [float(v) for v in elems[-wv_dim:]]
return emb
def normalize_text(text):
return unicodedata.normalize('NFD', text)
class BatchGen(object):
def __init__(self, opt={}, data=[], batch_size=32, gpu=False, max_len=0, evaluation=False, with_cids=False):
"""
input:
data - list of lists
batch_size - int
"""
self.opt = {'tf': True, 'use_feat_emb': True, 'pos_size': 12, 'ner_size': 8, 'use_elmo': False}
self.opt.update(opt)
self.batch_size = batch_size
self.eval = evaluation
self.gpu = gpu
self.max_len = max_len
self.with_cids = with_cids
# shuffle
if not evaluation:
indices = list(range(len(data)))
random.shuffle(indices)
data = [data[i] for i in indices]
if max_len > 0:
data = [d for d in data if len(d[1]) <= max_len]
# chunk into batches
data = [data[i:i + batch_size] for i in range(0, len(data), batch_size)]
self.data = data
def __len__(self):
return len(self.data)
def __iter__(self):
for batch in self.data:
batch_size = len(batch)
batch = list(zip(*batch))
context_len = max(len(x) for x in batch[1])
# print('context_len:', context_len)
context_id = torch.LongTensor(batch_size, context_len).fill_(0)
for i, doc in enumerate(batch[1]):
context_id[i, :len(doc)] = torch.LongTensor(doc)
feature_len = len(batch[2][0][0])
context_feature = torch.Tensor(batch_size, context_len, feature_len).fill_(0)
for i, doc in enumerate(batch[2]):
for j, feature in enumerate(doc):
context_feature[i, j, :] = torch.Tensor(feature)
if not self.opt['tf']:
if self.opt['match']:
context_feature = context_feature[:, :, :3]
else:
context_feature = None
else:
if not self.opt['match']:
context_feature = context_feature[:, :, 3:]
if self.opt['use_feat_emb']:
context_tag = torch.LongTensor(batch_size, context_len).fill_(0)
for i, doc in enumerate(batch[3]):
context_tag[i, :len(doc)] = torch.LongTensor(doc)
context_ent = torch.LongTensor(batch_size, context_len).fill_(0)
for i, doc in enumerate(batch[4]):
context_ent[i, :len(doc)] = torch.LongTensor(doc)
else:
# create one-hot vectors
context_tag = torch.Tensor(batch_size, context_len, self.opt['pos_size']).fill_(0)
for i, doc in enumerate(batch[3]):
for j, tag in enumerate(doc):
context_tag[i, j, tag] = 1
context_ent = torch.Tensor(batch_size, context_len, self.opt['ner_size']).fill_(0)
for i, doc in enumerate(batch[4]):
for j, ent in enumerate(doc):
context_ent[i, j, ent] = 1
question_len = max(len(x) for x in batch[5])
question_id = torch.LongTensor(batch_size, question_len).fill_(0)
for i, doc in enumerate(batch[5]):
question_id[i, :len(doc)] = torch.LongTensor(doc)
context_mask = torch.eq(context_id, 0)
question_mask = torch.eq(question_id, 0)
text = list(batch[6])
span = list(batch[7])
context_sentence_lens = list(batch[8])
if self.with_cids:
context_char_id = torch.LongTensor(batch_size, context_len, 50).fill_(260) # 260 is padding_char
for i,d in enumerate(batch[9]):
context_char_id[i, :d.size(0)] = d
question_char_id = torch.LongTensor(batch_size, question_len, 50).fill_(260)
for i,d in enumerate(batch[10]):
question_char_id[i, :d.size(0)] = d
else:
context_char_id, question_char_id = None, None
if not self.eval:
tmp = torch.LongTensor([ex[0] for ex in batch[-1]])
y_s = tmp[:, 0].contiguous()
y_e = tmp[:, 1].contiguous()
elif context_char_id is not None:
context_char_id.volatile = True
question_char_id.volatile = True
if self.gpu:
context_id = context_id.pin_memory()
context_feature = context_feature.pin_memory() if context_feature is not None else None
context_tag = context_tag.pin_memory()
context_ent = context_ent.pin_memory()
context_mask = context_mask.pin_memory()
question_id = question_id.pin_memory()
question_mask = question_mask.pin_memory()
context_char_id = context_char_id.cuda() if context_char_id is not None else None
question_char_id = question_char_id.cuda() if question_char_id is not None else None
if self.eval:
yield (context_id, context_feature, context_tag, context_ent, context_mask,
question_id, question_mask, context_sentence_lens, context_char_id, question_char_id, text, span)
else:
yield (context_id, context_feature, context_tag, context_ent, context_mask,
question_id, question_mask, context_sentence_lens, context_char_id, question_char_id, y_s, y_e, text, span)
def _normalize_answer(s):
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def _exact_match(pred, answers):
if pred is None or answers is None:
return False
pred = _normalize_answer(pred)
for a in answers:
if pred == _normalize_answer(a):
return True
return False
def _f1_score(pred, answers):
def _score(g_tokens, a_tokens):
common = Counter(g_tokens) & Counter(a_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1. * num_same / len(g_tokens)
recall = 1. * num_same / len(a_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
if pred is None or answers is None:
return 0
g_tokens = _normalize_answer(pred).split()
scores = [_score(g_tokens, _normalize_answer(a).split()) for a in answers]
return max(scores)
def score(pred, truth):
assert len(pred) == len(truth)
f1 = em = total = 0
for p, t in zip(pred, truth):
total += 1
em += _exact_match(p, t)
f1 += _f1_score(p, t)
em = 100. * em / total
f1 = 100. * f1 / total
return em, f1
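# Illustrative use of score (added sketch): _normalize_answer lowercases and
# strips punctuation/articles before matching, so 'February 7, 2016' matches
# the gold string 'February 7 2016' exactly.
def _demo_score():
    pred = ['Denver Broncos', 'February 7, 2016']
    truth = [['Denver Broncos'], ['February 7 2016']]
    return score(pred, truth)  # (100.0, 100.0)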
def get_inv_group(group_id):
inv_group = {}
for i, g in enumerate(group_id):
if g in inv_group:
inv_group[g].append(i)
else:
inv_group[g] = [i]
return inv_group
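# Illustrative use of get_inv_group (added sketch): maps each group id to
# the list of positions where it occurs.
def _demo_get_inv_group():
    return get_inv_group(['a', 'b', 'a'])  # {'a': [0, 2], 'b': [1]}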
| 13,206 | 35.183562 | 139 | py |
FastFusionNet | FastFusionNet-master/qa/model.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Origin: https://github.com/facebookresearch/ParlAI/tree/master/parlai/agents/drqa
#
# Modified by Felix Wu
# Modification:
# - change the logger name
# - save & load optimizer state dict
# - change the dimension of inputs (for POS and NER features)
import math
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import logging
from torch.autograd import Variable
from .utils import AverageMeter, EMA
from .rnn_reader import *
logger = logging.getLogger(__name__)
class DocReaderModel(object):
"""High level model that handles intializing the underlying network
architecture, saving, updating examples, and predicting examples.
"""
def __init__(self, opt, embedding=None, state_dict=None):
# Book-keeping.
self.opt = opt
self.updates = state_dict['updates'] if state_dict else 0
self.train_loss = AverageMeter()
# Building network.
if opt['model_type'] == 'drqa':
self.network = RnnDocReader(opt, embedding=embedding)
elif opt['model_type'] == 'gldr-drqa':
self.network = CnnDocReader(opt, embedding=embedding)
elif opt['model_type'] == 'fusionnet':
self.network = FusionNet(opt, embedding=embedding)
elif opt['model_type'] == 'bidaf':
self.network = BiDAF(opt, embedding=embedding)
else:
print('UNKNOWN model_type: ' + opt['model_type'])
raise NotImplementedError
if state_dict:
new_state = set(self.network.state_dict().keys())
for k in list(state_dict['network'].keys()):
if k not in new_state:
del state_dict['network'][k]
self.network.load_state_dict(state_dict['network'])
# Building optimizer.
parameters = [p for p in self.network.parameters() if p.requires_grad]
if opt['optimizer'] == 'sgd':
self.optimizer = optim.SGD(parameters, opt['learning_rate'],
momentum=opt['momentum'],
weight_decay=opt['weight_decay'])
elif opt['optimizer'] == 'adamax':
self.optimizer = optim.Adamax(parameters, opt['learning_rate'],
weight_decay=opt['weight_decay'])
elif opt['optimizer'] == 'adam':
self.optimizer = optim.Adam(parameters, opt['learning_rate'],
betas=(opt['beta1'], opt['beta2']),
weight_decay=opt['weight_decay'])
else:
raise RuntimeError('Unsupported optimizer: %s' % opt['optimizer'])
if state_dict:
self.optimizer.load_state_dict(state_dict['optimizer'])
print(self.optimizer)
if opt['ema_decay'] < 1.:
print('using EMA')
self.ema = EMA(opt['ema_decay'])
for name, param in self.network.named_parameters():
if param.requires_grad:
self.ema.register(name, param.data)
num_params = sum(p.data.numel() for p in parameters
if p.data.data_ptr() != self.network.embedding.weight.data.data_ptr())
print("{} parameters".format(num_params))
def update(self, ex):
# Train mode
self.network.train()
# Transfer to GPU
if self.opt['cuda']:
inputs = [Variable(e.cuda(non_blocking=True)) if torch.is_tensor(e) else e for e in ex[:10]]
target_s = Variable(ex[10].cuda(non_blocking=True))
target_e = Variable(ex[11].cuda(non_blocking=True))
else:
inputs = [Variable(e) if torch.is_tensor(e) else e for e in ex[:10]]
target_s = Variable(ex[10])
target_e = Variable(ex[11])
# Run forward
score_s, score_e = self.network(*inputs)
# Compute loss and accuracies
loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)
self.train_loss.update(loss.data[0], ex[0].size(0))
# warm_start
if self.opt['warm_start'] and self.updates <= 1000:
lr = self.opt['learning_rate'] / math.log(1002.) * math.log(self.updates + 2)
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
# Clear gradients and run backward
self.optimizer.zero_grad()
loss.backward()
# Clip gradients
if self.opt['grad_clipping'] > 0.:
torch.nn.utils.clip_grad_norm(self.network.parameters(),
self.opt['grad_clipping'])
# Update parameters
self.optimizer.step()
self.updates += 1
# Exponential Moving Average
if hasattr(self, 'ema'):
for name, param in self.network.named_parameters():
if param.requires_grad:
param.data = self.ema(name, param.data)
# Reset any partially fixed parameters (e.g. rare words)
self.reset_parameters()
def predict(self, ex):
# Eval mode
self.network.eval()
# Transfer to GPU
if next(self.network.parameters()).is_cuda:
inputs = [Variable(e.cuda(non_blocking=True), volatile=True) if torch.is_tensor(e) else e for e in ex[:10]]
else:
inputs = [Variable(e, volatile=True) if torch.is_tensor(e) else e for e in ex[:10]]
# Run forward
score_s, score_e = self.network(*inputs)
if type(score_s) is list:
score_s, score_e = score_s[-1], score_e[-1]
# Transfer to CPU/normal tensors for numpy ops
score_s = score_s.data.cpu()
score_e = score_e.data.cpu()
# Get argmax text spans
text = ex[-2]
spans = ex[-1]
predictions = []
max_len = self.opt['max_len'] or score_s.size(1)
for i in range(score_s.size(0)):
scores = torch.ger(score_s[i], score_e[i])
scores.triu_().tril_(max_len - 1)
scores = scores.numpy()
s_idx, e_idx = np.unravel_index(np.argmax(scores), scores.shape)
s_offset, e_offset = spans[i][s_idx][0], spans[i][e_idx][1]
predictions.append(text[i][s_offset:e_offset])
return predictions
def reset_parameters(self):
# Reset fixed embeddings to original value
if self.opt['tune_partial'] > 0:
offset = self.opt['tune_partial'] + 2
if offset < self.network.embedding.weight.data.size(0):
self.network.embedding.weight.data[offset:] \
= self.network.fixed_embedding
def save(self, filename, epoch, best_val_score=0.):
params = {
'state_dict': {
'network': self.network.state_dict(),
'optimizer': self.optimizer.state_dict(),
'updates': self.updates
},
'config': self.opt,
'epoch': epoch,
'best_val_score': best_val_score,
}
try:
torch.save(params, filename)
logger.info('model saved to {}'.format(filename))
except BaseException:
logger.warn('[ WARN: Saving failed... continuing anyway. ]')
def cuda(self):
self.network.cuda()
return self
def cpu(self):
self.network.cpu()
return self
| 7,683 | 36.300971 | 112 | py |
FastFusionNet | FastFusionNet-master/qa/encoder.py | import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import layers
from typing import IO, List, Iterable, Tuple
class RnnEncoder(nn.Module):
"""Network for the Document Reader module of DrQA."""
def __init__(self, opt):
super().__init__()
self.encoder_input_dim = opt.get('encoder_input_dim', 512)
self.rnn_output_size = 2 * opt['hidden_size'] * opt['doc_layers'] if opt['concat_rnn_layers'] else 2 * opt['hidden_size']
self.proj_size = 600 if opt['target_type'] == 'cove' else 2048
self.rnn = layers.StackedBRNN(
input_size=self.encoder_input_dim,
hidden_size=opt['hidden_size'],
num_layers=opt['doc_layers'],
dropout_rate=opt['dropout_rnn'],
dropout_output=opt['dropout_rnn_output'],
variational_dropout=opt['variational_dropout'],
concat_layers=opt['concat_rnn_layers'],
rnn_type=opt['rnn_type'],
padding=opt['rnn_padding'],
)
if self.proj_size != self.rnn_output_size:
self.proj = nn.Linear(self.rnn_output_size, self.proj_size)
def forward(self, x_emb, mask):
bs, l = x_emb.size()[:2]
pad_mask = (mask == 0)
outputs = self.rnn(x_emb, pad_mask)
if self.proj_size != self.rnn_output_size:
outputs = self.proj(outputs.contiguous().view(bs * l, -1))
outputs = outputs.contiguous().view(bs, l, -1)
mask = mask.unsqueeze(-1)
outputs = outputs * mask.float()
return outputs
| 1,572 | 37.365854 | 129 | py |
FastFusionNet | FastFusionNet-master/qa/layers.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Origin: https://github.com/facebookresearch/ParlAI/tree/master/parlai/agents/drqa
# Modified by Felix Wu
import sys
import math
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Variable
from torch.autograd.function import InplaceFunction
from oldsru import SRUCell
# ------------------------------------------------------------------------------
# Modules
# ------------------------------------------------------------------------------
def variational_dropout(x, p=0, training=False):
"""
x: batch * len * input_size
"""
if not training or p == 0:
return x
dropout_mask = Variable(1.0 / (1-p) * torch.bernoulli((1-p) * (x.data.new(x.size(0), x.size(2)).zero_() + 1)), requires_grad=False)
return dropout_mask.unsqueeze(1).expand_as(x) * x
def dropout(x, p=0, training=False, variational=False):
"""
x: (batch * len * input_size) or (any other shape)
"""
if p > 0:
if variational and len(x.size()) == 3: # if x is (batch * len * input_size)
return variational_dropout(x, p=p, training=training)
else:
return F.dropout(x, p=p, training=training)
else:
return x
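# Illustrative property of variational dropout (added sketch): one mask of
# shape (batch, input_size) is shared across the length dimension, so a
# feature is dropped (or kept and rescaled by 1/(1-p)) at every position.
def _demo_variational_dropout():
    x = Variable(torch.ones(2, 5, 8))
    y = variational_dropout(x, p=0.5, training=True)
    # y[:, i, :] is identical for every position i: entries are 0 or 2
    return y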
class SizeDropout(nn.Module):
def __init__(self, input_size, min_size, dim, rescale=True):
super().__init__()
self.min_size = min_size
self.input_size = input_size
self.dim = dim
self.eval_size = input_size
self.rescale = rescale
if min_size < input_size:
mask = torch.cat([torch.ones(min_size), torch.arange(input_size - min_size, 0, -1) / (input_size - min_size + 1)], dim=0)
else:
mask = torch.ones(input_size)
self.register_buffer('mask', torch.ones(input_size))
# self.register_buffer('eval_mask', mask)
self.eval_mask = mask.cuda()
self.train_size = input_size
self.generate_mask(1)
def sample_train_size(self):
if self.input_size == self.min_size:
return self.input_size
self.train_size = self.min_size + min(int(torch.rand(1)[0] * (self.input_size - self.min_size + 1)), self.input_size) ## take the min in case of getting 1 from torch.rand
return self.train_size
def generate_mask(self, max_dim):
curr_mask = self.mask.clone()
if self.train_size < self.input_size:
curr_mask[self.train_size:] = 0
for i in range(self.dim):
curr_mask.unsqueeze_(0)
for i in range(self.dim+1, max_dim):
curr_mask.unsqueeze_(-1)
self.curr_mask_var = Variable(curr_mask, requires_grad=False)
def generate_eval_mask(self, max_dim):
if self.rescale:
curr_mask = self.eval_mask.clone()
else:
curr_mask = torch.ones(self.input_size).cuda()
if self.eval_size < self.input_size:
curr_mask[self.eval_size:] = 0
for i in range(self.dim):
curr_mask.unsqueeze_(0)
for i in range(self.dim+1, max_dim):
curr_mask.unsqueeze_(-1)
self.curr_eval_mask_var = Variable(curr_mask, requires_grad=False)
def forward(self, x, resample=True, mask=None):
assert x.size(self.dim) == self.input_size, 'x: {}, input_size: {}'.format(x.size(), self.input_size)
if self.input_size == self.min_size:
return x
if self.training:
if resample:
self.sample_train_size()
self.generate_mask(x.dim())
elif isinstance(mask, Variable):
self.curr_mask_var = mask
elif x.dim() != self.curr_mask_var.dim() or type(x.data) != type(self.curr_mask_var.data):
'''# of dim doesn't match generate the mask again'''
self.generate_mask(x.dim())
x = x * self.curr_mask_var
else:
self.generate_eval_mask(x.dim())
x = x * self.curr_eval_mask_var
return x
def __repr__(self):
return '{}(input_size={}, min_size={}, dim={}, rescale={}, eval_size={})'.format(
self.__class__.__name__, self.input_size, self.min_size, self.dim, self.rescale, self.eval_size)
class LayerNorm(nn.Module):
'''Layer Norm implementation source: https://github.com/pytorch/pytorch/issues/1959'''
def __init__(self, features, eps=1e-6):
super().__init__()
self.gamma = nn.Parameter(torch.ones(features))
self.beta = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
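# Illustrative check for LayerNorm (added sketch): with the initial
# gamma=1, beta=0 the output is standardized along the last dimension.
def _demo_layer_norm():
    ln = LayerNorm(16)
    out = ln(Variable(torch.randn(4, 16)))
    return out.mean(-1), out.std(-1)  # ~0 and ~1 per row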
class LayerNormChannelFirst(nn.Module):
'''Layer Norm implementation source: https://github.com/pytorch/pytorch/issues/1959'''
def __init__(self, features, eps=1e-6):
super().__init__()
self.gamma = nn.Parameter(torch.ones(features).view(1, -1, 1))
self.beta = nn.Parameter(torch.zeros(features).view(1, -1, 1))
self.eps = eps
def forward(self, x):
mean = x.mean(-2, keepdim=True)
std = x.std(-2, keepdim=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
class StackedBRNN(nn.Module):
RNN_TYPES = {'lstm': nn.LSTM, 'gru': nn.GRU, 'rnn': nn.RNN, 'sru': SRUCell}
SRU_TYPES = {'sru', 'sru-v2'}
def __init__(self, input_size, hidden_size, num_layers,
dropout_rate=0, dropout_output=False, rnn_type=nn.LSTM,
variational_dropout=True,
residual=False,
squeeze_excitation=0,
sd_min_size=0, sd_rescale=True,
concat_layers=False, padding=False):
super(StackedBRNN, self).__init__()
self.padding = padding
self.dropout_output = dropout_output
self.dropout_rate = dropout_rate
self.variational_dropout = variational_dropout
self.num_layers = num_layers
self.residual = residual
self.squeeze_excitation = squeeze_excitation
self.concat_layers = concat_layers
self.sd_min_size = sd_min_size
self.sd_rescale = sd_rescale
self.rnns = nn.ModuleList()
self.rnn_type = rnn_type
for i in range(num_layers):
input_size = input_size if i == 0 else 2 * hidden_size
if rnn_type in self.SRU_TYPES:
self.rnns.append(self.RNN_TYPES[rnn_type](input_size, hidden_size,
dropout=dropout_rate,
rnn_dropout=dropout_rate,
use_tanh=1,
bidirectional=True))
else:
self.rnns.append(self.RNN_TYPES[rnn_type](input_size, hidden_size,
num_layers=1,
bidirectional=True))
if sd_min_size > 0:
self.sds = nn.ModuleList()
for i in range(num_layers):
self.sds.append(SizeDropout(hidden_size, sd_min_size, 3, sd_rescale))
if squeeze_excitation > 0:
self.ses = nn.ModuleList()
for i in range(num_layers):
self.ses.append(nn.Sequential(nn.Linear(hidden_size*2, hidden_size*2//self.squeeze_excitation),
nn.ReLU(inplace=True),
nn.Linear(hidden_size*2//self.squeeze_excitation, hidden_size*2),
nn.Sigmoid()))
def forward(self, x, x_mask):
"""Can choose to either handle or ignore variable length sequences.
Always handle padding in eval.
"""
# No padding necessary.
if x_mask.data.sum() == 0:
return self._forward_unpadded(x, x_mask)
        # Pad if requested (not supported for the SRU variants).
        if self.padding and self.rnn_type not in self.SRU_TYPES:
return self._forward_padded(x, x_mask)
# We don't care.
return self._forward_unpadded(x, x_mask)
def _forward_unpadded(self, x, x_mask):
"""Faster encoding that ignores any padding."""
# Transpose batch and sequence dims
x = x.transpose(0, 1)
lengths_var = Variable(x_mask.data.eq(0).long().sum(1).squeeze().float().unsqueeze(1), requires_grad=False)
# Encode all layers
outputs = [x]
for i in range(self.num_layers):
rnn_input = outputs[-1]
            # Apply dropout to input
if self.dropout_rate > 0 and self.rnn_type not in self.SRU_TYPES:
rnn_input = dropout(rnn_input, p=self.dropout_rate, training=self.training,
variational=self.variational_dropout)
# Forward
rnn_output = self.rnns[i](rnn_input)[0]
if self.residual and rnn_output.size() == rnn_input.size():
rnn_output = rnn_output + outputs[-1]
if self.sd_min_size > 0:
bs, l, hs = rnn_output.size()
rnn_output = self.sds[i](rnn_output.view(bs, l, 2, hs//2)).view(bs, l, hs)
if self.squeeze_excitation > 0:
rnn_output = rnn_output * self.ses[i](rnn_output.sum(0) / lengths_var).unsqueeze(0)
outputs.append(rnn_output)
# Concat hidden layers
if self.concat_layers:
output = torch.cat(outputs[1:], 2)
else:
output = outputs[-1]
# Transpose back
output = output.transpose(0, 1)
# Dropout on output layer
if self.dropout_output and self.dropout_rate > 0:
output = dropout(output, p=self.dropout_rate, training=self.training,
variational=self.variational_dropout)
return output
def _forward_padded(self, x, x_mask):
"""Slower (significantly), but more precise,
encoding that handles padding."""
# Compute sorted sequence lengths
lengths = x_mask.data.eq(0).long().sum(1).squeeze()
_, idx_sort = torch.sort(lengths, dim=0, descending=True)
_, idx_unsort = torch.sort(idx_sort, dim=0)
lengths_var = Variable(lengths[idx_sort].float().unsqueeze(1), requires_grad=False)
lengths = list(lengths[idx_sort])
idx_sort = Variable(idx_sort)
idx_unsort = Variable(idx_unsort)
# Sort x
x = x.index_select(0, idx_sort)
# Transpose batch and sequence dims
x = x.transpose(0, 1)
        # Encode all layers (packing/unpacking happens per layer below)
outputs = [x]
for i in range(self.num_layers):
rnn_input = outputs[-1]
# Apply dropout to input
if self.dropout_rate > 0:
rnn_input = dropout(rnn_input, p=self.dropout_rate, training=self.training,
variational=self.variational_dropout)
rnn_input = nn.utils.rnn.pack_padded_sequence(rnn_input, lengths)
rnn_output = self.rnns[i](rnn_input)[0]
rnn_output = nn.utils.rnn.pad_packed_sequence(rnn_output)[0]
if self.residual and rnn_output.size() == outputs[-1].size():
rnn_output = rnn_output + outputs[-1]
if self.sd_min_size > 0:
bs, l, hs = rnn_output.size()
rnn_output = self.sds[i](rnn_output.view(bs, l, 2, hs//2)).view(bs, l, hs)
if self.squeeze_excitation > 0:
rnn_output = rnn_output * self.ses[i](rnn_output.sum(0) / lengths_var).unsqueeze(0)
outputs.append(rnn_output)
# Concat hidden layers or take final
if self.concat_layers:
output = torch.cat(outputs[1:], 2)
else:
output = outputs[-1]
# Transpose and unsort
output = output.transpose(0, 1)
output = output.index_select(0, idx_unsort)
# Pad up to original batch sequence length
if output.size(1) != x_mask.size(1):
padding = torch.zeros(output.size(0),
x_mask.size(1) - output.size(1),
output.size(2)).type(output.data.type())
output = torch.cat([output, Variable(padding)], 1)
# Dropout on output layer
if self.dropout_output and self.dropout_rate > 0:
output = F.dropout(output,
p=self.dropout_rate,
training=self.training)
return output
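# Hedged usage sketch (not from the original repo): encode a toy padded batch
# with the stacked bidirectional RNN; rnn_type is a string key into RNN_TYPES,
# and x_mask follows the repo convention of 1 = padding, 0 = real token.
def _demo_stacked_brnn():
    rnn = StackedBRNN(input_size=16, hidden_size=8, num_layers=2,
                      rnn_type='lstm', concat_layers=True)
    x = torch.randn(4, 10, 16)                     # batch * len * input_size
    x_mask = torch.zeros(4, 10).byte()             # no padding here
    out = rnn(x, x_mask)
    assert out.size() == (4, 10, 2 * 2 * 8)        # 2 layers * (2 directions * hidden)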
class DilatedResNet(nn.Module):
"""Dilated ResNet with GRU to replace BRNN."""
def __init__(self, input_size, hidden_size, num_layers,
dilation_layers=1, dilation_base=1, dilation_offset=0,
input_padding=0, masked=True,
growing_mode='block', # ['block', 'layer']
block_type='dilated_conv', # ['dilated_conv', 'dilated_sep_conv', 'sep_conv']
activation_type='glu', # ['glu', 'relu']
dropout_rate=0, dropout_output=False):
super(DilatedResNet, self).__init__()
# self.padding = padding
self.dropout_output = dropout_output
self.dropout_rate = dropout_rate
self.num_layers = num_layers
self.input_padding = input_padding
# self.concat_layers = concat_layers
if activation_type == 'glu':
self.reduce_block = nn.Sequential(
nn.Conv1d(input_size, hidden_size*2, 3, padding=1 + input_padding),
nn.GLU(dim=1))
else:
self.reduce_block = nn.Sequential(
nn.Conv1d(input_size, hidden_size, 3, padding=1 + input_padding),
nn.ReLU(inplace=True))
self.cnns = nn.ModuleList()
self.masked = masked
        assert num_layers % 2 == 1, 'num_layers={} is not odd'.format(num_layers)
for i in range(num_layers // 2):
if block_type == 'sep_conv':
if growing_mode == 'block':
kernel_size = 2 ** (i - dilation_offset + 2) - 1 if dilation_offset <= i < dilation_offset + dilation_layers else 3
kernel_size = (kernel_size, kernel_size)
elif growing_mode == 'layer':
kernel_size = [1, 1]
kernel_size[0] = 2 ** (2*i+2-dilation_offset) - 1 if dilation_offset <= (2*i+1) < dilation_offset + dilation_layers else 3
kernel_size[1] = 2 ** (2*i+3-dilation_offset) - 1 if dilation_offset <= (2*i+2) < dilation_offset + dilation_layers else 3
else:
raise NotImplementedError
dilation = 1
padding = (kernel_size[0] // 2, kernel_size[1] // 2)
elif block_type in {'dilated_conv', 'dilated_sep_conv'}:
if growing_mode == 'block':
dilation = dilation_base ** (i - dilation_offset + 1) if dilation_offset <= i < dilation_offset + dilation_layers else 1
elif growing_mode == 'layer':
dilation = [1, 1]
dilation[0] = dilation_base ** (2*i+1-dilation_offset) if dilation_offset <= (2*i+1) < dilation_offset + dilation_layers else 1
dilation[1] = dilation_base ** (2*i+2-dilation_offset) if dilation_offset <= (2*i+2) < dilation_offset + dilation_layers else 1
else:
raise NotImplementedError
padding = dilation
kernel_size = 3
else:
raise NotImplementedError
if block_type == 'dilated_conv':
Block = GLUResBlock
elif block_type in {'dilated_sep_conv', 'sep_conv'}:
Block = GLUResBlock_sep
else:
raise NotImplementedError
self.cnns.append(Block(hidden_size, hidden_size,
hidden_size, kernel_size=kernel_size,
padding=padding,
dilation=dilation,
dropout_rate=dropout_rate,
activation_type=activation_type))
def forward(self, x, x_mask=None):
# swap filter dim and sequence dim
if self.input_padding > 0 and self.masked and x_mask is not None:
x_mask = F.pad(x_mask.unsqueeze(1).unsqueeze(2), (self.input_padding, self.input_padding, 0, 0), 'constant', True)[:, 0, 0, :]
x = x.transpose(1, 2)
if self.dropout_output and self.dropout_rate > 0:
x = F.dropout(x, p=self.dropout_rate,
training=self.training)
x = self.reduce_block(x)
for cnn in self.cnns:
x = cnn(x, x_mask)
# Dropout on output layer
if self.dropout_output and self.dropout_rate > 0:
x = F.dropout(x, p=self.dropout_rate,
training=self.training)
x = x.transpose(1, 2)
return x.contiguous()
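# Hedged usage sketch (not from the original repo): the dilated conv stack
# maps batch * len * input_size to batch * len * hidden_size; num_layers must
# be odd (one reduce block plus conv pairs) and the mask is optional.
def _demo_dilated_resnet():
    enc = DilatedResNet(input_size=16, hidden_size=8, num_layers=5,
                        dilation_layers=2, dilation_base=2)
    out = enc(torch.randn(4, 10, 16))
    assert out.size() == (4, 10, 8)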
class GLUResBlock(nn.Module):
'''GLU Res Block
input -> drop1 -> conv1 -> GLU1 -> drop2 -> conv2 -> GLU2 --> residual
add residual back to input
'''
def __init__(self, input_size, hidden_size, output_size, kernel_size=3,
padding=1, groups=1, dilation=1, dropout_rate=0, activation_type='glu'):
super(GLUResBlock, self).__init__()
if type(dilation) is int:
dilation = (dilation, dilation)
if type(kernel_size) is int:
kernel_size = (kernel_size, kernel_size)
if type(padding) is int:
padding = (padding, padding)
self.dropout_rate = dropout_rate
self.drop1 = nn.Dropout2d(dropout_rate)
self.activation_type = activation_type
if activation_type == 'glu':
self.conv1 = nn.Conv1d(input_size, hidden_size*2, kernel_size[0],
padding=padding[0], dilation=dilation[0])
self.act1 = nn.GLU(dim=1)
elif activation_type == 'relu':
self.conv1 = nn.Conv1d(input_size, hidden_size, kernel_size[0],
padding=padding[0], dilation=dilation[0])
self.act1 = nn.ReLU(inplace=True)
self.drop2 = nn.Dropout2d(dropout_rate)
if activation_type == 'glu':
self.conv2 = nn.Conv1d(hidden_size, output_size*2, kernel_size[1],
padding=padding[1], dilation=dilation[1])
self.act2 = nn.GLU(dim=1)
elif activation_type == 'relu':
self.conv2 = nn.Conv1d(hidden_size, output_size, kernel_size[1],
padding=padding[1], dilation=dilation[1])
self.act2 = nn.ReLU(inplace=True)
def forward(self, x, x_mask=None, masked=True):
res = x
res = self.drop1(res.unsqueeze(3))[:, :, :, 0]
        res = self.act1(self.conv1(res))
if masked and x_mask is not None:
res.masked_fill_(x_mask.unsqueeze(1), 0)
res = self.drop2(res.unsqueeze(3))[:, :, :, 0]
        res = self.act2(self.conv2(res))
if masked and x_mask is not None:
res.masked_fill_(x_mask.unsqueeze(1), 0)
if x.size(1) == res.size(1):
x = x + res
elif x.size(1) > res.size(1):
res = res + x[:, :res.size(1)]
x = res
else:
x = x + res[:, :x.size(1)]
return x
class GLUResBlock_sep(nn.Module):
'''GLU Res Block
input -> drop1 -> conv1 -> GLU1 -> drop2 -> conv2 -> GLU2 --> residual
add residual back to input
'''
def __init__(self, input_size, hidden_size, output_size, kernel_size=3,
padding=1, groups=1, dilation=1, dropout_rate=0, activation_type='glu'):
super().__init__()
if type(dilation) is int:
dilation = (dilation, dilation)
if type(kernel_size) is int:
kernel_size = (kernel_size, kernel_size)
if type(padding) is int:
padding = (padding, padding)
self.dropout_rate = dropout_rate
self.drop1 = nn.Dropout2d(dropout_rate)
self.activation_type = activation_type
if activation_type == 'glu':
self.conv1_1 = nn.Conv1d(input_size, input_size, kernel_size[0],
groups=input_size, padding=padding[0], dilation=dilation[0])
self.conv1_2 = nn.Conv1d(input_size, hidden_size*2, 1)
self.act1 = nn.GLU(dim=1)
elif activation_type == 'relu':
self.conv1_1 = nn.Conv1d(input_size, input_size, kernel_size[0],
groups=input_size, padding=padding[0], dilation=dilation[0])
self.conv1_2 = nn.Conv1d(input_size, hidden_size, 1)
self.act1 = nn.ReLU(inplace=True)
self.drop2 = nn.Dropout2d(dropout_rate)
if activation_type == 'glu':
self.conv2_1 = nn.Conv1d(hidden_size, hidden_size, kernel_size[1],
groups=hidden_size, padding=padding[1], dilation=dilation[1])
self.conv2_2 = nn.Conv1d(hidden_size, output_size*2, 1)
self.act2 = nn.GLU(dim=1)
elif activation_type == 'relu':
self.conv2_1 = nn.Conv1d(hidden_size, hidden_size, kernel_size[1],
groups=hidden_size, padding=padding[1], dilation=dilation[1])
self.conv2_2 = nn.Conv1d(hidden_size, output_size, 1)
self.act2 = nn.ReLU(inplace=True)
def forward(self, x, x_mask=None, masked=True):
res = x
res = self.drop1(res.unsqueeze(3)).squeeze(3)
        res = self.act1(self.conv1_2(self.conv1_1(res)))
if masked and x_mask is not None:
res.masked_fill_(x_mask.unsqueeze(1), 0)
res = self.drop2(res.unsqueeze(3)).squeeze(3)
        res = self.act2(self.conv2_2(self.conv2_1(res)))
if masked and x_mask is not None:
res.masked_fill_(x_mask.unsqueeze(1), 0)
if x.size(1) == res.size(1):
x = x + res
elif x.size(1) > res.size(1):
res = res + x[:, :res.size(1)]
x = res
else:
x = x + res[:, :x.size(1)]
return x
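# Hedged comparison sketch (not from the original repo): the separable block
# factors each convolution into a depthwise conv plus a 1x1 pointwise conv,
# which should need far fewer parameters at the same kernel size.
def _demo_sep_conv_params():
    full = GLUResBlock(64, 64, 64, kernel_size=7, padding=3)
    sep = GLUResBlock_sep(64, 64, 64, kernel_size=7, padding=3)
    n_full = sum(p.numel() for p in full.parameters())
    n_sep = sum(p.numel() for p in sep.parameters())
    assert n_sep < n_full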
class MLP(nn.Module):
def __init__(self, input_size, hidden_size, num_layers,
dropout_rate=0, variational_dropout=True,
concat_layers=False, output_act=True):
super(MLP, self).__init__()
self.dropout_rate = dropout_rate
self.variational_dropout = variational_dropout
self.num_layers = num_layers
self.concat_layers = concat_layers
self.linears = nn.ModuleList()
self.output_act = output_act
for i in range(num_layers):
input_size = input_size if i == 0 else hidden_size
self.linears.append(nn.Linear(input_size, hidden_size))
def forward(self, x):
original_size = x.size()
if len(original_size) == 3:
x = x.view(-1, original_size[2]).contiguous()
hiddens = []
for i in range(self.num_layers):
if self.dropout_rate > 0.:
x = dropout(x, p=self.dropout_rate, training=self.training,
variational=self.variational_dropout)
            x = self.linears[i](x)
            if i < self.num_layers - 1 or self.output_act:
                x = F.relu(x, inplace=True)
            hiddens.append(x)
        if self.concat_layers:
            x = torch.cat(hiddens, -1)
if len(original_size) == 3:
x = x.view(original_size[0], original_size[1], -1).contiguous()
return x
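# Hedged usage sketch (not from the original repo): the MLP accepts either
# batch * hidden or batch * len * hidden input and keeps the sequence dim.
def _demo_mlp():
    mlp = MLP(input_size=16, hidden_size=8, num_layers=2)
    out = mlp(torch.randn(4, 10, 16))
    assert out.size() == (4, 10, 8)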
class SeqAttnMatch(nn.Module):
"""Given sequences X and Y, match sequence Y to each element in X.
* o_i = sum(alpha_j * y_j) for i in X
* alpha_j = softmax(y_j * x_i)
"""
def __init__(self, input_size, hidden_size=None, identity=False, dropout=0., variational_dropout=False):
super(SeqAttnMatch, self).__init__()
self.dropout = dropout
self.variational_dropout = variational_dropout
if hidden_size is None:
hidden_size = input_size
if not identity:
self.linear = nn.Linear(input_size, hidden_size)
else:
self.linear = None
def forward(self, x, y, y_mask, scores_hook=None):
"""Input shapes:
x = batch * len1 * h
y = batch * len2 * h
y_mask = batch * len2
Output shapes:
matched_seq = batch * len1 * h
"""
if y.size(0) == 1 and x.size(0) > 1:
y = y.repeat(x.size(0), 1, 1)
y_mask = y_mask.repeat(x.size(0), 1)
elif x.size(0) == 1 and y.size(0) > 1:
x = x.repeat(y.size(0), 1, 1)
# Project vectors
if self.linear is not None:
batch_size = x.size(0)
len1 = x.size(1)
len2 = y.size(1)
x = dropout(x, p=self.dropout, training=self.training, variational=self.variational_dropout)
y = dropout(y, p=self.dropout, training=self.training, variational=self.variational_dropout)
x_proj = self.linear(x.view(-1, x.size(2))).view(batch_size, len1, -1)
x_proj = F.relu(x_proj)
y_proj = self.linear(y.view(-1, y.size(2))).view(batch_size, len2, -1)
y_proj = F.relu(y_proj)
else:
x_proj = x
y_proj = y
# Compute scores
scores = x_proj.bmm(y_proj.transpose(2, 1))
if scores_hook is not None:
scores = scores_hook(scores)
# Mask padding
y_mask = y_mask.unsqueeze(1).expand(scores.size())
scores.data.masked_fill_(y_mask.data, -float('inf'))
# Normalize with softmax
alpha_flat = F.softmax(scores.view(-1, y.size(1)), dim=1)
alpha = alpha_flat.view(-1, x.size(1), y.size(1))
# Take weighted average
matched_seq = alpha.bmm(y)
return matched_seq
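# Hedged usage sketch (not from the original repo): attend every document
# position over the question; the output is a question-aware representation
# with the same shape as x. The mask marks padded question tokens with 1.
def _demo_seq_attn_match():
    attn = SeqAttnMatch(input_size=16)
    x = torch.randn(2, 10, 16)                     # document
    y = torch.randn(2, 5, 16)                      # question
    out = attn(x, y, torch.zeros(2, 5).byte())
    assert out.size() == (2, 10, 16)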
class BilinearSeqAttn(nn.Module):
"""A bilinear attention layer over a sequence X w.r.t y:
* o_i = softmax(x_i'Wy) for x_i in X.
Optionally don't normalize output weights.
"""
def __init__(self, x_size, y_size, identity=False):
super(BilinearSeqAttn, self).__init__()
if not identity:
self.linear = nn.Linear(y_size, x_size)
else:
self.linear = None
def forward(self, x, y, x_mask, log=False, logit=False):
"""
x = batch * len * h1
y = batch * h2
x_mask = batch * len
"""
if y.size(0) == 1 and x.size(0) > 1:
y = y.repeat(x.size(0), 1)
elif x.size(0) == 1 and y.size(0) > 1:
x = x.repeat(y.size(0), 1, 1)
x_mask = x_mask.repeat(y.size(0), 1)
Wy = self.linear(y) if self.linear is not None else y
xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2)
xWy.data.masked_fill_(x_mask.data, -float('inf'))
if logit:
return xWy
elif log:
# In training we output log-softmax for NLL
alpha = F.log_softmax(xWy, dim=1)
else:
# ...Otherwise 0-1 probabilities
alpha = F.softmax(xWy, dim=1)
return alpha
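# Hedged usage sketch (not from the original repo): score each document
# position against a single question vector; in the default mode the rows
# are softmax-normalized probabilities.
def _demo_bilinear_seq_attn():
    attn = BilinearSeqAttn(x_size=16, y_size=8)
    alpha = attn(torch.randn(2, 10, 16), torch.randn(2, 8),
                 torch.zeros(2, 10).byte())
    assert alpha.size() == (2, 10)
    assert float((alpha.sum(1) - 1).abs().max()) < 1e-5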
class LinearSeqAttn(nn.Module):
"""Self attention over a sequence:
* o_i = softmax(Wx_i) for x_i in X.
"""
def __init__(self, input_size):
super(LinearSeqAttn, self).__init__()
self.linear = nn.Linear(input_size, 1)
def forward(self, x, x_mask, log=False):
"""
x = batch * len * hdim
x_mask = batch * len
"""
x_flat = x.contiguous().view(-1, x.size(-1))
scores = self.linear(x_flat).view(x.size(0), x.size(1))
scores.data.masked_fill_(x_mask.data, -float('inf'))
if log:
alpha = F.log_softmax(scores, dim=1)
else:
alpha = F.softmax(scores, dim=1)
return alpha
class RNNEncoder(nn.Module):
def __init__(self, input_size, hidden_size, num_layers,
dropout_rate=0, dropout_output=False, rnn_type=nn.LSTM,
variational_dropout=True, aux_size=0):
super(RNNEncoder, self).__init__()
self.variational_dropout = variational_dropout
self.dropout_rate = dropout_rate
self.num_layers = num_layers
self.rnns = nn.ModuleList()
for i in range(num_layers):
input_size_ = (input_size + 2 * hidden_size * i)
if i == 0: input_size_ += aux_size
self.rnns.append(rnn_type(input_size_, hidden_size, num_layers=1, bidirectional=True))
def forward(self, x, x_mask, aux_input=None):
# Transpose batch and sequence dims
x = x.transpose(0, 1)
if aux_input is not None:
aux_input = aux_input.transpose(0, 1)
# Encode all layers
hiddens = [x]
for i in range(self.num_layers):
rnn_input = torch.cat(hiddens, 2)
if i == 0 and aux_input is not None:
rnn_input = torch.cat([rnn_input, aux_input], 2)
# Apply dropout to input
if self.dropout_rate > 0:
rnn_input = dropout(rnn_input, p=self.dropout_rate, training=self.training,
variational=self.variational_dropout)
# Forward
rnn_output = self.rnns[i](rnn_input)[0]
hiddens.append(rnn_output)
# Transpose back
hiddens = [h.transpose(0, 1) for h in hiddens]
return hiddens[1:]
class MTLSTM(nn.Module):
def __init__(self, opt, embedding=None, padding_idx=0, with_emb=True):
"""Initialize an MTLSTM
Arguments:
embedding (Float Tensor): If not None, initialize embedding matrix with specified embedding vectors
"""
super(MTLSTM, self).__init__()
self.embedding = nn.Embedding(opt['vocab_size'], opt['embedding_dim'], padding_idx=padding_idx)
if embedding is not None:
self.embedding.weight.data = embedding
state_dict = torch.load(opt['MTLSTM_path'], map_location=lambda storage, loc: storage)
self.rnn1 = nn.LSTM(300, 300, num_layers=1, bidirectional=True)
self.rnn2 = nn.LSTM(600, 300, num_layers=1, bidirectional=True)
state_dict1 = dict([(name, param.data) if isinstance(param, nn.Parameter) else (name, param)
for name, param in state_dict.items() if '0' in name])
state_dict2 = dict([(name.replace('1', '0'), param.data) if isinstance(param, nn.Parameter) else (name.replace('1', '0'), param)
for name, param in state_dict.items() if '1' in name])
self.rnn1.load_state_dict(state_dict1)
self.rnn2.load_state_dict(state_dict2)
for p in self.embedding.parameters():
p.requires_grad = False
for p in self.rnn1.parameters():
p.requires_grad = False
for p in self.rnn2.parameters():
p.requires_grad = False
self.output_size = 600
def setup_eval_embed(self, eval_embed, padding_idx=0):
"""Allow evaluation vocabulary size to be greater than training vocabulary size
Arguments:
eval_embed (Float Tensor): Initialize eval_embed to be the specified embedding vectors
"""
self.eval_embed = nn.Embedding(eval_embed.size(0), eval_embed.size(1), padding_idx = padding_idx)
self.eval_embed.weight.data = eval_embed
for p in self.eval_embed.parameters():
p.requires_grad = False
def forward(self, x_idx, x_mask):
"""A pretrained MT-LSTM (McCann et. al. 2017).
This LSTM was trained with 300d 840B GloVe on the WMT 2017 machine translation dataset.
Arguments:
x_idx (Long Tensor): a Long Tensor of size (batch * len).
x_mask (Byte Tensor): a Byte Tensor of mask for the input tensor (batch * len).
"""
# emb = self.embedding if self.training else self.eval_embed
emb = self.embedding
x_hiddens = emb(x_idx)
lengths = x_mask.data.eq(0).long().sum(1).squeeze()
lens, indices = torch.sort(lengths, 0, True)
output1, _ = self.rnn1(nn.utils.rnn.pack_padded_sequence(x_hiddens[indices], lens.tolist(), batch_first=True))
output2, _ = self.rnn2(output1)
output1 = nn.utils.rnn.pad_packed_sequence(output1, batch_first=True)[0]
output2 = nn.utils.rnn.pad_packed_sequence(output2, batch_first=True)[0]
_, _indices = torch.sort(indices, 0)
output1 = output1[_indices]
output2 = output2[_indices]
return output1, output2
# Attention layer
class FullAttention(nn.Module):
def __init__(self, full_size, hidden_size, num_level, dropout=0., variational_dropout=True):
super(FullAttention, self).__init__()
assert(hidden_size % num_level == 0)
self.full_size = full_size
self.hidden_size = hidden_size
self.attsize_per_lvl = hidden_size // num_level
self.num_level = num_level
self.linear = nn.Linear(full_size, hidden_size, bias=False)
self.linear_final = nn.Parameter(torch.ones(1, hidden_size), requires_grad = True)
self.output_size = hidden_size
self.dropout = dropout
self.variational_dropout = variational_dropout
# print("Full Attention: (atten. {} -> {}, take {}) x {}".format(self.full_size, self.attsize_per_lvl, hidden_size // num_level, self.num_level))
def forward(self, x1_att, x2_att, x2, x2_mask):
"""
x1_att: batch * len1 * full_size
x2_att: batch * len2 * full_size
x2: batch * len2 * hidden_size
x2_mask: batch * len2
"""
if x1_att.size(0) == 1 and x2_att.size(0) > 1:
x1_att = x1_att.repeat(x2_att.size(0), 1, 1)
elif x2_att.size(0) == 1 and x1_att.size(0) > 1:
x2_att = x2_att.repeat(x1_att.size(0), 1, 1)
x2 = x2.repeat(x1_att.size(0), 1, 1)
x2_mask = x2_mask.repeat(x1_att.size(0), 1)
batch_size = x1_att.size(0)
len1 = x1_att.size(1)
len2 = x2_att.size(1)
x1_att = dropout(x1_att, p=self.dropout, training=self.training, variational=self.variational_dropout)
x2_att = dropout(x2_att, p=self.dropout, training=self.training, variational=self.variational_dropout)
x1_key = F.relu(self.linear(x1_att.view(-1, self.full_size)))
x2_key = F.relu(self.linear(x2_att.view(-1, self.full_size)))
final_v = self.linear_final.expand_as(x2_key)
x2_key = final_v * x2_key
x1_rep = x1_key.view(-1, len1, self.num_level, self.attsize_per_lvl).transpose(1, 2).contiguous().view(-1, len1, self.attsize_per_lvl)
x2_rep = x2_key.view(-1, len2, self.num_level, self.attsize_per_lvl).transpose(1, 2).contiguous().view(-1, len2, self.attsize_per_lvl)
scores = x1_rep.bmm(x2_rep.transpose(1, 2)).view(-1, self.num_level, len1, len2) # batch * num_level * len1 * len2
x2_mask = x2_mask.unsqueeze(1).unsqueeze(2).expand_as(scores)
scores.data.masked_fill_(x2_mask.data, -float('inf'))
alpha_flat = F.softmax(scores.view(-1, len2), dim=1)
alpha = alpha_flat.view(-1, len1, len2)
# alpha = F.softmax(scores, dim=2)
size_per_level = self.hidden_size // self.num_level
atten_seq = alpha.bmm(x2.contiguous().view(-1, x2.size(1), self.num_level, size_per_level).transpose(1, 2).contiguous().view(-1, x2.size(1), size_per_level))
return atten_seq.view(-1, self.num_level, len1, size_per_level).transpose(1, 2).contiguous().view(-1, len1, self.hidden_size)
def __repr__(self):
return "FullAttention: (atten. {} -> {}, take {}) x {}".format(self.full_size, self.attsize_per_lvl, self.hidden_size // self.num_level, self.num_level)
# For summarizing a set of vectors into a single vector
class LinearSelfAttn(nn.Module):
"""Self attention over a sequence:
* o_i = softmax(Wx_i) for x_i in X.
"""
def __init__(self, input_size):
super(LinearSelfAttn, self).__init__()
self.linear = nn.Linear(input_size, 1)
def forward(self, x, x_mask):
"""
x = batch * len * hdim
x_mask = batch * len
"""
x = dropout(x, p=my_dropout_p, training=self.training)
x_flat = x.contiguous().view(-1, x.size(-1))
scores = self.linear(x_flat).view(x.size(0), x.size(1))
scores.data.masked_fill_(x_mask.data, -float('inf'))
alpha = F.softmax(scores, dim=1)
return alpha
class BiAttn(nn.Module):
""" Bi-Directonal Attention from https://arxiv.org/abs/1611.01603 """
def __init__(self, input_size, q2c: bool=True, query_dots: bool=True):
super(BiAttn, self).__init__()
self.input_size = input_size
self.q2c = q2c
self.query_dots = query_dots
self.w_x = nn.Parameter(torch.Tensor(input_size, 1))
self.w_y = nn.Parameter(torch.Tensor(input_size, 1))
self.w_dot = nn.Parameter(torch.Tensor(input_size, 1))
self.bias = nn.Parameter(torch.Tensor(1))
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_uniform(self.w_x.data)
nn.init.kaiming_uniform(self.w_y.data)
nn.init.kaiming_uniform(self.w_dot.data)
self.bias.data.zero_()
def forward(self, x, y, x_mask=None, y_mask=None, raw_score_only=False):
"""
Args:
x: batch * len1 * hdim (context)
y: batch * len2 * hdim (query)
x_mask: batch * len1 (1 for padding, 0 for true)
y_mask: batch * len2 (1 for padding, 0 for true)
Output:
if raw_score_only:
scores: batch * len1 * len2
else:
matched_seq: batch * len1 * hdim
"""
batch_size = x.size(0)
len1 = x.size(1)
len2 = y.size(1)
# get the scores
x_ext = x.unsqueeze(2)
y_ext = y.unsqueeze(1)
        xy = x_ext * y_ext
scores = self.bias.view(1, 1, 1) + \
x.contiguous().view(-1, self.input_size).mm(self.w_x).view(batch_size, len1, 1) + \
y.contiguous().view(-1, self.input_size).mm(self.w_y).view(batch_size, 1, len2) + \
xy.view(-1, self.input_size).mm(self.w_dot).view(batch_size, len1, len2)
# fill the padding part with -inf
if x_mask is not None:
scores = maskneginf(scores, x_mask.unsqueeze(2))
if y_mask is not None:
scores = maskneginf(scores, y_mask.unsqueeze(1))
if raw_score_only:
return scores
# context-to-query
alpha = F.softmax(scores, dim=2)
        # rows that are fully masked become all -inf, so softmax yields NaN;
        # replace those NaNs with zeros
        alpha.data[alpha.data != alpha.data] = 0.
c2q_attn = alpha.bmm(y)
if x_mask is not None:
c2q_attn = maskzero(c2q_attn, x_mask.unsqueeze(2))
outputs = [c2q_attn]
# query-to-context
if self.q2c:
beta = F.softmax(scores.max(2)[0], dim=1)
q2c_attn = beta.unsqueeze(1).bmm(x)
outputs.append(q2c_attn)
if self.query_dots:
outputs.append(x * c2q_attn)
return outputs
def __repr__(self):
return '{}(input_size={}, q2c={}, query_dots={})'.format(
self.__class__.__name__, self.input_size, self.q2c, self.query_dots)
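# Hedged usage sketch (not from the original repo): with the default flags
# BiAttn returns [context-to-query, query-to-context, elementwise] features.
def _demo_biattn():
    attn = BiAttn(input_size=16)
    x, y = torch.randn(2, 10, 16), torch.randn(2, 5, 16)
    c2q, q2c, dots = attn(x, y)
    assert c2q.size() == (2, 10, 16)
    assert q2c.size() == (2, 1, 16)                # one summary vector per batch
    assert dots.size() == (2, 10, 16)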
class Linear(nn.Module):
''' Simple Linear layer with xavier init '''
def __init__(self, d_in, d_out, bias=True):
super(Linear, self).__init__()
self.linear = nn.Linear(d_in, d_out, bias=bias)
init.xavier_normal(self.linear.weight)
def forward(self, x):
return self.linear(x)
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, d_model, attn_dropout=0.0):
super(ScaledDotProductAttention, self).__init__()
self.temper = d_model ** 0.5
self.dropout = nn.Dropout(attn_dropout)
self.softmax = BottleSoftmax(dim=-1)
def forward(self, q, k, v, attn_mask=None):
attn = torch.bmm(q, k.transpose(1, 2)) / self.temper
        if attn_mask is not None:
            attn_mask = attn_mask.unsqueeze(1).expand_as(attn)
            attn.data.masked_fill_(attn_mask.data, -float('inf'))
attn = F.softmax(attn, dim=-1)
attn.data[attn.data != attn.data] = 0.
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn
class Highway(nn.Module):
def __init__(self, input_size, hidden_size=128, num_layers=2, dropout_rate=0.):
super(Highway, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
        self.dropout_rate = dropout_rate
        if self.input_size != self.hidden_size:
            self.reduction = nn.Linear(input_size, hidden_size)
self.highway_layers = nn.ModuleList()
for i in range(num_layers):
self.highway_layers.append(nn.Linear(hidden_size, hidden_size*2))
def forward(self, x, x_mask=None):
ndim = x.dim()
if ndim == 3:
batch_size = x.size(0)
x_len = x.size(1)
x = x.view(-1, x.size(2))
if self.input_size != self.hidden_size:
x = self.reduction(x)
for layer in self.highway_layers:
x_trans = layer(F.dropout(x, self.dropout_rate, training=self.training))
gate = F.sigmoid(x_trans[:, self.hidden_size:])
x_trans = F.relu(x_trans[:, :self.hidden_size])
x = x * (1 - gate) + x_trans * gate
if ndim == 3:
x = x.view(batch_size, x_len, -1)
return x
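# Hedged usage sketch (not from the original repo): highway layers gate
# between a ReLU transform and the (optionally projected) input.
def _demo_highway():
    hw = Highway(input_size=16, hidden_size=8, num_layers=2)
    out = hw(torch.randn(4, 10, 16))
    assert out.size() == (4, 10, 8)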
class Bottle(nn.Module):
''' Perform the reshape routine before and after an operation '''
def forward(self, input):
if len(input.size()) <= 2:
return super(Bottle, self).forward(input)
size = input.size()[:2]
out = super().forward(input.view(size[0]*size[1], -1))
return out.view(size[0], size[1], -1)
class BottleLinear(Bottle, Linear):
''' Perform the reshape routine before and after a linear projection '''
pass
class BottleSoftmax(Bottle, nn.Softmax):
''' Perform the reshape routine before and after a softmax operation'''
pass
# borrowed from https://github.com/jadore801120/attention-is-all-you-need-pytorch.git
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, attn_dropout=0.0, input_layer_norm=False):
super(MultiHeadAttention, self).__init__()
if input_layer_norm:
self.layer_norm = LayerNorm(d_model)
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Parameter(torch.FloatTensor(n_head, d_model, d_k))
self.w_ks = nn.Parameter(torch.FloatTensor(n_head, d_model, d_k))
self.w_vs = nn.Parameter(torch.FloatTensor(n_head, d_model, d_v))
self.attention = ScaledDotProductAttention(d_k, attn_dropout=attn_dropout)
        self.proj = BottleLinear(n_head * d_v, d_model)
# self.dropout = nn.Dropout(dropout)
init.xavier_normal(self.w_qs)
init.xavier_normal(self.w_ks)
init.xavier_normal(self.w_vs)
def forward(self, q, attn_mask=None):
'''only supports self-attn'''
if hasattr(self, 'layer_norm'):
q = self.layer_norm(q)
d_k, d_v = self.d_k, self.d_v
n_head = self.n_head
mb_size, len_q, d_model = q.size()
# mb_size, len_k, d_model = k.size()
# mb_size, len_v, d_model = v.size()
len_k = len_q
len_v = len_q
# treat as a (n_head) size batch
q_s = q.repeat(n_head, 1, 1).view(n_head, -1, d_model) # n_head x (mb_size*len_q) x d_model
# k_s = k.repeat(n_head, 1, 1).view(n_head, -1, d_model) # n_head x (mb_size*len_k) x d_model
# v_s = v.repeat(n_head, 1, 1).view(n_head, -1, d_model) # n_head x (mb_size*len_v) x d_model
k_s = q_s
v_s = q_s
# treat the result as a (n_head * mb_size) size batch
q_s = torch.bmm(q_s, self.w_qs).view(-1, len_q, d_k) # (n_head*mb_size) x len_q x d_k
k_s = torch.bmm(k_s, self.w_ks).view(-1, len_k, d_k) # (n_head*mb_size) x len_k x d_k
v_s = torch.bmm(v_s, self.w_vs).view(-1, len_v, d_v) # (n_head*mb_size) x len_v x d_v
# perform attention, result size = (n_head * mb_size) x len_q x d_v
if attn_mask is not None:
attn_mask = attn_mask.repeat(n_head, 1)
outputs, attns = self.attention(q_s, k_s, v_s, attn_mask=attn_mask)
# back to original mb_size batch, result size = mb_size x len_q x (n_head*d_v)
outputs = torch.cat(torch.split(outputs, mb_size, dim=0), dim=-1)
# project back to residual size
if hasattr(self, 'proj'):
outputs = self.proj(outputs)
# outputs = self.dropout(outputs)
return outputs
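# Hedged usage sketch (not from the original repo): this module only does
# self-attention; 8 heads of size d_model // 8 are projected back to d_model.
def _demo_multi_head_attention():
    mha = MultiHeadAttention(n_head=8, d_model=32, d_k=4, d_v=4)
    out = mha(torch.randn(2, 10, 32))
    assert out.size() == (2, 10, 32)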
class GBEncoderBlock(nn.Module):
    '''Encoder block of the Google Brain paper (QANet): depthwise-separable
    convolutions, multi-head self-attention, and a feed-forward block, each
    with residual connections and stochastic depth (depth_drop).'''
def __init__(self, hidden_size=128, kernel_size=7, num_layers=4, dropout_rate=0., variational_dropout=True, depth_drop=0., depth_drop_start=0, depth_drop_end=None, add_pos=True):
'''assuming input_size == hidden_size'''
super(GBEncoderBlock, self).__init__()
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.variational_dropout = variational_dropout
self.depth_drop = depth_drop
self.depth_drop_start = depth_drop_start
self.depth_drop_end = num_layers if depth_drop_end is None else depth_drop_end
self.cnns = nn.ModuleList()
for i in range(num_layers):
            # layer norm, then depthwise conv -> pointwise (1x1) conv -> ReLU
self.cnns.append(nn.Sequential(
LayerNormChannelFirst(hidden_size),
nn.Conv1d(hidden_size, hidden_size, kernel_size, padding=kernel_size//2, groups=hidden_size),
nn.Conv1d(hidden_size, hidden_size, 1),
nn.ReLU(True)
))
self.self_attn = MultiHeadAttention(8, hidden_size, hidden_size//8, hidden_size//8, input_layer_norm=True)
self.ffn = nn.Sequential(
LayerNorm(hidden_size),
BottleLinear(hidden_size, hidden_size*4),
nn.ReLU(True),
BottleLinear(hidden_size*4, hidden_size),
)
        # add position embedding to the first block
if add_pos:
self.set_pos_emb(2000)
def set_pos_emb(self, l):
self.pos_emb = nn.Parameter(get_position_encoding(self.hidden_size, [l]).unsqueeze_(0))
self.pos_emb.requires_grad = True
def forward(self, x, x_mask=None):
"""
TODO add x_mask
x = batch * len * hidden_size
"""
batch_size = x.size(0)
x_len = x.size(1)
drop_i = self.depth_drop_start
if hasattr(self, 'pos_emb'):
if x_len > self.pos_emb.size(1):
self.set_pos_emb(x_len + 200)
x = x + self.pos_emb[:, :x_len, :]
        # TODO: mask padded positions here
        # if x_mask is not None:
        #     maskzero(x, x_mask.unsqueeze(2))
x = x.transpose(1, 2)
for cnn in self.cnns:
drop_i += 1
depth_drop_prob = self.depth_drop * drop_i / self.depth_drop_end
if self.depth_drop <= 0. or torch.rand(1)[0] > depth_drop_prob:
x_drop = dropout(x.transpose(1,2), p=self.dropout_rate, training=self.training,
variational=self.variational_dropout).transpose(1, 2)
residual = cnn(x_drop)
if self.training and self.depth_drop > 0.:
residual = residual / (1 - depth_drop_prob)
x = x + residual
# if x_mask is not None:
# maskzero(x, x_mask.unsqueeze(1))
x = x.transpose(1, 2)
drop_i += 1
depth_drop_prob = self.depth_drop * drop_i / self.depth_drop_end
if self.depth_drop <= 0. or torch.rand(1)[0] > depth_drop_prob:
x_drop = dropout(x, p=self.dropout_rate, training=self.training,
variational=self.variational_dropout)
residual = self.self_attn(x_drop, x_mask)
if self.training and self.depth_drop > 0.:
residual = residual / (1 - depth_drop_prob)
x = x + residual
drop_i += 1
depth_drop_prob = self.depth_drop * drop_i / self.depth_drop_end
if self.depth_drop <= 0. or torch.rand(1)[0] > depth_drop_prob:
x_drop = dropout(x, p=self.dropout_rate, training=self.training,
variational=self.variational_dropout)
residual = self.ffn(x_drop)
if self.training and self.depth_drop > 0.:
residual = residual / (1 - depth_drop_prob)
x = x + residual
# if x_mask is not None:
# maskzero(x, x_mask.unsqueeze(2))
return x
def get_position_encoding(emb_size, lengths, min_timescale=1.0, max_timescale=1.0e4):
'''
    create position embedding of size len1 (x len2 x len3 ...) x emb_size
reference: https://github.com/tensorflow/tensor2tensor/blob/8bdecbe434d93cb1e79c0489df20fee2d5a37dc2/tensor2tensor/layers/common_attention.py#L503
'''
num_dims = len(lengths)
num_timescales = emb_size // (num_dims * 2)
log_timescale_increment = (math.log(float(max_timescale) / float(min_timescale)) / (num_timescales - 1))
inv_timescales = min_timescale * (torch.arange(num_timescales) * -log_timescale_increment).exp()
inv_timescales.unsqueeze_(0)
x = None
for dim, length in enumerate(lengths):
position = torch.arange(length).unsqueeze_(1)
scaled_time = position * inv_timescales
signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)
for _ in range(dim):
signal.unsqueeze_(0)
for _ in range(num_dims - 1 - dim):
signal.unsqueeze_(-2)
x = signal if x is None else x + signal
return x
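# Hedged usage sketch (not from the original repo; assumes the 0.4-era
# torch.arange that returns floats): 1-D sinusoidal position embeddings.
def _demo_position_encoding():
    pe = get_position_encoding(64, [50])
    assert pe.size() == (50, 64)
    assert float(pe.abs().max()) <= 1.0            # pure sin/cos values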
# ------------------------------------------------------------------------------
# Functional
# ------------------------------------------------------------------------------
def uniform_weights(x, x_mask):
"""Return uniform weights over non-masked input."""
alpha = Variable(torch.ones(x.size(0), x.size(1)))
if x.data.is_cuda:
alpha = alpha.cuda()
alpha = alpha * x_mask.eq(0).float()
    alpha = alpha / alpha.sum(1, keepdim=True).expand(alpha.size())
return alpha
def weighted_avg(x, weights):
"""x = batch * len * d
weights = batch * len
"""
return weights.unsqueeze(1).bmm(x).squeeze(1)
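# Hedged usage sketch (not from the original repo): uniform attention over
# the unmasked positions, then weighted-average pooling to one vector.
def _demo_weighted_avg():
    x = torch.randn(2, 5, 8)
    alpha = uniform_weights(x, torch.zeros(2, 5).byte())
    assert float((alpha.sum(1) - 1).abs().max()) < 1e-5
    assert weighted_avg(x, alpha).size() == (2, 8)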
class MaskNegInf(InplaceFunction):
@staticmethod
def forward(ctx, input, mask=None):
ctx.save_for_backward(mask)
if mask is not None:
input.masked_fill_(mask.expand_as(input), -float('inf'))
return input
@staticmethod
def backward(ctx, grad_output):
mask = ctx.saved_variables[0]
if mask is not None:
grad_output.masked_fill_(mask.expand_as(grad_output), 0)
return grad_output, None
class MaskZero(InplaceFunction):
@staticmethod
def forward(ctx, input, mask=None):
ctx.save_for_backward(mask)
if mask is not None:
input.masked_fill_(mask.expand_as(input), 0)
return input
@staticmethod
def backward(ctx, grad_output):
mask = ctx.saved_variables[0]
if mask is not None:
grad_output.masked_fill_(mask.expand_as(grad_output), 0)
return grad_output, None
def maskneginf(input, mask):
return MaskNegInf.apply(input, mask)
def maskzero(input, mask):
return MaskZero.apply(input, mask)
def split_sentences(x, sentence_lens):
assert x.size(0) == len(sentence_lens)
ndim = x.dim()
if ndim == 2:
x = x.unsqueeze(-1)
x = x.transpose(1, 2)
sentences = []
max_sentence_len = max(l for s in sentence_lens for l in s)
for i, lens in enumerate(sentence_lens):
pos = 0
for l in lens:
sentences.append(F.pad(x[i, :, pos:pos+l], (0, max_sentence_len - l)).transpose(0, 1))
pos += l
if ndim == 2:
return torch.stack(sentences, 0).squeeze_(-1)
else:
return torch.stack(sentences, 0)
def combine_sentences(x, sentence_lens):
ndim = x.dim()
if ndim == 2:
x = x.unsqueeze(-1)
docs = []
max_doc_len = max(sum(s) for s in sentence_lens)
sent_id = 0
zeros = Variable(x.data.new(max_doc_len, x.size(2)).zero_(), requires_grad=False)
for i, lens in enumerate(sentence_lens):
doc = []
doc_len = sum(lens)
for l in lens:
doc.append(x[sent_id, :l])
sent_id += 1
if doc_len < max_doc_len:
doc.append(zeros[:max_doc_len-doc_len])
doc = torch.cat(doc, 0)
docs.append(doc)
if ndim == 2:
return torch.stack(docs, 0).squeeze(-1)
else:
return torch.stack(docs, 0)
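# Hedged round-trip sketch (not from the original repo): split_sentences pads
# every sentence to the longest one; combine_sentences restores the original
# per-document layout, so the round trip is lossless.
def _demo_sentence_roundtrip():
    x = torch.randn(2, 7, 4)                       # 2 docs of 7 tokens
    sent_lens = [[3, 4], [7]]                      # sentence lengths per doc
    sents = split_sentences(x, sent_lens)
    assert sents.size() == (3, 7, 4)               # 3 sentences, padded to 7
    docs = combine_sentences(sents, sent_lens)
    assert float((docs - x).abs().max()) == 0.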
def duplicate_for_sentences(x, sentence_lens):
if not isinstance(x, Variable):
x = Variable(x)
assert x.size(0) == len(sentence_lens)
ndim = x.dim()
if ndim == 2:
x = x.unsqueeze(-1)
duplicated = []
for i, lens in enumerate(sentence_lens):
duplicated.append(x[i:i+1].repeat(len(lens), 1, 1))
if ndim == 2:
return torch.cat(duplicated, 0).squeeze_(-1)
else:
return torch.cat(duplicated, 0)
def reduce_for_sentences(x, sentence_lens):
ndim = x.dim()
if ndim == 2:
x = x.unsqueeze(-1)
reduced = []
offset = 0
for i, lens in enumerate(sentence_lens):
reduced.append(x[offset])
offset += len(lens)
if ndim == 2:
return torch.stack(reduced, 0).squeeze_(-1)
else:
return torch.stack(reduced, 0)
def replace_nan_grad_hook(grad):
grad.data.masked_fill_(grad.data != grad.data, 0)
return grad
def print_hook(name):
def hook(grad):
print('{}: {}/{}'.format(name, (grad.data != grad.data).sum(), grad.data.numel()))
return hook
# https://github.com/pytorch/pytorch/issues/2591
def logsumexp(x, dim=None, keepdim=False):
if dim is None:
x, dim = x.view(-1), 0
xm, _ = torch.max(x, dim, keepdim=True)
# x = my_where(
# (xm == float('inf')) | (xm == float('-inf')),
# xm,
# xm + torch.log(torch.sum(torch.exp(x - xm), dim, keepdim=True)))
# return x if keepdim else x.squeeze(dim)
output = xm + torch.log(torch.sum(torch.exp(x - xm), dim, keepdim=True))
return output if keepdim else output.squeeze(dim)
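# Hedged sanity check (not from the original repo): the stable log-sum-exp
# should match the direct computation on well-scaled inputs.
def _demo_logsumexp():
    x = torch.randn(3, 4)
    direct = x.exp().sum(1).log()
    assert float((logsumexp(x, dim=1) - direct).abs().max()) < 1e-5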
# https://github.com/pytorch/pytorch/issues/2591 implementation of torch.where in pytorch v3
def my_where(cond, xt, xf):
ret = torch.zeros_like(xt)
ret[cond] = xt[cond]
ret[cond ^ 1] = xf[cond ^ 1]
return ret
| 57,196 | 37.989093 | 182 | py |
FastFusionNet | FastFusionNet-master/qa/rnn_reader.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Origin: https://github.com/facebookresearch/ParlAI/tree/master/parlai/agents/drqa
#
# Modified by Felix Wu: adding RCModelProto, CnnDocReader, FusionNet, BiDAF
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import layers
from typing import IO, List, Iterable, Tuple
from qa.encoder import *
class RCModelProto(nn.Module):
    '''Base class for the reading comprehension models (shared embedding logic).'''
def __init__(self, opt, padding_idx=0, embedding=None):
super().__init__()
# Store config
self.opt = opt
self.setup_emb_modules(padding_idx, embedding)
def setup_emb_modules(self, padding_idx=0, embedding=None):
opt = self.opt
# Word embeddings
self.paired_input_size = opt['embedding_dim']
if opt['pretrained_words']:
assert embedding is not None
self.embedding = nn.Embedding(embedding.size(0),
embedding.size(1),
padding_idx=padding_idx)
self.embedding.weight.data[2:, :] = embedding[2:, :]
if opt['fix_embeddings']:
assert opt['tune_partial'] == 0
for p in self.embedding.parameters():
p.requires_grad = False
elif opt['tune_partial'] > 0:
assert opt['tune_partial'] + 2 < embedding.size(0)
fixed_embedding = embedding[opt['tune_partial'] + 2:]
self.register_buffer('fixed_embedding', fixed_embedding)
self.fixed_embedding = fixed_embedding
else: # random initialized
self.embedding = nn.Embedding(opt['vocab_size'],
opt['embedding_dim'],
padding_idx=padding_idx)
# Character embeddings
if opt['use_max_char_emb']:
self.max_char_emb = nn.Embedding(301, opt['max_char_emb_size'], padding_idx=260)
self.paired_input_size += opt['max_char_emb_size']
# Contextualized embeddings
if opt['use_cove']:
self.CoVe = layers.MTLSTM(opt, embedding, padding_idx=padding_idx)
self.paired_input_size += self.CoVe.output_size
# Input size to RNN: word emb + question emb + manual features
doc_input_size = self.paired_input_size + opt['num_features']
question_input_size = self.paired_input_size
if opt['use_feat_emb']:
if opt['pos']:
doc_input_size += opt['pos_dim']
self.pos_embedding = nn.Embedding(opt['pos_size'], opt['pos_dim'])
if opt['ner']:
doc_input_size += opt['ner_dim']
self.ner_embedding = nn.Embedding(opt['ner_size'], opt['ner_dim'])
else:
if opt['pos']:
doc_input_size += opt['pos_size']
if opt['ner']:
doc_input_size += opt['ner_size']
# Projection for attention weighted question
if opt['use_qemb']:
self.qemb_match = layers.SeqAttnMatch(opt['embedding_dim'],
dropout=opt['dropout_rnn'],
variational_dropout=opt['variational_dropout'],
)
# self.qemb_match = layers.FullAttention(
# full_size=opt['embedding_dim'],
# hidden_size=opt['embedding_dim'],
# num_level=1,
# dropout=opt['dropout_rnn'],
# variational_dropout=opt['variational_dropout'],
# )
doc_input_size += opt['embedding_dim']
if opt['use_demb']:
self.demb_match = layers.SeqAttnMatch(opt['embedding_dim'],
dropout=opt['dropout_rnn'],
variational_dropout=opt['variational_dropout'],
)
# self.demb_match = layers.FullAttention(
# full_size=opt['embedding_dim'],
# hidden_size=opt['embedding_dim'],
# num_level=1,
# dropout=opt['dropout_rnn'],
# variational_dropout=opt['variational_dropout'],
# )
question_input_size += opt['embedding_dim']
if opt['use_qemb'] and opt['use_demb']:
self.paired_input_size += opt['embedding_dim']
self.doc_input_size = doc_input_size
self.question_input_size = question_input_size
def forward_emb(self, x1, x1_f, x1_pos, x1_ner, x1_mask, x2, x2_mask, sent_lens, x1_char=None, x2_char=None):
"""Inputs:
x1 = document word indices [batch * len_d]
x1_f = document word features indices [batch * len_d * nfeat]
x1_pos = document POS tags [batch * len_d]
x1_ner = document entity tags [batch * len_d]
x1_mask = document padding mask [batch * len_d]
x2 = question word indices [batch * len_q]
x2_mask = question padding mask [batch * len_q]
x1_char = document char indices [batch * len_d * nchar]
x2_char = question char indices [batch * len_q * nchar]
"""
if self.training and self.opt.get('word_dropout_c', 0.) > 0.:
dropout_mask = torch.rand(x1.size()) < self.opt.get('word_dropout_c', 0.)
if x1.is_cuda:
dropout_mask = dropout_mask.cuda()
x1.data.masked_fill_(dropout_mask, 1)
if self.training and self.opt.get('word_dropout_q', 0.) > 0.:
dropout_mask = torch.rand(x2.size()) < self.opt.get('word_dropout_q', 0.)
            if x2.is_cuda:
dropout_mask = dropout_mask.cuda()
x2.data.masked_fill_(dropout_mask, 1)
def dropout(x, p=self.opt['dropout_rnn']):
return layers.dropout(x, p=p,
training=self.training, variational=self.opt['variational_dropout'] and x.dim() == 3)
feat_dict = {}
x1_all_list, x2_all_list = [], []
x1_paired_list, x2_paired_list = [], []
if x1_f is not None:
x1_all_list.append(x1_f)
# Embed both document and question
if self.opt['use_word_emb'] or self.opt['use_qemb'] or self.opt['use_demb']:
x1_emb = self.embedding(x1)
x2_emb = self.embedding(x2)
if self.opt['dropout_emb'] > 0:
x1_emb = dropout(x1_emb, self.opt['dropout_emb'])
x2_emb = dropout(x2_emb, self.opt['dropout_emb'])
feat_dict['x1_emb'] = x1_emb
feat_dict['x2_emb'] = x2_emb
if self.opt['use_word_emb']:
x1_all_list.append(x1_emb)
x2_all_list.append(x2_emb)
x1_paired_list.append(x1_emb)
x2_paired_list.append(x2_emb)
if self.opt['use_max_char_emb']:
bs = x1_char.size(0)
char_len = self.opt['max_char_emb_max_len']
char_dim = self.opt['max_char_emb_size']
x1_max_char_emb = self.max_char_emb(x1_char[:, :, :char_len].contiguous().view(-1, char_len)).view(bs, -1, char_len, char_dim).max(2)[0]
x2_max_char_emb = self.max_char_emb(x2_char[:, :, :char_len].contiguous().view(-1, char_len)).view(bs, -1, char_len, char_dim).max(2)[0]
if self.opt['dropout_emb'] > 0:
x1_max_char_emb = dropout(x1_max_char_emb, self.opt['dropout_emb'])
x2_max_char_emb = dropout(x2_max_char_emb, self.opt['dropout_emb'])
x1_all_list.append(x1_max_char_emb)
x2_all_list.append(x2_max_char_emb)
x1_paired_list.append(x1_max_char_emb)
x2_paired_list.append(x2_max_char_emb)
# Contextualized embeddings
if self.opt['use_cove']:
_, x1_cove = self.CoVe(x1, x1_mask)
_, x2_cove = self.CoVe(x2, x2_mask)
if self.opt['dropout_emb'] > 0:
x1_cove = dropout(x1_cove, self.opt['dropout_emb'])
x2_cove = dropout(x2_cove, self.opt['dropout_emb'])
x1_all_list.append(x1_cove)
x2_all_list.append(x2_cove)
x1_paired_list.append(x1_cove)
x2_paired_list.append(x2_cove)
feat_dict['x1_cove'] = x1_cove
feat_dict['x2_cove'] = x2_cove
if self.opt['use_feat_emb']:
if self.opt['pos']:
x1_pos_emb = self.pos_embedding(x1_pos)
x1_all_list.append(x1_pos_emb)
feat_dict['x1_pos_emb'] = x1_pos_emb
if self.opt['ner']:
x1_ner_emb = self.ner_embedding(x1_ner)
x1_all_list.append(x1_ner_emb)
feat_dict['x1_ner_emb'] = x1_ner_emb
else:
if self.opt['pos']:
x1_all_list.append(x1_pos)
feat_dict['x1_pos'] = x1_pos
if self.opt['ner']:
x1_all_list.append(x1_ner)
feat_dict['x1_ner'] = x1_ner
# Add attention-weighted question representation (word level fusion)
if self.opt['use_qemb']:
x1_qemb = self.qemb_match(x1_emb, x2_emb, x2_mask)
# x1_qemb = self.qemb_match(x1_emb, x2_emb, x2_emb, x2_mask)
x1_all_list.append(x1_qemb)
feat_dict['x1_qemb'] = x1_qemb
if self.opt['use_demb']:
x2_demb = self.demb_match(x2_emb, x1_emb, x1_mask)
# x2_demb = self.demb_match(x2_emb, x1_emb, x1_emb, x1_mask)
x2_all_list.append(x2_demb)
feat_dict['x2_demb'] = x2_demb
if self.opt['use_qemb'] and self.opt['use_demb']:
x1_paired_list.append(x1_qemb)
x2_paired_list.append(x2_demb)
x1_paired_emb = layers.maskzero(torch.cat(x1_paired_list, 2), x1_mask.unsqueeze(-1))
x2_paired_emb = layers.maskzero(torch.cat(x2_paired_list, 2), x2_mask.unsqueeze(-1))
x1_full_emb = layers.maskzero(torch.cat(x1_all_list, 2), x1_mask.unsqueeze(-1))
x2_full_emb = layers.maskzero(torch.cat(x2_all_list, 2), x2_mask.unsqueeze(-1))
return x1_paired_emb, x2_paired_emb, x1_full_emb, x2_full_emb, feat_dict
def forward(self, x1, x1_f, x0_pos, x1_ner, x1_mask, x2, x2_mask, sent_lens, x1_char=None, x2_char=None):
raise NotImplementedError
class RnnDocReader(RCModelProto):
"""Network for the Document Reader module of DrQA."""
def __init__(self, opt, padding_idx=0, embedding=None):
super().__init__(opt, padding_idx, embedding)
# RNN document encoder
self.doc_rnn = layers.StackedBRNN(
input_size=self.doc_input_size,
hidden_size=opt['hidden_size'],
num_layers=opt['doc_layers'],
dropout_rate=opt['dropout_rnn'],
dropout_output=opt['dropout_rnn_output'],
variational_dropout=opt['variational_dropout'],
concat_layers=opt['concat_rnn_layers'],
rnn_type=opt['rnn_type'],
padding=opt['rnn_padding'],
)
# RNN question encoder
self.question_rnn = layers.StackedBRNN(
input_size=self.question_input_size,
hidden_size=opt['hidden_size'],
num_layers=opt['question_layers'],
dropout_rate=opt['dropout_rnn'],
dropout_output=opt['dropout_rnn_output'],
variational_dropout=opt['variational_dropout'],
concat_layers=opt['concat_rnn_layers'],
rnn_type=opt['rnn_type'],
padding=opt['rnn_padding'],
)
# Output sizes of rnn encoders
doc_hidden_size = 2 * opt['hidden_size']
question_hidden_size = 2 * opt['hidden_size']
if opt['concat_rnn_layers']:
doc_hidden_size *= opt['doc_layers']
question_hidden_size *= opt['question_layers']
# Question merging
if opt['question_merge'] not in ['avg', 'self_attn']:
raise NotImplementedError('question_merge = %s' % opt['question_merge'])
if opt['question_merge'] == 'self_attn':
self.self_attn = layers.LinearSeqAttn(question_hidden_size)
# Bilinear attention for span start/end
self.start_attn = layers.BilinearSeqAttn(
doc_hidden_size,
question_hidden_size,
)
if opt['end_gru']:
self.end_gru = nn.GRUCell(doc_hidden_size, question_hidden_size)
self.end_attn = layers.BilinearSeqAttn(
doc_hidden_size,
question_hidden_size,
)
def forward(self, x1, x1_f, x1_pos, x1_ner, x1_mask, x2, x2_mask, sent_lens, x1_char=None, x2_char=None, logit=False):
"""Inputs:
x1 = document word indices [batch * len_d]
x1_f = document word features indices [batch * len_d * nfeat]
x1_pos = document POS tags [batch * len_d]
x1_ner = document entity tags [batch * len_d]
x1_mask = document padding mask [batch * len_d]
x2 = question word indices [batch * len_q]
x2_mask = question padding mask [batch * len_q]
"""
# Embed both document and question
x1_paired_emb, x2_paired_emb, x1_full_emb, x2_full_emb, feat_dict = self.forward_emb(x1, x1_f, x1_pos, x1_ner, x1_mask, x2, x2_mask, sent_lens, x1_char, x2_char)
if self.opt['sentence_level']:
x1_mask_backup = x1_mask
x1_full_emb = layers.split_sentences(x1_full_emb, sent_lens)
x1_mask = layers.split_sentences(x1_mask.unsqueeze(-1), sent_lens).select(2, 0)
# Encode document with RNN
doc_hiddens = self.doc_rnn(x1_full_emb, x1_mask)
if self.opt['sentence_level']:
x1_mask = x1_mask_backup
doc_hiddens = layers.combine_sentences(doc_hiddens, sent_lens)
# Encode question with RNN + merge hiddens
question_hiddens = self.question_rnn(x2_full_emb, x2_mask)
if self.opt['question_merge'] == 'avg':
q_merge_weights = layers.uniform_weights(question_hiddens, x2_mask)
elif self.opt['question_merge'] == 'self_attn':
q_merge_weights = self.self_attn(question_hiddens, x2_mask)
question_hidden = layers.weighted_avg(question_hiddens, q_merge_weights)
# Predict start and end positions
start_logits = self.start_attn(doc_hiddens, question_hidden, x1_mask, logit=True)
start_scores = F.log_softmax(start_logits, 1) if self.training else F.softmax(start_logits, 1)
if self.opt['end_gru']:
weights = start_scores.exp() if self.training else start_scores
weighted_doc_hidden = layers.weighted_avg(doc_hiddens, weights)
question_v_hidden = self.end_gru(weighted_doc_hidden, question_hidden)
end_logits = self.end_attn(doc_hiddens, question_v_hidden, x1_mask, logit=True)
else:
end_logits = self.end_attn(doc_hiddens, question_hidden, x1_mask, logit=True)
if logit:
return start_logits, end_logits
else:
end_scores = F.log_softmax(end_logits, 1) if self.training else F.softmax(end_logits, 1)
return start_scores, end_scores
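# Hedged end-to-end sketch (not from the original repo): a minimal `opt` that
# exercises RnnDocReader with every optional feature switched off except the
# attention-weighted question embedding. The real training script builds
# `opt` from its argparse config; the toy values below are assumptions.
def _demo_rnn_doc_reader():
    opt = dict(vocab_size=100, embedding_dim=32, pretrained_words=False,
               use_word_emb=True, use_max_char_emb=False, use_cove=False,
               num_features=0, use_feat_emb=False, pos=False, ner=False,
               use_qemb=True, use_demb=False, dropout_emb=0.,
               dropout_rnn=0., dropout_rnn_output=False,
               variational_dropout=True, concat_rnn_layers=False,
               rnn_type='lstm', rnn_padding=False, hidden_size=16,
               doc_layers=2, question_layers=2, question_merge='self_attn',
               end_gru=False, sentence_level=False)
    reader = RnnDocReader(opt)
    x1 = (torch.rand(4, 20) * 98).long() + 2       # document word ids
    x2 = (torch.rand(4, 6) * 98).long() + 2        # question word ids
    x1_mask = torch.zeros(4, 20).byte()
    x2_mask = torch.zeros(4, 6).byte()
    start, end = reader(x1, None, None, None, x1_mask, x2, x2_mask, None)
    assert start.size() == (4, 20) and end.size() == (4, 20)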
class CnnDocReader(RCModelProto):
def __init__(self, opt, padding_idx=0, embedding=None):
super().__init__(opt, padding_idx, embedding)
# RNN document encoder
self.doc_rnn = layers.DilatedResNet(
input_size=self.doc_input_size,
hidden_size=opt['hidden_size'],
num_layers=opt['doc_layers'],
dropout_rate=opt['dropout_rnn'],
dropout_output=opt['dropout_rnn_output'],
dilation_base=2,
dilation_layers=3,
dilation_offset=1,
)
# RNN question encoder
self.question_rnn = layers.DilatedResNet(
input_size=self.question_input_size,
hidden_size=opt['hidden_size'],
num_layers=opt['question_layers'],
dropout_rate=opt['dropout_rnn'],
dropout_output=opt['dropout_rnn_output'],
dilation_base=1,
)
# Output sizes of rnn encoders
doc_hidden_size = opt['hidden_size']
question_hidden_size = opt['hidden_size']
# Question merging
if opt['question_merge'] not in ['avg', 'self_attn']:
raise NotImplementedError('question_merge = %s' % opt['question_merge'])
if opt['question_merge'] == 'self_attn':
self.self_attn = layers.LinearSeqAttn(question_hidden_size)
# Bilinear attention for span start/end
self.start_attn = layers.BilinearSeqAttn(
doc_hidden_size,
question_hidden_size,
)
if opt['end_gru']:
self.end_gru = nn.GRUCell(doc_hidden_size, question_hidden_size)
self.end_attn = layers.BilinearSeqAttn(
doc_hidden_size,
question_hidden_size,
)
def forward(self, x1, x1_f, x1_pos, x1_ner, x1_mask, x2, x2_mask, sent_lens, x1_char=None, x2_char=None):
"""Inputs:
x1 = document word indices [batch * len_d]
x1_f = document word features indices [batch * len_d * nfeat]
x1_pos = document POS tags [batch * len_d]
x1_ner = document entity tags [batch * len_d]
x1_mask = document padding mask [batch * len_d]
x2 = question word indices [batch * len_q]
x2_mask = question padding mask [batch * len_q]
"""
# Embed both document and question
x1_paired_emb, x2_paired_emb, x1_full_emb, x2_full_emb, feat_dict = self.forward_emb(x1, x1_f, x1_pos, x1_ner, x1_mask, x2, x2_mask, sent_lens, x1_char, x2_char)
        if self.opt['sentence_level']:
            x1_mask_backup = x1_mask
            x1_full_emb = layers.split_sentences(x1_full_emb, sent_lens)
# Encode document with RNN
doc_hiddens = self.doc_rnn(x1_full_emb, x1_mask)
if self.opt['sentence_level']:
x1_mask = x1_mask_backup
doc_hiddens = layers.combine_sentences(doc_hiddens, sent_lens)
# Encode question with RNN + merge hiddens
question_hiddens = self.question_rnn(x2_full_emb, x2_mask)
if self.opt['question_merge'] == 'avg':
q_merge_weights = layers.uniform_weights(question_hiddens, x2_mask)
elif self.opt['question_merge'] == 'self_attn':
q_merge_weights = self.self_attn(question_hiddens, x2_mask)
question_hidden = layers.weighted_avg(question_hiddens, q_merge_weights)
# Predict start and end positions
start_scores = self.start_attn(doc_hiddens, question_hidden, x1_mask, log=self.training)
if self.opt['end_gru']:
weights = start_scores.exp() if self.training else start_scores
weighted_doc_hidden = layers.weighted_avg(doc_hiddens, weights)
question_v_hidden = self.end_gru(weighted_doc_hidden, question_hidden)
end_scores = self.end_attn(doc_hiddens, question_v_hidden, x1_mask, log=self.training)
else:
end_scores = self.end_attn(doc_hiddens, question_hidden, x1_mask, log=self.training)
return start_scores, end_scores
class FusionNet(RCModelProto):
"""Network for FusionNet."""
def __init__(self, opt, padding_idx=0, embedding=None):
super().__init__(opt, padding_idx, embedding)
# RNN document encoder
self.doc_rnn = layers.StackedBRNN(
input_size=self.doc_input_size,
hidden_size=opt['hidden_size'],
num_layers=2,
dropout_rate=opt['dropout_rnn'],
# dropout_output=opt['dropout_rnn_output'],
variational_dropout=opt['variational_dropout'],
concat_layers=True,
rnn_type=opt['rnn_type'],
padding=opt['rnn_padding'],
residual=opt['residual'],
squeeze_excitation=opt['squeeze_excitation'],
)
# RNN question encoder
self.question_rnn = layers.StackedBRNN(
input_size=self.question_input_size,
hidden_size=opt['hidden_size'],
num_layers=2,
dropout_rate=opt['dropout_rnn'],
# dropout_output=opt['dropout_rnn_output'],
variational_dropout=opt['variational_dropout'],
concat_layers=True,
rnn_type=opt['rnn_type'],
padding=opt['rnn_padding'],
residual=opt['residual'],
squeeze_excitation=opt['squeeze_excitation'],
)
# Output sizes of rnn encoders
doc_hidden_size = 2 * 2 * opt['hidden_size']
question_hidden_size = doc_hidden_size
self.question_urnn = layers.StackedBRNN(
input_size=question_hidden_size,
hidden_size=opt['hidden_size'],
num_layers=opt['fusion_understanding_layers'],
dropout_rate=opt['dropout_rnn'],
variational_dropout=opt['variational_dropout'],
rnn_type=opt['rnn_type'],
padding=opt['rnn_padding'],
residual=opt['residual'],
squeeze_excitation=opt['squeeze_excitation'],
concat_layers=False,
)
self.multi_level_fusion = layers.FullAttention(
full_size=self.paired_input_size + doc_hidden_size,
hidden_size=2 * 3 * opt['hidden_size'],
num_level=3,
dropout=opt['dropout_rnn'],
variational_dropout=opt['variational_dropout'],
)
self.doc_urnn = layers.StackedBRNN(
input_size=2 * 5 * opt['hidden_size'],
hidden_size=opt['hidden_size'],
num_layers=opt['fusion_understanding_layers'],
dropout_rate=opt['dropout_rnn'],
variational_dropout=opt['variational_dropout'],
rnn_type=opt['rnn_type'],
padding=opt['rnn_padding'],
residual=opt['residual'],
squeeze_excitation=opt['squeeze_excitation'],
concat_layers=False,
)
self.self_boost_fusions = nn.ModuleList()
self.doc_final_rnns = nn.ModuleList()
full_size=self.paired_input_size + 4 * 3 * opt['hidden_size']
for i in range(self.opt['fusion_self_boost_times']):
self.self_boost_fusions.append(layers.FullAttention(
full_size=full_size,
hidden_size=2 * opt['hidden_size'],
num_level=1,
dropout=opt['dropout_rnn'],
variational_dropout=opt['variational_dropout'],
))
self.doc_final_rnns.append(layers.StackedBRNN(
input_size=4 * opt['hidden_size'],
hidden_size=opt['hidden_size'],
num_layers=opt['fusion_final_layers'],
dropout_rate=opt['dropout_rnn'],
variational_dropout=opt['variational_dropout'],
rnn_type=opt['rnn_type'],
padding=opt['rnn_padding'],
residual=opt['residual'],
squeeze_excitation=opt['squeeze_excitation'],
concat_layers=False,
))
full_size += 2 * opt['hidden_size']
# Question merging
# Question merging
if opt['question_merge'] not in ['avg', 'self_attn']:
    raise NotImplementedError('question_merge = %s' % opt['question_merge'])
if opt['question_merge'] == 'self_attn':
    self.self_attn = layers.LinearSeqAttn(2 * opt['hidden_size'])
# Bilinear attention for span start/end
self.start_attn = layers.BilinearSeqAttn(
2 * opt['hidden_size'],
2 * opt['hidden_size'],
)
if opt['end_gru']:
self.end_gru = nn.GRUCell(2 * opt['hidden_size'], 2 * opt['hidden_size'])
self.end_attn = layers.BilinearSeqAttn(
2 * opt['hidden_size'],
2 * opt['hidden_size'],
)
def forward(self, x1, x1_f, x1_pos, x1_ner, x1_mask, x2, x2_mask, sent_lens, x1_char=None, x2_char=None, logit=False):
"""Inputs:
x1 = document word indices [batch * len_d]
x1_f = document word features indices [batch * len_d * nfeat]
x1_pos = document POS tags [batch * len_d]
x1_ner = document entity tags [batch * len_d]
x1_mask = document padding mask [batch * len_d]
x2 = question word indices [batch * len_q]
x2_mask = question padding mask [batch * len_q]
"""
def dropout(x, p=self.opt['dropout_rnn']):
return layers.dropout(x, p=p,
training=self.training, variational=self.opt['variational_dropout'] and x.dim() == 3)
# Embed both document and question
x1_paired_emb, x2_paired_emb, x1_full_emb, x2_full_emb, feat_dict = self.forward_emb(x1, x1_f, x1_pos, x1_ner, x1_mask, x2, x2_mask, sent_lens, x1_char, x2_char)
# Encode document with RNN
doc_hiddens = self.doc_rnn(x1_full_emb, x1_mask)
# Encode question with RNN
question_hiddens = self.question_rnn(x2_full_emb, x2_mask)
# Question Understanding
question_u_hiddens = self.question_urnn(question_hiddens, x2_mask)
# Fully-Aware Multi-level Fusion
doc_HoW = torch.cat([x1_paired_emb, doc_hiddens], 2)
question_HoW = torch.cat([x2_paired_emb, question_hiddens], 2)
question_cat_hiddens = torch.cat([question_hiddens, question_u_hiddens], 2)
doc_fusions = self.multi_level_fusion(doc_HoW, question_HoW, question_cat_hiddens, x2_mask)
# Document Understanding
doc_u_hiddens = self.doc_urnn(torch.cat([doc_hiddens, doc_fusions], 2), x1_mask)
# Fully-Aware Self-Boosted Fusion
self_boost_HoW = torch.cat([x1_paired_emb, doc_hiddens, doc_fusions, doc_u_hiddens], 2)
for i in range(len(self.self_boost_fusions)):
doc_self_fusions = self.self_boost_fusions[i](self_boost_HoW, self_boost_HoW, doc_u_hiddens, x1_mask)
# Final document representation
doc_final_hiddens = self.doc_final_rnns[i](torch.cat([doc_u_hiddens, doc_self_fusions], 2), x1_mask)
if i < len(self.self_boost_fusions) - 1:
self_boost_HoW = torch.cat([self_boost_HoW, doc_final_hiddens], 2)
doc_u_hiddens = doc_final_hiddens
# Encode question with RNN + merge hiddens
if self.opt['question_merge'] == 'avg':
q_merge_weights = layers.uniform_weights(question_u_hiddens, x2_mask)
elif self.opt['question_merge'] == 'self_attn':
q_merge_weights = self.self_attn(dropout(question_u_hiddens), x2_mask)
question_u_hidden = layers.weighted_avg(question_u_hiddens, q_merge_weights)
# Predict start and end positions
start_logits = self.start_attn(dropout(doc_final_hiddens), dropout(question_u_hidden), x1_mask, logit=True)
if self.opt['sentence_level']:
start_logits = layers.combine_sentences(start_logits, sent_lens)
start_scores = F.log_softmax(start_logits, 1) if self.training else F.softmax(start_logits, 1)
if self.opt['end_gru']:
weights = start_scores.exp() if self.training else start_scores
weighted_doc_hidden = layers.weighted_avg(doc_final_hiddens, weights)
question_v_hidden = self.end_gru(dropout(weighted_doc_hidden), dropout(question_u_hidden))
# question_v_hidden = layers.dropout(question_v_hidden)
end_logits = self.end_attn(dropout(doc_final_hiddens), dropout(question_v_hidden), x1_mask, logit=True)
else:
end_logits = self.end_attn(doc_final_hiddens, question_u_hidden, x1_mask, logit=True)
if self.opt['sentence_level']:
end_logits = layers.combine_sentences(end_logits, sent_lens)
if logit:
return start_logits, end_logits
else:
end_scores = F.log_softmax(end_logits, 1) if self.training else F.softmax(end_logits, 1)
return start_scores, end_scores
class BiDAF(RCModelProto):
"""simple BiDAF model (without char-cnn)"""
def __init__(self, opt, padding_idx=0, embedding=None):
super().__init__(opt, padding_idx, embedding)
# Store config
# RNN document encoder
self.doc_enc = layers.StackedBRNN(
input_size=self.doc_input_size,
hidden_size=opt['hidden_size'],
num_layers=1,
dropout_rate=opt['dropout_rnn'],
variational_dropout=opt['variational_dropout'],
rnn_type=opt['rnn_type'],
padding=opt['rnn_padding'],
)
# RNN question encoder
if self.doc_input_size == self.question_input_size:
self.question_enc = self.doc_enc
else:
self.question_enc = layers.StackedBRNN(
input_size=self.question_input_size,
hidden_size=opt['hidden_size'],
num_layers=1,
dropout_rate=opt['dropout_rnn'],
variational_dropout=opt['variational_dropout'],
rnn_type=opt['rnn_type'],
padding=opt['rnn_padding'],
)
# Context-Query Attention Layer
self.biattn = layers.BiAttn(2*opt['hidden_size'])
# Model Encoder Layer
self.model_enc = layers.StackedBRNN(
input_size=opt['hidden_size']*8,
hidden_size=opt['hidden_size'],
num_layers=2,
dropout_rate=opt['dropout_rnn'],
variational_dropout=opt['variational_dropout'],
rnn_type=opt['rnn_type'],
padding=opt['rnn_padding'],
)
self.end_enc = layers.StackedBRNN(
input_size=opt['hidden_size']*14,
hidden_size=opt['hidden_size'],
num_layers=1,
dropout_rate=opt['dropout_rnn'],
variational_dropout=opt['variational_dropout'],
rnn_type=opt['rnn_type'],
padding=opt['rnn_padding'],
)
# Bilinear attention for span start/end
self.start_attn = layers.LinearSeqAttn(10 * opt['hidden_size'])
self.end_attn = layers.LinearSeqAttn(10 * opt['hidden_size'])
def forward(self, x1, x1_f, x1_pos, x1_ner, x1_mask, x2, x2_mask, sent_lens, x1_char=None, x2_char=None):
"""Inputs:
x1 = document word indices [batch * len_d]
x1_f = document word features indices [batch * len_d * nfeat]
x1_pos = document POS tags [batch * len_d]
x1_ner = document entity tags [batch * len_d]
x1_mask = document padding mask [batch * len_d]
x2 = question word indices [batch * len_q]
x2_mask = question padding mask [batch * len_q]
"""
batch_size = x1.size(0)
x1_len = x1.size(1)
x2_len = x2.size(1)
def dropout(x, p=self.opt['dropout_rnn']):
return layers.dropout(x, p=p,
training=self.training, variational=self.opt['variational_dropout'] and x.dim() == 3)
# Embed both document and question
x1_paired_emb, x2_paired_emb, x1_full_emb, x2_full_emb, feat_dict = self.forward_emb(x1, x1_f, x1_pos, x1_ner, x1_mask, x2, x2_mask, sent_lens, x1_char, x2_char)
# Encode document with RNN
doc_hiddens = self.doc_enc(x1_full_emb, x1_mask)
# Encode question with RNN
question_hiddens = self.question_enc(x2_full_emb, x2_mask)
# Context-Query Attention
outputs = self.biattn(doc_hiddens, question_hiddens, x1_mask, x2_mask)
outputs[1] = outputs[1].expand_as(outputs[2]) # Q2C output is a single vector and needs expanding
outputs.append(doc_hiddens)
p0 = torch.cat(outputs, 2)
# Predict start and end positions
g1 = self.model_enc(p0, x1_mask)
start_scores = self.start_attn(dropout(torch.cat([g1, p0], 2)), x1_mask, log=self.training)
alpha = start_scores.exp() if self.training else start_scores
a1i = layers.weighted_avg(g1, alpha).unsqueeze_(1)
g2 = self.end_enc(torch.cat([p0, g1, a1i.expand_as(g1), g1 * a1i], 2), x1_mask)
end_scores = self.end_attn(dropout(torch.cat([g2, p0], 2)), x1_mask, log=self.training)
return start_scores, end_scores
| 33,559 | 43.332893 | 169 | py |
FastFusionNet | FastFusionNet-master/qa/general_utils.py | # Modified from https://github.com/momohuang/FusionNet-NLI/blob/master/general_utils.py
import re
import os
import sys
import random
import string
import logging
import argparse
import unicodedata
from shutil import copyfile
from datetime import datetime
from collections import Counter
import torch
import msgpack
import jsonlines
import numpy as np
#===========================================================================
#================= All for preprocessing SQuAD data set ====================
#===========================================================================
def normalize_text(text):
return unicodedata.normalize('NFD', text)
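# Minimal check (illustration only, not part of the original pipeline): NFD
# decomposes accented characters into base character + combining mark, so
# token lengths can change after normalization.
def _demo_normalize_text():
    s = normalize_text('caf\u00e9')          # 'café' with a composed é (U+00E9)
    assert s == 'cafe\u0301' and len(s) == 5  # decomposed into 'e' + U+0301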
def load_glove_vocab(file, wv_dim):
vocab = set()
with open(file, encoding="utf8") as f:
for line in f:
elems = line.split()
token = normalize_text(''.join(elems[0:-wv_dim]))
vocab.add(token)
return vocab
def pre_proc_sru(text):
'''normalize spaces in a string. From SRU DrQA code'''
text = re.sub(r'\s+', ' ', text)
return text
def space_extend(matchobj):
return ' ' + matchobj.group(0) + ' '
def pre_proc_fusion(text):
'''from FusionNet-NLI'''
# make hyphens, spaces clean
text = re.sub(u'-|\u2010|\u2011|\u2012|\u2013|\u2014|\u2015|%|\[|\]|:|\(|\)|/', space_extend, text)
text = text.strip(' \n')
text = re.sub(r'\s+', ' ', text)
return text
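# Usage sketch (illustration only): hyphens and bracket-like punctuation are
# padded with spaces by space_extend, then whitespace is collapsed.
def _demo_pre_proc_fusion():
    assert pre_proc_fusion('state-of-the-art (SOTA)') == 'state - of - the - art ( SOTA )'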
extra_split_chars = (u'-', u'£', u'€', u'¥', u'¢', u'₹', u'\u2212', u'\u2014',
u'\u2013', u'/', u'~', u'"', u"'", u'\ud01C', u'\u2019',
u'\u201D', u'\u2018', u'\u00B0')
extra_split_tokens = (
u'``',
u'(?<=[^_])_(?=[^_])', # underscores w/o a preceding or following underscore, so __wow___ -> ___ wow ___
u"''",
u'[' + u''.join(extra_split_chars) + ']')
extra_split_chars_re = re.compile(u'(' + u'|'.join(extra_split_tokens) + u')')
def pre_proc_qanet(text):
'''from QANet code'''
# make hyphens, spaces clean
text = re.sub(u'-|\u2010|\u2011|\u2012|\u2013|\u2014|\u2015|%|\[|\]|:|\(|\)|/', space_extend, text)
text = extra_split_chars_re.sub(space_extend, text)
text = text.strip(' \n')
text = re.sub(r'\s+', ' ', text)
return text
def process_jsonlines(data_file):
with jsonlines.open(data_file) as reader:
snli_label = []
snli_sent1 = []
snli_sent2 = []
for obj in reader:
if obj['gold_label'] != '-':
snli_label.append(obj['gold_label'])
snli_sent1.append(obj['sentence1'])
snli_sent2.append(obj['sentence2'])
return SNLIData(snli_label, snli_sent1, snli_sent2)
def feature_gen(A_docs, B_docs):
A_tags = [[w.tag_ for w in doc] for doc in A_docs]
A_ents = [[w.ent_type_ for w in doc] for doc in A_docs]
A_features = []
for textA, textB in zip(A_docs, B_docs):
counter_ = Counter(w.text.lower() for w in textA)
total = sum(counter_.values())
term_freq = [counter_[w.text.lower()] / total for w in textA]
question_word = {w.text for w in textB}
question_lower = {w.text.lower() for w in textB}
question_lemma = {w.lemma_ if w.lemma_ != '-PRON-' else w.text.lower() for w in textB}
match_origin = [w.text in question_word for w in textA]
match_lower = [w.text.lower() in question_lower for w in textA]
match_lemma = [(w.lemma_ if w.lemma_ != '-PRON-' else w.text.lower()) in question_lemma for w in textA]
A_features.append(list(zip(term_freq, match_origin, match_lower, match_lemma)))
return A_tags, A_ents, A_features
def build_embedding(embed_file, targ_vocab, wv_dim):
vocab_size = len(targ_vocab)
emb = np.random.uniform(-1, 1, (vocab_size, wv_dim))
emb[0] = 0 # <PAD> should be all 0 (using broadcast)
w2id = {w: i for i, w in enumerate(targ_vocab)}
with open(embed_file, encoding="utf8") as f:
for line in f:
elems = line.split()
token = normalize_text(''.join(elems[0:-wv_dim]))
if token in w2id:
emb[w2id[token]] = [float(v) for v in elems[-wv_dim:]]
return emb
def token2id(docs, vocab, unk_id=None):
w2id = {w: i for i, w in enumerate(vocab)}
ids = [[w2id[w] if w in w2id else unk_id for w in doc] for doc in docs]
return ids
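# Usage sketch (illustration only): out-of-vocabulary words map to unk_id.
def _demo_token2id():
    vocab = ['<PAD>', '<UNK>', 'the', 'cat']
    assert token2id([['the', 'cat', 'sat']], vocab, unk_id=1) == [[2, 3, 1]]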
| 4,322 | 34.727273 | 111 | py |
WPFS | WPFS-main/src/main.py | import json
import pytorch_lightning
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.callbacks import RichProgressBar, LearningRateMonitor
from pytorch_lightning.loggers import CSVLogger
from sklearn.ensemble import RandomForestClassifier
from functools import partial
import os
import warnings
import sklearn
import logging
import argparse
import numpy as np
from dataset import *
from models import *
def get_run_name(args):
run_name = f"{args.model}"
return run_name
def train(args):
#### Load dataset
print(f"\nInside training function")
print(f"\nLoading data {args.dataset}...")
data_module = create_data_module(args)
print(f"Train/Valid/Test splits of sizes {args.train_size}, {args.valid_size}, {args.test_size}")
print(f"Num of features: {args.num_features}")
csv_logger = CSVLogger("logs", name=f"{args.experiment_name}")
#### Baselines training
if args.model == 'rf':
# scikit-learn expects class_weight to be a dictionary mapping class index -> weight
class_weights = {i: val for i, val in enumerate(args.class_weights)}
model = RandomForestClassifier(n_estimators=args.rf_n_estimators,
min_samples_leaf=args.rf_min_samples_leaf, max_depth=args.rf_max_depth,
class_weight=class_weights, max_features='sqrt',
random_state=42, verbose=True)
model.fit(data_module.X_train, data_module.y_train)
#### Log metrics
y_pred_train = model.predict(data_module.X_train)
y_pred_valid = model.predict(data_module.X_valid)
y_pred_test = model.predict(data_module.X_test)
train_metrics = compute_all_metrics(args, data_module.y_train, y_pred_train)
valid_metrics = compute_all_metrics(args, data_module.y_valid, y_pred_valid)
test_metrics = compute_all_metrics(args, data_module.y_test, y_pred_test)
res = {}
for metrics, dataset_name in zip(
[train_metrics, valid_metrics, test_metrics],
["bestmodel_train", "bestmodel_valid", "bestmodel_test"]):
for metric_name, metric_value in metrics.items():
csv_logger.log_metrics({f"{dataset_name}/{metric_name}": metric_value})
res[f"{dataset_name}/{metric_name}"] = [metric_value]
pd.DataFrame(res).to_csv(f"{csv_logger.log_dir}/metrics.csv", index=False)
#### Pytorch lightning training
else:
#### Set embedding size if it wasn't provided
if args.wpn_embedding_size==-1:
args.wpn_embedding_size = args.train_size
if args.sparsity_gene_embedding_size==-1:
args.sparsity_gene_embedding_size = args.train_size
if args.max_steps!=-1:
steps_per_epoch = np.floor(args.train_size / args.batch_size)
args.max_epochs = int(np.ceil(args.max_steps / steps_per_epoch))
print(f"Training for max_epochs = {args.max_epochs}")
#### Create model
model = create_model(args, data_module)
##### Train
checkpoint_callback = ModelCheckpoint( # save best model for evaluation
monitor=f'valid/cross_entropy_loss',
mode='min',
save_last=True,
verbose=True
)
callbacks = [checkpoint_callback, RichProgressBar()]
if args.patience_early_stopping and not args.train_on_full_data:
callbacks.append(EarlyStopping(
monitor=f'valid/cross_entropy_loss',
mode='min',
patience=args.patience_early_stopping,
))
callbacks.append(LearningRateMonitor(logging_interval='step'))
pl.seed_everything(args.seed_training, workers=True)
trainer = pl.Trainer(
# Training
max_steps=args.max_steps,
gradient_clip_val=2.5,
# logging
logger=csv_logger,
log_every_n_steps = 1,
val_check_interval = args.val_check_interval,
callbacks = callbacks,
# miscellaneous
accelerator="auto",
devices="auto",
detect_anomaly=True
)
# train
trainer.fit(model, data_module)
if args.train_on_full_data: # if we trained on full data
checkpoint_path = checkpoint_callback.last_model_path
else:
checkpoint_path = checkpoint_callback.best_model_path
print(f"\n\nBest model saved on path {checkpoint_path}\n\n")
#### Compute metrics for the best model
model.log_test_key = 'bestmodel_train'
trainer.test(model, dataloaders=data_module.train_dataloader(), ckpt_path=checkpoint_path)
model.log_test_key = 'bestmodel_valid'
trainer.test(model, dataloaders=data_module.val_dataloader(), ckpt_path=checkpoint_path)
model.log_test_key = 'bestmodel_test'
trainer.test(model, dataloaders=data_module.test_dataloader(), ckpt_path=checkpoint_path)
print("\nExiting from train function..")
def parse_arguments(args=None):
parser = argparse.ArgumentParser()
############### Dataset ###############
parser.add_argument('--dataset', type=str, required=True,
choices=['metabric-pam50', 'metabric-dr', 'tcga-2ysurvival', 'tcga-tumor-grade',
'lung', 'prostate', 'toxicity', 'cll', 'smk'])
############### Model ###############
parser.add_argument('--model', type=str, choices=['mlp', 'wpfs', 'rf', 'fsnet', 'cae', 'dietnetworks'], default='wpfs')
parser.add_argument('--feature_extractor_dims', type=int, nargs='+', default=[100, 100, 10],
help='layer size for the feature extractor. If using a virtual layer,\
the first dimension must match it.')
parser.add_argument('--layers_for_hidden_representation', type=int, default=2,
help='number of layers after which to output the hidden representation used as input to the decoder \
(e.g., if the layers are [100, 100, 10] and layers_for_hidden_representation=2, \
then the hidden representation will be the representation after the two layers [100, 100])')
parser.add_argument('--dropout_rate', type=float, default=0.2, help='dropout rate for the main network')
############### Sparsity network and sparsity regularization ###############
parser.add_argument('--sparsity_gene_embedding_type', type=str, default='nmf',
choices=['feature_values', 'nmf'], help='It`s applied over data preprocessed using `embedding_preprocessing`')
parser.add_argument('--sparsity_gene_embedding_size', type=int, default=50)
parser.add_argument('--sparsity_regularizer', action='store_true', dest='sparsity_regularizer')
parser.set_defaults(sparsity_regularizer=False)
parser.add_argument('--sparsity_regularizer_hyperparam', type=float, default=0,
help='The weight of the sparsity regularizer (used to compute total_loss)')
############### Weight predictor network ###############
parser.add_argument('--wpn_embedding_type', type=str, default='nmf',
choices=['histogram', 'feature_values', 'nmf', 'svd'],
help='histogram = histogram x means (like FsNet)\
feature_values = randomly pick patients and use their gene expressions as the embedding\
It`s applied over data preprocessed using `embedding_preprocessing`')
parser.add_argument('--wpn_embedding_size', type=int, default=50, help='Size of the gene embedding')
parser.add_argument('--wpn_layers', type=int, nargs='+', default=[100, 100, 100, 100], help="The list of layer sizes for the weight predictor network.")
############### Concrete autoencoder parameters ###############
parser.add_argument('--concrete_anneal_iterations', type=int, default=1000,
help='number of iterations for annealing the Concrete random variables (in CAE and FsNet)')
############### Scikit-learn parameters ###############
parser.add_argument('--rf_n_estimators', type=int, default=500, help='number of trees in the random forest')
parser.add_argument('--rf_max_depth', type=int, default=5, help='maximum depth of the tree')
parser.add_argument('--rf_min_samples_leaf', type=int, default=2, help='minimum number of samples in a leaf')
####### Training
parser.add_argument('--use_best_hyperparams', action='store_true', dest='use_best_hyperparams',
help="Use the best hyperparameters found for each supported dataset.")
parser.set_defaults(use_best_hyperparams=False)
parser.add_argument('--max_steps', type=int, default=10000, help='Specify the max number of steps to train.')
parser.add_argument('--lr', type=float, default=3e-3, help='Learning rate')
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--gamma', type=float, default=0,
help='The factor multiplied to the reconstruction error (DietNetworks and FsNet) \
If >0, then create a decoder with a reconstruction loss. \
If ==0, then dont create a decoder.')
parser.add_argument('--patient_preprocessing', type=str, default='z_score',
choices=['raw', 'z_score', 'minmax'],
help='Preprocessing applied on each COLUMN of the N x D matrix, where a row contains all gene expressions of a patient.')
parser.add_argument('--embedding_preprocessing', type=str, default='minmax',
choices=['raw', 'z_score', 'minmax'],
help='Preprocessing applied on each ROW of the D x N matrix, where a row contains all patient expressions for one gene.')
####### Training on the entire train + validation data
parser.add_argument('--train_on_full_data', action='store_true', dest='train_on_full_data', \
help='Train on the full data (train + validation), leaving only `--test_split` for testing.')
# We observe unstable results when training on the full data (because the number of epochs differs greatly from split to split)
# We use 10% of the training data to perform early stopping.
parser.set_defaults(train_on_full_data=False)
####### Validation
parser.add_argument('--patience_early_stopping', type=int, default=200,
help='Set number of checks (set by *val_check_interval*) to do early stopping.\
It will train for at least args.val_check_interval * args.patience_early_stopping epochs')
parser.add_argument('--val_check_interval', type=int, default=5,
help='number of steps at which to check the validation')
####### Cross-validation
parser.add_argument('--num_repeats', type=int, default=5, help='number of times to repeat the cross-validation; each time shuffle the data')
parser.add_argument('--cv_folds', type=int, default=5, help="Number of CV splits")
parser.add_argument('--repeat_id', type=int, default=0, help='each repeat_id gives a different random seed for shuffling the dataset')
parser.add_argument('--test_split', type=int, default=0, help="Index of the test split. It should be smaller than `cv_folds`")
parser.add_argument('--valid_percentage', type=float, default=0.1, help='Percentage of training data used for validation')
parser.add_argument('--run_repeats_and_cv', action='store_true', dest='run_repeats_and_cv')
parser.set_defaults(run_repeats_and_cv=False)
####### Optimization
parser.add_argument('--weight_decay', type=float, default=1e-4)
parser.add_argument('--class_weight', type=str, choices=['standard', 'balanced'], default='balanced',
help="If `standard`, all classes use a weight of 1.\
If `balanced`, classes are weighted inverse proportionally to their size (see https://scikit-learn.org/stable/modules/generated/sklearn.utils.class_weight.compute_class_weight.html)")
parser.add_argument('--lr_scheduler', type=str, choices=['cosine_warm_restart', 'lambda'], default='lambda')
parser.add_argument('--cosine_warm_restart_eta_min', type=float, default=1e-6)
parser.add_argument('--cosine_warm_restart_t_0', type=int, default=35)
parser.add_argument('--cosine_warm_restart_t_mult', type=float, default=1)
parser.add_argument('--debugging', action='store_true', dest='debugging')
parser.set_defaults(debugging=False)
# SEEDS
parser.add_argument('--seed_model_init', type=int, default=42, help='Seed for initializing the model (to have the same weights)')
parser.add_argument('--seed_training', type=int, default=42, help='Seed for training (e.g., batch ordering)')
parser.add_argument('--seed_kfold', type=int, help='Seed used for doing the kfold in train/test split')
parser.add_argument('--seed_validation', type=int, help='Seed used for selecting the validation split.')
# Dataset loading
parser.add_argument('--num_workers', type=int, default=1, help="number of workers for loading dataset")
parser.add_argument('--no_pin_memory', dest='pin_memory', action='store_false', help='dont pin memory for data loaders')
parser.set_defaults(pin_memory=True)
####### Logging
parser.add_argument('--experiment_name', type=str, default='', help='Name for the experiment')
return parser.parse_args(args)
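# Usage sketch (illustration only): parse_arguments also accepts an explicit
# argv list, so configurations can be built programmatically, e.g. in tests.
def _demo_parse_arguments():
    args = parse_arguments(['--dataset', 'lung', '--model', 'wpfs'])
    assert args.model == 'wpfs' and args.batch_size == 8  # default batch size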
if __name__ == "__main__":
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.filterwarnings("ignore", category=pytorch_lightning.utilities.warnings.LightningDeprecationWarning)
print("Starting...")
logging.basicConfig(
filename='logs_exceptions.txt',
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG
)
args = parse_arguments()
# set experiment name if not set
if args.experiment_name=='':
import uuid
args.experiment_name = str(uuid.uuid4())[:8]
#### Assert that the dataset is supported
SUPPORTED_DATASETS = ['metabric-pam50', 'metabric-dr',
'tcga-2ysurvival', 'tcga-tumor-grade',
'lung', 'prostate', 'toxicity', 'cll', 'smk', 'your_custom_dataset']
if args.dataset not in SUPPORTED_DATASETS:
raise Exception(f"Dataset {args.dataset} not supported. Supported datasets are {SUPPORTED_DATASETS}")
# set seeds
args.seed_kfold = args.repeat_id # repeat_id sets the kfold random seed, which in turn creates a different shuffle of the dataset
args.seed_validation = args.test_split
if args.dataset == 'prostate' or args.dataset == 'cll':
# `val_check_interval` must be less than or equal to the number of training batches;
# because these two datasets are small, one cannot use batch_size=16 with val_check_interval=5
args.val_check_interval = 4
# BEST CONFIGS FOR EACH BASELINE AND DATASET
if args.use_best_hyperparams:
if args.model in ['wpfs', 'fsnet', 'dietnetworks']:
if args.dataset=='cll':
args.wpn_embedding_size = 70
args.sparsity_gene_embedding_size = 70
if args.dataset=='lung':
args.wpn_embedding_size = 20
args.sparsity_gene_embedding_size = 20
elif args.model=='rf':
params = {
'cll': (3, 3),
'lung': (3, 2),
'metabric-dr': (7, 2),
'metabric-pam50': (7, 2),
'prostate': (5, 2),
'smk': (5, 2),
'tcga-2ysurvival': (3, 3),
'tcga-tumor-grade': (3, 3),
'toxicity': (5, 3)
}
args.rf_max_depth, args.rf_min_samples_leaf = params[args.dataset]
if args.run_repeats_and_cv:
# Run 5 fold cross-validation with 5 repeats
args_new = dict(json.loads(json.dumps(vars(args))))
for repeat_id in range(args_new['num_repeats']):
for test_split in range(args_new['cv_folds']):
args_new['repeat_id'] = repeat_id
args_new['test_split'] = test_split
train(argparse.Namespace(**args_new))
else:
train(args) | 15,021 | 39.6 | 192 | py |
WPFS | WPFS-main/src/_config.py | BASE_DIR = '.' # path to the project directory
DATA_DIR = f'{BASE_DIR}/data'
LOGS_DIR = f'{BASE_DIR}/logs'
RESULTS_DIR = f"{BASE_DIR}/results"
SEED_VALUE = 42
import random
import numpy as np
import torch
def seed_everything(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
return seed
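# Sanity sketch (illustration only): re-seeding reproduces the same draws
# across python's random, numpy and torch.
def _demo_seed_everything():
    seed_everything(SEED_VALUE)
    a = np.random.rand(3)
    seed_everything(SEED_VALUE)
    assert np.allclose(a, np.random.rand(3))  # identical draws after re-seeding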
| 367 | 15.727273 | 46 | py |
WPFS | WPFS-main/src/sparsity_network.py | import torch
import torch.nn as nn
class SparsityNetwork(nn.Module):
"""
Sparsity network
- same architecture as WPN
- input: gene embedding matrix (D x M)
- output: 1 neuron, sigmoid activation function (which will get multiplied by the weights associated with the gene)
"""
def __init__(self, args, embedding_matrix):
"""
:param nn.Tensor(D, M) embedding_matrix: matrix with the embeddings (D = number of features, M = embedding size)
"""
super().__init__()
self.args = args
self.register_buffer('embedding_matrix', embedding_matrix) # store the static embedding_matrix
layers = []
dim_prev = args.sparsity_gene_embedding_size # input for global sparsity: gene embedding
for dim in args.wpn_layers:
layers.append(nn.Linear(dim_prev, dim))
layers.append(nn.LeakyReLU())
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.Dropout(args.dropout_rate))
dim_prev = dim
layers.append(nn.Linear(dim, 1))
self.network = nn.Sequential(*layers)
def forward(self):
"""
Input:
- input: None
Returns:
- Tensor of sigmoid values (D)
"""
out = self.network(self.embedding_matrix) # (D, 1)
out = torch.sigmoid(out)
return torch.squeeze(out, dim=1) # (D) | 1,231 | 26.377778 | 116 | py |
WPFS | WPFS-main/src/dataset.py | import os
from _config import *
import torch
from torch.utils.data import Dataset, DataLoader
import pytorch_lightning as pl
from torchnmf.nmf import NMF
import scipy.io as spio
import pandas as pd
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.utils.class_weight import compute_class_weight
from sklearn.preprocessing import StandardScaler, MinMaxScaler
def load_csv_data(path, labels_column=-1):
"""
Load a data file
- path (str): path to csv_file
- labels_column (int): indice of the column with labels
"""
Xy = pd.read_csv(path, index_col=0)
X = Xy[Xy.columns[:labels_column]].to_numpy()
y = Xy[Xy.columns[labels_column]].to_numpy()
return X, y
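# Usage sketch (illustration only): pandas accepts any file-like object; the
# first column is treated as the index and the last column holds the labels.
def _demo_load_csv_data():
    import io
    buf = io.StringIO("id,f1,f2,label\n0,1.0,2.0,1\n1,3.0,4.0,0\n")
    X, y = load_csv_data(buf)
    assert X.shape == (2, 2) and list(y) == [1, 0]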
def load_lung(drop_class_5=True):
"""
Labels in initial dataset:
1 139
2 17
3 21
4 20
5 6
We drop the class 5 because it has too little examples.
"""
data = spio.loadmat(os.path.join(DATA_DIR, 'lung.mat'))
X = pd.DataFrame(data['X'])
Y = pd.Series(data['Y'][:, 0])
# Drop samples of class 5 (too few examples)
if drop_class_5:
    X = X.drop(index=[156, 157, 158, 159, 160, 161])
    Y = Y.drop([156, 157, 158, 159, 160, 161])
new_labels = {1:0, 2:1, 3:2, 4:3, 5:4}
Y = Y.apply(lambda x: new_labels[x])
return X, Y
def load_prostate():
""""
Labels in initial dataset:
1 50
2 52
"""
data = spio.loadmat(os.path.join(DATA_DIR, 'Prostate_GE.mat'))
X = pd.DataFrame(data['X'])
Y = pd.Series(data['Y'][:, 0])
new_labels = {1:0, 2:1}
Y = Y.apply(lambda x: new_labels[x])
return X, Y
def load_toxicity():
"""
Labels in initial dataset:
1 45
2 45
3 39
4 42
"""
data = spio.loadmat(os.path.join(DATA_DIR, 'TOX_171.mat'))
X = pd.DataFrame(data['X'])
Y = pd.Series(data['Y'][:, 0])
new_labels = {1:0, 2:1, 3:2, 4:3}
Y = Y.apply(lambda x: new_labels[x])
return X, Y
def load_cll():
"""
Labels in initial dataset:
1 11
2 49
3 51
"""
data = spio.loadmat(os.path.join(DATA_DIR, 'CLL_SUB_111.mat'))
X = pd.DataFrame(data['X'])
Y = pd.Series(data['Y'][:, 0])
new_labels = {1:0, 2:1, 3:2}
Y = Y.apply(lambda x: new_labels[x])
return X, Y
def load_smk():
"""
Labels in initial dataset:
1 90
2 97
"""
data = spio.loadmat(os.path.join(DATA_DIR, 'SMK_CAN_187.mat'))
X = pd.DataFrame(data['X'])
Y = pd.Series(data['Y'][:, 0])
new_labels = {1:0, 2:1}
Y = Y.apply(lambda x: new_labels[x])
return X, Y
def load_your_custom_dataset():
return np.random.rand(100, 100), np.random.randint(0, 2, 100)
class CustomPytorchDataset(Dataset):
def __init__(self, X, y, transform=None) -> None:
# X, y are numpy
super().__init__()
self.X = torch.tensor(X, requires_grad=False)
self.y = torch.tensor(y, requires_grad=False)
self.transform = transform
def __getitem__(self, index):
x = self.X[index]
y = self.y[index]
if self.transform:
x = self.transform(x)
y = y.repeat(x.shape[0]) # replicate y to match the size of x
return x, y
def __len__(self):
return len(self.X)
def standardize_data(X_train, X_valid, X_test, preprocessing_type):
if preprocessing_type == 'z_score':
scaler = StandardScaler()
elif preprocessing_type == 'minmax':
scaler = MinMaxScaler()
elif preprocessing_type == 'raw':
scaler = None
else:
raise Exception("preprocessing_type not supported")
if scaler:
X_train = scaler.fit_transform(X_train).astype(np.float32)
X_valid = scaler.transform(X_valid).astype(np.float32)
X_test = scaler.transform(X_test).astype(np.float32)
return X_train, X_valid, X_test
def compute_stratified_splits(X, y, cv_folds, seed_kfold, split_id):
skf = StratifiedKFold(n_splits=cv_folds, shuffle=True, random_state=seed_kfold)
for i, (train_ids, test_ids) in enumerate(skf.split(X, y)):
if i == split_id:
return X[train_ids], X[test_ids], y[train_ids], y[test_ids]
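# Usage sketch (illustration only): with 10 samples, 2 balanced classes and
# 5 folds, every test fold holds exactly one sample of each class.
def _demo_stratified_splits():
    X = np.arange(20).reshape(10, 2)
    y = np.array([0, 1] * 5)
    _, X_te, _, y_te = compute_stratified_splits(X, y, cv_folds=5, seed_kfold=0, split_id=0)
    assert X_te.shape == (2, 2) and set(y_te) == {0, 1}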
############### EMBEDDINGS ###############
def compute_histogram_embedding(args, X, embedding_size):
"""
Compute embedding_matrix (D x M) based on the histograms. The function implements two methods:
DietNetwork
- Normalized bincounts for each SNP
FsNet
0. Input matrix NxD
1. Z-score standardize each column (mean 0, std 1)
2. Compute the histogram for every feature (with density = False)
3. Multiply the histogram values with the bin mean
:param (N x D) X: dataset, each row representing one sample
:return np.ndarray (D x M) embedding_matrix: matrix where each row represents the embedding of one feature
"""
X = np.rot90(X)
number_features = X.shape[0]
embedding_matrix = np.zeros(shape=(number_features, embedding_size))
for feature_id in range(number_features):
feature = X[feature_id]
hist_values, bin_edges = np.histogram(feature, bins=embedding_size) # like in FsNet
bin_centers = (bin_edges[1:] + bin_edges[:-1])/2
embedding_matrix[feature_id] = np.multiply(hist_values, bin_centers)
return embedding_matrix
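# Shape sketch (illustration only): the args parameter is unused inside the
# function body, so None suffices for a standalone check.
def _demo_histogram_embedding():
    X = np.random.randn(100, 7)                      # N=100 samples, D=7 features
    emb = compute_histogram_embedding(None, X, embedding_size=16)
    assert emb.shape == (7, 16)                      # one 16-bin embedding per feature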
def compute_nmf_embeddings(Xt, rank):
"""
Note: torchnmf computes V = H W^T instead of the standard formula V = W H
Input
- V (D x N)
- rank of NMF
Returns
- H (D x r) (torch.Parameter with requires_grad=True), where each row represents one gene embedding
"""
print("Approximating V = H W.T")
print(f"Input V has shape {Xt.shape}")
assert type(Xt)==torch.Tensor
assert Xt.shape[0] > Xt.shape[1]
nmf = NMF(Xt.shape, rank=rank).cuda()
nmf.fit(Xt.cuda(), beta=2, max_iter=1000, verbose=True) # beta=2 corresponds to the Frobenius norm, which is equivalent to an additive Gaussian noise model
print(f"H has shape {nmf.H.shape}")
print(f"W.T has shape {nmf.W.T.shape}")
return nmf.H, nmf.W
def compute_svd_embeddings(X, rank=None):
"""
- X (N x D)
- rank (int): rank of the approximation (i.e., size of the embedding)
"""
assert type(X)==torch.Tensor
assert X.shape[0] < X.shape[1]
U, S, Vh = torch.linalg.svd(X, full_matrices=False)
V = Vh.T
if rank:
    S = S[:rank]
    V = V[:, :rank] # keep the first `rank` components of every feature embedding
return V, S
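# Shape sketch (illustration only): each row of V is one feature embedding,
# truncated to the first `rank` singular directions.
def _demo_svd_embeddings():
    X = torch.randn(10, 50)                          # needs N < D per the assert above
    V, S = compute_svd_embeddings(X, rank=8)
    assert V.shape == (50, 8) and S.shape == (8,)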
############### DATASETS ###############
class DatasetModule(pl.LightningDataModule):
def __init__(self, args, X_train, y_train, X_valid, y_valid, X_test, y_test):
super().__init__()
self.args = args
# Standardize data
self.X_train_raw = X_train
self.X_valid_raw = X_valid
self.X_test_raw = X_test
X_train, X_valid, X_test = standardize_data(X_train, X_valid, X_test, args.patient_preprocessing)
self.X_train = X_train
self.y_train = y_train
self.X_valid = X_valid
self.y_valid = y_valid
self.X_test = X_test
self.y_test = y_test
self.train_dataset = CustomPytorchDataset(X_train, y_train)
self.valid_dataset = CustomPytorchDataset(X_valid, y_valid)
self.test_dataset = CustomPytorchDataset(X_test, y_test)
self.args.train_size = X_train.shape[0]
self.args.valid_size = X_valid.shape[0]
self.args.test_size = X_test.shape[0]
def train_dataloader(self):
return DataLoader(self.train_dataset, batch_size=self.args.batch_size, shuffle=True, drop_last=True,
num_workers=self.args.num_workers, pin_memory=self.args.pin_memory)
def val_dataloader(self):
return DataLoader(self.valid_dataset, batch_size=self.args.batch_size,
num_workers=self.args.num_workers, pin_memory=self.args.pin_memory)
def test_dataloader(self):
return DataLoader(self.test_dataset, batch_size=self.args.batch_size,
num_workers=self.args.num_workers, pin_memory=self.args.pin_memory)
def get_embedding_matrix(self, embedding_type, embedding_size):
"""
Return matrix D x M
Uses the shared hyper-parameter self.args.embedding_preprocessing.
"""
if embedding_type is None:
    return None
else:
    if embedding_size is None:
        raise Exception("embedding_size must be provided when embedding_type is set")
# Preprocess the data for the embeddings
if self.args.embedding_preprocessing == 'raw':
X_for_embeddings = self.X_train_raw
elif self.args.embedding_preprocessing == 'z_score':
X_for_embeddings = StandardScaler().fit_transform(self.X_train_raw)
elif self.args.embedding_preprocessing == 'minmax':
X_for_embeddings = MinMaxScaler().fit_transform(self.X_train_raw)
else:
raise Exception("embedding_preprocessing not supported")
if embedding_type == 'histogram':
"""
Embedding similar to FsNet
"""
embedding_matrix = compute_histogram_embedding(self.args, X_for_embeddings, embedding_size)
return torch.tensor(embedding_matrix.copy(), dtype=torch.float32, requires_grad=False)
elif embedding_type=='feature_values':
"""
A gene's embedding are its patients gene expressions.
"""
embedding_matrix = np.rot90(X_for_embeddings)[:, :embedding_size]
return torch.tensor(embedding_matrix.copy(), dtype=torch.float32, requires_grad=False)
elif embedding_type=='svd':
# Vh.T (4160 x rank) contains the gene embeddings on each row
U, S, Vh = torch.linalg.svd(torch.tensor(X_for_embeddings, dtype=torch.float32), full_matrices=False)
Vh.T.requires_grad = False
return Vh.T[:, :embedding_size].type(torch.float32)
elif embedding_type=='nmf':
H, _ = compute_nmf_embeddings(torch.tensor(X_for_embeddings).T, rank=embedding_size)
H_data = H.data
H_data.requires_grad = False
return H_data.type(torch.float32)
else:
raise Exception("Invalid embedding type")
def create_data_module(args):
dataset = args.dataset
if dataset in ['metabric-pam50', 'metabric-dr', 'tcga-2ysurvival', 'tcga-tumor-grade']:
dataset_size = 200
if dataset=='metabric-pam50':
dataset_path = os.path.join(DATA_DIR, f'Metabric_samples/metabric_pam50_train_{dataset_size}.csv')
elif dataset=='metabric-dr':
dataset_path = os.path.join(DATA_DIR, f'Metabric_samples/metabric_DR_train_{dataset_size}.csv')
elif dataset=='tcga-2ysurvival':
dataset_path = os.path.join(DATA_DIR, f'TCGA_samples/tcga_2ysurvival_train_{dataset_size}.csv')
elif dataset=='tcga-tumor-grade':
dataset_path = os.path.join(DATA_DIR, f'TCGA_samples/tcga_tumor_grade_train_{dataset_size}.csv')
X, y = load_csv_data(dataset_path)
else:
if dataset=='lung':
X, y = load_lung()
elif dataset=='toxicity':
X, y = load_toxicity()
elif dataset=='prostate':
X, y = load_prostate()
elif dataset=='cll':
X, y = load_cll()
elif dataset=='smk':
X, y = load_smk()
elif dataset=='your_custom_dataset':
X, y = load_your_custom_dataset()
else:
raise Exception("Dataset not supported")
args.dataset_size = X.shape[0]
data_module = create_datamodule_with_cross_validation(args, X, y)
#### Compute classification loss weights
if args.class_weight=='balanced':
args.class_weights = compute_class_weight(class_weight='balanced', classes=np.unique(data_module.y_train), y=data_module.y_train)
elif args.class_weight=='standard':
args.class_weights = compute_class_weight(class_weight=None, classes=np.unique(data_module.y_train), y=data_module.y_train)
args.class_weights = args.class_weights.astype(np.float32)
print(f"Weights for the classification loss: {args.class_weights}")
return data_module
def create_datamodule_with_cross_validation(args, X, y):
"""
Split X, y to be suitable for k-fold cross-validation.
It uses args.test_split to create the train, valid and test stratified datasets.
"""
if type(X)==pd.DataFrame:
X = X.to_numpy()
if type(y)==pd.Series:
y = y.to_numpy()
args.num_features = X.shape[1]
args.num_classes = len(np.unique(y))
assert type(X)==np.ndarray
assert type(y)==np.ndarray
# seed_kfold defines the data shuffling
# --- args.seed_kfold = args.repeat_id ---> args.repeat_id defines the data shuffling
# args.test_split defines the fold to pick
X_train_and_valid, X_test, y_train_and_valid, y_test = compute_stratified_splits(
X, y, cv_folds=args.cv_folds, seed_kfold=args.seed_kfold, split_id=args.test_split)
# randomly pick a validation set from the training_and_val data
X_train, X_valid, y_train, y_valid = train_test_split(
X_train_and_valid, y_train_and_valid,
test_size = args.valid_percentage,
random_state = args.seed_validation,
stratify = y_train_and_valid
)
print(f"Train size: {X_train.shape[0]}\n")
print(f"Valid size: {X_valid.shape[0]}\n")
print(f"Test size: {X_test.shape[0]}\n")
assert X_train.shape[0] + X_valid.shape[0] + X_test.shape[0] == X.shape[0]
assert set(y_train).union(set(y_valid)).union(set(y_test)) == set(y)
if args.train_on_full_data:
# Train on the entire training set (train + validation)
# Make validation and test sets the same
return DatasetModule(args, X_train_and_valid, y_train_and_valid, X_test, y_test, X_test, y_test)
else:
return DatasetModule(args, X_train, y_train, X_valid, y_valid, X_test, y_test) | 12,510 | 28.231308 | 155 | py |
WPFS | WPFS-main/src/weight_predictor_network.py | from torch import nn
class WeightPredictorNetwork(nn.Module):
def __init__(self, args, embedding_matrix):
"""
WPN outputs a "virtual" weight matrix W
:param nn.Tensor(D, M) embedding_matrix: matrix with the embeddings (D = number of features, M = embedding size)
"""
super().__init__()
print(f"Initializing WPN with embedding_matrix of size {embedding_matrix.size()}")
self.args = args
self.register_buffer('embedding_matrix', embedding_matrix) # store the static embedding_matrix
layers = []
prev_dimension = args.wpn_embedding_size
for i, dim in enumerate(args.wpn_layers):
if i == len(args.wpn_layers)-1: # last layer
layer = nn.Linear(prev_dimension, dim)
nn.init.uniform_(layer.weight, -0.01, 0.01) # same initialization as in the Diet Network paper official implementation
layers.append(layer)
layers.append(nn.Tanh())
else:
layer = nn.Linear(prev_dimension, dim)
nn.init.kaiming_normal_(layer.weight, a=0.01, mode='fan_out', nonlinearity='leaky_relu')
layers.append(layer)
layers.append(nn.LeakyReLU())
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.Dropout(args.dropout_rate))
prev_dimension = dim
self.wpn = nn.Sequential(*layers)
def forward(self):
W = self.wpn(self.embedding_matrix) # W has size (D x K)
return W.T # size K x D | 1,342 | 32.575 | 123 | py |
WPFS | WPFS-main/src/models.py | import torch
from torch import nn
import torch.nn.functional as F
import pytorch_lightning as pl
import numpy as np
from sklearn.metrics import balanced_accuracy_score
from sparsity_network import SparsityNetwork
from weight_predictor_network import WeightPredictorNetwork
def get_labels_lists(outputs):
all_y_true, all_y_pred = [], []
for output in outputs:
all_y_true.extend(output['y_true'].detach().cpu().numpy().tolist())
all_y_pred.extend(output['y_pred'].detach().cpu().numpy().tolist())
return all_y_true, all_y_pred
def compute_all_metrics(args, y_true, y_pred):
metrics = {}
metrics['balanced_accuracy'] = balanced_accuracy_score(y_true, y_pred)
return metrics
def detach_tensors(tensors):
"""
Detach losses
"""
if type(tensors)==list:
detached_tensors = list()
for tensor in tensors:
detached_tensors.append(tensor.detach())
elif type(tensors)==dict:
detached_tensors = dict()
for key, tensor in tensors.items():
detached_tensors[key] = tensor.detach()
else:
raise Exception("tensors must be a list or a dict")
return detached_tensors
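# Usage sketch (illustration only): detaching a dict of losses before logging,
# so no autograd graph references are kept alive.
def _demo_detach_tensors():
    loss = torch.tensor(1.0, requires_grad=True) * 2
    out = detach_tensors({'total': loss})
    assert out['total'].requires_grad is False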
def reshape_batch(batch):
"""
When the dataloaders create multiple samples from one original sample, the input has size (batch_size, no_samples, D)
This function reshapes the input from (batch_size, no_samples, D) to (batch_size * no_samples, D)
"""
x, y = batch
x = x.reshape(-1, x.shape[-1])
y = y.reshape(-1)
return x, y
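# Shape sketch (illustration only): (batch, no_samples, D) collapses to
# (batch * no_samples, D) and the labels flatten to match.
def _demo_reshape_batch():
    x, y = torch.zeros(4, 3, 10), torch.zeros(4, 3, dtype=torch.long)
    x2, y2 = reshape_batch((x, y))
    assert x2.shape == (12, 10) and y2.shape == (12,)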
def create_model(args, data_module=None):
"""
Function to create the model. It firstly creates the components (e.g., FeatureExtractor)
and then assembles them
"""
pl.seed_everything(args.seed_model_init, workers=True)
### create embedding matrices
wpn_embedding_matrix = data_module.get_embedding_matrix(args.wpn_embedding_type, args.wpn_embedding_size)
if args.wpn_embedding_type==args.sparsity_gene_embedding_type and args.wpn_embedding_size==args.sparsity_gene_embedding_size:
spn_embedding_matrix = wpn_embedding_matrix
else:
spn_embedding_matrix = data_module.get_embedding_matrix(args.sparsity_gene_embedding_type, args.sparsity_gene_embedding_size)
#### Create model instance
if args.model == 'mlp':
first_layer = FirstLinearLayer(args, is_diet_layer=False, sparsity=None)
return GeneralNeuralNetwork(args, first_layer, None)
if args.model == 'dietnetworks':
first_layer = FirstLinearLayer(args, is_diet_layer=True, sparsity=None, wpn_embedding_matrix=wpn_embedding_matrix)
decoder = Decoder(args, WeightPredictorNetwork(args, wpn_embedding_matrix))
return GeneralNeuralNetwork(args, first_layer, decoder)
elif args.model == 'wpfs':
assert args.feature_extractor_dims[0] == args.wpn_layers[-1], "The output size of WPN must be the same as the first layer of the feature extractor."
assert data_module != None, "You must specify a data_module to compute the feature embeddings"
first_layer = FirstLinearLayer(args, is_diet_layer=True, sparsity=True,
wpn_embedding_matrix=wpn_embedding_matrix, spn_embedding_matrix=spn_embedding_matrix)
return GeneralNeuralNetwork(args, first_layer, None)
elif args.model=='cae': # Supervised Autoencoder
concrete_layer = ConcreteLayer(args, args.num_features, args.feature_extractor_dims[0])
return GeneralNeuralNetwork(args, concrete_layer, None)
elif args.model=='fsnet':
concrete_layer = ConcreteLayer(args, args.num_features, args.feature_extractor_dims[0],
is_diet_layer=True, wpn_embedding_matrix=wpn_embedding_matrix)
decoder = Decoder(args, WeightPredictorNetwork(args, wpn_embedding_matrix))
return GeneralNeuralNetwork(args, concrete_layer, decoder)
else:
raise Exception("Model not implemented")
def create_linear_layers(args, layer_sizes, layers_for_hidden_representation):
"""
Args
- layer_sizes: list of the sizes of the sizes of the linear layers
- layers_for_hidden_representation: number of layers of the first part of the encoder (used to output the input for the decoder)
Returns
Two lists of Pytorch Modules (e.g., Linear, BatchNorm1d, Dropout)
- encoder_first_part
- encoder_second_part
"""
encoder_first_part = []
encoder_second_part = []
for i, (dim_prev, dim) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
if i < layers_for_hidden_representation: # first part of the encoder
encoder_first_part.append(nn.Linear(dim_prev, dim))
encoder_first_part.append(nn.LeakyReLU())
encoder_first_part.append(nn.BatchNorm1d(dim))
encoder_first_part.append(nn.Dropout(args.dropout_rate))
else: # second part of the encoder
encoder_second_part.append(nn.Linear(dim_prev, dim))
encoder_second_part.append(nn.LeakyReLU())
encoder_second_part.append(nn.BatchNorm1d(dim))
encoder_second_part.append(nn.Dropout(args.dropout_rate))
return encoder_first_part, encoder_second_part
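# Usage sketch (illustration only; argparse.Namespace stands in for the real
# args object): sizes [50, 100, 100, 10] with 2 hidden-representation layers
# put two Linear blocks in the first part and one in the second.
def _demo_create_linear_layers():
    import argparse
    stub = argparse.Namespace(dropout_rate=0.1)
    first, second = create_linear_layers(stub, [50, 100, 100, 10], 2)
    assert sum(isinstance(m, nn.Linear) for m in first) == 2
    assert sum(isinstance(m, nn.Linear) for m in second) == 1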
class FirstLinearLayer(nn.Module):
"""
First linear layer (with activation, batchnorm and dropout), with the ability to include:
- diet layer (i.e., there's a weight predictor network which predicts the weight matrix)
- sparsity network (i.e., there's a sparsity network which outputs sparsity weights)
"""
def __init__(self, args, is_diet_layer, sparsity, wpn_embedding_matrix=None, spn_embedding_matrix=None):
"""
If is_diet_layer==None and sparsity==None, this layers acts as a standard linear layer
"""
super().__init__()
self.args = args
self.is_diet_layer = is_diet_layer
self.sparsity = sparsity
# DIET LAYER
if is_diet_layer:
# if diet layer, then initialize a weight predictor network
self.wpn = WeightPredictorNetwork(args, wpn_embedding_matrix)
else:
# standard linear layer
self.weights_first_layer = nn.Parameter(torch.zeros(args.feature_extractor_dims[0], args.num_features))
nn.init.kaiming_normal_(self.weights_first_layer, a=0.01, mode='fan_out', nonlinearity='leaky_relu')
# auxiliary layer after the matrix multiplication
self.bias_first_layer = nn.Parameter(torch.zeros(args.feature_extractor_dims[0]))
self.layers_after_matrix_multiplication = nn.Sequential(*[
nn.LeakyReLU(),
nn.BatchNorm1d(args.feature_extractor_dims[0]),
nn.Dropout(args.dropout_rate)
])
# SPARSITY REGULARIZATION for the first layer
if sparsity:
print("Creating Sparsity network")
self.sparsity_model = SparsityNetwork(args, spn_embedding_matrix)
else:
self.sparsity_model = None
def forward(self, x):
"""
Input:
x: (batch_size x num_features)
"""
# first layer
W = self.wpn() if self.is_diet_layer else self.weights_first_layer # W has size (K x D)
if self.sparsity_model is None:
all_sparsity_weights = None
hidden_rep = F.linear(x, W, self.bias_first_layer)
else:
all_sparsity_weights = self.sparsity_model() # Tensor (D, )
assert all_sparsity_weights.shape[0]==self.args.num_features and len(all_sparsity_weights.shape)==1
W = torch.matmul(W, torch.diag(all_sparsity_weights))
hidden_rep = F.linear(x, W, self.bias_first_layer)
return self.layers_after_matrix_multiplication(hidden_rep), all_sparsity_weights
class ConcreteLayer(nn.Module):
"""
Implementation of a concrete layer from paper "Concrete Autoencoders for Differentiable Feature Selection and Reconstruction"
"""
def __init__(self, args, input_dim, output_dim, is_diet_layer=False, wpn_embedding_matrix=None):
"""
- input_dim (int): dimension of the input
- output_dim (int): number of neurons in the layer
"""
super().__init__()
self.args = args
self.input_dim = input_dim
self.output_dim = output_dim
self.temp_start = 10
self.temp_end = 0.01
# the iteration is used in annealing the temperature
# it's increased with every call to sample during training
self.current_iteration = 0
self.anneal_iterations = args.concrete_anneal_iterations # maximum number of iterations for the temperature optimization
self.is_diet_layer = is_diet_layer
if is_diet_layer:
# if diet layer, then initialize a weight predictor matrix
assert wpn_embedding_matrix is not None
self.wpn = WeightPredictorNetwork(args, wpn_embedding_matrix)
else:
# alphas (output_dim x input_dim) - learnable parameters for each neuron
# alphas[i] = parameters of neuron i
self.alphas = nn.Parameter(torch.zeros(output_dim, input_dim), requires_grad=True)
torch.nn.init.xavier_normal_(self.alphas, gain=1) # Glorot normalization, following the original CAE implementation
def get_temperature(self):
# compute temperature
if self.current_iteration >= self.anneal_iterations:
return self.temp_end
else:
return self.temp_start * (self.temp_end / self.temp_start) ** (self.current_iteration / self.anneal_iterations)
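# Annealing sketch (illustration only): the schedule is geometric,
#   T(t) = temp_start * (temp_end / temp_start) ** (t / anneal_iterations)
@staticmethod
def _demo_temperature_schedule():
    # Hypothetical check: the halfway temperature of a 10 -> 0.01 schedule
    # equals the geometric mean of the endpoints, sqrt(10 * 0.01) ~= 0.316.
    midpoint = 10 * (0.01 / 10) ** 0.5
    assert abs(midpoint - (10 * 0.01) ** 0.5) < 1e-12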
def sample(self):
"""
Sample from the concrete distribution.
"""
# Increase the iteration counter during training
if self.training:
self.current_iteration += 1
temperature = self.get_temperature()
alphas = self.wpn() if self.is_diet_layer else self.alphas # alphas is a K x D matrix
# sample from the concrete distribution
if self.training:
samples = F.gumbel_softmax(alphas, tau=temperature, hard=False) # size K x D
assert samples.shape == (self.output_dim, self.input_dim)
else: # sample using argmax
index_max_alphas = torch.argmax(alphas, dim=1) # size K
samples = torch.zeros(self.output_dim, self.input_dim).cuda()
samples[torch.arange(self.output_dim), index_max_alphas] = 1.
return samples
def forward(self, x):
"""
- x (batch_size x input_dim)
"""
mask = self.sample() # size (number_neurons x input_dim)
x = torch.matmul(x, mask.T) # size (batch_size, number_neurons)
return x, None # return additional None for compatibility
class Decoder(nn.Module):
def __init__(self, args, wpn):
super().__init__()
assert wpn!=None, "The decoder is used only with a WPN (because it's only used within the DietNetwork)"
self.wpn = wpn
self.bias = nn.Parameter(torch.zeros(args.num_features,))
def forward(self, hidden_rep):
W = self.wpn().T # W has size D x K
return F.linear(hidden_rep, W, self.bias)
class GeneralNeuralNetwork(pl.LightningModule):
def __init__(self, args, first_layer, decoder):
"""
General neural network that can be instantiated as WPFS, DietNetwork, FsNet or CAE
:param args: arguments from the command line
:param first_layer: first layer of the network (it can be a Linear layer, a Concrete layer, or a DietLayer)
:param decoder: decoder of the network (optional, used only for DietNetworks and FsNet)
"""
super().__init__()
self.args = args
self.log_test_key = None
self.learning_rate = args.lr
self.first_layer = first_layer
encoder_first_layers, encoder_second_layers = create_linear_layers(
args, args.feature_extractor_dims, args.layers_for_hidden_representation-1) # the -1 in (args.layers_for_hidden_representation - 1) is because we don't consider the first layer
self.encoder_first_layers = nn.Sequential(*encoder_first_layers)
self.encoder_second_layers = nn.Sequential(*encoder_second_layers)
self.classification_layer = nn.Linear(args.feature_extractor_dims[-1], args.num_classes)
self.decoder = decoder
def forward(self, x):
x, sparsity_weights = self.first_layer(x) # pass through first layer
x = self.encoder_first_layers(x) # pass throught the first part of the following layers
x_hat = self.decoder(x) if self.decoder else None # reconstruction
x = self.encoder_second_layers(x)
y_hat = self.classification_layer(x) # classification, returns logits
return y_hat, x_hat, sparsity_weights
def compute_loss(self, y_true, y_hat, x, x_hat, sparsity_weights):
losses = {}
losses['cross_entropy'] = F.cross_entropy(input=y_hat, target=y_true, weight=torch.tensor(self.args.class_weights, device=self.device))
losses['reconstruction'] = self.args.gamma * F.mse_loss(x_hat, x, reduction='mean') if self.decoder else torch.zeros(1, device=self.device)
### sparsity loss
if sparsity_weights is None:
losses['sparsity'] = torch.tensor(0., device=self.device)
else:
losses['sparsity'] = self.args.sparsity_regularizer_hyperparam * torch.sum(sparsity_weights)
losses['total'] = losses['cross_entropy'] + losses['reconstruction'] + losses['sparsity']
return losses
def log_losses(self, losses, key, dataloader_name=""):
self.log(f"{key}/total_loss{dataloader_name}", losses['total'].item())
self.log(f"{key}/reconstruction_loss{dataloader_name}", losses['reconstruction'].item())
self.log(f"{key}/cross_entropy_loss{dataloader_name}", losses['cross_entropy'].item())
self.log(f"{key}/sparsity_loss{dataloader_name}", losses['sparsity'].item())
def log_epoch_metrics(self, outputs, key, dataloader_name=""):
y_true, y_pred = get_labels_lists(outputs)
self.log(f'{key}/balanced_accuracy{dataloader_name}', balanced_accuracy_score(y_true, y_pred))
def training_step(self, batch, batch_idx):
x, y_true = batch
y_hat, x_hat, sparsity_weights = self.forward(x)
losses = self.compute_loss(y_true, y_hat, x, x_hat, sparsity_weights)
self.log_losses(losses, key='train')
return {
'loss': losses['total'],
'losses': detach_tensors(losses),
'y_true': y_true,
'y_pred': torch.argmax(y_hat, dim=1)
}
def training_epoch_end(self, outputs):
self.log_epoch_metrics(outputs, 'train')
def validation_step(self, batch, batch_idx, dataloader_idx=0):
"""
- dataloader_idx (int) tells which dataloader is the `batch` coming from
"""
x, y_true = reshape_batch(batch)
y_hat, x_hat, sparsity_weights = self.forward(x)
losses = self.compute_loss(y_true, y_hat, x, x_hat, sparsity_weights)
return {
'losses': detach_tensors(losses),
'y_true': y_true,
'y_pred': torch.argmax(y_hat, dim=1)
}
def validation_epoch_end(self, outputs):
losses = {
'total': np.mean([output['losses']['total'].item() for output in outputs]),
'cross_entropy': np.mean([output['losses']['cross_entropy'].item() for output in outputs]),
'sparsity': np.mean([output['losses']['sparsity'].item() for output in outputs]),
'reconstruction': np.mean([output['losses']['reconstruction'].item() for output in outputs])
}
self.log_losses(losses, key='valid')
self.log_epoch_metrics(outputs, key='valid')
def test_step(self, batch, batch_idx, dataloader_idx=0):
x, y_true = reshape_batch(batch)
y_hat, x_hat, sparsity_weights = self.forward(x)
losses = self.compute_loss(y_true, y_hat, x, x_hat, sparsity_weights)
return {
'losses': detach_tensors(losses),
'y_true': y_true,
'y_pred': torch.argmax(y_hat, dim=1),
'y_hat': y_hat.detach().cpu().numpy(),
}
def test_epoch_end(self, outputs):
### Save losses
losses = {
'total': np.mean([output['losses']['total'].item() for output in outputs]),
'cross_entropy': np.mean([output['losses']['cross_entropy'].item() for output in outputs]),
'sparsity': np.mean([output['losses']['sparsity'].item() for output in outputs]),
'reconstruction': np.mean([output['losses']['reconstruction'].item() for output in outputs])
}
self.log_losses(losses, key=self.log_test_key)
self.log_epoch_metrics(outputs, self.log_test_key)
def configure_optimizers(self):
params = self.parameters()
optimizer = torch.optim.AdamW(params, lr=self.learning_rate, weight_decay=self.args.weight_decay, betas=[0.9, 0.98])
if self.args.lr_scheduler is None:
return optimizer
else:
if self.args.lr_scheduler == 'cosine_warm_restart':
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer,
T_0 = self.args.cosine_warm_restart_t_0,
eta_min = self.args.cosine_warm_restart_eta_min,
verbose=True)
elif self.args.lr_scheduler == 'lambda':
def scheduler(epoch):
if epoch < 500:
return 0.995 ** epoch
else:
return 0.1
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer,
scheduler)
else:
raise Exception()
return {
'optimizer': optimizer,
'lr_scheduler': {
'scheduler': lr_scheduler,
'monitor': 'valid/cross_entropy_loss',
'interval': 'step',
'frequency': self.args.val_check_interval,
'name': 'lr_scheduler'
}
} | 16,152 | 34.423246 | 179 | py |
outbrain-click-prediction-kaggle | outbrain-click-prediction-kaggle-master/5_best_mtv_features_xgb.py | import os
import pandas as pd
import numpy as np
import xgboost as xgb
import feather
df_all = feather.read_dataframe('tmp/clicks_train_50_50.feather')
df_test = feather.read_dataframe('tmp/clicks_test.feather')
df_train_0 = df_all[df_all.fold == 0].reset_index(drop=1)
df_train_1 = df_all[df_all.fold == 1].reset_index(drop=1)
del df_train_0['fold'], df_train_1['fold'], df_all
features = list(pd.read_csv('categorical_features.txt', header=None)[0])
# training a small model to select best features
# first, load the data
df_train = df_train_0[:2000000].copy()
df_val = df_train_1[:1000000].copy()
del df_train_0, df_train_1
for f in features:
print('loading data for %s...' % f)
pred_0 = 'features/mte/%s_pred_0.npy' % f
pred_1 = 'features/mte/%s_pred_1.npy' % f
rank_0 = 'features/mte/%s_pred_rank_0.npy' % f
rank_1 = 'features/mte/%s_pred_rank_1.npy' % f
df_train[f] = np.load(pred_0)[:2000000]
df_val[f] = np.load(pred_1)[:1000000]
df_train[f + '_rank'] = np.load(rank_0)[:2000000]
df_val[f + '_rank'] = np.load(rank_1)[:1000000]
ignore = {'display_id', 'ad_id', 'clicked'}
columns = sorted(set(df_train.columns) - ignore)
X_t = df_train[columns].values
y_t = df_train.clicked.values
X_v = df_val[columns].values
y_v = df_val.clicked.values
dtrain = xgb.DMatrix(X_t, y_t, feature_names=columns)
dval = xgb.DMatrix(X_v, y_v, feature_names=columns)
watchlist = [(dtrain, 'train'), (dval, 'val')]
del X_t, X_v, y_t, y_v
# train a small model and save only important features
xgb_pars = {
'eta': 0.3,
'gamma': 0.0,
'max_depth': 6,
'min_child_weight': 100,
'max_delta_step': 0,
'subsample': 1,
'colsample_bytree': 0.6,
'colsample_bylevel': 1,
'lambda': 1,
'alpha': 0,
'tree_method': 'approx',
'objective': 'binary:logistic',
'eval_metric': 'auc',
'nthread': 12,
'seed': 42,
'silent': 1
}
model = xgb.train(xgb_pars, dtrain, num_boost_round=20, verbose_eval=1,
evals=watchlist)
scores = model.get_score(importance_type='gain')
useful_features = [f for (f, s) in scores.items() if s >= 50.0]
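# keep only features whose total split gain is at least 50 -- a heuristic
# cutoff for "useful enough" in the quick selection model above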
# now let's put everything together in a data frame and save the result
# (reload the full folds first -- they were freed above to save memory)
df_all = feather.read_dataframe('tmp/clicks_train_50_50.feather')
df_train_0 = df_all[df_all.fold == 0].reset_index(drop=1)
df_train_1 = df_all[df_all.fold == 1].reset_index(drop=1)
del df_train_0['fold'], df_train_1['fold'], df_all
for f in useful_features:
if '_rank' in f:
base_name = f[:-5] + '_pred_rank'
else:
base_name = f + '_pred'
df_train_0[f] = np.load('features/mtv/%s_0.npy' % base_name)
df_train_1[f] = np.load('features/mtv/%s_1.npy' % base_name)
df_test[f] = np.load('features/mtv/%s_test.npy' % base_name)
# also add the doc features
df_train_0_doc = feather.read_dataframe('features/docs_df_train_0.feather')
df_train_1_doc = feather.read_dataframe('features/docs_df_train_1.feather')
df_test_doc = feather.read_dataframe('features/docs_df_test.feather')
doc_features = ['doc_idf_dot', 'doc_idf_dot_lsa', 'doc_idf_cos',
'doc_idf_dot_rank', 'doc_idf_dot_lsa_rank', 'doc_idf_cos_rank']
for f in doc_features:
df_train_0[f] = df_train_0_doc[f]
df_train_1[f] = df_train_1_doc[f]
df_test[f] = df_test_doc[f]
df_train_0['doc_known_views'] = np.load('features/doc_known_views_0.npy')
df_train_1['doc_known_views'] = np.load('features/doc_known_views_1.npy')
df_test['doc_known_views'] = np.load('features/doc_known_views_test.npy')
# now save everything
feather.write_dataframe(df_train_0, 'tmp/mtv_df_train_0.feather')
feather.write_dataframe(df_train_1, 'tmp/mtv_df_train_1.feather')
feather.write_dataframe(df_test, 'tmp/mtv_df_test.feather')
| 3,479 | 27.52459 | 79 | py |
outbrain-click-prediction-kaggle | outbrain-click-prediction-kaggle-master/4_categorical_data_join.py | # coding: utf-8
import os
import pandas as pd
import numpy as np
import xgboost as xgb
import feather
from tqdm import tqdm
from sklearn.preprocessing import LabelEncoder
from itertools import combinations
df_all = feather.read_dataframe('tmp/clicks_train_50_50.feather')
df_test = feather.read_dataframe('tmp/clicks_test.feather')
# event features:
# - geo
# - time
# - user
# - platform
df_display = pd.read_csv('../data/events.csv')
df_display.geo_location.fillna('', inplace=1)
# geo features
df_geo = df_display.geo_location.str.split('>', expand=True)
df_geo.fillna('*', inplace=1)
df_geo.columns = ['geo_0', 'geo_1', 'geo_2']
del df_geo['geo_2']
df_geo['geo_second_lev'] = df_geo['geo_0'] + '>' + df_geo['geo_1']
del df_geo['geo_1']
df_display['geo_0'] = df_geo['geo_0']
df_display['geo_1'] = df_geo['geo_second_lev']
df_display.rename(columns={'geo_location': 'geo_2'}, inplace=1)
del df_geo
# time features
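# dataset timestamps are relative; adding 1465876799998 converts them to real
# epoch milliseconds (~2016-06-14 04:00 UTC), /1000 gives unix seconds, and
# the -4h shift presumably moves UTC toward US Eastern time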
ts = (df_display.timestamp + 1465876799998) / 1000 - (4 * 60 * 60)
df_display.timestamp = pd.to_datetime(ts, unit='s')
dt = df_display.timestamp.dt
df_display['day'] = dt.dayofweek.astype('str')
df_display['hour'] = dt.hour.astype('str')
del df_display['timestamp'], dt, ts
# platform
df_display.platform = df_display.platform.astype('str')
del df_display['display_id']
# user: convert to base 32 to occupy less space
df_display['user_id'] = LabelEncoder().fit_transform(df_display.uuid)
del df_display['uuid']
def base32(i):
return np.base_repr(i, base=32)
df_display['user_id'] = df_display['user_id'].apply(base32)
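# e.g. base32(123456) == '3OI0' -- noticeably shorter than the decimal string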
# document features:
# - top category
# - top entity
# - top topic
# - meta: publisher, source
df_ads = pd.read_csv('../data/promoted_content.csv')
ad_to_idx = dict(zip(df_ads.ad_id, df_ads.index))
ads_docs = set(df_display.document_id)
ads_docs.update(df_ads.document_id)
# document categories
df_doc_cat = pd.read_csv('../data/documents_categories.csv')
df_doc_cat = df_doc_cat.drop_duplicates(subset='document_id', keep='first')
df_doc_cat = df_doc_cat[df_doc_cat.confidence_level >= 0.8]
df_doc_cat = df_doc_cat[df_doc_cat.document_id.isin(ads_docs)]
cat_counts = df_doc_cat.category_id.value_counts()
freq_cats = set(cat_counts[cat_counts >= 5].index)
df_doc_cat = df_doc_cat[df_doc_cat.category_id.isin(freq_cats)]
doc_top_cat = dict(zip(df_doc_cat.document_id, df_doc_cat.category_id))
del freq_cats, cat_counts, df_doc_cat
# document entities: hash them to occupy less space
D = 2 ** 24
def entity_name_reduce(entity):
return '%x' % abs(hash(entity) % D)
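# hashing trick: entity ids are mapped into D = 2**24 buckets; occasional
# collisions are acceptable and the hex string is much more compact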
df_doc_entities = pd.read_csv('../data/documents_entities.csv')
df_doc_entities = df_doc_entities[df_doc_entities.confidence_level >= 0.8]
df_doc_entities = df_doc_entities[df_doc_entities.document_id.isin(ads_docs)]
df_doc_entities = df_doc_entities.drop_duplicates(subset='document_id', keep='first')
df_doc_entities = df_doc_entities.reset_index(drop=1)
df_doc_entities.entity_id = df_doc_entities.entity_id.apply(entity_name_reduce)
entity_counts = df_doc_entities.entity_id.value_counts()
freq_entities = set(entity_counts[entity_counts >= 5].index)
df_doc_entities = df_doc_entities[df_doc_entities.entity_id.isin(freq_entities)]
doc_top_entity = dict(zip(df_doc_entities.document_id, df_doc_entities.entity_id))
del df_doc_entities, entity_counts, freq_entities
# document topics
df_doc_topics = pd.read_csv('../data/documents_topics.csv')
df_doc_topics = df_doc_topics[df_doc_topics.confidence_level >= 0.8]
df_doc_topics = df_doc_topics[df_doc_topics.document_id.isin(ads_docs)]
df_doc_topics = df_doc_topics.drop_duplicates(subset='document_id', keep='first')
df_doc_topics = df_doc_topics.reset_index(drop=1)
topic_cnt = df_doc_topics.topic_id.value_counts()
freq_topics = set(topic_cnt[topic_cnt >= 5].index)
df_doc_topics = df_doc_topics[df_doc_topics.topic_id.isin(freq_topics)]
doc_top_topic = dict(zip(df_doc_topics.document_id, df_doc_topics.topic_id))
del df_doc_topics, topic_cnt, freq_topics
# document meta info
df_doc_meta = pd.read_csv('../data/documents_meta.csv')
df_doc_meta = df_doc_meta[df_doc_meta.document_id.isin(ads_docs)]
del df_doc_meta['publish_time']
df_doc_meta.source_id.fillna(0, inplace=1)
df_doc_meta.source_id = df_doc_meta.source_id.astype('uint32')
df_doc_meta.publisher_id.fillna(0, inplace=1)
df_doc_meta.publisher_id = df_doc_meta.publisher_id.astype('uint32')
df_doc_meta = df_doc_meta.reset_index(drop=1)
meta_idx = dict(zip(df_doc_meta.document_id, df_doc_meta.index))
# to avoid confusion, let's rename document_id columns
df_display.rename(columns={'document_id': 'on_document_id'}, inplace=1)
df_ads.rename(columns={'document_id': 'ad_document_id'}, inplace=1)
# we will do everything in batches
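# prepare_batch joins clicks with event, ad and document features, then
# materializes interactions: every ad-side feature crossed with day/hour/geo,
# plus all pairwise combinations of ad-side features. It assumes display_id
# is 1-based and aligned with df_display's row order (hence iloc[display_id - 1])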
def prepare_batch(batch):
batch = batch.reset_index(drop=1)
batch_display = df_display.iloc[batch.display_id - 1].reset_index(drop=1)
batch_ad_ids = batch.ad_id.apply(ad_to_idx.get)
batch_ads = df_ads.iloc[batch_ad_ids].reset_index(drop=1)
del batch_ads['ad_id']
batch_meta_idx = batch_ads.ad_document_id.apply(meta_idx.get)
batch_ad_doc_meta = df_doc_meta.iloc[batch_meta_idx].reset_index(drop=1)
batch_ad_doc_meta['top_entity'] = \
batch_ad_doc_meta.document_id.apply(lambda did: doc_top_entity.get(did, 'unk'))
batch_ad_doc_meta['top_topic'] = \
batch_ad_doc_meta.document_id.apply(lambda did: doc_top_topic.get(did, 'unk'))
batch_ad_doc_meta['top_cat'] = \
batch_ad_doc_meta.document_id.apply(lambda did: doc_top_cat.get(did, 'unk'))
del batch_ad_doc_meta['document_id']
batch_ad_doc_meta.columns = ['ad_doc_%s' % c for c in batch_ad_doc_meta.columns]
batch_meta_idx = batch_display.on_document_id.apply(meta_idx.get)
batch_on_doc_meta = df_doc_meta.iloc[batch_meta_idx].reset_index(drop=1)
batch_on_doc_meta['top_entity'] = \
batch_on_doc_meta.document_id.apply(lambda did: doc_top_entity.get(did, 'unk'))
batch_on_doc_meta['top_topic'] = \
batch_on_doc_meta.document_id.apply(lambda did: doc_top_topic.get(did, 'unk'))
batch_on_doc_meta['top_cat'] = \
batch_on_doc_meta.document_id.apply(lambda did: doc_top_cat.get(did, 'unk'))
del batch_on_doc_meta['document_id']
batch_on_doc_meta.columns = ['on_doc_%s' % c for c in batch_on_doc_meta.columns]
joined_batch = pd.concat([batch, batch_ads, batch_display,
batch_ad_doc_meta, batch_on_doc_meta], axis=1)
for c in ['ad_doc_source_id', 'ad_doc_publisher_id', 'ad_document_id', 'ad_doc_top_cat',
'on_doc_source_id', 'on_doc_publisher_id', 'on_document_id', 'on_doc_top_cat',
'ad_id', 'campaign_id', 'advertiser_id']:
joined_batch[c] = joined_batch[c].astype('str')
joined_batch.fillna('unk', inplace=1)
all_features = set(joined_batch.columns) - {'clicked', 'fold', 'display_id'}
for c in sorted(all_features):
if 'on_doc' in c or 'geo' in c or c in {'day', 'hour', 'user_id', 'ad_id'}:
continue
for c2 in ['day', 'hour', 'geo_0', 'geo_1', 'geo_2']:
joined_batch['%s_%s' % (c, c2)] = joined_batch[c] + '_' + joined_batch[c2]
two_way_comb = sorted(all_features - {'day', 'hour', 'geo_0', 'geo_1', 'geo_2'})
combs = list(combinations(two_way_comb, 2))
for c1, c2 in combs:
if 'on_doc' in c1 and 'on_doc' in c2:
continue
joined_batch['%s_%s' % (c1, c2)] = joined_batch[c1].astype('str') + '_' + joined_batch[c2].astype('str')
return joined_batch
def append_to_csv(batch, csv_file):
props = dict(encoding='utf-8', index=False)
if not os.path.exists(csv_file):
batch.to_csv(csv_file, **props)
else:
batch.to_csv(csv_file, mode='a', header=False, **props)
def delete_file_if_exists(filename):
if os.path.exists(filename):
os.remove(filename)
def chunk_dataframe(df, n):
for i in range(0, len(df), n):
yield df.iloc[i:i+n]
# apply to train
df = feather.read_dataframe('tmp/clicks_train_50_50.feather')
delete_file_if_exists('tmp/categorical_joined_train.csv')
for batch in tqdm(chunk_dataframe(df, n=100000)):
batch = prepare_batch(batch)
append_to_csv(batch, 'tmp/categorical_joined_train.csv')
# apply to test
df = feather.read_dataframe('tmp/clicks_test.feather')
delete_file_if_exists('tmp/categorical_joined_test.csv')
for batch in tqdm(chunk_dataframe(df, n=100000)):
batch = prepare_batch(batch)
append_to_csv(batch, 'tmp/categorical_joined_test.csv')
| 8,560 | 29.906137 | 112 | py |
outbrain-click-prediction-kaggle | outbrain-click-prediction-kaggle-master/7_ensemble_xgb.py | import pandas as pd
import numpy as np
import xgboost as xgb
import feather
import gc
# prepare the data matrices
df_train_0 = feather.read_dataframe('tmp/df_train_0_ensemble.feather')
ignore = {'display_id', 'ad_id', 'clicked', 'fold'}
columns = sorted(set(df_train_0.columns) - ignore)
group0_sizes = df_train_0.display_id.value_counts(sort=False)
group0_sizes.sort_index(inplace=1)
group0_sizes = group0_sizes.values.astype('uint8')
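# rank:pairwise / map@12 need the number of rows per query group (display_id);
# value_counts + sort_index yields those counts, assuming the frame's rows are
# ordered by ascending display_id so counts align with groups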
y_0 = df_train_0.clicked.values
X_0 = df_train_0[columns].values
del df_train_0
gc.collect()
dfold0 = xgb.DMatrix(X_0, y_0, feature_names=columns)
dfold0.set_group(group0_sizes)
del X_0, y_0
gc.collect()
df_train_1 = feather.read_dataframe('tmp/df_train_1_ensemble.feather')
group1_sizes = df_train_1.display_id.value_counts(sort=False)
group1_sizes.sort_index(inplace=1)
group1_sizes = group1_sizes.values.astype('uint8')
y_1 = df_train_1.clicked.values
X_1 = df_train_1[columns].values
del df_train_1
gc.collect()
dfold1 = xgb.DMatrix(X_1, y_1, feature_names=columns)
dfold1.set_group(group1_sizes)
del X_1, y_1
gc.collect()
watchlist = [(dfold0, 'train'), (dfold1, 'val')]
# train the model
n_estimators = 1000
xgb_pars = {
'eta': 0.15,
'gamma': 0.0,
'max_depth': 8,
'min_child_weight': 1,
'max_delta_step': 0,
'subsample': 0.6,
'colsample_bytree': 0.6,
'colsample_bylevel': 1,
'lambda': 1,
'alpha': 0,
'tree_method': 'approx',
'objective': 'rank:pairwise',
'eval_metric': 'map@12',
'nthread': 12,
'seed': 42,
'silent': 1
}
# train the model
model = xgb.train(xgb_pars, dfold0, num_boost_round=n_estimators,
verbose_eval=1, evals=watchlist)
del dfold0, dfold1, watchlist
gc.collect()
# test predict
df_test = feather.read_dataframe('tmp/df_test_ensemble.feather')
group_test_sizes = df_test.display_id.value_counts(sort=False)
group_test_sizes.sort_index(inplace=1)
group_test_sizes = group_test_sizes.values.astype('uint8')
X_test = df_test[columns].values
df_test = df_test[['display_id', 'ad_id']].copy()
dtest = xgb.DMatrix(X_test, feature_names=columns)
dtest.set_group(group_test_sizes)
del X_test
test_pred = model.predict(dtest)
df_test['pred'] = test_pred
feather.write_dataframe(df_test, 'final_submission.feather')
# now run `Rscript submission.R final_submission.feather xgb_submission.csv` | 2,359 | 21.056075 | 76 | py |
outbrain-click-prediction-kaggle | outbrain-click-prediction-kaggle-master/5_mtv_xgb.py | import pandas as pd
import numpy as np
import xgboost as xgb
import feather
import gc
df_train_1 = feather.read_dataframe('tmp/mtv_df_train_1.feather')
features = sorted(set(df_train_1.columns) - {'display_id', 'clicked'})
y_1 = df_train_1.clicked.values
X_1 = df_train_1[features].values
del df_train_1
dfold1 = xgb.DMatrix(X_1, y_1, feature_names=features)
del X_1, y_1
gc.collect()
df_train_0 = feather.read_dataframe('tmp/mtv_df_train_0.feather')
y_0 = df_train_0.clicked.values
X_0 = df_train_0[features].values
del df_train_0
gc.collect()
dfold0 = xgb.DMatrix(X_0, y_0, feature_names=features)
del X_0, y_0
gc.collect()
# training a model
n_estimators = 100
xgb_pars = {
'eta': 0.2,
'gamma': 0.5,
'max_depth': 6,
'min_child_weight': 1,
'max_delta_step': 0,
'subsample': 1,
'colsample_bytree': 0.5,
'colsample_bylevel': 0.5,
'lambda': 1,
'alpha': 0,
'tree_method': 'approx',
'objective': 'binary:logistic',
'eval_metric': 'auc',
'nthread': 20,
'seed': 42,
'silent': 1
}
print('training model on fold 0...')
watchlist = [(dfold0, 'train'), (dfold1, 'val')]
model_fold1 = xgb.train(xgb_pars, dfold0, num_boost_round=n_estimators,
verbose_eval=1, evals=watchlist)
print('training model on fold 1...')
watchlist = [(dfold1, 'train'), (dfold0, 'val')]
model_fold0 = xgb.train(xgb_pars, dfold1, num_boost_round=n_estimators,
verbose_eval=1, evals=watchlist)
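# note the cross-fold setup: model_fold0 is trained on fold 1 and scores fold 0
# (and vice versa), so the saved predictions below are out-of-fold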
pred0 = model_fold0.predict(dfold0)
pred1 = model_fold1.predict(dfold1)
np.save('predictions/xgb_mtv_pred0.npy', pred0)
np.save('predictions/xgb_mtv_pred1.npy', pred1)
# saving the training leaves
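# pred_leaf=True returns, per row, the index of the leaf it falls into in each
# tree; these indices are likely consumed by a later stacking model as
# categorical features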
leaves0 = model_fold0.predict(dfold0, pred_leaf=True).astype('uint8')
np.save('tmp/xgb_model_0_leaves.npy', leaves0)
del leaves0
gc.collect()
leaves1 = model_fold1.predict(dfold1, pred_leaf=True).astype('uint8')
np.save('tmp/xgb_model_1_leaves.npy', leaves1)
del leaves1
gc.collect()
# making prediction for test and getting the leaves
df_test = feather.read_dataframe('tmp/mtv_df_test.feather')
X_test = df_test[features].values
del df_test
gc.collect()
dtest = xgb.DMatrix(X_test, feature_names=features)
del X_test
gc.collect()
pred0_test = model_fold0.predict(dtest)
pred1_test = model_fold1.predict(dtest)
pred_test = (pred0_test + pred1_test) / 2
np.save('predictions/xgb_mtv_pred_test.npy', pred_test)
# predicting leaves for test
leaves0_test = model_fold0.predict(dtest, pred_leaf=True).astype('uint8')
np.save('tmp/xgb_model_0_test_leaves.npy', leaves0_test)
del leaves0_test
gc.collect()
leaves1_test = model_fold1.predict(dtest, pred_leaf=True).astype('uint8')
np.save('tmp/xgb_model_1_test_leaves.npy', leaves1_test)
del leaves1_test
gc.collect() | 2,747 | 20.637795 | 72 | py |
BraVL | BraVL-master/BraVL_EEG/run_epochs_trimodal.py | import os
import numpy as np
import math
import random
import torch
from torch.autograd import Variable
import torch.distributions as dist
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from divergence_measures.kl_div import calc_kl_divergence
from sklearn.svm import SVC
from sklearn.metrics import top_k_accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from matplotlib import pyplot as plt
import csv
from utils import utils
from utils.TBLogger import TBLogger
torch.set_default_tensor_type(torch.DoubleTensor)
TINY = 1e-8
CONSTANT = 1e6
# global variables
SEED = 2021
SAMPLE1 = None
if SEED is not None:
np.random.seed(SEED)
torch.manual_seed(SEED)
random.seed(SEED)
def log_mean_exp(value, dim=0, keepdim=False):
return torch.logsumexp(value, dim, keepdim=keepdim) - math.log(value.size(dim))
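# log_mean_exp: numerically stable log(mean(exp(value))) via logsumexp - log K.
# m_cubo below estimates the CUBO_2 (chi-square) upper bound on log-evidence,
# 0.5 * log E_q[(p(x,z)/q(z|x))^2], from K latent samples z ~ q(z|x)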
def m_cubo(x, qz_x, px_zs, zss):
lpz = dist.Normal(torch.zeros(1,zss.size(2)).cuda(),torch.ones(1,zss.size(2)).cuda()).log_prob(zss).sum(-1)
lqz_x = qz_x.log_prob(zss).sum(-1)
if 'brain' in px_zs.keys() and 'image' in px_zs.keys() and 'text' in px_zs.keys():
lpx_z1 = px_zs['image'].log_prob(x['image']).sum(-1)
lpx_z2 = px_zs['text'].log_prob(x['text']).sum(-1)
lpx_z3 = px_zs['brain'].log_prob(x['brain']).sum(-1)
cubo = 0.5*log_mean_exp(2*(lpz+lpx_z1+lpx_z2+lpx_z3-lqz_x))
elif 'brain' not in px_zs.keys() and 'image' in px_zs.keys() and 'text' in px_zs.keys():
lpx_z1 = px_zs['image'].log_prob(x['image']).sum(-1)
lpx_z2 = px_zs['text'].log_prob(x['text']).sum(-1)
cubo = 0.5*log_mean_exp(2*(lpz+lpx_z1+lpx_z2-lqz_x))
return cubo.mean()
def log_li(x_var, dist_info):
mean = dist_info[0]
std = dist_info[1]
epsilon = (x_var - mean) / (std + TINY)
pi = Variable(torch.ones(1) * np.pi).to(x_var.device)
logli = - 0.5 * torch.log(2 * pi) - torch.log(std + TINY) - 0.5 * torch.pow(epsilon,2)
return logli.sum(1)
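# log_li: log-density of x_var under a diagonal Gaussian (mean, std), summed
# over dimensions. mutual_info below uses the auxiliary Q networks to form a
# variational lower bound on I(x_m; z), InfoGAN-style: each Q net predicts the
# latent back from a modality reconstruction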
def mutual_info(exp,px_zs,z):
if 'brain' in px_zs.keys() and 'image' in px_zs.keys() and 'text' in px_zs.keys():
q1 = exp.Q1(px_zs['brain'].loc)
q2 = exp.Q2(px_zs['image'].loc)
q3 = exp.Q3(px_zs['text'].loc)
mi1 = log_li(z,q1).mean()
mi2 = log_li(z,q2).mean()
mi3 = log_li(z,q3).mean()
return mi1 + mi2 + mi3
elif 'brain' not in px_zs.keys() and 'image' in px_zs.keys() and 'text' in px_zs.keys():
q2 = exp.Q2(px_zs['image'].loc)
q3 = exp.Q3(px_zs['text'].loc)
mi2 = log_li(z,q2).mean()
mi3 = log_li(z,q3).mean()
return mi2 + mi3
elif 'brain' in px_zs.keys() and 'image' not in px_zs.keys() and 'text' not in px_zs.keys():
q1 = exp.Q1(px_zs['brain'].loc)
mi1 = log_li(z,q1).mean()
return mi1
elif 'brain' not in px_zs.keys() and 'image' in px_zs.keys() and 'text' not in px_zs.keys():
q2 = exp.Q2(px_zs['image'].loc)
mi2 = log_li(z,q2).mean()
return mi2
elif 'brain' not in px_zs.keys() and 'image' not in px_zs.keys() and 'text' in px_zs.keys():
q3 = exp.Q3(px_zs['text'].loc)
mi3 = log_li(z,q3).mean()
return mi3
elif 'brain' in px_zs.keys() and 'image' in px_zs.keys() and 'text' not in px_zs.keys():
q1 = exp.Q1(px_zs['brain'].loc)
q2 = exp.Q2(px_zs['image'].loc)
mi1 = log_li(z,q1).mean()
mi2 = log_li(z,q2).mean()
return mi1+mi2
elif 'brain' in px_zs.keys() and 'image' not in px_zs.keys() and 'text' in px_zs.keys():
q1 = exp.Q1(px_zs['brain'].loc)
q3 = exp.Q3(px_zs['text'].loc)
mi1 = log_li(z,q1).mean()
mi3 = log_li(z,q3).mean()
return mi1+mi3
def calc_log_probs(exp, result, batch):
mods = exp.modalities
log_probs = dict()
weighted_log_prob = 0.0
for m, m_key in enumerate(mods.keys()):
if m_key in batch[0].keys():
mod = mods[m_key]
log_probs[mod.name] = -mod.calc_log_prob(result['rec'][mod.name],
batch[0][mod.name],
exp.flags.batch_size)
weighted_log_prob += exp.rec_weights[mod.name]*log_probs[mod.name]
else:
mod = mods[m_key]
log_probs[mod.name] = 0
weighted_log_prob += exp.rec_weights[mod.name]*log_probs[mod.name]
return log_probs, weighted_log_prob
def calc_klds(exp, result):
latents = result['latents']['subsets']
klds = dict()
for m, key in enumerate(latents.keys()):
mu, logvar = latents[key]
klds[key] = calc_kl_divergence(mu, logvar,
norm_value=exp.flags.batch_size)
return klds
def calc_klds_style(exp, result):
latents = result['latents']['modalities']
klds = dict()
for m, key in enumerate(latents.keys()):
if key.endswith('style'):
mu, logvar = latents[key]
klds[key] = calc_kl_divergence(mu, logvar,
norm_value=exp.flags.batch_size)
return klds
def calc_style_kld(exp, klds):
mods = exp.modalities
style_weights = exp.style_weights
weighted_klds = 0.0
for m, m_key in enumerate(mods.keys()):
weighted_klds += style_weights[m_key]*klds[m_key+'_style']
return weighted_klds
def shuffle(a):
return a[torch.randperm(a.size()[0])]
def true_neg_idx(data, shuffle_data):
a = data.mean(1)
b = shuffle_data.mean(1)
index = torch.arange(0, len(a))
idx = index[a!=b]
return idx
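# true_neg_idx keeps only rows whose shuffled copy actually differs from the
# original, so every "negative" pair is guaranteed to be mismatched.
# negative_sample_generator builds such mismatched brain/image/text tuples by
# shuffling one or two modalities; cases 1-6 enumerate which ones are shuffled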
def negative_sample_generator(batch_new, batch, case):
batch_d = batch_new[0]
data = batch
if 'brain' in batch_d.keys() and 'image' in batch_d.keys() and 'text' in batch_d.keys():
if case==1:
shuffle_data = shuffle(data[0])
idx = true_neg_idx(data[0], shuffle_data)
neg_batch = [shuffle_data[idx,:], data[1][idx,:], data[2][idx,:]]
batch = [neg_batch[0], neg_batch[1], neg_batch[2]]
batch_new[0] = dict()
batch_new[0] = {'brain': batch[0], 'image': batch[1], 'text': batch[2]}
return batch_new
elif case == 2:
shuffle_data = shuffle(data[1])
idx = true_neg_idx(data[1], shuffle_data)
neg_batch = [data[0][idx,:], shuffle_data[idx,:], data[2][idx,:]]
batch = [neg_batch[0], neg_batch[1], neg_batch[2]]
batch_new[0] = dict()
batch_new[0] = {'brain': batch[0], 'image': batch[1], 'text': batch[2]}
return batch_new
elif case == 3:
shuffle_data = shuffle(data[2])
idx = true_neg_idx(data[2], shuffle_data)
neg_batch = [data[0][idx,:], data[1][idx,:], shuffle_data[idx,:]]
batch = [neg_batch[0], neg_batch[1], neg_batch[2]]
batch_new[0] = dict()
batch_new[0] = {'brain': batch[0], 'image': batch[1], 'text': batch[2]}
return batch_new
elif case == 4:
shuffle_data0 = shuffle(data[0])
idx1 = true_neg_idx(data[0], shuffle_data0)
shuffle_data1 = shuffle(data[1])
idx2 = true_neg_idx(data[1], shuffle_data1)
idx = np.unique(np.concatenate((idx1,idx2),axis=0))
neg_batch = [shuffle_data0[idx,:], shuffle_data1[idx,:], data[2][idx,:]]
batch = [neg_batch[0], neg_batch[1], neg_batch[2]]
batch_new[0] = dict()
batch_new[0] = {'brain': batch[0], 'image': batch[1], 'text': batch[2]}
return batch_new
elif case == 5:
shuffle_data0 = shuffle(data[0])
idx1 = true_neg_idx(data[0], shuffle_data0)
shuffle_data2 = shuffle(data[2])
idx2 = true_neg_idx(data[2], shuffle_data2)
idx = np.unique(np.concatenate((idx1,idx2),axis=0))
neg_batch = [shuffle_data0[idx,:], data[1][idx,:], shuffle_data2[idx,:]]
batch = [neg_batch[0], neg_batch[1], neg_batch[2]]
batch_new[0] = dict()
batch_new[0] = {'brain': batch[0], 'image': batch[1], 'text': batch[2]}
return batch_new
elif case == 6:
shuffle_data1 = shuffle(data[1])
idx1 = true_neg_idx(data[1], shuffle_data1)
shuffle_data2 = shuffle(data[2])
idx2 = true_neg_idx(data[2], shuffle_data2)
idx = np.unique(np.concatenate((idx1,idx2),axis=0))
neg_batch = [data[0][idx,:], shuffle_data1[idx,:], shuffle_data2[idx,:]]
batch = [neg_batch[0], neg_batch[1], neg_batch[2]]
batch_new[0] = dict()
batch_new[0] = {'brain': batch[0], 'image': batch[1], 'text': batch[2]}
return batch_new
elif 'brain' not in batch_d.keys() and 'image' in batch_d.keys() and 'text' in batch_d.keys():
if case == 1:
shuffle_data = shuffle(data[0])
idx = true_neg_idx(data[0], shuffle_data)
neg_batch = [shuffle_data[idx,:], data[1][idx,:]]
batch = [neg_batch[0], neg_batch[1]]
batch_new[0] = dict()
batch_new[0] = {'image': batch[0], 'text': batch[1]}
# batch_new[1] = data[3]
return batch_new
elif case == 2:
shuffle_data = shuffle(data[1])
idx = true_neg_idx(data[1], shuffle_data)
neg_batch = [data[0][idx,:], shuffle_data[idx,:]]
batch = [neg_batch[0], neg_batch[1]]
batch_new[0] = dict()
batch_new[0] = {'image': batch[0], 'text': batch[1]}
return batch_new
def basic_routine_epoch(exp, batch, epoch):
# set up weights
beta_style = exp.flags.beta_style
beta_content = exp.flags.beta_content
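# linear KL warm-up: beta grows by 0.01 per epoch and saturates at 1.0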
beta = exp.flags.beta + epoch * 0.01
if beta>1.0:
beta = 1.0
rec_weight = 1.0
lambda1 = exp.flags.lambda1
mm_vae = exp.mm_vae
batch_d = batch[0]
mods = exp.modalities
for k, m_key in enumerate(batch_d.keys()):
batch_d[m_key] = Variable(batch_d[m_key]).to(exp.flags.device)
results = mm_vae(batch_d)
log_probs, weighted_log_prob = calc_log_probs(exp, results, batch)
group_divergence = results['joint_divergence']
klds = calc_klds(exp, results)
z = results['class_embeddings']
px_zs = results['rec']
intra_mi = -mutual_info(exp,px_zs,z)
if exp.flags.factorized_representation:
klds_style = calc_klds_style(exp, results)
if (exp.flags.modality_jsd or exp.flags.modality_moe
or exp.flags.joint_elbo):
if exp.flags.factorized_representation:
kld_style = calc_style_kld(exp, klds_style)
else:
kld_style = 0.0
kld_content = group_divergence
kld_weighted = beta_style * kld_style + beta_content * kld_content
elbo_loss = rec_weight * weighted_log_prob + beta * kld_weighted
elif exp.flags.modality_poe:
klds_joint = {'content': group_divergence,
'style': dict()}
elbos = dict()
for m, m_key in enumerate(mods.keys()):
mod = mods[m_key]
if exp.flags.factorized_representation:
kld_style_m = klds_style[m_key + '_style']
else:
kld_style_m = 0.0
klds_joint['style'][m_key] = kld_style_m
if exp.flags.poe_unimodal_elbos:
if m_key=='brain' and 'brain' in batch_d.keys():
i_batch_mod = {m_key: batch_d[m_key]}
r_mod = mm_vae(i_batch_mod)
log_prob_mod = -mod.calc_log_prob(r_mod['rec'][m_key],
batch_d[m_key],
exp.flags.batch_size)
log_prob = {m_key: log_prob_mod}
klds_mod = {'content': klds[m_key],
'style': {m_key: kld_style_m}}
elbo_mod = utils.calc_elbo(exp, m_key, log_prob, klds_mod)
elbos[m_key] = elbo_mod
elif m_key=='brain' and 'brain' not in batch_d.keys():
elbos[m_key] = 0
elif m_key=='image' and 'image' in batch_d.keys():
i_batch_mod = {m_key: batch_d[m_key]}
r_mod = mm_vae(i_batch_mod)
log_prob_mod = -mod.calc_log_prob(r_mod['rec'][m_key],
batch_d[m_key],
exp.flags.batch_size)
log_prob = {m_key: log_prob_mod}
klds_mod = {'content': klds[m_key],
'style': {m_key: kld_style_m}}
elbo_mod = utils.calc_elbo(exp, m_key, log_prob, klds_mod)
elbos[m_key] = elbo_mod
elif m_key=='image' and 'image' not in batch_d.keys():
elbos[m_key] = 0
elif m_key == 'text' and 'text' in batch_d.keys():
i_batch_mod = {m_key: batch_d[m_key]}
r_mod = mm_vae(i_batch_mod)
log_prob_mod = -mod.calc_log_prob(r_mod['rec'][m_key],
batch_d[m_key],
exp.flags.batch_size)
log_prob = {m_key: log_prob_mod}
klds_mod = {'content': klds[m_key],
'style': {m_key: kld_style_m}}
elbo_mod = utils.calc_elbo(exp, m_key, log_prob, klds_mod)
elbos[m_key] = elbo_mod
elif m_key=='text' and 'text' not in batch_d.keys():
elbos[m_key] = 0
elbo_joint = utils.calc_elbo(exp, 'joint', log_probs, klds_joint)
elbos['joint'] = elbo_joint
elbo_loss = sum(elbos.values())
total_loss = elbo_loss + lambda1 * intra_mi
out_basic_routine = dict()
out_basic_routine['results'] = results
out_basic_routine['log_probs'] = log_probs
out_basic_routine['total_loss'] = total_loss
out_basic_routine['klds'] = klds
out_basic_routine['intra_mi'] = intra_mi
out_basic_routine['elbo_loss'] = elbo_loss
return out_basic_routine
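# elbo_contrast scores mismatched (negative) batches: each negative's CUBO is
# exponentiated, the batch-normalized sum is re-logged, and CONSTANT merely
# rescales for numerical range -- this forms the repulsive part of the
# inter-modality mutual-information loss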
def elbo_contrast(exp, batch, epoch):
# set up weights
beta_style = exp.flags.beta_style
beta_content = exp.flags.beta_content
beta = exp.flags.beta + epoch * 0.01
if beta>1.0:
beta = 1.0
rec_weight = 1.0
mm_vae = exp.mm_vae
batch_d = batch[0]
mods = exp.modalities
for k, m_key in enumerate(batch_d.keys()):
batch_d[m_key] = Variable(batch_d[m_key]).to(exp.flags.device)
results = mm_vae(batch_d, K=30)
cubo = m_cubo(batch_d, results['qz_x'], results['px_zs'], results['zss'])
log_probs, weighted_log_prob = calc_log_probs(exp, results, batch)
group_divergence = results['joint_divergence']
klds = calc_klds(exp, results)
z = results['class_embeddings']
neg_batch_size = z.shape[0]
if exp.flags.factorized_representation:
klds_style = calc_klds_style(exp, results)
if (exp.flags.modality_jsd or exp.flags.modality_moe
or exp.flags.joint_elbo):
if exp.flags.factorized_representation:
kld_style = calc_style_kld(exp, klds_style)
else:
kld_style = 0.0
kld_content = group_divergence
kld_weighted = beta_style * kld_style + beta_content * kld_content
elbo_loss = rec_weight * weighted_log_prob + beta * kld_weighted
elif exp.flags.modality_poe:
klds_joint = {'content': group_divergence,
'style': dict()}
elbos = dict()
for m, m_key in enumerate(mods.keys()):
mod = mods[m_key]
if exp.flags.factorized_representation:
kld_style_m = klds_style[m_key + '_style']
else:
kld_style_m = 0.0
klds_joint['style'][m_key] = kld_style_m
if exp.flags.poe_unimodal_elbos:
if m_key=='brain' and 'brain' in batch_d.keys():
i_batch_mod = {m_key: batch_d[m_key]}
r_mod = mm_vae(i_batch_mod)
log_prob_mod = -mod.calc_log_prob(r_mod['rec'][m_key],
batch_d[m_key],
exp.flags.batch_size)
log_prob = {m_key: log_prob_mod}
klds_mod = {'content': klds[m_key],
'style': {m_key: kld_style_m}}
elbo_mod = utils.calc_elbo(exp, m_key, log_prob, klds_mod)
elbos[m_key] = elbo_mod
elif m_key=='brain' and 'brain' not in batch_d.keys():
elbos[m_key] = 0
elif m_key=='image' and 'image' in batch_d.keys():
i_batch_mod = {m_key: batch_d[m_key]}
r_mod = mm_vae(i_batch_mod)
log_prob_mod = -mod.calc_log_prob(r_mod['rec'][m_key],
batch_d[m_key],
exp.flags.batch_size)
log_prob = {m_key: log_prob_mod}
klds_mod = {'content': klds[m_key],
'style': {m_key: kld_style_m}}
elbo_mod = utils.calc_elbo(exp, m_key, log_prob, klds_mod)
elbos[m_key] = elbo_mod
elif m_key=='image' and 'image' not in batch_d.keys():
elbos[m_key] = 0
elif m_key == 'text' and 'text' in batch_d.keys():
i_batch_mod = {m_key: batch_d[m_key]}
r_mod = mm_vae(i_batch_mod)
log_prob_mod = -mod.calc_log_prob(r_mod['rec'][m_key],
batch_d[m_key],
exp.flags.batch_size)
log_prob = {m_key: log_prob_mod}
klds_mod = {'content': klds[m_key],
'style': {m_key: kld_style_m}}
elbo_mod = utils.calc_elbo(exp, m_key, log_prob, klds_mod)
elbos[m_key] = elbo_mod
elif m_key=='text' and 'text' not in batch_d.keys():
elbos[m_key] = 0
elbo_joint = utils.calc_elbo(exp, 'joint', log_probs, klds_joint)
elbos['joint'] = elbo_joint
elbo_loss = sum(elbos.values())
elbo_scale = cubo / CONSTANT
out_basic_routine = dict()
out_basic_routine['elbo_nega_sample_loss'] = torch.log(elbo_scale.exp().sum() * exp.flags.batch_size / neg_batch_size + TINY) * CONSTANT
return out_basic_routine
def update_Qnet(exp, batch):
with torch.no_grad():
mm_vae = exp.mm_vae
batch_d = batch[0]
for k, m_key in enumerate(batch_d.keys()):
batch_d[m_key] = Variable(batch_d[m_key]).to(exp.flags.device)
results = mm_vae(batch_d)
z = results['class_embeddings']
px_zs = results['rec']
intra_mi = -mutual_info(exp, px_zs, z)
return intra_mi
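# training alternates two stages per batch: Stage 1 fits the Q networks to the
# current MVAE (intra-modality MI), Stage 2 updates the MVAE itself with
# ELBO + lambda1 * intra-MI + lambda2 * inter-MI (contrastive) terms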
def train_aug(epoch, exp, tb_logger):
mm_vae = exp.mm_vae
mm_vae.train()
exp.mm_vae = mm_vae
lambda2 = exp.flags.lambda2
if 'image_text' in exp.flags.aug_type:
print('aug type: image_text')
aug_loader = DataLoader(exp.dataset_aug, batch_size=exp.flags.batch_size,
shuffle=True,
num_workers=8, drop_last=True)
for iteration, batch in enumerate(aug_loader):
batch_new = {}
batch_new[0] = dict()
batch_new[0] = {'image':batch[0],'text':batch[1]}
batch = [batch[0], batch[1]]
# Stage 1
intra_mi = update_Qnet(exp, batch_new)
exp.optimizer['Qnet'].zero_grad()
exp.optimizer['mvae'].zero_grad()
intra_mi.backward()
exp.optimizer['Qnet'].step()
# Stage 2
basic_routine = basic_routine_epoch(exp, batch_new, epoch)
total_loss = basic_routine['total_loss']
elbo_loss = basic_routine['elbo_loss']
exp.optimizer['mvae'].zero_grad()
exp.optimizer['Qnet'].zero_grad()
neg_batch_new = negative_sample_generator(batch_new, batch, case=1)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_1 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=2)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_2 = basic_routine_contrast['elbo_nega_sample_loss']
elbo_nega_sample_loss = elbo_nega_sample_loss_case_1 + elbo_nega_sample_loss_case_2
inter_mi_loss = elbo_loss + elbo_nega_sample_loss/2.0
total_loss = total_loss + lambda2 * inter_mi_loss
total_loss.backward()
exp.optimizer['mvae'].step()
print('====> Epoch: {:03d} Train loss: {:.4f}'.format(epoch, total_loss))
elif 'text_only' in exp.flags.aug_type:
print('aug type: text_only')
aug_loader = DataLoader(exp.dataset_aug, batch_size=exp.flags.batch_size,
shuffle=True,
num_workers=8, drop_last=True)
for iteration, batch in enumerate(aug_loader):
batch_new = {}
batch_new[0] = dict()
batch_new[0] = {'text': batch[0]}
# Stage 1
intra_mi = update_Qnet(exp, batch_new)
exp.optimizer['Qnet'].zero_grad()
exp.optimizer['mvae'].zero_grad()
intra_mi.backward()
exp.optimizer['Qnet'].step()
# Stage 2
basic_routine = basic_routine_epoch(exp, batch_new, epoch)
total_loss = basic_routine['total_loss']
exp.optimizer['mvae'].zero_grad()
exp.optimizer['Qnet'].zero_grad()
total_loss.backward()
exp.optimizer['mvae'].step()
print('====> Epoch: {:03d} Train loss: {:.4f}'.format(epoch, total_loss))
elif 'image_only' in exp.flags.aug_type:
print('aug type: image_only')
aug_loader = DataLoader(exp.dataset_aug, batch_size=exp.flags.batch_size,
shuffle=True,
num_workers=8, drop_last=True)
for iteration, batch in enumerate(aug_loader):
batch_new = {}
batch_new[0] = dict()
batch_new[0] = {'image': batch[0]}
# Stage 1
intra_mi = update_Qnet(exp, batch_new)
exp.optimizer['Qnet'].zero_grad()
exp.optimizer['mvae'].zero_grad()
intra_mi.backward()
exp.optimizer['Qnet'].step()
# Stage 2
basic_routine = basic_routine_epoch(exp, batch_new, epoch)
total_loss = basic_routine['total_loss']
exp.optimizer['mvae'].zero_grad()
exp.optimizer['Qnet'].zero_grad()
total_loss.backward()
exp.optimizer['mvae'].step()
print('====> Epoch: {:03d} Train loss: {:.4f}'.format(epoch, total_loss))
elif exp.flags.aug_type == 'no_aug':
print('aug type: no augmentation')
def train(epoch, exp, tb_logger):
mm_vae = exp.mm_vae
mm_vae.train()
exp.mm_vae = mm_vae
lambda2 = exp.flags.lambda2
test_loader = DataLoader(exp.dataset_test, batch_size=exp.flags.batch_size,
shuffle=True,
num_workers=8, drop_last=True)
for iteration, batch in enumerate(test_loader):
batch_new = {}
batch_new[0] = dict()
batch_new[0] = {'image':batch[1],'text':batch[2]}
batch = [batch[1],batch[2]]
# Stage 1
intra_mi = update_Qnet(exp, batch_new)
exp.optimizer['Qnet'].zero_grad()
exp.optimizer['mvae'].zero_grad()
intra_mi.backward()
exp.optimizer['Qnet'].step()
# Stage 2
basic_routine = basic_routine_epoch(exp, batch_new, epoch)
total_loss = basic_routine['total_loss']
elbo_loss = basic_routine['elbo_loss']
exp.optimizer['mvae'].zero_grad()
exp.optimizer['Qnet'].zero_grad()
neg_batch_new = negative_sample_generator(batch_new, batch, case=1)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_1 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=2)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_2 = basic_routine_contrast['elbo_nega_sample_loss']
elbo_nega_sample_loss = elbo_nega_sample_loss_case_1 + elbo_nega_sample_loss_case_2
inter_mi_loss = elbo_loss + elbo_nega_sample_loss/2.0
total_loss = total_loss + lambda2 * inter_mi_loss
total_loss.backward()
exp.optimizer['mvae'].step()
d_loader = DataLoader(exp.dataset_train, batch_size=exp.flags.batch_size,
shuffle=True,
num_workers=8, drop_last=True)
for iteration, batch in enumerate(d_loader):
batch_new = {}
batch_new[0] = dict()
batch_new[0] = {'brain':batch[0],'image':batch[1],'text':batch[2]}
# Stage 1
intra_mi = update_Qnet(exp, batch_new)
exp.optimizer['Qnet'].zero_grad()
exp.optimizer['mvae'].zero_grad()
intra_mi.backward()
exp.optimizer['Qnet'].step()
# Stage 2
basic_routine = basic_routine_epoch(exp, batch_new, epoch)
results = basic_routine['results']
total_loss = basic_routine['total_loss']
klds = basic_routine['klds']
log_probs = basic_routine['log_probs']
intra_mi = basic_routine['intra_mi']
elbo_loss = basic_routine['elbo_loss']
exp.optimizer['mvae'].zero_grad()
exp.optimizer['Qnet'].zero_grad()
neg_batch_new = negative_sample_generator(batch_new, batch, case=1)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_1 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=2)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_2 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=3)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_3 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=4)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_4 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=5)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_5 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=6)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_6 = basic_routine_contrast['elbo_nega_sample_loss']
elbo_nega_sample_loss = elbo_nega_sample_loss_case_1 + elbo_nega_sample_loss_case_2 + elbo_nega_sample_loss_case_3 + elbo_nega_sample_loss_case_4 + elbo_nega_sample_loss_case_5 + elbo_nega_sample_loss_case_6
inter_mi_loss = elbo_loss + elbo_nega_sample_loss/6.0
total_loss = total_loss + lambda2 * inter_mi_loss
total_loss.backward()
exp.optimizer['mvae'].step()
tb_logger.write_training_logs(results, total_loss, log_probs, klds, -inter_mi_loss)
print('====> Epoch: {:03d} Train loss: {:.4f} ELBO: {:.4f} IntraMI: {:.4f} InterMI: {:.4f}'.format(epoch,
total_loss,
-elbo_loss,
-intra_mi,
-inter_mi_loss))
def test(epoch, exp, tb_logger):
with torch.no_grad():
mm_vae = exp.mm_vae
mm_vae.eval()
exp.mm_vae = mm_vae
lambda2 = exp.flags.lambda2
d_loader = DataLoader(exp.dataset_test, batch_size=1000,
shuffle=True,
num_workers=8, drop_last=False)
for iteration, batch in enumerate(d_loader):
batch_new = {}
batch_new[0] = dict()
batch_new[0] = {'brain': batch[0], 'image': batch[1], 'text': batch[2]}
basic_routine = basic_routine_epoch(exp, batch_new, epoch)
results = basic_routine['results']
total_loss = basic_routine['total_loss']
klds = basic_routine['klds']
log_probs = basic_routine['log_probs']
intra_mi = basic_routine['intra_mi']
elbo_loss = basic_routine['elbo_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=1)
basic_routine_contrast = elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_1 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=2)
basic_routine_contrast = elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_2 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=3)
basic_routine_contrast = elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_3 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=4)
basic_routine_contrast = elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_4 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=5)
basic_routine_contrast = elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_5 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=6)
basic_routine_contrast = elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_6 = basic_routine_contrast['elbo_nega_sample_loss']
elbo_nega_sample_loss = elbo_nega_sample_loss_case_1 + elbo_nega_sample_loss_case_2 + elbo_nega_sample_loss_case_3 + elbo_nega_sample_loss_case_4 + elbo_nega_sample_loss_case_5 + elbo_nega_sample_loss_case_6
inter_mi_loss = elbo_loss + elbo_nega_sample_loss / 6.0
total_loss = total_loss + lambda2 * inter_mi_loss
tb_logger.write_testing_logs(results, total_loss, log_probs, klds, -inter_mi_loss)
print('====> Epoch: {:03d} Test loss: {:.4f} ELBO: {:.4f} IntraMI: {:.4f} InterMI: {:.4f}'.format(epoch,
total_loss,
-elbo_loss,
-intra_mi,
-inter_mi_loss))
def image_text_inference(exp, type):
with torch.no_grad():
mm_vae = exp.mm_vae
mm_vae.eval()
exp.mm_vae = mm_vae
if type=='zsl':
image_text_data = {'image': exp.dataset_test.tensors[1].cuda(),'text': exp.dataset_test.tensors[2].cuda()}
label = exp.dataset_test.tensors[3]
brain = exp.dataset_test.tensors[0]
elif type=='normal':
image_text_data = {'image': exp.dataset_val.tensors[1].cuda(),'text': exp.dataset_val.tensors[2].cuda()}
label = exp.dataset_val.tensors[3]
brain = exp.dataset_val.tensors[0]
results = mm_vae(image_text_data)
z = results['class_embeddings']
brain_rec = mm_vae.lhoods['brain'](*mm_vae.decoders['brain'](None, z))
return z.cpu().numpy(), label.cpu().numpy(), brain_rec.loc.cpu().numpy(), brain.cpu().numpy()
def brain_inference(exp, type):
with torch.no_grad():
mm_vae = exp.mm_vae
mm_vae.eval()
exp.mm_vae = mm_vae
if type == 'zsl':
data = {'brain':exp.dataset_test.tensors[0].cuda()}
label = exp.dataset_test.tensors[3]
image = exp.dataset_test.tensors[1]
text = exp.dataset_test.tensors[2]
elif type == 'normal':
data = {'brain':exp.dataset_val.tensors[0].cuda()}
label = exp.dataset_val.tensors[3]
image = exp.dataset_val.tensors[1]
text = exp.dataset_val.tensors[2]
results = mm_vae(data)
z = results['class_embeddings']
image_rec = mm_vae.lhoods['image'](*mm_vae.decoders['image'](None, z))
text_rec = mm_vae.lhoods['text'](*mm_vae.decoders['text'](None, z))
return z.cpu().numpy(), label.cpu().numpy(), image_rec.loc.cpu().numpy(), text_rec.loc.cpu().numpy(), image.cpu().numpy(), text.cpu().numpy()
def image_inference(exp, type):
with torch.no_grad():
mm_vae = exp.mm_vae
mm_vae.eval()
exp.mm_vae = mm_vae
if type == 'zsl':
data = {'image':exp.dataset_test.tensors[1].cuda()}
label = exp.dataset_test.tensors[3]
elif type == 'normal':
data = {'image':exp.dataset_val.tensors[1].cuda()}
label = exp.dataset_val.tensors[3]
results = mm_vae(data)
z = results['class_embeddings']
return z.cpu().numpy(), label.cpu().numpy()
def text_inference(exp, type):
with torch.no_grad():
mm_vae = exp.mm_vae
mm_vae.eval()
exp.mm_vae = mm_vae
if type == 'zsl':
data = {'text':exp.dataset_test.tensors[2].cuda()}
label = exp.dataset_test.tensors[3]
elif type == 'normal':
data = {'text':exp.dataset_val.tensors[2].cuda()}
label = exp.dataset_val.tensors[3]
results = mm_vae(data)
z = results['class_embeddings']
return z.cpu().numpy(), label.cpu().numpy()
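# the classification tests below fit an SVM on latents inferred from image/text
# and evaluate it on latents inferred from brain activity alone, i.e. zero-shot
# neural decoding through the shared latent space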
def run_classification_test_50_way(exp,observation, type):
if observation=='image_text':
z_train = []
train_label = []
for i in range(1):
z, label,brain_rec,brain= image_text_inference(exp,type)
z_train.append(z)
train_label.append(label)
z_train = np.vstack(z_train)
train_label = np.vstack(train_label)
index50 = np.squeeze(np.where(train_label < 51, True, False))
z_train = z_train[index50, :]
train_label = train_label[index50]
elif observation == 'image':
z_train = []
train_label = []
for i in range(1):
z, label= image_inference(exp,type)
z_train.append(z)
train_label.append(label)
z_train = np.vstack(z_train)
train_label = np.vstack(train_label)
index50 = np.squeeze(np.where(train_label < 51, True, False))
z_train = z_train[index50, :]
train_label = train_label[index50]
elif observation == 'text':
z_train = []
train_label = []
for i in range(1):
z, label= text_inference(exp,type)
z_train.append(z)
train_label.append(label)
z_train = np.vstack(z_train)
train_label = np.vstack(train_label)
index50 = np.squeeze(np.where(train_label < 51, True, False))
z_train = z_train[index50, :]
train_label = train_label[index50]
z_test, test_label, image_rec, text_rec, image, text = brain_inference(exp,type)
z_test = z_test[index50, :]
test_label = test_label[index50]
print(z_train.shape)
print(train_label.shape)
print(z_test.shape)
print(test_label.shape)
print('z_train', np.mean(z_train))
print('z_test', np.mean(z_test))
classifiers = [
SVC(gamma=0.001, C=1.0, probability=True),
]
for clf in classifiers:
clf.fit(z_train, train_label)
score = clf.score(z_test, test_label)
print(f"{observation}\n"
f"50_way_Classification report for classifier {clf}:\n"
f"{score}\n")
probas = clf.predict_proba(z_test)
top_acc = top_k_accuracy_score(test_label, probas, k=5)
print(f"{observation}\n"
f"50_way_Classification Top 5 Acc for classifier {clf}:\n"
f"{top_acc}\n")
return score, top_acc
def run_classification_test(exp,observation, type):
if observation=='image_text':
z_train = []
train_label = []
for i in range(1):
z, label,brain_rec,brain= image_text_inference(exp,type)
z_train.append(z)
train_label.append(label)
z_train = np.vstack(z_train)
train_label = np.vstack(train_label)
elif observation == 'image':
z_train = []
train_label = []
for i in range(1):
z, label= image_inference(exp,type)
z_train.append(z)
train_label.append(label)
z_train = np.vstack(z_train)
train_label = np.vstack(train_label)
elif observation == 'text':
z_train = []
train_label = []
for i in range(1):
z, label= text_inference(exp,type)
z_train.append(z)
train_label.append(label)
z_train = np.vstack(z_train)
train_label = np.vstack(train_label)
z_test, test_label, image_rec, text_rec, image, text = brain_inference(exp,type)
print(z_train.shape)
print(train_label.shape)
print(z_test.shape)
print(test_label.shape)
print('z_train', np.mean(z_train))
print('z_test', np.mean(z_test))
classifiers = [
SVC(gamma=0.001, C=1.0, probability=True),
]
for clf in classifiers:
clf.fit(z_train, train_label)
score = clf.score(z_test, test_label)
print(f"{observation}\n"
f"200_way_Classification report for classifier {clf}:\n"
f"{score}\n")
probas = clf.predict_proba(z_test)
top_acc = top_k_accuracy_score(test_label, probas, k=5)
print(f"{observation}\n"
f"200_way_Classification Top 5 Acc for classifier {clf}:\n"
f"{top_acc}\n")
return score, top_acc
def create_csv(path,top1,top5):
with open(path,'w') as f:
csv_writer = csv.writer(f)
head = ["top1","top5"]
csv_writer.writerow(head)
def write_csv(path,top1,top5):
with open(path, 'a+') as f:
csv_writer = csv.writer(f)
row = []
row.append(top1)
row.append(top5)
csv_writer.writerow(row)
def run_epochs_trimodal(exp):
# initialize summary writer
writer = SummaryWriter(exp.flags.dir_logs)
tb_logger = TBLogger(exp.flags.str_experiment, writer)
str_flags = utils.save_and_log_flags(exp.flags)
tb_logger.writer.add_text('FLAGS', str_flags, 0)
lr_list = []
print('training epochs progress:')
for epoch in range(exp.flags.start_epoch, exp.flags.end_epoch):
utils.printProgressBar(epoch, exp.flags.end_epoch)
# one epoch of training and testing
exp.scheduler['Qnet'].step()
exp.scheduler['mvae'].step()
lr_list.append(exp.optimizer['Qnet'].state_dict()['param_groups'][0]['lr'])
train_aug(epoch, exp, tb_logger)
train(epoch, exp, tb_logger)
test(epoch, exp, tb_logger)
# save checkpoints every 100 epochs and at the final epoch
if (epoch + 1) % 100 == 0 or (epoch + 1) == exp.flags.end_epoch:
dir_network_epoch = os.path.join(exp.flags.dir_checkpoints, str(epoch).zfill(4))
if not os.path.exists(dir_network_epoch):
os.makedirs(dir_network_epoch)
exp.mm_vae.save_networks()
torch.save(exp.mm_vae.state_dict(),
os.path.join(dir_network_epoch, exp.flags.mm_vae_save))
print('lr = ',lr_list[-1])
# plt.plot(range(exp.flags.end_epoch), lr_list, color='r')
# plt.show()
if exp.flags.test_type=='zsl':
top1, top5 = run_classification_test_50_way(exp, 'image_text', 'zsl')
path = './results/'+exp.flags.dataname+'_'+exp.flags.sbj+'_'+exp.flags.roi+'_'+exp.flags.aug_type+'_'+exp.flags.text_model+'_'+exp.flags.image_model.split('/')[-1]+'_'+str(exp.flags.lambda1)+'_'+str(exp.flags.lambda2)+'_'+'_'+str(exp.flags.class_dim)+'_'+exp.flags.method+'_image_text_50_way.csv'
create_csv(path, top1, top5)
write_csv(path, top1, top5)
top1, top5 = run_classification_test(exp, 'image_text', 'zsl')
path = './results/'+exp.flags.dataname+'_'+exp.flags.sbj+'_'+exp.flags.roi+'_'+exp.flags.aug_type+'_'+exp.flags.text_model+'_'+exp.flags.image_model.split('/')[-1]+'_'+str(exp.flags.lambda1)+'_'+str(exp.flags.lambda2)+'_'+'_'+str(exp.flags.class_dim)+'_'+exp.flags.method+'_image_text.csv'
create_csv(path, top1, top5)
write_csv(path, top1, top5)
| 42,106 | 42.231006 | 304 | py |
BraVL | BraVL-master/BraVL_EEG/main_trimodal.py | import sys
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '5'
import json
import torch
from run_epochs_trimodal import run_epochs_trimodal
from utils.filehandling import create_dir_structure
from brain_image_text.flags import parser
from brain_image_text.experiment import BrainImageText
torch.set_default_tensor_type(torch.DoubleTensor)
if __name__ == '__main__':
FLAGS = parser.parse_args()
use_cuda = torch.cuda.is_available()
FLAGS.device = torch.device('cuda' if use_cuda else 'cpu')
if FLAGS.method == 'poe':
FLAGS.modality_poe=True
elif FLAGS.method == 'moe':
FLAGS.modality_moe=True
elif FLAGS.method == 'jsd':
FLAGS.modality_jsd=True
elif FLAGS.method == 'joint_elbo':
FLAGS.joint_elbo=True
else:
print('method not implemented...exit!')
sys.exit()
print(FLAGS.modality_poe)
print(FLAGS.modality_moe)
print(FLAGS.modality_jsd)
print(FLAGS.joint_elbo)
FLAGS.alpha_modalities = [FLAGS.div_weight_uniform_content, FLAGS.div_weight_m1_content,
FLAGS.div_weight_m2_content, FLAGS.div_weight_m3_content]
FLAGS = create_dir_structure(FLAGS)
alphabet_path = os.path.join(os.getcwd(), 'alphabet.json')
with open(alphabet_path) as alphabet_file:
alphabet = str(''.join(json.load(alphabet_file)))
mst = BrainImageText(FLAGS, alphabet)
mst.set_optimizer()
total_params = sum(p.numel() for p in mst.mm_vae.parameters())
print('num parameters model: ' + str(total_params))
run_epochs_trimodal(mst)
| 1,562 | 33.733333 | 92 | py |
BraVL | BraVL-master/BraVL_EEG/modalities/Modality.py |
from abc import ABC, abstractmethod
import os
import torch
import torch.distributions as dist
class Modality(ABC):
def __init__(self, name, enc, dec, class_dim, style_dim, lhood_name):
self.name = name
self.encoder = enc
self.decoder = dec
self.class_dim = class_dim
self.style_dim = style_dim
self.likelihood_name = lhood_name
self.likelihood = self.get_likelihood(lhood_name)
def get_likelihood(self, name):
if name == 'laplace':
pz = dist.Laplace
elif name == 'bernoulli':
pz = dist.Bernoulli
elif name == 'normal':
pz = dist.Normal
elif name == 'categorical':
pz = dist.OneHotCategorical
else:
print('likelihood not implemented')
pz = None
return pz
def calc_log_prob(self, out_dist, target, norm_value):
log_prob = out_dist.log_prob(target).sum()
mean_val_logprob = log_prob / norm_value
return mean_val_logprob
def save_networks(self, dir_checkpoints):
torch.save(self.encoder.state_dict(), os.path.join(dir_checkpoints,
'enc_' + self.name))
torch.save(self.decoder.state_dict(), os.path.join(dir_checkpoints,
'dec_' + self.name))
| 1,414 | 27.877551 | 79 | py |
BraVL | BraVL-master/BraVL_EEG/brain_image_text/experiment.py | import os
import numpy as np
import itertools
import scipy.io as sio
import torch
import torch.optim as optim
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset
from modalities.Modality import Modality
from brain_image_text.networks.VAEtrimodal import VAEtrimodal,VAEbimodal
from brain_image_text.networks.QNET import QNet
from brain_image_text.networks.MLP_Brain import EncoderBrain, DecoderBrain
from brain_image_text.networks.MLP_Image import EncoderImage, DecoderImage
from brain_image_text.networks.MLP_Text import EncoderText, DecoderText
from utils.BaseExperiment import BaseExperiment
class BrainImageText(BaseExperiment):
def __init__(self, flags, alphabet):
super().__init__(flags)
self.modalities = self.set_modalities()
self.num_modalities = len(self.modalities.keys())
self.subsets = self.set_subsets()
self.dataset_train = None
self.dataset_test = None
self.set_dataset()
self.mm_vae = self.set_model()
self.optimizer = None
self.rec_weights = self.set_rec_weights()
self.style_weights = self.set_style_weights()
self.Q1,self.Q2,self.Q3 = self.set_Qmodel()
self.eval_metric = accuracy_score
self.labels = ['digit']
def set_model(self):
model = VAEtrimodal(self.flags, self.modalities, self.subsets)
model = model.to(self.flags.device)
return model
def set_modalities(self):
mod1 = Modality('brain', EncoderBrain(self.flags), DecoderBrain(self.flags),
self.flags.class_dim, self.flags.style_m1_dim, 'normal')
mod2 = Modality('image', EncoderImage(self.flags), DecoderImage(self.flags),
self.flags.class_dim, self.flags.style_m2_dim, 'normal')
mod3 = Modality('text', EncoderText(self.flags), DecoderText(self.flags),
self.flags.class_dim, self.flags.style_m3_dim, 'normal')
mods = {mod1.name: mod1, mod2.name: mod2, mod3.name: mod3}
return mods
def set_dataset(self):
# load data
data_dir_root = self.flags.data_dir_root
sbj = self.flags.sbj
image_model = self.flags.image_model
text_model = self.flags.text_model
roi = self.flags.roi
brain_dir = os.path.join(data_dir_root, 'brain_feature', roi, sbj)
image_dir_train = os.path.join(data_dir_root, 'visual_feature/ThingsTrain', image_model, sbj)
image_dir_test = os.path.join(data_dir_root, 'visual_feature/ThingsTest', image_model, sbj)
text_dir_train = os.path.join(data_dir_root, 'textual_feature/ThingsTrain/text', text_model, sbj)
text_dir_test = os.path.join(data_dir_root, 'textual_feature/ThingsTest/text', text_model, sbj)
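# the scalar multipliers below (2.0 / 50.0) appear to roughly equalize the
# dynamic range of the three modalities before they enter the VAEs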
train_brain = sio.loadmat(os.path.join(brain_dir, 'eeg_train_data_within.mat'))['data'].astype('double') * 2.0
# train_brain = sio.loadmat(os.path.join(brain_dir, 'eeg_train_data_between.mat'))['data'].astype('double')*2.0
train_brain = train_brain[:,:,27:60] # 70ms-400ms
train_brain = np.reshape(train_brain, (train_brain.shape[0], -1))
train_image = sio.loadmat(os.path.join(image_dir_train, 'feat_pca_train.mat'))['data'].astype('double')*50.0
train_text = sio.loadmat(os.path.join(text_dir_train, 'text_feat_train.mat'))['data'].astype('double')*2.0
train_label = sio.loadmat(os.path.join(brain_dir, 'eeg_train_data_within.mat'))['class_idx'].T.astype('int')
train_image = train_image[:,0:100]
# test_brain = sio.loadmat(os.path.join(brain_dir, 'eeg_test_data_unique.mat'))['data'].astype('double')*2.0
# test_brain = test_brain[:, :, 27:60]
# test_brain = np.reshape(test_brain, (test_brain.shape[0], -1))
# test_image = sio.loadmat(os.path.join(image_dir_test, 'feat_pca_test_unique.mat'))['data'].astype('double')*50.0
# test_text = sio.loadmat(os.path.join(text_dir_test, 'text_feat_test_unique.mat'))['data'].astype('double')*2.0
# test_label = sio.loadmat(os.path.join(brain_dir, 'eeg_test_data_unique.mat'))['class_idx'].T.astype('int')
# train_image = train_image[:, 0:100]
test_brain = sio.loadmat(os.path.join(brain_dir, 'eeg_test_data.mat'))['data'].astype('double')*2.0
test_brain = test_brain[:, :, 27:60]
test_brain = np.reshape(test_brain, (test_brain.shape[0], -1))
test_image = sio.loadmat(os.path.join(image_dir_test, 'feat_pca_test.mat'))['data'].astype('double')*50.0
test_text = sio.loadmat(os.path.join(text_dir_test, 'text_feat_test.mat'))['data'].astype('double')*2.0
test_label = sio.loadmat(os.path.join(brain_dir, 'eeg_test_data.mat'))['class_idx'].T.astype('int')
test_image = test_image[:, 0:100]
if self.flags.aug_type == 'image_text_ilsvrc2012_val':
image_dir_aug = os.path.join(data_dir_root, 'visual_feature/Aug_ILSVRC2012_val', image_model, sbj)
text_dir_aug = os.path.join(data_dir_root, 'textual_feature/Aug_ILSVRC2012_val/text', text_model, sbj)
aug_image = sio.loadmat(os.path.join(image_dir_aug, 'feat_pca_aug_ilsvrc2012_val.mat'))['data'].astype('double')
aug_image = aug_image[:, 0:100]
aug_text = sio.loadmat(os.path.join(text_dir_aug, 'text_feat_aug_ilsvrc2012_val.mat'))['data'].astype('double')
aug_image = torch.from_numpy(aug_image)
aug_text = torch.from_numpy(aug_text)
print('aug_image=', aug_image.shape)
print('aug_text=', aug_text.shape)
elif self.flags.aug_type == 'no_aug':
print('no augmentation')
        if self.flags.test_type == 'normal':
            train_label_stratify = train_label
            # Split all three modalities and the labels in a single call so the
            # train/val partition stays aligned across brain, image and text
            # (three independent train_test_split calls would shuffle each
            # modality differently and destroy the sample correspondence).
            train_brain, val_brain, train_image, val_image, train_text, val_text, train_label, val_label = \
                train_test_split(train_brain, train_image, train_text, train_label_stratify,
                                 test_size=0.2, stratify=train_label_stratify)
val_brain = torch.from_numpy(val_brain)
val_image = torch.from_numpy(val_image)
val_text = torch.from_numpy(val_text)
val_label = torch.from_numpy(val_label)
print('val_brain=', val_brain.shape)
print('val_image=', val_image.shape)
print('val_text=', val_text.shape)
train_brain = torch.from_numpy(train_brain)
test_brain = torch.from_numpy(test_brain)
train_image = torch.from_numpy(train_image)
test_image = torch.from_numpy(test_image)
train_text = torch.from_numpy(train_text)
test_text = torch.from_numpy(test_text)
train_label = torch.from_numpy(train_label)
test_label = torch.from_numpy(test_label)
print('train_brain=', train_brain.shape)
print('train_image=', train_image.shape)
print('train_text=', train_text.shape)
print('test_brain=', test_brain.shape)
print('test_image=', test_image.shape)
print('test_text=', test_text.shape)
self.m1_dim = train_brain.shape[1]
self.m2_dim = train_image.shape[1]
self.m3_dim = train_text.shape[1]
train_dataset = torch.utils.data.TensorDataset(train_brain, train_image, train_text, train_label)
        test_dataset = torch.utils.data.TensorDataset(test_brain, test_image, test_text, test_label)
self.dataset_train = train_dataset
self.dataset_test = test_dataset
if self.flags.test_type == 'normal':
val_dataset = torch.utils.data.TensorDataset(val_brain, val_image, val_text, val_label)
self.dataset_val = val_dataset
if 'image_text' in self.flags.aug_type:
aug_dataset = torch.utils.data.TensorDataset(aug_image, aug_text)
self.dataset_aug = aug_dataset
elif self.flags.aug_type == 'no_aug':
print('no augmentation')
def set_optimizer(self):
optimizer = optim.Adam(
itertools.chain(self.mm_vae.parameters(),self.Q1.parameters(),self.Q2.parameters(),self.Q3.parameters()),
lr=self.flags.initial_learning_rate,
betas=(self.flags.beta_1, self.flags.beta_2))
optimizer_mvae = optim.Adam(
list(self.mm_vae.parameters()),
lr=self.flags.initial_learning_rate,
betas=(self.flags.beta_1, self.flags.beta_2))
optimizer_Qnet = optim.Adam(
itertools.chain(self.Q1.parameters(),self.Q2.parameters(),self.Q3.parameters()),
lr=self.flags.initial_learning_rate,
betas=(self.flags.beta_1, self.flags.beta_2))
self.optimizer = {'mvae':optimizer_mvae,'Qnet':optimizer_Qnet,'all':optimizer}
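        # Note: StepLR with gamma=1.0 leaves the learning rate unchanged, so the
        # schedulers below are effectively constant-rate placeholders.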
scheduler_mvae = optim.lr_scheduler.StepLR(optimizer_mvae, step_size=20, gamma=1.0)
scheduler_Qnet = optim.lr_scheduler.StepLR(optimizer_Qnet, step_size=20, gamma=1.0)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=1.0)
self.scheduler = {'mvae': scheduler_mvae, 'Qnet': scheduler_Qnet, 'all': scheduler}
def set_Qmodel(self):
Q1 = QNet(input_dim=self.flags.m1_dim, latent_dim=self.flags.class_dim).cuda()
Q2 = QNet(input_dim=self.flags.m2_dim, latent_dim=self.flags.class_dim).cuda()
Q3 = QNet(input_dim=self.flags.m3_dim, latent_dim=self.flags.class_dim).cuda()
        return Q1, Q2, Q3
def set_rec_weights(self):
weights = dict()
weights['brain'] = self.flags.beta_m1_rec
weights['image'] = self.flags.beta_m2_rec
weights['text'] = self.flags.beta_m3_rec
return weights
def set_style_weights(self):
weights = dict()
weights['brain'] = self.flags.beta_m1_style
weights['image'] = self.flags.beta_m2_style
weights['text'] = self.flags.beta_m3_style
return weights
| 10,118 | 50.106061 | 158 | py |
BraVL | BraVL-master/BraVL_EEG/brain_image_text/networks/MLP_Text.py |
import torch
import torch.nn as nn
class EncoderText(nn.Module):
    def __init__(self, flags):
        super(EncoderText, self).__init__()
        self.flags = flags
        self.hidden_dim = 256
        modules = []
        modules.append(nn.Sequential(nn.Linear(flags.m3_dim, self.hidden_dim), nn.ReLU(True)))
        modules.extend([nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim), nn.ReLU(True))
                        for _ in range(flags.num_hidden_layers - 1)])
        self.enc = nn.Sequential(*modules)
        self.relu = nn.ReLU()
        self.hidden_mu = nn.Linear(in_features=self.hidden_dim, out_features=flags.class_dim, bias=True)
        self.hidden_logvar = nn.Linear(in_features=self.hidden_dim, out_features=flags.class_dim, bias=True)
    def forward(self, x):
        h = self.enc(x)
        h = h.view(h.size(0), -1)
        latent_space_mu = self.hidden_mu(h)
        latent_space_logvar = self.hidden_logvar(h)
        latent_space_mu = latent_space_mu.view(latent_space_mu.size(0), -1)
        latent_space_logvar = latent_space_logvar.view(latent_space_logvar.size(0), -1)
        # No modality-specific style latents are modeled, hence the two leading None values.
        return None, None, latent_space_mu, latent_space_logvar
class DecoderText(nn.Module):
    def __init__(self, flags):
        super(DecoderText, self).__init__()
        self.flags = flags
        self.hidden_dim = 256
        modules = []
        modules.append(nn.Sequential(nn.Linear(flags.class_dim, self.hidden_dim), nn.ReLU(True)))
        modules.extend([nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim), nn.ReLU(True))
                        for _ in range(flags.num_hidden_layers - 1)])
        self.dec = nn.Sequential(*modules)
        self.fc3 = nn.Linear(self.hidden_dim, flags.m3_dim)
        self.relu = nn.ReLU()
    def forward(self, style_latent_space, class_latent_space):
        z = class_latent_space
        x_hat = self.dec(z)
        x_hat = self.fc3(x_hat)
        # Reconstruction mean plus a fixed likelihood scale of 0.75.
        return x_hat, torch.tensor(0.75).to(z.device) | 1,996 | 36.679245 | 108 | py |
BraVL | BraVL-master/BraVL_EEG/brain_image_text/networks/VAEtrimodal.py | import os
import torch
import torch.nn as nn
from utils import utils
from utils.BaseMMVae import BaseMMVae
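# Thin wrappers: all fusion and inference logic lives in BaseMMVae; these
# subclasses only instantiate it for the tri- and bi-modal settings.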
class VAEtrimodal(BaseMMVae, nn.Module):
def __init__(self, flags, modalities, subsets):
super().__init__(flags, modalities, subsets)
class VAEbimodal(BaseMMVae, nn.Module):
def __init__(self, flags, modalities, subsets):
super().__init__(flags, modalities, subsets)
| 406 | 19.35 | 52 | py |
BraVL | BraVL-master/BraVL_EEG/brain_image_text/networks/MLP_Image.py |
import torch
import torch.nn as nn
class EncoderImage(nn.Module):
    def __init__(self, flags):
        super(EncoderImage, self).__init__()
        self.flags = flags
        self.hidden_dim = 256
        modules = []
        modules.append(nn.Sequential(nn.Linear(flags.m2_dim, self.hidden_dim), nn.ReLU(True)))
        modules.extend([nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim), nn.ReLU(True))
                        for _ in range(flags.num_hidden_layers - 1)])
        self.enc = nn.Sequential(*modules)
        self.relu = nn.ReLU()
        self.hidden_mu = nn.Linear(in_features=self.hidden_dim, out_features=flags.class_dim, bias=True)
        self.hidden_logvar = nn.Linear(in_features=self.hidden_dim, out_features=flags.class_dim, bias=True)
    def forward(self, x):
        h = self.enc(x)
        h = h.view(h.size(0), -1)
        latent_space_mu = self.hidden_mu(h)
        latent_space_logvar = self.hidden_logvar(h)
        latent_space_mu = latent_space_mu.view(latent_space_mu.size(0), -1)
        latent_space_logvar = latent_space_logvar.view(latent_space_logvar.size(0), -1)
        # No modality-specific style latents are modeled, hence the two leading None values.
        return None, None, latent_space_mu, latent_space_logvar
class DecoderImage(nn.Module):
    def __init__(self, flags):
        super(DecoderImage, self).__init__()
        self.flags = flags
        self.hidden_dim = 256
        modules = []
        modules.append(nn.Sequential(nn.Linear(flags.class_dim, self.hidden_dim), nn.ReLU(True)))
        modules.extend([nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim), nn.ReLU(True))
                        for _ in range(flags.num_hidden_layers - 1)])
        self.dec = nn.Sequential(*modules)
        self.fc3 = nn.Linear(self.hidden_dim, flags.m2_dim, bias=True)
    def forward(self, style_latent_space, class_latent_space):
        z = class_latent_space
        x_hat = self.dec(z)
        x_hat = self.fc3(x_hat)
        # Reconstruction mean plus a fixed likelihood scale of 0.75.
        return x_hat, torch.tensor(0.75).to(z.device) | 1,980 | 37.096154 | 108 | py |
BraVL | BraVL-master/BraVL_EEG/brain_image_text/networks/QNET.py | import torch.nn as nn
import torch.nn.functional as F
import torch
class QNet(nn.Module):
    # Auxiliary network q(z | x_hat): given a modality's reconstruction, it
    # predicts the shared latent code; used to estimate the intra-modality
    # mutual-information terms during training.
    def __init__(self, input_dim, latent_dim):
        super(QNet, self).__init__()
        self.fc1 = nn.Linear(input_dim, 512)
        self.fc21 = nn.Linear(512, latent_dim)
        self.fc22 = nn.Linear(512, latent_dim)
    def forward(self, x):
        e = F.relu(self.fc1(x))
        mu = self.fc21(e)
        lv = self.fc22(e)
        # return mu, lv.mul(0.5).exp_()
        # The learned log-variance head is unused; a fixed std of 0.75 is returned instead.
        return mu, torch.tensor(0.75).cuda() | 504 | 30.5625 | 46 | py |
BraVL | BraVL-master/BraVL_EEG/brain_image_text/networks/MLP_Brain.py |
import torch
import torch.nn as nn
class EncoderBrain(nn.Module):
    def __init__(self, flags):
        super(EncoderBrain, self).__init__()
        self.flags = flags
        self.hidden_dim = 256
        modules = []
        modules.append(nn.Sequential(nn.Linear(flags.m1_dim, self.hidden_dim), nn.ReLU(True)))
        modules.extend([nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim), nn.ReLU(True))
                        for _ in range(flags.num_hidden_layers - 1)])
        self.enc = nn.Sequential(*modules)
        self.relu = nn.ReLU()
        self.hidden_mu = nn.Linear(in_features=self.hidden_dim, out_features=flags.class_dim, bias=True)
        self.hidden_logvar = nn.Linear(in_features=self.hidden_dim, out_features=flags.class_dim, bias=True)
    def forward(self, x):
        h = self.enc(x)
        h = h.view(h.size(0), -1)
        latent_space_mu = self.hidden_mu(h)
        latent_space_logvar = self.hidden_logvar(h)
        latent_space_mu = latent_space_mu.view(latent_space_mu.size(0), -1)
        latent_space_logvar = latent_space_logvar.view(latent_space_logvar.size(0), -1)
        # No modality-specific style latents are modeled, hence the two leading None values.
        return None, None, latent_space_mu, latent_space_logvar
class DecoderBrain(nn.Module):
    def __init__(self, flags):
        super(DecoderBrain, self).__init__()
        self.flags = flags
        self.hidden_dim = 256
        modules = []
        modules.append(nn.Sequential(nn.Linear(flags.class_dim, self.hidden_dim), nn.ReLU(True)))
        modules.extend([nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim), nn.ReLU(True))
                        for _ in range(flags.num_hidden_layers - 1)])
        self.dec = nn.Sequential(*modules)
        self.fc3 = nn.Linear(self.hidden_dim, flags.m1_dim)
        self.relu = nn.ReLU()
    def forward(self, style_latent_space, class_latent_space):
        z = class_latent_space
        x_hat = self.dec(z)
        x_hat = self.fc3(x_hat)
        # Reconstruction mean plus a fixed likelihood scale of 0.75.
        return x_hat, torch.tensor(0.75).to(z.device)
| 2,001 | 36.074074 | 108 | py |
BraVL | BraVL-master/BraVL_EEG/divergence_measures/mm_div.py |
import torch
import torch.nn as nn
from divergence_measures.kl_div import calc_kl_divergence
from divergence_measures.kl_div import calc_kl_divergence_lb_gauss_mixture
from divergence_measures.kl_div import calc_kl_divergence_ub_gauss_mixture
from divergence_measures.kl_div import calc_entropy_gauss
from utils.utils import reweight_weights
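# Product-of-Experts (PoE) fusion for diagonal Gaussians: with expert
# precisions T_i = 1/var_i, the joint posterior has variance 1/sum_i(T_i) and
# mean (sum_i mu_i*T_i)/sum_i(T_i); alpha_poe below is the weighted variant.
# The standard-normal prior expert is appended by the caller
# (BaseMMVae.poe_fusion) before these functions are invoked.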
def poe(mu, logvar, eps=1e-8):
var = torch.exp(logvar) + eps
# precision of i-th Gaussian expert at point x
T = 1. / var
pd_mu = torch.sum(mu * T, dim=0) / torch.sum(T, dim=0)
pd_var = 1. / torch.sum(T, dim=0)
pd_logvar = torch.log(pd_var)
return pd_mu, pd_logvar
def alpha_poe(alpha, mu, logvar, eps=1e-8):
var = torch.exp(logvar) + eps
# precision of i-th Gaussian expert at point x
if var.dim() == 3:
alpha_expanded = alpha.unsqueeze(-1).unsqueeze(-1);
elif var.dim() == 4:
alpha_expanded = alpha.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1);
T = 1 / var;
pd_var = 1. / torch.sum(alpha_expanded * T, dim=0)
pd_mu = pd_var * torch.sum(alpha_expanded * mu * T, dim=0)
pd_logvar = torch.log(pd_var)
return pd_mu, pd_logvar;
def calc_alphaJSD_modalities_mixture(m1_mu, m1_logvar, m2_mu, m2_logvar, flags):
klds = torch.zeros(2);
entropies_mixture = torch.zeros(2);
w_modalities = torch.Tensor(flags.alpha_modalities[1:]);
if flags.cuda:
w_modalities = w_modalities.cuda();
klds = klds.cuda();
entropies_mixture = entropies_mixture.cuda();
w_modalities = reweight_weights(w_modalities);
mus = [m1_mu, m2_mu]
logvars = [m1_logvar, m2_logvar]
for k in range(0, len(mus)):
ent = calc_entropy_gauss(flags, logvars[k], norm_value=flags.batch_size);
# print('entropy: ' + str(ent))
# print('lb: ' )
kld_lb = calc_kl_divergence_lb_gauss_mixture(flags, k, mus[k], logvars[k], mus, logvars,
norm_value=flags.batch_size);
print('kld_lb: ' + str(kld_lb))
# print('ub: ')
kld_ub = calc_kl_divergence_ub_gauss_mixture(flags, k, mus[k], logvars[k], mus, logvars, ent,
norm_value=flags.batch_size);
print('kld_ub: ' + str(kld_ub))
# kld_mean = (kld_lb+kld_ub)/2;
entropies_mixture[k] = ent.clone();
klds[k] = 0.5*(kld_lb + kld_ub);
# klds[k] = kld_ub;
summed_klds = (w_modalities * klds).sum();
# print('summed klds: ' + str(summed_klds));
return summed_klds, klds, entropies_mixture;
def calc_alphaJSD_modalities(flags, mus, logvars, weights, normalization=None):
num_mods = mus.shape[0];
num_samples = mus.shape[1];
alpha_mu, alpha_logvar = alpha_poe(weights, mus, logvars)
if normalization is not None:
klds = torch.zeros(num_mods);
else:
klds = torch.zeros(num_mods, num_samples);
klds = klds.to(flags.device);
for k in range(0, num_mods):
kld = calc_kl_divergence(mus[k,:,:], logvars[k,:,:], alpha_mu,
alpha_logvar, norm_value=normalization);
if normalization is not None:
klds[k] = kld;
else:
klds[k,:] = kld;
if normalization is None:
weights = weights.unsqueeze(1).repeat(1, num_samples);
group_div = (weights * klds).sum(dim=0);
return group_div, klds, [alpha_mu, alpha_logvar];
def calc_group_divergence_moe(flags, mus, logvars, weights, normalization=None):
num_mods = mus.shape[0];
num_samples = mus.shape[1];
if normalization is not None:
klds = torch.zeros(num_mods);
else:
klds = torch.zeros(num_mods, num_samples);
klds = klds.to(flags.device);
weights = weights.to(flags.device);
for k in range(0, num_mods):
kld_ind = calc_kl_divergence(mus[k,:,:], logvars[k,:,:],
norm_value=normalization);
if normalization is not None:
klds[k] = kld_ind;
else:
klds[k,:] = kld_ind;
if normalization is None:
weights = weights.unsqueeze(1).repeat(1, num_samples);
group_div = (weights*klds).sum(dim=0);
return group_div, klds;
def calc_group_divergence_poe(flags, mus, logvars, norm=None):
num_mods = mus.shape[0];
poe_mu, poe_logvar = poe(mus, logvars)
kld_poe = calc_kl_divergence(poe_mu, poe_logvar, norm_value=norm);
klds = torch.zeros(num_mods).to(flags.device);
for k in range(0, num_mods):
kld_ind = calc_kl_divergence(mus[k,:,:], logvars[k,:,:],
norm_value=norm);
klds[k] = kld_ind;
return kld_poe, klds, [poe_mu, poe_logvar];
def calc_modality_divergence(m1_mu, m1_logvar, m2_mu, m2_logvar, flags):
if flags.modality_poe:
kld_batch = calc_kl_divergence(m1_mu, m1_logvar, m2_mu, m2_logvar, norm_value=flags.batch_size).sum();
return kld_batch;
else:
uniform_mu = torch.zeros(m1_mu.shape)
uniform_logvar = torch.zeros(m1_logvar.shape)
klds = torch.zeros(3,3)
klds_modonly = torch.zeros(2,2)
if flags.cuda:
klds = klds.cuda();
klds_modonly = klds_modonly.cuda();
uniform_mu = uniform_mu.cuda();
uniform_logvar = uniform_logvar.cuda();
mus = [uniform_mu, m1_mu, m2_mu]
logvars = [uniform_logvar, m1_logvar, m2_logvar]
for i in range(1, len(mus)): # CAREFUL: index starts from one, not zero
for j in range(0, len(mus)):
kld = calc_kl_divergence(mus[i], logvars[i], mus[j], logvars[j], norm_value=flags.batch_size);
klds[i,j] = kld;
if i >= 1 and j >= 1:
klds_modonly[i-1,j-1] = kld;
klds = klds.sum()/(len(mus)*(len(mus)-1))
klds_modonly = klds_modonly.sum()/((len(mus)-1)*(len(mus)-1));
return [klds, klds_modonly];
| 5,927 | 38 | 110 | py |
BraVL | BraVL-master/BraVL_EEG/divergence_measures/kl_div.py | import math
import torch
from utils.utils import reweight_weights
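# Closed-form KL divergence between diagonal Gaussians,
# KL(N(mu0, e^logvar0) || N(mu1, e^logvar1)); when mu1/logvar1 are omitted the
# reference distribution defaults to the standard normal N(0, I).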
def calc_kl_divergence(mu0, logvar0, mu1=None, logvar1=None, norm_value=None):
if mu1 is None or logvar1 is None:
KLD = -0.5 * torch.sum(1 - logvar0.exp() - mu0.pow(2) + logvar0)
else:
KLD = -0.5 * (torch.sum(1 - logvar0.exp()/logvar1.exp() - (mu0-mu1).pow(2)/logvar1.exp() + logvar0 - logvar1))
if norm_value is not None:
KLD = KLD / float(norm_value);
return KLD
def calc_gaussian_scaling_factor(PI, mu1, logvar1, mu2=None, logvar2=None, norm_value=None):
d = mu1.shape[1];
if mu2 is None or logvar2 is None:
# print('S_11: ' + str(torch.sum(1/((2*PI*(logvar1.exp() + 1)).pow(0.5)))))
# print('S_12: ' + str(torch.sum(torch.exp(-0.5*(mu1.pow(2)/(logvar1.exp()+1))))))
S_pre = (1/(2*PI).pow(d/2))*torch.sum((logvar1.exp() + 1), dim=1).pow(0.5);
S = S_pre*torch.sum((-0.5*(mu1.pow(2)/(logvar1.exp()+1))).exp(), dim=1);
S = torch.sum(S)
else:
# print('S_21: ' + str(torch.sum(1/((2*PI).pow(d/2)*(logvar1.exp()+logvar2.exp()).pow(0.5)))));
# print('S_22: ' + str(torch.sum(torch.exp(-0.5 * ((mu1 - mu2).pow(2) / (logvar1.exp() + logvar2.exp()))))));
S_pre = torch.sum(1/((2*PI).pow(d/2)*(logvar1.exp()+logvar2.exp())), dim=1).pow(0.5)
S = S_pre*torch.sum(torch.exp(-0.5*((mu1-mu2).pow(2)/(logvar1.exp()+logvar2.exp()))), dim=1);
S = torch.sum(S)
if norm_value is not None:
S = S / float(norm_value);
# print('S: ' + str(S))
return S
def calc_gaussian_scaling_factor_self(PI, logvar1, norm_value=None):
d = logvar1.shape[1];
S = (1/(2*PI).pow(d/2))*torch.sum(logvar1.exp(), dim=1).pow(0.5);
S = torch.sum(S);
# S = torch.sum(1 / (2*(PI*torch.exp(logvar1)).pow(0.5)));
if norm_value is not None:
S = S / float(norm_value);
# print('S self: ' + str(S))
return S
#def calc_kl_divergence_lb_gauss_mixture(flags, index, mu1, logvar1, mus, logvars, norm_value=None):
# klds = torch.zeros(mus.shape[0]+1)
# if flags.cuda:
# klds = klds.cuda();
#
# klds[0] = calc_kl_divergence(mu1, logvar1, norm_value=norm_value);
# for k in range(0, mus.shape[0]):
# if k == index:
# kld = 0.0;
# else:
# kld = calc_kl_divergence(mu1, logvar1, mus[k], logvars[k], norm_value=norm_value);
# klds[k+1] = kld;
# kld_mixture = klds.mean();
# return kld_mixture;
def calc_kl_divergence_lb_gauss_mixture(flags, index, mu1, logvar1, mus, logvars, norm_value=None):
PI = torch.Tensor([math.pi]);
w_modalities = torch.Tensor(flags.alpha_modalities);
if flags.cuda:
PI = PI.cuda();
w_modalities = w_modalities.cuda();
w_modalities = reweight_weights(w_modalities);
denom = w_modalities[0]*calc_gaussian_scaling_factor(PI, mu1, logvar1, norm_value=norm_value);
for k in range(0, len(mus)):
if index == k:
denom += w_modalities[k+1]*calc_gaussian_scaling_factor_self(PI, logvar1, norm_value=norm_value);
else:
denom += w_modalities[k+1]*calc_gaussian_scaling_factor(PI, mu1, logvar1, mus[k], logvars[k], norm_value=norm_value)
lb = -torch.log(denom);
return lb;
def calc_kl_divergence_ub_gauss_mixture(flags, index, mu1, logvar1, mus, logvars, entropy, norm_value=None):
PI = torch.Tensor([math.pi]);
w_modalities = torch.Tensor(flags.alpha_modalities);
if flags.cuda:
PI = PI.cuda();
w_modalities = w_modalities.cuda();
w_modalities = reweight_weights(w_modalities);
nom = calc_gaussian_scaling_factor_self(PI, logvar1, norm_value=norm_value);
kl_div = calc_kl_divergence(mu1, logvar1, norm_value=norm_value);
print('kl div uniform: ' + str(kl_div))
denom = w_modalities[0]*torch.min(torch.Tensor([kl_div.exp(), 100000]));
for k in range(0, len(mus)):
if index == k:
denom += w_modalities[k+1];
else:
kl_div = calc_kl_divergence(mu1, logvar1, mus[k], logvars[k], norm_value=norm_value)
print('kl div ' + str(k) + ': ' + str(kl_div))
denom += w_modalities[k+1]*torch.min(torch.Tensor([kl_div.exp(), 100000]));
ub = torch.log(nom) - torch.log(denom) + entropy;
return ub;
def calc_entropy_gauss(flags, logvar, norm_value=None):
PI = torch.Tensor([math.pi]);
if flags.cuda:
PI = PI.cuda();
ent = 0.5*torch.sum(torch.log(2*PI) + logvar + 1)
if norm_value is not None:
ent = ent / norm_value;
return ent; | 4,561 | 40.099099 | 128 | py |
BraVL | BraVL-master/BraVL_EEG/utils/BaseMMVae.py | from abc import ABC, abstractmethod
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.distributions as dist
from divergence_measures.mm_div import calc_alphaJSD_modalities
from divergence_measures.mm_div import calc_group_divergence_moe
from divergence_measures.mm_div import poe
from utils import utils
class BaseMMVae(ABC, nn.Module):
def __init__(self, flags, modalities, subsets):
super(BaseMMVae, self).__init__()
self.num_modalities = len(modalities.keys());
self.flags = flags;
self.modalities = modalities;
self.subsets = subsets;
self.set_fusion_functions();
encoders = nn.ModuleDict();
decoders = nn.ModuleDict();
lhoods = dict();
for m, m_key in enumerate(sorted(modalities.keys())):
encoders[m_key] = modalities[m_key].encoder;
decoders[m_key] = modalities[m_key].decoder;
lhoods[m_key] = modalities[m_key].likelihood;
self.encoders = encoders;
self.decoders = decoders;
self.lhoods = lhoods;
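    # Reparameterization trick: z = mu + eps * std with eps ~ N(0, I), keeping
    # the sampling step differentiable w.r.t. mu and logvar.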
def reparameterize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
eps = Variable(std.data.new(std.size()).normal_())
return eps.mul(std).add_(mu)
def set_fusion_functions(self):
weights = utils.reweight_weights(torch.Tensor(self.flags.alpha_modalities));
self.weights = weights.to(self.flags.device);
if self.flags.modality_moe:
self.modality_fusion = self.moe_fusion;
            self.fusion_condition = self.fusion_condition_moe;
            self.calc_joint_divergence = self.divergence_static_prior;
elif self.flags.modality_jsd:
self.modality_fusion = self.moe_fusion;
self.fusion_condition = self.fusion_condition_moe;
self.calc_joint_divergence = self.divergence_dynamic_prior;
elif self.flags.modality_poe:
self.modality_fusion = self.poe_fusion;
self.fusion_condition = self.fusion_condition_poe;
self.calc_joint_divergence = self.divergence_static_prior;
        elif self.flags.joint_elbo:
            self.modality_fusion = self.poe_fusion;
            self.fusion_condition = self.fusion_condition_joint;
            self.calc_joint_divergence = self.divergence_static_prior;
        else:
            # Fail early rather than hitting an AttributeError at forward time.
            raise ValueError('one of modality_moe, modality_jsd, modality_poe '
                             'or joint_elbo must be set');
def divergence_static_prior(self, mus, logvars, weights=None):
if weights is None:
weights=self.weights;
weights = weights.clone();
weights = utils.reweight_weights(weights);
div_measures = calc_group_divergence_moe(self.flags,
mus,
logvars,
weights,
normalization=self.flags.batch_size);
divs = dict();
        divs['joint_divergence'] = div_measures[0];
        divs['individual_divs'] = div_measures[1];
        divs['dyn_prior'] = None;
return divs;
def divergence_dynamic_prior(self, mus, logvars, weights=None):
if weights is None:
weights = self.weights;
div_measures = calc_alphaJSD_modalities(self.flags,
mus,
logvars,
weights,
normalization=self.flags.batch_size);
divs = dict();
divs['joint_divergence'] = div_measures[0];
divs['individual_divs'] = div_measures[1];
divs['dyn_prior'] = div_measures[2];
return divs;
def moe_fusion(self, mus, logvars, weights=None):
if weights is None:
weights = self.weights;
weights = utils.reweight_weights(weights);
#mus = torch.cat(mus, dim=0);
#logvars = torch.cat(logvars, dim=0);
mu_moe, logvar_moe = utils.mixture_component_selection(self.flags,
mus,
logvars,
weights);
return [mu_moe, logvar_moe];
def poe_fusion(self, mus, logvars, weights=None):
if (self.flags.modality_poe or mus.shape[0] ==
len(self.modalities.keys())):
num_samples = mus[0].shape[0];
mus = torch.cat((mus, torch.zeros(1, num_samples,
self.flags.class_dim).to(self.flags.device)),
dim=0);
logvars = torch.cat((logvars, torch.zeros(1, num_samples,
self.flags.class_dim).to(self.flags.device)),
dim=0);
#mus = torch.cat(mus, dim=0);
#logvars = torch.cat(logvars, dim=0);
mu_poe, logvar_poe = poe(mus, logvars);
return [mu_poe, logvar_poe];
def fusion_condition_moe(self, subset, input_batch=None):
if len(subset) == 1:
return True;
else:
return False;
def fusion_condition_poe(self, subset, input_batch=None):
if len(subset) == len(input_batch.keys()):
return True;
else:
return False;
def fusion_condition_joint(self, subset, input_batch=None):
return True;
def forward(self, input_batch,K=1):
latents = self.inference(input_batch);
results = dict();
results['latents'] = latents;
results['group_distr'] = latents['joint'];
class_embeddings = self.reparameterize(latents['joint'][0],
latents['joint'][1]);
#### For CUBO ####
qz_x = dist.Normal(latents['joint'][0],latents['joint'][1].mul(0.5).exp_())
zss = qz_x.rsample(torch.Size([K]))
div = self.calc_joint_divergence(latents['mus'],
latents['logvars'],
latents['weights']);
for k, key in enumerate(div.keys()):
results[key] = div[key];
results_rec = dict();
px_zs = dict();
enc_mods = latents['modalities'];
for m, m_key in enumerate(self.modalities.keys()):
if m_key in input_batch.keys():
m_s_mu, m_s_logvar = enc_mods[m_key + '_style'];
if self.flags.factorized_representation:
m_s_embeddings = self.reparameterize(mu=m_s_mu, logvar=m_s_logvar);
else:
m_s_embeddings = None;
m_rec = self.lhoods[m_key](*self.decoders[m_key](m_s_embeddings, class_embeddings));
px_z = self.lhoods[m_key](*self.decoders[m_key](m_s_embeddings, zss));
results_rec[m_key] = m_rec;
px_zs[m_key] = px_z
results['rec'] = results_rec;
results['class_embeddings'] = class_embeddings
results['qz_x'] = qz_x
results['zss'] = zss
results['px_zs'] = px_zs
return results;
def encode(self, input_batch):
latents = dict();
for m, m_key in enumerate(self.modalities.keys()):
if m_key in input_batch.keys():
i_m = input_batch[m_key];
l = self.encoders[m_key](i_m)
latents[m_key + '_style'] = l[:2]
latents[m_key] = l[2:]
else:
latents[m_key + '_style'] = [None, None];
latents[m_key] = [None, None];
return latents;
def inference(self, input_batch, num_samples=None):
if num_samples is None:
num_samples = self.flags.batch_size;
latents = dict();
enc_mods = self.encode(input_batch);
latents['modalities'] = enc_mods;
mus = torch.Tensor().to(self.flags.device);
logvars = torch.Tensor().to(self.flags.device);
distr_subsets = dict();
for k, s_key in enumerate(self.subsets.keys()):
if s_key != '':
mods = self.subsets[s_key];
mus_subset = torch.Tensor().to(self.flags.device);
logvars_subset = torch.Tensor().to(self.flags.device);
mods_avail = True
for m, mod in enumerate(mods):
if mod.name in input_batch.keys():
mus_subset = torch.cat((mus_subset,
enc_mods[mod.name][0].unsqueeze(0)),
dim=0);
logvars_subset = torch.cat((logvars_subset,
enc_mods[mod.name][1].unsqueeze(0)),
dim=0);
else:
mods_avail = False;
if mods_avail:
weights_subset = ((1/float(len(mus_subset)))*
torch.ones(len(mus_subset)).to(self.flags.device));
s_mu, s_logvar = self.modality_fusion(mus_subset,
logvars_subset,
                                                      weights_subset);  # PoE within each subset
distr_subsets[s_key] = [s_mu, s_logvar];
if self.fusion_condition(mods, input_batch):
mus = torch.cat((mus, s_mu.unsqueeze(0)), dim=0);
logvars = torch.cat((logvars, s_logvar.unsqueeze(0)),
dim=0);
if self.flags.modality_jsd:
num_samples = mus[0].shape[0]
mus = torch.cat((mus, torch.zeros(1, num_samples,
self.flags.class_dim).to(self.flags.device)),
dim=0);
logvars = torch.cat((logvars, torch.zeros(1, num_samples,
self.flags.class_dim).to(self.flags.device)),
dim=0);
#weights = (1/float(len(mus)))*torch.ones(len(mus)).to(self.flags.device);
weights = (1/float(mus.shape[0]))*torch.ones(mus.shape[0]).to(self.flags.device);
        joint_mu, joint_logvar = self.moe_fusion(mus, logvars, weights);  # MoE across subsets
#mus = torch.cat(mus, dim=0);
#logvars = torch.cat(logvars, dim=0);
latents['mus'] = mus;
latents['logvars'] = logvars;
latents['weights'] = weights;
latents['joint'] = [joint_mu, joint_logvar];
latents['subsets'] = distr_subsets;
return latents;
def generate(self, num_samples=None):
if num_samples is None:
num_samples = self.flags.batch_size;
mu = torch.zeros(num_samples,
self.flags.class_dim).to(self.flags.device);
logvar = torch.zeros(num_samples,
self.flags.class_dim).to(self.flags.device);
z_class = self.reparameterize(mu, logvar);
z_styles = self.get_random_styles(num_samples);
random_latents = {'content': z_class, 'style': z_styles};
random_samples = self.generate_from_latents(random_latents);
return random_samples;
def generate_sufficient_statistics_from_latents(self, latents):
suff_stats = dict();
content = latents['content']
for m, m_key in enumerate(self.modalities.keys()):
s = latents['style'][m_key];
cg = self.lhoods[m_key](*self.decoders[m_key](s, content));
suff_stats[m_key] = cg;
return suff_stats;
def generate_from_latents(self, latents):
suff_stats = self.generate_sufficient_statistics_from_latents(latents);
cond_gen = dict();
for m, m_key in enumerate(latents['style'].keys()):
cond_gen_m = suff_stats[m_key].mean;
cond_gen[m_key] = cond_gen_m;
return cond_gen;
def cond_generation(self, latent_distributions, num_samples=None):
if num_samples is None:
num_samples = self.flags.batch_size;
style_latents = self.get_random_styles(num_samples);
cond_gen_samples = dict();
for k, key in enumerate(latent_distributions.keys()):
[mu, logvar] = latent_distributions[key];
content_rep = self.reparameterize(mu=mu, logvar=logvar);
latents = {'content': content_rep, 'style': style_latents}
cond_gen_samples[key] = self.generate_from_latents(latents);
return cond_gen_samples;
def get_random_style_dists(self, num_samples):
styles = dict();
for k, m_key in enumerate(self.modalities.keys()):
mod = self.modalities[m_key];
s_mu = torch.zeros(num_samples,
mod.style_dim).to(self.flags.device)
s_logvar = torch.zeros(num_samples,
mod.style_dim).to(self.flags.device);
styles[m_key] = [s_mu, s_logvar];
return styles;
def get_random_styles(self, num_samples):
styles = dict();
for k, m_key in enumerate(self.modalities.keys()):
if self.flags.factorized_representation:
mod = self.modalities[m_key];
z_style = torch.randn(num_samples, mod.style_dim);
z_style = z_style.to(self.flags.device);
else:
z_style = None;
styles[m_key] = z_style;
return styles;
def save_networks(self):
for k, m_key in enumerate(self.modalities.keys()):
torch.save(self.encoders[m_key].state_dict(),
os.path.join(self.flags.dir_checkpoints, 'enc_' +
self.modalities[m_key].name))
torch.save(self.decoders[m_key].state_dict(),
os.path.join(self.flags.dir_checkpoints, 'dec_' +
self.modalities[m_key].name))
| 14,033 | 41.017964 | 121 | py |
BraVL | BraVL-master/BraVL_EEG/utils/utils.py | import os
import torch
import torch.distributions as dist  # required by get_likelihood below
# Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
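# Illustrative usage (hypothetical values):
#   for i, item in enumerate(items):
#       printProgressBar(i + 1, len(items), prefix='Progress:', suffix='Complete', length=50)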
def get_likelihood(name):
    # Map a likelihood name to its torch.distributions class
    # (parameter renamed from `str` to avoid shadowing the builtin).
    if name == 'laplace':
        pz = dist.Laplace
    elif name == 'bernoulli':
        pz = dist.Bernoulli
    elif name == 'normal':
        pz = dist.Normal
    elif name == 'categorical':
        pz = dist.OneHotCategorical
    else:
        print('likelihood not implemented')
        pz = None
    return pz
def reweight_weights(w):
w = w / w.sum()
return w
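# Deterministic mixture "sampling": rather than drawing each sample's
# component at random, the batch is split into contiguous chunks whose sizes
# are proportional to the component weights, and chunk k is taken from
# component k's parameters.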
def mixture_component_selection(flags, mus, logvars, w_modalities=None):
#if not defined, take pre-defined weights
num_components = mus.shape[0]
num_samples = mus.shape[1]
if w_modalities is None:
w_modalities = torch.Tensor(flags.alpha_modalities).to(flags.device)
idx_start = []
idx_end = []
for k in range(0, num_components):
if k == 0:
i_start = 0
else:
i_start = int(idx_end[k-1])
if k == w_modalities.shape[0]-1:
i_end = num_samples
else:
i_end = i_start + int(torch.floor(num_samples*w_modalities[k]))
idx_start.append(i_start)
idx_end.append(i_end)
idx_end[-1] = num_samples
mu_sel = torch.cat([mus[k, idx_start[k]:idx_end[k], :] for k in range(w_modalities.shape[0])])
logvar_sel = torch.cat([logvars[k, idx_start[k]:idx_end[k], :] for k in range(w_modalities.shape[0])])
return [mu_sel, logvar_sel]
def calc_elbo(exp, modality, recs, klds):
flags = exp.flags
mods = exp.modalities
s_weights = exp.style_weights
r_weights = exp.rec_weights
kld_content = klds['content']
if modality == 'joint':
w_style_kld = 0.0
w_rec = 0.0
klds_style = klds['style']
for k, m_key in enumerate(mods.keys()):
w_style_kld += s_weights[m_key] * klds_style[m_key]
w_rec += r_weights[m_key] * recs[m_key]
kld_style = w_style_kld
rec_error = w_rec
else:
beta_style_mod = s_weights[modality]
#rec_weight_mod = r_weights[modality]
rec_weight_mod = 1.0
kld_style = beta_style_mod * klds['style'][modality]
rec_error = rec_weight_mod * recs[modality]
div = flags.beta_content * kld_content + flags.beta_style * kld_style
elbo = rec_error + flags.beta * div
return elbo
def save_and_log_flags(flags):
#filename_flags = os.path.join(flags.dir_experiment_run, 'flags.json')
#with open(filename_flags, 'w') as f:
# json.dump(flags.__dict__, f, indent=2, sort_keys=True)
filename_flags_rar = os.path.join(flags.dir_experiment_run, 'flags.rar')
torch.save(flags, filename_flags_rar)
str_args = ''
for k, key in enumerate(sorted(flags.__dict__.keys())):
str_args = str_args + '\n' + key + ': ' + str(flags.__dict__[key])
return str_args
class Flatten(torch.nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class Unflatten(torch.nn.Module):
def __init__(self, ndims):
super(Unflatten, self).__init__()
self.ndims = ndims
def forward(self, x):
return x.view(x.size(0), *self.ndims)
| 4,100 | 32.892562 | 106 | py |
BraVL | BraVL-master/BraVL_EEG/utils/BaseFlags.py | import os
import argparse
import numpy as np
import torch
import scipy.io as sio
parser = argparse.ArgumentParser()
# TRAINING
parser.add_argument('--batch_size', type=int, default=1024, help="batch size for training")
parser.add_argument('--initial_learning_rate', type=float, default=0.0001, help="starting learning rate")
parser.add_argument('--beta_1', type=float, default=0.9, help="default beta_1 val for adam")
parser.add_argument('--beta_2', type=float, default=0.999, help="default beta_2 val for adam")
parser.add_argument('--start_epoch', type=int, default=0, help="flag to set the starting epoch for training")
parser.add_argument('--end_epoch', type=int, default=100, help="flag to indicate the final epoch of training")
# DATA DEPENDENT
parser.add_argument('--class_dim', type=int, default=32, help="dimension of common factor latent space")
# SAVE and LOAD
parser.add_argument('--mm_vae_save', type=str, default='mm_vae', help="model save for vae_bimodal")
parser.add_argument('--load_saved', type=bool, default=False, help="flag to indicate if a saved model will be loaded")
# DIRECTORIES
# experiments
parser.add_argument('--dir_experiment', type=str, default='./logs', help="directory to save logs in")
parser.add_argument('--dataname', type=str, default='ThingsEEG-Text', help="dataset")
parser.add_argument('--sbj', type=str, default='sub-01', help="eeg subject")
parser.add_argument('--roi', type=str, default='17channels', help="ROI")
parser.add_argument('--text_model', type=str, default='CLIPText', help="text embedding model")
parser.add_argument('--image_model', type=str, default='pytorch/cornet_s', help="image embedding model")
parser.add_argument('--test_type', type=str, default='zsl', help='normal or zsl')
parser.add_argument('--aug_type', type=str, default='no_aug', help='no_aug, image_text_ilsvrc2012_val')
parser.add_argument('--unimodal', type=str, default='image', help='image, text')
#multimodal
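# NOTE: argparse's type=bool does not parse 'False' as False (any non-empty
# string is truthy), so the boolean flags below behave as intended only when
# left at their defaults or set programmatically.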
parser.add_argument('--method', type=str, default='joint_elbo', help='choose method for training the model')
parser.add_argument('--modality_jsd', type=bool, default=False, help="modality_jsd")
parser.add_argument('--modality_poe', type=bool, default=False, help="modality_poe")
parser.add_argument('--modality_moe', type=bool, default=False, help="modality_moe")
parser.add_argument('--joint_elbo', type=bool, default=False, help="joint_elbo")
parser.add_argument('--poe_unimodal_elbos', type=bool, default=True, help="unimodal_klds")
parser.add_argument('--factorized_representation', action='store_true', default=False, help="factorized_representation")
# LOSS TERM WEIGHTS
parser.add_argument('--beta', type=float, default=0.0, help="default initial weight of sum of weighted divergence terms")
parser.add_argument('--beta_style', type=float, default=1.0, help="default weight of sum of weighted style divergence terms")
parser.add_argument('--beta_content', type=float, default=1.0, help="default weight of sum of weighted content divergence terms")
parser.add_argument('--lambda1', type=float, default=0.001, help="default weight of intra_mi terms")
parser.add_argument('--lambda2', type=float, default=0.001, help="default weight of inter_mi terms")
FLAGS = parser.parse_args()
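# The training features are loaded once at import time only to infer the input
# dimensionality of each modality, which is then registered as a flag default.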
data_dir_root = os.path.join('./data', FLAGS.dataname)
brain_dir = os.path.join(data_dir_root, 'brain_feature', FLAGS.roi, FLAGS.sbj)
image_dir_train = os.path.join(data_dir_root, 'visual_feature/ThingsTrain', FLAGS.image_model, FLAGS.sbj)
text_dir_train = os.path.join(data_dir_root, 'textual_feature/ThingsTrain/text', FLAGS.text_model, FLAGS.sbj)
train_brain = sio.loadmat(os.path.join(brain_dir, 'eeg_train_data_within.mat'))['data'].astype('double')
train_brain = train_brain[:,:,27:60] # 70ms-400ms
train_brain = np.reshape(train_brain,(train_brain.shape[0],-1))
train_image = sio.loadmat(os.path.join(image_dir_train, 'feat_pca_train.mat'))['data'].astype('double')
train_text = sio.loadmat(os.path.join(text_dir_train, 'text_feat_train.mat'))['data'].astype('double')
train_image = train_image[:,0:100] # top 100 PCs
train_brain = torch.from_numpy(train_brain)
train_image = torch.from_numpy(train_image)
train_text = torch.from_numpy(train_text)
dim_brain = train_brain.shape[1]
dim_image = train_image.shape[1]
dim_text = train_text.shape[1]
parser.add_argument('--m1_dim', type=int, default=dim_brain, help="dimension of modality brain")
parser.add_argument('--m2_dim', type=int, default=dim_image, help="dimension of modality image")
parser.add_argument('--m3_dim', type=int, default=dim_text, help="dimension of modality text")
parser.add_argument('--data_dir_root', type=str, default=data_dir_root, help="data dir")
FLAGS = parser.parse_args()
print(FLAGS)
| 4,707 | 57.85 | 129 | py |
BraVL | BraVL-master/BraVL_fMRI/run_epochs_trimodal.py | import os
import numpy as np
import math
import random
import torch
from torch.autograd import Variable
import torch.distributions as dist
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from divergence_measures.kl_div import calc_kl_divergence
from sklearn.svm import SVC
from sklearn.metrics import top_k_accuracy_score
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
import seaborn as sns
import csv
from utils import utils
from utils.TBLogger import TBLogger
sns.set(rc={'figure.figsize':(11.7,8.27)})
palette = sns.color_palette("bright", 2)
torch.set_default_tensor_type(torch.DoubleTensor)
TINY = 1e-8
CONSTANT = 1e6
# global variables
SEED = 2021
SAMPLE1 = None
if SEED is not None:
np.random.seed(SEED)
torch.manual_seed(SEED)
random.seed(SEED)
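# m_cubo below estimates CUBO_2, a chi-square-divergence upper bound on the
# log evidence: CUBO_2 = 0.5 * log E_q[(p(x, z) / q(z|x))^2], computed over K
# importance samples with log-mean-exp for numerical stability.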
def log_mean_exp(value, dim=0, keepdim=False):
return torch.logsumexp(value, dim, keepdim=keepdim) - math.log(value.size(dim))
def m_cubo(x, qz_x, px_zs, zss):
lpz = dist.Normal(torch.zeros(1,zss.size(2)).cuda(),torch.ones(1,zss.size(2)).cuda()).log_prob(zss).sum(-1)
lqz_x = qz_x.log_prob(zss).sum(-1)
if 'brain' in px_zs.keys() and 'image' in px_zs.keys() and 'text' in px_zs.keys():
lpx_z1 = px_zs['image'].log_prob(x['image']).sum(-1)
lpx_z2 = px_zs['text'].log_prob(x['text']).sum(-1)
lpx_z3 = px_zs['brain'].log_prob(x['brain']).sum(-1)
cubo = 0.5*log_mean_exp(2*(lpz+lpx_z1+lpx_z2+lpx_z3-lqz_x))
elif 'brain' not in px_zs.keys() and 'image' in px_zs.keys() and 'text' in px_zs.keys():
lpx_z1 = px_zs['image'].log_prob(x['image']).sum(-1)
lpx_z2 = px_zs['text'].log_prob(x['text']).sum(-1)
cubo = 0.5*log_mean_exp(2*(lpz+lpx_z1+lpx_z2-lqz_x))
return cubo.mean()
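# Diagonal-Gaussian log-density of x_var under N(mean, std^2), summed over the
# feature dimension; dist_info is the (mean, std) pair returned by a QNet.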
def log_li(x_var, dist_info):
mean = dist_info[0]
std = dist_info[1]
epsilon = (x_var - mean) / (std + TINY)
pi = Variable(torch.ones(1) * np.pi).to(x_var.device)
logli = - 0.5 * torch.log(2 * pi) - torch.log(std + TINY) - 0.5 * torch.pow(epsilon,2)
return logli.sum(1)
def mutual_info(exp, px_zs, z):
    # Sum log q(z | x_hat_m) over whichever modalities are present in px_zs;
    # each Q-network scores how well the shared latent z can be recovered from
    # its modality's reconstruction mean (collapses the original seven-branch
    # if/elif chain without changing behavior).
    q_nets = {'brain': exp.Q1, 'image': exp.Q2, 'text': exp.Q3}
    mi = 0.0
    for m_key, q_net in q_nets.items():
        if m_key in px_zs.keys():
            mi = mi + log_li(z, q_net(px_zs[m_key].loc)).mean()
    return mi
def calc_log_probs(exp, result, batch):
mods = exp.modalities
log_probs = dict()
weighted_log_prob = 0.0
for m, m_key in enumerate(mods.keys()):
if m_key in batch[0].keys():
mod = mods[m_key]
log_probs[mod.name] = -mod.calc_log_prob(result['rec'][mod.name],
batch[0][mod.name],
exp.flags.batch_size)
weighted_log_prob += exp.rec_weights[mod.name]*log_probs[mod.name]
else:
mod = mods[m_key]
log_probs[mod.name] = 0
weighted_log_prob += exp.rec_weights[mod.name]*log_probs[mod.name]
return log_probs, weighted_log_prob
def calc_klds(exp, result):
latents = result['latents']['subsets']
klds = dict()
for m, key in enumerate(latents.keys()):
mu, logvar = latents[key]
klds[key] = calc_kl_divergence(mu, logvar,
norm_value=exp.flags.batch_size)
return klds
def calc_klds_style(exp, result):
latents = result['latents']['modalities']
klds = dict()
for m, key in enumerate(latents.keys()):
if key.endswith('style'):
mu, logvar = latents[key]
klds[key] = calc_kl_divergence(mu, logvar,
norm_value=exp.flags.batch_size)
return klds
def calc_style_kld(exp, klds):
mods = exp.modalities
style_weights = exp.style_weights
weighted_klds = 0.0
for m, m_key in enumerate(mods.keys()):
weighted_klds += style_weights[m_key]*klds[m_key+'_style']
return weighted_klds
def shuffle(a):
return a[torch.randperm(a.size()[0])]
def true_neg_idx(data, shuffle_data):
a = data.mean(1)
b = shuffle_data.mean(1)
index = torch.arange(0, len(a))
idx = index[a!=b]
return idx
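# negative_sample_generator builds mismatched (negative) samples by shuffling
# one or two modalities within the batch: in the trimodal branch cases 1-3
# corrupt a single modality and cases 4-6 corrupt a pair; the bimodal
# image/text branch supports cases 1-2. true_neg_idx keeps the rows whose mean
# changed under the shuffle, a proxy for "actually mismatched".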
def negative_sample_generator(batch_new, batch, case):
batch_d = batch_new[0]
data = batch
if 'brain' in batch_d.keys() and 'image' in batch_d.keys() and 'text' in batch_d.keys():
if case==1:
shuffle_data = shuffle(data[0])
idx = true_neg_idx(data[0], shuffle_data)
neg_batch = [shuffle_data[idx,:], data[1][idx,:], data[2][idx,:]]
batch = [neg_batch[0], neg_batch[1], neg_batch[2]]
batch_new[0] = dict()
batch_new[0] = {'brain': batch[0], 'image': batch[1], 'text': batch[2]}
# batch_new[1] = data[3]
return batch_new
elif case == 2:
shuffle_data = shuffle(data[1])
idx = true_neg_idx(data[1], shuffle_data)
neg_batch = [data[0][idx,:], shuffle_data[idx,:], data[2][idx,:]]
batch = [neg_batch[0], neg_batch[1], neg_batch[2]]
batch_new[0] = dict()
batch_new[0] = {'brain': batch[0], 'image': batch[1], 'text': batch[2]}
# batch_new[1] = data[3]
return batch_new
elif case == 3:
shuffle_data = shuffle(data[2])
idx = true_neg_idx(data[2], shuffle_data)
neg_batch = [data[0][idx,:], data[1][idx,:], shuffle_data[idx,:]]
batch = [neg_batch[0], neg_batch[1], neg_batch[2]]
batch_new[0] = dict()
batch_new[0] = {'brain': batch[0], 'image': batch[1], 'text': batch[2]}
# batch_new[1] = data[3]
return batch_new
elif case == 4:
shuffle_data0 = shuffle(data[0])
idx1 = true_neg_idx(data[0], shuffle_data0)
shuffle_data1 = shuffle(data[1])
idx2 = true_neg_idx(data[1], shuffle_data1)
idx = np.unique(np.concatenate((idx1,idx2),axis=0))
neg_batch = [shuffle_data0[idx,:], shuffle_data1[idx,:], data[2][idx,:]]
batch = [neg_batch[0], neg_batch[1], neg_batch[2]]
batch_new[0] = dict()
batch_new[0] = {'brain': batch[0], 'image': batch[1], 'text': batch[2]}
# batch_new[1] = data[3]
return batch_new
elif case == 5:
shuffle_data0 = shuffle(data[0])
idx1 = true_neg_idx(data[0], shuffle_data0)
shuffle_data2 = shuffle(data[2])
idx2 = true_neg_idx(data[2], shuffle_data2)
idx = np.unique(np.concatenate((idx1,idx2),axis=0))
neg_batch = [shuffle_data0[idx,:], data[1][idx,:], shuffle_data2[idx,:]]
batch = [neg_batch[0], neg_batch[1], neg_batch[2]]
batch_new[0] = dict()
batch_new[0] = {'brain': batch[0], 'image': batch[1], 'text': batch[2]}
# batch_new[1] = data[3]
return batch_new
elif case == 6:
shuffle_data1 = shuffle(data[1])
idx1 = true_neg_idx(data[1], shuffle_data1)
shuffle_data2 = shuffle(data[2])
idx2 = true_neg_idx(data[2], shuffle_data2)
idx = np.unique(np.concatenate((idx1,idx2),axis=0))
neg_batch = [data[0][idx,:], shuffle_data1[idx,:], shuffle_data2[idx,:]]
batch = [neg_batch[0], neg_batch[1], neg_batch[2]]
batch_new[0] = dict()
batch_new[0] = {'brain': batch[0], 'image': batch[1], 'text': batch[2]}
# batch_new[1] = data[3]
return batch_new
elif 'brain' not in batch_d.keys() and 'image' in batch_d.keys() and 'text' in batch_d.keys():
if case == 1:
shuffle_data = shuffle(data[0])
idx = true_neg_idx(data[0], shuffle_data)
neg_batch = [shuffle_data[idx,:], data[1][idx,:]]
batch = [neg_batch[0], neg_batch[1]]
batch_new[0] = dict()
batch_new[0] = {'image': batch[0], 'text': batch[1]}
# batch_new[1] = data[3]
return batch_new
elif case == 2:
shuffle_data = shuffle(data[1])
idx = true_neg_idx(data[1], shuffle_data)
neg_batch = [data[0][idx,:], shuffle_data[idx,:]]
batch = [neg_batch[0], neg_batch[1]]
batch_new[0] = dict()
batch_new[0] = {'image': batch[0], 'text': batch[1]}
# batch_new[1] = data[3]
return batch_new
def basic_routine_epoch(exp, batch, epoch):
# set up weights
beta_style = exp.flags.beta_style
beta_content = exp.flags.beta_content
beta = exp.flags.beta + epoch * 0.01
if beta>1.0:
beta = 1.0
rec_weight = 1.0
lambda1 = exp.flags.lambda1
mm_vae = exp.mm_vae
batch_d = batch[0]
mods = exp.modalities
for k, m_key in enumerate(batch_d.keys()):
batch_d[m_key] = Variable(batch_d[m_key]).to(exp.flags.device)
results = mm_vae(batch_d)
log_probs, weighted_log_prob = calc_log_probs(exp, results, batch)
group_divergence = results['joint_divergence']
klds = calc_klds(exp, results)
z = results['class_embeddings']
px_zs = results['rec']
intra_mi = -mutual_info(exp,px_zs,z)
if exp.flags.factorized_representation:
klds_style = calc_klds_style(exp, results)
if (exp.flags.modality_jsd or exp.flags.modality_moe
or exp.flags.joint_elbo):
if exp.flags.factorized_representation:
kld_style = calc_style_kld(exp, klds_style)
else:
kld_style = 0.0
kld_content = group_divergence
kld_weighted = beta_style * kld_style + beta_content * kld_content
elbo_loss = rec_weight * weighted_log_prob + beta * kld_weighted
elif exp.flags.modality_poe:
klds_joint = {'content': group_divergence,
'style': dict()}
elbos = dict()
for m, m_key in enumerate(mods.keys()):
mod = mods[m_key]
if exp.flags.factorized_representation:
kld_style_m = klds_style[m_key + '_style']
else:
kld_style_m = 0.0
klds_joint['style'][m_key] = kld_style_m
            if exp.flags.poe_unimodal_elbos:
                if m_key in batch_d.keys():
                    i_batch_mod = {m_key: batch_d[m_key]}
                    r_mod = mm_vae(i_batch_mod)
                    log_prob_mod = -mod.calc_log_prob(r_mod['rec'][m_key],
                                                      batch_d[m_key],
                                                      exp.flags.batch_size)
                    log_prob = {m_key: log_prob_mod}
                    klds_mod = {'content': klds[m_key],
                                'style': {m_key: kld_style_m}}
                    elbo_mod = utils.calc_elbo(exp, m_key, log_prob, klds_mod)
                    elbos[m_key] = elbo_mod
                else:
                    elbos[m_key] = 0
elbo_joint = utils.calc_elbo(exp, 'joint', log_probs, klds_joint)
elbos['joint'] = elbo_joint
elbo_loss = sum(elbos.values())
total_loss = elbo_loss + lambda1 * intra_mi
out_basic_routine = dict()
out_basic_routine['results'] = results
out_basic_routine['log_probs'] = log_probs
out_basic_routine['total_loss'] = total_loss
out_basic_routine['klds'] = klds
out_basic_routine['intra_mi'] = intra_mi
out_basic_routine['elbo_loss'] = elbo_loss
return out_basic_routine
def elbo_contrast(exp, batch, epoch):
# set up weights
beta_style = exp.flags.beta_style
beta_content = exp.flags.beta_content
beta = exp.flags.beta + epoch * 0.01
if beta>1.0:
beta = 1.0
rec_weight = 1.0
mm_vae = exp.mm_vae
batch_d = batch[0]
mods = exp.modalities
for k, m_key in enumerate(batch_d.keys()):
batch_d[m_key] = Variable(batch_d[m_key]).to(exp.flags.device)
results = mm_vae(batch_d, K=30)
cubo = m_cubo(batch_d, results['qz_x'], results['px_zs'], results['zss'])
log_probs, weighted_log_prob = calc_log_probs(exp, results, batch)
group_divergence = results['joint_divergence']
klds = calc_klds(exp, results)
z = results['class_embeddings']
neg_batch_size = z.shape[0]
if exp.flags.factorized_representation:
klds_style = calc_klds_style(exp, results)
if (exp.flags.modality_jsd or exp.flags.modality_moe
or exp.flags.joint_elbo):
if exp.flags.factorized_representation:
kld_style = calc_style_kld(exp, klds_style)
else:
kld_style = 0.0
kld_content = group_divergence
kld_weighted = beta_style * kld_style + beta_content * kld_content
elbo_loss = rec_weight * weighted_log_prob + beta * kld_weighted
elif exp.flags.modality_poe:
klds_joint = {'content': group_divergence,
'style': dict()}
elbos = dict()
for m, m_key in enumerate(mods.keys()):
mod = mods[m_key]
if exp.flags.factorized_representation:
kld_style_m = klds_style[m_key + '_style']
else:
kld_style_m = 0.0
klds_joint['style'][m_key] = kld_style_m
            if exp.flags.poe_unimodal_elbos:
                if m_key in batch_d.keys():
                    i_batch_mod = {m_key: batch_d[m_key]}
                    r_mod = mm_vae(i_batch_mod)
                    log_prob_mod = -mod.calc_log_prob(r_mod['rec'][m_key],
                                                      batch_d[m_key],
                                                      exp.flags.batch_size)
                    log_prob = {m_key: log_prob_mod}
                    klds_mod = {'content': klds[m_key],
                                'style': {m_key: kld_style_m}}
                    elbo_mod = utils.calc_elbo(exp, m_key, log_prob, klds_mod)
                    elbos[m_key] = elbo_mod
                else:
                    elbos[m_key] = 0
elbo_joint = utils.calc_elbo(exp, 'joint', log_probs, klds_joint)
elbos['joint'] = elbo_joint
elbo_loss = sum(elbos.values())
# elbo_scale = - elbo_loss / CONSTANT
elbo_scale = cubo / CONSTANT
out_basic_routine = dict()
out_basic_routine['elbo_nega_sample_loss'] = torch.log(elbo_scale.exp().sum() * exp.flags.batch_size / neg_batch_size + TINY) * CONSTANT
return out_basic_routine
def update_Qnet(exp, batch):
with torch.no_grad():
mm_vae = exp.mm_vae
batch_d = batch[0]
for k, m_key in enumerate(batch_d.keys()):
batch_d[m_key] = Variable(batch_d[m_key]).to(exp.flags.device)
results = mm_vae(batch_d)
z = results['class_embeddings']
px_zs = results['rec']
intra_mi = -mutual_info(exp, px_zs, z)
return intra_mi
def train_aug(epoch, exp, tb_logger):
mm_vae = exp.mm_vae
mm_vae.train()
exp.mm_vae = mm_vae
lambda2 = exp.flags.lambda2
if exp.flags.aug_type == 'image_text':
print('aug type: image_text')
aug_loader = DataLoader(exp.dataset_aug, batch_size=exp.flags.batch_size,
shuffle=True,
num_workers=8, drop_last=True)
for iteration, batch in enumerate(aug_loader):
batch_new = {}
batch_new[0] = dict()
batch_new[0] = {'image':batch[0],'text':batch[1]}
# batch_new[1] = batch[0]
batch = [batch[0], batch[1]]
# Stage 1
intra_mi = update_Qnet(exp, batch_new)
exp.optimizer['Qnet'].zero_grad()
exp.optimizer['mvae'].zero_grad()
intra_mi.backward()
exp.optimizer['Qnet'].step()
# Stage 2
basic_routine = basic_routine_epoch(exp, batch_new, epoch)
total_loss = basic_routine['total_loss']
elbo_loss = basic_routine['elbo_loss']
exp.optimizer['mvae'].zero_grad()
exp.optimizer['Qnet'].zero_grad()
neg_batch_new = negative_sample_generator(batch_new, batch, case=1)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_1 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=2)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_2 = basic_routine_contrast['elbo_nega_sample_loss']
elbo_nega_sample_loss = elbo_nega_sample_loss_case_1 + elbo_nega_sample_loss_case_2
inter_mi_loss = elbo_loss + elbo_nega_sample_loss/2.0
total_loss = total_loss + lambda2 * inter_mi_loss
total_loss.backward()
exp.optimizer['mvae'].step()
print('====> Epoch: {:03d} Train loss: {:.4f}'.format(epoch, total_loss))
elif exp.flags.aug_type == 'text_only':
print('aug type: text_only')
aug_loader = DataLoader(exp.dataset_aug, batch_size=exp.flags.batch_size,
shuffle=True,
num_workers=8, drop_last=True)
for iteration, batch in enumerate(aug_loader):
batch_new = {}
batch_new[0] = dict()
batch_new[0] = {'text': batch[0]}
# batch_new[1] = batch[0]
# Stage 1
intra_mi = update_Qnet(exp, batch_new)
exp.optimizer['Qnet'].zero_grad()
exp.optimizer['mvae'].zero_grad()
intra_mi.backward()
exp.optimizer['Qnet'].step()
# Stage 2
basic_routine = basic_routine_epoch(exp, batch_new, epoch)
total_loss = basic_routine['total_loss']
exp.optimizer['mvae'].zero_grad()
exp.optimizer['Qnet'].zero_grad()
total_loss.backward()
exp.optimizer['mvae'].step()
print('====> Epoch: {:03d} Train loss: {:.4f}'.format(epoch, total_loss))
elif exp.flags.aug_type == 'image_only':
print('aug type: image_only')
aug_loader = DataLoader(exp.dataset_aug, batch_size=exp.flags.batch_size,
shuffle=True,
num_workers=8, drop_last=True)
for iteration, batch in enumerate(aug_loader):
batch_new = {}
batch_new[0] = dict()
batch_new[0] = {'image': batch[0]}
# batch_new[1] = batch[0]
# Stage 1
intra_mi = update_Qnet(exp, batch_new)
exp.optimizer['Qnet'].zero_grad()
exp.optimizer['mvae'].zero_grad()
intra_mi.backward()
exp.optimizer['Qnet'].step()
# Stage 2
basic_routine = basic_routine_epoch(exp, batch_new, epoch)
total_loss = basic_routine['total_loss']
exp.optimizer['mvae'].zero_grad()
exp.optimizer['Qnet'].zero_grad()
total_loss.backward()
exp.optimizer['mvae'].step()
print('====> Epoch: {:03d} Train loss: {:.4f}'.format(epoch, total_loss))
elif exp.flags.aug_type == 'no_aug':
print('aug type: no augmentation')
def train(epoch, exp, tb_logger):
mm_vae = exp.mm_vae
mm_vae.train()
exp.mm_vae = mm_vae
lambda2 = exp.flags.lambda2
test_loader = DataLoader(exp.dataset_test, batch_size=exp.flags.batch_size,
shuffle=True,
num_workers=8, drop_last=True)
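    # Transductive step: only the image and text features of test samples are
    # used here; neither brain responses nor labels enter this loss.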
for iteration, batch in enumerate(test_loader):
batch_new = {}
batch_new[0] = dict()
batch_new[0] = {'image':batch[1],'text':batch[2]}
# batch_new[1] = batch[3]
batch = [batch[1],batch[2]]
# Stage 1
intra_mi = update_Qnet(exp, batch_new)
exp.optimizer['Qnet'].zero_grad()
exp.optimizer['mvae'].zero_grad()
intra_mi.backward()
exp.optimizer['Qnet'].step()
# Stage 2
basic_routine = basic_routine_epoch(exp, batch_new, epoch)
total_loss = basic_routine['total_loss']
elbo_loss = basic_routine['elbo_loss']
exp.optimizer['mvae'].zero_grad()
exp.optimizer['Qnet'].zero_grad()
neg_batch_new = negative_sample_generator(batch_new, batch, case=1)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_1 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=2)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_2 = basic_routine_contrast['elbo_nega_sample_loss']
elbo_nega_sample_loss = elbo_nega_sample_loss_case_1 + elbo_nega_sample_loss_case_2
inter_mi_loss = elbo_loss + elbo_nega_sample_loss/2.0
total_loss = total_loss + lambda2 * inter_mi_loss
total_loss.backward()
exp.optimizer['mvae'].step()
d_loader = DataLoader(exp.dataset_train, batch_size=exp.flags.batch_size,
shuffle=True,
num_workers=8, drop_last=True)
for iteration, batch in enumerate(d_loader):
batch_new = {}
batch_new[0] = dict()
batch_new[0] = {'brain':batch[0],'image':batch[1],'text':batch[2]}
# batch_new[1] = batch[3]
# Stage 1
intra_mi = update_Qnet(exp, batch_new)
exp.optimizer['Qnet'].zero_grad()
exp.optimizer['mvae'].zero_grad()
intra_mi.backward()
exp.optimizer['Qnet'].step()
# Stage 2
basic_routine = basic_routine_epoch(exp, batch_new, epoch)
results = basic_routine['results']
total_loss = basic_routine['total_loss']
klds = basic_routine['klds']
log_probs = basic_routine['log_probs']
intra_mi = basic_routine['intra_mi']
elbo_loss = basic_routine['elbo_loss']
exp.optimizer['mvae'].zero_grad()
exp.optimizer['Qnet'].zero_grad()
neg_batch_new = negative_sample_generator(batch_new, batch, case=1)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_1 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=2)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_2 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=3)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_3 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=4)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_4 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=5)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_5 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=6)
basic_routine_contrast=elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_6 = basic_routine_contrast['elbo_nega_sample_loss']
elbo_nega_sample_loss = elbo_nega_sample_loss_case_1 + elbo_nega_sample_loss_case_2 + elbo_nega_sample_loss_case_3 + elbo_nega_sample_loss_case_4 + elbo_nega_sample_loss_case_5 + elbo_nega_sample_loss_case_6
inter_mi_loss = elbo_loss + elbo_nega_sample_loss/6.0
total_loss = total_loss + lambda2 * inter_mi_loss
total_loss.backward()
exp.optimizer['mvae'].step()
tb_logger.write_training_logs(results, total_loss, log_probs, klds, -inter_mi_loss)
print('====> Epoch: {:03d} Train loss: {:.4f} ELBO: {:.4f} IntraMI: {:.4f} InterMI: {:.4f}'.format(epoch,
total_loss,
-elbo_loss,
-intra_mi,
-inter_mi_loss))
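# Evaluation pass: recomputes the same loss terms as train() under
# torch.no_grad() (batch size 200, no optimizer steps) and logs them.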
def test(epoch, exp, tb_logger):
with torch.no_grad():
mm_vae = exp.mm_vae
mm_vae.eval()
exp.mm_vae = mm_vae
lambda2 = exp.flags.lambda2
d_loader = DataLoader(exp.dataset_test, batch_size=200,
shuffle=True,
num_workers=8, drop_last=False)
for iteration, batch in enumerate(d_loader):
batch_new = {}
batch_new[0] = dict()
batch_new[0] = {'brain': batch[0], 'image': batch[1], 'text': batch[2]}
# batch_new[1] = batch[3]
basic_routine = basic_routine_epoch(exp, batch_new, epoch)
results = basic_routine['results']
total_loss = basic_routine['total_loss']
klds = basic_routine['klds']
log_probs = basic_routine['log_probs']
intra_mi = basic_routine['intra_mi']
elbo_loss = basic_routine['elbo_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=1)
basic_routine_contrast = elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_1 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=2)
basic_routine_contrast = elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_2 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=3)
basic_routine_contrast = elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_3 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=4)
basic_routine_contrast = elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_4 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=5)
basic_routine_contrast = elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_5 = basic_routine_contrast['elbo_nega_sample_loss']
neg_batch_new = negative_sample_generator(batch_new, batch, case=6)
basic_routine_contrast = elbo_contrast(exp, neg_batch_new, epoch)
elbo_nega_sample_loss_case_6 = basic_routine_contrast['elbo_nega_sample_loss']
elbo_nega_sample_loss = elbo_nega_sample_loss_case_1 + elbo_nega_sample_loss_case_2 + elbo_nega_sample_loss_case_3 + elbo_nega_sample_loss_case_4 + elbo_nega_sample_loss_case_5 + elbo_nega_sample_loss_case_6
inter_mi_loss = elbo_loss + elbo_nega_sample_loss / 6.0
total_loss = total_loss + lambda2 * inter_mi_loss
tb_logger.write_testing_logs(results, total_loss, log_probs, klds, -inter_mi_loss)
print('====> Epoch: {:03d} Test loss: {:.4f} ELBO: {:.4f} IntraMI: {:.4f} InterMI: {:.4f}'.format(epoch,
total_loss,
-elbo_loss,
-intra_mi,
-inter_mi_loss))
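# Cross-modal inference helpers. Each one encodes the observed modality (or
# pair) into the shared class embedding z and, where a decoder is queried,
# reconstructs the missing modality from z; type='zsl' reads the zero-shot
# test split, type='normal' the validation split.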
def image_text_inference(exp, type):
with torch.no_grad():
mm_vae = exp.mm_vae
mm_vae.eval()
exp.mm_vae = mm_vae
if type=='zsl':
image_text_data = {'image': exp.dataset_test.tensors[1].cuda(),'text': exp.dataset_test.tensors[2].cuda()}
label = exp.dataset_test.tensors[3]
brain = exp.dataset_test.tensors[0]
elif type=='normal':
image_text_data = {'image': exp.dataset_val.tensors[1].cuda(),'text': exp.dataset_val.tensors[2].cuda()}
label = exp.dataset_val.tensors[3]
            brain = exp.dataset_val.tensors[0]
results = mm_vae(image_text_data)
z = results['class_embeddings']
brain_rec = mm_vae.lhoods['brain'](*mm_vae.decoders['brain'](None, z))
return z.cpu().numpy(), label.cpu().numpy(), brain_rec.loc.cpu().numpy(), brain.cpu().numpy()
def brain_inference(exp, type):
with torch.no_grad():
mm_vae = exp.mm_vae
mm_vae.eval()
exp.mm_vae = mm_vae
if type == 'zsl':
data = {'brain':exp.dataset_test.tensors[0].cuda()}
label = exp.dataset_test.tensors[3]
image = exp.dataset_test.tensors[1]
text = exp.dataset_test.tensors[2]
elif type == 'normal':
data = {'brain':exp.dataset_val.tensors[0].cuda()}
label = exp.dataset_val.tensors[3]
            image = exp.dataset_val.tensors[1]
            text = exp.dataset_val.tensors[2]
results = mm_vae(data)
z = results['class_embeddings']
image_rec = mm_vae.lhoods['image'](*mm_vae.decoders['image'](None, z))
text_rec = mm_vae.lhoods['text'](*mm_vae.decoders['text'](None, z))
return z.cpu().numpy(), label.cpu().numpy(), image_rec.loc.cpu().numpy(), text_rec.loc.cpu().numpy(), image.cpu().numpy(), text.cpu().numpy()
def image_inference(exp, type):
with torch.no_grad():
mm_vae = exp.mm_vae
mm_vae.eval()
exp.mm_vae = mm_vae
if type == 'zsl':
data = {'image':exp.dataset_test.tensors[1].cuda()}
label = exp.dataset_test.tensors[3]
elif type == 'normal':
data = {'image':exp.dataset_val.tensors[1].cuda()}
label = exp.dataset_val.tensors[3]
results = mm_vae(data)
z = results['class_embeddings']
return z.cpu().numpy(), label.cpu().numpy()
def text_inference(exp, type):
with torch.no_grad():
mm_vae = exp.mm_vae
mm_vae.eval()
exp.mm_vae = mm_vae
if type == 'zsl':
data = {'text':exp.dataset_test.tensors[2].cuda()}
label = exp.dataset_test.tensors[3]
elif type == 'normal':
data = {'text':exp.dataset_val.tensors[2].cuda()}
label = exp.dataset_val.tensors[3]
results = mm_vae(data)
z = results['class_embeddings']
return z.cpu().numpy(), label.cpu().numpy()
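# Fits an SVM on class embeddings inferred from the observed modalities
# (running inference five times, presumably to collect several stochastic
# samples of z per item), then scores it on embeddings inferred from brain
# activity alone; reports top-1 and top-5 accuracy.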
def run_classification_test(exp, observation, type):
if observation=='image_text':
z_train = []
train_label = []
for i in range(5):
z, label,brain_rec,brain= image_text_inference(exp,type)
z_train.append(z)
train_label.append(label)
z_train = np.vstack(z_train)
train_label = np.vstack(train_label)
X = np.concatenate((brain, brain_rec),axis=0)
sgn=np.concatenate((np.ones_like(np.squeeze(label)),np.zeros_like(np.squeeze(label))),axis=0)
# tsne = TSNE()
# X_embedded = tsne.fit_transform(X)
# sns.scatterplot(X_embedded[:, 0], X_embedded[:, 1], hue=sgn, style=sgn, legend='brief', palette=palette)
# path = './results/' + exp.flags.dataname + '_' + exp.flags.sbj + '_' + exp.flags.roi + '_' + exp.flags.aug_type + '_' + exp.flags.text_model + '_' + exp.flags.image_model.split('/')[-1] + '_' + str(exp.flags.lambda1) + '_' + str(exp.flags.lambda2) + '_' + str(exp.flags.class_dim) + '_' + exp.flags.method
# plt.savefig(path+'_brain_vs_brain_rec.pdf',dpi=500)
elif observation == 'image':
z_train = []
train_label = []
for i in range(5):
z, label= image_inference(exp,type)
z_train.append(z)
train_label.append(label)
z_train = np.vstack(z_train)
train_label = np.vstack(train_label)
elif observation == 'text':
z_train = []
train_label = []
for i in range(5):
z, label= text_inference(exp,type)
z_train.append(z)
train_label.append(label)
z_train = np.vstack(z_train)
train_label = np.vstack(train_label)
z_test, test_label, image_rec, text_rec, image, text = brain_inference(exp,type)
classifiers = [
SVC(gamma=0.00001, C=1.0, probability=True),
]
for clf in classifiers:
clf.fit(z_train, train_label)
score = clf.score(z_test, test_label)
print(f"{observation}\n"
f"Classification report for classifier {clf}:\n"
f"{score}\n")
probas = clf.predict_proba(z_test)
top_acc = top_k_accuracy_score(test_label, probas, k=5)
print(f"{observation}\n"
f"Classification Top 5 Acc for classifier {clf}:\n"
f"{top_acc}\n")
return score, top_acc
def create_csv(path, top1, top5):
    # writes only the header row; the top1/top5 values themselves are appended
    # afterwards via write_csv
    with open(path, 'w') as f:
        csv_writer = csv.writer(f)
        head = ["top1", "top5"]
        csv_writer.writerow(head)
def write_csv(path,top1,top5):
with open(path, 'a+') as f:
csv_writer = csv.writer(f)
row = []
row.append(top1)
row.append(top5)
csv_writer.writerow(row)
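# Training driver: per epoch it steps the LR schedulers, runs
# train_aug/train/test, checkpoints every 100 epochs, and finally evaluates
# image_text -> brain classification, appending top-1/top-5 scores to a CSV.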
def run_epochs_trimodal(exp):
# initialize summary writer
writer = SummaryWriter(exp.flags.dir_logs)
tb_logger = TBLogger(exp.flags.str_experiment, writer)
str_flags = utils.save_and_log_flags(exp.flags)
tb_logger.writer.add_text('FLAGS', str_flags, 0)
lr_list = []
print('training epochs progress:')
for epoch in range(exp.flags.start_epoch, exp.flags.end_epoch):
utils.printProgressBar(epoch, exp.flags.end_epoch)
# one epoch of training and testing
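        # advance the learning-rate schedulers once per epoch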
exp.scheduler['Qnet'].step()
exp.scheduler['mvae'].step()
lr_list.append(exp.optimizer['Qnet'].state_dict()['param_groups'][0]['lr'])
train_aug(epoch, exp, tb_logger)
train(epoch, exp, tb_logger)
test(epoch, exp, tb_logger)
        # save checkpoints every 100 epochs and at the final epoch
if (epoch + 1) % 100 == 0 or (epoch + 1) == exp.flags.end_epoch:
dir_network_epoch = os.path.join(exp.flags.dir_checkpoints, str(epoch).zfill(4))
if not os.path.exists(dir_network_epoch):
os.makedirs(dir_network_epoch)
exp.mm_vae.save_networks()
torch.save(exp.mm_vae.state_dict(),
os.path.join(dir_network_epoch, exp.flags.mm_vae_save))
print('lr = ',lr_list[-1])
# plt.plot(range(exp.flags.end_epoch), lr_list, color='r')
# plt.show()
if exp.flags.test_type=='normal':
top1, top5 = run_classification_test(exp,'image_text', 'normal')
elif exp.flags.test_type=='zsl':
top1, top5 = run_classification_test(exp, 'image_text', 'zsl')
    path = './results/'+exp.flags.dataname+'_'+exp.flags.sbj+'_'+exp.flags.roi+'_'+exp.flags.aug_type+'_'+exp.flags.text_model+'_'+exp.flags.image_model.split('/')[-1]+'_'+str(exp.flags.lambda1)+'_'+str(exp.flags.lambda2)+'_'+str(exp.flags.class_dim)+'_'+exp.flags.method+'_image_text.csv'
create_csv(path, top1, top5)
write_csv(path, top1, top5)
| 40,510 | 43.12963 | 315 | py |
BraVL | BraVL-master/BraVL_fMRI/extract_fea_with_timm.py | import argparse
import os
from scipy import io
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import PIL
import torch
import timm
# python extract_fea_with_timm.py --data ./data/GenericObjectDecoding-v2/images/training --save_dir ./data/GOD-Wiki/visual_feature/ImageNetTraining --model repvgg_b3g4 --resolution 224
parser = argparse.ArgumentParser(description='PyTorch ImageNet Test')
parser.add_argument('-i', '--data', metavar='./data/GenericObjectDecoding-v2/images/training',
help='path to dataset')
parser.add_argument('-o', '--save_dir', metavar='./data/GOD-Wiki/visual_feature/ImageNetTraining',
help='path to save')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=1, type=int,
metavar='N',
help='mini-batch size (default: 100) for test')
parser.add_argument('-r', '--resolution', default=224, type=int,
metavar='R', help='resolution (default: 224) for test')
parser.add_argument('-m', '--model', default='resnet50', type=str,
metavar='M', help='pretrained model for test')
args = parser.parse_args()
root_dir = args.save_dir+'/pytorch/'+ args.model +'/'
if not os.path.exists(root_dir):
os.makedirs(root_dir)
def get_default_val_trans(args):
if (not hasattr(args, 'resolution')) or args.resolution == 224:
trans = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
])
else:
trans = transforms.Compose([
transforms.Resize(args.resolution, interpolation=PIL.Image.BILINEAR),
transforms.CenterCrop(args.resolution),
transforms.ToTensor(),
])
return trans
def get_ImageNet_val_dataset(args, trans):
val_dataset = datasets.ImageFolder(args.data, trans)
return val_dataset
def get_default_ImageNet_val_loader_withpath(args):
val_trans = get_default_val_trans(args)
val_dataset = get_ImageNet_val_dataset(args, val_trans)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
return val_loader, val_dataset
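# Extracts three feature types per image with timm and saves each as a .mat
# file: 'final' (the pretrained classifier logits), 'linear' (the pooled
# features from the num_classes=0 head-less model) and 'Conv' (the multi-scale
# feature maps from features_only=True, one directory per scale).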
def extract(val_loader, val_dataset, model_final, model_linear, model_multiscale, use_gpu):
def save_feature(feat, flag):
feature_name = feature
l = feature_name.split('_')
if 'out' in l:
l.remove('out')
if 'list' in l:
l.remove('list')
feature_name = '_'.join(l)
feat = feat.cpu().numpy()
if flag == 'list':
dir1 = '{}/{}_{}'.format(root_dir, feature_name, i)
else:
dir1 = '{}/{}'.format(root_dir, feature_name)
if not os.path.exists(dir1):
os.makedirs(dir1)
filename = '{}.mat'.format(imid)
io.savemat(dir1 + '/' + filename, {'feat': feat})
# switch to evaluate mode
model_final.eval()
model_linear.eval()
model_multiscale.eval()
    # label index corresponding to each class folder
# print(val_dataset.class_to_idx)
with torch.no_grad():
for i, images in enumerate(val_loader):
            if use_gpu:
                images = images[0].cuda(non_blocking=True)
            else:
                images = images[0]
final = model_final(images)
print(f'Original shape: {final.shape}')
linear = model_linear(images)
print(f'Pooled shape: {linear.shape}')
Conv = model_multiscale(images)
# Conv = [Conv[-4],Conv[-3],Conv[-2],Conv[-1]]
for x in Conv:
print(x.shape)
wnid = val_dataset.imgs[i][0].split("/")[-2]
imid = val_dataset.imgs[i][0].split("/")[-1].split('.')[0]
print(wnid, imid)
feature_list = ['final','linear','Conv']
for feature in feature_list:
feat = eval(feature)
if type(feat) == list:
for i in range(len(feat)):
save_feature(feat[i], 'list')
else:
save_feature(feat, 'single')
def inference():
model_final = timm.create_model(args.model, pretrained=True)
model_linear = timm.create_model(args.model, pretrained=True, num_classes=0)
model_multiscale = timm.create_model(args.model, pretrained=True, features_only=True)
if not torch.cuda.is_available():
print('using CPU, this will be slow')
use_gpu = False
else:
model_final = model_final.cuda()
model_linear = model_linear.cuda()
model_multiscale = model_multiscale.cuda()
use_gpu = True
cudnn.benchmark = True
val_loader, val_dataset = get_default_ImageNet_val_loader_withpath(args)
extract(val_loader, val_dataset, model_final, model_linear, model_multiscale, use_gpu)
def extract_no_conv(val_loader, val_dataset, model_final, model_linear, use_gpu):
def save_feature(feat, flag):
feature_name = feature
l = feature_name.split('_')
if 'out' in l:
l.remove('out')
if 'list' in l:
l.remove('list')
feature_name = '_'.join(l)
feat = feat.cpu().numpy()
if flag == 'list':
dir1 = '{}/{}_{}'.format(root_dir, feature_name, i)
else:
dir1 = '{}/{}'.format(root_dir, feature_name)
if not os.path.exists(dir1):
os.makedirs(dir1)
filename = '{}.mat'.format(imid)
io.savemat(dir1 + '/' + filename, {'feat': feat})
# switch to evaluate mode
model_final.eval()
model_linear.eval()
with torch.no_grad():
for i, images in enumerate(val_loader):
            if use_gpu:
                images = images[0].cuda(non_blocking=True)
            else:
                images = images[0]
final = model_final(images)
print(f'Original shape: {final.shape}')
linear = model_linear(images)
print(f'Pooled shape: {linear.shape}')
wnid = val_dataset.imgs[i][0].split("/")[-2]
imid = val_dataset.imgs[i][0].split("/")[-1].split('.')[0]
print(wnid, imid)
feature_list = ['final','linear']
for feature in feature_list:
feat = eval(feature)
if type(feat) == list:
for i in range(len(feat)):
save_feature(feat[i], 'list')
else:
save_feature(feat, 'single')
def inference_no_conv():
model_final = timm.create_model(args.model, pretrained=True)
model_linear = timm.create_model(args.model, pretrained=True, num_classes=0)
if not torch.cuda.is_available():
print('using CPU, this will be slow')
use_gpu = False
else:
model_final = model_final.cuda()
model_linear = model_linear.cuda()
use_gpu = True
cudnn.benchmark = True
val_loader, val_dataset = get_default_ImageNet_val_loader_withpath(args)
extract_no_conv(val_loader, val_dataset, model_final, model_linear, use_gpu)
if __name__ == '__main__':
inference()
# inference_no_conv()
| 7,451 | 35.529412 | 184 | py |
BraVL | BraVL-master/BraVL_fMRI/data_prepare_with_aug_DIR_Wiki.py | from __future__ import print_function
from itertools import product
import os
import pickle
import bdpy
from bdpy.dataform import Features
from bdpy.util import dump_info, makedir_ifnot
import numpy as np
from stability_selection import stability_selection
from sklearn.decomposition import PCA
from scipy import io
# Settings ###################################################################
seed = 42
TINY = 1e-8
# Python RNG
np.random.seed(seed)
subject_set=['subject1','subject2','subject3']
for subject in subject_set:
if subject == 'subject1':
subjects_list = {
'sub-01': 'sub-01_perceptionNaturalImageTraining_VC_v2.h5',
}
subjects_list_test = {
'sub-01': 'sub-01_perceptionNaturalImageTest_VC_v2.h5',
}
elif subject == 'subject2':
subjects_list = {
'sub-02': 'sub-02_perceptionNaturalImageTraining_VC_v2.h5',
}
subjects_list_test = {
'sub-02': 'sub-02_perceptionNaturalImageTest_VC_v2.h5',
}
elif subject == 'subject3':
subjects_list = {
'sub-03': 'sub-03_perceptionNaturalImageTraining_VC_v2.h5',
}
subjects_list_test = {
'sub-03': 'sub-03_perceptionNaturalImageTest_VC_v2.h5',
}
text_model_list = [
'GPTNeo',
'ALBERT',
# 'GPTNeo_phrases',
# 'ALBERT_phrases'
]
rois_list = {
# 'VC': 'ROI_VC = 1',
'LVC': 'ROI_LVC = 1',
'HVC': 'ROI_HVC = 1',
# 'V1': 'ROI_V1 = 1',
# 'V2': 'ROI_V2 = 1',
# 'V3': 'ROI_V3 = 1',
# 'V4': 'ROI_V4 = 1',
# 'LOC': 'ROI_LOC = 1',
# 'FFA': 'ROI_FFA = 1',
# 'PPA': 'ROI_PPA = 1',
'IT': 'ROI_IT = 1',
}
network = 'pytorch/repvgg_b3g4'
features_list = [#'Conv_0',
# 'Conv_1',
'Conv_2',
'Conv_3',
'Conv_4',
'linear',
'final']
features_list = features_list[::-1] # Start training from deep layers
# Brain data
brain_dir = './data/DeepImageReconstruction/data/fmri'
# Image features
timm_extracted_visual_features = './data/DIR-Wiki/visual_feature/ImageNetTraining/'+network
timm_extracted_visual_features_test = './data/DIR-Wiki/visual_feature/ImageNetTest/'+network
timm_extracted_visual_features_aug = './data/DIR-Wiki/visual_feature/Aug_1000/'+network
print('DNN feature')
print(timm_extracted_visual_features)
# Text features
model_extracted_textual_features = './data/Wiki_articles_features'
# Results directory
results_dir_root = './data/DIR-Wiki/visual_feature/ImageNetTraining/'+network+'-PCA'
results_dir_root_test = './data/DIR-Wiki/visual_feature/ImageNetTest/'+network+'-PCA'
results_dir_root_aug = './data/DIR-Wiki/visual_feature/Aug_1000/'+network+'-PCA'
results_fmri_root = './data/DIR-Wiki/brain_feature/LVC_HVC_IT'
results_text_root = './data/DIR-Wiki/textual_feature/ImageNetTraining/text'
results_text_root_test = './data/DIR-Wiki/textual_feature/ImageNetTest/text'
results_text_root_aug = './data/DIR-Wiki/textual_feature/Aug_1000/text'
# Main #######################################################################
analysis_basename = os.path.splitext(os.path.basename(__file__))[0]
# Print info -----------------------------------------------------------------
print('Subjects: %s' % subjects_list.keys())
print('ROIs: %s' % rois_list.keys())
print('Target features: %s' % network.split('/')[-1])
print('Layers: %s' % features_list)
print('')
# Load data ------------------------------------------------------------------
print('----------------------------------------')
print('Loading data')
data_brain = {sbj: bdpy.BData(os.path.join(brain_dir, dat_file))
for sbj, dat_file in subjects_list.items()}
data_features = Features(os.path.join(timm_extracted_visual_features, network))
data_brain_test = {sbj: bdpy.BData(os.path.join(brain_dir, dat_file))
for sbj, dat_file in subjects_list_test.items()}
data_features_test = Features(os.path.join(timm_extracted_visual_features_test, network))
data_features_aug = Features(os.path.join(timm_extracted_visual_features_aug, network))
# Initialize directories -----------------------------------------------------
makedir_ifnot(results_dir_root)
makedir_ifnot(results_dir_root_test)
makedir_ifnot(results_dir_root_aug)
makedir_ifnot(results_text_root)
makedir_ifnot(results_text_root_test)
makedir_ifnot(results_text_root_aug)
makedir_ifnot('tmp')
# Save runtime information ---------------------------------------------------
info_dir = results_dir_root
runtime_params = {
'fMRI data': [os.path.abspath(os.path.join(brain_dir, v)) for v in subjects_list.values()],
'ROIs': rois_list.keys(),
'target DNN': network.split('/')[-1],
'target DNN features': os.path.abspath(timm_extracted_visual_features),
'target DNN layers': features_list,
}
dump_info(info_dir, script=__file__, parameters=runtime_params)
#######################################
# Stability selection
#######################################
select_ratio = 0.15
totalnum = 0
first = 1
best_roi_sel = []
num_voxel = dict()
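    # Stability selection: for every ROI, stack the five repeated presentations
    # of each training image and keep the select_ratio fraction of voxels that
    # respond most consistently across repetitions (stability_selection is
    # assumed to return the indices of those voxels); selected voxels are then
    # concatenated across ROIs.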
    for sbj, roi in product(subjects_list, rois_list):
print('--------------------')
print('VC ROI: %s' % roi)
trial1 = []
l1 = []
trial2 = []
l2 = []
trial3 = []
l3 = []
trial4 = []
l4 = []
trial5 = []
l5 = []
# Brain data
x = data_brain[sbj].select(rois_list[roi]) # Brain data
x_labels = data_brain[sbj].select('image_index').flatten() # Label (image index)
x_test = data_brain_test[sbj].select(rois_list[roi]) # Brain data
x_labels_test = data_brain_test[sbj].select('image_index').flatten() # Label (image index)
for l in range(1,int(len(x_labels)/5)+1):
n = np.where(x_labels==l)
#trial1
l1.append(l)
trial1.append(x[n[0][0]])
#trial2
l2.append(l)
trial2.append(x[n[0][1]])
#trial3
l3.append(l)
trial3.append(x[n[0][2]])
#trial4
l4.append(l)
trial4.append(x[n[0][3]])
#trial5
l5.append(l)
trial5.append(x[n[0][4]])
#reshape to select
sel_input = np.array([trial1])
sel_input = np.append(sel_input, np.array([trial2]), axis=0)
sel_input = np.append(sel_input, np.array([trial3]), axis=0)
sel_input = np.append(sel_input, np.array([trial4]), axis=0)
sel_input = np.append(sel_input, np.array([trial5]), axis=0)
select_num = int(select_ratio * (x.shape)[1])
num_voxel.update({roi:select_num})
print('roi_shape=',x.shape)
sel_idx = stability_selection(sel_input, select_num)
#save as best_roi_sel mat
if first:
best_roi_sel = np.array(x[:,sel_idx])
best_roi_sel_test = np.array(x_test[:, sel_idx])
first = 0
else:
best_roi_sel = np.append(best_roi_sel, x[:,sel_idx], axis=1)
best_roi_sel_test = np.append(best_roi_sel_test, x_test[:,sel_idx], axis=1)
totalnum_voxel = (best_roi_sel.shape)[1]
print('total_selected_voxel=', totalnum_voxel)
print(num_voxel)
print('best_roi_sel_shape=',best_roi_sel.shape)
print('x_labels_shape=',x_labels.shape)
print('best_roi_sel_test_shape=',best_roi_sel_test.shape)
print('x_labels_test_shape=',x_labels_test.shape)
#######################################
# Save brain and image feature data
#######################################
# Analysis loop --------------------------------------------------------------
print('----------------------------------------')
print('Analysis loop')
first = 1
for feat, sbj in product(features_list, subjects_list):
print('--------------------')
print('Feature: %s' % feat)
print('Subject: %s' % sbj)
results_dir_alllayer_pca = os.path.join(results_dir_root, sbj)
results_dir_alllayer_pca_test = os.path.join(results_dir_root_test, sbj)
results_dir_alllayer_pca_aug = os.path.join(results_dir_root_aug, sbj)
results_fmri_dir = os.path.join(results_fmri_root, sbj)
# Preparing data
# --------------
print('Preparing data')
# Brain data
x = best_roi_sel # Brain data
        # x_labels (image index) carries over from the stability-selection loop above
x_class = data_brain[sbj].select('Label') # Label (class index)
WordNetID = x_class[:, 2]
if sbj == 'sub-03':
class_idx = data_brain[sbj].select('image_index').flatten()
else:
class_idx = x_class[:, 1]
x_test = best_roi_sel_test # Brain data
x_labels_test = x_labels_test # Label (image index)
x_class_test = data_brain_test[sbj].select('Label') # Label (class index)
WordNetID_test = x_class_test[:, 2]
if sbj == 'sub-03':
class_idx_test = data_brain_test[sbj].select('image_index').flatten()
else:
class_idx_test = x_class_test[:, 1]
# Averaging test brain data
x_labels_test_unique, indices = np.unique(x_labels_test, return_index=True)
x_test_unique = np.vstack([np.mean(x_test[(np.array(x_labels_test) == lb).flatten(), :], axis=0) for lb in x_labels_test_unique])
WordNetID_test_unique = WordNetID_test[indices]
class_idx_test_unique = class_idx_test[indices]
# Target features and image labels (file names)
y = data_features.get_features(feat) # Target DNN features
y_labels = data_features.index # Label (image index)
y = np.reshape(y,(y.shape[0],-1))
y_test = data_features_test.get_features(feat) # Target DNN features
y_labels_test = data_features_test.index # Label (image index)
y_test = np.reshape(y_test,(y_test.shape[0],-1))
y_aug = data_features_aug.get_features(feat) # Target DNN features
y_labels_aug_temp = data_features_aug.labels # Label (image index)
y_labels_aug = []
for it in y_labels_aug_temp:
y_labels_aug.append(int(it.split('_')[0][1:]))
y_labels_aug = np.array(y_labels_aug)
y_aug = np.reshape(y_aug,(y_aug.shape[0],-1))
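        # Preprocessing recipe: z-score with statistics computed on the training
        # split only, zero out infinities caused by zero-variance dimensions,
        # then apply a PCA fit on the training data (n_components=0.99 keeps 99%
        # of the variance) to the train, test and augmentation features alike.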
# Calculate normalization parameters
# Normalize X (fMRI data)
x_mean = np.mean(x, axis=0)[np.newaxis, :] # np.newaxis was added to match Matlab outputs
x_norm = np.std(x, axis=0, ddof=1)[np.newaxis, :]
# Normalize Y (DNN features)
y_mean = np.mean(y, axis=0)[np.newaxis, :]
y_norm = np.std(y, axis=0, ddof=1)[np.newaxis, :]
# Y index to sort Y by X (matching samples)
y_index = np.array([np.where(np.array(y_labels) == xl) for xl in x_labels]).flatten()
y_index_test = np.array([np.where(np.array(y_labels_test) == xl) for xl in x_labels_test]).flatten()
y_index_test_unique = np.array([np.where(np.array(y_labels_test) == xl) for xl in x_labels_test_unique]).flatten()
# X preprocessing
print('Normalizing X')
x = (x - x_mean) / (x_norm+TINY)
x[np.isinf(x)] = 0
x_test = (x_test - x_mean) / (x_norm+TINY)
x_test[np.isinf(x_test)] = 0
x_test_unique = (x_test_unique - x_mean) / (x_norm+TINY)
x_test_unique[np.isinf(x_test_unique)] = 0
print('Doing PCA')
ipca = PCA(n_components=0.99, random_state=seed)
ipca.fit(x)
x = ipca.transform(x)
x_test = ipca.transform(x_test)
x_test_unique = ipca.transform(x_test_unique)
print(x.shape)
# Y preprocessing
print('Normalizing Y')
y = (y - y_mean) / (y_norm+TINY)
y[np.isinf(y)] = 0
y_test = (y_test - y_mean) / (y_norm+TINY)
y_test[np.isinf(y_test)] = 0
y_aug = (y_aug - y_mean) / (y_norm+TINY)
y_aug[np.isinf(y_aug)] = 0
print('Doing PCA')
ipca = PCA(n_components=0.99, random_state=seed)
ipca.fit(y)
# ipca.fit(y_aug)
y = ipca.transform(y)
y_test = ipca.transform(y_test)
y_aug = ipca.transform(y_aug)
print(y.shape)
print('Sorting Y')
y = y[y_index, :]
        y_test_unique = y_test[y_index_test_unique, :]  # take unique rows before y_test is re-ordered
        y_test = y_test[y_index_test, :]
if first:
feat_pca_train = y
feat_pca_test = y_test
feat_pca_aug = y_aug
feat_pca_test_unique = y_test_unique
first = 0
else:
feat_pca_train = np.concatenate((feat_pca_train, y), axis=1)
feat_pca_test = np.concatenate((feat_pca_test, y_test), axis=1)
feat_pca_aug = np.concatenate((feat_pca_aug, y_aug), axis=1)
feat_pca_test_unique = np.concatenate((feat_pca_test_unique, y_test_unique), axis=1)
print(feat_pca_test_unique.shape)
makedir_ifnot(results_dir_alllayer_pca)
makedir_ifnot(results_dir_alllayer_pca_test)
makedir_ifnot(results_dir_alllayer_pca_aug)
results_dir_alllayer_pca_path = os.path.join(results_dir_alllayer_pca, "feat_pca_train.mat")
io.savemat(results_dir_alllayer_pca_path, {"data":feat_pca_train})
results_dir_alllayer_pca_test_path = os.path.join(results_dir_alllayer_pca_test, "feat_pca_test.mat")
io.savemat(results_dir_alllayer_pca_test_path, {"data":feat_pca_test})
results_dir_alllayer_pca_aug_path = os.path.join(results_dir_alllayer_pca_aug, "feat_pca_aug.mat")
io.savemat(results_dir_alllayer_pca_aug_path, {"data":feat_pca_aug})
results_dir_alllayer_pca_test_path = os.path.join(results_dir_alllayer_pca_test, "feat_pca_test_unique.mat")
io.savemat(results_dir_alllayer_pca_test_path, {"data":feat_pca_test_unique})
makedir_ifnot(results_fmri_dir)
results_fmri_dir_path = os.path.join(results_fmri_dir, "fmri_train_data.mat")
io.savemat(results_fmri_dir_path, {"data":x, "image_idx":x_labels, "WordNetID":WordNetID, "class_idx":class_idx})
results_fmri_dir_path = os.path.join(results_fmri_dir, "fmri_test_data.mat")
io.savemat(results_fmri_dir_path, {"data":x_test, "image_idx":x_labels_test, "WordNetID":WordNetID_test, "class_idx":class_idx_test})
results_fmri_dir_path = os.path.join(results_fmri_dir, "fmri_test_data_unique.mat")
io.savemat(results_fmri_dir_path, {"data":x_test_unique, "image_idx":x_labels_test_unique, "WordNetID":WordNetID_test_unique, "class_idx":class_idx_test_unique})
#######################################
# Save text feature data
#######################################
for feat, sbj in product(text_model_list, subjects_list):
print('--------------------')
print('Feature: %s' % feat)
print('Subject: %s' % sbj)
results_dir_text_fea = os.path.join(results_text_root, feat, sbj)
results_dir_text_fea_test = os.path.join(results_text_root_test, feat, sbj)
results_dir_text_fea_aug = os.path.join(results_text_root_aug, feat, sbj)
# Preparing data
# --------------
print('Preparing data')
# Brain data
x_class = data_brain[sbj].select('Label') # Label (class index)
WordNetID = x_class[:, 2]
class_idx = x_class[:, 1]
        # x_labels_test (image index) carries over from the visual-feature loop above
x_class_test = data_brain_test[sbj].select('Label') # Label (class index)
WordNetID_test = x_class_test[:, 2]
class_idx_test = x_class_test[:, 1]
# Averaging test brain data
x_labels_test_unique, indices = np.unique(x_labels_test, return_index=True)
WordNetID_test_unique = WordNetID_test[indices]
class_idx_test_unique = class_idx_test[indices]
# Target text features and wnid
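        # The pickle maps article ids to dicts holding a 'wnid' (WordNet id such
        # as 'n01234567', parsed to an int by dropping the leading 'n') and a
        # 'feats' vector; features are stacked row-wise in the same order.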
name = 'ImageNet_class200_' + feat + '.pkl'
full = os.path.join(model_extracted_textual_features, name)
dictionary = pickle.load(open(full, 'rb'))
firstfeat = 1
firstlabel = 1
for key, value in dictionary.items():
for k, v in value.items():
# print(k, v)
if k == 'wnid':
# print(v)
v = int(v[1:])
if firstlabel:
text_label = np.array([v])
firstlabel = 0
else:
text_label = np.concatenate((text_label, np.array([v])), axis=0)
elif k == 'feats':
v = np.expand_dims(v, axis=0)
if firstfeat:
text_feat = v
firstfeat = 0
else:
text_feat = np.concatenate((text_feat, v), axis=0)
# Extra text features and wnid
name = 'ImageNet_trainval_classes_' + feat + '.pkl'
full = os.path.join(model_extracted_textual_features, name)
dictionary = pickle.load(open(full, 'rb'))
firstfeat = 1
firstlabel = 1
for key, value in dictionary.items():
for k, v in value.items():
# print(k, v)
if k == 'wnid':
# print(v)
v = int(v[1:])
if firstlabel:
text_label_aug = np.array([v])
firstlabel = 0
else:
text_label_aug = np.concatenate((text_label_aug, np.array([v])), axis=0)
elif k == 'feats':
v = np.expand_dims(v, axis=0)
if firstfeat:
text_feat_aug = v
firstfeat = 0
else:
text_feat_aug = np.concatenate((text_feat_aug, v), axis=0)
# t index to sort t by X (matching samples)
t_index = np.array([np.where(np.array(text_label) == xl) for xl in WordNetID.astype(int)]).flatten()
t_index_test = np.array([np.where(np.array(text_label) == xl) for xl in WordNetID_test.astype(int)]).flatten()
t_index_test_unique = np.array([np.where(np.array(text_label) == xl) for xl in WordNetID_test_unique.astype(int)]).flatten()
t_index_aug = np.array([np.where(np.array(text_label_aug) == xl) for xl in y_labels_aug]).flatten()
print('Sorting text')
t = text_feat[t_index, :]
t_test = text_feat[t_index_test, :]
t_aug = text_feat_aug[t_index_aug, :]
t_test_unique = text_feat[t_index_test_unique, :]
print(t.shape)
print(t_test.shape)
print(t_aug.shape)
print(t_test_unique.shape)
makedir_ifnot(results_dir_text_fea)
makedir_ifnot(results_dir_text_fea_test)
makedir_ifnot(results_dir_text_fea_aug)
results_dir_text_fea_path = os.path.join(results_dir_text_fea, "text_feat_train.mat")
io.savemat(results_dir_text_fea_path, {"data": t})
results_dir_text_fea_test_path = os.path.join(results_dir_text_fea_test, "text_feat_test.mat")
io.savemat(results_dir_text_fea_test_path, {"data": t_test})
results_dir_text_fea_aug_path = os.path.join(results_dir_text_fea_aug, "text_feat_aug.mat")
io.savemat(results_dir_text_fea_aug_path, {"data": t_aug})
results_dir_text_fea_test_path = os.path.join(results_dir_text_fea_test, "text_feat_test_unique.mat")
io.savemat(results_dir_text_fea_test_path, {"data": t_test_unique})
print('%s finished.' % analysis_basename) | 20,190 | 40.375 | 169 | py |
BraVL | BraVL-master/BraVL_fMRI/data_prepare_with_aug_GOD_Wiki.py | from __future__ import print_function
from itertools import product
import os
import pickle
import bdpy
from bdpy.dataform import Features
from bdpy.util import dump_info, makedir_ifnot
import numpy as np
from sklearn.decomposition import PCA
from scipy import io
# Settings ###################################################################
seed = 42
TINY = 1e-8
# Python RNG
np.random.seed(seed)
subject_set=['subject1','subject2','subject3','subject4','subject5']
for subject in subject_set:
if subject == 'subject1':
subjects_list = {
'sub-01': 'Subject1.h5',
}
subjects_list_test = {
'sub-01': 'Subject1.h5',
}
elif subject == 'subject2':
subjects_list = {
'sub-02': 'Subject2.h5',
}
subjects_list_test = {
'sub-02': 'Subject2.h5',
}
elif subject == 'subject3':
subjects_list = {
'sub-03': 'Subject3.h5',
}
subjects_list_test = {
'sub-03': 'Subject3.h5',
}
elif subject == 'subject4':
subjects_list = {
'sub-04': 'Subject4.h5',
}
subjects_list_test = {
'sub-04': 'Subject4.h5',
}
elif subject == 'subject5':
subjects_list = {
'sub-05': 'Subject5.h5',
}
subjects_list_test = {
'sub-05': 'Subject5.h5',
}
text_embedding_list = [
'GPTNeo',
'ALBERT',
# 'GPTNeo_phrases',
# 'ALBERT_phrases'
]
rois_list = {
# 'VC': 'ROI_VC = 1',
# 'LVC': 'ROI_LVC = 1',
# 'HVC': 'ROI_HVC = 1',
'V1': 'ROI_V1 = 1',
'V2': 'ROI_V2 = 1',
'V3': 'ROI_V3 = 1',
'V4': 'ROI_V4 = 1',
'LOC': 'ROI_LOC = 1',
'FFA': 'ROI_FFA = 1',
'PPA': 'ROI_PPA = 1',
}
network = 'pytorch/repvgg_b3g4'
features_list = [ # 'Conv_0',
# 'Conv_1',
'Conv_2',
'Conv_3',
'Conv_4',
'linear',
'final']
features_list = features_list[::-1] # Start training from deep layers
# Brain data
brain_dir = './data/GenericObjectDecoding-v2'
# Image features
timm_extracted_visual_features = './data/GOD-Wiki/visual_feature/ImageNetTraining/' + network
timm_extracted_visual_features_test = './data/GOD-Wiki/visual_feature/ImageNetTest/' + network
timm_extracted_visual_features_aug = './data/GOD-Wiki/visual_feature/Aug_1000/' + network
print('DNN feature')
print(timm_extracted_visual_features)
# Text features
model_extracted_textual_features = './data/Wiki_articles_features'
# Results directory
results_dir_root = './data/GOD-Wiki/visual_feature/ImageNetTraining/' + network + '-PCA'
results_dir_root_test = './data/GOD-Wiki/visual_feature/ImageNetTest/' + network + '-PCA'
results_dir_root_aug = './data/GOD-Wiki/visual_feature/Aug_1000/' + network + '-PCA'
results_fmri_root = './data/GOD-Wiki/brain_feature/LVC_HVC_IT'
results_text_root = './data/GOD-Wiki/textual_feature/ImageNetTraining/text'
results_text_root_test = './data/GOD-Wiki/textual_feature/ImageNetTest/text'
results_text_root_aug = './data/GOD-Wiki/textual_feature/Aug_1000/text'
# Main #######################################################################
analysis_basename = os.path.splitext(os.path.basename(__file__))[0]
# Print info -----------------------------------------------------------------
print('Subjects: %s' % subjects_list.keys())
print('ROIs: %s' % rois_list.keys())
print('Target features: %s' % network.split('/')[-1])
print('Layers: %s' % features_list)
print('')
# Load data ------------------------------------------------------------------
print('----------------------------------------')
print('Loading data')
data_brain = {sbj: bdpy.BData(os.path.join(brain_dir, dat_file))
for sbj, dat_file in subjects_list.items()}
data_features = Features(os.path.join(timm_extracted_visual_features, network))
data_brain_test = {sbj: bdpy.BData(os.path.join(brain_dir, dat_file))
for sbj, dat_file in subjects_list_test.items()}
data_features_test = Features(os.path.join(timm_extracted_visual_features_test, network))
data_features_aug = Features(os.path.join(timm_extracted_visual_features_aug, network))
# Initialize directories -----------------------------------------------------
makedir_ifnot(results_dir_root)
makedir_ifnot(results_dir_root_test)
makedir_ifnot(results_dir_root_aug)
makedir_ifnot(results_text_root)
makedir_ifnot(results_text_root_test)
makedir_ifnot(results_text_root_aug)
# Save runtime information ---------------------------------------------------
info_dir = results_dir_root
runtime_params = {
'fMRI data': [os.path.abspath(os.path.join(brain_dir, v)) for v in subjects_list.values()],
'ROIs': rois_list.keys(),
'target DNN': network.split('/')[-1],
'target DNN features': os.path.abspath(timm_extracted_visual_features),
'target DNN layers': features_list,
}
dump_info(info_dir, script=__file__, parameters=runtime_params)
#######################################
# Original
#######################################
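    # Unlike the DIR pipeline, GOD keeps every voxel of the listed ROIs (no
    # stability selection) and simply concatenates the ROI responses.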
first = 1
    for sbj, roi in product(subjects_list, rois_list):
print('--------------------')
print('VC ROI: %s' % roi)
# Brain data
# data_brain[sbj].show_metadata()
        x = data_brain[sbj].select(rois_list[roi])  # Brain data
x_labels = data_brain[sbj].select('image_index').flatten() # Label (image index)
print('roi_shape=', x.shape)
x_test= data_brain_test[sbj].select(rois_list[roi]) # Brain data
x_labels_test = data_brain_test[sbj].select('image_index').flatten() # Label (image index)
if first:
best_roi_sel = x
best_roi_sel_test = x_test
first = 0
else:
best_roi_sel = np.append(best_roi_sel, x, axis=1)
best_roi_sel_test = np.append(best_roi_sel_test, x_test, axis=1)
#######################################
# Save brain and image feature data
#######################################
# Analysis loop --------------------------------------------------------------
print('----------------------------------------')
print('Analysis loop')
first = 1
for feat, sbj in product(features_list, subjects_list):
print('--------------------')
print('Feature: %s' % feat)
print('Subject: %s' % sbj)
results_dir_alllayer_pca = os.path.join(results_dir_root, sbj)
results_dir_alllayer_pca_test = os.path.join(results_dir_root_test, sbj)
results_dir_alllayer_pca_aug = os.path.join(results_dir_root_aug, sbj)
results_fmri_dir = os.path.join(results_fmri_root, sbj)
# Preparing data
print('Preparing data')
# Brain data
x = best_roi_sel[0:1200] # Brain data
x_labels = data_brain[sbj].select('image_index').flatten() # Label (image index)
x_labels = x_labels[0:1200] # Label (image index)
x_class = data_brain[sbj].select('Label') # Label (class index)
WordNetID = data_brain[sbj].select('stimulus_id') # Label (class index)
WordNetID = WordNetID[0:1200,0]
class_idx = x_class[0:1200, 1]
x_test = best_roi_sel_test[1200:2950] # Brain data
x_labels_test = data_brain_test[sbj].select('image_index').flatten() # Label (image index)
x_labels_test = x_labels_test[1200:2950] # Label (image index)
x_class_test = data_brain_test[sbj].select('Label') # Label (class index)
WordNetID_test = data_brain_test[sbj].select('stimulus_id') # Label (class index)
WordNetID_test = WordNetID_test[1200:2950,0]
class_idx_test = x_class_test[1200:2950, 1]
# Averaging test brain data
x_labels_test_unique, indices = np.unique(x_labels_test, return_index=True)
x_test_unique = np.vstack([np.mean(x_test[(np.array(x_labels_test) == lb).flatten(), :], axis=0) for lb in x_labels_test_unique])
WordNetID_test_unique = WordNetID_test[indices]
class_idx_test_unique = class_idx_test[indices]
# Target features and image labels (file names)
y = data_features.get_features(feat) # Target DNN features
y_labels = data_features.index # Label (image index)
y = np.reshape(y,(y.shape[0],-1))
y_test = data_features_test.get_features(feat) # Target DNN features
y_labels_test = data_features_test.index # Label (image index)
y_test = np.reshape(y_test,(y_test.shape[0],-1))
y_aug = data_features_aug.get_features(feat) # Target DNN features
y_labels_aug_temp = data_features_aug.labels # Label (image index)
y_labels_aug = []
for it in y_labels_aug_temp:
y_labels_aug.append(int(it.split('_')[0][1:]))
y_labels_aug = np.array(y_labels_aug)
y_aug = np.reshape(y_aug,(y_aug.shape[0],-1))
# Calculate normalization parameters
# Normalize X (fMRI data)
x_mean = np.mean(x, axis=0)[np.newaxis, :] # np.newaxis was added to match Matlab outputs
x_norm = np.std(x, axis=0, ddof=1)[np.newaxis, :]
# Normalize Y (DNN features)
y_mean = np.mean(y, axis=0)[np.newaxis, :]
y_norm = np.std(y, axis=0, ddof=1)[np.newaxis, :]
# Y index to sort Y by X (matching samples)
y_index = np.array([np.where(np.array(y_labels) == xl) for xl in x_labels]).flatten()
y_index_test = np.array([np.where(np.array(y_labels_test) == xl) for xl in x_labels_test]).flatten()
y_index_test_unique = np.array([np.where(np.array(y_labels_test) == xl) for xl in x_labels_test_unique]).flatten()
# X preprocessing
print('Normalizing X')
x = (x - x_mean) / (x_norm+TINY)
x[np.isinf(x)] = 0
x_test = (x_test - x_mean) / (x_norm+TINY)
x_test[np.isinf(x_test)] = 0
x_test_unique = (x_test_unique - x_mean) / (x_norm+TINY)
x_test_unique[np.isinf(x_test_unique)] = 0
print('Doing PCA')
ipca = PCA(n_components=0.99, random_state=seed)
ipca.fit(x)
x = ipca.transform(x)
x_test = ipca.transform(x_test)
x_test_unique = ipca.transform(x_test_unique)
print(x.shape)
# Y preprocessing
print('Normalizing Y')
y = (y - y_mean) / (y_norm+TINY)
y[np.isinf(y)] = 0
y_test = (y_test - y_mean) / (y_norm+TINY)
y_test[np.isinf(y_test)] = 0
y_aug = (y_aug - y_mean) / (y_norm+TINY)
y_aug[np.isinf(y_aug)] = 0
print('Doing PCA')
ipca = PCA(n_components=0.99, random_state=seed)
ipca.fit(y)
# ipca.fit(y_aug)
y = ipca.transform(y)
y_test = ipca.transform(y_test)
y_aug = ipca.transform(y_aug)
print(y.shape)
print('Sorting Y')
y = y[y_index, :]
        y_test_unique = y_test[y_index_test_unique, :]  # take unique rows before y_test is re-ordered
        y_test = y_test[y_index_test, :]
if first:
feat_pca_train = y
feat_pca_test = y_test
feat_pca_aug = y_aug
feat_pca_test_unique = y_test_unique
first = 0
else:
feat_pca_train = np.concatenate((feat_pca_train, y), axis=1)
feat_pca_test = np.concatenate((feat_pca_test, y_test), axis=1)
feat_pca_aug = np.concatenate((feat_pca_aug, y_aug), axis=1)
feat_pca_test_unique = np.concatenate((feat_pca_test_unique, y_test_unique), axis=1)
print(feat_pca_test_unique.shape)
makedir_ifnot(results_dir_alllayer_pca)
makedir_ifnot(results_dir_alllayer_pca_test)
makedir_ifnot(results_dir_alllayer_pca_aug)
results_dir_alllayer_pca_path = os.path.join(results_dir_alllayer_pca, "feat_pca_train.mat")
io.savemat(results_dir_alllayer_pca_path, {"data":feat_pca_train})
results_dir_alllayer_pca_test_path = os.path.join(results_dir_alllayer_pca_test, "feat_pca_test.mat")
io.savemat(results_dir_alllayer_pca_test_path, {"data":feat_pca_test})
results_dir_alllayer_pca_aug_path = os.path.join(results_dir_alllayer_pca_aug, "feat_pca_aug.mat")
io.savemat(results_dir_alllayer_pca_aug_path, {"data":feat_pca_aug})
results_dir_alllayer_pca_test_path = os.path.join(results_dir_alllayer_pca_test, "feat_pca_test_unique.mat")
io.savemat(results_dir_alllayer_pca_test_path, {"data":feat_pca_test_unique})
makedir_ifnot(results_fmri_dir)
results_fmri_dir_path = os.path.join(results_fmri_dir, "fmri_train_data.mat")
io.savemat(results_fmri_dir_path, {"data":x, "image_idx":x_labels, "WordNetID":WordNetID, "class_idx":class_idx})
results_fmri_dir_path = os.path.join(results_fmri_dir, "fmri_test_data.mat")
io.savemat(results_fmri_dir_path, {"data":x_test, "image_idx":x_labels_test, "WordNetID":WordNetID_test, "class_idx":class_idx_test})
results_fmri_dir_path = os.path.join(results_fmri_dir, "fmri_test_data_unique.mat")
io.savemat(results_fmri_dir_path, {"data":x_test_unique, "image_idx":x_labels_test_unique, "WordNetID":WordNetID_test_unique, "class_idx":class_idx_test_unique})
#######################################
# Save text feature data
#######################################
for feat, sbj in product(text_embedding_list, subjects_list):
print('--------------------')
print('Feature: %s' % feat)
print('Subject: %s' % sbj)
results_dir_text_fea = os.path.join(results_text_root, feat, sbj)
results_dir_text_fea_test = os.path.join(results_text_root_test, feat, sbj)
results_dir_text_fea_aug = os.path.join(results_text_root_aug, feat, sbj)
# Preparing data
print('Preparing data')
# Brain data
x_class = data_brain[sbj].select('Label')[0:1200] # Label (class index)
WordNetID = data_brain[sbj].select('stimulus_id') # Label (class index)
WordNetID = WordNetID[0:1200,0]
class_idx = x_class[0:1200, 1]
x_labels_test = data_brain_test[sbj].select('image_index').flatten() # Label (image index)
x_labels_test = x_labels_test[1200:2950] # Label (image index)
x_class_test = data_brain_test[sbj].select('Label') # Label (class index)
WordNetID_test = data_brain_test[sbj].select('stimulus_id') # Label (class index)
WordNetID_test = WordNetID_test[1200:2950,0]
class_idx_test = x_class_test[1200:2950, 1]
# Averaging test brain data
x_labels_test_unique, indices = np.unique(x_labels_test, return_index=True)
WordNetID_test_unique = WordNetID_test[indices]
class_idx_test_unique = class_idx_test[indices]
# Target text features and wnid
name = 'ImageNet_class200_' + feat + '.pkl'
full = os.path.join(model_extracted_textual_features, name)
dictionary = pickle.load(open(full, 'rb'))
firstfeat = 1
firstlabel = 1
for key, value in dictionary.items():
for k, v in value.items():
# print(k, v)
if k == 'wnid':
# print(v)
v = int(v[1:])
if firstlabel:
text_label = np.array([v])
firstlabel = 0
else:
text_label = np.concatenate((text_label, np.array([v])), axis=0)
elif k == 'feats':
v = np.expand_dims(v, axis=0)
if firstfeat:
text_feat = v
firstfeat = 0
else:
text_feat = np.concatenate((text_feat, v), axis=0)
# Target text features and wnid
name = 'ImageNet_trainval_classes_' + feat + '.pkl'
full = os.path.join(model_extracted_textual_features, name)
dictionary = pickle.load(open(full, 'rb'))
firstfeat = 1
firstlabel = 1
for key, value in dictionary.items():
for k, v in value.items():
# print(k, v)
if k == 'wnid':
# print(v)
v = int(v[1:])
if firstlabel:
text_label_aug = np.array([v])
firstlabel = 0
else:
text_label_aug = np.concatenate((text_label_aug, np.array([v])), axis=0)
elif k == 'feats':
v = np.expand_dims(v, axis=0)
if firstfeat:
text_feat_aug = v
firstfeat = 0
else:
text_feat_aug = np.concatenate((text_feat_aug, v), axis=0)
# t index to sort t by X (matching samples)
t_index = np.array([np.where(np.array(text_label) == xl) for xl in WordNetID.astype(int)]).flatten()
t_index_test = np.array([np.where(np.array(text_label) == xl) for xl in WordNetID_test.astype(int)]).flatten()
t_index_test_unique = np.array([np.where(np.array(text_label) == xl) for xl in WordNetID_test_unique.astype(int)]).flatten()
t_index_aug = np.array([np.where(np.array(text_label_aug) == xl) for xl in y_labels_aug]).flatten()
print('Sorting text')
t = text_feat[t_index, :]
t_test = text_feat[t_index_test, :]
t_aug = text_feat_aug[t_index_aug, :]
t_test_unique = text_feat[t_index_test_unique, :]
print(t.shape)
print(t_test.shape)
print(t_aug.shape)
print(t_test_unique.shape)
makedir_ifnot(results_dir_text_fea)
makedir_ifnot(results_dir_text_fea_test)
makedir_ifnot(results_dir_text_fea_aug)
results_dir_text_fea_path = os.path.join(results_dir_text_fea, "text_feat_train.mat")
io.savemat(results_dir_text_fea_path, {"data": t})
results_dir_text_fea_test_path = os.path.join(results_dir_text_fea_test, "text_feat_test.mat")
io.savemat(results_dir_text_fea_test_path, {"data": t_test})
results_dir_text_fea_aug_path = os.path.join(results_dir_text_fea_aug, "text_feat_aug.mat")
io.savemat(results_dir_text_fea_aug_path, {"data": t_aug})
results_dir_text_fea_test_path = os.path.join(results_dir_text_fea_test, "text_feat_test_unique.mat")
io.savemat(results_dir_text_fea_test_path, {"data": t_test_unique})
print('%s finished.' % analysis_basename) | 18,971 | 41.066519 | 169 | py |
BraVL | BraVL-master/BraVL_fMRI/main_trimodal.py | import sys
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import json
import torch
from run_epochs_trimodal import run_epochs_trimodal
from utils.filehandling import create_dir_structure
from brain_image_text.flags import parser
from brain_image_text.experiment import BrainImageText
torch.set_default_tensor_type(torch.DoubleTensor)
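# Translate the --method flag into the matching boolean fusion flag consumed
# by the multimodal VAE: product of experts (poe), mixture of experts (moe),
# JSD-based fusion (jsd) or the joint-ELBO objective (joint_elbo).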
if __name__ == '__main__':
FLAGS = parser.parse_args()
use_cuda = torch.cuda.is_available()
FLAGS.device = torch.device('cuda' if use_cuda else 'cpu')
if FLAGS.method == 'poe':
FLAGS.modality_poe=True
elif FLAGS.method == 'moe':
FLAGS.modality_moe=True
elif FLAGS.method == 'jsd':
FLAGS.modality_jsd=True
elif FLAGS.method == 'joint_elbo':
FLAGS.joint_elbo=True
else:
        print('method not implemented... exit!')
sys.exit()
print(FLAGS.modality_poe)
print(FLAGS.modality_moe)
print(FLAGS.modality_jsd)
print(FLAGS.joint_elbo)
FLAGS.alpha_modalities = [FLAGS.div_weight_uniform_content, FLAGS.div_weight_m1_content,
FLAGS.div_weight_m2_content, FLAGS.div_weight_m3_content]
FLAGS = create_dir_structure(FLAGS)
alphabet_path = os.path.join(os.getcwd(), 'alphabet.json')
with open(alphabet_path) as alphabet_file:
alphabet = str(''.join(json.load(alphabet_file)))
mst = BrainImageText(FLAGS, alphabet)
mst.set_optimizer()
total_params = sum(p.numel() for p in mst.mm_vae.parameters())
print('num parameters model: ' + str(total_params))
run_epochs_trimodal(mst)
| 1,562 | 33.733333 | 92 | py |
BraVL | BraVL-master/BraVL_fMRI/modalities/Modality.py |
from abc import ABC, abstractmethod
import os
import torch
import torch.distributions as dist
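# Thin per-modality container bundling what the multimodal VAE needs: the
# encoder/decoder networks, latent dimensions, and the likelihood
# distribution used to score reconstructions.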
class Modality(ABC):
    def __init__(self, name, enc, dec, class_dim, style_dim, lhood_name):
        self.name = name
        self.encoder = enc
        self.decoder = dec
        self.class_dim = class_dim
        self.style_dim = style_dim
        self.likelihood_name = lhood_name
        self.likelihood = self.get_likelihood(lhood_name)
    def get_likelihood(self, name):
        if name == 'laplace':
            pz = dist.Laplace
        elif name == 'bernoulli':
            pz = dist.Bernoulli
        elif name == 'normal':
            pz = dist.Normal
        elif name == 'categorical':
            pz = dist.OneHotCategorical
        else:
            print('likelihood not implemented')
            pz = None
        return pz
    def calc_log_prob(self, out_dist, target, norm_value):
        log_prob = out_dist.log_prob(target).sum()
        mean_val_logprob = log_prob / norm_value
        return mean_val_logprob
    def save_networks(self, dir_checkpoints):
        torch.save(self.encoder.state_dict(), os.path.join(dir_checkpoints,
                                                           'enc_' + self.name))
        torch.save(self.decoder.state_dict(), os.path.join(dir_checkpoints,
                                                           'dec_' + self.name))
| 1,414 | 27.877551 | 79 | py |
BraVL | BraVL-master/BraVL_fMRI/brain_image_text/experiment.py | import os
import numpy as np
import itertools
import scipy.io as sio
import torch
import torch.optim as optim
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset
from modalities.Modality import Modality
from brain_image_text.networks.VAEtrimodal import VAEtrimodal,VAEbimodal
from brain_image_text.networks.QNET import QNet
from brain_image_text.networks.MLP_Brain import EncoderBrain, DecoderBrain
from brain_image_text.networks.MLP_Image import EncoderImage, DecoderImage
from brain_image_text.networks.MLP_Text import EncoderText, DecoderText
from utils.BaseExperiment import BaseExperiment
class BrainImageText(BaseExperiment):
def __init__(self, flags, alphabet):
super().__init__(flags)
self.modalities = self.set_modalities()
self.num_modalities = len(self.modalities.keys())
self.subsets = self.set_subsets()
self.dataset_train = None
self.dataset_test = None
self.set_dataset()
self.mm_vae = self.set_model()
self.optimizer = None
self.rec_weights = self.set_rec_weights()
self.style_weights = self.set_style_weights()
        self.Q1, self.Q2, self.Q3 = self.set_Qmodel()
self.eval_metric = accuracy_score
self.labels = ['digit']
def set_model(self):
model = VAEtrimodal(self.flags, self.modalities, self.subsets)
model = model.to(self.flags.device)
return model
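    # Three modalities (brain / image / text), each an MLP encoder-decoder pair
    # with a Gaussian ('normal') reconstruction likelihood.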
def set_modalities(self):
mod1 = Modality('brain', EncoderBrain(self.flags), DecoderBrain(self.flags),
self.flags.class_dim, self.flags.style_m1_dim, 'normal')
mod2 = Modality('image', EncoderImage(self.flags), DecoderImage(self.flags),
self.flags.class_dim, self.flags.style_m2_dim, 'normal')
mod3 = Modality('text', EncoderText(self.flags), DecoderText(self.flags),
self.flags.class_dim, self.flags.style_m3_dim, 'normal')
mods = {mod1.name: mod1, mod2.name: mod2, mod3.name: mod3}
return mods
def set_dataset(self):
# load data
data_dir_root = self.flags.data_dir_root
sbj = self.flags.sbj
stability_ratio = self.flags.stability_ratio
image_model = self.flags.image_model
text_model = self.flags.text_model
roi = self.flags.roi
brain_dir = os.path.join(data_dir_root, 'brain_feature', roi, sbj)
image_dir_train = os.path.join(data_dir_root, 'visual_feature/ImageNetTraining', image_model + '-PCA', sbj)
image_dir_test = os.path.join(data_dir_root, 'visual_feature/ImageNetTest', image_model + '-PCA', sbj)
text_dir_train = os.path.join(data_dir_root, 'textual_feature/ImageNetTraining/text', text_model, sbj)
text_dir_test = os.path.join(data_dir_root, 'textual_feature/ImageNetTest/text', text_model, sbj)
train_brain = sio.loadmat(os.path.join(brain_dir, 'fmri_train_data'+stability_ratio+'.mat'))['data'].astype('double')
train_image = sio.loadmat(os.path.join(image_dir_train, 'feat_pca_train.mat'))['data'].astype('double')
train_text = sio.loadmat(os.path.join(text_dir_train, 'text_feat_train.mat'))['data'].astype('double')
train_label = sio.loadmat(os.path.join(brain_dir, 'fmri_train_data'+stability_ratio+'.mat'))['class_idx'].T.astype('int')
# test_brain = sio.loadmat(os.path.join(brain_dir, 'fmri_test_data_unique.mat'))['data'].astype('double')
# test_image = sio.loadmat(os.path.join(image_dir_test, 'feat_pca_test_unique.mat'))['data'].astype('double')
# test_text = sio.loadmat(os.path.join(text_dir_test, 'text_feat_test_unique.mat'))['data'].astype('double')
# test_label = sio.loadmat(os.path.join(brain_dir, 'fmri_test_data_unique.mat'))['class_idx'].T.astype('int')
test_brain = sio.loadmat(os.path.join(brain_dir, 'fmri_test_data'+stability_ratio+'.mat'))['data'].astype('double')
test_image = sio.loadmat(os.path.join(image_dir_test, 'feat_pca_test.mat'))['data'].astype('double')
test_text = sio.loadmat(os.path.join(text_dir_test, 'text_feat_test.mat'))['data'].astype('double')
test_label = sio.loadmat(os.path.join(brain_dir, 'fmri_test_data'+stability_ratio+'.mat'))['class_idx'].T.astype('int')
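        # Optional augmentation features: extra image/text features for
        # additional ImageNet classes without paired brain data, consumed by the
        # train_aug loops in run_epochs_trimodal according to aug_type.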
if self.flags.aug_type == 'image_text':
image_dir_aug = os.path.join(data_dir_root, 'visual_feature/Aug_1000', image_model + '-PCA', sbj)
text_dir_aug = os.path.join(data_dir_root, 'textual_feature/Aug_1000/text', text_model, sbj)
aug_image = sio.loadmat(os.path.join(image_dir_aug, 'feat_pca_aug.mat'))['data'].astype('double')
aug_text = sio.loadmat(os.path.join(text_dir_aug, 'text_feat_aug.mat'))['data'].astype('double')
aug_image = torch.from_numpy(aug_image)
aug_text = torch.from_numpy(aug_text)
print('aug_image=', aug_image.shape)
print('aug_text=', aug_text.shape)
elif self.flags.aug_type == 'text_only':
text_dir_aug = os.path.join(data_dir_root, 'textual_feature/Aug_1000/text', text_model, sbj)
aug_text = sio.loadmat(os.path.join(text_dir_aug, 'text_feat_aug.mat'))['data'].astype('double')
aug_text = torch.from_numpy(aug_text)
print('aug_text=', aug_text.shape)
elif self.flags.aug_type == 'image_only':
image_dir_aug = os.path.join(data_dir_root, 'visual_feature/Aug_1000', image_model + '-PCA', sbj)
aug_image = sio.loadmat(os.path.join(image_dir_aug, 'feat_pca_aug.mat'))['data'].astype('double')
aug_image = torch.from_numpy(aug_image)
print('aug_image=', aug_image.shape)
elif self.flags.aug_type == 'no_aug':
print('no augmentation')
        if self.flags.test_type == 'normal':
            # Split all arrays in a single call so the same stratified indices are
            # applied to brain, image, text, and labels; three separate calls (as
            # before) shuffle each modality independently and misalign the rows.
            (train_brain, val_brain, train_image, val_image, train_text, val_text,
             train_label, val_label) = train_test_split(
                train_brain, train_image, train_text, train_label,
                test_size=0.2, stratify=train_label)
val_brain = torch.from_numpy(val_brain)
val_image = torch.from_numpy(val_image)
val_text = torch.from_numpy(val_text)
val_label = torch.from_numpy(val_label)
print('val_brain=', val_brain.shape)
print('val_image=', val_image.shape)
print('val_text=', val_text.shape)
train_brain = torch.from_numpy(train_brain)
test_brain = torch.from_numpy(test_brain)
train_image = torch.from_numpy(train_image)
test_image = torch.from_numpy(test_image)
train_text = torch.from_numpy(train_text)
test_text = torch.from_numpy(test_text)
train_label = torch.from_numpy(train_label)
test_label = torch.from_numpy(test_label)
print('train_brain=', train_brain.shape)
print('train_image=', train_image.shape)
print('train_text=', train_text.shape)
print('test_brain=', test_brain.shape)
print('test_image=', test_image.shape)
print('test_text=', test_text.shape)
self.m1_dim = train_brain.shape[1]
self.m2_dim = train_image.shape[1]
self.m3_dim = train_text.shape[1]
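        # TensorDatasets keep the three modalities index-aligned with the labels.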
train_dataset = torch.utils.data.TensorDataset(train_brain, train_image, train_text, train_label)
        test_dataset = torch.utils.data.TensorDataset(test_brain, test_image, test_text, test_label)
self.dataset_train = train_dataset
self.dataset_test = test_dataset
if self.flags.test_type == 'normal':
val_dataset = torch.utils.data.TensorDataset(val_brain, val_image, val_text, val_label)
self.dataset_val = val_dataset
if self.flags.aug_type == 'image_text':
aug_dataset = torch.utils.data.TensorDataset(aug_image, aug_text)
self.dataset_aug = aug_dataset
elif self.flags.aug_type == 'text_only':
aug_dataset = torch.utils.data.TensorDataset(aug_text)
self.dataset_aug = aug_dataset
elif self.flags.aug_type == 'image_only':
            aug_dataset = torch.utils.data.TensorDataset(aug_image)
            self.dataset_aug = aug_dataset
elif self.flags.aug_type == 'no_aug':
print('no augmentation')
def set_optimizer(self):
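        # Three optimizers sharing one learning rate: one for the VAE alone, one
        # for the Q-networks alone, and one joint, so the two parts can be
        # stepped separately during training.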
        optimizer = optim.Adam(
            itertools.chain(self.mm_vae.parameters(), self.Q1.parameters(),
                            self.Q2.parameters(), self.Q3.parameters()),
            lr=self.flags.initial_learning_rate,
            betas=(self.flags.beta_1, self.flags.beta_2))
        optimizer_mvae = optim.Adam(
            list(self.mm_vae.parameters()),
            lr=self.flags.initial_learning_rate,
            betas=(self.flags.beta_1, self.flags.beta_2))
        optimizer_Qnet = optim.Adam(
            itertools.chain(self.Q1.parameters(), self.Q2.parameters(), self.Q3.parameters()),
            lr=self.flags.initial_learning_rate,
            betas=(self.flags.beta_1, self.flags.beta_2))
        self.optimizer = {'mvae': optimizer_mvae, 'Qnet': optimizer_Qnet, 'all': optimizer}
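        # gamma=1.0 leaves the learning rate unchanged, so these StepLR
        # schedulers are effectively no-ops unless gamma is modified.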
scheduler_mvae = optim.lr_scheduler.StepLR(optimizer_mvae, step_size=20, gamma=1.0)
scheduler_Qnet = optim.lr_scheduler.StepLR(optimizer_Qnet, step_size=20, gamma=1.0)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=1.0)
self.scheduler = {'mvae': scheduler_mvae, 'Qnet': scheduler_Qnet, 'all': scheduler}
def set_Qmodel(self):
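        # One Q-network per modality, mapping that modality's features to the
        # shared class-latent dimension. Note these read m*_dim from flags,
        # while set_dataset() records the measured dims on self.m*_dim.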
        Q1 = QNet(input_dim=self.flags.m1_dim, latent_dim=self.flags.class_dim).to(self.flags.device)
        Q2 = QNet(input_dim=self.flags.m2_dim, latent_dim=self.flags.class_dim).to(self.flags.device)
        Q3 = QNet(input_dim=self.flags.m3_dim, latent_dim=self.flags.class_dim).to(self.flags.device)
        return Q1, Q2, Q3
def set_rec_weights(self):
weights = dict()
weights['brain'] = self.flags.beta_m1_rec
weights['image'] = self.flags.beta_m2_rec
weights['text'] = self.flags.beta_m3_rec
return weights
def set_style_weights(self):
weights = dict()
weights['brain'] = self.flags.beta_m1_style
weights['image'] = self.flags.beta_m2_style
weights['text'] = self.flags.beta_m3_style
return weights
| 10591 | 50.417476 | 158 | py |
BraVL | BraVL-master/BraVL_fMRI/brain_image_text/networks/MLP_Text.py |
import torch
import torch.nn as nn
class EncoderText(nn.Module):
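    # Text encoder: an MLP over text features with Gaussian heads (mu, logvar)
    # for the shared class latent; no modality-specific style latent is used.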
def __init__(self, flags):
super(EncoderText, self).__init__()
        self.flags = flags
        self.hidden_dim = 512
        modules = []
        modules.append(nn.Sequential(nn.Linear(flags.m3_dim, self.hidden_dim), nn.ReLU(True)))
        modules.extend([nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim), nn.ReLU(True))
                        for _ in range(flags.num_hidden_layers - 1)])
        self.enc = nn.Sequential(*modules)
        self.relu = nn.ReLU()
        self.hidden_mu = nn.Linear(in_features=self.hidden_dim, out_features=flags.class_dim, bias=True)
        self.hidden_logvar = nn.Linear(in_features=self.hidden_dim, out_features=flags.class_dim, bias=True)

    def forward(self, x):
        h = self.enc(x)
        h = h.view(h.size(0), -1)
        latent_space_mu = self.hidden_mu(h)
        latent_space_logvar = self.hidden_logvar(h)
        latent_space_mu = latent_space_mu.view(latent_space_mu.size(0), -1)
        latent_space_logvar = latent_space_logvar.view(latent_space_logvar.size(0), -1)
        return None, None, latent_space_mu, latent_space_logvar
class DecoderText(nn.Module):
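    # Text decoder: maps the shared class latent back to text-feature space;
    # the constant 0.75 it returns is presumably the fixed output scale used by
    # the reconstruction likelihood (the style latent input is ignored).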
def __init__(self, flags):
        super(DecoderText, self).__init__()
        self.flags = flags
        self.hidden_dim = 512
        modules = []
        modules.append(nn.Sequential(nn.Linear(flags.class_dim, self.hidden_dim), nn.ReLU(True)))
        modules.extend([nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim), nn.ReLU(True))
                        for _ in range(flags.num_hidden_layers - 1)])
        self.dec = nn.Sequential(*modules)
        self.fc3 = nn.Linear(self.hidden_dim, flags.m3_dim)
        self.relu = nn.ReLU()

    def forward(self, style_latent_space, class_latent_space):
        z = class_latent_space
        x_hat = self.dec(z)
        x_hat = self.fc3(x_hat)
        return x_hat, torch.tensor(0.75).to(z.device)
return x_hat, torch.tensor(0.75).to(z.device); | 1,996 | 36.679245 | 108 | py |
BraVL | BraVL-master/BraVL_fMRI/brain_image_text/networks/VAEtrimodal.py | import os
import torch
import torch.nn as nn
from utils import utils
from utils.BaseMMVae import BaseMMVae
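# Thin concrete subclasses; the multimodal fusion and loss logic is inherited
# from BaseMMVae, so each class only forwards the constructor arguments.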
class VAEtrimodal(BaseMMVae, nn.Module):
def __init__(self, flags, modalities, subsets):
super().__init__(flags, modalities, subsets)
class VAEbimodal(BaseMMVae, nn.Module):
def __init__(self, flags, modalities, subsets):
super().__init__(flags, modalities, subsets)
| 406 | 19.35 | 52 | py |