repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ppo/ppo_agent.py | import numpy as np
import torch
from torch import optim
from rl_utils.running_filter.running_filter import ZFilter
from models import cnn_net, mlp_net
from utils import select_actions, evaluate_actions
from datetime import datetime
import os
import copy
class ppo_agent:
    """PPO agent: builds policy/value networks, collects rollouts and updates them."""
    def __init__(self, envs, args):
        """
        envs: the (possibly vectorized) environment to train on.
        args: parsed hyper-parameter namespace (env_type, lr, nsteps, ...).
        """
        self.envs = envs
        self.args = args
        # build the network according to the environment type
        if self.args.env_type == 'atari':
            self.net = cnn_net(envs.action_space.n)
        elif self.args.env_type == 'mujoco':
            self.net = mlp_net(envs.observation_space.shape[0], envs.action_space.shape[0], self.args.dist)
        # the old network holds the behaviour policy used for the PPO ratio
        self.old_net = copy.deepcopy(self.net)
        # if use the cuda...
        if self.args.cuda:
            self.net.cuda()
            self.old_net.cuda()
        # define the optimizer...
        self.optimizer = optim.Adam(self.net.parameters(), self.args.lr, eps=self.args.eps)
        # running observation filter (mujoco only)
        if self.args.env_type == 'mujoco':
            num_states = self.envs.observation_space.shape[0]
            self.running_state = ZFilter((num_states, ), clip=5)
        # create the saving folder; makedirs(..., exist_ok=True) replaces the two
        # os.mkdir calls -- it handles nested save_dir paths and avoids the
        # check-then-create race when several runs start at once (bug fix)
        self.model_path = os.path.join(self.args.save_dir, self.args.env_name)
        os.makedirs(self.model_path, exist_ok=True)
        # shape of a flattened rollout batch: (num_workers * nsteps, *obs_shape)
        self.batch_ob_shape = (self.args.num_workers * self.args.nsteps, ) + self.envs.observation_space.shape
        self.obs = np.zeros((self.args.num_workers, ) + self.envs.observation_space.shape, dtype=self.envs.observation_space.dtype.name)
        if self.args.env_type == 'mujoco':
            self.obs[:] = np.expand_dims(self.running_state(self.envs.reset()), 0)
        else:
            self.obs[:] = self.envs.reset()
        self.dones = [False for _ in range(self.args.num_workers)]
# start to train the network...
def learn(self):
num_updates = self.args.total_frames // (self.args.nsteps * self.args.num_workers)
# get the reward to calculate other informations
episode_rewards = np.zeros((self.args.num_workers, ), dtype=np.float32)
final_rewards = np.zeros((self.args.num_workers, ), dtype=np.float32)
for update in range(num_updates):
mb_obs, mb_rewards, mb_actions, mb_dones, mb_values = [], [], [], [], []
if self.args.lr_decay:
self._adjust_learning_rate(update, num_updates)
for step in range(self.args.nsteps):
with torch.no_grad():
# get tensors
obs_tensor = self._get_tensors(self.obs)
values, pis = self.net(obs_tensor)
# select actions
actions = select_actions(pis, self.args.dist, self.args.env_type)
if self.args.env_type == 'atari':
input_actions = actions
else:
if self.args.dist == 'gauss':
input_actions = actions.copy()
elif self.args.dist == 'beta':
input_actions = -1 + 2 * actions
# start to store information
mb_obs.append(np.copy(self.obs))
mb_actions.append(actions)
mb_dones.append(self.dones)
mb_values.append(values.detach().cpu().numpy().squeeze())
# start to excute the actions in the environment
obs, rewards, dones, _ = self.envs.step(input_actions)
# update dones
if self.args.env_type == 'mujoco':
dones = np.array([dones])
rewards = np.array([rewards])
self.dones = dones
mb_rewards.append(rewards)
# clear the observation
for n, done in enumerate(dones):
if done:
self.obs[n] = self.obs[n] * 0
if self.args.env_type == 'mujoco':
# reset the environment
obs = self.envs.reset()
self.obs = obs if self.args.env_type == 'atari' else np.expand_dims(self.running_state(obs), 0)
# process the rewards part -- display the rewards on the screen
episode_rewards += rewards
masks = np.array([0.0 if done_ else 1.0 for done_ in dones], dtype=np.float32)
final_rewards *= masks
final_rewards += (1 - masks) * episode_rewards
episode_rewards *= masks
# process the rollouts
mb_obs = np.asarray(mb_obs, dtype=np.float32)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
mb_values = np.asarray(mb_values, dtype=np.float32)
if self.args.env_type == 'mujoco':
mb_values = np.expand_dims(mb_values, 1)
# compute the last state value
with torch.no_grad():
obs_tensor = self._get_tensors(self.obs)
last_values, _ = self.net(obs_tensor)
last_values = last_values.detach().cpu().numpy().squeeze()
# start to compute advantages...
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.args.nsteps)):
if t == self.args.nsteps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t + 1]
nextvalues = mb_values[t + 1]
delta = mb_rewards[t] + self.args.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.args.gamma * self.args.tau * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
# after compute the returns, let's process the rollouts
mb_obs = mb_obs.swapaxes(0, 1).reshape(self.batch_ob_shape)
if self.args.env_type == 'atari':
mb_actions = mb_actions.swapaxes(0, 1).flatten()
mb_returns = mb_returns.swapaxes(0, 1).flatten()
mb_advs = mb_advs.swapaxes(0, 1).flatten()
# before update the network, the old network will try to load the weights
self.old_net.load_state_dict(self.net.state_dict())
# start to update the network
pl, vl, ent = self._update_network(mb_obs, mb_actions, mb_returns, mb_advs)
# display the training information
if update % self.args.display_interval == 0:
print('[{}] Update: {} / {}, Frames: {}, Rewards: {:.3f}, Min: {:.3f}, Max: {:.3f}, PL: {:.3f},'\
'VL: {:.3f}, Ent: {:.3f}'.format(datetime.now(), update, num_updates, (update + 1)*self.args.nsteps*self.args.num_workers, \
final_rewards.mean(), final_rewards.min(), final_rewards.max(), pl, vl, ent))
# save the model
if self.args.env_type == 'atari':
torch.save(self.net.state_dict(), self.model_path + '/model.pt')
else:
# for the mujoco, we also need to keep the running mean filter!
torch.save([self.net.state_dict(), self.running_state], self.model_path + '/model.pt')
# update the network
def _update_network(self, obs, actions, returns, advantages):
inds = np.arange(obs.shape[0])
nbatch_train = obs.shape[0] // self.args.batch_size
for _ in range(self.args.epoch):
np.random.shuffle(inds)
for start in range(0, obs.shape[0], nbatch_train):
# get the mini-batchs
end = start + nbatch_train
mbinds = inds[start:end]
mb_obs = obs[mbinds]
mb_actions = actions[mbinds]
mb_returns = returns[mbinds]
mb_advs = advantages[mbinds]
# convert minibatches to tensor
mb_obs = self._get_tensors(mb_obs)
mb_actions = torch.tensor(mb_actions, dtype=torch.float32)
mb_returns = torch.tensor(mb_returns, dtype=torch.float32).unsqueeze(1)
mb_advs = torch.tensor(mb_advs, dtype=torch.float32).unsqueeze(1)
# normalize adv
mb_advs = (mb_advs - mb_advs.mean()) / (mb_advs.std() + 1e-8)
if self.args.cuda:
mb_actions = mb_actions.cuda()
mb_returns = mb_returns.cuda()
mb_advs = mb_advs.cuda()
# start to get values
mb_values, pis = self.net(mb_obs)
# start to calculate the value loss...
value_loss = (mb_returns - mb_values).pow(2).mean()
# start to calculate the policy loss
with torch.no_grad():
_, old_pis = self.old_net(mb_obs)
# get the old log probs
old_log_prob, _ = evaluate_actions(old_pis, mb_actions, self.args.dist, self.args.env_type)
old_log_prob = old_log_prob.detach()
# evaluate the current policy
log_prob, ent_loss = evaluate_actions(pis, mb_actions, self.args.dist, self.args.env_type)
prob_ratio = torch.exp(log_prob - old_log_prob)
# surr1
surr1 = prob_ratio * mb_advs
surr2 = torch.clamp(prob_ratio, 1 - self.args.clip, 1 + self.args.clip) * mb_advs
policy_loss = -torch.min(surr1, surr2).mean()
# final total loss
total_loss = policy_loss + self.args.vloss_coef * value_loss - ent_loss * self.args.ent_coef
# clear the grad buffer
self.optimizer.zero_grad()
total_loss.backward()
torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.max_grad_norm)
# update
self.optimizer.step()
return policy_loss.item(), value_loss.item(), ent_loss.item()
# convert the numpy array to tensors
def _get_tensors(self, obs):
if self.args.env_type == 'atari':
obs_tensor = torch.tensor(np.transpose(obs, (0, 3, 1, 2)), dtype=torch.float32)
else:
obs_tensor = torch.tensor(obs, dtype=torch.float32)
# decide if put the tensor on the GPU
if self.args.cuda:
obs_tensor = obs_tensor.cuda()
return obs_tensor
# adjust the learning rate
def _adjust_learning_rate(self, update, num_updates):
lr_frac = 1 - (update / num_updates)
adjust_lr = self.args.lr * lr_frac
for param_group in self.optimizer.param_groups:
param_group['lr'] = adjust_lr
| 11,143 | 50.592593 | 144 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/sac/utils.py | import numpy as np
import torch
from torch.distributions.normal import Normal
from torch.distributions import Distribution
"""
the tanhnormal distributions from rlkit may not stable
"""
class tanh_normal(Distribution):
    """
    Tanh-squashed normal distribution (adapted from rlkit): a ~ tanh(z),
    z ~ N(mean, std). log_prob applies the tanh change-of-variables correction.
    """
    def __init__(self, normal_mean, normal_std, epsilon=1e-6, cuda=False):
        self.normal_mean = normal_mean
        self.normal_std = normal_std
        self.cuda = cuda
        self.normal = Normal(normal_mean, normal_std)
        # numerical fudge keeping log(1 - tanh(z)^2) finite at the boundaries
        self.epsilon = epsilon

    def sample_n(self, n, return_pre_tanh_value=False):
        """Draw n samples without gradients; optionally also return pre-tanh z."""
        # bug fix: Distribution.sample_n(n) is deprecated in torch -- sample((n,))
        # is the documented equivalent and produces the same shape
        z = self.normal.sample((n,))
        if return_pre_tanh_value:
            return torch.tanh(z), z
        else:
            return torch.tanh(z)

    def log_prob(self, value, pre_tanh_value=None):
        """
        :param value: squashed sample x in (-1, 1)
        :param pre_tanh_value: arctanh(x); recomputed from value when not given
        :return: elementwise log-density of x
        """
        if pre_tanh_value is None:
            # arctanh(x) = 0.5 * log((1 + x) / (1 - x))
            pre_tanh_value = torch.log((1 + value) / (1 - value)) / 2
        return self.normal.log_prob(pre_tanh_value) - torch.log(1 - value * value + self.epsilon)

    def sample(self, return_pretanh_value=False):
        """
        Gradients will and should *not* pass through this operation.
        See https://github.com/pytorch/pytorch/issues/4620 for discussion.
        """
        z = self.normal.sample().detach()
        if return_pretanh_value:
            return torch.tanh(z), z
        else:
            return torch.tanh(z)

    def rsample(self, return_pretanh_value=False):
        """
        Sampling in the reparameterization case: z = mean + std * eps,
        eps ~ N(0, I), so gradients flow through mean and std.
        """
        sample_mean = torch.zeros(self.normal_mean.size(), dtype=torch.float32, device='cuda' if self.cuda else 'cpu')
        sample_std = torch.ones(self.normal_std.size(), dtype=torch.float32, device='cuda' if self.cuda else 'cpu')
        z = (self.normal_mean + self.normal_std * Normal(sample_mean, sample_std).sample())
        # NOTE(review): this is a no-op when mean/std already carry gradients;
        # it only matters when they were produced without autograd -- verify
        z.requires_grad_()
        if return_pretanh_value:
            return torch.tanh(z), z
        else:
            return torch.tanh(z)
# get action_infos
class get_action_info:
    """Wraps the policy's (mean, std) output in a tanh-normal distribution."""
    def __init__(self, pis, cuda=False):
        self.mean, self.std = pis
        self.dist = tanh_normal(normal_mean=self.mean, normal_std=self.std, cuda=cuda)

    def select_actions(self, exploration=True, reparameterize=True):
        """Sample an action; deterministic tanh(mean) when exploration is off."""
        if not exploration:
            # greedy action for evaluation
            return torch.tanh(self.mean)
        if reparameterize:
            # pathwise sample: gradients flow, also return pre-tanh value
            actions, pretanh = self.dist.rsample(return_pretanh_value=True)
            return actions, pretanh
        # plain (non-differentiable) sample
        return self.dist.sample()

    def get_log_prob(self, actions, pre_tanh_value):
        """Per-sample log-probability summed over action dimensions (keepdim)."""
        log_prob = self.dist.log_prob(actions, pre_tanh_value=pre_tanh_value)
        return log_prob.sum(dim=1, keepdim=True)
| 2,841 | 34.08642 | 118 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/sac/demo.py | from arguments import get_args
import gym
import torch
import numpy as np
from models import tanh_gaussian_actor
# Demo script: load a trained SAC actor and render 5 greedy (tanh(mean)) episodes.
if __name__ == '__main__':
args = get_args()
env = gym.make(args.env_name)
# get environment infos
obs_dims = env.observation_space.shape[0]
action_dims = env.action_space.shape[0]
action_max = env.action_space.high[0]
# define the network
actor_net = tanh_gaussian_actor(obs_dims, action_dims, args.hidden_size, args.log_std_min, args.log_std_max)
# load models
# NOTE(review): path is built by plain concatenation -- assumes args.save_dir
# ends with a path separator; verify against the training script's save path
model_path = args.save_dir + args.env_name + '/model.pt'
# load the network weights
actor_net.load_state_dict(torch.load(model_path, map_location='cpu'))
for ep in range(5):
obs = env.reset()
reward_sum = 0
# set the maximum timesteps here...
for _ in range(1000):
env.render()
with torch.no_grad():
obs_tensor = torch.tensor(obs, dtype=torch.float32).unsqueeze(0)
mean, std = actor_net(obs_tensor)
# deterministic action: squash the mean, ignore the std
actions = torch.tanh(mean).detach().numpy().squeeze()
if action_dims == 1:
# squeeze() collapses a 1-dim action to a scalar; re-wrap it for env.step
actions = np.array([actions])
# actions live in [-1, 1]; rescale to the env's action bound
obs_, reward, done, _ = env.step(action_max * actions)
reward_sum += reward
if done:
break
obs = obs_
print('the episode is: {}, the reward is: {}'.format(ep, reward_sum))
env.close()
| 1,433 | 35.769231 | 112 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/sac/sac_agent.py | import numpy as np
import torch
from models import flatten_mlp, tanh_gaussian_actor
from rl_utils.experience_replay.experience_replay import replay_buffer
from utils import get_action_info
from datetime import datetime
import copy
import os
import gym
"""
2019-Nov-12 - start to add the automatically tempature tuning
2019-JUN-05
author: Tianhong Dai
"""
# the soft-actor-critic agent
class sac_agent:
    """Soft Actor-Critic agent: twin Q networks, tanh-gaussian policy, auto temperature."""
    def __init__(self, env, args):
        """
        env:  the training environment.
        args: parsed hyper-parameter namespace (q_lr, p_lr, buffer_size, ...).
        """
        self.args = args
        self.env = env
        # a separate environment instance for evaluation
        self.eval_env = gym.make(self.args.env_name)
        self.eval_env.seed(args.seed * 2)
        # twin Q functions and their target copies
        self.qf1 = flatten_mlp(self.env.observation_space.shape[0], self.args.hidden_size, self.env.action_space.shape[0])
        self.qf2 = flatten_mlp(self.env.observation_space.shape[0], self.args.hidden_size, self.env.action_space.shape[0])
        self.target_qf1 = copy.deepcopy(self.qf1)
        self.target_qf2 = copy.deepcopy(self.qf2)
        # squashed gaussian policy network
        self.actor_net = tanh_gaussian_actor(self.env.observation_space.shape[0], self.env.action_space.shape[0], self.args.hidden_size, \
                self.args.log_std_min, self.args.log_std_max)
        # optimizers for critics and actor
        self.qf1_optim = torch.optim.Adam(self.qf1.parameters(), lr=self.args.q_lr)
        self.qf2_optim = torch.optim.Adam(self.qf2.parameters(), lr=self.args.q_lr)
        self.actor_optim = torch.optim.Adam(self.actor_net.parameters(), lr=self.args.p_lr)
        # entropy target (-|A|) and the learnable log-temperature for auto tuning
        self.target_entropy = -np.prod(self.env.action_space.shape).item()
        self.log_alpha = torch.zeros(1, requires_grad=True, device='cuda' if self.args.cuda else 'cpu')
        self.alpha_optim = torch.optim.Adam([self.log_alpha], lr=self.args.p_lr)
        # experience replay buffer
        self.buffer = replay_buffer(self.args.buffer_size)
        # scale for actions sampled in [-1, 1]
        self.action_max = self.env.action_space.high[0]
        # if use cuda, put tensors onto the gpu
        if self.args.cuda:
            self.actor_net.cuda()
            self.qf1.cuda()
            self.qf2.cuda()
            self.target_qf1.cuda()
            self.target_qf2.cuda()
        # create the saving folder; makedirs(..., exist_ok=True) replaces the two
        # os.mkdir calls -- handles nested save_dir paths and avoids the
        # check-then-create race (bug fix)
        self.model_path = os.path.join(self.args.save_dir, self.args.env_name)
        os.makedirs(self.model_path, exist_ok=True)
# train the agent
# train the agent: alternate environment interaction with gradient updates
def learn(self):
"""Main SAC loop: pre-fill the buffer, then interleave rollouts and updates."""
global_timesteps = 0
# before the official training, do the initial exploration to add episodes into the replay buffer
self._initial_exploration(exploration_policy=self.args.init_exploration_policy)
# reset the environment
obs = self.env.reset()
for epoch in range(self.args.n_epochs):
for _ in range(self.args.train_loop_per_epoch):
# for each epoch, it will reset the environment
for t in range(self.args.epoch_length):
# start to collect samples
with torch.no_grad():
obs_tensor = self._get_tensor_inputs(obs)
pi = self.actor_net(obs_tensor)
action = get_action_info(pi, cuda=self.args.cuda).select_actions(reparameterize=False)
action = action.cpu().numpy()[0]
# input the actions into the environment (actions are in [-1, 1], rescale)
obs_, reward, done, _ = self.env.step(self.action_max * action)
# store the samples
self.buffer.add(obs, action, reward, obs_, float(done))
# reassign the observations
obs = obs_
if done:
# reset the environment
obs = self.env.reset()
# after collecting the samples, start to update the network
for _ in range(self.args.update_cycles):
qf1_loss, qf2_loss, actor_loss, alpha, alpha_loss = self._update_newtork()
# update the target network (polyak averaging every target_update_interval cycles)
if global_timesteps % self.args.target_update_interval == 0:
self._update_target_network(self.target_qf1, self.qf1)
self._update_target_network(self.target_qf2, self.qf2)
global_timesteps += 1
# print the log information
# NOTE(review): the printed losses come from the most recent update cycle;
# assumes train_loop_per_epoch >= 1 and update_cycles >= 1, otherwise
# qf1_loss etc. would be unbound here -- verify the argument defaults
if epoch % self.args.display_interval == 0:
# start to do the evaluation
mean_rewards = self._evaluate_agent()
print('[{}] Epoch: {} / {}, Frames: {}, Rewards: {:.3f}, QF1: {:.3f}, QF2: {:.3f}, AL: {:.3f}, Alpha: {:.5f}, AlphaL: {:.5f}'.format(datetime.now(), \
epoch, self.args.n_epochs, (epoch + 1) * self.args.epoch_length, mean_rewards, qf1_loss, qf2_loss, actor_loss, alpha, alpha_loss))
# save models
torch.save(self.actor_net.state_dict(), self.model_path + '/model.pt')
# do the initial exploration by using the uniform policy
def _initial_exploration(self, exploration_policy='gaussian'):
# get the action information of the environment
obs = self.env.reset()
for _ in range(self.args.init_exploration_steps):
if exploration_policy == 'uniform':
raise NotImplementedError
elif exploration_policy == 'gaussian':
# the sac does not need normalize?
with torch.no_grad():
obs_tensor = self._get_tensor_inputs(obs)
# generate the policy
pi = self.actor_net(obs_tensor)
action = get_action_info(pi).select_actions(reparameterize=False)
action = action.cpu().numpy()[0]
# input the action input the environment
obs_, reward, done, _ = self.env.step(self.action_max * action)
# store the episodes
self.buffer.add(obs, action, reward, obs_, float(done))
obs = obs_
if done:
# if done, reset the environment
obs = self.env.reset()
print("Initial exploration has been finished!")
# get tensors
def _get_tensor_inputs(self, obs):
obs_tensor = torch.tensor(obs, dtype=torch.float32, device='cuda' if self.args.cuda else 'cpu').unsqueeze(0)
return obs_tensor
# update the network
# update the network (note: 'newtork' typo is kept -- callers use this name)
def _update_newtork(self):
"""One SAC gradient step: temperature, twin critics, then the actor."""
# sample a batch of transitions from the replay buffer
obses, actions, rewards, obses_, dones = self.buffer.sample(self.args.batch_size)
# preprocessing the data into the tensors, will support GPU later
obses = torch.tensor(obses, dtype=torch.float32, device='cuda' if self.args.cuda else 'cpu')
actions = torch.tensor(actions, dtype=torch.float32, device='cuda' if self.args.cuda else 'cpu')
rewards = torch.tensor(rewards, dtype=torch.float32, device='cuda' if self.args.cuda else 'cpu').unsqueeze(-1)
obses_ = torch.tensor(obses_, dtype=torch.float32, device='cuda' if self.args.cuda else 'cpu')
# (1 - done) mask: zero out the bootstrap term at terminal states
inverse_dones = torch.tensor(1 - dones, dtype=torch.float32, device='cuda' if self.args.cuda else 'cpu').unsqueeze(-1)
# fresh reparameterized actions from the current policy (used for actor & alpha)
pis = self.actor_net(obses)
actions_info = get_action_info(pis, cuda=self.args.cuda)
actions_, pre_tanh_value = actions_info.select_actions(reparameterize=True)
log_prob = actions_info.get_log_prob(actions_, pre_tanh_value)
# automatic temperature tuning: push log_alpha so that the policy entropy
# tracks target_entropy (log_prob is detached -- only alpha gets gradients)
alpha_loss = -(self.log_alpha * (log_prob + self.target_entropy).detach()).mean()
self.alpha_optim.zero_grad()
alpha_loss.backward()
self.alpha_optim.step()
# get the current temperature
alpha = self.log_alpha.exp()
# actor objective: maximize min-Q of the sampled actions minus entropy penalty
q_actions_ = torch.min(self.qf1(obses, actions_), self.qf2(obses, actions_))
actor_loss = (alpha * log_prob - q_actions_).mean()
# q value function loss
q1_value = self.qf1(obses, actions)
q2_value = self.qf2(obses, actions)
with torch.no_grad():
# soft Bellman target built from the *target* critics and next-state actions
pis_next = self.actor_net(obses_)
actions_info_next = get_action_info(pis_next, cuda=self.args.cuda)
actions_next_, pre_tanh_value_next = actions_info_next.select_actions(reparameterize=True)
log_prob_next = actions_info_next.get_log_prob(actions_next_, pre_tanh_value_next)
target_q_value_next = torch.min(self.target_qf1(obses_, actions_next_), self.target_qf2(obses_, actions_next_)) - alpha * log_prob_next
target_q_value = self.args.reward_scale * rewards + inverse_dones * self.args.gamma * target_q_value_next
qf1_loss = (q1_value - target_q_value).pow(2).mean()
qf2_loss = (q2_value - target_q_value).pow(2).mean()
# the three optimizers below step in this exact order: qf1, qf2, then actor
# qf1
self.qf1_optim.zero_grad()
qf1_loss.backward()
self.qf1_optim.step()
# qf2
self.qf2_optim.zero_grad()
qf2_loss.backward()
self.qf2_optim.step()
# policy loss
self.actor_optim.zero_grad()
actor_loss.backward()
self.actor_optim.step()
return qf1_loss.item(), qf2_loss.item(), actor_loss.item(), alpha.item(), alpha_loss.item()
# update the target network
def _update_target_network(self, target, source):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(self.args.tau * param.data + (1 - self.args.tau) * target_param.data)
# evaluate the agent
def _evaluate_agent(self):
total_reward = 0
for _ in range(self.args.eval_episodes):
obs = self.eval_env.reset()
episode_reward = 0
while True:
with torch.no_grad():
obs_tensor = self._get_tensor_inputs(obs)
pi = self.actor_net(obs_tensor)
action = get_action_info(pi, cuda=self.args.cuda).select_actions(exploration=False, reparameterize=False)
action = action.detach().cpu().numpy()[0]
# input the action into the environment
obs_, reward, done, _ = self.eval_env.step(self.action_max * action)
episode_reward += reward
if done:
break
obs = obs_
total_reward += episode_reward
return total_reward / self.args.eval_episodes
| 10,871 | 49.803738 | 166 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/sac/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
# the flatten mlp
class flatten_mlp(nn.Module):
    """Two-hidden-layer MLP Q-network; concatenates obs and action when action_dims is given."""
    #TODO: add the initialization method for it
    def __init__(self, input_dims, hidden_size, action_dims=None):
        super(flatten_mlp, self).__init__()
        in_features = input_dims if action_dims is None else input_dims + action_dims
        self.fc1 = nn.Linear(in_features, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.q_value = nn.Linear(hidden_size, 1)

    def forward(self, obs, action=None):
        """Return Q(obs, action) -- or V(obs) when no action is supplied."""
        if action is None:
            inputs = obs
        else:
            inputs = torch.cat([obs, action], dim=1)
        hidden = F.relu(self.fc1(inputs))
        hidden = F.relu(self.fc2(hidden))
        return self.q_value(hidden)
# define the policy network - tanh gaussian policy network
# TODO: Not use the log std
# define the policy network - tanh gaussian policy network
# TODO: Not use the log std
class tanh_gaussian_actor(nn.Module):
    """Gaussian policy head returning (mean, std) with the log-std clamped to a range."""
    def __init__(self, input_dims, action_dims, hidden_size, log_std_min, log_std_max):
        super(tanh_gaussian_actor, self).__init__()
        self.fc1 = nn.Linear(input_dims, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.mean = nn.Linear(hidden_size, action_dims)
        self.log_std = nn.Linear(hidden_size, action_dims)
        # clamp bounds for the predicted log standard deviation
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max

    def forward(self, obs):
        """Return (mean, std) of the pre-tanh Gaussian for each observation."""
        hidden = F.relu(self.fc1(obs))
        hidden = F.relu(self.fc2(hidden))
        mean = self.mean(hidden)
        # clamp keeps the std within a sane, strictly positive range
        log_std = self.log_std(hidden).clamp(min=self.log_std_min, max=self.log_std_max)
        return (mean, torch.exp(log_std))
| 1,745 | 38.681818 | 130 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/seeds/seeds.py | import numpy as np
import random
import torch
# set random seeds for the pytorch, numpy and random
# set random seeds for pytorch, numpy and random
def set_seeds(args, rank=0):
    """Seed numpy, random and torch (plus CUDA when enabled) with args.seed + rank."""
    seed = args.seed + rank
    # numpy's global RNG
    np.random.seed(seed)
    # python's random module
    random.seed(seed)
    # torch CPU RNG
    torch.manual_seed(seed)
    if args.cuda:
        # torch GPU RNG
        torch.cuda.manual_seed(seed)
| 407 | 26.2 | 52 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/mpi_utils/utils.py | from mpi4py import MPI
import numpy as np
import torch
# sync_networks across the different cores
# sync networks across the different cores
def sync_networks(network):
    """
    Broadcast rank 0's parameters to every MPI worker so all processes
    start from identical weights.
    """
    comm = MPI.COMM_WORLD
    flat = _get_flat_params_or_grads(network, mode='params')
    comm.Bcast(flat, root=0)
    # write the broadcast parameters back into the local network
    _set_flat_params_or_grads(network, flat, mode='params')
def sync_grads(network):
    """Allreduce (sum) the gradients across all MPI workers."""
    local_grads = _get_flat_params_or_grads(network, mode='grads')
    comm = MPI.COMM_WORLD
    summed_grads = np.zeros_like(local_grads)
    comm.Allreduce(local_grads, summed_grads, op=MPI.SUM)
    _set_flat_params_or_grads(network, summed_grads, mode='grads')
# get the flat grads or params
def _get_flat_params_or_grads(network, mode='params'):
"""
include two kinds: grads and params
"""
attr = 'data' if mode == 'params' else 'grad'
return np.concatenate([getattr(param, attr).cpu().numpy().flatten() for param in network.parameters()])
def _set_flat_params_or_grads(network, flat_params, mode='params'):
"""
include two kinds: grads and params
"""
attr = 'data' if mode == 'params' else 'grad'
# the pointer
pointer = 0
for param in network.parameters():
getattr(param, attr).copy_(torch.tensor(flat_params[pointer:pointer + param.data.numel()]).view_as(param.data))
pointer += param.data.numel()
| 1,427 | 31.454545 | 119 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/running_filter/running_filter.py | from collections import deque
import numpy as np
# this is from the https://github.com/ikostrikov/pytorch-trpo/blob/master/running_state.py
# from https://github.com/joschu/modular_rl
# http://www.johndcook.com/blog/standard_deviation/
# this is from the https://github.com/ikostrikov/pytorch-trpo/blob/master/running_state.py
# from https://github.com/joschu/modular_rl
# http://www.johndcook.com/blog/standard_deviation/
class RunningStat(object):
    """Welford online estimator of the running mean/variance of a vector stream."""
    def __init__(self, shape):
        self._n = 0
        self._M = np.zeros(shape)
        self._S = np.zeros(shape)

    def push(self, x):
        """Fold one sample into the running statistics."""
        x = np.asarray(x)
        assert x.shape == self._M.shape
        self._n += 1
        if self._n == 1:
            # first sample: the mean is the sample itself
            self._M[...] = x
        else:
            previous_mean = self._M.copy()
            self._M[...] = previous_mean + (x - previous_mean) / self._n
            self._S[...] = self._S + (x - previous_mean) * (x - self._M)

    @property
    def n(self):
        return self._n

    @property
    def mean(self):
        return self._M

    @property
    def var(self):
        # unbiased sample variance; with fewer than 2 samples fall back to mean**2
        if self._n > 1:
            return self._S / (self._n - 1)
        return np.square(self._M)

    @property
    def std(self):
        return np.sqrt(self.var)

    @property
    def shape(self):
        return self._M.shape
class ZFilter:
    """
    Standardise inputs with running estimates: y = (x - mean) / std, then clip.
    """
    def __init__(self, shape, demean=True, destd=True, clip=10.0):
        self.demean = demean
        self.destd = destd
        self.clip = clip
        self.rs = RunningStat(shape)

    def __call__(self, x, update=True):
        # fold the raw sample into the running stats before normalising
        if update:
            self.rs.push(x)
        if self.demean:
            x = x - self.rs.mean
        if self.destd:
            # small epsilon guards against division by zero early on
            x = x / (self.rs.std + 1e-8)
        if self.clip:
            x = np.clip(x, -self.clip, self.clip)
        return x

    def output_shape(self, input_space):
        return input_space.shape
| 1,715 | 23.169014 | 90 | py |
gcn-over-pruned-trees | gcn-over-pruned-trees-master/eval.py | """
Run evaluation with saved models.
"""
import random
import argparse
from tqdm import tqdm
import torch
from data.loader import DataLoader
from model.trainer import GCNTrainer
from utils import torch_utils, scorer, constant, helper
from utils.vocab import Vocab
# Evaluation script: load a saved GCN model and score it on the dev/test split.
parser = argparse.ArgumentParser()
parser.add_argument('model_dir', type=str, help='Directory of the model.')
parser.add_argument('--model', type=str, default='best_model.pt', help='Name of the model file.')
parser.add_argument('--data_dir', type=str, default='dataset/tacred')
parser.add_argument('--dataset', type=str, default='test', help="Evaluate on dev or test.")
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true')
args = parser.parse_args()
torch.manual_seed(args.seed)
# NOTE(review): random.seed uses the hard-coded 1234 rather than args.seed -- verify intent
random.seed(1234)
# --cpu overrides any CUDA request
if args.cpu:
args.cuda = False
elif args.cuda:
torch.cuda.manual_seed(args.seed)
# load opt (training-time configuration stored alongside the checkpoint)
model_file = args.model_dir + '/' + args.model
print("Loading model from {}".format(model_file))
opt = torch_utils.load_config(model_file)
trainer = GCNTrainer(opt)
trainer.load(model_file)
# load vocab
vocab_file = args.model_dir + '/vocab.pkl'
vocab = Vocab(vocab_file, load=True)
assert opt['vocab_size'] == vocab.size, "Vocab size must match that in the saved model."
# load data
data_file = opt['data_dir'] + '/{}.json'.format(args.dataset)
print("Loading data from {} with batch size {}...".format(data_file, opt['batch_size']))
batch = DataLoader(data_file, opt['batch_size'], opt, vocab, evaluation=True)
helper.print_config(opt)
# invert the label mapping so predicted ids can be decoded to label strings
label2id = constant.LABEL_TO_ID
id2label = dict([(v,k) for k,v in label2id.items()])
predictions = []
all_probs = []
batch_iter = tqdm(batch)
for i, b in enumerate(batch_iter):
preds, probs, _ = trainer.predict(b)
predictions += preds
all_probs += probs
predictions = [id2label[p] for p in predictions]
# micro-averaged precision / recall / F1 against the gold labels
p, r, f1 = scorer.score(batch.gold(), predictions, verbose=True)
print("{} set evaluate result: {:.2f}\t{:.2f}\t{:.2f}".format(args.dataset,p,r,f1))
print("Evaluation ended.")
| 2,130 | 30.80597 | 97 | py |
gcn-over-pruned-trees | gcn-over-pruned-trees-master/train.py | """
Train a model on TACRED.
"""
import os
import sys
from datetime import datetime
import time
import numpy as np
import random
import argparse
from shutil import copyfile
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from data.loader import DataLoader
from model.trainer import GCNTrainer
from utils import torch_utils, scorer, constant, helper
from utils.vocab import Vocab
parser = argparse.ArgumentParser()
# --- data and embedding options ---
parser.add_argument('--data_dir', type=str, default='dataset/tacred')
parser.add_argument('--vocab_dir', type=str, default='dataset/vocab')
parser.add_argument('--emb_dim', type=int, default=300, help='Word embedding dimension.')
parser.add_argument('--ner_dim', type=int, default=30, help='NER embedding dimension.')
parser.add_argument('--pos_dim', type=int, default=30, help='POS embedding dimension.')
# --- model architecture ---
parser.add_argument('--hidden_dim', type=int, default=200, help='RNN hidden state size.')
parser.add_argument('--num_layers', type=int, default=2, help='Num of RNN layers.')
parser.add_argument('--input_dropout', type=float, default=0.5, help='Input dropout rate.')
parser.add_argument('--gcn_dropout', type=float, default=0.5, help='GCN layer dropout rate.')
parser.add_argument('--word_dropout', type=float, default=0.04, help='The rate at which randomly set a word to UNK.')
parser.add_argument('--topn', type=int, default=1e10, help='Only finetune top N word embeddings.')
parser.add_argument('--lower', dest='lower', action='store_true', help='Lowercase all words.')
parser.add_argument('--no-lower', dest='lower', action='store_false')
parser.set_defaults(lower=False)
# --- tree pruning and pooling ---
parser.add_argument('--prune_k', default=-1, type=int, help='Prune the dependency tree to <= K distance off the dependency path; set to -1 for no pruning.')
parser.add_argument('--conv_l2', type=float, default=0, help='L2-weight decay on conv layers only.')
parser.add_argument('--pooling', choices=['max', 'avg', 'sum'], default='max', help='Pooling function type. Default max.')
parser.add_argument('--pooling_l2', type=float, default=0, help='L2-penalty for all pooling output.')
parser.add_argument('--mlp_layers', type=int, default=2, help='Number of output mlp layers.')
parser.add_argument('--no_adj', dest='no_adj', action='store_true', help="Zero out adjacency matrix for ablation.")
# --- contextualizing RNN (store_false flag: rnn defaults to True) ---
parser.add_argument('--no-rnn', dest='rnn', action='store_false', help='Do not use RNN layer.')
parser.add_argument('--rnn_hidden', type=int, default=200, help='RNN hidden state size.')
parser.add_argument('--rnn_layers', type=int, default=1, help='Number of RNN layers.')
parser.add_argument('--rnn_dropout', type=float, default=0.5, help='RNN dropout rate.')
# --- optimization ---
parser.add_argument('--lr', type=float, default=1.0, help='Applies to sgd and adagrad.')
parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate decay rate.')
parser.add_argument('--decay_epoch', type=int, default=5, help='Decay learning rate after this epoch.')
parser.add_argument('--optim', choices=['sgd', 'adagrad', 'adam', 'adamax'], default='sgd', help='Optimizer: sgd, adagrad, adam or adamax.')
parser.add_argument('--num_epoch', type=int, default=100, help='Number of total training epochs.')
parser.add_argument('--batch_size', type=int, default=50, help='Training batch size.')
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')
# --- logging and checkpointing ---
parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')
parser.add_argument('--log', type=str, default='logs.txt', help='Write training log to file.')
parser.add_argument('--save_epoch', type=int, default=100, help='Save model checkpoints every k epochs.')
parser.add_argument('--save_dir', type=str, default='./saved_models', help='Root dir for saving models.')
parser.add_argument('--id', type=str, default='00', help='Model ID under which to save models.')
parser.add_argument('--info', type=str, default='', help='Optional info for the experiment.')
# --- misc ---
parser.add_argument('--seed', type=int, default=1234)
# NOTE(review): argparse `type=bool` treats any non-empty string as True; use --cpu to force CPU.
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
parser.add_argument('--load', dest='load', action='store_true', help='Load pretrained model.')
parser.add_argument('--model_file', type=str, help='Filename of the pretrained model.')
args = parser.parse_args()
# Seed every RNG source (torch, numpy, python) from --seed so runs are reproducible.
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)  # was hard-coded to 1234, which ignored the --seed argument
if args.cpu:
    args.cuda = False
elif args.cuda:
    torch.cuda.manual_seed(args.seed)
init_time = time.time()

# make opt: training configuration is a plain dict derived from CLI args
opt = vars(args)
label2id = constant.LABEL_TO_ID
opt['num_class'] = len(label2id)

# load vocab and the pretrained embedding matrix; both must agree in size
vocab_file = opt['vocab_dir'] + '/vocab.pkl'
vocab = Vocab(vocab_file, load=True)
opt['vocab_size'] = vocab.size
emb_file = opt['vocab_dir'] + '/embedding.npy'
emb_matrix = np.load(emb_file)
assert emb_matrix.shape[0] == vocab.size
assert emb_matrix.shape[1] == opt['emb_dim']

# load data
print("Loading data from {} with batch size {}...".format(opt['data_dir'], opt['batch_size']))
train_batch = DataLoader(opt['data_dir'] + '/train.json', opt['batch_size'], opt, vocab, evaluation=False)
dev_batch = DataLoader(opt['data_dir'] + '/dev.json', opt['batch_size'], opt, vocab, evaluation=True)

# per-run save directory, keyed by --id (zero-padded to two chars)
model_id = opt['id'] if len(opt['id']) > 1 else '0' + opt['id']
model_save_dir = opt['save_dir'] + '/' + model_id
opt['model_save_dir'] = model_save_dir
helper.ensure_dir(model_save_dir, verbose=True)

# save config and vocab alongside checkpoints so eval can reload them
helper.save_config(opt, model_save_dir + '/config.json', verbose=True)
vocab.save(model_save_dir + '/vocab.pkl')
file_logger = helper.FileLogger(model_save_dir + '/' + opt['log'], header="# epoch\ttrain_loss\tdev_loss\tdev_score\tbest_dev_score")

# print model info
helper.print_config(opt)

# model: fresh trainer, or resume from --model_file when --load is given
if not opt['load']:
    trainer = GCNTrainer(opt, emb_matrix=emb_matrix)
else:
    # load pretrained model (keep the current optimizer choice)
    model_file = opt['model_file']
    print("Loading model from {}".format(model_file))
    model_opt = torch_utils.load_config(model_file)
    model_opt['optim'] = opt['optim']
    trainer = GCNTrainer(model_opt)
    trainer.load(model_file)

id2label = dict([(v,k) for k,v in label2id.items()])
dev_score_history = []
current_lr = opt['lr']

global_step = 0
global_start_time = time.time()
format_str = '{}: step {}/{} (epoch {}/{}), loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
max_steps = len(train_batch) * opt['num_epoch']

# start training
for epoch in range(1, opt['num_epoch']+1):
    train_loss = 0
    for i, batch in enumerate(train_batch):
        start_time = time.time()
        global_step += 1
        loss = trainer.update(batch)
        train_loss += loss
        if global_step % opt['log_step'] == 0:
            duration = time.time() - start_time
            print(format_str.format(datetime.now(), global_step, max_steps, epoch,\
                    opt['num_epoch'], loss, duration, current_lr))

    # eval on dev
    print("Evaluating on dev set...")
    predictions = []
    dev_loss = 0
    for i, batch in enumerate(dev_batch):
        preds, _, loss = trainer.predict(batch)
        predictions += preds
        dev_loss += loss
    predictions = [id2label[p] for p in predictions]
    train_loss = train_loss / train_batch.num_examples * opt['batch_size'] # avg loss per batch
    dev_loss = dev_loss / dev_batch.num_examples * opt['batch_size']

    dev_p, dev_r, dev_f1 = scorer.score(dev_batch.gold(), predictions)
    print("epoch {}: train_loss = {:.6f}, dev_loss = {:.6f}, dev_f1 = {:.4f}".format(epoch,\
            train_loss, dev_loss, dev_f1))
    dev_score = dev_f1
    file_logger.log("{}\t{:.6f}\t{:.6f}\t{:.4f}\t{:.4f}".format(epoch, train_loss, dev_loss, dev_score, max([dev_score] + dev_score_history)))

    # save a checkpoint every epoch; keep it only on best-dev or every save_epoch
    model_file = model_save_dir + '/checkpoint_epoch_{}.pt'.format(epoch)
    trainer.save(model_file, epoch)
    if epoch == 1 or dev_score > max(dev_score_history):
        copyfile(model_file, model_save_dir + '/best_model.pt')
        print("new best model saved.")
        file_logger.log("new best model saved at epoch {}: {:.2f}\t{:.2f}\t{:.2f}"\
                .format(epoch, dev_p*100, dev_r*100, dev_score*100))
    if epoch % opt['save_epoch'] != 0:
        os.remove(model_file)

    # lr schedule: decay when dev score stops improving (sgd-family optimizers only)
    if len(dev_score_history) > opt['decay_epoch'] and dev_score <= dev_score_history[-1] and \
            opt['optim'] in ['sgd', 'adagrad', 'adadelta']:
        current_lr *= opt['lr_decay']
        trainer.update_lr(current_lr)

    dev_score_history += [dev_score]
    print("")

print("Training ended with {} epochs.".format(epoch))
| 8,638 | 44.708995 | 156 | py |
gcn-over-pruned-trees | gcn-over-pruned-trees-master/utils/torch_utils.py | """
Utility functions for torch.
"""
import torch
from torch import nn, optim
from torch.optim import Optimizer
### class
class MyAdagrad(Optimizer):
    """My modification of the Adagrad optimizer that allows to specify an initial
    accumulater value. This mimics the behavior of the default Adagrad implementation
    in Tensorflow. The default PyTorch Adagrad uses 0 for initial acculmulator value.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        lr_decay (float, optional): learning rate decay (default: 0)
        init_accu_value (float, optional): initial accumulater value.
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
    """

    def __init__(self, params, lr=1e-2, lr_decay=0, init_accu_value=0.1, weight_decay=0):
        defaults = dict(lr=lr, lr_decay=lr_decay, init_accu_value=init_accu_value, \
                weight_decay=weight_decay)
        super(MyAdagrad, self).__init__(params, defaults)
        # seed every parameter's squared-gradient accumulator with a constant
        # (this is the TF-style behavior the class exists for)
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['step'] = 0
                state['sum'] = torch.ones(p.data.size()).type_as(p.data) *\
                        init_accu_value

    def share_memory(self):
        # move accumulators into shared memory so worker processes share state
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['sum'].share_memory_()

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                grad = p.grad.data
                state = self.state[p]
                state['step'] += 1

                if group['weight_decay'] != 0:
                    if p.grad.data.is_sparse:
                        raise RuntimeError("weight_decay option is not compatible with sparse gradients ")
                    grad = grad.add(group['weight_decay'], p.data)

                # effective lr decays linearly in the number of steps taken
                clr = group['lr'] / (1 + (state['step'] - 1) * group['lr_decay'])

                # NOTE(review): `_sparse_mask` and the positional `add_(scalar, tensor)`
                # forms below are legacy torch APIs — confirm against the installed version.
                if p.grad.data.is_sparse:
                    grad = grad.coalesce()  # the update is non-linear so indices must be unique
                    grad_indices = grad._indices()
                    grad_values = grad._values()
                    size = torch.Size([x for x in grad.size()])

                    def make_sparse(values):
                        # rebuild a sparse tensor with the gradient's sparsity pattern
                        constructor = type(p.grad.data)
                        if grad_indices.dim() == 0 or values.dim() == 0:
                            return constructor()
                        return constructor(grad_indices, values, size)
                    state['sum'].add_(make_sparse(grad_values.pow(2)))
                    std = state['sum']._sparse_mask(grad)
                    std_values = std._values().sqrt_().add_(1e-10)
                    p.data.add_(-clr, make_sparse(grad_values / std_values))
                else:
                    # dense path: sum += grad^2; p -= clr * grad / sqrt(sum)
                    state['sum'].addcmul_(1, grad, grad)
                    std = state['sum'].sqrt().add_(1e-10)
                    p.data.addcdiv_(-clr, grad, std)

        return loss
### torch specific functions
def get_optimizer(name, parameters, lr, l2=0):
    """Build an optimizer by name over *parameters*.

    `lr` is honored by sgd/adagrad/adadelta; adam and adamax run with their
    library-default learning rate. `l2` is passed through as weight decay.
    Raises for any unrecognized name.
    """
    if name == 'sgd':
        return torch.optim.SGD(parameters, lr=lr, weight_decay=l2)
    if name in ('adagrad', 'myadagrad'):
        # custom Adagrad so the accumulator can start at a nonzero value
        return MyAdagrad(parameters, lr=lr, init_accu_value=0.1, weight_decay=l2)
    if name == 'adam':
        return torch.optim.Adam(parameters, weight_decay=l2)  # default lr
    if name == 'adamax':
        return torch.optim.Adamax(parameters, weight_decay=l2)  # default lr
    if name == 'adadelta':
        return torch.optim.Adadelta(parameters, lr=lr, weight_decay=l2)
    raise Exception("Unsupported optimizer: {}".format(name))
def change_lr(optimizer, new_lr):
    """Set the learning rate of every param group in *optimizer* to *new_lr*."""
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def flatten_indices(seq_lens, width):
    """Map per-row lengths to flat indices in a row-major grid of the given width."""
    return [row * width + col
            for row, length in enumerate(seq_lens)
            for col in range(length)]
def set_cuda(var, cuda):
    """Move *var* to GPU when *cuda* is truthy; otherwise return it unchanged."""
    return var.cuda() if cuda else var
def keep_partial_grad(grad, topk):
    """
    Zero out all rows of *grad* past the first *topk* (in place) and return it.
    """
    num_rows = grad.size(0)
    assert topk < num_rows
    grad.data[topk:num_rows].zero_()
    return grad
### model IO
def save(model, optimizer, opt, filename):
    """Serialize model state, optimizer state and config to *filename*.

    Saving is best-effort: failures are reported with a warning, not raised.
    """
    checkpoint = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'config': opt,
    }
    try:
        torch.save(checkpoint, filename)
    except BaseException:
        print("[ Warning: model saving failed. ]")
def load(model, optimizer, filename):
    """Load a checkpoint from *filename* into *model* and *optimizer*.

    Either argument may be None to skip restoring that component.
    Returns (model, optimizer, config).
    """
    try:
        dump = torch.load(filename)
    except BaseException:
        # Previously this only printed and fell through, which then crashed
        # with an UnboundLocalError on `dump`; re-raise so callers see the
        # real I/O error instead.
        print("[ Fail: model loading failed. ]")
        raise
    if model is not None:
        model.load_state_dict(dump['model'])
    if optimizer is not None:
        optimizer.load_state_dict(dump['optimizer'])
    opt = dump['config']
    return model, optimizer, opt
def load_config(filename):
    """Return the 'config' dict stored inside a checkpoint file."""
    try:
        dump = torch.load(filename)
    except BaseException:
        # Re-raise instead of falling through: the original version continued
        # and crashed with an UnboundLocalError on `dump` below.
        print("[ Fail: model loading failed. ]")
        raise
    return dump['config']
| 5,681 | 33.858896 | 106 | py |
gcn-over-pruned-trees | gcn-over-pruned-trees-master/data/loader.py | """
Data loader for TACRED json files.
"""
import json
import random
import torch
import numpy as np
from utils import constant, helper, vocab
class DataLoader(object):
    """
    Load data from json files, preprocess and prepare batches.
    """
    def __init__(self, filename, batch_size, opt, vocab, evaluation=False):
        self.batch_size = batch_size
        self.opt = opt
        self.vocab = vocab
        self.eval = evaluation
        self.label2id = constant.LABEL_TO_ID

        with open(filename) as infile:
            data = json.load(infile)
        self.raw_data = data
        data = self.preprocess(data, vocab, opt)

        # shuffle for training (evaluation keeps the file order)
        if not evaluation:
            indices = list(range(len(data)))
            random.shuffle(indices)
            data = [data[i] for i in indices]
        self.id2label = dict([(v,k) for k,v in self.label2id.items()])
        # gold label strings, aligned with the (possibly shuffled) example order
        self.labels = [self.id2label[d[-1]] for d in data]
        self.num_examples = len(data)

        # chunk into batches
        data = [data[i:i+batch_size] for i in range(0, len(data), batch_size)]
        self.data = data
        print("{} batches created for {}".format(len(data), filename))

    def preprocess(self, data, vocab, opt):
        """ Preprocess the data and convert to ids. """
        processed = []
        for d in data:
            tokens = list(d['token'])
            if opt['lower']:
                tokens = [t.lower() for t in tokens]
            # anonymize tokens: replace entity spans with typed placeholders
            ss, se = d['subj_start'], d['subj_end']
            os, oe = d['obj_start'], d['obj_end']
            tokens[ss:se+1] = ['SUBJ-'+d['subj_type']] * (se-ss+1)
            tokens[os:oe+1] = ['OBJ-'+d['obj_type']] * (oe-os+1)
            tokens = map_to_ids(tokens, vocab.word2id)
            pos = map_to_ids(d['stanford_pos'], constant.POS_TO_ID)
            ner = map_to_ids(d['stanford_ner'], constant.NER_TO_ID)
            deprel = map_to_ids(d['stanford_deprel'], constant.DEPREL_TO_ID)
            head = [int(x) for x in d['stanford_head']]
            assert any([x == 0 for x in head])  # every dependency tree must have a root (head == 0)
            l = len(tokens)
            # relative positions of each token w.r.t. the subject/object span
            subj_positions = get_positions(d['subj_start'], d['subj_end'], l)
            obj_positions = get_positions(d['obj_start'], d['obj_end'], l)
            subj_type = [constant.SUBJ_NER_TO_ID[d['subj_type']]]
            obj_type = [constant.OBJ_NER_TO_ID[d['obj_type']]]
            relation = self.label2id[d['relation']]
            processed += [(tokens, pos, ner, deprel, head, subj_positions, obj_positions, subj_type, obj_type, relation)]
        return processed

    def gold(self):
        """ Return gold labels as a list. """
        return self.labels

    def __len__(self):
        return len(self.data)

    def __getitem__(self, key):
        """ Get a batch with index. """
        if not isinstance(key, int):
            raise TypeError
        if key < 0 or key >= len(self.data):
            raise IndexError
        batch = self.data[key]
        batch_size = len(batch)
        batch = list(zip(*batch))  # transpose: examples -> per-field lists
        assert len(batch) == 10

        # sort all fields by lens for easy RNN operations
        lens = [len(x) for x in batch[0]]
        batch, orig_idx = sort_all(batch, lens)

        # word dropout (training only)
        if not self.eval:
            words = [word_dropout(sent, self.opt['word_dropout']) for sent in batch[0]]
        else:
            words = batch[0]

        # convert to tensors
        words = get_long_tensor(words, batch_size)
        masks = torch.eq(words, 0)  # True at padding (assumes PAD_ID == 0 — confirm)
        pos = get_long_tensor(batch[1], batch_size)
        ner = get_long_tensor(batch[2], batch_size)
        deprel = get_long_tensor(batch[3], batch_size)
        head = get_long_tensor(batch[4], batch_size)
        subj_positions = get_long_tensor(batch[5], batch_size)
        obj_positions = get_long_tensor(batch[6], batch_size)
        subj_type = get_long_tensor(batch[7], batch_size)
        obj_type = get_long_tensor(batch[8], batch_size)
        rels = torch.LongTensor(batch[9])
        return (words, masks, pos, ner, deprel, head, subj_positions, obj_positions, subj_type, obj_type, rels, orig_idx)

    def __iter__(self):
        for i in range(self.__len__()):
            yield self.__getitem__(i)
def map_to_ids(tokens, vocab):
    """Convert tokens to ids via *vocab*, falling back to UNK for unknown tokens."""
    unk = constant.UNK_ID
    return [vocab[token] if token in vocab else unk for token in tokens]
def get_positions(start_idx, end_idx, length):
    """ Get subj/obj position sequence: negative before the span, 0 inside, positive after. """
    before = list(range(-start_idx, 0))
    inside = [0] * (end_idx - start_idx + 1)
    after = list(range(1, length - end_idx))
    return before + inside + after
def get_long_tensor(tokens_list, batch_size):
    """ Pad a batch of id sequences into a (batch_size, max_len) LongTensor. """
    max_len = max(len(seq) for seq in tokens_list)
    padded = torch.LongTensor(batch_size, max_len).fill_(constant.PAD_ID)
    for row, seq in enumerate(tokens_list):
        padded[row, :len(seq)] = torch.LongTensor(seq)
    return padded
def sort_all(batch, lens):
    """ Sort every field by descending length; also return the original indices. """
    # prepend (lens, original-index) as sort keys, sort rows, then transpose back
    keyed = [lens] + [range(len(lens))] + list(batch)
    sorted_cols = [list(col) for col in zip(*sorted(zip(*keyed), reverse=True))]
    return sorted_cols[2:], sorted_cols[1]
def word_dropout(tokens, dropout):
    """ Randomly dropout tokens (IDs) and replace them with <UNK> tokens. """
    dropped = []
    for tok in tokens:
        # only non-UNK tokens draw a random number (preserves RNG call order)
        if tok != constant.UNK_ID and np.random.random() < dropout:
            dropped.append(constant.UNK_ID)
        else:
            dropped.append(tok)
    return dropped
| 5,487 | 36.848276 | 121 | py |
gcn-over-pruned-trees | gcn-over-pruned-trees-master/model/gcn.py | """
GCN model for relation extraction.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from model.tree import Tree, head_to_tree, tree_to_adj
from utils import constant, torch_utils
class GCNClassifier(nn.Module):
    """ A wrapper classifier for GCNRelationModel. """
    def __init__(self, opt, emb_matrix=None):
        super().__init__()
        self.gcn_model = GCNRelationModel(opt, emb_matrix=emb_matrix)
        in_dim = opt['hidden_dim']
        # final linear layer maps the pooled representation to relation logits
        self.classifier = nn.Linear(in_dim, opt['num_class'])
        self.opt = opt

    def conv_l2(self):
        # expose the GCN conv-layer L2 term for the trainer's conv_l2 penalty
        return self.gcn_model.gcn.conv_l2()

    def forward(self, inputs):
        outputs, pooling_output = self.gcn_model(inputs)
        logits = self.classifier(outputs)
        # also return the pooled sentence vector (used by the pooling_l2 penalty)
        return logits, pooling_output
class GCNRelationModel(nn.Module):
    """GCN over pruned dependency trees plus an output MLP over pooled features."""
    def __init__(self, opt, emb_matrix=None):
        super().__init__()
        self.opt = opt
        self.emb_matrix = emb_matrix

        # create embedding layers (POS/NER embeddings are optional: dim > 0)
        self.emb = nn.Embedding(opt['vocab_size'], opt['emb_dim'], padding_idx=constant.PAD_ID)
        self.pos_emb = nn.Embedding(len(constant.POS_TO_ID), opt['pos_dim']) if opt['pos_dim'] > 0 else None
        self.ner_emb = nn.Embedding(len(constant.NER_TO_ID), opt['ner_dim']) if opt['ner_dim'] > 0 else None
        embeddings = (self.emb, self.pos_emb, self.ner_emb)
        self.init_embeddings()

        # gcn layer
        self.gcn = GCN(opt, embeddings, opt['hidden_dim'], opt['num_layers'])

        # output mlp layers; input is [sentence; subject; object] -> 3 * hidden_dim
        in_dim = opt['hidden_dim']*3
        layers = [nn.Linear(in_dim, opt['hidden_dim']), nn.ReLU()]
        for _ in range(self.opt['mlp_layers']-1):
            layers += [nn.Linear(opt['hidden_dim'], opt['hidden_dim']), nn.ReLU()]
        self.out_mlp = nn.Sequential(*layers)

    def init_embeddings(self):
        """Initialize word embeddings (pretrained or random) and set the finetuning policy."""
        if self.emb_matrix is None:
            # random init for all rows except the padding row 0
            self.emb.weight.data[1:,:].uniform_(-1.0, 1.0)
        else:
            self.emb_matrix = torch.from_numpy(self.emb_matrix)
            self.emb.weight.data.copy_(self.emb_matrix)
        # decide finetuning
        if self.opt['topn'] <= 0:
            print("Do not finetune word embedding layer.")
            self.emb.weight.requires_grad = False
        elif self.opt['topn'] < self.opt['vocab_size']:
            print("Finetune top {} word embeddings.".format(self.opt['topn']))
            # gradient hook zeroes all rows past the first `topn`
            self.emb.weight.register_hook(lambda x: \
                    torch_utils.keep_partial_grad(x, self.opt['topn']))
        else:
            print("Finetune all embeddings.")

    def forward(self, inputs):
        words, masks, pos, ner, deprel, head, subj_pos, obj_pos, subj_type, obj_type = inputs # unpack
        l = (masks.data.cpu().numpy() == 0).astype(np.int64).sum(1)  # true sequence lengths
        maxlen = max(l)

        def inputs_to_tree_reps(head, words, l, prune, subj_pos, obj_pos):
            # build (pruned) dependency trees and stack their adjacency matrices
            head, words, subj_pos, obj_pos = head.cpu().numpy(), words.cpu().numpy(), subj_pos.cpu().numpy(), obj_pos.cpu().numpy()
            trees = [head_to_tree(head[i], words[i], l[i], prune, subj_pos[i], obj_pos[i]) for i in range(len(l))]
            adj = [tree_to_adj(maxlen, tree, directed=False, self_loop=False).reshape(1, maxlen, maxlen) for tree in trees]
            adj = np.concatenate(adj, axis=0)
            adj = torch.from_numpy(adj)
            return Variable(adj.cuda()) if self.opt['cuda'] else Variable(adj)

        adj = inputs_to_tree_reps(head.data, words.data, l, self.opt['prune_k'], subj_pos.data, obj_pos.data)
        h, pool_mask = self.gcn(adj, inputs)

        # pooling: subj_pos/obj_pos are 0 inside the entity span, so eq(0).eq(0)
        # is True *outside* the span, i.e. the positions to mask out
        subj_mask, obj_mask = subj_pos.eq(0).eq(0).unsqueeze(2), obj_pos.eq(0).eq(0).unsqueeze(2) # invert mask
        pool_type = self.opt['pooling']
        h_out = pool(h, pool_mask, type=pool_type)
        subj_out = pool(h, subj_mask, type=pool_type)
        obj_out = pool(h, obj_mask, type=pool_type)
        outputs = torch.cat([h_out, subj_out, obj_out], dim=1)
        outputs = self.out_mlp(outputs)
        return outputs, h_out
class GCN(nn.Module):
    """ A GCN/Contextualized GCN module operated on dependency graphs. """
    def __init__(self, opt, embeddings, mem_dim, num_layers):
        super(GCN, self).__init__()
        self.opt = opt
        self.layers = num_layers
        self.use_cuda = opt['cuda']
        self.mem_dim = mem_dim
        self.in_dim = opt['emb_dim'] + opt['pos_dim'] + opt['ner_dim']
        self.emb, self.pos_emb, self.ner_emb = embeddings

        # rnn layer: optional BiLSTM contextualization before graph convolution
        if self.opt.get('rnn', False):
            input_size = self.in_dim
            self.rnn = nn.LSTM(input_size, opt['rnn_hidden'], opt['rnn_layers'], batch_first=True, \
                    dropout=opt['rnn_dropout'], bidirectional=True)
            self.in_dim = opt['rnn_hidden'] * 2
            self.rnn_drop = nn.Dropout(opt['rnn_dropout']) # use on last layer output
        self.in_drop = nn.Dropout(opt['input_dropout'])
        self.gcn_drop = nn.Dropout(opt['gcn_dropout'])

        # gcn layer: one linear transform per graph-conv layer
        self.W = nn.ModuleList()
        for layer in range(self.layers):
            input_dim = self.in_dim if layer == 0 else self.mem_dim
            self.W.append(nn.Linear(input_dim, self.mem_dim))

    def conv_l2(self):
        # sum of squared conv weights/biases; consumed by the trainer's conv_l2 penalty
        conv_weights = []
        for w in self.W:
            conv_weights += [w.weight, w.bias]
        return sum([x.pow(2).sum() for x in conv_weights])

    def encode_with_rnn(self, rnn_inputs, masks, batch_size):
        # sequence lengths from the mask (assumes constant.PAD_ID == 0 — confirm)
        seq_lens = list(masks.data.eq(constant.PAD_ID).long().sum(1).squeeze())
        h0, c0 = rnn_zero_state(batch_size, self.opt['rnn_hidden'], self.opt['rnn_layers'])
        rnn_inputs = nn.utils.rnn.pack_padded_sequence(rnn_inputs, seq_lens, batch_first=True)
        rnn_outputs, (ht, ct) = self.rnn(rnn_inputs, (h0, c0))
        rnn_outputs, _ = nn.utils.rnn.pad_packed_sequence(rnn_outputs, batch_first=True)
        return rnn_outputs

    def forward(self, adj, inputs):
        words, masks, pos, ner, deprel, head, subj_pos, obj_pos, subj_type, obj_type = inputs # unpack
        word_embs = self.emb(words)
        embs = [word_embs]
        if self.opt['pos_dim'] > 0:
            embs += [self.pos_emb(pos)]
        if self.opt['ner_dim'] > 0:
            embs += [self.ner_emb(ner)]
        embs = torch.cat(embs, dim=2)
        embs = self.in_drop(embs)

        # rnn layer
        if self.opt.get('rnn', False):
            gcn_inputs = self.rnn_drop(self.encode_with_rnn(embs, masks, words.size()[0]))
        else:
            gcn_inputs = embs

        # gcn layer
        denom = adj.sum(2).unsqueeze(2) + 1  # node degree + 1 (self loop) for normalization
        mask = (adj.sum(2) + adj.sum(1)).eq(0).unsqueeze(2)  # tokens with no edges (pruned or pad)
        # zero out adj for ablation
        if self.opt.get('no_adj', False):
            adj = torch.zeros_like(adj)
        for l in range(self.layers):
            Ax = adj.bmm(gcn_inputs)
            AxW = self.W[l](Ax)
            AxW = AxW + self.W[l](gcn_inputs) # self loop
            AxW = AxW / denom
            gAxW = F.relu(AxW)
            # dropout between layers, but not after the final layer
            gcn_inputs = self.gcn_drop(gAxW) if l < self.layers - 1 else gAxW
        return gcn_inputs, mask
def pool(h, mask, type='max'):
    """Pool hidden states over dim 1; *mask* marks positions to exclude."""
    if type == 'max':
        # push masked positions to -inf so they never win the max
        h = h.masked_fill(mask, -constant.INFINITY_NUMBER)
        return torch.max(h, 1)[0]
    if type == 'avg':
        h = h.masked_fill(mask, 0)
        n_valid = mask.size(1) - mask.float().sum(1)
        return h.sum(1) / n_valid
    # default: plain sum over unmasked positions
    h = h.masked_fill(mask, 0)
    return h.sum(1)
def rnn_zero_state(batch_size, hidden_dim, num_layers, bidirectional=True, use_cuda=True):
    """Return zero-initialized (h0, c0) for an LSTM of the given geometry."""
    directions = 2 if bidirectional else 1
    shape = (num_layers * directions, batch_size, hidden_dim)
    h0 = c0 = Variable(torch.zeros(*shape), requires_grad=False)
    if use_cuda:
        return h0.cuda(), c0.cuda()
    return h0, c0
| 7,886 | 39.239796 | 131 | py |
gcn-over-pruned-trees | gcn-over-pruned-trees-master/model/trainer.py | """
A trainer class.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from model.gcn import GCNClassifier
from utils import constant, torch_utils
class Trainer(object):
    """Abstract trainer base: subclasses implement __init__/update/predict."""
    def __init__(self, opt, emb_matrix=None):
        raise NotImplementedError

    def update(self, batch):
        raise NotImplementedError

    def predict(self, batch):
        raise NotImplementedError

    def update_lr(self, new_lr):
        # delegate so every param group in the optimizer gets the new rate
        torch_utils.change_lr(self.optimizer, new_lr)

    def load(self, filename):
        # load model weights + config; hard-exits if the checkpoint is unreadable
        try:
            checkpoint = torch.load(filename)
        except BaseException:
            print("Cannot load model from {}".format(filename))
            exit()
        self.model.load_state_dict(checkpoint['model'])
        self.opt = checkpoint['config']

    def save(self, filename, epoch):
        # save model weights + config only (optimizer state is not checkpointed)
        params = {
            'model': self.model.state_dict(),
            'config': self.opt,
        }
        try:
            torch.save(params, filename)
            print("model saved to {}".format(filename))
        except BaseException:
            print("[Warning: Saving failed... continuing anyway.]")
def unpack_batch(batch, cuda):
    """Split a batch tuple into model inputs, labels and bookkeeping fields.

    Moves tensors to GPU when *cuda* is truthy. Returns
    (inputs, labels, tokens, head, subj_pos, obj_pos, lens).
    """
    if cuda:
        wrap = lambda t: Variable(t.cuda())
    else:
        wrap = Variable
    inputs = [wrap(t) for t in batch[:10]]
    labels = wrap(batch[10])
    tokens, head, subj_pos, obj_pos = batch[0], batch[5], batch[6], batch[7]
    # count non-pad positions per sequence from the mask field
    lens = batch[1].eq(0).long().sum(1).squeeze()
    return inputs, labels, tokens, head, subj_pos, obj_pos, lens
class GCNTrainer(Trainer):
    """Trainer for GCNClassifier: one update/predict step per batch."""
    def __init__(self, opt, emb_matrix=None):
        self.opt = opt
        self.emb_matrix = emb_matrix
        self.model = GCNClassifier(opt, emb_matrix=emb_matrix)
        self.criterion = nn.CrossEntropyLoss()
        # only optimize parameters that require grad (frozen embeddings excluded)
        self.parameters = [p for p in self.model.parameters() if p.requires_grad]
        if opt['cuda']:
            self.model.cuda()
            self.criterion.cuda()
        self.optimizer = torch_utils.get_optimizer(opt['optim'], self.parameters, opt['lr'])

    def update(self, batch):
        """Run one training step on a batch; return the scalar loss value."""
        inputs, labels, tokens, head, subj_pos, obj_pos, lens = unpack_batch(batch, self.opt['cuda'])

        # step forward
        self.model.train()
        self.optimizer.zero_grad()
        logits, pooling_output = self.model(inputs)
        loss = self.criterion(logits, labels)
        # l2 decay on all conv layers
        if self.opt.get('conv_l2', 0) > 0:
            loss += self.model.conv_l2() * self.opt['conv_l2']
        # l2 penalty on output representations
        if self.opt.get('pooling_l2', 0) > 0:
            loss += self.opt['pooling_l2'] * (pooling_output ** 2).sum(1).mean()
        loss_val = loss.item()
        # backward with gradient clipping
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.opt['max_grad_norm'])
        self.optimizer.step()
        return loss_val

    def predict(self, batch, unsort=True):
        """Predict labels for a batch; returns (predictions, probs, loss)."""
        inputs, labels, tokens, head, subj_pos, obj_pos, lens = unpack_batch(batch, self.opt['cuda'])
        orig_idx = batch[11]
        # forward
        self.model.eval()
        logits, _ = self.model(inputs)
        loss = self.criterion(logits, labels)
        probs = F.softmax(logits, 1).data.cpu().numpy().tolist()
        predictions = np.argmax(logits.data.cpu().numpy(), axis=1).tolist()
        if unsort:
            # restore the original (pre length-sorting) example order
            _, predictions, probs = [list(t) for t in zip(*sorted(zip(orig_idx,\
                    predictions, probs)))]
        return predictions, probs, loss.item()
| 3,659 | 32.888889 | 101 | py |
SwinMR | SwinMR-main/main_test_swinmr_CC.py | '''
# -----------------------------------------
Main Program for Testing
SwinMR for MRI_Recon
Dataset: CC
by Jiahao Huang (j.huang21@imperial.ac.uk)
# -----------------------------------------
'''
import argparse
import cv2
import csv
import sys
import numpy as np
from collections import OrderedDict
import os
import torch
from utils import utils_option as option
from torch.utils.data import DataLoader
from models.network_swinmr import SwinIR as net
from utils import utils_image as util
from data.select_dataset import define_Dataset
import time
from math import ceil
import lpips
import shutil
def main(json_path):
parser = argparse.ArgumentParser()
parser.add_argument('--opt', type=str, default=json_path, help='Path to option JSON file.')
opt = option.parse(parser.parse_args().opt, is_train=False)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = 'cpu'
# set up model
if os.path.exists(opt['model_path']):
print(f"loading model from {opt['model_path']}")
else:
print('can\'t find model.')
model = define_model(opt)
model.eval()
model = model.to(device)
# setup folder and path
save_dir, border, window_size = setup(opt)
os.makedirs(save_dir, exist_ok=True)
test_results = OrderedDict()
test_results['psnr'] = []
test_results['ssim'] = []
test_results['lpips'] = []
test_results['zf_psnr'] = []
test_results['zf_ssim'] = []
test_results['zf_lpips'] = []
with open(os.path.join(save_dir, 'results.csv'), 'w') as cf:
writer = csv.writer(cf)
writer.writerow(['METHOD', 'MASK', 'SSIM', 'PSNR', 'LPIPS'])
with open(os.path.join(save_dir, 'results_ave.csv'), 'w') as cf:
writer = csv.writer(cf)
writer.writerow(['METHOD', 'MASK',
'SSIM', 'SSIM_STD',
'PSNR', 'PSNR_STD',
'LPIPS', 'LPIPS_STD',
'FID'])
with open(os.path.join(save_dir, 'zf_results.csv'), 'w') as cf:
writer = csv.writer(cf)
writer.writerow(['METHOD', 'MASK', 'SSIM', 'PSNR', 'LPIPS'])
with open(os.path.join(save_dir, 'zf_results_ave.csv'), 'w') as cf:
writer = csv.writer(cf)
writer.writerow(['METHOD', 'MASK',
'SSIM', 'SSIM_STD',
'PSNR', 'PSNR_STD',
'LPIPS', 'LPIPS_STD',
'FID'])
# ----------------------------------------
# return None for missing key
# ----------------------------------------
opt = option.dict_to_nonedict(opt)
dataset_opt = opt['datasets']['test']
test_set = define_Dataset(dataset_opt)
test_loader = DataLoader(test_set, batch_size=1,
shuffle=False, num_workers=1,
drop_last=False, pin_memory=True)
loss_fn_alex = lpips.LPIPS(net='alex').to(device)
for idx, test_data in enumerate(test_loader):
img_gt = test_data['H'].to(device)
img_lq = test_data['L'].to(device)
# inference
with torch.no_grad():
# pad input image to be a multiple of window_size
_, _, h_old, w_old = img_lq.size()
# old_size = img_lq.size()
#
# h_pad = ceil(h_old / (window_size * 8)) * (window_size * 8) - h_old
# w_pad = ceil(w_old / (window_size * 8)) * (window_size * 8) - w_old
#
# img_lq = torch.cat([img_lq, torch.flip(img_lq, [2])], 2)[:, :, :h_old + h_pad, :]
# img_lq = torch.cat([img_lq, torch.flip(img_lq, [3])], 3)[:, :, :, :w_old + w_pad]
#
# img_gt = torch.cat([img_gt, torch.flip(img_gt, [2])], 2)[:, :, :h_old + h_pad, :]
# img_gt = torch.cat([img_gt, torch.flip(img_gt, [3])], 3)[:, :, :, :w_old + w_pad]
#
# print('Padding: {} --> {}; GPU RAM USED: {:2f} G; GPU RAM MAX USED {:2f} G'
# .format(old_size, img_lq.size(), torch.cuda.memory_allocated()*1e-9, torch.cuda.max_memory_allocated()*1e-9))
time_start = time.time()
img_gen = model(img_lq)
time_end = time.time()
time_c = time_end - time_start # time used
print('time cost', time_c, 's')
img_lq = img_lq[..., :h_old * opt['scale'], :w_old * opt['scale']]
img_gt = img_gt[..., :h_old * opt['scale'], :w_old * opt['scale']]
img_gen = img_gen[..., :h_old * opt['scale'], :w_old * opt['scale']]
diff_gen_x10 = torch.mul(torch.abs(torch.sub(img_gt, img_gen)), 10)
diff_lq_x10 = torch.mul(torch.abs(torch.sub(img_gt, img_lq)), 10)
# evaluate lpips
lpips_ = util.calculate_lpips_single(loss_fn_alex, img_gt, img_gen)
lpips_ = lpips_.data.squeeze().float().cpu().numpy()
test_results['lpips'].append(lpips_)
# evaluate lpips zf
zf_lpips_ = util.calculate_lpips_single(loss_fn_alex, img_gt, img_lq)
zf_lpips_ = zf_lpips_.data.squeeze().float().cpu().numpy()
test_results['zf_lpips'].append(zf_lpips_)
# save image
img_lq = img_lq.data.squeeze().float().cpu().numpy()
img_gt = img_gt.data.squeeze().float().cpu().numpy()
img_gen = img_gen.data.squeeze().float().cpu().numpy()
diff_gen_x10 = diff_gen_x10.data.squeeze().float().cpu().clamp_(0, 1).numpy()
diff_lq_x10 = diff_lq_x10.data.squeeze().float().cpu().clamp_(0, 1).numpy()
# evaluate psnr/ssim
psnr = util.calculate_psnr_single(img_gt, img_gen, border=border)
ssim = util.calculate_ssim_single(img_gt, img_gen, border=border)
test_results['psnr'].append(psnr)
test_results['ssim'].append(ssim)
print('Testing {:d} - PSNR: {:.2f} dB; SSIM: {:.4f}; LPIPS: {:.4f} '.format(idx, psnr, ssim, lpips_))
with open(os.path.join(save_dir, 'results.csv'), 'a') as cf:
writer = csv.writer(cf)
writer.writerow(['SwinMR', dataset_opt['mask'],
test_results['ssim'][idx], test_results['psnr'][idx], test_results['lpips'][idx]])
# evaluate psnr/ssim zf
zf_psnr = util.calculate_psnr_single(img_gt, img_lq, border=border)
zf_ssim = util.calculate_ssim_single(img_gt, img_lq, border=border)
test_results['zf_psnr'].append(zf_psnr)
test_results['zf_ssim'].append(zf_ssim)
print('ZF Testing {:d} - PSNR: {:.2f} dB; SSIM: {:.4f}; LPIPS: {:.4f} '.format(idx, zf_psnr, zf_ssim, zf_lpips_))
with open(os.path.join(save_dir, 'zf_results.csv'), 'a') as cf:
writer = csv.writer(cf)
writer.writerow(['ZF', dataset_opt['mask'],
test_results['zf_ssim'][idx], test_results['zf_psnr'][idx], test_results['zf_lpips'][idx]])
img_lq = (np.clip(img_lq, 0, 1) * 255.0).round().astype(np.uint8) # float32 to uint8
img_gt = (np.clip(img_gt, 0, 1) * 255.0).round().astype(np.uint8) # float32 to uint8
img_gen = (np.clip(img_gen, 0, 1) * 255.0).round().astype(np.uint8) # float32 to uint8
diff_gen_x10 = (diff_gen_x10 * 255.0).round().astype(np.uint8) # float32 to uint8
diff_lq_x10 = (diff_lq_x10 * 255.0).round().astype(np.uint8) # float32 to uint8
isExists = os.path.exists(os.path.join(save_dir, 'ZF'))
if not isExists:
os.makedirs(os.path.join(save_dir, 'ZF'))
isExists = os.path.exists(os.path.join(save_dir, 'GT'))
if not isExists:
os.makedirs(os.path.join(save_dir, 'GT'))
isExists = os.path.exists(os.path.join(save_dir, 'Recon'))
if not isExists:
os.makedirs(os.path.join(save_dir, 'Recon'))
isExists = os.path.exists(os.path.join(save_dir, 'Different'))
if not isExists:
os.makedirs(os.path.join(save_dir, 'Different'))
cv2.imwrite(os.path.join(save_dir, 'ZF', 'ZF_{:05d}.png'.format(idx)), img_lq)
cv2.imwrite(os.path.join(save_dir, 'GT', 'GT_{:05d}.png'.format(idx)), img_gt)
cv2.imwrite(os.path.join(save_dir, 'Recon', 'Recon_{:05d}.png'.format(idx)), img_gen)
diff_gen_x10_color = cv2.applyColorMap(diff_gen_x10, cv2.COLORMAP_JET)
diff_lq_x10_color = cv2.applyColorMap(diff_lq_x10, cv2.COLORMAP_JET)
cv2.imwrite(os.path.join(save_dir, 'Different', 'Diff_Recon_{:05d}.png'.format(idx)), diff_gen_x10_color)
cv2.imwrite(os.path.join(save_dir, 'Different', 'Diff_ZF_{:05d}.png'.format(idx)), diff_lq_x10_color)
# summarize psnr/ssim
ave_psnr = np.mean(test_results['psnr'])
std_psnr = np.std(test_results['psnr'], ddof=1)
ave_ssim = np.mean(test_results['ssim'])
std_ssim = np.std(test_results['ssim'], ddof=1)
ave_lpips = np.mean(test_results['lpips'])
std_lpips = np.std(test_results['lpips'], ddof=1)
print('\n{} \n-- Average PSNR {:.2f} dB ({:.4f} dB)\n-- Average SSIM {:.4f} ({:.6f})\n-- Average LPIPS {:.4f} ({:.6f})'
.format(save_dir, ave_psnr, std_psnr, ave_ssim, std_ssim, ave_lpips, std_lpips))
# summarize psnr/ssim zf
zf_ave_psnr = np.mean(test_results['zf_psnr'])
zf_std_psnr = np.std(test_results['zf_psnr'], ddof=1)
zf_ave_ssim = np.mean(test_results['zf_ssim'])
zf_std_ssim = np.std(test_results['zf_ssim'], ddof=1)
zf_ave_lpips = np.mean(test_results['zf_lpips'])
zf_std_lpips = np.std(test_results['zf_lpips'], ddof=1)
print('\n{} \n-- ZF Average PSNR {:.2f} dB ({:.4f} dB)\n-- ZF Average SSIM {:.4f} ({:.6f})\n-- ZF Average LPIPS {:.4f} ({:.6f})'
.format(save_dir, zf_ave_psnr, zf_std_psnr, zf_ave_ssim, zf_std_ssim, zf_ave_lpips, zf_std_lpips))
# FID
log = os.popen("{} -m pytorch_fid {} {} ".format(
sys.executable,
os.path.join(save_dir, 'GT'),
os.path.join(save_dir, 'Recon'))).read()
print(log)
fid = eval(log.replace('FID: ', ''))
with open(os.path.join(save_dir, 'results_ave.csv'), 'a') as cf:
writer = csv.writer(cf)
writer.writerow(['SwinMR', dataset_opt['mask'],
ave_ssim, std_ssim,
ave_psnr, std_psnr,
ave_lpips, std_lpips,
fid])
# FID ZF
log = os.popen("{} -m pytorch_fid {} {} ".format(
sys.executable,
os.path.join(save_dir, 'GT'),
os.path.join(save_dir, 'ZF'))).read()
print(log)
zf_fid = eval(log.replace('FID: ', ''))
with open(os.path.join(save_dir, 'zf_results_ave.csv'), 'a') as cf:
writer = csv.writer(cf)
writer.writerow(['ZF', dataset_opt['mask'],
zf_ave_ssim, zf_std_ssim,
zf_ave_psnr, zf_std_psnr,
zf_ave_lpips, zf_std_lpips,
zf_fid])
def define_model(args):
    """Build the SwinMR generator and load its pretrained checkpoint.

    Args:
        args (dict): option dict; reads ``args['netG']['embed_dim']`` for the
            embedding width and ``args['model_path']`` for the checkpoint file.

    Returns:
        The network with checkpoint weights loaded (``strict=True``).
    """
    generator = net(
        upscale=1, in_chans=1, img_size=256, window_size=8,
        img_range=1., depths=[6, 6, 6, 6, 6, 6],
        embed_dim=args['netG']['embed_dim'], num_heads=[6, 6, 6, 6, 6, 6],
        mlp_ratio=2, upsampler='', resi_connection='1conv')
    checkpoint = torch.load(args['model_path'])
    # checkpoints may store weights under the 'params' key or at top level
    weights = checkpoint['params'] if 'params' in checkpoint.keys() else checkpoint
    generator.load_state_dict(weights, strict=True)
    return generator
def setup(args):
save_dir = f"results/{args['task']}/{args['model_name']}"
border = 0
window_size = 8
return save_dir, border, window_size
# Script entry point: run the full evaluation pipeline defined by main().
if __name__ == '__main__':
    main()
| 11,599 | 40.281139 | 134 | py |
SwinMR | SwinMR-main/main_train_swinmr.py | '''
# -----------------------------------------
Main Program for Training
SwinMR for MRI_Recon
by Jiahao Huang (j.huang21@imperial.ac.uk)
# -----------------------------------------
'''
import os
import sys
import math
import argparse
import random
import cv2
import numpy as np
import logging
import time
import torch
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from utils import utils_logger
from utils import utils_image as util
from utils import utils_option as option
from utils.utils_dist import get_dist_info, init_dist
from utils import utils_early_stopping
from data.select_dataset import define_Dataset
from models.select_model import define_Model
from tensorboardX import SummaryWriter
from collections import OrderedDict
from skimage.transform import resize
import lpips
def main(json_path=''):
    """Train SwinMR for MRI reconstruction.

    Workflow: parse the option JSON (with command-line override), resume from
    the most recent G/E/optimizer checkpoints found on disk, build the
    train/test dataloaders, then loop indefinitely over epochs performing
    optimisation with periodic logging, checkpoint saving and validation
    (PSNR / SSIM / LPIPS / FID).

    Args:
        json_path (str): default path to the option JSON file; may be
            overridden on the command line via ``--opt``.
    """
    '''
    # ----------------------------------------
    # Step--1 (prepare opt)
    # ----------------------------------------
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('--opt', type=str, default=json_path, help='Path to option JSON file.')
    parser.add_argument('--launcher', default='pytorch', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    # parser.add_argument('--dist', default=False)
    opt = option.parse(parser.parse_args().opt, is_train=True)
    # opt['dist'] = parser.parse_args().dist

    # distributed settings
    if opt['dist']:
        init_dist('pytorch')
    opt['rank'], opt['world_size'] = get_dist_info()

    # only rank 0 creates the output folders
    if opt['rank'] == 0:
        util.mkdirs((path for key, path in opt['path'].items() if 'pretrained' not in key))

    # update opt: resume from the latest G / E / optimizer checkpoints on disk
    init_iter_G, init_path_G = option.find_last_checkpoint(opt['path']['models'], net_type='G')
    init_iter_E, init_path_E = option.find_last_checkpoint(opt['path']['models'], net_type='E')
    opt['path']['pretrained_netG'] = init_path_G
    opt['path']['pretrained_netE'] = init_path_E
    init_iter_optimizerG, init_path_optimizerG = option.find_last_checkpoint(opt['path']['models'], net_type='optimizerG')
    opt['path']['pretrained_optimizerG'] = init_path_optimizerG
    current_step = max(init_iter_G, init_iter_E, init_iter_optimizerG)

    # save opt to a '../option.json' file
    if opt['rank'] == 0:
        option.save(opt)

    # return None for missing key
    opt = option.dict_to_nonedict(opt)

    # configure logger
    if opt['rank'] == 0:
        # logger
        logger_name = 'train'
        utils_logger.logger_info(logger_name, os.path.join(opt['path']['log'], logger_name+'.log'))
        logger = logging.getLogger(logger_name)
        logger.info(option.dict2str(opt))
        # tensorbordX log
        logger_tensorboard = SummaryWriter(os.path.join(opt['path']['log']))

    # set seed (shared across random / numpy / torch for reproducibility)
    seed = opt['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    print('Random seed: {}'.format(seed))
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    '''
    # ----------------------------------------
    # Step--2 (creat dataloader)
    # ----------------------------------------
    '''
    # ----------------------------------------
    # 1) create_dataset
    # 2) creat_dataloader for train and test
    # ----------------------------------------
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = define_Dataset(dataset_opt)
            train_size = int(math.ceil(len(train_set) / dataset_opt['dataloader_batch_size']))
            if opt['rank'] == 0:
                logger.info('Number of train images: {:,d}, iters: {:,d}'.format(len(train_set), train_size))
            if opt['dist']:
                # per-GPU batch size / workers when using DistributedDataParallel
                train_sampler = DistributedSampler(train_set, shuffle=dataset_opt['dataloader_shuffle'], drop_last=True, seed=seed)
                train_loader = DataLoader(train_set,
                                          batch_size=dataset_opt['dataloader_batch_size']//opt['num_gpu'],
                                          shuffle=False,
                                          num_workers=dataset_opt['dataloader_num_workers']//opt['num_gpu'],
                                          drop_last=True,
                                          pin_memory=False,
                                          sampler=train_sampler)
            else:
                train_loader = DataLoader(train_set,
                                          batch_size=dataset_opt['dataloader_batch_size'],
                                          shuffle=dataset_opt['dataloader_shuffle'],
                                          num_workers=dataset_opt['dataloader_num_workers'],
                                          drop_last=True,
                                          pin_memory=False)
        elif phase == 'test':
            test_set = define_Dataset(dataset_opt)
            test_loader = DataLoader(test_set, batch_size=1,
                                     shuffle=False, num_workers=1,
                                     drop_last=False, pin_memory=False)
        else:
            raise NotImplementedError("Phase [%s] is not recognized." % phase)

    '''
    # ----------------------------------------
    # Step--3 (initialize model)
    # ----------------------------------------
    '''
    # define model
    model = define_Model(opt)
    model.init_train()

    # define LPIPS function
    loss_fn_alex = lpips.LPIPS(net='alex').to(model.device)

    # define early stopping (currently unused in the loop below — see the
    # commented-out early-stopping section near the end)
    if opt['train']['is_early_stopping']:
        early_stopping = utils_early_stopping.EarlyStopping(patience=opt['train']['early_stopping_num'])

    # record
    if opt['rank'] == 0:
        logger.info(model.info_network())
        logger.info(model.info_params())

    '''
    # ----------------------------------------
    # Step--4 (main training)
    # ----------------------------------------
    '''
    for epoch in range(100000000):  # keep running
        if opt['dist']:
            train_sampler.set_epoch(epoch)
        for i, train_data in enumerate(train_loader):
            current_step += 1
            # -------------------------------
            # 1) update learning rate
            # -------------------------------
            model.update_learning_rate(current_step)
            # -------------------------------
            # 2) feed patch pairs
            # -------------------------------
            model.feed_data(train_data)
            # -------------------------------
            # 3) optimize parameters
            # -------------------------------
            model.optimize_parameters(current_step)
            # -------------------------------
            # 4) training information
            # -------------------------------
            if current_step % opt['train']['checkpoint_print'] == 0 and opt['rank'] == 0:
                logs = model.current_log()
                message = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> '.format(epoch, current_step, model.current_learning_rate())
                for k, v in logs.items():
                    message += '{:s}: {:.3e} '.format(k, v)
                logger.info(message)
                # record train loss (individual loss terms only when present)
                logger_tensorboard.add_scalar('Learning Rate', model.current_learning_rate(), global_step=current_step)
                logger_tensorboard.add_scalar('TRAIN Generator LOSS/G_loss', logs['G_loss'], global_step=current_step)
                if 'G_loss_image' in logs.keys():
                    logger_tensorboard.add_scalar('TRAIN Generator LOSS/G_loss_image', logs['G_loss_image'], global_step=current_step)
                if 'G_loss_frequency' in logs.keys():
                    logger_tensorboard.add_scalar('TRAIN Generator LOSS/G_loss_frequency', logs['G_loss_frequency'], global_step=current_step)
                if 'G_loss_preceptual' in logs.keys():
                    logger_tensorboard.add_scalar('TRAIN Generator LOSS/G_loss_preceptual', logs['G_loss_preceptual'], global_step=current_step)
            # -------------------------------
            # 5) save model
            # -------------------------------
            if current_step % opt['train']['checkpoint_save'] == 0 and opt['rank'] == 0:
                logger.info('Saving the model.')
                model.save(current_step)
            # -------------------------------
            # 6) testing
            # -------------------------------
            if current_step % opt['train']['checkpoint_test'] == 0 and opt['rank'] == 0:
                # create folder for FID
                img_dir_tmp_H = os.path.join(opt['path']['images'], 'tempH')
                util.mkdir(img_dir_tmp_H)
                img_dir_tmp_E = os.path.join(opt['path']['images'], 'tempE')
                util.mkdir(img_dir_tmp_E)
                img_dir_tmp_L = os.path.join(opt['path']['images'], 'tempL')
                util.mkdir(img_dir_tmp_L)
                # create result dict (the G_loss* lists are initialised but not
                # appended to in this loop)
                test_results = OrderedDict()
                test_results['psnr'] = []
                test_results['ssim'] = []
                test_results['lpips'] = []
                test_results['G_loss'] = []
                test_results['G_loss_image'] = []
                test_results['G_loss_frequency'] = []
                test_results['G_loss_preceptual'] = []
                for idx, test_data in enumerate(test_loader):
                    with torch.no_grad():
                        img_info = test_data['img_info'][0]
                        img_dir = os.path.join(opt['path']['images'], img_info)
                        # testing and adjust resolution
                        model.feed_data(test_data)
                        model.check_windowsize()
                        model.test()
                        model.recover_windowsize()
                        # acquire test result
                        results = model.current_results_gpu()
                        # calculate LPIPS (GPU | torch.tensor)
                        L_img = results['L']
                        E_img = results['E']
                        H_img = results['H']
                        current_lpips = util.calculate_lpips_single(loss_fn_alex, H_img, E_img).data.squeeze().float().cpu().numpy()
                        # calculate PSNR SSIM (CPU | np.float)
                        L_img = util.tensor2float(L_img)
                        E_img = util.tensor2float(E_img)
                        H_img = util.tensor2float(H_img)
                        current_psnr = util.calculate_psnr_single(H_img, E_img, border=0)
                        current_ssim = util.calculate_ssim_single(H_img, E_img, border=0)
                        # record metrics
                        test_results['psnr'].append(current_psnr)
                        test_results['ssim'].append(current_ssim)
                        test_results['lpips'].append(current_lpips)
                        # save samples (only the first 5 images, per-image folder)
                        if idx < 5:
                            util.mkdir(img_dir)
                            cv2.imwrite(os.path.join(img_dir, 'ZF_{:05d}.png'.format(current_step)), np.clip(L_img, 0, 1) * 255)
                            cv2.imwrite(os.path.join(img_dir, 'Recon_{:05d}.png'.format(current_step)), np.clip(E_img, 0, 1) * 255)
                            cv2.imwrite(os.path.join(img_dir, 'GT_{:05d}.png'.format(current_step)), np.clip(H_img, 0, 1) * 255)
                        # write all images to the temp folders consumed by pytorch_fid
                        if opt['datasets']['test']['resize_for_fid']:
                            resize_for_fid = opt['datasets']['test']['resize_for_fid']
                            cv2.imwrite(os.path.join(img_dir_tmp_L, 'ZF_{:05d}.png'.format(idx)), resize(np.clip(L_img, 0, 1), (resize_for_fid[0], resize_for_fid[1])) * 255)
                            cv2.imwrite(os.path.join(img_dir_tmp_E, 'Recon_{:05d}.png'.format(idx)), resize(np.clip(E_img, 0, 1), (resize_for_fid[0], resize_for_fid[1])) * 255)
                            cv2.imwrite(os.path.join(img_dir_tmp_H, 'GT_{:05d}.png'.format(idx)), resize(np.clip(H_img, 0, 1), (resize_for_fid[0], resize_for_fid[1])) * 255)
                        else:
                            cv2.imwrite(os.path.join(img_dir_tmp_L, 'ZF_{:05d}.png'.format(idx)), np.clip(L_img, 0, 1) * 255)
                            cv2.imwrite(os.path.join(img_dir_tmp_E, 'Recon_{:05d}.png'.format(idx)), np.clip(E_img, 0, 1) * 255)
                            cv2.imwrite(os.path.join(img_dir_tmp_H, 'GT_{:05d}.png'.format(idx)), np.clip(H_img, 0, 1) * 255)
                # summarize psnr/ssim
                ave_psnr = np.mean(test_results['psnr'])
                # std_psnr = np.std(test_results['psnr'], ddof=1)
                ave_ssim = np.mean(test_results['ssim'])
                # std_ssim = np.std(test_results['ssim'], ddof=1)
                ave_lpips = np.mean(test_results['lpips'])
                # std_lpips = np.std(test_results['lpips'], ddof=1)
                # calculate FID
                if opt['dist']:
                    # DistributedDataParallel (If multiple GPUs are used to train, use the 2nd GPU for FID calculation.)
                    log = os.popen("{} -m pytorch_fid {} {} ".format(
                        sys.executable,
                        img_dir_tmp_H,
                        img_dir_tmp_E)).read()
                else:
                    # DataParallel (If multiple GPUs are used to train, use the 2nd GPU for FID calculation for unbalance of GPU menory use.)
                    if len(opt['gpu_ids']) > 1:
                        log = os.popen("{} -m pytorch_fid --device cuda:1 {} {} ".format(
                            sys.executable,
                            img_dir_tmp_H,
                            img_dir_tmp_E)).read()
                    else:
                        log = os.popen("{} -m pytorch_fid {} {} ".format(
                            sys.executable,
                            img_dir_tmp_H,
                            img_dir_tmp_E)).read()
                print(log)
                # pytorch_fid prints "FID:  <value>"; parse with float() instead of
                # eval() — eval would execute arbitrary subprocess output as code
                fid = float(log.replace('FID: ', ''))
                # testing log
                logger.info('<epoch:{:3d}, iter:{:8,d}, Average PSNR : {:<.2f}; Average Average SSIM : {:<.4f}; LPIPS : {:<.4f}; FID : {:<.2f}'
                            .format(epoch, current_step, ave_psnr, ave_ssim, ave_lpips, fid))
                logger_tensorboard.add_scalar('VALIDATION PSNR', ave_psnr, global_step=current_step)
                logger_tensorboard.add_scalar('VALIDATION SSIM', ave_ssim, global_step=current_step)
                logger_tensorboard.add_scalar('VALIDATION LPIPS', ave_lpips, global_step=current_step)
                logger_tensorboard.add_scalar('VALIDATION FID', fid, global_step=current_step)
                # # early stopping
                # if opt['train']['is_early_stopping']:
                #     early_stopping(ave_psnr, model, epoch, current_step)
                #     if early_stopping.is_save:
                #         logger.info('Saving the model by early stopping')
                #         model.save(f'best_{current_step}')
                #     if early_stopping.early_stop:
                #         print("Early stopping!")
                #         break
    # NOTE: only reachable if the (commented-out) early-stopping break fires
    print("Training Stop")
# Script entry point: start the SwinMR training loop.
if __name__ == '__main__':
    main()
| 15,434 | 43.353448 | 176 | py |
SwinMR | SwinMR-main/models/model_base.py | import os
import torch
import torch.nn as nn
from utils.utils_bnorm import merge_bn, tidy_sequential
from torch.nn.parallel import DataParallel, DistributedDataParallel
class ModelBase():
    """Base class shared by the SwinMR training/testing models.

    Holds the option dict, target device, and LR-scheduler list, and
    implements the common plumbing: (Distributed)DataParallel wrapping and
    unwrapping, network/optimizer (de)serialisation, textual network
    description, EMA update of netE from netG, and batch-norm merging.
    Subclasses override the empty hooks (init_train, feed_data, ...).
    """
    def __init__(self, opt):
        self.opt = opt                         # opt
        self.save_dir = opt['path']['models']  # save models
        # use CUDA whenever any GPU ids are configured
        self.device = torch.device('cuda' if opt['gpu_ids'] is not None else 'cpu')
        self.is_train = opt['is_train']        # training or not
        self.schedulers = []                   # schedulers

    """
    # ----------------------------------------
    # Preparation before training with data
    # Save model during training
    # ----------------------------------------
    """

    def init_train(self):
        """Hook: build networks, losses, optimizers and schedulers."""
        pass

    def load(self):
        """Hook: load pretrained weights."""
        pass

    def save(self, label):
        """Hook: save checkpoints tagged with ``label``."""
        pass

    def define_loss(self):
        """Hook: define the training loss function(s)."""
        pass

    def define_optimizer(self):
        """Hook: define the optimizer(s)."""
        pass

    def define_scheduler(self):
        """Hook: define and register the LR scheduler(s)."""
        pass

    """
    # ----------------------------------------
    # Optimization during training with data
    # Testing/evaluation
    # ----------------------------------------
    """

    def feed_data(self, data):
        """Hook: move one batch of data onto the model."""
        pass

    def optimize_parameters(self):
        """Hook: run one optimisation step."""
        pass

    def current_visuals(self):
        """Hook: return current images for visualisation."""
        pass

    def current_losses(self):
        """Hook: return current loss values."""
        pass

    def update_learning_rate(self, n):
        # step every registered scheduler to (global) step n
        for scheduler in self.schedulers:
            scheduler.step(n)

    def current_learning_rate(self):
        # learning rate of the first scheduler (assumed representative)
        return self.schedulers[0].get_last_lr()[0]

    def requires_grad(self, model, flag=True):
        """Enable/disable gradients for every parameter of ``model``."""
        for p in model.parameters():
            p.requires_grad = flag

    """
    # ----------------------------------------
    # Information of net
    # ----------------------------------------
    """

    def print_network(self):
        pass

    def info_network(self):
        pass

    def print_params(self):
        pass

    def info_params(self):
        pass

    def get_bare_model(self, network):
        """Get bare model, especially under wrapping with
        DistributedDataParallel or DataParallel.
        """
        if isinstance(network, (DataParallel, DistributedDataParallel)):
            network = network.module
        return network

    def model_to_device(self, network):
        """Model to device. It also warps models with DistributedDataParallel
        or DataParallel.
        Args:
            network (nn.Module)
        """
        network = network.to(self.device)
        if self.opt['dist']:
            find_unused_parameters = self.opt['find_unused_parameters']
            network = DistributedDataParallel(network, device_ids=[torch.cuda.current_device()], find_unused_parameters=find_unused_parameters)
        else:
            network = DataParallel(network)
        return network

    # ----------------------------------------
    # network name and number of parameters
    # ----------------------------------------
    def describe_network(self, network):
        """Return a string with the network's name, parameter count and structure."""
        network = self.get_bare_model(network)
        msg = '\n'
        msg += 'Networks name: {}'.format(network.__class__.__name__) + '\n'
        msg += 'Params number: {}'.format(sum(map(lambda x: x.numel(), network.parameters()))) + '\n'
        msg += 'Net structure:\n{}'.format(str(network)) + '\n'
        return msg

    # ----------------------------------------
    # parameters description
    # ----------------------------------------
    def describe_params(self, network):
        """Return a per-tensor statistics table (mean/min/max/std/shape) of the state_dict."""
        network = self.get_bare_model(network)
        msg = '\n'
        msg += ' | {:^6s} | {:^6s} | {:^6s} | {:^6s} || {:<20s}'.format('mean', 'min', 'max', 'std', 'shape', 'param_name') + '\n'
        for name, param in network.state_dict().items():
            # skip BatchNorm bookkeeping tensors
            if not 'num_batches_tracked' in name:
                v = param.data.clone().float()
                msg += ' | {:>6.3f} | {:>6.3f} | {:>6.3f} | {:>6.3f} | {} || {:s}'.format(v.mean(), v.min(), v.max(), v.std(), v.shape, name) + '\n'
        return msg

    """
    # ----------------------------------------
    # Save prameters
    # Load prameters
    # ----------------------------------------
    """

    # ----------------------------------------
    # save the state_dict of the network
    # ----------------------------------------
    def save_network(self, save_dir, network, network_label, iter_label):
        """Save the bare network's state_dict as '<iter>_<label>.pth' (CPU tensors)."""
        save_filename = '{}_{}.pth'.format(iter_label, network_label)
        save_path = os.path.join(save_dir, save_filename)
        network = self.get_bare_model(network)
        state_dict = network.state_dict()
        # move every tensor to CPU so the checkpoint loads on any device
        for key, param in state_dict.items():
            state_dict[key] = param.cpu()
        torch.save(state_dict, save_path)

    # ----------------------------------------
    # load the state_dict of the network
    # ----------------------------------------
    def load_network(self, load_path, network, strict=True, param_key='params'):
        """Load weights from ``load_path`` (optionally nested under ``param_key``).

        With strict=False the old and new state_dicts are matched *by
        position*, not by key — this assumes both dicts enumerate the same
        parameters in the same order.
        """
        network = self.get_bare_model(network)
        if strict:
            state_dict = torch.load(load_path)
            if param_key in state_dict.keys():
                state_dict = state_dict[param_key]
            network.load_state_dict(state_dict, strict=strict)
        else:
            state_dict_old = torch.load(load_path)
            if param_key in state_dict_old.keys():
                state_dict_old = state_dict_old[param_key]
            state_dict = network.state_dict()
            # positional copy: i-th old tensor overwrites i-th new entry
            for ((key_old, param_old), (key, param)) in zip(state_dict_old.items(), state_dict.items()):
                state_dict[key] = param_old
            network.load_state_dict(state_dict, strict=True)
            del state_dict_old, state_dict

    # ----------------------------------------
    # save the state_dict of the optimizer
    # ----------------------------------------
    def save_optimizer(self, save_dir, optimizer, optimizer_label, iter_label):
        """Save the optimizer state_dict as '<iter>_<label>.pth'."""
        save_filename = '{}_{}.pth'.format(iter_label, optimizer_label)
        save_path = os.path.join(save_dir, save_filename)
        torch.save(optimizer.state_dict(), save_path)

    # ----------------------------------------
    # load the state_dict of the optimizer
    # ----------------------------------------
    def load_optimizer(self, load_path, optimizer):
        """Load optimizer state, remapping tensors onto the current CUDA device."""
        optimizer.load_state_dict(torch.load(load_path, map_location=lambda storage, loc: storage.cuda(torch.cuda.current_device())))

    def update_E(self, decay=0.999):
        """Exponential moving average: netE <- decay*netE + (1-decay)*netG."""
        netG = self.get_bare_model(self.netG)
        netG_params = dict(netG.named_parameters())
        netE_params = dict(self.netE.named_parameters())
        for k in netG_params.keys():
            netE_params[k].data.mul_(decay).add_(netG_params[k].data, alpha=1-decay)

    """
    # ----------------------------------------
    # Merge Batch Normalization for training
    # Merge Batch Normalization for testing
    # ----------------------------------------
    """

    # ----------------------------------------
    # merge bn during training
    # ----------------------------------------
    def merge_bnorm_train(self):
        # merging changes the parameter set, so rebuild optimizer/scheduler
        merge_bn(self.netG)
        tidy_sequential(self.netG)
        self.define_optimizer()
        self.define_scheduler()

    # ----------------------------------------
    # merge bn before testing
    # ----------------------------------------
    def merge_bnorm_test(self):
        merge_bn(self.netG)
        tidy_sequential(self.netG)
| 7,442 | 33.299539 | 148 | py |
SwinMR | SwinMR-main/models/select_network.py | '''
# -----------------------------------------
Define Training Network
by Jiahao Huang (j.huang21@imperial.ac.uk)
# -----------------------------------------
'''
import functools
import torch
import torchvision.models
from torch.nn import init
# --------------------------------------------
# Recon Generator, netG, G
# --------------------------------------------
def define_G(opt):
    """Build the reconstruction generator (netG) described by opt['netG'].

    Only the 'swinir' backbone (used by SwinMR) is constructed here.  When
    ``opt['is_train']`` is set, the weights are initialised according to the
    init options.
    """
    net_opt = opt['netG']
    kind = net_opt['net_type']

    # ----------------------------------------
    # SwinIR (for SwinMR)
    # ----------------------------------------
    if kind == 'swinir':
        from models.network_swinmr import SwinIR as net
        netG = net(img_size=net_opt['img_size'],
                   in_chans=net_opt['in_chans'],
                   out_chans=net_opt['out_chans'],
                   embed_dim=net_opt['embed_dim'],
                   depths=net_opt['depths'],
                   num_heads=net_opt['num_heads'],
                   window_size=net_opt['window_size'],
                   mlp_ratio=net_opt['mlp_ratio'],
                   upscale=net_opt['upscale'],
                   img_range=net_opt['img_range'],
                   upsampler=net_opt['upsampler'],
                   resi_connection=net_opt['resi_connection'])

    # ----------------------------------------
    # initialize weights (training runs only)
    # ----------------------------------------
    if opt['is_train']:
        init_weights(netG,
                     init_type=net_opt['init_type'],
                     init_bn_type=net_opt['init_bn_type'],
                     gain=net_opt['init_gain'])

    return netG
# --------------------------------------------
# VGGfeature, netF, F
# --------------------------------------------
def define_F(opt, use_bn=False):
    """Create a frozen VGG19 feature extractor (netF) for perceptual loss."""
    device = torch.device('cuda' if opt['gpu_ids'] else 'cpu')
    from models.network_feature import VGGFeatureExtractor
    # pytorch pretrained VGG19-54, feature taken before ReLU
    # (layer 49 with batch-norm, layer 34 without)
    layer_index = 49 if use_bn else 34
    netF = VGGFeatureExtractor(feature_layer=layer_index,
                               use_bn=use_bn,
                               use_input_norm=True,
                               device=device)
    netF.eval()  # No need to train, but need BP to input
    return netF
# --------------------------------------------
# weights initialization
# --------------------------------------------
def init_weights(net, init_type='xavier_uniform', init_bn_type='uniform', gain=1):
    """
    # Kai Zhang, https://github.com/cszn/KAIR
    #
    # Args:
    #   init_type:
    #       default, none: pass init_weights
    #       normal; uniform; xavier_normal; xavier_uniform;
    #       kaiming_normal; kaiming_uniform; orthogonal
    #   init_bn_type:
    #       uniform; constant
    #   gain:
    #       e.g. 0.2
    """
    def weights_fn(module, init_type='xavier_uniform', init_bn_type='uniform', gain=1):
        # applied to every sub-module via net.apply()
        cls_name = module.__class__.__name__
        if cls_name.find('Conv') != -1 or cls_name.find('Linear') != -1:
            w = module.weight.data
            if init_type == 'normal':
                init.normal_(w, 0, 0.1)
                w.clamp_(-1, 1).mul_(gain)
            elif init_type == 'uniform':
                init.uniform_(w, -0.2, 0.2)
                w.mul_(gain)
            elif init_type == 'xavier_normal':
                init.xavier_normal_(w, gain=gain)
                w.clamp_(-1, 1)
            elif init_type == 'xavier_uniform':
                init.xavier_uniform_(w, gain=gain)
            elif init_type == 'kaiming_normal':
                init.kaiming_normal_(w, a=0, mode='fan_in', nonlinearity='relu')
                w.clamp_(-1, 1).mul_(gain)
            elif init_type == 'kaiming_uniform':
                init.kaiming_uniform_(w, a=0, mode='fan_in', nonlinearity='relu')
                w.mul_(gain)
            elif init_type == 'orthogonal':
                init.orthogonal_(w, gain=gain)
            else:
                raise NotImplementedError('Initialization method [{:s}] is not implemented'.format(init_type))
            # biases are always zeroed regardless of init_type
            if module.bias is not None:
                module.bias.data.zero_()
        elif cls_name.find('BatchNorm2d') != -1:
            if init_bn_type == 'uniform':  # preferred
                if module.affine:
                    init.uniform_(module.weight.data, 0.1, 1.0)
                    init.constant_(module.bias.data, 0.0)
            elif init_bn_type == 'constant':
                if module.affine:
                    init.constant_(module.weight.data, 1.0)
                    init.constant_(module.bias.data, 0.0)
            else:
                raise NotImplementedError('Initialization method [{:s}] is not implemented'.format(init_bn_type))

    if init_type not in ['default', 'none']:
        print('Initialization method [{:s} + {:s}], gain is [{:.2f}]'.format(init_type, init_bn_type, gain))
        fn = functools.partial(weights_fn, init_type=init_type, init_bn_type=init_bn_type, gain=gain)
        net.apply(fn)
    else:
        # 'default'/'none': the network defined its own initialisation
        print('Pass this initialization! Initialization was done during network defination!')
| 5,220 | 35.006897 | 113 | py |
SwinMR | SwinMR-main/models/network_swinmr.py | '''
# -----------------------------------------
Network
SwinMR m.1.3
by Jiahao Huang (j.huang21@imperial.ac.uk)
Thanks:
https://github.com/JingyunLiang/SwinIR
https://github.com/microsoft/Swin-Transformer
# -----------------------------------------
'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
'''
Multilayer Perceptron
'''
class Mlp(nn.Module):
    """Two-layer feed-forward block: Linear -> act -> drop -> Linear -> drop."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # fall back to the input width when hidden/output widths are omitted
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # applied token-wise; all leading dimensions are preserved
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
def window_partition(x, window_size):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    n_h, n_w = H // window_size, W // window_size
    # split H and W into (grid, window) axis pairs ...
    tiled = x.view(B, n_h, window_size, n_w, window_size, C)
    # ... bring the two grid axes together, then fold them into the batch dim
    return tiled.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
def window_reverse(windows, window_size, H, W):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    # recover batch size from the flattened window count
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    n_h, n_w = H // window_size, W // window_size
    # unfold batch into (B, grid_h, grid_w, win_h, win_w, C), then interleave
    # grid and window axes back into full H and W
    grid = windows.view(B, n_h, n_w, window_size, window_size, -1)
    return grid.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
class WindowAttention(nn.Module):
    r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads  # number of attention heads
        head_dim = dim // num_heads  # per-head channel width
        self.scale = qk_scale or head_dim ** -0.5

        # define a parameter table of relative position bias; one learnable
        # bias per head for each possible (dh, dw) offset within a window
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        # row-major flattening of the 2D offset into a single table index
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        # buffer, not parameter: fixed lookup indices, saved with the model
        self.register_buffer("relative_position_index", relative_position_index)

        # single projection producing q, k and v in one matmul
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape  # B_: num_windows*B, N: tokens per window, C: embed dim
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)
        # q,k,v: (B_, num_heads, N, head_dim)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        # add the learned relative position bias, gathered via the fixed index
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            # broadcast the per-window (0/-inf) mask over batch and heads
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        # weighted sum of values, heads re-merged into the channel dim
        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def extra_repr(self) -> str:
        return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'

    def flops(self, N):
        # calculate flops for 1 window with token length of N
        flops = 0
        # qkv = self.qkv(x)
        flops += N * self.dim * 3 * self.dim
        # attn = (q @ k.transpose(-2, -1))
        flops += self.num_heads * N * (self.dim // self.num_heads) * N
        # x = (attn @ v)
        flops += self.num_heads * N * N * (self.dim // self.num_heads)
        # x = self.proj(x)
        flops += N * self.dim * self.dim
        return flops

    def params(self):
        # calculate params for 1 window with token length of N
        params = 0
        # qkv = self.qkv(x)
        params += self.dim * 3 * self.dim
        # attn = (q @ k.transpose(-2, -1))
        params += 0
        # x = (attn @ v)
        params += 0
        # x = self.proj(x)
        params += self.dim * self.dim
        return params
class SwinTransformerBlock(nn.Module):
    r""" Swin Transformer Block.

    One transformer block of the Swin architecture:
    ``LayerNorm -> (shifted-)window MSA -> residual`` followed by
    ``LayerNorm -> MLP -> residual``.  When ``shift_size > 0`` the window
    grid is cyclically shifted so consecutive blocks exchange information
    across window boundaries (SW-MSA).

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        if self.shift_size > 0:
            # the SW-MSA mask depends only on the resolution, so precompute it once
            attn_mask = self.calculate_mask(self.input_resolution)
        else:
            attn_mask = None
        self.register_buffer("attn_mask", attn_mask)

    def calculate_mask(self, x_size):
        # calculate attention mask for SW-MSA: after the cyclic shift, a window
        # may contain tokens from regions that are not spatially adjacent, and
        # those pairs must not attend to each other.
        H, W = x_size
        img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
        # Label each of the 3x3 shifted regions with a distinct id.
        h_slices = (slice(0, -self.window_size),
                    slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None))
        w_slices = (slice(0, -self.window_size),
                    slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None))
        cnt = 0
        for h in h_slices:
            for w in w_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1
        mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
        # Token pairs from different regions get -100 (effectively -inf before softmax).
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        return attn_mask

    def forward(self, x, x_size):
        """x: (B, H*W, C) token sequence; x_size: (H, W). Returns the same shape."""
        H, W = x_size
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)  # tokens back to a spatial grid
        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x
        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C
        # W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size
        if self.input_resolution == x_size:
            attn_windows = self.attn(x_windows, mask=self.attn_mask)  # precomputed mask
        else:
            # resolution differs from training (e.g. test time): rebuild the mask on the fly
            attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C
        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)  # flatten back to tokens
        # FFN
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
               f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"

    def flops(self):
        """Approximate multiply-accumulate count for one forward pass of this block."""
        flops = 0
        H, W = self.input_resolution
        # norm1
        flops += self.dim * H * W
        # W-MSA/SW-MSA
        nW = H * W / self.window_size / self.window_size
        flops += nW * self.attn.flops(self.window_size * self.window_size)
        # mlp
        flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
        # norm2
        flops += self.dim * H * W
        return flops

    def params(self):
        """Parameter count (same accounting convention as WindowAttention.params)."""
        params = 0
        # norm1
        params += self.dim * 2
        # W-MSA/SW-MSA
        params += self.attn.params()
        # mlp
        params += 2 * self.dim * self.dim * self.mlp_ratio
        # norm2
        params += self.dim * 2
        return params
class PatchMerging(nn.Module):
    r""" Patch Merging Layer.

    Downsamples a (H, W) token grid by 2x in each direction: the four pixels
    of every 2x2 neighbourhood are concatenated channel-wise (C -> 4C),
    normalized, and linearly reduced to 2C.

    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm
    """

    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x):
        """x: (B, H*W, C) -> (B, H/2 * W/2, 2C)."""
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
        grid = x.view(B, H, W, C)
        # The four interleaved sub-grids of each 2x2 block, in the order
        # (0,0), (1,0), (0,1), (1,1) of (row offset, col offset).
        corners = [grid[:, dy::2, dx::2, :] for dx in (0, 1) for dy in (0, 1)]
        merged = torch.cat(corners, -1).view(B, -1, 4 * C)  # B H/2*W/2 4*C
        return self.reduction(self.norm(merged))

    def extra_repr(self) -> str:
        return "input_resolution={}, dim={}".format(self.input_resolution, self.dim)

    def flops(self):
        """Multiply-accumulate estimate: norm over H*W tokens + the 4C->2C reduction."""
        H, W = self.input_resolution
        return H * W * self.dim + (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim

    def params(self):
        # Matches the original accounting: norm counted as 2*dim, plus the
        # reduction weight 4*dim x 2*dim.
        return 2 * self.dim + 4 * self.dim * 2 * self.dim
class BasicLayer(nn.Module):
    """ A basic Swin Transformer layer for one stage.

    Stacks ``depth`` SwinTransformerBlocks, alternating between regular
    (shift 0, even blocks) and shifted (window_size // 2, odd blocks) window
    attention, with an optional downsampling layer at the end.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint
        # build blocks: even index -> W-MSA (shift 0), odd index -> SW-MSA
        self.blocks = nn.ModuleList([
            SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
                                 num_heads=num_heads, window_size=window_size,
                                 shift_size=0 if (i % 2 == 0) else window_size // 2,
                                 mlp_ratio=mlp_ratio,
                                 qkv_bias=qkv_bias, qk_scale=qk_scale,
                                 drop=drop, attn_drop=attn_drop,
                                 drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                                 norm_layer=norm_layer)
            for i in range(depth)])
        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None

    def forward(self, x, x_size):
        """Run every block (optionally under activation checkpointing), then downsample."""
        for blk in self.blocks:
            if self.use_checkpoint:
                # trade compute for memory: re-run the block in the backward pass
                x = checkpoint.checkpoint(blk, x, x_size)
            else:
                x = blk(x, x_size)
        if self.downsample is not None:
            x = self.downsample(x)
        return x

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"

    def flops(self):
        """Sum of per-block flops plus the optional downsample layer."""
        flops = 0
        for blk in self.blocks:
            flops += blk.flops()
        if self.downsample is not None:
            flops += self.downsample.flops()
        return flops

    def params(self):
        """Sum of per-block parameter counts plus the optional downsample layer."""
        params = 0
        for blk in self.blocks:
            params += blk.params()
        if self.downsample is not None:
            params += self.downsample.params()
        return params
class RSTB(nn.Module):
    """Residual Swin Transformer Block (RSTB).

    A BasicLayer (stack of Swin blocks) followed by a convolution, wrapped in
    a residual connection: out = embed(conv(unembed(body(x)))) + x.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
        img_size: Input image size.
        patch_size: Patch size.
        resi_connection: The convolutional block before residual connection.
    """

    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
                 img_size=224, patch_size=4, resi_connection='1conv'):
        super(RSTB, self).__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.residual_group = BasicLayer(dim=dim,
                                         input_resolution=input_resolution,
                                         depth=depth,
                                         num_heads=num_heads,
                                         window_size=window_size,
                                         mlp_ratio=mlp_ratio,
                                         qkv_bias=qkv_bias, qk_scale=qk_scale,
                                         drop=drop, attn_drop=attn_drop,
                                         drop_path=drop_path,
                                         norm_layer=norm_layer,
                                         downsample=downsample,
                                         use_checkpoint=use_checkpoint)
        if resi_connection == '1conv':
            self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
        elif resi_connection == '3conv':
            # to save parameters and memory: 3x3 -> 1x1 bottleneck -> 3x3
            self.conv = nn.Sequential(nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                      nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),
                                      nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                      nn.Conv2d(dim // 4, dim, 3, 1, 1))
        # NOTE(review): in_chans=0 is only stored by PatchEmbed/PatchUnEmbed and
        # unused by their forward paths — confirm intent.
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
            norm_layer=None)
        self.patch_unembed = PatchUnEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
            norm_layer=None)

    def forward(self, x, x_size):
        # tokens -> feature map -> conv -> tokens, plus the block-level residual
        return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x

    def flops(self):
        """Approximate MACs; the conv term assumes the '1conv' (3x3) variant."""
        flops = 0
        flops += self.residual_group.flops()
        H, W = self.input_resolution
        flops += H * W * self.dim * self.dim * 9
        flops += self.patch_embed.flops()
        flops += self.patch_unembed.flops()
        return flops

    def params(self):
        """Parameter count; the conv term assumes the '1conv' (3x3) variant."""
        params = 0
        params += self.residual_group.params()
        params += self.dim * self.dim * 9
        params += self.patch_embed.params()
        params += self.patch_unembed.params()
        return params
class PatchEmbed(nn.Module):
    r""" Image to Patch Embedding.

    Flattens a (B, C, Ph, Pw) feature map into a (B, Ph*Pw, C) token
    sequence, optionally followed by a normalization layer.

    Args:
        img_size (int): Image size.  Default: 224.
        patch_size (int): Patch token size.  Default: 4.
        in_chans (int): Number of input image channels.  Default: 3.
        embed_dim (int): Number of linear projection output channels.  Default: 96.
        norm_layer (nn.Module, optional): Normalization layer.  Default: None
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        grid = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = grid
        self.num_patches = grid[0] * grid[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        self.norm = norm_layer(embed_dim) if norm_layer is not None else None

    def forward(self, x):
        tokens = x.flatten(2).transpose(1, 2)  # B Ph*Pw C
        if self.norm is None:
            return tokens
        return self.norm(tokens)

    def flops(self):
        """Only the optional norm costs anything; the flatten is free."""
        if self.norm is None:
            return 0
        H, W = self.img_size
        return H * W * self.embed_dim

    def params(self):
        # LayerNorm affine: weight + bias, each of size embed_dim.
        return 0 if self.norm is None else 2 * self.embed_dim
class PatchUnEmbed(nn.Module):
    r""" Image to Patch Unembedding.

    Reshapes a (B, H*W, C) token sequence back into a (B, C, H, W) feature
    map; the spatial size is supplied at call time.

    Args:
        img_size (int): Image size.  Default: 224.
        patch_size (int): Patch token size.  Default: 4.
        in_chans (int): Number of input image channels.  Default: 3.
        embed_dim (int): Number of linear projection output channels.  Default: 96.
        norm_layer (nn.Module, optional): Normalization layer.  Default: None
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        grid = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = grid
        self.num_patches = grid[0] * grid[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim

    def forward(self, x, x_size):
        batch = x.shape[0]
        # (B, H*W, C) -> (B, C, H, W)
        return x.transpose(1, 2).view(batch, self.embed_dim, x_size[0], x_size[1])

    def flops(self):
        # pure reshape, no arithmetic
        return 0

    def params(self):
        # no learnable parameters
        return 0
class Upsample(nn.Sequential):
    """Upsample module.

    Builds a stack of (3x3 conv -> PixelShuffle) stages: log2(scale) stages
    of x2 shuffling for power-of-two scales, or one x3 stage for scale 3.

    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.
    """

    def __init__(self, scale, num_feat):
        stages = []
        if (scale & (scale - 1)) == 0:  # power of two
            for _ in range(int(math.log(scale, 2))):
                # conv expands channels 4x so PixelShuffle(2) keeps num_feat
                stages.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
                stages.append(nn.PixelShuffle(2))
        elif scale == 3:
            stages.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
            stages.append(nn.PixelShuffle(3))
        else:
            raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
        super(Upsample, self).__init__(*stages)
class UpsampleOneStep(nn.Sequential):
    """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
    Used in lightweight SR to save parameters.

    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.
        num_out_ch (int): Number of output channels after the pixel shuffle.
        input_resolution (tuple[int] | None): (H, W) of the input feature map;
            only needed by flops().
    """

    def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
        self.num_feat = num_feat
        self.input_resolution = input_resolution
        # kept so the cost estimates below can use the actual conv shape
        self.scale = scale
        self.num_out_ch = num_out_ch
        m = []
        m.append(nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1))
        m.append(nn.PixelShuffle(scale))
        super(UpsampleOneStep, self).__init__(*m)

    def flops(self):
        """MACs of the 3x3 conv; the PixelShuffle is a free reshape.

        Fix: the original hard-coded 3 output channels, ignoring both the
        scale factor and num_out_ch; the conv actually maps num_feat ->
        scale^2 * num_out_ch channels.
        """
        H, W = self.input_resolution
        return H * W * self.num_feat * (self.scale ** 2) * self.num_out_ch * 9

    def params(self):
        """Weight count of the 3x3 conv (bias excluded, matching the other
        params() methods in this file)."""
        return self.num_feat * (self.scale ** 2) * self.num_out_ch * 9
class SwinIR(nn.Module):
    r""" SwinIR
    A PyTorch impl of : `SwinIR: Image Restoration Using Swin Transformer`, based on Swin Transformer.

    Args:
        img_size (int | tuple(int)): Input image size. Default 64
        patch_size (int | tuple(int)): Patch size. Default: 1
        in_chans (int): Number of input image channels. Default: 3
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each Swin Transformer layer.
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. Default: 7
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
        drop_rate (float): Dropout rate. Default: 0
        attn_drop_rate (float): Attention dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
        upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction
        img_range: Image range. 1. or 255.
        upsampler: The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
        resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
    """

    def __init__(self, img_size=64, patch_size=1, in_chans=1,
                 embed_dim=96, depths=[6, 6, 6, 6], num_heads=[6, 6, 6, 6],
                 window_size=8, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
                 use_checkpoint=False, upscale=1, img_range=1., upsampler='', resi_connection='1conv',
                 **kwargs):
        super(SwinIR, self).__init__()
        num_in_ch = in_chans
        num_out_ch = in_chans
        num_feat = 64  # channel width of the reconstruction head
        self.img_range = img_range
        if in_chans == 3:
            # RGB input: subtract a per-channel dataset mean
            rgb_mean = (0.4488, 0.4371, 0.4040)
            self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
        else:
            self.mean = torch.zeros(1, 1, 1, 1)
        self.upscale = upscale
        self.upsampler = upsampler
        self.window_size = window_size
        #####################################################################################################
        ################################### 1, shallow feature extraction ###################################
        self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
        #####################################################################################################
        ################################### 2, deep feature extraction ######################################
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        self.num_features = embed_dim
        self.mlp_ratio = mlp_ratio
        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution
        # merge non-overlapping patches into image
        self.patch_unembed = PatchUnEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        # absolute position embedding (off by default; Swin relies on relative bias)
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=.02)
        self.pos_drop = nn.Dropout(p=drop_rate)
        # stochastic depth
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
        # build Residual Swin Transformer blocks (RSTB)
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = RSTB(dim=embed_dim,
                         input_resolution=(patches_resolution[0],
                                           patches_resolution[1]),
                         depth=depths[i_layer],
                         num_heads=num_heads[i_layer],
                         window_size=window_size,
                         mlp_ratio=self.mlp_ratio,
                         qkv_bias=qkv_bias, qk_scale=qk_scale,
                         drop=drop_rate, attn_drop=attn_drop_rate,
                         drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],  # no impact on SR results
                         norm_layer=norm_layer,
                         downsample=None,
                         use_checkpoint=use_checkpoint,
                         img_size=img_size,
                         patch_size=patch_size,
                         resi_connection=resi_connection
                         )
            self.layers.append(layer)
        self.norm = norm_layer(self.num_features)
        # build the last conv layer in deep feature extraction
        if resi_connection == '1conv':
            self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
        elif resi_connection == '3conv':
            # to save parameters and memory
            self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
                                                 nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                                 nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
                                                 nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                                 nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))
        #####################################################################################################
        ################################ 3, high quality image reconstruction ################################
        if self.upsampler == 'pixelshuffle':
            # for classical SR
            self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
                                                      nn.LeakyReLU(inplace=True))
            self.upsample = Upsample(upscale, num_feat)
            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
        elif self.upsampler == 'pixelshuffledirect':
            # for lightweight SR (to save parameters)
            self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
                                            (patches_resolution[0], patches_resolution[1]))
        elif self.upsampler == 'nearest+conv':
            # for real-world SR (less artifacts)
            assert self.upscale == 4, 'only support x4 now.'
            self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
                                                      nn.LeakyReLU(inplace=True))
            self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
            self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        else:
            # for image denoising and JPEG compression artifact reduction
            self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Truncated-normal init for Linear weights; zeros/ones for LayerNorm."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # parameter names to exclude from weight decay in the optimizer setup
        return {'absolute_pos_embed'}

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        return {'relative_position_bias_table'}

    def check_image_size(self, x):
        """Reflect-pad H and W (right/bottom) up to a multiple of window_size."""
        _, _, h, w = x.size()
        mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
        mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
        x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
        return x

    def forward_features(self, x):
        """Deep feature extraction: patch-embed -> RSTB stack -> norm -> unembed."""
        x_size = (x.shape[2], x.shape[3])
        x = self.patch_embed(x)  # (B, H*W, C)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)
        for layer in self.layers:
            x = layer(x, x_size)
        x = self.norm(x)  # B L C
        x = self.patch_unembed(x, x_size)  # back to (B, C, H, W)
        return x

    def forward(self, x):
        """x: (B, in_chans, H, W) -> restored image of the same range, cropped
        back to (upscale*H, upscale*W) after window padding."""
        H, W = x.shape[2:]
        x = self.check_image_size(x)
        self.mean = self.mean.type_as(x)
        x = (x - self.mean) * self.img_range
        if self.upsampler == 'pixelshuffle':
            # for classical SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.conv_before_upsample(x)
            x = self.conv_last(self.upsample(x))
        elif self.upsampler == 'pixelshuffledirect':
            # for lightweight SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.upsample(x)
        elif self.upsampler == 'nearest+conv':
            # for real-world SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.conv_before_upsample(x)
            x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
            x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
            x = self.conv_last(self.lrelu(self.conv_hr(x)))
        else:
            # for image denoising and JPEG compression artifact reduction
            x_first = self.conv_first(x)
            res = self.conv_after_body(self.forward_features(x_first)) + x_first
            x = x + self.conv_last(res)  # global residual: the network predicts a correction
        x = x / self.img_range + self.mean
        return x[:, :, :H*self.upscale, :W*self.upscale]  # drop the window padding

    def flops(self):
        """Approximate MACs.

        NOTE(review): the first/last conv terms assume 1 input and 1 output
        channel and the denoising head — confirm before using for other
        configurations.
        """
        flops = 0
        H, W = self.patches_resolution
        flops += H * W * 1 * self.embed_dim * 9  # conv_first
        flops += self.patch_embed.flops()
        for i, layer in enumerate(self.layers):
            flops += layer.flops()
        flops += H * W * self.embed_dim * self.embed_dim * 9  # conv_after_body
        flops += H * W * self.embed_dim * 1 * 9  # conv_last
        return flops

    def params(self):
        """Approximate parameter count (same 1-in/1-out assumption as flops())."""
        params = 0
        params += 1 * self.embed_dim * 9
        params += self.patch_embed.params()
        for i, layer in enumerate(self.layers):
            params += layer.params()
        params += self.embed_dim * self.embed_dim * 9
        params += self.embed_dim * 1 * 9
        return params
if __name__ == '__main__':
    # Smoke test: build a SwinMR-configured SwinIR model and run one forward pass.
    from thop import profile
    from thop import clever_format
    import os
    batch = 1
    height = 256
    width = 256
    device = 'cuda'
    torch.cuda.empty_cache()
    # NOTE(review): CUDA_VISIBLE_DEVICES must be set before any CUDA
    # initialization to take effect — confirm ordering with empty_cache() above.
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"
    print('swinmr')
    model = SwinIR(upscale=1,
                   in_chans=1,
                   img_size=[256, 256],
                   window_size=8,
                   img_range=1.0,
                   depths=[6, 6, 6, 6, 6, 6],
                   embed_dim=180,
                   num_heads=[6, 6, 6, 6, 6, 6],
                   mlp_ratio=2.0,
                   upsampler='',
                   resi_connection='1conv',).to(device)
    # print(model)
    # print('FLOPs: {}G'.format(round((model.flops() * 1e-9),3)))
    # print('PARAMs: {}M'.format(round((model.params() * 1e-6), 3)))
    x = torch.randn((batch, 1, height, width)).to(device)
    print(f'Input shape: {x.shape}')
    with torch.no_grad():
        x = model(x)
    print(f'Output shape: {x.shape}')
    print('-------------------------------')
    # macs, params = profile(model, inputs=(x, ))
    # macs, params = clever_format([macs, params], "%.3f")
    # print(macs)
    # print(params)
# ==== SwinMR-main/models/loss.py ====
import torch
import torch.nn as nn
import torchvision
from torch.nn import functional as F
from torch import autograd as autograd
import math
"""
Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU(inplace)
(2*): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ReLU(inplace)
(4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(6): ReLU(inplace)
(7*): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(8): ReLU(inplace)
(9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(11): ReLU(inplace)
(12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(13): ReLU(inplace)
(14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(15): ReLU(inplace)
(16*): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(17): ReLU(inplace)
(18): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(19): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(20): ReLU(inplace)
(21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(22): ReLU(inplace)
(23): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(24): ReLU(inplace)
(25*): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(26): ReLU(inplace)
(27): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(29): ReLU(inplace)
(30): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(31): ReLU(inplace)
(32): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(33): ReLU(inplace)
(34*): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(35): ReLU(inplace)
(36): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
"""
# --------------------------------------------
# Perceptual loss
# --------------------------------------------
class VGGFeatureExtractor(nn.Module):
    """Frozen ImageNet-pretrained VGG19 feature extractor for perceptual losses.

    forward() returns either the activation of a single layer (when
    ``feature_layer`` is an int) or a list of activations at each requested
    index into ``vgg19.features`` (when it is a list, as in the layer map
    documented at the top of this file).
    """

    def __init__(self, feature_layer=[2,7,16,25,34], use_input_norm=True, use_range_norm=False):
        super(VGGFeatureExtractor, self).__init__()
        '''
        use_input_norm: If True, x: [0, 1] --> (x - mean) / std
        use_range_norm: If True, x: [0, 1] --> x: [-1, 1]
        '''
        model = torchvision.models.vgg19(pretrained=True)  # downloads ImageNet weights on first use
        self.use_input_norm = use_input_norm
        self.use_range_norm = use_range_norm
        if self.use_input_norm:
            # ImageNet channel statistics; buffers so they follow .to(device)/.cuda()
            mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
            std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
            self.register_buffer('mean', mean)
            self.register_buffer('std', std)
        self.list_outputs = isinstance(feature_layer, list)
        if self.list_outputs:
            # Split vgg19.features into consecutive chunks, each ending at one
            # of the requested layer indices, so forward() can tap every boundary.
            self.features = nn.Sequential()
            feature_layer = [-1] + feature_layer
            for i in range(len(feature_layer)-1):
                self.features.add_module('child'+str(i), nn.Sequential(*list(model.features.children())[(feature_layer[i]+1):(feature_layer[i+1]+1)]))
        else:
            self.features = nn.Sequential(*list(model.features.children())[:(feature_layer + 1)])
        print(self.features)
        # Freeze: the extractor is a fixed loss network, never trained.
        for k, v in self.features.named_parameters():
            v.requires_grad = False

    def forward(self, x):
        """Return VGG feature(s) of x.

        x is expected in [0, 1] (or [-1, 1] when use_range_norm is set).
        """
        if self.use_range_norm:
            x = (x + 1.0) / 2.0
        if self.use_input_norm:
            x = (x - self.mean) / self.std
        if self.list_outputs:
            output = []
            for child_model in self.features.children():
                x = child_model(x)
                output.append(x.clone())
            return output
        else:
            return self.features(x)
class PerceptualLoss(nn.Module):
    """VGG perceptual loss: a weighted sum of per-layer distances between the
    VGG19 features of the prediction and of the (detached) ground truth."""

    def __init__(self, feature_layer=[2,7,16,25,34], weights=[0.1,0.1,1.0,1.0,1.0], lossfn_type='l1', use_input_norm=True, use_range_norm=False):
        super(PerceptualLoss, self).__init__()
        self.vgg = VGGFeatureExtractor(feature_layer=feature_layer, use_input_norm=use_input_norm, use_range_norm=use_range_norm)
        self.lossfn_type = lossfn_type
        self.weights = weights
        self.lossfn = nn.L1Loss() if self.lossfn_type == 'l1' else nn.MSELoss()
        print(f'feature_layer: {feature_layer} with weights: {weights}')

    def forward(self, x, gt):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).
            gt (Tensor): Ground-truth tensor with shape (n, c, h, w).

        Returns:
            Tensor: weighted perceptual loss (scalar).
        """
        feats_x = self.vgg(x)
        feats_gt = self.vgg(gt.detach())
        if not isinstance(feats_x, list):
            # single tapped layer
            return self.lossfn(feats_x, feats_gt.detach())
        total = 0.0
        for weight, fx, fgt in zip(self.weights, feats_x, feats_gt):
            total += weight * self.lossfn(fx, fgt)
        return total
# --------------------------------------------
# GAN loss: gan, ragan
# --------------------------------------------
class GANLoss(nn.Module):
    """Adversarial loss supporting 'gan'/'ragan' (BCE), 'lsgan' (MSE), 'wgan' and 'softplusgan'."""
    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):
        super(GANLoss, self).__init__()
        self.gan_type = gan_type.lower()
        self.real_label_val = real_label_val
        self.fake_label_val = fake_label_val

        def _wgan_loss(pred, is_real):
            # WGAN critic objective; `is_real` is a boolean, not a label tensor.
            return -1 * pred.mean() if is_real else pred.mean()

        def _softplusgan_loss(pred, is_real):
            # Non-saturating softplus formulation; `is_real` is a boolean.
            return F.softplus(-pred).mean() if is_real else F.softplus(pred).mean()

        if self.gan_type in ('gan', 'ragan'):
            self.loss = nn.BCEWithLogitsLoss()
        elif self.gan_type == 'lsgan':
            self.loss = nn.MSELoss()
        elif self.gan_type == 'wgan':
            self.loss = _wgan_loss
        elif self.gan_type == 'softplusgan':
            self.loss = _softplusgan_loss
        else:
            raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))

    def get_target_label(self, input, target_is_real):
        """Return the target for `input`: a boolean for wgan/softplusgan, else a filled label tensor."""
        if self.gan_type in ['wgan', 'softplusgan']:
            return target_is_real
        fill_value = self.real_label_val if target_is_real else self.fake_label_val
        return torch.empty_like(input).fill_(fill_value)

    def forward(self, input, target_is_real):
        """Compute the configured GAN loss for discriminator predictions `input`."""
        return self.loss(input, self.get_target_label(input, target_is_real))
# --------------------------------------------
# TV loss
# --------------------------------------------
class TVLoss(nn.Module):
    """Total variation loss (anisotropic, squared differences).

    https://github.com/jxgu1016/Total_Variation_Loss.pytorch

    Args:
        tv_loss_weight (int): scalar multiplier applied to the TV term.
    """
    def __init__(self, tv_loss_weight=1):
        super(TVLoss, self).__init__()
        self.tv_loss_weight = tv_loss_weight

    def forward(self, x):
        num_samples = x.size()[0]
        height, width = x.size()[2], x.size()[3]
        # Squared finite differences along H and W.
        diff_h = (x[:, :, 1:, :] - x[:, :, :height - 1, :]).pow(2).sum()
        diff_w = (x[:, :, :, 1:] - x[:, :, :, :width - 1]).pow(2).sum()
        # Normalise each direction by its element count.
        norm_h = self.tensor_size(x[:, :, 1:, :])
        norm_w = self.tensor_size(x[:, :, :, 1:])
        return self.tv_loss_weight * 2 * (diff_h / norm_h + diff_w / norm_w) / num_samples

    @staticmethod
    def tensor_size(t):
        # Number of elements per sample (C*H*W).
        return t.size()[1] * t.size()[2] * t.size()[3]
# --------------------------------------------
# Charbonnier loss
# --------------------------------------------
class CharbonnierLoss(nn.Module):
    """Charbonnier loss: mean(sqrt((x - y)^2 + eps)), a smooth, differentiable L1 surrogate."""
    def __init__(self, eps=1e-9):
        super(CharbonnierLoss, self).__init__()
        self.eps = eps  # small constant keeping the sqrt differentiable at zero

    def forward(self, x, y):
        residual = x - y
        return torch.sqrt(residual * residual + self.eps).mean()
def r1_penalty(real_pred, real_img):
    """R1 regularization for discriminator. The core idea is to
    penalize the gradient on real data alone: when the
    generator distribution produces the true data distribution
    and the discriminator is equal to 0 on the data manifold, the
    gradient penalty ensures that the discriminator cannot create
    a non-zero gradient orthogonal to the data manifold without
    suffering a loss in the GAN game.

    Ref:
        Eq. 9 in Which training methods for GANs do actually converge.
    """
    # Gradient of the summed prediction w.r.t. the real images (graph kept for backprop).
    (grad_real,) = autograd.grad(
        outputs=real_pred.sum(), inputs=real_img, create_graph=True)
    # Mean over the batch of the per-sample squared gradient norms.
    return grad_real.pow(2).view(grad_real.shape[0], -1).sum(1).mean()
def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
    """StyleGAN2 path-length regularization.

    Returns:
        (path_penalty, mean of current path lengths, updated running mean).
    """
    # Per-pixel noise scaled by 1/sqrt(H*W) so the projection magnitude is resolution-independent.
    noise = torch.randn_like(fake_img) / math.sqrt(
        fake_img.shape[2] * fake_img.shape[3])
    (grad,) = autograd.grad(
        outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True)
    # Per-sample Jacobian-vector-product norm, averaged over the latent axis.
    path_lengths = grad.pow(2).sum(2).mean(1).sqrt()
    # Exponential moving average of the path length.
    running_mean = mean_path_length + decay * (
        path_lengths.mean() - mean_path_length)
    penalty = (path_lengths - running_mean).pow(2).mean()
    return penalty, path_lengths.detach().mean(), running_mean.detach()
def gradient_penalty_loss(discriminator, real_data, fake_data, weight=None):
    """Calculate gradient penalty for wgan-gp.

    Args:
        discriminator (nn.Module): Network for the discriminator.
        real_data (Tensor): Real input data.
        fake_data (Tensor): Fake input data.
        weight (Tensor): Weight tensor. Default: None.

    Returns:
        Tensor: A tensor for gradient penalty.
    """
    batch_size = real_data.size(0)
    # One interpolation coefficient per sample, broadcast over C/H/W.
    # (Modernized: avoids the extra copy of `real_data.new_tensor(torch.rand(...))`.)
    alpha = torch.rand(batch_size, 1, 1, 1, device=real_data.device, dtype=real_data.dtype)

    # interpolate between real_data and fake_data.
    # Modernized: `autograd.Variable` is deprecated; create a leaf that requires grad.
    interpolates = (alpha * real_data + (1. - alpha) * fake_data).detach().requires_grad_(True)

    disc_interpolates = discriminator(interpolates)
    gradients = autograd.grad(
        outputs=disc_interpolates,
        inputs=interpolates,
        grad_outputs=torch.ones_like(disc_interpolates),
        create_graph=True,
        retain_graph=True,
        only_inputs=True)[0]

    if weight is not None:
        gradients = gradients * weight

    # Penalize deviation of the per-position gradient norm (over channels) from 1.
    gradients_penalty = ((gradients.norm(2, dim=1) - 1)**2).mean()
    if weight is not None:
        # Renormalise so the penalty scale is independent of the mean weight.
        gradients_penalty /= torch.mean(weight)

    return gradients_penalty
# PyTorch
class BinaryDiceLoss(nn.Module):
    """Dice loss of binary class
    Args:
        smooth: A float number to smooth loss, and avoid NaN error, default: 1
        p: Denominator value: \sum{x^p} + \sum{y^p}, default: 2
        predict: A tensor of shape [N, *]
        target: A tensor of shape same with predict
        reduction: Reduction method to apply, return mean over batch if 'mean',
            return sum if 'sum', return a tensor of shape [N,] if 'none'
    Returns:
        Loss tensor according to arg reduction
    Raise:
        Exception if unexpected reduction
    """
    def __init__(self, smooth=1, p=2, reduction='mean'):
        super(BinaryDiceLoss, self).__init__()
        self.smooth = smooth
        self.p = p
        self.reduction = reduction

    def forward(self, predict, target):
        assert predict.shape[0] == target.shape[0], "predict & target batch size don't match"
        # Flatten everything but the batch axis.
        pred_flat = predict.contiguous().view(predict.shape[0], -1)
        tgt_flat = target.contiguous().view(target.shape[0], -1)

        # Smoothed Dice coefficient per sample.
        intersection = (pred_flat * tgt_flat).sum(dim=1) + self.smooth
        cardinality = (pred_flat.pow(self.p) + tgt_flat.pow(self.p)).sum(dim=1) + self.smooth
        per_sample_loss = 1 - intersection / cardinality

        if self.reduction == 'mean':
            return per_sample_loss.mean()
        if self.reduction == 'sum':
            return per_sample_loss.sum()
        if self.reduction == 'none':
            return per_sample_loss
        raise Exception('Unexpected reduction {}'.format(self.reduction))
def mask_to_onehot(net_output, gt):
    """
    net_output must be (b, c, x, y(, z)))
    mask with shape (b, 1, x, y(, z)) OR shape (b, x, y(, z)))

    Returns gt unchanged when it already matches net_output's shape
    (assumed to be one-hot), otherwise scatters the label indices into
    a fresh one-hot tensor shaped like net_output.
    """
    pred_shape = net_output.shape
    gt_shape = gt.shape
    with torch.no_grad():
        # Insert a singleton channel axis when gt lacks one.
        if len(pred_shape) != len(gt_shape):
            gt = gt.view((gt_shape[0], 1, *gt_shape[1:]))
        if all(i == j for i, j in zip(net_output.shape, gt.shape)):
            # if this is the case then gt is probably already a one hot encoding
            y_onehot = gt
        else:
            indices = gt.long()
            y_onehot = torch.zeros(pred_shape)
            if net_output.device.type == "cuda":
                y_onehot = y_onehot.cuda(net_output.device.index)
            # Place a 1 at each label's class channel.
            y_onehot.scatter_(1, indices, 1)
    return y_onehot
class DiceLoss(nn.Module):
    """Dice loss, need one hot encode input
    Args:
        weight: An array of shape [num_classes,]
        ignore_index: class index to ignore
        predict: A tensor of shape [N, C, *]
        target: A tensor of same shape with predict
        other args pass to BinaryDiceLoss
    Return:
        same as BinaryDiceLoss
    """
    def __init__(self, weight=None, ignore_index=None, **kwargs):
        super(DiceLoss, self).__init__()
        self.kwargs = kwargs
        self.weight = weight
        self.ignore_index = ignore_index

    def forward(self, predict, target):
        assert predict.shape == target.shape, 'predict & target shape do not match'
        dice = BinaryDiceLoss(**self.kwargs)
        total_loss = 0
        # Average the per-class binary Dice losses (ignore_index is skipped but
        # still counted in the divisor, matching the original behavior).
        for i in range(target.shape[1]):
            if i != self.ignore_index:
                dice_loss = dice(predict[:, i], target[:, i])
                if self.weight is not None:
                    assert self.weight.shape[0] == target.shape[1], \
                        'Expect weight shape [{}], get[{}]'.format(target.shape[1], self.weight.shape[0])
                    # BUGFIX: was `self.weights[i]` — the attribute is `self.weight`,
                    # so passing a weight previously raised AttributeError.
                    dice_loss *= self.weight[i]
                total_loss += dice_loss
        return total_loss/target.shape[1]
SwinMR | SwinMR-main/models/network_feature.py | import torch
import torch.nn as nn
import torchvision
"""
# --------------------------------------------
# VGG Feature Extractor
# --------------------------------------------
"""
# --------------------------------------------
# VGG features
# Assume input range is [0, 1]
# --------------------------------------------
class VGGFeatureExtractor(nn.Module):
    """Truncated pretrained VGG-19 used as a fixed (frozen) feature extractor."""
    def __init__(self,
                 feature_layer=34,
                 use_bn=False,
                 use_input_norm=True,
                 device=torch.device('cpu')):
        super(VGGFeatureExtractor, self).__init__()
        # Optionally use the batch-norm variant of VGG-19.
        if use_bn:
            model = torchvision.models.vgg19_bn(pretrained=True)
        else:
            model = torchvision.models.vgg19(pretrained=True)
        self.use_input_norm = use_input_norm
        if self.use_input_norm:
            # ImageNet normalisation constants, registered as buffers so they move with the module.
            mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
            # [0.485-1, 0.456-1, 0.406-1] if input in range [-1,1]
            std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)
            # [0.229*2, 0.224*2, 0.225*2] if input in range [-1,1]
            self.register_buffer('mean', mean)
            self.register_buffer('std', std)
        # Keep layers of model.features up to and including index `feature_layer`.
        self.features = nn.Sequential(*list(model.features.children())[:(feature_layer + 1)])
        # No need to BP to variable
        for k, v in self.features.named_parameters():
            v.requires_grad = False
    def forward(self, x):
        # Normalise (expects x in [0, 1] when use_input_norm) and return the feature map.
        if self.use_input_norm:
            x = (x - self.mean) / self.std
        output = self.features(x)
        return output
| 1,594 | 32.93617 | 93 | py |
SwinMR | SwinMR-main/models/basicblock.py | from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
'''
# --------------------------------------------
# Advanced nn.Sequential
# https://github.com/xinntao/BasicSR
# --------------------------------------------
'''
def sequential(*args):
    """Advanced nn.Sequential.

    Flattens any nn.Sequential arguments into their children and wraps
    everything in a single nn.Sequential. A single non-Sequential module
    is returned unchanged.

    Args:
        nn.Sequential, nn.Module
    Returns:
        nn.Sequential
    """
    if len(args) == 1:
        if isinstance(args[0], OrderedDict):
            raise NotImplementedError('sequential does not support OrderedDict input.')
        return args[0]  # No sequential is needed.
    flattened = []
    for item in args:
        if isinstance(item, nn.Sequential):
            # Splice the children in directly instead of nesting.
            flattened.extend(item.children())
        elif isinstance(item, nn.Module):
            flattened.append(item)
    return nn.Sequential(*flattened)
'''
# --------------------------------------------
# Useful blocks
# https://github.com/xinntao/BasicSR
# --------------------------------
# conv + normaliation + relu (conv)
# (PixelUnShuffle)
# (ConditionalBatchNorm2d)
# concat (ConcatBlock)
# sum (ShortcutBlock)
# resblock (ResBlock)
# Channel Attention (CA) Layer (CALayer)
# Residual Channel Attention Block (RCABlock)
# Residual Channel Attention Group (RCAGroup)
# Residual Dense Block (ResidualDenseBlock_5C)
# Residual in Residual Dense Block (RRDB)
# --------------------------------------------
'''
# --------------------------------------------
# return nn.Sequantial of (Conv + BN + ReLU)
# --------------------------------------------
def conv(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CBR', negative_slope=0.2):
    """Build a layer stack from a mode string; each character appends one layer.

    C/T: Conv2d / ConvTranspose2d        B/I: BatchNorm2d / InstanceNorm2d
    R/r: ReLU (inplace / not)            L/l: LeakyReLU (inplace / not)
    2/3/4: PixelShuffle x2/x3/x4         U/u/v: nearest Upsample x2/x3/x4
    M/A: MaxPool2d / AvgPool2d

    Returns:
        nn.Module: a single module, or an nn.Sequential of the listed layers.
    Raises:
        NotImplementedError: on an unknown mode character.
    """
    L = []
    for t in mode:
        if t == 'C':
            L.append(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias))
        elif t == 'T':
            L.append(nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias))
        elif t == 'B':
            L.append(nn.BatchNorm2d(out_channels, momentum=0.9, eps=1e-04, affine=True))
        elif t == 'I':
            L.append(nn.InstanceNorm2d(out_channels, affine=True))
        elif t == 'R':
            L.append(nn.ReLU(inplace=True))
        elif t == 'r':
            L.append(nn.ReLU(inplace=False))
        elif t == 'L':
            L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=True))
        elif t == 'l':
            L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=False))
        elif t == '2':
            L.append(nn.PixelShuffle(upscale_factor=2))
        elif t == '3':
            L.append(nn.PixelShuffle(upscale_factor=3))
        elif t == '4':
            L.append(nn.PixelShuffle(upscale_factor=4))
        elif t == 'U':
            L.append(nn.Upsample(scale_factor=2, mode='nearest'))
        elif t == 'u':
            L.append(nn.Upsample(scale_factor=3, mode='nearest'))
        elif t == 'v':
            L.append(nn.Upsample(scale_factor=4, mode='nearest'))
        elif t == 'M':
            L.append(nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=0))
        elif t == 'A':
            L.append(nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=0))
        else:
            # BUGFIX: original message was "'Undefined type: '.format(t)" which never
            # interpolated t; include the offending character in the error.
            raise NotImplementedError('Undefined type: {}'.format(t))
    return sequential(*L)
# --------------------------------------------
# inverse of pixel_shuffle
# --------------------------------------------
def pixel_unshuffle(input, upscale_factor):
    r"""Rearranges elements in a Tensor of shape :math:`(C, rH, rW)` to a
    tensor of shape :math:`(*, r^2C, H, W)` — the inverse of PixelShuffle.

    Authors:
        Zhaoyi Yan, https://github.com/Zhaoyi-Yan
        Kai Zhang, https://github.com/cszn/FFDNet
    Date:
        01/Jan/2019
    """
    num, channels, full_height, full_width = input.size()
    out_height = full_height // upscale_factor
    out_width = full_width // upscale_factor
    # Expose the r x r sub-grid of each output pixel as separate axes...
    blocks = input.contiguous().view(
        num, channels, out_height, upscale_factor,
        out_width, upscale_factor)
    # ...then fold those axes into the channel dimension (C, r, r ordering,
    # matching what PixelShuffle expects to invert).
    rearranged = blocks.permute(0, 1, 3, 5, 2, 4).contiguous()
    return rearranged.view(num, channels * upscale_factor ** 2, out_height, out_width)
class PixelUnShuffle(nn.Module):
    r"""Module wrapper around :func:`pixel_unshuffle`: rearranges a
    :math:`(C, rH, rW)` tensor into :math:`(*, r^2C, H, W)`.

    Authors:
        Zhaoyi Yan, https://github.com/Zhaoyi-Yan
        Kai Zhang, https://github.com/cszn/FFDNet
    Date:
        01/Jan/2019
    """
    def __init__(self, upscale_factor):
        super(PixelUnShuffle, self).__init__()
        self.upscale_factor = upscale_factor

    def forward(self, input):
        # Delegate to the functional implementation.
        return pixel_unshuffle(input, self.upscale_factor)

    def extra_repr(self):
        # Shown inside repr(module) between the parentheses.
        return 'upscale_factor={}'.format(self.upscale_factor)
# --------------------------------------------
# conditional batch norm
# https://github.com/pytorch/pytorch/issues/8985#issuecomment-405080775
# --------------------------------------------
class ConditionalBatchNorm2d(nn.Module):
    """Class-conditional BatchNorm: per-class affine parameters come from an embedding.

    https://github.com/pytorch/pytorch/issues/8985#issuecomment-405080775
    """
    def __init__(self, num_features, num_classes):
        super().__init__()
        self.num_features = num_features
        self.bn = nn.BatchNorm2d(num_features, affine=False)
        # Each embedding row stores [gamma | beta] for one class.
        self.embed = nn.Embedding(num_classes, num_features * 2)
        self.embed.weight.data[:, :num_features].normal_(1, 0.02)  # Initialise scale at N(1, 0.02)
        self.embed.weight.data[:, num_features:].zero_()  # Initialise bias at 0

    def forward(self, x, y):
        normed = self.bn(x)
        gamma, beta = self.embed(y).chunk(2, 1)
        gamma = gamma.view(-1, self.num_features, 1, 1)
        beta = beta.view(-1, self.num_features, 1, 1)
        return gamma * normed + beta
# --------------------------------------------
# Concat the output of a submodule to its input
# --------------------------------------------
class ConcatBlock(nn.Module):
    """Runs a submodule and concatenates its output with the input along channels."""
    def __init__(self, submodule):
        super(ConcatBlock, self).__init__()
        self.sub = submodule

    def forward(self, x):
        return torch.cat((x, self.sub(x)), dim=1)

    def __repr__(self):
        # Submodule repr with a 'concat' suffix marking the wrapper.
        return self.sub.__repr__() + 'concat'
# --------------------------------------------
# sum the output of a submodule to its input
# --------------------------------------------
class ShortcutBlock(nn.Module):
    """Residual wrapper: returns x + submodule(x)."""
    def __init__(self, submodule):
        super(ShortcutBlock, self).__init__()
        self.sub = submodule

    def forward(self, x):
        return x + self.sub(x)

    def __repr__(self):
        # Prefix every submodule line with '|' under an 'Identity +' header.
        body = self.sub.__repr__().replace('\n', '\n|')
        return 'Identity + \n|' + body
# --------------------------------------------
# Res Block: x + conv(relu(conv(x)))
# --------------------------------------------
class ResBlock(nn.Module):
    """Residual block: x + body(x), where body is built by `conv` from `mode` (default conv-ReLU-conv)."""
    def __init__(self, in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CRC', negative_slope=0.2):
        super(ResBlock, self).__init__()
        assert in_channels == out_channels, 'Only support in_channels==out_channels.'
        # A leading activation must be non-inplace so the residual input survives.
        if mode[0] in ['R', 'L']:
            mode = mode[0].lower() + mode[1:]
        self.res = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode, negative_slope)

    def forward(self, x):
        return x + self.res(x)
# --------------------------------------------
# simplified information multi-distillation block (IMDB)
# x + conv1(concat(split(relu(conv(x)))x3))
# --------------------------------------------
class IMDBlock(nn.Module):
    """
    Information Multi-Distillation Block: at each stage a fraction (d_rate) of the
    channels is "distilled" (kept aside) and the remainder is refined further;
    the four distilled slices are fused by a 1x1 conv and added to the input.

    @inproceedings{hui2019lightweight,
      title={Lightweight Image Super-Resolution with Information Multi-distillation Network},
      author={Hui, Zheng and Gao, Xinbo and Yang, Yunchu and Wang, Xiumei},
      booktitle={Proceedings of the 27th ACM International Conference on Multimedia (ACM MM)},
      pages={2024--2032},
      year={2019}
    }
    @inproceedings{zhang2019aim,
      title={AIM 2019 Challenge on Constrained Super-Resolution: Methods and Results},
      author={Kai Zhang and Shuhang Gu and Radu Timofte and others},
      booktitle={IEEE International Conference on Computer Vision Workshops},
      year={2019}
    }
    """
    def __init__(self, in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CL', d_rate=0.25, negative_slope=0.05):
        super(IMDBlock, self).__init__()
        # d_nc channels are distilled (kept) per stage; r_nc are passed on for refinement.
        self.d_nc = int(in_channels * d_rate)
        self.r_nc = int(in_channels - self.d_nc)
        assert mode[0] == 'C', 'convolutional layer first'
        self.conv1 = conv(in_channels, in_channels, kernel_size, stride, padding, bias, mode, negative_slope)
        self.conv2 = conv(self.r_nc, in_channels, kernel_size, stride, padding, bias, mode, negative_slope)
        self.conv3 = conv(self.r_nc, in_channels, kernel_size, stride, padding, bias, mode, negative_slope)
        # Final stage keeps only d_nc channels; bare conv (mode[0] == 'C'), no activation.
        self.conv4 = conv(self.r_nc, self.d_nc, kernel_size, stride, padding, bias, mode[0], negative_slope)
        # 1x1 fusion of the four distilled slices back to out_channels.
        self.conv1x1 = conv(self.d_nc*4, out_channels, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0], negative_slope=negative_slope)
    def forward(self, x):
        # Each stage splits its output into a distilled part (d*) and a remainder (r*)
        # that feeds the next conv.
        d1, r1 = torch.split(self.conv1(x), (self.d_nc, self.r_nc), dim=1)
        d2, r2 = torch.split(self.conv2(r1), (self.d_nc, self.r_nc), dim=1)
        d3, r3 = torch.split(self.conv3(r2), (self.d_nc, self.r_nc), dim=1)
        d4 = self.conv4(r3)
        # Fuse all distilled slices; identity skip connection.
        res = self.conv1x1(torch.cat((d1, d2, d3, d4), dim=1))
        return x + res
# --------------------------------------------
# Enhanced Spatial Attention (ESA)
# --------------------------------------------
class ESA(nn.Module):
    """Enhanced Spatial Attention: predicts a per-pixel gate in (0, 1) and rescales the input.

    Pipeline:
      conv1x1 -> [stride-2 conv -> maxpool(7,3) -> 3x conv3x3 -> bilinear upsample]
      + conv1x1 side branch -> conv1x1 -> sigmoid -> elementwise gate on x.

    NOTE: the `bias` argument is accepted for API symmetry but not used.
    """
    def __init__(self, channel=64, reduction=4, bias=True):
        super(ESA, self).__init__()
        self.r_nc = channel // reduction
        self.conv1 = nn.Conv2d(channel, self.r_nc, kernel_size=1)
        self.conv21 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=1)
        # Stride-2 conv (no padding) shrinks the map before the pooled attention stack.
        self.conv2 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, stride=2, padding=0)
        self.conv3 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
        self.conv4 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
        self.conv5 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
        self.conv6 = nn.Conv2d(self.r_nc, channel, kernel_size=1)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        reduced = self.conv1(x)
        attn = F.max_pool2d(self.conv2(reduced), kernel_size=7, stride=3)  # roughly 1/6 resolution
        attn = self.relu(self.conv3(attn))
        attn = self.relu(self.conv4(attn))
        # Restore full resolution and merge with the 1x1 side branch.
        attn = F.interpolate(self.conv5(attn), (x.size(2), x.size(3)), mode='bilinear', align_corners=False)
        attn = self.conv6(attn + self.conv21(reduced))
        return x.mul(self.sigmoid(attn))
class CFRB(nn.Module):
    """Feature distillation block with ESA attention.

    Each stage has a 1x1 "distillation" branch (conv*_d) whose output is kept,
    and a 3x3 residual refinement branch (conv*_r) whose output continues through
    the block. The four distilled slices are concatenated, fused by a 1x1 conv
    and gated by Enhanced Spatial Attention.
    """
    def __init__(self, in_channels=50, out_channels=50, kernel_size=3, stride=1, padding=1, bias=True, mode='CL', d_rate=0.5, negative_slope=0.05):
        super(CFRB, self).__init__()
        self.d_nc = int(in_channels * d_rate)
        self.r_nc = in_channels  # int(in_channels - self.d_nc)
        assert mode[0] == 'C', 'convolutional layer first'
        self.conv1_d = conv(in_channels, self.d_nc, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0])
        self.conv1_r = conv(in_channels, self.r_nc, kernel_size, stride, padding, bias=bias, mode=mode[0])
        self.conv2_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0])
        self.conv2_r = conv(self.r_nc, self.r_nc, kernel_size, stride, padding, bias=bias, mode=mode[0])
        self.conv3_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0])
        self.conv3_r = conv(self.r_nc, self.r_nc, kernel_size, stride, padding, bias=bias, mode=mode[0])
        # Final distillation uses a 3x3 conv (not 1x1) before fusion.
        self.conv4_d = conv(self.r_nc, self.d_nc, kernel_size, stride, padding, bias=bias, mode=mode[0])
        self.conv1x1 = conv(self.d_nc*4, out_channels, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0])
        # Shared activation built from the last mode character (e.g. 'L' = LeakyReLU).
        self.act = conv(mode=mode[-1], negative_slope=negative_slope)
        self.esa = ESA(in_channels, reduction=4, bias=True)
    def forward(self, x):
        # Stage pattern: keep a distilled slice, then refine x with a local residual.
        d1 = self.conv1_d(x)
        x = self.act(self.conv1_r(x)+x)
        d2 = self.conv2_d(x)
        x = self.act(self.conv2_r(x)+x)
        d3 = self.conv3_d(x)
        x = self.act(self.conv3_r(x)+x)
        x = self.conv4_d(x)
        # Fuse all distilled slices and apply spatial attention.
        x = self.act(torch.cat([d1, d2, d3, x], dim=1))
        x = self.esa(self.conv1x1(x))
        return x
# --------------------------------------------
# Channel Attention (CA) Layer
# --------------------------------------------
class CALayer(nn.Module):
    """Channel Attention layer: squeeze (global average pool) + excite (bottleneck MLP + sigmoid)."""
    def __init__(self, channel=64, reduction=16):
        super(CALayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # 1x1 convs act as a per-channel fully-connected bottleneck.
        self.conv_fc = nn.Sequential(
            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
            nn.Sigmoid()
        )

    def forward(self, x):
        # Per-channel gate in (0, 1), broadcast over H and W.
        scale = self.conv_fc(self.avg_pool(x))
        return x * scale
# --------------------------------------------
# Residual Channel Attention Block (RCAB)
# --------------------------------------------
class RCABlock(nn.Module):
    """Residual Channel Attention Block: x + CA(body(x))."""
    def __init__(self, in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CRC', reduction=16, negative_slope=0.2):
        super(RCABlock, self).__init__()
        assert in_channels == out_channels, 'Only support in_channels==out_channels.'
        # A leading activation must be non-inplace so the residual input survives.
        if mode[0] in ['R','L']:
            mode = mode[0].lower() + mode[1:]
        self.res = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode, negative_slope)
        self.ca = CALayer(out_channels, reduction)

    def forward(self, x):
        return self.ca(self.res(x)) + x
# --------------------------------------------
# Residual Channel Attention Group (RG)
# --------------------------------------------
class RCAGroup(nn.Module):
    """Residual Group: nb RCABlocks followed by a tail conv, all inside a long skip connection."""
    def __init__(self, in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CRC', reduction=16, nb=12, negative_slope=0.2):
        super(RCAGroup, self).__init__()
        assert in_channels == out_channels, 'Only support in_channels==out_channels.'
        # Non-inplace leading activation keeps the residual input intact.
        if mode[0] in ['R','L']:
            mode = mode[0].lower() + mode[1:]
        blocks = [RCABlock(in_channels, out_channels, kernel_size, stride, padding, bias, mode, reduction, negative_slope)
                  for _ in range(nb)]
        blocks.append(conv(out_channels, out_channels, mode='C'))
        self.rg = nn.Sequential(*blocks)  # self.rg = ShortcutBlock(nn.Sequential(*RG))

    def forward(self, x):
        return self.rg(x) + x
# --------------------------------------------
# Residual Dense Block
# style: 5 convs
# --------------------------------------------
class ResidualDenseBlock_5C(nn.Module):
    """Residual Dense Block (5 convs): dense connectivity followed by a 0.2-scaled residual."""
    def __init__(self, nc=64, gc=32, kernel_size=3, stride=1, padding=1, bias=True, mode='CR', negative_slope=0.2):
        super(ResidualDenseBlock_5C, self).__init__()
        # gc: growth channel
        self.conv1 = conv(nc, gc, kernel_size, stride, padding, bias, mode, negative_slope)
        self.conv2 = conv(nc+gc, gc, kernel_size, stride, padding, bias, mode, negative_slope)
        self.conv3 = conv(nc+2*gc, gc, kernel_size, stride, padding, bias, mode, negative_slope)
        self.conv4 = conv(nc+3*gc, gc, kernel_size, stride, padding, bias, mode, negative_slope)
        # Final conv maps back to nc channels and drops the trailing activation (mode[:-1]).
        self.conv5 = conv(nc+4*gc, nc, kernel_size, stride, padding, bias, mode[:-1], negative_slope)
    def forward(self, x):
        # Dense connectivity: each conv sees the input plus all previous outputs.
        x1 = self.conv1(x)
        x2 = self.conv2(torch.cat((x, x1), 1))
        x3 = self.conv3(torch.cat((x, x1, x2), 1))
        x4 = self.conv4(torch.cat((x, x1, x2, x3), 1))
        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
        # Residual scaling by 0.2; in-place on x5, which is not reused elsewhere.
        return x5.mul_(0.2) + x
# --------------------------------------------
# Residual in Residual Dense Block
# 3x5c
# --------------------------------------------
class RRDB(nn.Module):
    """Residual-in-Residual Dense Block: three chained RDBs with a 0.2-scaled outer skip."""
    def __init__(self, nc=64, gc=32, kernel_size=3, stride=1, padding=1, bias=True, mode='CR', negative_slope=0.2):
        super(RRDB, self).__init__()
        self.RDB1 = ResidualDenseBlock_5C(nc, gc, kernel_size, stride, padding, bias, mode, negative_slope)
        self.RDB2 = ResidualDenseBlock_5C(nc, gc, kernel_size, stride, padding, bias, mode, negative_slope)
        self.RDB3 = ResidualDenseBlock_5C(nc, gc, kernel_size, stride, padding, bias, mode, negative_slope)

    def forward(self, x):
        out = self.RDB3(self.RDB2(self.RDB1(x)))
        # Scale the residual in place (out is not reused) before adding the skip.
        return out.mul_(0.2) + x
"""
# --------------------------------------------
# Upsampler
# Kai Zhang, https://github.com/cszn/KAIR
# --------------------------------------------
# upsample_pixelshuffle
# upsample_upconv
# upsample_convtranspose
# --------------------------------------------
"""
# --------------------------------------------
# conv + subp (+ relu)
# --------------------------------------------
def upsample_pixelshuffle(in_channels=64, out_channels=3, kernel_size=3, stride=1, padding=1, bias=True, mode='2R', negative_slope=0.2):
    """Conv expanding channels by scale^2 followed by PixelShuffle; mode[0] in '2'/'3'/'4' is the scale."""
    assert len(mode)<4 and mode[0] in ['2', '3', '4'], 'mode examples: 2, 2R, 2BR, 3, ..., 4BR.'
    scale = int(mode[0])
    return conv(in_channels, out_channels * (scale ** 2), kernel_size, stride, padding, bias, mode='C'+mode, negative_slope=negative_slope)
# --------------------------------------------
# nearest_upsample + conv (+ R)
# --------------------------------------------
def upsample_upconv(in_channels=64, out_channels=3, kernel_size=3, stride=1, padding=1, bias=True, mode='2R', negative_slope=0.2):
    """Nearest-neighbour upsample followed by a conv; mode[0] in '2'/'3'/'4' is the scale."""
    assert len(mode)<4 and mode[0] in ['2', '3', '4'], 'mode examples: 2, 2R, 2BR, 3, ..., 4BR'
    # Map the scale digit to the matching upsample+conv code understood by `conv`.
    upconv_code = {'2': 'UC', '3': 'uC', '4': 'vC'}[mode[0]]
    mode = mode.replace(mode[0], upconv_code)
    return conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode=mode, negative_slope=negative_slope)
# --------------------------------------------
# convTranspose (+ relu)
# --------------------------------------------
def upsample_convtranspose(in_channels=64, out_channels=3, kernel_size=2, stride=2, padding=0, bias=True, mode='2R', negative_slope=0.2):
    """Transposed-conv upsampler; kernel size and stride are both set to the scale digit mode[0]."""
    assert len(mode)<4 and mode[0] in ['2', '3', '4'], 'mode examples: 2, 2R, 2BR, 3, ..., 4BR.'
    # Note: the kernel_size/stride arguments are overridden by the scale.
    scale = int(mode[0])
    mode = mode.replace(mode[0], 'T')
    return conv(in_channels, out_channels, scale, scale, padding, bias, mode, negative_slope)
'''
# --------------------------------------------
# Downsampler
# Kai Zhang, https://github.com/cszn/KAIR
# --------------------------------------------
# downsample_strideconv
# downsample_maxpool
# downsample_avgpool
# --------------------------------------------
'''
# --------------------------------------------
# strideconv (+ relu)
# --------------------------------------------
def downsample_strideconv(in_channels=64, out_channels=64, kernel_size=2, stride=2, padding=0, bias=True, mode='2R', negative_slope=0.2):
    """Strided-conv downsampler; kernel size and stride are both set to the scale digit mode[0]."""
    assert len(mode)<4 and mode[0] in ['2', '3', '4'], 'mode examples: 2, 2R, 2BR, 3, ..., 4BR.'
    # Note: the kernel_size/stride arguments are overridden by the scale.
    scale = int(mode[0])
    mode = mode.replace(mode[0], 'C')
    return conv(in_channels, out_channels, scale, scale, padding, bias, mode, negative_slope)
# --------------------------------------------
# maxpooling + conv (+ relu)
# --------------------------------------------
def downsample_maxpool(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=0, bias=True, mode='2R', negative_slope=0.2):
    """MaxPool by the scale digit mode[0], then a conv stack for the remaining mode characters."""
    assert len(mode)<4 and mode[0] in ['2', '3'], 'mode examples: 2, 2R, 2BR, 3, ..., 3BR.'
    pool_size = int(mode[0])
    # After the replace, mode[0] == 'M' builds the pool and mode[1:] the conv tail.
    mode = mode.replace(mode[0], 'MC')
    pool = conv(kernel_size=pool_size, stride=pool_size, mode=mode[0], negative_slope=negative_slope)
    pool_tail = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode=mode[1:], negative_slope=negative_slope)
    return sequential(pool, pool_tail)
# --------------------------------------------
# averagepooling + conv (+ relu)
# --------------------------------------------
def downsample_avgpool(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='2R', negative_slope=0.2):
    """AvgPool by the scale digit mode[0], then a conv stack for the remaining mode characters."""
    assert len(mode)<4 and mode[0] in ['2', '3'], 'mode examples: 2, 2R, 2BR, 3, ..., 3BR.'
    pool_size = int(mode[0])
    # After the replace, mode[0] == 'A' builds the pool and mode[1:] the conv tail.
    mode = mode.replace(mode[0], 'AC')
    pool = conv(kernel_size=pool_size, stride=pool_size, mode=mode[0], negative_slope=negative_slope)
    pool_tail = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode=mode[1:], negative_slope=negative_slope)
    return sequential(pool, pool_tail)
'''
# --------------------------------------------
# NonLocalBlock2D:
# embedded_gaussian
# +W(softmax(thetaXphi)Xg)
# --------------------------------------------
'''
# --------------------------------------------
# non-local block with embedded_gaussian
# https://github.com/AlexHex7/Non-local_pytorch
# --------------------------------------------
class NonLocalBlock2D(nn.Module):
    """Non-local block with embedded Gaussian similarity:
    z = x + W(softmax(theta(x) @ phi(x)^T) @ g(x)).

    https://github.com/AlexHex7/Non-local_pytorch
    phi/g may be downsampled 2x to shrink the attention matrix.
    """
    def __init__(self, nc=64, kernel_size=1, stride=1, padding=0, bias=True, act_mode='B', downsample=False, downsample_mode='maxpool', negative_slope=0.2):
        super(NonLocalBlock2D, self).__init__()
        inter_nc = nc // 2  # embedding width: half the input channels
        self.inter_nc = inter_nc
        # W projects the attended features back to nc channels (with optional norm act_mode).
        self.W = conv(inter_nc, nc, kernel_size, stride, padding, bias, mode='C'+act_mode)
        self.theta = conv(nc, inter_nc, kernel_size, stride, padding, bias, mode='C')
        if downsample:
            if downsample_mode == 'avgpool':
                downsample_block = downsample_avgpool
            elif downsample_mode == 'maxpool':
                downsample_block = downsample_maxpool
            elif downsample_mode == 'strideconv':
                downsample_block = downsample_strideconv
            else:
                raise NotImplementedError('downsample mode [{:s}] is not found'.format(downsample_mode))
            # phi/g at half resolution: attention cost drops by 4x.
            self.phi = downsample_block(nc, inter_nc, kernel_size, stride, padding, bias, mode='2')
            self.g = downsample_block(nc, inter_nc, kernel_size, stride, padding, bias, mode='2')
        else:
            self.phi = conv(nc, inter_nc, kernel_size, stride, padding, bias, mode='C')
            self.g = conv(nc, inter_nc, kernel_size, stride, padding, bias, mode='C')
    def forward(self, x):
        '''
        :param x: (b, c, t, h, w)
        :return:
        '''
        batch_size = x.size(0)
        # Flatten spatial dims: g_x/theta_x -> (b, HW, inter_nc); phi_x stays (b, inter_nc, HW').
        g_x = self.g(x).view(batch_size, self.inter_nc, -1)
        g_x = g_x.permute(0, 2, 1)
        theta_x = self.theta(x).view(batch_size, self.inter_nc, -1)
        theta_x = theta_x.permute(0, 2, 1)
        phi_x = self.phi(x).view(batch_size, self.inter_nc, -1)
        # Pairwise similarity + softmax over the key axis = embedded-Gaussian attention.
        f = torch.matmul(theta_x, phi_x)
        f_div_C = F.softmax(f, dim=-1)
        y = torch.matmul(f_div_C, g_x)
        y = y.permute(0, 2, 1).contiguous()
        # Restore the original spatial layout before the output projection.
        y = y.view(batch_size, self.inter_nc, *x.size()[2:])
        W_y = self.W(y)
        # Residual connection.
        z = W_y + x
        return z
| 24,138 | 39.775338 | 160 | py |
SwinMR | SwinMR-main/models/model_swinmr_pi.py | '''
# -----------------------------------------
Model
SwinMR (PI) m.1.3
by Jiahao Huang (j.huang21@imperial.ac.uk)
Thanks:
https://github.com/JingyunLiang/SwinIR
https://github.com/microsoft/Swin-Transformer
# -----------------------------------------
'''
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.optim import lr_scheduler
from torch.optim import Adam, AdamW
from models.select_network import define_G
from models.model_base import ModelBase
from models.loss import CharbonnierLoss, PerceptualLoss
from models.loss_ssim import SSIMLoss
from utils.utils_model import test_mode
from utils.utils_regularizers import regularizer_orth, regularizer_clip
from utils.utils_swinmr import *
import matplotlib.pyplot as plt
import einops
from math import ceil
import copy
class MRI_SwinMR_PI(ModelBase):
    """SwinMR training/testing wrapper (parallel-imaging variant).

    Manages a SwinIR-style generator ``netG`` (plus an optional EMA copy
    ``netE``), a combined image/frequency/perceptual loss weighted by coil
    sensitivity maps, optimisation, checkpointing, and window-size
    padding/cropping for inference.
    """

    def __init__(self, opt):
        super(MRI_SwinMR_PI, self).__init__(opt)
        # ------------------------------------
        # define network
        # ------------------------------------
        self.opt_train = self.opt['train']       # training options
        self.opt_dataset = self.opt['datasets']
        self.netG = define_G(opt)
        self.netG = self.model_to_device(self.netG)
        if self.opt_train['freeze_patch_embedding']:
            # freeze the Swin patch-embedding layer
            for para in self.netG.module.patch_embed.parameters():
                para.requires_grad = False
            print("Patch Embedding Frozen (Requires Grad)!")
        if self.opt_train['E_decay'] > 0:
            # exponential-moving-average copy of the generator
            self.netE = define_G(opt).to(self.device).eval()

    """
    # ----------------------------------------
    # Preparation before training with data
    # Save model during training
    # ----------------------------------------
    """

    def init_train(self):
        """Prepare everything needed before the training loop starts."""
        self.load()                     # load pretrained model(s)
        self.netG.train()               # training mode (affects BN/dropout)
        self.define_loss()
        self.define_optimizer()
        self.load_optimizers()
        self.define_scheduler()
        self.log_dict = OrderedDict()   # per-step logging

    def load(self):
        """Load pre-trained G (and, if EMA is enabled, E) weights."""
        load_path_G = self.opt['path']['pretrained_netG']
        if load_path_G is not None:
            print('Loading model for G [{:s}] ...'.format(load_path_G))
            self.load_network(load_path_G, self.netG, strict=self.opt_train['G_param_strict'], param_key='params')
        load_path_E = self.opt['path']['pretrained_netE']
        if self.opt_train['E_decay'] > 0:
            if load_path_E is not None:
                print('Loading model for E [{:s}] ...'.format(load_path_E))
                self.load_network(load_path_E, self.netE, strict=self.opt_train['E_param_strict'], param_key='params_ema')
            else:
                # no checkpoint for E: start the EMA from the current G weights
                print('Copying model for E ...')
                self.update_E(0)
            self.netE.eval()

    def load_optimizers(self):
        """Optionally resume the G optimizer state from a checkpoint."""
        load_path_optimizerG = self.opt['path']['pretrained_optimizerG']
        if load_path_optimizerG is not None and self.opt_train['G_optimizer_reuse']:
            print('Loading optimizerG [{:s}] ...'.format(load_path_optimizerG))
            self.load_optimizer(load_path_optimizerG, self.G_optimizer)

    def save(self, iter_label):
        """Save G (and optionally E / optimizer) checkpoints tagged with ``iter_label``."""
        self.save_network(self.save_dir, self.netG, 'G', iter_label)
        if self.opt_train['E_decay'] > 0:
            self.save_network(self.save_dir, self.netE, 'E', iter_label)
        if self.opt_train['G_optimizer_reuse']:
            self.save_optimizer(self.save_dir, self.G_optimizer, 'optimizerG', iter_label)

    def define_loss(self):
        """Build the base reconstruction loss and the perceptual loss."""
        G_lossfn_type = self.opt_train['G_lossfn_type']
        if G_lossfn_type == 'l1':
            self.G_lossfn = nn.L1Loss().to(self.device)
        elif G_lossfn_type == 'l2':
            self.G_lossfn = nn.MSELoss().to(self.device)
        elif G_lossfn_type == 'l2sum':
            self.G_lossfn = nn.MSELoss(reduction='sum').to(self.device)
        elif G_lossfn_type == 'ssim':
            self.G_lossfn = SSIMLoss().to(self.device)
        elif G_lossfn_type == 'charbonnier':
            self.G_lossfn = CharbonnierLoss(self.opt_train['G_charbonnier_eps']).to(self.device)
        else:
            raise NotImplementedError('Loss type [{:s}] is not found.'.format(G_lossfn_type))
        self.G_lossfn_weight = self.opt_train['G_lossfn_weight']
        self.perceptual_lossfn = PerceptualLoss().to(self.device)

    def total_loss(self):
        """Weighted sum of coil-wise image loss, k-space loss and perceptual loss.

        NOTE(review): the number of coils is hard-coded to 12 here and must
        match the channel count of the sensitivity maps ``self.SM``.
        """
        self.alpha = self.opt_train['alpha']
        self.beta = self.opt_train['beta']
        self.gamma = self.opt_train['gamma']
        # H (b,1,h,w) -> (b,12,h,w), weighted by the sensitivity maps
        self.H_multi = torch.mul(self.SM, self.H.repeat(1, 12, 1, 1))
        self.E_multi = torch.mul(self.SM, self.E.repeat(1, 12, 1, 1))
        self.H_k_real, self.H_k_imag = fft_map(self.H_multi)
        self.E_k_real, self.E_k_imag = fft_map(self.E_multi)
        self.loss_image = self.G_lossfn(self.E_multi, self.H_multi)
        self.loss_freq = (self.G_lossfn(self.E_k_real, self.H_k_real) + self.G_lossfn(self.E_k_imag, self.H_k_imag)) / 2
        self.loss_perc = self.perceptual_lossfn(self.E, self.H)
        return self.alpha * self.loss_image + self.beta * self.loss_freq + self.gamma * self.loss_perc

    def define_optimizer(self):
        """Create the Adam/AdamW optimizer over trainable G parameters."""
        G_optim_params = []
        for k, v in self.netG.named_parameters():
            if v.requires_grad:
                G_optim_params.append(v)
            else:
                print('Params [{:s}] will not optimize.'.format(k))
        # G_optim_params already contains only requires_grad parameters, so the
        # previous `filter(lambda p: p.requires_grad, ...)` was a no-op and is dropped.
        if self.opt_train['G_optimizer_type'] == 'adam':
            optimizer_cls = Adam
        elif self.opt_train['G_optimizer_type'] == 'adamw':
            optimizer_cls = AdamW
        else:
            raise NotImplementedError
        self.G_optimizer = optimizer_cls(G_optim_params, lr=self.opt_train['G_optimizer_lr'], weight_decay=self.opt_train['G_optimizer_wd'])
        if self.opt_train['freeze_patch_embedding']:
            print("Patch Embedding Frozen (Optimizer)!")

    def define_scheduler(self):
        """Attach a MultiStepLR schedule to the G optimizer."""
        self.schedulers.append(lr_scheduler.MultiStepLR(self.G_optimizer,
                                                        self.opt_train['G_scheduler_milestones'],
                                                        self.opt_train['G_scheduler_gamma']
                                                        ))

    """
    # ----------------------------------------
    # Optimization during training with data
    # Testing/evaluation
    # ----------------------------------------
    """

    def feed_data(self, data, need_H=True):
        """Move one batch (ground truth H, input L, sensitivity maps SM) to the device."""
        self.H = data['H'].to(self.device)
        self.L = data['L'].to(self.device)
        self.SM = data['SM'].to(self.device)

    def netG_forward(self):
        """Run the generator: reconstruction E = netG(L)."""
        self.E = self.netG(self.L)

    def optimize_parameters(self, current_step):
        """One optimisation step: forward, loss, backward, (clip,) step, log."""
        self.current_step = current_step
        self.G_optimizer.zero_grad()
        self.netG_forward()
        G_loss = self.G_lossfn_weight * self.total_loss()
        G_loss.backward()
        # `clip_grad_norm_` helps prevent the exploding-gradient problem.
        G_optimizer_clipgrad = self.opt_train['G_optimizer_clipgrad'] if self.opt_train['G_optimizer_clipgrad'] else 0
        if G_optimizer_clipgrad > 0:
            # fix: was `self.parameters()`, which this wrapper class does not
            # define -- clip the generator's parameters instead.
            torch.nn.utils.clip_grad_norm_(self.netG.parameters(), max_norm=self.opt_train['G_optimizer_clipgrad'], norm_type=2)
        self.G_optimizer.step()
        # optional regularizers (skipped on checkpoint-saving steps)
        G_regularizer_orthstep = self.opt_train['G_regularizer_orthstep'] if self.opt_train['G_regularizer_orthstep'] else 0
        if G_regularizer_orthstep > 0 and current_step % G_regularizer_orthstep == 0 and current_step % self.opt['train']['checkpoint_save'] != 0:
            self.netG.apply(regularizer_orth)
        G_regularizer_clipstep = self.opt_train['G_regularizer_clipstep'] if self.opt_train['G_regularizer_clipstep'] else 0
        if G_regularizer_clipstep > 0 and current_step % G_regularizer_clipstep == 0 and current_step % self.opt['train']['checkpoint_save'] != 0:
            self.netG.apply(regularizer_clip)
        # record log ('preceptual' typo kept for compatibility with log consumers)
        self.log_dict['G_loss'] = G_loss.item()
        self.log_dict['G_loss_image'] = self.loss_image.item()
        self.log_dict['G_loss_frequency'] = self.loss_freq.item()
        self.log_dict['G_loss_preceptual'] = self.loss_perc.item()
        if self.opt_train['E_decay'] > 0:
            self.update_E(self.opt_train['E_decay'])

    def record_loss_for_val(self):
        """Compute and log the loss on the current (validation) batch without stepping."""
        G_loss = self.G_lossfn_weight * self.total_loss()
        self.log_dict['G_loss'] = G_loss.item()
        self.log_dict['G_loss_image'] = self.loss_image.item()
        self.log_dict['G_loss_frequency'] = self.loss_freq.item()
        self.log_dict['G_loss_preceptual'] = self.loss_perc.item()

    def check_windowsize(self):
        """Mirror-pad H and L so height/width are multiples of the Swin window size."""
        self.window_size = self.opt['netG']['window_size']
        _, _, h_old, w_old = self.H.size()
        h_pad = ceil(h_old / self.window_size) * self.window_size - h_old
        w_pad = ceil(w_old / self.window_size) * self.window_size - w_old
        self.h_old = h_old
        self.w_old = w_old
        self.H = torch.cat([self.H, torch.flip(self.H, [2])], 2)[:, :, :h_old + h_pad, :]
        self.H = torch.cat([self.H, torch.flip(self.H, [3])], 3)[:, :, :, :w_old + w_pad]
        self.L = torch.cat([self.L, torch.flip(self.L, [2])], 2)[:, :, :h_old + h_pad, :]
        self.L = torch.cat([self.L, torch.flip(self.L, [3])], 3)[:, :, :, :w_old + w_pad]

    def recover_windowsize(self):
        """Crop L, H, E back to the original (pre-padding) spatial size."""
        self.L = self.L[..., :self.h_old, :self.w_old]
        self.H = self.H[..., :self.h_old, :self.w_old]
        self.E = self.E[..., :self.h_old, :self.w_old]

    def test(self):
        """Run inference with gradients disabled, then restore training mode."""
        self.netG.eval()
        with torch.no_grad():
            self.netG_forward()
        self.netG.train()

    def current_log(self):
        """Return the logging dict."""
        return self.log_dict

    def current_visuals(self, need_H=True):
        """Return the first L/E(/H) image of the batch on CPU."""
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach()[0].float().cpu()
        out_dict['E'] = self.E.detach()[0].float().cpu()
        if need_H:
            out_dict['H'] = self.H.detach()[0].float().cpu()
        return out_dict

    def current_visuals_gpu(self, need_H=True):
        """Return the first L/E(/H) image of the batch on the current device."""
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach()[0].float()
        out_dict['E'] = self.E.detach()[0].float()
        if need_H:
            out_dict['H'] = self.H.detach()[0].float()
        return out_dict

    def current_results(self, need_H=True):
        """Return the full L/E(/H) batches on CPU."""
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach().float().cpu()
        out_dict['E'] = self.E.detach().float().cpu()
        if need_H:
            out_dict['H'] = self.H.detach().float().cpu()
        return out_dict

    def current_results_gpu(self, need_H=True):
        """Return the full L/E(/H) batches on the current device."""
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach().float()
        out_dict['E'] = self.E.detach().float()
        if need_H:
            out_dict['H'] = self.H.detach().float()
        return out_dict

    """
    # ----------------------------------------
    # Information of netG
    # ----------------------------------------
    """

    def print_network(self):
        """Print a description of netG."""
        print(self.describe_network(self.netG))

    def print_params(self):
        """Print a description of netG's parameters."""
        print(self.describe_params(self.netG))

    def info_network(self):
        """Return a description of netG as a string."""
        return self.describe_network(self.netG)

    def info_params(self):
        """Return a description of netG's parameters as a string."""
        return self.describe_params(self.netG)
| 14,836 | 39.649315 | 176 | py |
SwinMR | SwinMR-main/models/model_swinmr.py | '''
# -----------------------------------------
Model
SwinMR m.1.3
by Jiahao Huang (j.huang21@imperial.ac.uk)
Thanks:
https://github.com/JingyunLiang/SwinIR
https://github.com/microsoft/Swin-Transformer
# -----------------------------------------
'''
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.optim import lr_scheduler
from torch.optim import Adam, AdamW
from models.select_network import define_G
from models.model_base import ModelBase
from models.loss import CharbonnierLoss, PerceptualLoss
from models.loss_ssim import SSIMLoss
from utils.utils_model import test_mode
from utils.utils_regularizers import regularizer_orth, regularizer_clip
from utils.utils_swinmr import *
import matplotlib.pyplot as plt
import einops
from math import ceil
import copy
class MRI_SwinMR_NPI(ModelBase):
    """SwinMR training/testing wrapper (no-parallel-imaging variant).

    Manages a SwinIR-style generator ``netG`` (plus an optional EMA copy
    ``netE``), a combined image/frequency/perceptual loss, optimisation,
    checkpointing, and window-size padding/cropping for inference.
    """

    def __init__(self, opt):
        super(MRI_SwinMR_NPI, self).__init__(opt)
        # ------------------------------------
        # define network
        # ------------------------------------
        self.opt_train = self.opt['train']       # training options
        self.opt_dataset = self.opt['datasets']
        self.netG = define_G(opt)
        self.netG = self.model_to_device(self.netG)
        if self.opt_train['freeze_patch_embedding']:
            # freeze the Swin patch-embedding layer
            for para in self.netG.module.patch_embed.parameters():
                para.requires_grad = False
            print("Patch Embedding Frozen (Requires Grad)!")
        if self.opt_train['E_decay'] > 0:
            # exponential-moving-average copy of the generator
            self.netE = define_G(opt).to(self.device).eval()

    """
    # ----------------------------------------
    # Preparation before training with data
    # Save model during training
    # ----------------------------------------
    """

    def init_train(self):
        """Prepare everything needed before the training loop starts."""
        self.load()                     # load pretrained model(s)
        self.netG.train()               # training mode (affects BN/dropout)
        self.define_loss()
        self.define_optimizer()
        self.load_optimizers()
        self.define_scheduler()
        self.log_dict = OrderedDict()   # per-step logging

    def load(self):
        """Load pre-trained G (and, if EMA is enabled, E) weights."""
        load_path_G = self.opt['path']['pretrained_netG']
        if load_path_G is not None:
            print('Loading model for G [{:s}] ...'.format(load_path_G))
            self.load_network(load_path_G, self.netG, strict=self.opt_train['G_param_strict'], param_key='params')
        load_path_E = self.opt['path']['pretrained_netE']
        if self.opt_train['E_decay'] > 0:
            if load_path_E is not None:
                print('Loading model for E [{:s}] ...'.format(load_path_E))
                self.load_network(load_path_E, self.netE, strict=self.opt_train['E_param_strict'], param_key='params_ema')
            else:
                # no checkpoint for E: start the EMA from the current G weights
                print('Copying model for E ...')
                self.update_E(0)
            self.netE.eval()

    def load_optimizers(self):
        """Optionally resume the G optimizer state from a checkpoint."""
        load_path_optimizerG = self.opt['path']['pretrained_optimizerG']
        if load_path_optimizerG is not None and self.opt_train['G_optimizer_reuse']:
            print('Loading optimizerG [{:s}] ...'.format(load_path_optimizerG))
            self.load_optimizer(load_path_optimizerG, self.G_optimizer)

    def save(self, iter_label):
        """Save G (and optionally E / optimizer) checkpoints tagged with ``iter_label``."""
        self.save_network(self.save_dir, self.netG, 'G', iter_label)
        if self.opt_train['E_decay'] > 0:
            self.save_network(self.save_dir, self.netE, 'E', iter_label)
        if self.opt_train['G_optimizer_reuse']:
            self.save_optimizer(self.save_dir, self.G_optimizer, 'optimizerG', iter_label)

    def define_loss(self):
        """Build the base reconstruction loss and the perceptual loss."""
        G_lossfn_type = self.opt_train['G_lossfn_type']
        if G_lossfn_type == 'l1':
            self.G_lossfn = nn.L1Loss().to(self.device)
        elif G_lossfn_type == 'l2':
            self.G_lossfn = nn.MSELoss().to(self.device)
        elif G_lossfn_type == 'l2sum':
            self.G_lossfn = nn.MSELoss(reduction='sum').to(self.device)
        elif G_lossfn_type == 'ssim':
            self.G_lossfn = SSIMLoss().to(self.device)
        elif G_lossfn_type == 'charbonnier':
            self.G_lossfn = CharbonnierLoss(self.opt_train['G_charbonnier_eps']).to(self.device)
        else:
            raise NotImplementedError('Loss type [{:s}] is not found.'.format(G_lossfn_type))
        self.G_lossfn_weight = self.opt_train['G_lossfn_weight']
        self.perceptual_lossfn = PerceptualLoss().to(self.device)

    def total_loss(self):
        """Weighted sum of image loss, k-space loss and perceptual loss."""
        self.alpha = self.opt_train['alpha']
        self.beta = self.opt_train['beta']
        self.gamma = self.opt_train['gamma']
        # H: ground truth; E: reconstruction; L: undersampled input
        self.H_k_real, self.H_k_imag = fft_map(self.H)
        self.E_k_real, self.E_k_imag = fft_map(self.E)
        self.loss_image = self.G_lossfn(self.E, self.H)
        self.loss_freq = (self.G_lossfn(self.E_k_real, self.H_k_real) + self.G_lossfn(self.E_k_imag, self.H_k_imag)) / 2
        self.loss_perc = self.perceptual_lossfn(self.E, self.H)
        return self.alpha * self.loss_image + self.beta * self.loss_freq + self.gamma * self.loss_perc

    def define_optimizer(self):
        """Create the Adam/AdamW optimizer over trainable G parameters."""
        G_optim_params = []
        for k, v in self.netG.named_parameters():
            if v.requires_grad:
                G_optim_params.append(v)
            else:
                print('Params [{:s}] will not optimize.'.format(k))
        # G_optim_params already contains only requires_grad parameters, so the
        # previous `filter(lambda p: p.requires_grad, ...)` was a no-op and is dropped.
        if self.opt_train['G_optimizer_type'] == 'adam':
            optimizer_cls = Adam
        elif self.opt_train['G_optimizer_type'] == 'adamw':
            optimizer_cls = AdamW
        else:
            raise NotImplementedError
        self.G_optimizer = optimizer_cls(G_optim_params, lr=self.opt_train['G_optimizer_lr'], weight_decay=self.opt_train['G_optimizer_wd'])
        if self.opt_train['freeze_patch_embedding']:
            print("Patch Embedding Frozen (Optimizer)!")

    def define_scheduler(self):
        """Attach a MultiStepLR schedule to the G optimizer."""
        self.schedulers.append(lr_scheduler.MultiStepLR(self.G_optimizer,
                                                        self.opt_train['G_scheduler_milestones'],
                                                        self.opt_train['G_scheduler_gamma']
                                                        ))

    """
    # ----------------------------------------
    # Optimization during training with data
    # Testing/evaluation
    # ----------------------------------------
    """

    def feed_data(self, data, need_H=True):
        """Move one batch (ground truth H, input L) to the device."""
        self.H = data['H'].to(self.device)
        self.L = data['L'].to(self.device)

    def netG_forward(self):
        """Run the generator: reconstruction E = netG(L)."""
        self.E = self.netG(self.L)

    def optimize_parameters(self, current_step):
        """One optimisation step: forward, loss, backward, (clip,) step, log."""
        self.current_step = current_step
        self.G_optimizer.zero_grad()
        self.netG_forward()
        G_loss = self.G_lossfn_weight * self.total_loss()
        G_loss.backward()
        # `clip_grad_norm_` helps prevent the exploding-gradient problem.
        G_optimizer_clipgrad = self.opt_train['G_optimizer_clipgrad'] if self.opt_train['G_optimizer_clipgrad'] else 0
        if G_optimizer_clipgrad > 0:
            # fix: was `self.parameters()`, which this wrapper class does not
            # define -- clip the generator's parameters instead.
            torch.nn.utils.clip_grad_norm_(self.netG.parameters(), max_norm=self.opt_train['G_optimizer_clipgrad'], norm_type=2)
        self.G_optimizer.step()
        # optional regularizers (skipped on checkpoint-saving steps)
        G_regularizer_orthstep = self.opt_train['G_regularizer_orthstep'] if self.opt_train['G_regularizer_orthstep'] else 0
        if G_regularizer_orthstep > 0 and current_step % G_regularizer_orthstep == 0 and current_step % self.opt['train']['checkpoint_save'] != 0:
            self.netG.apply(regularizer_orth)
        G_regularizer_clipstep = self.opt_train['G_regularizer_clipstep'] if self.opt_train['G_regularizer_clipstep'] else 0
        if G_regularizer_clipstep > 0 and current_step % G_regularizer_clipstep == 0 and current_step % self.opt['train']['checkpoint_save'] != 0:
            self.netG.apply(regularizer_clip)
        # record log ('preceptual' typo kept for compatibility with log consumers)
        self.log_dict['G_loss'] = G_loss.item()
        self.log_dict['G_loss_image'] = self.loss_image.item()
        self.log_dict['G_loss_frequency'] = self.loss_freq.item()
        self.log_dict['G_loss_preceptual'] = self.loss_perc.item()
        if self.opt_train['E_decay'] > 0:
            self.update_E(self.opt_train['E_decay'])

    def record_loss_for_val(self):
        """Compute and log the loss on the current (validation) batch without stepping."""
        G_loss = self.G_lossfn_weight * self.total_loss()
        self.log_dict['G_loss'] = G_loss.item()
        self.log_dict['G_loss_image'] = self.loss_image.item()
        self.log_dict['G_loss_frequency'] = self.loss_freq.item()
        self.log_dict['G_loss_preceptual'] = self.loss_perc.item()

    def check_windowsize(self):
        """Mirror-pad H and L so height/width are multiples of the Swin window size."""
        self.window_size = self.opt['netG']['window_size']
        _, _, h_old, w_old = self.H.size()
        h_pad = ceil(h_old / self.window_size) * self.window_size - h_old
        w_pad = ceil(w_old / self.window_size) * self.window_size - w_old
        self.h_old = h_old
        self.w_old = w_old
        self.H = torch.cat([self.H, torch.flip(self.H, [2])], 2)[:, :, :h_old + h_pad, :]
        self.H = torch.cat([self.H, torch.flip(self.H, [3])], 3)[:, :, :, :w_old + w_pad]
        self.L = torch.cat([self.L, torch.flip(self.L, [2])], 2)[:, :, :h_old + h_pad, :]
        self.L = torch.cat([self.L, torch.flip(self.L, [3])], 3)[:, :, :, :w_old + w_pad]

    def recover_windowsize(self):
        """Crop L, H, E back to the original (pre-padding) spatial size."""
        self.L = self.L[..., :self.h_old, :self.w_old]
        self.H = self.H[..., :self.h_old, :self.w_old]
        self.E = self.E[..., :self.h_old, :self.w_old]

    def test(self):
        """Run inference with gradients disabled, then restore training mode."""
        self.netG.eval()
        with torch.no_grad():
            self.netG_forward()
        self.netG.train()

    def current_log(self):
        """Return the logging dict."""
        return self.log_dict

    def current_visuals(self, need_H=True):
        """Return the first L/E(/H) image of the batch on CPU."""
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach()[0].float().cpu()
        out_dict['E'] = self.E.detach()[0].float().cpu()
        if need_H:
            out_dict['H'] = self.H.detach()[0].float().cpu()
        return out_dict

    def current_visuals_gpu(self, need_H=True):
        """Return the first L/E(/H) image of the batch on the current device."""
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach()[0].float()
        out_dict['E'] = self.E.detach()[0].float()
        if need_H:
            out_dict['H'] = self.H.detach()[0].float()
        return out_dict

    def current_results(self, need_H=True):
        """Return the full L/E(/H) batches on CPU."""
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach().float().cpu()
        out_dict['E'] = self.E.detach().float().cpu()
        if need_H:
            out_dict['H'] = self.H.detach().float().cpu()
        return out_dict

    def current_results_gpu(self, need_H=True):
        """Return the full L/E(/H) batches on the current device."""
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach().float()
        out_dict['E'] = self.E.detach().float()
        if need_H:
            out_dict['H'] = self.H.detach().float()
        return out_dict

    """
    # ----------------------------------------
    # Information of netG
    # ----------------------------------------
    """

    def print_network(self):
        """Print a description of netG."""
        print(self.describe_network(self.netG))

    def print_params(self):
        """Print a description of netG's parameters."""
        print(self.describe_params(self.netG))

    def info_network(self):
        """Return a description of netG as a string."""
        return self.describe_network(self.netG)

    def info_params(self):
        """Return a description of netG's parameters as a string."""
        return self.describe_params(self.netG)
| 14,546 | 39.520891 | 176 | py |
SwinMR | SwinMR-main/utils/utils_image.py | import os
import math
import random
import numpy as np
import torch
import cv2
from numpy import Inf
from torchvision.utils import make_grid
from datetime import datetime
# import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import skimage.metrics
import SimpleITK as sitk
# Allow duplicate OpenMP runtimes to coexist (workaround for MKL/libiomp clashes on some platforms)
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import medpy.metric
'''
# --------------------------------------------
Jiahao Huang (j.huang21@imperial.ac.uk)
https://github.com/JiahaoHuang99/MRI_Recon
# --------------------------------------------
# https://github.com/cszn
# https://github.com/twhui/SRGAN-pyTorch
# https://github.com/xinntao/BasicSR
# --------------------------------------------
'''
# Recognised image / array file extensions (matched case-sensitively).
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG',
                  '.png', '.PNG',
                  '.ppm', '.PPM',
                  '.bmp', '.BMP',
                  '.tif', '.npy', '.mat']


def is_image_file(filename):
    """Return True if `filename` ends with a recognised image/array extension."""
    return filename.endswith(tuple(IMG_EXTENSIONS))
def get_timestamp():
    """Return the current local time formatted as 'YYMMDD-HHMMSS'."""
    return f"{datetime.now():%y%m%d-%H%M%S}"
def imshow(x, title=None, cbar=False, figsize=None):
    """Display `x` (squeezed) as a grayscale image via matplotlib."""
    data = np.squeeze(x)
    plt.figure(figsize=figsize)
    plt.imshow(data, interpolation='nearest', cmap='gray')
    if title:
        plt.title(title)
    if cbar:
        plt.colorbar()
    plt.show()
def surf(Z, cmap='rainbow', figsize=None):
    """Render the 2-D array `Z` as a 3-D surface plot."""
    plt.figure(figsize=figsize)
    axis3d = plt.axes(projection='3d')
    dim0, dim1 = Z.shape[:2]
    X, Y = np.meshgrid(np.arange(dim0), np.arange(dim1))
    axis3d.plot_surface(X, Y, Z, cmap=cmap)
    plt.show()
'''
# --------------------------------------------
# get image pathes
# --------------------------------------------
'''
def get_image_paths(dataroot):
    """Collect sorted image paths from a directory (str) or several (list).

    Returns None for None (or any other unsupported) input.
    """
    if isinstance(dataroot, str):
        return sorted(_get_paths_from_images(dataroot))
    if isinstance(dataroot, list):
        collected = []
        for root in dataroot:
            collected += sorted(_get_paths_from_images(root))
        return collected
    return None  # dataroot is None (or an unsupported type)
def _get_paths_from_images(path):
    """Recursively gather every image file under `path` (walk order sorted)."""
    assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
    images = [os.path.join(dirpath, fname)
              for dirpath, _, fnames in sorted(os.walk(path))
              for fname in sorted(fnames)
              if is_image_file(fname)]
    assert images, '{:s} has no valid image file'.format(path)
    return images
'''
# --------------------------------------------
# split large images into small images
# --------------------------------------------
'''
def patches_from_image(img, p_size=512, p_overlap=64, p_max=800):
    """Split a large image into overlapping square patches.

    Images whose first two dimensions both exceed `p_max` are cut into
    (p_size x p_size) patches with `p_overlap` pixels of overlap; smaller
    images are returned unchanged as a single-element list.

    Args:
        img: HxWxC numpy array.
        p_size: patch side length.
        p_overlap: overlap between neighbouring patches.
        p_max: size threshold below which the image is kept whole.

    Returns:
        list of numpy arrays.
    """
    w, h = img.shape[:2]
    patches = []
    if w > p_max and h > p_max:
        # fix: `np.int` was removed in NumPy 1.24 -- use the builtin int instead
        w1 = list(np.arange(0, w - p_size, p_size - p_overlap, dtype=int))
        h1 = list(np.arange(0, h - p_size, p_size - p_overlap, dtype=int))
        w1.append(w - p_size)  # ensure the last patch reaches the border
        h1.append(h - p_size)
        for i in w1:
            for j in h1:
                patches.append(img[i:i + p_size, j:j + p_size, :])
    else:
        patches.append(img)
    return patches
def imssave(imgs, img_path):
    """Save each image in `imgs` as <name>_{i:04d}.png next to `img_path`.

    imgs: list, N images of size WxHxC
    """
    base, _ = os.path.splitext(os.path.basename(img_path))
    out_dir = os.path.dirname(img_path)
    for idx, im in enumerate(imgs):
        if im.ndim == 3:
            im = im[:, :, [2, 1, 0]]  # RGB -> BGR for cv2
        cv2.imwrite(os.path.join(out_dir, base + '_{:04d}'.format(idx) + '.png'), im)
def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=512, p_overlap=96, p_max=800):
    """
    split the large images from original_dataroot into small overlapped images with size (p_size)x(p_size),
    and save them into taget_dataroot; only the images with larger size than (p_max)x(p_max)
    will be splitted.
    Args:
        original_dataroot:
        taget_dataroot:  (parameter name typo kept for API compatibility)
        p_size: size of small images
        p_overlap: patch size in training is a good choice
        p_max: images with smaller size than (p_max)x(p_max) keep unchanged.
    """
    for img_path in get_image_paths(original_dataroot):
        img = imread_uint(img_path, n_channels=n_channels)
        patches = patches_from_image(img, p_size, p_overlap, p_max)
        imssave(patches, os.path.join(taget_dataroot, os.path.basename(img_path)))
'''
# --------------------------------------------
# makedir
# --------------------------------------------
'''
def mkdir(path):
    """Create `path` (including parents) if it does not already exist.

    Uses exist_ok=True to avoid the check-then-create race of the previous
    `if not os.path.exists(...)` implementation.
    """
    os.makedirs(path, exist_ok=True)
def mkdirs(paths):
    """Create one directory (str input) or every directory in an iterable."""
    if isinstance(paths, str):
        mkdir(paths)
        return
    for p in paths:
        mkdir(p)
def mkdir_and_rename(path):
    """Create `path`; if it already exists, archive the old directory first.

    The existing directory is renamed to '<path>_archived_<timestamp>'.
    """
    if os.path.exists(path):
        archived = path + '_archived_' + get_timestamp()
        print('Path already exists. Rename it to [{:s}]'.format(archived))
        os.rename(path, archived)
    os.makedirs(path)
'''
# --------------------------------------------
# read image from path
# opencv is fast, but read BGR numpy image
# --------------------------------------------
'''
# --------------------------------------------
# get uint8 image of size HxWxn_channles (RGB)
# --------------------------------------------
def imread_uint(path, n_channels=3):
    """Read an image from `path` as a uint8 HxWxC array.

    n_channels=1 -> HxWx1 grayscale; n_channels=3 -> HxWx3 RGB
    (grayscale sources are replicated to three channels).
    """
    if n_channels == 1:
        img = np.expand_dims(cv2.imread(path, 0), axis=2)  # grayscale -> HxWx1
    elif n_channels == 3:
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # BGR or grayscale
        if img.ndim == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)  # replicate to GGG
        else:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img
# --------------------------------------------
# matlab's imwrite
# --------------------------------------------
def imsave(img, img_path):
    """Write `img` to disk (matlab-style); 3-channel input is treated as RGB."""
    out = np.squeeze(img)
    if out.ndim == 3:
        out = out[:, :, [2, 1, 0]]  # RGB -> BGR for cv2
    cv2.imwrite(img_path, out)
def imwrite(img, img_path):
    """Write `img` to disk; duplicate of `imsave`, kept for API compatibility."""
    out = np.squeeze(img)
    if out.ndim == 3:
        out = out[:, :, [2, 1, 0]]  # RGB -> BGR for cv2
    cv2.imwrite(img_path, out)
# --------------------------------------------
# get single image of size HxWxn_channles (BGR)
# --------------------------------------------
def read_img(path):
    """Read an image by cv2 as float32 HWC BGR in [0, 1]."""
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED).astype(np.float32) / 255.
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    if img.shape[2] > 3:
        img = img[:, :, :3]  # drop the extra (alpha) channel if present
    return img
'''
# --------------------------------------------
# image format conversion
# --------------------------------------------
# numpy(single) <---> numpy(uint)
# numpy(single) <---> tensor
# numpy(uint) <---> tensor
# --------------------------------------------
'''
# --------------------------------------------
# numpy(single) [0, 1] <---> numpy(uint)
# --------------------------------------------
def uint2single(img):
    """uint8 [0, 255] numpy image -> float32 [0, 1]."""
    return (np.asarray(img) / 255.).astype(np.float32)
def single2uint(img):
    """float [0, 1] numpy image -> uint8 [0, 255] with clipping and rounding."""
    return np.uint8(np.round(np.clip(img, 0, 1) * 255.))
def uint162single(img):
    """uint16 [0, 65535] numpy image -> float32 [0, 1]."""
    return (np.asarray(img) / 65535.).astype(np.float32)
def single2uint16(img):
    """float [0, 1] numpy image -> uint16 [0, 65535] with clipping and rounding."""
    return np.uint16(np.round(np.clip(img, 0, 1) * 65535.))
# --------------------------------------------
# numpy(uint) (HxWxC or HxW) <---> tensor
# --------------------------------------------
# convert uint to 4-dimensional torch tensor
def uint2tensor4(img):
    """uint8 HxW(xC) numpy image -> float tensor 1xCxHxW in [0, 1]."""
    if img.ndim == 2:
        img = img[:, :, None]  # add a channel axis
    t = torch.from_numpy(np.ascontiguousarray(img))
    return t.permute(2, 0, 1).float().div(255.).unsqueeze(0)
# convert uint to 3-dimensional torch tensor
def uint2tensor3(img):
    """uint8 HxW(xC) numpy image -> float tensor CxHxW in [0, 1]."""
    if img.ndim == 2:
        img = img[:, :, None]  # add a channel axis
    t = torch.from_numpy(np.ascontiguousarray(img))
    return t.permute(2, 0, 1).float().div(255.)
# convert float0~1 to 3-dimensional torch tensor
def float2tensor3(img):
    """float HxW(xC) numpy image in [0, 1] -> float tensor CxHxW (no rescaling)."""
    if img.ndim == 2:
        img = img[:, :, None]  # add a channel axis
    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float()
# convert 2/3/4-dimensional torch tensor to uint0~255
def tensor2uint(img):
    """2/3/4-D tensor in [0, 1] -> uint8 numpy image (HxW or HxWxC)."""
    arr = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
    if arr.ndim == 3:
        arr = np.transpose(arr, (1, 2, 0))  # CHW -> HWC
    return np.uint8((arr * 255.0).round())
# convert 2/3/4-dimensional torch tensor to float0~1
def tensor2float(img):
    """2/3/4-D tensor -> float numpy image clamped to [0, 1] (HxW or HxWxC)."""
    arr = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
    if arr.ndim == 3:
        arr = np.transpose(arr, (1, 2, 0))  # CHW -> HWC
    return arr
# --------------------------------------------
# numpy(single) (HxWxC) <---> tensor
# --------------------------------------------
# convert single (HxWxC) to 3-dimensional torch tensor
def single2tensor3(img):
return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float()
# convert single (HxWxC) to 4-dimensional torch tensor
def single2tensor4(img):
return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().unsqueeze(0)
# convert torch tensor to single
def tensor2single(img):
img = img.data.squeeze().float().cpu().numpy()
if img.ndim == 3:
img = np.transpose(img, (1, 2, 0))
return img
# convert torch tensor to single
def tensor2single3(img):
img = img.data.squeeze().float().cpu().numpy()
if img.ndim == 3:
img = np.transpose(img, (1, 2, 0))
elif img.ndim == 2:
img = np.expand_dims(img, axis=2)
return img
def single2tensor5(img):
return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float().unsqueeze(0)
def single32tensor5(img):
return torch.from_numpy(np.ascontiguousarray(img)).float().unsqueeze(0).unsqueeze(0)
def single42tensor4(img):
return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float()
# from skimage.io import imread, imsave
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
'''
Converts a torch Tensor into an image Numpy array of BGR channel order
Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
'''
tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp
tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1]
n_dim = tensor.dim()
if n_dim == 4:
n_img = len(tensor)
img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
elif n_dim == 3:
img_np = tensor.numpy()
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
elif n_dim == 2:
img_np = tensor.numpy()
else:
raise TypeError(
'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
if out_type == np.uint8:
img_np = (img_np * 255.0).round()
# Important. Unlike matlab, numpy.uint8() WILL NOT round by default.
return img_np.astype(out_type)
'''
# --------------------------------------------
# Augmentation, flipe and/or rotate
# --------------------------------------------
# The following two are enough.
# (1) augmet_img: numpy image of WxHxC or WxH
# (2) augment_img_tensor4: tensor image 1xCxWxH
# --------------------------------------------
'''
def augment_img_no_rot(img, mode=0):
'''Kai Zhang (github: https://github.com/cszn)
'''
if mode == 0:
return img
elif mode == 1:
return np.flipud(img)
def augment_img(img, mode=0):
'''Kai Zhang (github: https://github.com/cszn)
'''
if mode == 0:
return img
elif mode == 1:
return np.flipud(np.rot90(img))
elif mode == 2:
return np.flipud(img)
elif mode == 3:
return np.rot90(img, k=3)
elif mode == 4:
return np.flipud(np.rot90(img, k=2))
elif mode == 5:
return np.rot90(img)
elif mode == 6:
return np.rot90(img, k=2)
elif mode == 7:
return np.flipud(np.rot90(img, k=3))
def augment_img_tensor4(img, mode=0):
'''Kai Zhang (github: https://github.com/cszn)
'''
if mode == 0:
return img
elif mode == 1:
return img.rot90(1, [2, 3]).flip([2])
elif mode == 2:
return img.flip([2])
elif mode == 3:
return img.rot90(3, [2, 3])
elif mode == 4:
return img.rot90(2, [2, 3]).flip([2])
elif mode == 5:
return img.rot90(1, [2, 3])
elif mode == 6:
return img.rot90(2, [2, 3])
elif mode == 7:
return img.rot90(3, [2, 3]).flip([2])
def augment_img_tensor(img, mode=0):
'''Kai Zhang (github: https://github.com/cszn)
'''
img_size = img.size()
img_np = img.data.cpu().numpy()
if len(img_size) == 3:
img_np = np.transpose(img_np, (1, 2, 0))
elif len(img_size) == 4:
img_np = np.transpose(img_np, (2, 3, 1, 0))
img_np = augment_img(img_np, mode=mode)
img_tensor = torch.from_numpy(np.ascontiguousarray(img_np))
if len(img_size) == 3:
img_tensor = img_tensor.permute(2, 0, 1)
elif len(img_size) == 4:
img_tensor = img_tensor.permute(3, 2, 0, 1)
return img_tensor.type_as(img)
def augment_img_np3(img, mode=0):
if mode == 0:
return img
elif mode == 1:
return img.transpose(1, 0, 2)
elif mode == 2:
return img[::-1, :, :]
elif mode == 3:
img = img[::-1, :, :]
img = img.transpose(1, 0, 2)
return img
elif mode == 4:
return img[:, ::-1, :]
elif mode == 5:
img = img[:, ::-1, :]
img = img.transpose(1, 0, 2)
return img
elif mode == 6:
img = img[:, ::-1, :]
img = img[::-1, :, :]
return img
elif mode == 7:
img = img[:, ::-1, :]
img = img[::-1, :, :]
img = img.transpose(1, 0, 2)
return img
def augment_imgs(img_list, hflip=True, rot=True):
# horizontal flip OR rotate
hflip = hflip and random.random() < 0.5
vflip = rot and random.random() < 0.5
rot90 = rot and random.random() < 0.5
def _augment(img):
if hflip:
img = img[:, ::-1, :]
if vflip:
img = img[::-1, :, :]
if rot90:
img = img.transpose(1, 0, 2)
return img
return [_augment(img) for img in img_list]
'''
# --------------------------------------------
# modcrop and shave
# --------------------------------------------
'''
def modcrop(img_in, scale):
# img_in: Numpy, HWC or HW
img = np.copy(img_in)
if img.ndim == 2:
H, W = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r]
elif img.ndim == 3:
H, W, C = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r, :]
else:
raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
return img
def shave(img_in, border=0):
# img_in: Numpy, HWC or HW
img = np.copy(img_in)
h, w = img.shape[:2]
img = img[border:h - border, border:w - border]
return img
'''
# --------------------------------------------
# image processing process on numpy image
# channel_convert(in_c, tar_type, img_list):
# rgb2ycbcr(img, only_y=True):
# bgr2ycbcr(img, only_y=True):
# ycbcr2rgb(img):
# --------------------------------------------
'''
def rgb2ycbcr(img, only_y=True):
'''same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img.astype(np.float32)
if in_img_type != np.uint8:
img *= 255.
# convert
if only_y:
rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def ycbcr2rgb(img):
'''same as matlab ycbcr2rgb
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img.astype(np.float32)
if in_img_type != np.uint8:
img *= 255.
# convert
rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
rlt = np.clip(rlt, 0, 255)
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def bgr2ycbcr(img, only_y=True):
'''bgr version of rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img.astype(np.float32)
if in_img_type != np.uint8:
img *= 255.
# convert
if only_y:
rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
else:
rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
[65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def channel_convert(in_c, tar_type, img_list):
# conversion among BGR, gray and y
if in_c == 3 and tar_type == 'gray': # BGR to gray
gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
return [np.expand_dims(img, axis=2) for img in gray_list]
elif in_c == 3 and tar_type == 'y': # BGR to y
y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
return [np.expand_dims(img, axis=2) for img in y_list]
elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR
return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
else:
return img_list
'''
# --------------------------------------------
# metric, PSNR, SSIM and PSNRB
# --------------------------------------------
'''
# --------------------------------------------
# PSNR
# --------------------------------------------
# def calculate_psnr(img1, img2, border=0):
# # img1 and img2 have range [0, 255]
# # img1 = img1.squeeze()
# # img2 = img2.squeeze()
# if not img1.shape == img2.shape:
# raise ValueError('Input images must have the same dimensions.')
# h, w = img1.shape[:2]
# img1 = img1[border:h - border, border:w - border]
# img2 = img2[border:h - border, border:w - border]
#
# img1 = img1.astype(np.float64)
# img2 = img2.astype(np.float64)
# mse = np.mean((img1 - img2) ** 2)
# if mse == 0:
# return float('inf')
# return 20 * math.log10(255.0 / math.sqrt(mse))
def calculate_psnr_single(img1, img2, border=0):
# img1 = np.clip(img1.squeeze(), -1, 1)
# img2 = np.clip(img2.squeeze(), -1, 1)
if not img1.shape == img2.shape:
raise ValueError('Input images must have the same dimensions.')
h, w = img1.shape[:2]
img1 = img1[border:h - border, border:w - border]
img2 = img2[border:h - border, border:w - border]
# img1 = img1.astype(np.float64)
# img2 = img2.astype(np.float64)
# gt recon
return skimage.metrics.peak_signal_noise_ratio(img1, img2)
# --------------------------------------------
# SSIM
# --------------------------------------------
# def ssim(img1, img2):
# C1 = (0.01 * 255)**2
# C2 = (0.03 * 255)**2
#
# img1 = img1.astype(np.float64)
# img2 = img2.astype(np.float64)
# kernel = cv2.getGaussianKernel(11, 1.5)
# window = np.outer(kernel, kernel.transpose())
#
# mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid
# mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
# mu1_sq = mu1**2
# mu2_sq = mu2**2
# mu1_mu2 = mu1 * mu2
# sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
# sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
# sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
#
# ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
# (sigma1_sq + sigma2_sq + C2))
# return ssim_map.mean()
# def calculate_ssim(img1, img2, border=0):
#
# #img1 = img1.squeeze()
# #img2 = img2.squeeze()
# if not img1.shape == img2.shape:
# raise ValueError('Input images must have the same dimensions.')
# h, w = img1.shape[:2]
# img1 = img1[border:h-border, border:w-border]
# img2 = img2[border:h-border, border:w-border]
#
# if img1.ndim == 2:
# return ssim(img1, img2)
# elif img1.ndim == 3:
# if img1.shape[2] == 3:
# ssims = []
# for i in range(3):
# ssims.append(ssim(img1[:,:,i], img2[:,:,i]))
# return np.array(ssims).mean()
# elif img1.shape[2] == 1:
# return ssim(np.squeeze(img1), np.squeeze(img2))
# else:
# raise ValueError('Wrong input image dimensions.')
def calculate_ssim_single(img1, img2, border=0):
# img1 = img1.squeeze()
# img2 = img2.squeeze()
if not img1.shape == img2.shape:
raise ValueError('Input images must have the same dimensions.')
h, w = img1.shape[:2]
img1 = img1[border:h - border, border:w - border]
img2 = img2[border:h - border, border:w - border]
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
return skimage.metrics.structural_similarity(img1, img2)
# --------------------------------------------
# LPIPS
# --------------------------------------------
def calculate_lpips_single(func, img1, img2):
if not img1.shape == img2.shape:
raise ValueError('Input images must have the same dimensions.')
# -1 ~ 1
img1 = (img1 * 2 - 1)
img2 = (img2 * 2 - 1)
return func(img1, img2)
def _blocking_effect_factor(im):
block_size = 8
block_horizontal_positions = torch.arange(7, im.shape[3] - 1, 8)
block_vertical_positions = torch.arange(7, im.shape[2] - 1, 8)
horizontal_block_difference = (
(im[:, :, :, block_horizontal_positions] - im[:, :, :, block_horizontal_positions + 1]) ** 2).sum(
3).sum(2).sum(1)
vertical_block_difference = (
(im[:, :, block_vertical_positions, :] - im[:, :, block_vertical_positions + 1, :]) ** 2).sum(3).sum(
2).sum(1)
nonblock_horizontal_positions = np.setdiff1d(torch.arange(0, im.shape[3] - 1), block_horizontal_positions)
nonblock_vertical_positions = np.setdiff1d(torch.arange(0, im.shape[2] - 1), block_vertical_positions)
horizontal_nonblock_difference = (
(im[:, :, :, nonblock_horizontal_positions] - im[:, :, :, nonblock_horizontal_positions + 1]) ** 2).sum(
3).sum(2).sum(1)
vertical_nonblock_difference = (
(im[:, :, nonblock_vertical_positions, :] - im[:, :, nonblock_vertical_positions + 1, :]) ** 2).sum(
3).sum(2).sum(1)
n_boundary_horiz = im.shape[2] * (im.shape[3] // block_size - 1)
n_boundary_vert = im.shape[3] * (im.shape[2] // block_size - 1)
boundary_difference = (horizontal_block_difference + vertical_block_difference) / (
n_boundary_horiz + n_boundary_vert)
n_nonboundary_horiz = im.shape[2] * (im.shape[3] - 1) - n_boundary_horiz
n_nonboundary_vert = im.shape[3] * (im.shape[2] - 1) - n_boundary_vert
nonboundary_difference = (horizontal_nonblock_difference + vertical_nonblock_difference) / (
n_nonboundary_horiz + n_nonboundary_vert)
scaler = np.log2(block_size) / np.log2(min([im.shape[2], im.shape[3]]))
bef = scaler * (boundary_difference - nonboundary_difference)
bef[boundary_difference <= nonboundary_difference] = 0
return bef
def calculate_psnrb(img1, img2, border=0):
"""Calculate PSNR-B (Peak Signal-to-Noise Ratio).
Ref: Quality assessment of deblocked images, for JPEG image deblocking evaluation
# https://gitlab.com/Queuecumber/quantization-guided-ac/-/blob/master/metrics/psnrb.py
Args:
img1 (ndarray): Images with range [0, 255].
img2 (ndarray): Images with range [0, 255].
border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the PSNR calculation.
test_y_channel (bool): Test on Y channel of YCbCr. Default: False.
Returns:
float: psnr result.
"""
if not img1.shape == img2.shape:
raise ValueError('Input images must have the same dimensions.')
if img1.ndim == 2:
img1, img2 = np.expand_dims(img1, 2), np.expand_dims(img2, 2)
h, w = img1.shape[:2]
img1 = img1[border:h - border, border:w - border]
img2 = img2[border:h - border, border:w - border]
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
# follow https://gitlab.com/Queuecumber/quantization-guided-ac/-/blob/master/metrics/psnrb.py
img1 = torch.from_numpy(img1).permute(2, 0, 1).unsqueeze(0) / 255.
img2 = torch.from_numpy(img2).permute(2, 0, 1).unsqueeze(0) / 255.
total = 0
for c in range(img1.shape[1]):
mse = torch.nn.functional.mse_loss(img1[:, c:c + 1, :, :], img2[:, c:c + 1, :, :], reduction='none')
bef = _blocking_effect_factor(img1[:, c:c + 1, :, :])
mse = mse.view(mse.shape[0], -1).mean(1)
total += 10 * torch.log10(1 / (mse + bef))
return float(total) / img1.shape[1]
'''
# --------------------------------------------
# matlab's bicubic imresize (numpy and torch) [0, 1]
# --------------------------------------------
'''
# matlab 'imresize' function, now only support 'bicubic'
def cubic(x):
absx = torch.abs(x)
absx2 = absx ** 2
absx3 = absx ** 3
return (1.5 * absx3 - 2.5 * absx2 + 1) * ((absx <= 1).type_as(absx)) + \
(-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2) * (((absx > 1) * (absx <= 2)).type_as(absx))
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
if (scale < 1) and (antialiasing):
# Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
kernel_width = kernel_width / scale
# Output-space coordinates
x = torch.linspace(1, out_length, out_length)
# Input-space coordinates. Calculate the inverse mapping such that 0.5
# in output space maps to 0.5 in input space, and 0.5+scale in output
# space maps to 1.5 in input space.
u = x / scale + 0.5 * (1 - 1 / scale)
# What is the left-most pixel that can be involved in the computation?
left = torch.floor(u - kernel_width / 2)
# What is the maximum number of pixels that can be involved in the
# computation? Note: it's OK to use an extra pixel here; if the
# corresponding weights are all zero, it will be eliminated at the end
# of this function.
P = math.ceil(kernel_width) + 2
# The indices of the input pixels involved in computing the k-th output
# pixel are in row k of the indices matrix.
indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
1, P).expand(out_length, P)
# The weights used to compute the k-th output pixel are in row k of the
# weights matrix.
distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
# apply cubic kernel
if (scale < 1) and (antialiasing):
weights = scale * cubic(distance_to_center * scale)
else:
weights = cubic(distance_to_center)
# Normalize the weights matrix so that each row sums to 1.
weights_sum = torch.sum(weights, 1).view(out_length, 1)
weights = weights / weights_sum.expand(out_length, P)
# If a column in weights is all zero, get rid of it. only consider the first and last column.
weights_zero_tmp = torch.sum((weights == 0), 0)
if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
indices = indices.narrow(1, 1, P - 2)
weights = weights.narrow(1, 1, P - 2)
if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
indices = indices.narrow(1, 0, P - 2)
weights = weights.narrow(1, 0, P - 2)
weights = weights.contiguous()
indices = indices.contiguous()
sym_len_s = -indices.min() + 1
sym_len_e = indices.max() - in_length
indices = indices + sym_len_s - 1
return weights, indices, int(sym_len_s), int(sym_len_e)
# --------------------------------------------
# imresize for tensor image [0, 1]
# --------------------------------------------
def imresize(img, scale, antialiasing=True):
# Now the scale should be the same for H and W
# input: img: pytorch tensor, CHW or HW [0,1]
# output: CHW or HW [0,1] w/o round
need_squeeze = True if img.dim() == 2 else False
if need_squeeze:
img.unsqueeze_(0)
in_C, in_H, in_W = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
kernel_width = 4
kernel = 'cubic'
# Return the desired dimension order for performing the resize. The
# strategy is to perform the resize first along the dimension with the
# smallest scale factor.
# Now we do not support this.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:, :sym_len_Hs, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[:, -sym_len_He:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(in_C, out_H, in_W)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
for j in range(out_C):
out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :, :sym_len_Ws]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, :, -sym_len_We:]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(in_C, out_H, out_W)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
for j in range(out_C):
out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i])
if need_squeeze:
out_2.squeeze_()
return out_2
# --------------------------------------------
# imresize for numpy image [0, 1]
# --------------------------------------------
def imresize_np(img, scale, antialiasing=True):
# Now the scale should be the same for H and W
# input: img: Numpy, HWC or HW [0,1]
# output: HWC or HW [0,1] w/o round
img = torch.from_numpy(img)
need_squeeze = True if img.dim() == 2 else False
if need_squeeze:
img.unsqueeze_(2)
in_H, in_W, in_C = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
kernel_width = 4
kernel = 'cubic'
# Return the desired dimension order for performing the resize. The
# strategy is to perform the resize first along the dimension with the
# smallest scale factor.
# Now we do not support this.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:sym_len_Hs, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[-sym_len_He:, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(out_H, in_W, in_C)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
for j in range(out_C):
out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :sym_len_Ws, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, -sym_len_We:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(out_H, out_W, in_C)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
for j in range(out_C):
out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])
if need_squeeze:
out_2.squeeze_()
return out_2.numpy()
# ------------------------------------------
# Segmentation Metrics
# ------------------------------------------
def get_dice_medpy(res, ref):
# TODO: SUPPORT 3D DATA
res = np.uint8(res)
ref = np.uint8(ref)
if res.sum() > 0 and ref.sum() > 0:
dice = medpy.metric.binary.dc(res, ref)
elif res.sum() == 0 and ref.sum() == 0:
dice = 1
else:
dice = 0
return dice
def get_hd_medpy(res, ref):
# TODO: SUPPORT 3D DATA
res = np.uint8(res)
ref = np.uint8(ref)
if res.sum() > 0 and ref.sum() > 0:
hd = medpy.metric.binary.hd(res, ref)
elif res.sum() == 0 and ref.sum() == 0:
hd = 0
else:
hd = Inf
return hd
# def get_dice_2d(pred, gt):
# EPSILON = 1e-7
# labelPred = sitk.GetImageFromArray(pred + EPSILON, isVector=False)
# labelTrue = sitk.GetImageFromArray(gt + EPSILON, isVector=False)
#
# dicecomputer = sitk.LabelOverlapMeasuresImageFilter()
# # dicecomputer.Execute(labelTrue, labelPred)
# dicecomputer.Execute(labelTrue > 0.5, labelPred > 0.5)
# dice = dicecomputer.GetDiceCoefficient()
#
# return dice
#
# def get_dice_3d(pred, gt):
# EPSILON = 1e-7
# labelPred = sitk.GetImageFromArray(pred + EPSILON, isVector=False)
# labelTrue = sitk.GetImageFromArray(gt + EPSILON, isVector=False)
#
# dicecomputer = sitk.LabelOverlapMeasuresImageFilter()
# # dicecomputer.Execute(labelTrue, labelPred)
# dicecomputer.Execute(labelTrue > 0.5, labelPred > 0.5)
# dice = dicecomputer.GetDiceCoefficient()
#
# return dice
#
# def get_iou_2d(pred, gt):
# EPSILON = 1e-7
# dims = (0, *range(1, len(pred.shape)))
# intersection = pred * gt
# union = pred + gt - intersection
# iou = (np.sum(intersection) + EPSILON) / (np.sum(union) + EPSILON)
#
# return iou
#
# def get_iou_3d(pred, gt):
# EPSILON = 1e-7
# dims = (0, *range(1, len(pred.shape)))
# intersection = pred * gt
# union = pred + gt - intersection
# iou = (np.sum(intersection) + EPSILON) / (np.sum(union) + EPSILON)
#
# return iou
#
#
# def get_hausdorff_2d(pred, gt):
# EPSILON = 1e-7
# labelPred = sitk.GetImageFromArray(pred + EPSILON, isVector=False)
# labelTrue = sitk.GetImageFromArray(gt + EPSILON, isVector=False)
#
# hausdorffcomputer = sitk.HausdorffDistanceImageFilter()
# # hausdorffcomputer.Execute(labelTrue, labelPred)
# hausdorffcomputer.Execute(labelTrue > 0.5, labelPred > 0.5)
# avgHausdorff = hausdorffcomputer.GetAverageHausdorffDistance()
# Hausdorff = hausdorffcomputer.GetHausdorffDistance()
#
# return avgHausdorff, Hausdorff
#
# def get_hausdorff_3d(pred, gt):
# EPSILON = 1e-7
# labelPred = sitk.GetImageFromArray(pred + EPSILON, isVector=False)
# labelTrue = sitk.GetImageFromArray(gt + EPSILON, isVector=False)
#
# hausdorffcomputer = sitk.HausdorffDistanceImageFilter()
# # hausdorffcomputer.Execute(labelTrue, labelPred)
# hausdorffcomputer.Execute(labelTrue > 0.5, labelPred > 0.5)
# avgHausdorff = hausdorffcomputer.GetAverageHausdorffDistance()
# Hausdorff = hausdorffcomputer.GetHausdorffDistance()
#
# return avgHausdorff, Hausdorff
if __name__ == '__main__':
img = imread_uint('test.bmp', 3)
# img = uint2single(img)
# img_bicubic = imresize_np(img, 1/4)
# imshow(single2uint(img_bicubic))
#
# img_tensor = single2tensor4(img)
# for i in range(8):
# imshow(np.concatenate((augment_img(img, i), tensor2single(augment_img_tensor4(img_tensor, i))), 1))
# patches = patches_from_image(img, p_size=128, p_overlap=0, p_max=200)
# imssave(patches,'a.png')
| 38,657 | 31.595278 | 120 | py |
SwinMR | SwinMR-main/utils/utils_dist.py | # Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/dist_utils.py # noqa: E501
import functools
import os
import subprocess
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
# ----------------------------------
# init
# ----------------------------------
def init_dist(launcher, backend='nccl', **kwargs):
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method('spawn')
if launcher == 'pytorch':
_init_dist_pytorch(backend, **kwargs)
elif launcher == 'slurm':
_init_dist_slurm(backend, **kwargs)
else:
raise ValueError(f'Invalid launcher type: {launcher}')
def _init_dist_pytorch(backend, **kwargs):
rank = int(os.environ['RANK'])
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(rank % num_gpus)
dist.init_process_group(backend=backend, **kwargs)
def _init_dist_slurm(backend, port=None):
"""Initialize slurm distributed training environment.
If argument ``port`` is not specified, then the master port will be system
environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system
environment variable, then a default port ``29500`` will be used.
Args:
backend (str): Backend of torch.distributed.
port (int, optional): Master port. Defaults to None.
"""
proc_id = int(os.environ['SLURM_PROCID'])
ntasks = int(os.environ['SLURM_NTASKS'])
node_list = os.environ['SLURM_NODELIST']
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(proc_id % num_gpus)
addr = subprocess.getoutput(
f'scontrol show hostname {node_list} | head -n1')
# specify master port
if port is not None:
os.environ['MASTER_PORT'] = str(port)
elif 'MASTER_PORT' in os.environ:
pass # use MASTER_PORT in the environment variable
else:
# 29500 is torch.distributed default port
os.environ['MASTER_PORT'] = '29500'
os.environ['MASTER_ADDR'] = addr
os.environ['WORLD_SIZE'] = str(ntasks)
os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
os.environ['RANK'] = str(proc_id)
dist.init_process_group(backend=backend)
# ----------------------------------
# get rank and world_size
# ----------------------------------
def get_dist_info():
if dist.is_available():
initialized = dist.is_initialized()
else:
initialized = False
if initialized:
rank = dist.get_rank()
world_size = dist.get_world_size()
else:
rank = 0
world_size = 1
return rank, world_size
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def master_only(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
rank, _ = get_dist_info()
if rank == 0:
return func(*args, **kwargs)
return wrapper
# ----------------------------------
# operation across ranks
# ----------------------------------
def reduce_sum(tensor):
if not dist.is_available():
return tensor
if not dist.is_initialized():
return tensor
tensor = tensor.clone()
dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
return tensor
def gather_grad(params):
world_size = get_world_size()
if world_size == 1:
return
for param in params:
if param.grad is not None:
dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
param.grad.data.div_(world_size)
def all_gather(data):
world_size = get_world_size()
if world_size == 1:
return [data]
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to('cuda')
local_size = torch.IntTensor([tensor.numel()]).to('cuda')
size_list = [torch.IntTensor([0]).to('cuda') for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to('cuda'))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to('cuda')
tensor = torch.cat((tensor, padding), 0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def reduce_loss_dict(loss_dict):
world_size = get_world_size()
if world_size < 2:
return loss_dict
with torch.no_grad():
keys = []
losses = []
for k in sorted(loss_dict.keys()):
keys.append(k)
losses.append(loss_dict[k])
losses = torch.stack(losses, 0)
dist.reduce(losses, dst=0)
if dist.get_rank() == 0:
losses /= world_size
reduced_losses = {k: v for k, v in zip(keys, losses)}
return reduced_losses
| 5,275 | 25.118812 | 102 | py |
SwinMR | SwinMR-main/utils/utils_swinmr.py | import torch
from torch import nn
import os
import cv2
import gc
import numpy as np
from scipy.io import *
from scipy.fftpack import *
"""
# --------------------------------------------
# Jiahao Huang (j.huang21@imperial.uk.ac)
# 30/Jan/2022
# --------------------------------------------
"""
# Fourier Transform
def fft_map(x):
fft_x = torch.fft.fftn(x)
fft_x_real = fft_x.real
fft_x_imag = fft_x.imag
return fft_x_real, fft_x_imag
| 455 | 15.888889 | 46 | py |
SwinMR | SwinMR-main/utils/utils_model.py | # -*- coding: utf-8 -*-
import numpy as np
import torch
from utils import utils_image as util
import re
import glob
import os
'''
# --------------------------------------------
# Model
# --------------------------------------------
# Kai Zhang (github: https://github.com/cszn)
# 03/Mar/2019
# --------------------------------------------
'''
def find_last_checkpoint(save_dir, net_type='G'):
"""
# ---------------------------------------
# Kai Zhang (github: https://github.com/cszn)
# 03/Mar/2019
# ---------------------------------------
Args:
save_dir: model folder
net_type: 'G' or 'D' or 'optimizerG' or 'optimizerD'
Return:
init_iter: iteration number
init_path: model path
# ---------------------------------------
"""
file_list = glob.glob(os.path.join(save_dir, '*_{}.pth'.format(net_type)))
if file_list:
iter_exist = []
for file_ in file_list:
iter_current = re.findall(r"(\d+)_{}.pth".format(net_type), file_)
iter_exist.append(int(iter_current[0]))
init_iter = max(iter_exist)
init_path = os.path.join(save_dir, '{}_{}.pth'.format(init_iter, net_type))
else:
init_iter = 0
init_path = None
return init_iter, init_path
def test_mode(model, L, mode=0, refield=32, min_size=256, sf=1, modulo=1):
'''
# ---------------------------------------
# Kai Zhang (github: https://github.com/cszn)
# 03/Mar/2019
# ---------------------------------------
Args:
model: trained model
L: input Low-quality image
mode:
(0) normal: test(model, L)
(1) pad: test_pad(model, L, modulo=16)
(2) split: test_split(model, L, refield=32, min_size=256, sf=1, modulo=1)
(3) x8: test_x8(model, L, modulo=1) ^_^
(4) split and x8: test_split_x8(model, L, refield=32, min_size=256, sf=1, modulo=1)
refield: effective receptive filed of the network, 32 is enough
useful when split, i.e., mode=2, 4
min_size: min_sizeXmin_size image, e.g., 256X256 image
useful when split, i.e., mode=2, 4
sf: scale factor for super-resolution, otherwise 1
modulo: 1 if split
useful when pad, i.e., mode=1
Returns:
E: estimated image
# ---------------------------------------
'''
if mode == 0:
E = test(model, L)
elif mode == 1:
E = test_pad(model, L, modulo, sf)
elif mode == 2:
E = test_split(model, L, refield, min_size, sf, modulo)
elif mode == 3:
E = test_x8(model, L, modulo, sf)
elif mode == 4:
E = test_split_x8(model, L, refield, min_size, sf, modulo)
return E
'''
# --------------------------------------------
# normal (0)
# --------------------------------------------
'''
def test(model, L):
E = model(L)
return E
'''
# --------------------------------------------
# pad (1)
# --------------------------------------------
'''
def test_pad(model, L, modulo=16, sf=1):
h, w = L.size()[-2:]
paddingBottom = int(np.ceil(h/modulo)*modulo-h)
paddingRight = int(np.ceil(w/modulo)*modulo-w)
L = torch.nn.ReplicationPad2d((0, paddingRight, 0, paddingBottom))(L)
E = model(L)
E = E[..., :h*sf, :w*sf]
return E
'''
# --------------------------------------------
# split (function)
# --------------------------------------------
'''
def test_split_fn(model, L, refield=32, min_size=256, sf=1, modulo=1):
    """
    Recursively split the image into four overlapping quadrants until each
    piece is at most min_size x min_size, run the model on the pieces, and
    stitch the results back together (overlap is discarded at the seams).
    Args:
        model: trained model
        L: input Low-quality image
        refield: effective receptive filed of the network, 32 is enough
        min_size: min_sizeXmin_size image, e.g., 256X256 image
        sf: scale factor for super-resolution, otherwise 1
        modulo: 1 if split
    Returns:
        E: estimated result
    """
    h, w = L.size()[-2:]
    if h*w <= min_size**2:
        # Base case: pad to a multiple of `modulo`, infer, crop back.
        L = torch.nn.ReplicationPad2d((0, int(np.ceil(w/modulo)*modulo-w), 0, int(np.ceil(h/modulo)*modulo-h)))(L)
        E = model(L)
        E = E[..., :h*sf, :w*sf]
    else:
        # Quadrant slices extend past the midpoint by one receptive field
        # so each quadrant has valid context at the inner seams.
        top = slice(0, (h//2//refield+1)*refield)
        bottom = slice(h - (h//2//refield+1)*refield, h)
        left = slice(0, (w//2//refield+1)*refield)
        right = slice(w - (w//2//refield+1)*refield, w)
        Ls = [L[..., top, left], L[..., top, right], L[..., bottom, left], L[..., bottom, right]]
        if h * w <= 4*(min_size**2):
            # Quadrants are already small enough: run the model directly.
            Es = [model(Ls[i]) for i in range(4)]
        else:
            # Otherwise keep splitting recursively.
            Es = [test_split_fn(model, Ls[i], refield=refield, min_size=min_size, sf=sf, modulo=modulo) for i in range(4)]
        b, c = Es[0].size()[:2]
        # Reassemble: each quadrant contributes only its non-overlapping part.
        E = torch.zeros(b, c, sf * h, sf * w).type_as(L)
        E[..., :h//2*sf, :w//2*sf] = Es[0][..., :h//2*sf, :w//2*sf]
        E[..., :h//2*sf, w//2*sf:w*sf] = Es[1][..., :h//2*sf, (-w + w//2)*sf:]
        E[..., h//2*sf:h*sf, :w//2*sf] = Es[2][..., (-h + h//2)*sf:, :w//2*sf]
        E[..., h//2*sf:h*sf, w//2*sf:w*sf] = Es[3][..., (-h + h//2)*sf:, (-w + w//2)*sf:]
    return E
'''
# --------------------------------------------
# split (2)
# --------------------------------------------
'''
def test_split(model, L, refield=32, min_size=256, sf=1, modulo=1):
    """Public wrapper around the recursive splitter test_split_fn."""
    return test_split_fn(model, L, refield=refield, min_size=min_size, sf=sf, modulo=modulo)
'''
# --------------------------------------------
# x8 (3)
# --------------------------------------------
'''
def test_x8(model, L, modulo=1, sf=1):
    """Geometric self-ensemble: run the model on all 8 flip/rotation variants
    of L, map each output back to the original orientation, and average."""
    # Forward pass on every augmentation mode 0..7 (padded inference).
    E_list = [test_pad(model, util.augment_img_tensor4(L, mode=i), modulo=modulo, sf=sf) for i in range(8)]
    for i in range(len(E_list)):
        # Undo the forward augmentation. NOTE(review): modes 3 and 5 appear
        # not to be self-inverse in augment_img_tensor4, hence mode=8-i
        # there — confirm against utils_image.augment_img_tensor4.
        if i == 3 or i == 5:
            E_list[i] = util.augment_img_tensor4(E_list[i], mode=8 - i)
        else:
            E_list[i] = util.augment_img_tensor4(E_list[i], mode=i)
    output_cat = torch.stack(E_list, dim=0)
    # Average the 8 aligned estimates.
    E = output_cat.mean(dim=0, keepdim=False)
    return E
'''
# --------------------------------------------
# split and x8 (4)
# --------------------------------------------
'''
def test_split_x8(model, L, refield=32, min_size=256, sf=1, modulo=1):
    """Geometric self-ensemble (x8) combined with recursive patch splitting.

    Runs test_split_fn on all 8 flip/rotation variants of L, maps each
    result back to the original orientation, and averages the outputs.
    """
    E_list = [test_split_fn(model, util.augment_img_tensor4(L, mode=i), refield=refield, min_size=min_size, sf=sf, modulo=modulo) for i in range(8)]
    # Idiom fix: the original iterated `for k, i in enumerate(range(len(E_list)))`
    # where k == i always; a single index keeps this consistent with test_x8.
    for i in range(len(E_list)):
        # Undo the forward augmentation. NOTE(review): modes 3 and 5 appear
        # not to be self-inverse, hence mode=8-i there — mirrors test_x8.
        if i == 3 or i == 5:
            E_list[i] = util.augment_img_tensor4(E_list[i], mode=8 - i)
        else:
            E_list[i] = util.augment_img_tensor4(E_list[i], mode=i)
    output_cat = torch.stack(E_list, dim=0)
    E = output_cat.mean(dim=0, keepdim=False)
    return E
'''
# ^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-
# _^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^
# ^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-
'''
'''
# --------------------------------------------
# print
# --------------------------------------------
'''
# --------------------------------------------
# print model
# --------------------------------------------
def print_model(model):
    """Print a structural summary of `model` (see describe_model)."""
    print(describe_model(model))
# --------------------------------------------
# print params
# --------------------------------------------
def print_params(model):
    """Print a per-parameter statistics table for `model` (see describe_params)."""
    print(describe_params(model))
'''
# --------------------------------------------
# information
# --------------------------------------------
'''
# --------------------------------------------
# model inforation
# --------------------------------------------
def info_model(model):
    """Return the structural summary string of `model` (see describe_model)."""
    return describe_model(model)
# --------------------------------------------
# params inforation
# --------------------------------------------
def info_params(model):
    """Return the per-parameter statistics string of `model` (see describe_params)."""
    return describe_params(model)
'''
# --------------------------------------------
# description
# --------------------------------------------
'''
# --------------------------------------------
# model name and total number of parameters
# --------------------------------------------
def describe_model(model):
    """Summarize a model: class name, total parameter count, and structure.

    Unwraps torch.nn.DataParallel before inspecting.
    """
    if isinstance(model, torch.nn.DataParallel):
        model = model.module
    n_params = sum(p.numel() for p in model.parameters())
    lines = [
        '',
        'models name: {}'.format(model.__class__.__name__),
        'Params number: {}'.format(n_params),
        'Net structure:\n{}'.format(str(model)),
        '',
    ]
    return '\n'.join(lines)
# --------------------------------------------
# parameters description
# --------------------------------------------
def describe_params(model):
    """Return a table of per-parameter statistics: mean/min/max/std/shape/name.

    Unwraps torch.nn.DataParallel; entries named 'num_batches_tracked'
    (BatchNorm bookkeeping counters) are skipped.
    """
    if isinstance(model, torch.nn.DataParallel):
        model = model.module
    msg = '\n'
    # Bug fix: the header had only five placeholders for six column labels,
    # so 'param_name' was silently dropped (str.format ignores extra args)
    # and the columns were misaligned with the data rows below. The header
    # now mirrors the row format exactly.
    msg += ' | {:^6s} | {:^6s} | {:^6s} | {:^6s} | {} || {:s}'.format('mean', 'min', 'max', 'std', 'shape', 'param_name') + '\n'
    for name, param in model.state_dict().items():
        if not 'num_batches_tracked' in name:
            v = param.data.clone().float()
            msg += ' | {:>6.3f} | {:>6.3f} | {:>6.3f} | {:>6.3f} | {} || {:s}'.format(v.mean(), v.min(), v.max(), v.std(), v.shape, name) + '\n'
    return msg
if __name__ == '__main__':
    # Smoke test: push a random image through a tiny conv net using all
    # five inference modes and print the output shapes.
    class Net(torch.nn.Module):
        def __init__(self, in_channels=3, out_channels=3):
            super(Net, self).__init__()
            self.conv = torch.nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding=1)
        def forward(self, x):
            x = self.conv(x)
            return x
    # NOTE(review): torch.cuda.Event requires a CUDA build; the events are
    # created here but never recorded/used below.
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    model = Net()
    model = model.eval()
    print_model(model)
    print_params(model)
    # 401x401 deliberately exercises the pad/split paths (not a multiple of 32).
    x = torch.randn((2,3,401,401))
    torch.cuda.empty_cache()
    with torch.no_grad():
        for mode in range(5):
            y = test_mode(model, x, mode, refield=32, min_size=256, sf=1, modulo=1)
            print(y.shape)
    # run utils/utils_model.py
| 9,837 | 28.902736 | 148 | py |
SwinMR | SwinMR-main/utils/utils_regularizers.py | import torch
import torch.nn as nn
'''
# --------------------------------------------
# Kai Zhang (github: https://github.com/cszn)
# 03/Mar/2019
# --------------------------------------------
'''
# --------------------------------------------
# SVD Orthogonal Regularization
# --------------------------------------------
def regularizer_orth(m):
    """
    # ----------------------------------------
    # SVD Orthogonal Regularization
    # ----------------------------------------
    # Applies regularization to the training by performing the
    # orthogonalization technique described in the paper
    # This function is to be called by the torch.nn.Module.apply() method,
    # which applies svd_orthogonalization() to every layer of the model.
    # usage: net.apply(regularizer_orth)
    # ----------------------------------------
    """
    classname = m.__class__.__name__
    # Only convolution layers (class name contains 'Conv') are touched.
    if classname.find('Conv') != -1:
        w = m.weight.data.clone()
        c_out, c_in, f1, f2 = w.size()
        # dtype = m.weight.data.type()
        # Flatten the conv kernel into a (f1*f2*c_in, c_out) matrix for SVD.
        w = w.permute(2, 3, 1, 0).contiguous().view(f1*f2*c_in, c_out)
        # self.netG.apply(svd_orthogonalization)
        u, s, v = torch.svd(w)
        # Nudge singular values toward the fixed band [0.5, 1.5] by 1e-4.
        s[s > 1.5] = s[s > 1.5] - 1e-4
        s[s < 0.5] = s[s < 0.5] + 1e-4
        # Rebuild the weight from the adjusted singular values and restore shape.
        w = torch.mm(torch.mm(u, torch.diag(s)), v.t())
        m.weight.data = w.view(f1, f2, c_in, c_out).permute(3, 2, 0, 1) # .type(dtype)
    else:
        pass
# --------------------------------------------
# SVD Orthogonal Regularization
# --------------------------------------------
def regularizer_orth2(m):
    """
    # ----------------------------------------
    # Applies regularization to the training by performing the
    # orthogonalization technique described in the paper
    # This function is to be called by the torch.nn.Module.apply() method,
    # which applies svd_orthogonalization() to every layer of the model.
    # usage: net.apply(regularizer_orth2)
    # ----------------------------------------
    Variant of regularizer_orth: the clipping band is relative to the
    mean singular value instead of the fixed [0.5, 1.5] interval.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        w = m.weight.data.clone()
        c_out, c_in, f1, f2 = w.size()
        # dtype = m.weight.data.type()
        # Flatten the conv kernel into a (f1*f2*c_in, c_out) matrix for SVD.
        w = w.permute(2, 3, 1, 0).contiguous().view(f1*f2*c_in, c_out)
        u, s, v = torch.svd(w)
        s_mean = s.mean()
        # Nudge singular values toward [0.5*mean, 1.5*mean] by 1e-4.
        s[s > 1.5*s_mean] = s[s > 1.5*s_mean] - 1e-4
        s[s < 0.5*s_mean] = s[s < 0.5*s_mean] + 1e-4
        w = torch.mm(torch.mm(u, torch.diag(s)), v.t())
        m.weight.data = w.view(f1, f2, c_in, c_out).permute(3, 2, 0, 1) # .type(dtype)
    else:
        pass
def regularizer_clip(m):
    """
    # ----------------------------------------
    # usage: net.apply(regularizer_clip)
    # ----------------------------------------
    Nudge weights (and biases, if present) of Conv/Linear layers back
    toward the band [-1.5, 1.5] by a small step of 1e-4.
    """
    eps = 1e-4
    c_min = -1.5
    c_max = 1.5
    name = m.__class__.__name__
    if 'Conv' not in name and 'Linear' not in name:
        return
    tensors = [m.weight]
    if m.bias is not None:
        tensors.append(m.bias)
    for tensor in tensors:
        values = tensor.data.clone()
        values[values > c_max] -= eps
        values[values < c_min] += eps
        tensor.data = values
# elif classname.find('BatchNorm2d') != -1:
#
# rv = m.running_var.data.clone()
# rm = m.running_mean.data.clone()
#
# if m.affine:
# m.weight.data
# m.bias.data
| 3,416 | 31.542857 | 87 | py |
SwinMR | SwinMR-main/utils/utils_bnorm.py | import torch
import torch.nn as nn
"""
# --------------------------------------------
# Batch Normalization
# --------------------------------------------
# Kai Zhang (cskaizhang@gmail.com)
# https://github.com/cszn
# 01/Jan/2019
# --------------------------------------------
"""
# --------------------------------------------
# remove/delete specified layer
# --------------------------------------------
def deleteLayer(model, layer_type=nn.BatchNorm2d):
    ''' Kai Zhang, 11/Jan/2019.
    Recursively remove every direct child of type `layer_type` from
    `model` (and from all nested submodules).
    '''
    for name, child in list(model.named_children()):
        if isinstance(child, layer_type):
            del model._modules[name]
        deleteLayer(child, layer_type)
# --------------------------------------------
# merge bn, "conv+bn" --> "conv"
# --------------------------------------------
def merge_bn(model):
    ''' Kai Zhang, 11/Jan/2019.
    merge all 'Conv+BN' (or 'TConv+BN') into 'Conv' (or 'TConv')
    based on https://github.com/pytorch/pytorch/pull/901

    Folds the BatchNorm affine transform and running statistics into the
    preceding layer's weight/bias, then deletes the BatchNorm module.
    '''
    prev_m = None
    for k, m in list(model.named_children()):
        if (isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d)) and (isinstance(prev_m, nn.Conv2d) or isinstance(prev_m, nn.Linear) or isinstance(prev_m, nn.ConvTranspose2d)):
            w = prev_m.weight.data
            if prev_m.bias is None:
                # The folded BN shift needs a bias tensor; create a zero one.
                # NOTE(review): uses prev_m.out_channels, which nn.Linear
                # does not define — verify the Linear+BN1d path.
                zeros = torch.Tensor(prev_m.out_channels).zero_().type(w.type())
                prev_m.bias = nn.Parameter(zeros)
            b = prev_m.bias.data
            # 1/sqrt(running_var + eps): the BN normalization scale.
            invstd = m.running_var.clone().add_(m.eps).pow_(-0.5)
            # ConvTranspose2d stores weights as (in, out, kH, kW), so the
            # per-output-channel scale broadcasts over dim 1, not dim 0.
            if isinstance(prev_m, nn.ConvTranspose2d):
                w.mul_(invstd.view(1, w.size(1), 1, 1).expand_as(w))
            else:
                w.mul_(invstd.view(w.size(0), 1, 1, 1).expand_as(w))
            b.add_(-m.running_mean).mul_(invstd)
            if m.affine:
                # Fold the learned BN gamma/beta on top of the normalization.
                if isinstance(prev_m, nn.ConvTranspose2d):
                    w.mul_(m.weight.data.view(1, w.size(1), 1, 1).expand_as(w))
                else:
                    w.mul_(m.weight.data.view(w.size(0), 1, 1, 1).expand_as(w))
                b.mul_(m.weight.data).add_(m.bias.data)
            # The BN layer is now redundant: remove it.
            del model._modules[k]
        prev_m = m
        merge_bn(m)
# --------------------------------------------
# add bn, "conv" --> "conv+bn"
# --------------------------------------------
def add_bn(model):
    ''' Kai Zhang, 11/Jan/2019.
    Recursively wrap every Conv2d/ConvTranspose2d/Linear child in a
    Sequential(child, BatchNorm) pair (gamma initialized to 1).
    '''
    for k, m in list(model.named_children()):
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            b = nn.BatchNorm2d(m.out_channels, momentum=0.1, affine=True)
        elif isinstance(m, nn.Linear):
            # Bug fix: nn.Linear has no 'out_channels' attribute, so the
            # original code raised AttributeError here; a Linear output is
            # 1-D per sample, so BatchNorm1d over out_features is correct.
            b = nn.BatchNorm1d(m.out_features, momentum=0.1, affine=True)
        else:
            b = None
        if b is not None:
            b.weight.data.fill_(1)
            model._modules[k] = nn.Sequential(m, b)
        add_bn(m)
# --------------------------------------------
# tidy model after removing bn
# --------------------------------------------
def tidy_sequential(model):
    ''' Kai Zhang, 11/Jan/2019.
    Recursively unwrap single-child nn.Sequential containers, replacing
    each one by its only member (tidies a model after BN removal).
    '''
    for name, child in list(model.named_children()):
        if isinstance(child, nn.Sequential) and len(child) == 1:
            model._modules[name] = child[0]
        tidy_sequential(child)
| 3,132 | 33.054348 | 187 | py |
SwinMR | SwinMR-main/data/dataset_CCsagpi.py | '''
# -----------------------------------------
Data Loader
CC-SAG-PI d.1.1
by Jiahao Huang (j.huang21@imperial.ac.uk)
# -----------------------------------------
'''
import random
import torch.utils.data as data
import utils.utils_image as util
from utils.utils_swinmr import *
from models.select_mask import define_Mask
class DatasetCCsagpi(data.Dataset):
    """Paired L/H dataset for CC-SAG-PI (parallel-imaging) MRI.

    Produces zero-filled undersampled images (L) from fully-sampled
    ground-truth slices (H) by masking k-space, together with coil
    sensitivity maps (SM) loaded from disk.
    """
    def __init__(self, opt):
        super(DatasetCCsagpi, self).__init__()
        print('Get L/H for image-to-image mapping. Both "paths_L" and "paths_H" are needed.')
        self.opt = opt
        self.n_channels = self.opt['n_channels']
        self.patch_size = self.opt['H_size']
        self.is_noise = self.opt['is_noise']
        self.noise_level = self.opt['noise_level']
        self.noise_var = self.opt['noise_var']
        self.is_mini_dataset = self.opt['is_mini_dataset']
        self.mini_dataset_prec = self.opt['mini_dataset_prec']
        # get data path of image & sensitivity map
        self.paths_raw = util.get_image_paths(opt['dataroot_H'])
        assert self.paths_raw, 'Error: Raw path is empty.'
        # Split the raw file list by filename convention: ground-truth
        # slices contain 'imgGT', coil maps contain 'SensitivityMaps'.
        self.paths_H = []
        self.paths_SM = []
        for path in self.paths_raw:
            if 'imgGT' in path:
                self.paths_H.append(path)
            elif 'SensitivityMaps' in path:
                self.paths_SM.append(path)
            else:
                raise ValueError('Error: Unknown filename is in raw path')
        if self.is_mini_dataset:
            # NOTE(review): mini-dataset subsampling is not implemented here.
            pass
        # get mask
        self.mask = define_Mask(self.opt)
    def __getitem__(self, index):
        # Returns a dict: undersampled image L, ground truth H, the k-space
        # mask, the sensitivity map SM, the source path and file stem.
        mask = self.mask
        is_noise = self.is_noise
        noise_level = self.noise_level
        noise_var = self.noise_var
        # get gt image and sensitivity map
        H_path = self.paths_H[index]
        SM_path = self.paths_SM[index]
        img_H, Sensitivity_Map = self.load_images(H_path, SM_path, isSM=True)
        # get zf image
        img_L = self.undersample_kspace(img_H, mask, is_noise, noise_level, noise_var)
        # get image information
        image_name_ext = os.path.basename(H_path)
        img_name, ext = os.path.splitext(image_name_ext)
        # ------------------------------------
        # if train, get L/H patch pair
        # ------------------------------------
        if self.opt['phase'] == 'train':
            H, W, _ = img_H.shape
            # --------------------------------
            # randomly crop the patch
            # --------------------------------
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            patch_L = img_L[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size, :]
            patch_H = img_H[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size, :]
            patch_SM = Sensitivity_Map[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size, :]
            # --------------------------------
            # augmentation - flip and/or rotate
            # --------------------------------
            # Same random mode for L/H/SM keeps the triplet aligned.
            mode = random.randint(0, 7)
            patch_L, patch_H, patch_SM= util.augment_img(patch_L, mode=mode), \
                                        util.augment_img(patch_H, mode=mode), \
                                        util.augment_img(patch_SM, mode=mode)
            # --------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # --------------------------------
            img_L, img_H, Sensitivity_Map = util.float2tensor3(patch_L), \
                                            util.float2tensor3(patch_H), \
                                            util.float2tensor3(patch_SM)
        else:
            # --------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # --------------------------------
            # NOTE(review): in the non-train phase the sensitivity map is
            # returned as a raw numpy array (not converted to a tensor).
            img_L, img_H = util.float2tensor3(img_L), util.float2tensor3(img_H)
        return {'L': img_L, 'H': img_H, 'H_path': H_path, 'mask': mask, 'SM': Sensitivity_Map, 'img_info': img_name}
    def __len__(self):
        return len(self.paths_H)
    def load_images(self, H_path, SM_path, isSM=True):
        # Load a ground-truth slice as (H, W, 1) float32 scaled to [0, 1];
        # optionally also load and rescale its sensitivity map.
        # load GT
        gt = np.load(H_path).astype(np.float32)
        gt = np.reshape(gt, (gt.shape[0], gt.shape[1], 1))
        # # 0 ~ 1
        gt = (gt - gt.min()) / (gt.max() - gt.min())
        # load SM
        if isSM == True:
            # Keep slice index 1 of the last axis of the stored map.
            sm = np.load(SM_path).astype(np.float32)[:, :, :, 1]
            # sm = np.reshape(sm[:, :, :, 1], (256, 256, 12))
            # 0 ~ 1
            sm = (sm - sm.min()) / (sm.max() - sm.min())
            return gt, sm
        else:
            return gt, 0
    def undersample_kspace(self, x, mask, is_noise, noise_level, noise_var):
        # Simulate undersampling: FFT the image, apply the centred k-space
        # mask (optionally adding noise), inverse FFT, and keep the
        # magnitude image as (H, W, 1).
        fft = fft2(x[:, :, 0])
        fft = fftshift(fft)
        fft = fft * mask
        if is_noise:
            fft = fft + self.generate_gaussian_noise(fft, noise_level, noise_var)
        fft = ifftshift(fft)
        xx = ifft2(fft)
        xx = np.abs(xx)
        x = xx[:, :, np.newaxis]
        return x
def generate_gaussian_noise(self, x, noise_level, noise_var):
spower = np.sum(x ** 2) / x.size
npower = noise_level / (1 - noise_level) * spower
noise = np.random.normal(0, noise_var ** 0.5, x.shape) * np.sqrt(npower)
return noise | 5,361 | 34.045752 | 116 | py |
SwinMR | SwinMR-main/data/dataset_CCsagnpi.py | '''
# -----------------------------------------
Data Loader
CC-SAG-NPI d.1.1
by Jiahao Huang (j.huang21@imperial.ac.uk)
# -----------------------------------------
'''
import random
import torch.utils.data as data
import utils.utils_image as util
from utils.utils_swinmr import *
from models.select_mask import define_Mask
class DatasetCCsagnpi(data.Dataset):
    """Paired L/H dataset for CC-SAG-NPI (no parallel imaging) MRI.

    Produces zero-filled undersampled images (L) from fully-sampled
    ground-truth slices (H) by masking k-space. No sensitivity maps are
    loaded; the 'SM' entry of each sample is a placeholder (0).
    """
    def __init__(self, opt):
        super(DatasetCCsagnpi, self).__init__()
        print('Get L/H for image-to-image mapping. Both "paths_L" and "paths_H" are needed.')
        self.opt = opt
        self.n_channels = self.opt['n_channels']
        self.patch_size = self.opt['H_size']
        self.is_noise = self.opt['is_noise']
        self.noise_level = self.opt['noise_level']
        self.noise_var = self.opt['noise_var']
        self.is_mini_dataset = self.opt['is_mini_dataset']
        self.mini_dataset_prec = self.opt['mini_dataset_prec']
        # get data path of image & sensitivity map
        self.paths_raw = util.get_image_paths(opt['dataroot_H'])
        assert self.paths_raw, 'Error: Raw path is empty.'
        # Split the raw file list by filename convention; sensitivity maps
        # may exist on disk but are not used by this dataset.
        self.paths_H = []
        self.paths_SM = []
        for path in self.paths_raw:
            if 'imgGT' in path:
                self.paths_H.append(path)
            elif 'SensitivityMaps' in path:
                self.paths_SM.append(path)
            else:
                raise ValueError('Error: Unknown filename is in raw path')
        if self.is_mini_dataset:
            # NOTE(review): mini-dataset subsampling is not implemented here.
            pass
        # get mask
        self.mask = define_Mask(self.opt)
    def __getitem__(self, index):
        # Returns a dict: undersampled image L, ground truth H, the k-space
        # mask, a placeholder SM (0), the source path and file stem.
        mask = self.mask
        is_noise = self.is_noise
        noise_level = self.noise_level
        noise_var = self.noise_var
        # get gt image
        H_path = self.paths_H[index]
        img_H, _ = self.load_images(H_path, 0, isSM=False)
        # get zf image
        img_L = self.undersample_kspace(img_H, mask, is_noise, noise_level, noise_var)
        # get image information
        image_name_ext = os.path.basename(H_path)
        img_name, ext = os.path.splitext(image_name_ext)
        # ------------------------------------
        # if train, get L/H patch pair
        # ------------------------------------
        if self.opt['phase'] == 'train':
            H, W, _ = img_H.shape
            # --------------------------------
            # randomly crop the patch
            # --------------------------------
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            patch_L = img_L[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size, :]
            patch_H = img_H[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size, :]
            # --------------------------------
            # augmentation - flip and/or rotate
            # --------------------------------
            # Same random mode for L and H keeps the pair aligned.
            mode = random.randint(0, 7)
            patch_L, patch_H = util.augment_img(patch_L, mode=mode), util.augment_img(patch_H, mode=mode)
            # --------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # --------------------------------
            img_L, img_H = util.float2tensor3(patch_L), util.float2tensor3(patch_H)
        else:
            # --------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # --------------------------------
            img_L, img_H = util.float2tensor3(img_L), util.float2tensor3(img_H)
        # NOTE(review): '_' here is the 0 placeholder from load_images.
        return {'L': img_L, 'H': img_H, 'H_path': H_path, 'mask': mask, 'SM': _, 'img_info': img_name}
    def __len__(self):
        return len(self.paths_H)
    def load_images(self, H_path, SM_path, isSM=True):
        # Load a ground-truth slice as (H, W, 1) float32 scaled to [0, 1];
        # optionally also load and rescale its sensitivity map.
        # load GT
        gt = np.load(H_path).astype(np.float32)
        gt = np.reshape(gt, (gt.shape[0], gt.shape[1], 1))
        # # 0 ~ 1
        gt = (gt - gt.min()) / (gt.max() - gt.min())
        # load SM
        if isSM == True:
            # Keep slice index 1 of the last axis of the stored map.
            sm = np.load(SM_path).astype(np.float32)[:, :, :, 1]
            # sm = np.reshape(sm[:, :, :, 1], (256, 256, 12))
            # 0 ~ 1
            sm = (sm - sm.min()) / (sm.max() - sm.min())
            return gt, sm
        else:
            return gt, 0
    def undersample_kspace(self, x, mask, is_noise, noise_level, noise_var):
        # Simulate undersampling: FFT the image, apply the centred k-space
        # mask (optionally adding noise), inverse FFT, and keep the
        # magnitude image as (H, W, 1).
        fft = fft2(x[:, :, 0])
        fft = fftshift(fft)
        fft = fft * mask
        if is_noise:
            fft = fft + self.generate_gaussian_noise(fft, noise_level, noise_var)
        fft = ifftshift(fft)
        xx = ifft2(fft)
        xx = np.abs(xx)
        x = xx[:, :, np.newaxis]
        return x
def generate_gaussian_noise(self, x, noise_level, noise_var):
spower = np.sum(x ** 2) / x.size
npower = noise_level / (1 - noise_level) * spower
noise = np.random.normal(0, noise_var ** 0.5, x.shape) * np.sqrt(npower)
return noise | 4,898 | 32.554795 | 105 | py |
ShiftCNN | ShiftCNN-master/shiftcnn_quantization.py | import sys
import os
import numpy as np
import pickle
import matplotlib.pyplot as plt
#
N = 2   # number of additive power-of-two terms per weight ("shifts")
B = 4   # codebook bit-width; exponents beyond 2**(B-1)-1 are saturated to zero
#
#model = "squeezenet_v1.1"
model = "ResNet-50"
SOURCE_PATH = os.environ["HOME"]+"/github/caffe/models/"+model+"/"
prototxt = SOURCE_PATH+"train_val.prototxt"
source = SOURCE_PATH+model+".caffemodel"
# Output: quantized caffemodel tagged with the N/B settings.
qtarget = SOURCE_PATH+model+"_N"+str(N)+"_B"+str(B)+".caffemodel"
caffe_root = os.environ["CAFFE_ROOT"]
os.chdir(caffe_root)
# NOTE(review): Python 2 print statement — this script is Python 2 only.
print caffe_root
sys.path.insert(0, caffe_root + 'python')
import caffe
caffe.set_mode_cpu()
net = caffe.Net(prototxt, source, caffe.TEST)
layers = net.params.keys()
linestyles = ['--', '-']
# Quantize each layer's weights to a sum of N signed power-of-two terms
# (ShiftCNN-style), greedily approximating the normalized residual.
for idx, layer in enumerate(layers):
    #if not('bn' in layer) and not('scale' in layer): # do not include batch normalization and scaling layers in ResNets
        wT= 0.0
        w = net.params[layer][0].data
        wMax = np.max(np.abs(w))
        r = w/wMax # normalize
        for n in range(0, N):
            qSgn = np.sign(r)
            # Round the residual magnitude to the nearest power of two
            # (2^(k+log2(1.5)) is the midpoint between 2^k and 2^(k+1)).
            qLog = np.log2(abs(r+1e-32))
            qIdx = np.floor(qLog)
            bLog = qIdx + np.log2(1.5)
            bIdx = qLog > bLog # border condition
            qIdx[bIdx] = qIdx[bIdx] + 1.0
            q = qSgn * 2**(qIdx)
            qIdxMem = qSgn * (-(n+1)-qIdx+2)
            # Exponents outside the B-bit codebook are replaced by zero.
            sIdx = (2-(n+1)-qIdx) > (2**(B-1)-1) # saturation condition
            q[sIdx] = 0
            qIdxMem[sIdx] = 0
            zIdx = q!=0
            # Accumulate the quantized term and subtract it from the residual.
            wT += q
            r -= q
        # Write the de-normalized quantized weights back into the net.
        np.copyto(net.params[layer][0].data, wT*wMax)
net.save(qtarget)
| 1,514 | 28.134615 | 120 | py |
agd | agd-main/main.py | import sys
import os
import math
import argparse
import pickle
import torch
import importlib
from tqdm import tqdm
from agd import AGD
from architecture.fcn import *
from architecture.vgg import *
from architecture.resnet import *
############################################################################################
######################################### Parse args #######################################
############################################################################################
parser = argparse.ArgumentParser()
parser.add_argument('--arch', type=str, default='fcn', choices=['fcn', 'vgg', 'resnet18', 'resnet50'] )
parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10', 'cifar100', 'mnist', 'imagenet'] )
parser.add_argument('--loss', type=str, default='mse', choices=['mse', 'xent'] )
parser.add_argument('--train_bs', type=int, default=128 )
parser.add_argument('--test_bs', type=int, default=128 )
parser.add_argument('--epochs', type=int, default=200 )
parser.add_argument('--depth', type=int, default=10 )
parser.add_argument('--width', type=int, default=256 )
parser.add_argument('--distribute', action='store_true' )
parser.add_argument('--cpu', action='store_true' )
parser.add_argument('--gain', type=float, default=1.0 )
args = parser.parse_args()
############################################################################################
#################################### Distributed setup #####################################
############################################################################################
local_rank = 0
if args.distribute:
    # Rank/world-size come from the OpenMPI environment (launched via mpirun).
    world_size = int(os.getenv('OMPI_COMM_WORLD_SIZE'))
    global_rank = int(os.getenv('OMPI_COMM_WORLD_RANK'))
    local_rank = global_rank % torch.cuda.device_count()
    torch.distributed.init_process_group(backend='nccl', rank=global_rank, world_size=world_size)
    print(f'GPU {global_rank} reporting in. Local rank: {local_rank}. CPU threads: {torch.get_num_threads()}.')
    torch.distributed.barrier()
    # Silence progress bars and stdout on all non-zero ranks.
    if global_rank > 0:
        tqdm = lambda x, total : x
        sys.stdout = open(os.devnull, 'w')
############################################################################################
####################################### Print args #########################################
############################################################################################
print("{: <39} {: <20}".format("\nArgument", "Value"))
print("{: <39} {: <20}".format(*["=============================="]*2))
for arg in vars(args):
    print("{: <39} {: <20}".format(arg, getattr(args, arg)))
print("\nNote: depth and width are only used for fully-connected networks.")
############################################################################################
######################################### Get data #########################################
############################################################################################
print("\nGetting data...")
print("==================================="*2)
# Each data.<name> module exposes getData() -> (trainset, testset, dims).
data_module = importlib.import_module("data."+args.dataset)
trainset, testset, input_dim, output_dim = data_module.getData()
if args.distribute:
    # Per-rank samplers; the global batch size is divided across ranks.
    train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
    test_sampler = torch.utils.data.distributed.DistributedSampler(testset, shuffle=False, drop_last=True)
    train_loader = torch.utils.data.DataLoader( trainset,
                                                batch_size=int(args.train_bs/world_size),
                                                shuffle=False,
                                                num_workers=8,
                                                pin_memory=True,
                                                sampler=train_sampler )
    test_loader = torch.utils.data.DataLoader( testset,
                                               batch_size=int(args.test_bs/world_size),
                                               shuffle=False,
                                               num_workers=8,
                                               pin_memory=True,
                                               sampler=test_sampler )
else:
    train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.train_bs, shuffle=True, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_bs, shuffle=False, pin_memory=True)
############################################################################################
##################################### Set architecture #####################################
############################################################################################
if args.arch == 'fcn':
    net = FCN(args.depth, args.width, input_dim, output_dim)
elif args.dataset == 'imagenet' and args.arch == 'resnet50':
    net = resnet50(num_classes=1000)
elif 'cifar' not in args.dataset:
    raise Exception("That network only works with CIFAR.")
elif args.arch == 'vgg':
    net = VGG16(output_dim)
elif args.arch == 'resnet18':
    net = PreActResNet18(output_dim)
elif args.arch == 'resnet50':
    net = PreActResNet50(output_dim)
if not args.cpu:
    net = net.cuda(local_rank)
# AGD initializes the weights itself (orthogonal, scaled).
agd = AGD(net, args.gain)
agd.init_weights()
if args.distribute:
    net = torch.nn.parallel.DistributedDataParallel(net, device_ids=[local_rank])
print("{: <39} {: <20}".format("\nLayer", "Shape"))
print("{: <39} {: <20}".format(*["=============================="]*2))
for name, p in net.named_parameters():
    print("{: <39} {: <20}".format(name, str(list(p.shape))))
############################################################################################
######################################## Define loop #######################################
############################################################################################
def loop(net, dataloader, optim, train):
    """Run one epoch of training (train=True) or evaluation (train=False).

    Uses the module-level globals args, tqdm and (when args.distribute)
    world_size and local_rank. `optim` is the AGD optimizer, or None when
    evaluating. Returns per-minibatch means of (loss, accuracy, AGD log term).
    """
    net.train() if train else net.eval()
    num_minibatches = len(dataloader)
    epoch_loss = 0
    epoch_acc = 0
    epoch_log = 0
    for data, target in tqdm(dataloader, total=num_minibatches):
        if not args.cpu:
            data, target = data.cuda(local_rank), target.cuda(local_rank)
        output = net(data)
        if args.loss == 'mse':
            # MSE against one-hot targets scaled by sqrt(num_classes).
            onehot = torch.nn.functional.one_hot(target, num_classes=output.shape[1]).float()
            onehot *= math.sqrt(output.shape[1])
            loss = (output-onehot).square().mean()
        elif args.loss == 'xent':
            # Cross-entropy written out via logsumexp for stability.
            error = - output[range(target.shape[0]),target] + output.logsumexp(dim=1)
            loss = error.mean()
        if train: loss.backward()
        acc = (output.argmax(dim=1) == target).float().mean()
        if args.distribute:
            # Average metrics across ranks (sum then divide by world size).
            torch.distributed.all_reduce(loss, torch.distributed.ReduceOp.SUM)
            torch.distributed.all_reduce(acc, torch.distributed.ReduceOp.SUM)
            loss /= world_size
            acc /= world_size
        if train:
            # AGD's step() returns its log step-size term for monitoring.
            epoch_log += optim.step()
            net.zero_grad()
        epoch_acc += acc.item()
        epoch_loss += loss.item()
    return epoch_loss / num_minibatches, epoch_acc / num_minibatches, epoch_log / num_minibatches
############################################################################################
###################################### Train network #######################################
############################################################################################
results = {}
results['log_list' ] = []
results['train_loss_list'] = []
results['test_loss_list' ] = []
results['train_acc_list' ] = []
results['test_acc_list' ] = []
os.makedirs('logs', exist_ok=True)
# Log filename encodes every CLI argument as key:value pairs.
filename = ""
for arg in vars(args):
    filename += arg + ':' + str(getattr(args,arg)) + '-'
filename = os.path.join('logs', filename[:-1] + '.pickle')
for epoch in range(args.epochs):
    print("\nEpoch", epoch)
    print("==================================="*2)
    # Reshuffle the distributed sampler each epoch for a fresh permutation.
    if args.distribute: train_loader.sampler.set_epoch(epoch)
    train_loss, train_acc, log = loop(net, train_loader, agd, train=True )
    test_loss, test_acc, _ = loop(net, test_loader, None, train=False )
    print("Log term: \t", log )
    print("Train loss:\t", train_loss )
    print("Test loss: \t", test_loss )
    print("Train acc: \t", train_acc )
    print("Test acc: \t", test_acc )
    results['log_list' ].append( log )
    results['train_loss_list'].append( train_loss )
    results['test_loss_list' ].append( test_loss )
    results['train_acc_list' ].append( train_acc )
    results['test_acc_list' ].append( test_acc )
    # Checkpoint the metric history after every epoch.
    pickle.dump(results, open( filename, "wb" ) )
| 8,893 | 41.966184 | 122 | py |
agd | agd-main/agd.py | import math
import torch
from torch.optim.optimizer import Optimizer
from torch.nn.init import orthogonal_
def singular_value(p):
    """Target singular-value scale for a weight tensor p.

    sqrt(fan_out/fan_in) for a 2-D matrix; conv kernels (4-D) are further
    divided by sqrt(kernel_height * kernel_width).
    """
    fan_out, fan_in = p.shape[0], p.shape[1]
    scale = math.sqrt(fan_out / fan_in)
    if p.dim() == 4:
        kh, kw = p.shape[2], p.shape[3]
        scale /= math.sqrt(kh * kw)
    return scale
class AGD(Optimizer):
    """Automatic Gradient Descent optimizer for bias-free networks.

    Every update is scaled per-layer by singular_value(p) and globally by
    a log-damped step size derived from the aggregated gradient norms.
    `gain` is an extra user-controlled step-size multiplier.
    """
    def __init__(self, net, gain=1.0):
        self.net = net
        # One entry per weight tensor; used to normalize G and the step.
        self.depth = len(list(net.parameters()))
        self.gain = gain
        # 1-D parameters (biases, norm scales) are unsupported by design.
        for p in self.net.parameters():
            if p.dim() == 1: raise Exception("Biases are not supported.")
        super().__init__(net.parameters(), defaults=dict())
    @torch.no_grad()
    def init_weights(self):
        """Orthogonal init (per 2-D kernel slice for convs), scaled by the
        layer's target singular value."""
        for p in self.net.parameters():
            if p.dim() == 2: orthogonal_(p)
            if p.dim() == 4:
                # Orthogonalize each (out, in) slice at every kernel position.
                for kx in range(p.shape[2]):
                    for ky in range(p.shape[3]):
                        orthogonal_(p[:,:,kx,ky])
            p *= singular_value(p)
    @torch.no_grad()
    def step(self):
        """Apply one AGD update; returns the log step-size term."""
        # Aggregate scaled gradient norms across all layers.
        G = 0
        for p in self.net.parameters():
            G += singular_value(p) * p.grad.norm(dim=(0,1)).sum()
        G /= self.depth
        # Log-damped step size shared by every layer.
        log = math.log(0.5 * (1 + math.sqrt(1 + 4*G)))
        for p in self.net.parameters():
            # Normalize the gradient per (0,1)-slice; nan_to_num guards
            # against division by a zero gradient norm.
            factor = singular_value(p) / p.grad.norm(dim=(0,1), keepdim=True)
            p -= self.gain * log / self.depth * torch.nan_to_num(factor) * p.grad
        return log
| 1,412 | 26.173077 | 81 | py |
agd | agd-main/architecture/fcn.py | import math
import torch.nn as nn
import torch.nn.functional as F
class FCN(nn.Module):
    """Fully connected ReLU network with `depth` linear layers of width
    `width`; each hidden activation is scaled by sqrt(2) (He-style gain)."""

    def __init__(self, depth, width, input_dim, output_dim, bias=False):
        super(FCN, self).__init__()
        self.initial = nn.Linear(input_dim, width, bias=bias)
        hidden = [nn.Linear(width, width, bias=bias) for _ in range(depth - 2)]
        self.layers = nn.ModuleList(hidden)
        self.final = nn.Linear(width, output_dim, bias=bias)

    def forward(self, x):
        gain = math.sqrt(2)
        # Flatten everything after the batch dimension.
        x = x.view(x.shape[0], -1)
        x = gain * F.relu(self.initial(x))
        for hidden_layer in self.layers:
            x = gain * F.relu(hidden_layer(x))
        return self.final(x)
| 718 | 28.958333 | 97 | py |
agd | agd-main/architecture/resnet.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from typing import Any, Callable, List, Optional, Type, Union
import torch
import torch.nn as nn
from torch import Tensor
### For CIFAR-10
# Factory helpers for the standard pre-activation ResNet depths on CIFAR.
# Stage block counts follow He et al., "Identity Mappings in Deep Residual
# Networks"; 18/34 use the basic block, 50+ the bottleneck block.
def PreActResNet18(output_dim): return PreActResNet(PreActBlock, [2,2,2,2], output_dim)
def PreActResNet34(output_dim): return PreActResNet(PreActBlock, [3,4,6,3], output_dim)
def PreActResNet50(output_dim): return PreActResNet(PreActBottleneck, [3,4,6,3], output_dim)
def PreActResNet101(output_dim): return PreActResNet(PreActBottleneck, [3,4,23,3], output_dim)
def PreActResNet152(output_dim): return PreActResNet(PreActBottleneck, [3,8,36,3], output_dim)
class PreActBlock(nn.Module):
    '''Pre-activation version of the BasicBlock (BN -> ReLU -> conv).'''
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes, affine=False)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, affine=False)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        # A 1x1 projection is only created when the identity path cannot
        # match the residual branch's shape.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False)
            )

    def forward(self, x):
        pre = F.relu(self.bn1(x))
        # The projection (when present) consumes the *pre-activated* input.
        identity = self.shortcut(pre) if hasattr(self, 'shortcut') else x
        residual = self.conv1(pre)
        residual = self.conv2(F.relu(self.bn2(residual)))
        return residual + identity
class PreActBottleneck(nn.Module):
    '''Pre-activation version of the Bottleneck block (1x1 -> 3x3 -> 1x1),
    expanding the channel count by `expansion` on the way out.'''
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes, affine=False)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, affine=False)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes, affine=False)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes,
                               kernel_size=1, bias=False)
        # Projection shortcut only when shape/stride makes identity impossible.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False)
            )

    def forward(self, x):
        pre = F.relu(self.bn1(x))
        identity = self.shortcut(pre) if hasattr(self, 'shortcut') else x
        h = self.conv1(pre)
        h = self.conv2(F.relu(self.bn2(h)))
        h = self.conv3(F.relu(self.bn3(h)))
        return h + identity
class PreActResNet(nn.Module):
    """Pre-activation ResNet for 32x32 (CIFAR-style) inputs: a stem conv,
    four stages of `block`s, 4x4 average pooling and a bias-free head."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(PreActResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes, bias=False)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest use stride 1.
        stage = []
        for s in [stride] + [1] * (num_blocks - 1):
            stage.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*stage)

    def forward(self, x):
        h = self.conv1(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        return self.linear(h.view(h.size(0), -1))
### For ImageNet
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """3x3 convolution with padding equal to the dilation (keeps spatial
    size at stride 1); never carries a bias."""
    options = dict(
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        bias=False,
        dilation=dilation,
    )
    return nn.Conv2d(in_planes, out_planes, **options)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """1x1 (pointwise) convolution without bias."""
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
    return conv
class BasicBlock(nn.Module):
    """Post-activation two-conv residual block (ResNet-18/34 style) with
    optionally non-affine normalisation layers."""
    expansion: int = 1

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        affine=False
    ) -> None:
        super().__init__()
        self.affine = affine
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError("BasicBlock only supports groups=1 and base_width=64")
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and the caller-supplied downsample) perform any spatial
        # downsampling when stride != 1.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes, affine=self.affine)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes, affine=affine)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """Post-activation bottleneck block (1x1 -> 3x3 -> 1x1). The stride sits
    on the 3x3 conv (ResNet V1.5 variant, per the torchvision layout), which
    improves accuracy over striding the first 1x1 conv."""
    expansion: int = 4

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        affine=False
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Inner width scales with base_width and groups (ResNeXt-style).
        width = int(planes * (base_width / 64.0)) * groups
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width, affine=affine)
        # conv2 (and the caller-supplied downsample) downsample when stride != 1.
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width, affine=affine)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion, affine=affine)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """Torchvision-style ResNet for ImageNet-sized inputs, extended with
    `bias` (head bias) and `affine` (learnable norm parameters) switches.

    Args:
        block: residual block class (BasicBlock or Bottleneck).
        layers: number of blocks in each of the four stages.
        num_classes: size of the final classification layer.
        zero_init_residual: zero the last norm layer of each block so the
            residual branch starts as identity.
        groups / width_per_group: ResNeXt-style grouping options.
        replace_stride_with_dilation: per-stage flag to trade stride for
            dilation (3 entries, one per downsampling stage).
        norm_layer: normalisation layer factory; defaults to BatchNorm2d.
        bias: whether the final fully connected layer has a bias.
        affine: whether norm layers carry learnable affine parameters.
    """
    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        num_classes: int = 1000,
        zero_init_residual: bool = False,
        groups: int = 1,
        width_per_group: int = 64,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        bias=False,
        affine=False
    ) -> None:
        self.bias=bias
        self.affine=affine
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError(
                "replace_stride_with_dilation should be None "
                f"or a 3-element tuple, got {replace_stride_with_dilation}"
            )
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7 stride-2 conv + 3x3 stride-2 max-pool (4x downsampling).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes, affine=self.affine)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes, bias=self.bias)
        self.out_dim = num_classes
        # He initialisation for convs; norm affine params (when enabled)
        # start as identity (weight=1, bias=0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                if self.affine:
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck) and m.bn3.weight is not None:
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock) and m.bn2.weight is not None:
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]
    def _make_layer(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        planes: int,
        blocks: int,
        stride: int = 1,
        dilate: bool = False,
    ) -> nn.Sequential:
        """Build one stage: the first block may downsample (stride/projection),
        the remaining `blocks - 1` keep the resolution."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade spatial stride for dilation (keeps resolution).
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut to match shape of the residual branch.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion, affine=False),
            )
        layers = []
        layers.append(
            block(
                self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer
            )
        )
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    groups=self.groups,
                    base_width=self.base_width,
                    dilation=self.dilation,
                    norm_layer=norm_layer,
                    affine=self.affine
                )
            )
        return nn.Sequential(*layers)
    def _forward_impl(self, x: Tensor) -> Tensor:
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
    def forward(self, x: Tensor) -> Tensor:
        """Return class logits of shape (batch, num_classes)."""
        return self._forward_impl(x)
def _resnet(
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    weights: Optional,
    progress: bool,
    bias=False,
    affine=False,
    num_classes=10,
) -> ResNet:
    """Instantiate a ResNet. `weights` and `progress` are accepted for
    torchvision API compatibility but ignored here (no pretrained loading)."""
    return ResNet(block, layers, bias=bias, affine=affine, num_classes=num_classes)
# Depth-specific builders. For consistency with resnet18/resnet50, the
# builders that previously relied on _resnet's implicit default now expose
# `num_classes` as a trailing keyword argument (default 10, unchanged
# behaviour for existing callers).
def resnet18(num_classes, weights: Optional = None, progress: bool = True, bias=False, affine=False) -> ResNet:
    """ResNet-18 (BasicBlock, [2, 2, 2, 2])."""
    return _resnet(BasicBlock, [2, 2, 2, 2], weights, progress, bias=bias, affine=affine, num_classes=num_classes)
def resnet34(weights: Optional = None, progress: bool = True, bias=False, affine=False, num_classes=10) -> ResNet:
    """ResNet-34 (BasicBlock, [3, 4, 6, 3])."""
    return _resnet(BasicBlock, [3, 4, 6, 3], weights, progress, bias=bias, affine=affine, num_classes=num_classes)
def resnet50(num_classes,weights: Optional = None, progress: bool = True, bias=False, affine=False) -> ResNet:
    """ResNet-50 (Bottleneck, [3, 4, 6, 3])."""
    return _resnet(Bottleneck, [3, 4, 6, 3], weights, progress, bias=bias, affine=affine, num_classes=num_classes)
def resnet101(weights: Optional = None, progress: bool = True, bias=False, affine=False, num_classes=10) -> ResNet:
    """ResNet-101 (Bottleneck, [3, 4, 23, 3])."""
    return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, bias=bias, affine=affine, num_classes=num_classes)
def resnet152(weights: Optional = None, progress: bool = True, bias=False, affine=False, num_classes=10) -> ResNet:
    """ResNet-152 (Bottleneck, [3, 8, 36, 3])."""
    return _resnet(Bottleneck, [3, 8, 36, 3], weights, progress, bias=bias, affine=affine, num_classes=num_classes)
| 14,531 | 35.512563 | 118 | py |
agd | agd-main/architecture/vgg.py | import torch.nn as nn
# VGG configurations for 32x32 inputs: integers are conv output channels,
# 'M' is a 2x2 max-pool. Five 'M's reduce 32x32 to 1x1 before the classifier.
def VGG11(output_dim): return VGG_CIFAR([64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], output_dim)
def VGG13(output_dim): return VGG_CIFAR([64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], output_dim)
def VGG16(output_dim): return VGG_CIFAR([64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], output_dim)
def VGG19(output_dim): return VGG_CIFAR([64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], output_dim)
class VGG_CIFAR(nn.Module):
    """VGG-style network for CIFAR built from a config list: integers add a
    conv/BN/ReLU group, 'M' adds a 2x2 max-pool. The classifier assumes the
    feature map flattens to 512 values (i.e. 512 channels at 1x1)."""

    def __init__(self, vgg_cfg, output_dim=10, bias=False, affine=False):
        super(VGG_CIFAR, self).__init__()
        self.bias = bias
        self.affine = affine
        self.features = self._make_layers(vgg_cfg)
        self.classifier = nn.Linear(512, output_dim, bias=self.bias)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _make_layers(self, cfg):
        modules = []
        channels = 3
        for spec in cfg:
            if spec == 'M':
                modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                modules.extend([
                    nn.Conv2d(channels, spec, kernel_size=3, padding=1, bias=self.bias),
                    nn.BatchNorm2d(spec, affine=self.affine),
                    nn.ReLU(inplace=True),
                ])
                channels = spec
        # 1x1/stride-1 average pool: a no-op kept for parity with common
        # VGG-for-CIFAR reference implementations.
        modules.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*modules)
| 1,587 | 44.371429 | 156 | py |
agd | agd-main/latex/algorithm/agd.py | import math
import torch
from torch.nn.init import orthogonal_
def singular_value(p):
    """AGD scaling factor: sqrt(fan_out / fan_in), additionally divided by
    sqrt(kernel area) for 4-d convolution weights."""
    scale = math.sqrt(p.shape[0] / p.shape[1])
    if p.dim() != 4:
        return scale
    return scale / math.sqrt(p.shape[2] * p.shape[3])
class AGD:
    """Minimal AGD listing (paper version): initialisation and update are
    fused into the constructor + a single step method; no Optimizer base."""
    @torch.no_grad()
    def __init__(self, net, gain=1.0):
        self.net = net
        self.depth = len(list(net.parameters()))
        self.gain = gain
        # Orthogonal init scaled by the singular-value factor; conv kernels
        # are orthogonalised per spatial position.
        for p in net.parameters():
            if p.dim() == 1: raise Exception("Biases are not supported.")
            if p.dim() == 2: orthogonal_(p)
            if p.dim() == 4:
                for kx in range(p.shape[2]):
                    for ky in range(p.shape[3]):
                        orthogonal_(p[:,:,kx,ky])
            p *= singular_value(p)
    @torch.no_grad()
    def step(self):
        # Depth-averaged, singular-value-scaled gradient summary.
        G = 0
        for p in self.net.parameters():
            G += singular_value(p) * p.grad.norm(dim=(0,1)).sum()
        G /= self.depth
        # Automatic step-size term derived from G.
        log = math.log(0.5 * (1 + math.sqrt(1 + 4*G)))
        for p in self.net.parameters():
            # NOTE(review): unlike the packaged agd.py, this listing has no
            # nan_to_num guard, so a zero gradient norm produces inf/nan.
            factor = singular_value(p) / p.grad.norm(dim=(0,1), keepdim=True)
            p -= self.gain * log / self.depth * factor * p.grad
| 1,174 | 26.97619 | 77 | py |
agd | agd-main/data/cifar100.py | from torchvision import datasets, transforms
def getData():
    """Return (trainset, testset, input_dim, output_dim) for CIFAR-100 with
    standard crop/flip augmentation on the training split."""
    mean = (0.5071, 0.4867, 0.4408)
    std = (0.2675, 0.2565, 0.2761)
    normalize = transforms.Normalize(mean, std)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    transform_test = transforms.Compose([transforms.ToTensor(), normalize])
    trainset = datasets.CIFAR100('./data', train=True, download=True, transform=transform_train)
    testset = datasets.CIFAR100('./data', train=False, download=True, transform=transform_test)
    return trainset, testset, 3 * 32 * 32, 100
| 764 | 27.333333 | 96 | py |
agd | agd-main/data/cifar10.py | from torchvision import datasets, transforms
def getData():
    """Return (trainset, testset, input_dim, output_dim) for CIFAR-10 with
    standard crop/flip augmentation on the training split."""
    mean = (0.4914, 0.4822, 0.4465)
    std = (0.2023, 0.1994, 0.2010)
    normalize = transforms.Normalize(mean, std)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    transform_test = transforms.Compose([transforms.ToTensor(), normalize])
    trainset = datasets.CIFAR10('./data', train=True, download=True, transform=transform_train)
    testset = datasets.CIFAR10('./data', train=False, download=True, transform=transform_test)
    return trainset, testset, 3 * 32 * 32, 10
| 761 | 27.222222 | 95 | py |
agd | agd-main/data/imagenet.py | import os
from torchvision import datasets, transforms
def getData():
    """Return (trainset, testset, input_dim, output_dim) for ImageNet.

    Expects the IMAGENET_PATH environment variable to point at a directory
    containing `train/` and `val/` ImageFolder layouts.

    Raises:
        RuntimeError: if IMAGENET_PATH is not set (previously this surfaced
        as an opaque TypeError inside os.path.join).
    """
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)
    root = os.getenv('IMAGENET_PATH')
    if root is None:
        raise RuntimeError("IMAGENET_PATH environment variable is not set")
    traindir = os.path.join(root, "train")
    valdir = os.path.join(root, "val")
    trainset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ]))
    testset = datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ]))
    input_dim = 3*224*224
    output_dim = 1000
    return trainset, testset, input_dim, output_dim
| 887 | 25.117647 | 64 | py |
agd | agd-main/data/mnist.py | from torchvision import datasets, transforms
def getData():
    """Return (trainset, testset, input_dim, output_dim) for MNIST; both
    splits share the same normalise-only transform."""
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    common = dict(download=True, transform=transform)
    trainset = datasets.MNIST('./data', train=True, **common)
    testset = datasets.MNIST('./data', train=False, **common)
    return trainset, testset, 1 * 28 * 28, 10
| 493 | 25 | 87 | py |
aldiplusplus | aldiplusplus-main/aldi_gmm_dyn_none_both.py | from scipy import stats
import math
import torch
#import stumpy
import pyscamp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
import calmap # not working with latest pandas
import calplot
import joypy
import sys
import time
import datetime as dt
from sklearn.mixture import GaussianMixture
class ALDI():
    def __init__(self,
                 df_meters,
                 df_metadata,
                 m=24,
                 col_id='building_id',
                 site_id='',
                 meter_id='',
                 verbose=False,
                 gpu=False,
                 hourly_processing=False,
                 aldi_name='aldi_gmm_dyn_none_both',
                 ):
        """
        Parameter
        ----------
        df_meters:
            sorted NxM dataframe with M buildings and N rows with hourly
            timestamp as indices
        df_metadata:
            dataframe with metadata regarding the buildings
        m:
            hourly window size for the matrix profile, one day = 24
        col_id:
            string name of the column with building ids in df_meters and df_metadata
        site_id:
            id of the current portfolio being analyzed
        site_id:
            id of the current portfolio being analyzed (used in plot titles)
        meter_id:
            id of the current sensor reading being analyzed
        verbose:
            boolean value to enable debugging printing
        gpu:
            request GPU use; only honoured when torch reports CUDA available
        hourly_processing:
            if True, distributions/tests are built per hour instead of per day
        aldi_name:
            identifier stored on the instance for this ALDI variant
        """
        self.df_meters = df_meters.copy()
        self.df_metadata = df_metadata.copy()
        # NOTE(review): .copy() here only serves to grab the index; the copy
        # itself is discarded.
        self.base_timestamps = df_meters.copy().index
        self.m = m
        self.col_id = col_id
        self.site_id = site_id
        self.meter_id = meter_id
        self.verbose = verbose
        self.aldi_name = aldi_name
        self.hourly = hourly_processing
        # Effectively: self.cuda = gpu when CUDA is available, else False.
        self.cuda = True and gpu if torch.cuda.is_available() else False
        if self.cuda:
            print('Using GPU')
        # set auxiliary variables
        self.list_all_bdg = df_meters.columns.values
        # placeholder for upcoming class variables (filled in by pipeline())
        self.mp_adj = None
        self.mp_ind = None
        self.df_result = None
        self.num_readings = None
        self.num_buildings = None
        self.df_result_meta = None
        self.df_test = None
        self.df_test_det = None # placeholder
        # start the engine
        self.pipeline()
    def pipeline(self):
        """Run the full ALDI computation in order: matrix profile ->
        long-format reconstruction -> metadata merge -> per-day KS tests.
        Classification of the test results is triggered separately by the
        caller (see get_result_df / get_result_using_threshold)."""
        if self.verbose:
            print(f'Start ALDI. hourly = {self.hourly}')
        ##### EXECUTE ALDI
        #### STEP 1: get mp-values and -indices
        self.mp_adj, self.mp_ind = self.get_mp()
        #### STEP 2: select midnight mp-values and base dataframe
        self.df_result, self.num_readings, self.num_buildings = self.data_reconstruction()
        self.df_result_meta = self.add_metadata()
        #### STEP 4: run one KS-tests
        self.df_ks_test = self.ks_test()
        #### STEP 5: Classification of the results of the stat test
        #### (Initiated by the user from the outside)
        # self.df_test_det = self.get_result_df()
def get_mp(self):
"""
Calculates matrix profile and matrix profile indices for a time-stamp
sorted dataframe where the columns are buildings from the same site
and rows are meter readings.
Returns:
mp_adj: dataframe with the matrix profile values
mp_ind: dataframe with the matrix profile indices
"""
mp_adj = pd.DataFrame(columns=self.list_all_bdg)
mp_ind = pd.DataFrame(columns=self.list_all_bdg)
for col in self.list_all_bdg:
bldg = self.df_meters[col]
mp_profile, mp_index = pyscamp.selfjoin(bldg, self.m)
#if self.cuda:
# mp = stumpy.gpu_stump(bldg, m=self.m)
#else:
# mp = stumpy.stump(bldg, m=self.m)
# append np.nan to matrix profile to allow plotting against raw data
#madj = np.append(mp[:,0], np.zeros(self.m-1) + np.nan)
#mind = np.append(mp[:,1], np.zeros(self.m-1) + np.nan)
madj = np.append(mp_profile, np.zeros(self.m-1) + np.nan)
mind = np.append(mp_index, np.zeros(self.m-1) + np.nan)
# save mp information
mp_adj[col] = madj
mp_ind[col] = mind
return mp_adj, mp_ind
    def data_reconstruction(self):
        """
        Assembles raw readings, MP values and MP indices into one
        long-format dataframe (one row per building per time step),
        annotated with the building id and broken-down date parts.

        Returns:
        ----------
        df_result: pandas.DataFrame
            long-format frame with columns raw/mp/mp_ind, building id,
            timestamp and month/daytype/day/hour breakdowns
        num_readings: int
            number of time steps kept per building
        num_buildings: int
            number of buildings (columns) in the input
        """
        df_result = pd.DataFrame(columns=['raw','mp','mp_ind'])
        # Previous get_midnight_values()
        df_e, df_mp, df_mpind = self.prepare_mp_values()
        num_readings = df_e.shape[0]
        num_buildings = df_e.shape[1]
        if self.verbose:
            print(f'num of readings: {num_readings}') # debug
        # combining the matrix profile and indices values
        # (row-major reshape: building varies fastest within a time step)
        df_result['raw'] = df_e.values.reshape(num_readings * num_buildings)
        df_result['mp'] = df_mp.values.reshape(num_readings * num_buildings)
        df_result['mp_ind'] = df_mpind.values.reshape(num_readings * num_buildings)
        if self.verbose:
            print(f'Combining raw and calculated values:\n{df_result}')
        # combining the building names and dates; the branches differ only
        # in the date_range frequency ('H' vs 'd')
        if self.hourly:
            # HOURLY SOLUTION
            df_names = np.tile(self.list_all_bdg, num_readings)
            # NOTE(review): `steps` is computed but never used afterwards.
            steps = np.repeat(list(range(num_readings)), len(self.list_all_bdg))
            df_interim_dates = (pd.date_range(start=self.base_timestamps[0],
                                              end=self.base_timestamps[-1],
                                              freq='H')
                                ).to_pydatetime().tolist()
            df_dates = np.repeat(df_interim_dates, len(self.list_all_bdg))
        else:
            # DAYS SOLUTION
            df_names = np.tile(self.list_all_bdg, num_readings)
            steps = np.repeat(list(range(num_readings)), len(self.list_all_bdg))
            df_interim_dates = (pd.date_range(start=self.base_timestamps[0],
                                              end=self.base_timestamps[-1],
                                              freq='d')
                                ).to_pydatetime().tolist()
            df_dates = np.repeat(df_interim_dates, len(self.list_all_bdg))
        df_result[self.col_id] = df_names
        df_result['date'] = df_dates
        if self.verbose:
            print(f'Updating the combined values with building names ' +
                  f'and full dates:\n{df_result}')
        # combining the breakdown of the dates
        df_result['month'] = df_result['date'].dt.strftime('%b')
        df_result['daytype'] = df_result['date'].dt.strftime('%a')
        df_result['day'] = df_result['date'].dt.strftime('%d')
        df_result['hour'] = (df_result['date'].dt.strftime('%H')).astype('int8')
        if self.verbose:
            print(f'Updating the combined values with broken down dates:\n{df_result}')
        return df_result, num_readings, num_buildings
def prepare_mp_values(self):
"""
Picks daily matrix profile at midnight
Returns:
----------
df_e: pandas.DataFrame
text
df_mp: pandas.DataFrame
text
df_mpind: pandas.DataFrame
text
"""
df_e = self.df_meters.copy()
df_mp = self.mp_adj.set_index(df_e.index)
df_mpind = self.mp_ind.set_index(df_e.index)
if not self.hourly:
df_e = df_e[df_e.index.hour==0]
df_mp = df_mp[df_mp.index.hour==0]
df_mpind = df_mpind[df_mpind.index.hour==0]
if self.verbose:
print(f'Prepared MP values:\n{df_mp}')
print(f'Shape midnight results:')
print(f'raw: {df_e.shape}')
print(f'mp: {df_mp.shape}')
print(f'mpi: {df_mpind.shape}\n')
return df_e, df_mp, df_mpind
def add_metadata(self):
"""
Combines the processed dataframe with matrix profile calculation
alongside the metadata file
Returns:
----------
df_result_meta: pandas.DataFrame
text
"""
df_result_meta = self.df_result.merge(self.df_metadata,
on=self.col_id)
if self.verbose:
print(f'Merging available metadata:\n{df_result_meta.head()}')
return df_result_meta
    def ks_test(self):
        """
        Runs a two-sample Kolmogorov-Smirnov test for every day (or hour,
        when self.hourly) comparing that time step's MP values across all
        buildings against the matching day-type reference distribution.

        Returns:
        ----------
        ks_test: pandas.DataFrame
            indexed by timestamp, columns D (KS statistic) and p (p-value)
        """
        reference_dist = self.daytype_dist()
        if self.hourly:
            curr_freq = 'H'
        else:
            curr_freq = 'D'
        ks_test = pd.DataFrame(columns=['D','p'],
                               index=pd.date_range(start=self.base_timestamps[0],
                                                   end=self.base_timestamps[-1],
                                                   freq=curr_freq)
                               )
        if self.verbose:
            print(f'CAUTION: curr_freq: {curr_freq}')
            print(f'Starting to fill the ks_test df: \n{ks_test}')
        for i in ks_test.index:
            # Sample: MP values of all buildings at this time step.
            events = self.df_result.mp[self.df_result.date == i]
            # Reference: pooled MP values for the same weekday (and hour).
            if self.hourly:
                reference = reference_dist[i.strftime('%a')][int(i.strftime('%H'))]
            else:
                reference = reference_dist[i.strftime('%a')]
            test = stats.ks_2samp(events, reference)
            # NOTE(review): chained assignment (ks_test.D[i] = ...) works
            # here but ks_test.loc[i, 'D'] is the recommended pandas form.
            ks_test.D[i] = test.statistic
            ks_test.p[i] = test.pvalue
        if self.verbose:
            print(f'KS test dataframe:\n{ks_test}')
        return ks_test
def daytype_dist(self):
"""
Computes daytype distributions
Returns:
----------
daytype_dist: dictionary
text
"""
daytype_dist = {}
weekdays = ['Mon','Tue','Wed','Thu','Fri','Sat','Sun']
if self.hourly:
times = list(range(24))
for curr_day in weekdays:
daytype_dist[curr_day] = {}
for curr_time in times:
daytype_dist[curr_day][curr_time] = self.df_result.mp[
(self.df_result.daytype == curr_day)
& (self.df_result.hour == curr_time) ]
else:
for curr_day in weekdays:
daytype_dist[curr_day] = self.df_result.mp[(
self.df_result.daytype == curr_day)]
return daytype_dist
####################################################################
# #
# ||| Methods that are called from the outside ||| #
# VVV VVV #
####################################################################
    def set_gmm_model(self, gmm_data='D', gmm_max_comp=10):
        """Fit and store the Gaussian mixture used for discord classification.

        gmm_data: which KS-test output to model ('D' statistic or 'p' value;
            read later by get_result_df).
        gmm_max_comp: upper bound on mixture components — presumably consumed
            by self._gmm_train (defined outside this view; TODO confirm).
        """
        self.gmm_data = gmm_data
        self.gmm_max_comp = gmm_max_comp
        self.gm_model, self.gmm_components = self._gmm_train()
def get_result_df(self, forecast_out=False):
"""
Calculates the discords
"""
# dynamic share calculation
max_gauss_mean = self.gmm_components['gauss_mean'].max()
share_comp_of_interest = 1 - max_gauss_mean
abs_comp_of_interest = math.trunc( share_comp_of_interest
* self.gm_model.n_components)
sorted_gmm_components = self.gmm_components.sort_values('gauss_mean').copy()
special_gmm_comp = sorted_gmm_components[:abs_comp_of_interest]
if self.verbose:
print(f'Share components of interest: {share_comp_of_interest}')
print(f'Number components of interest: {abs_comp_of_interest}')
gmm_proba = self.gm_model.predict_proba(
self.df_ks_test.D.values.reshape(-1,1))
df_gmm_proba = pd.DataFrame(gmm_proba, index= self.df_ks_test.index)
df_gmm_proba['max_comp'] = df_gmm_proba.idxmax(axis='columns')
if self.gmm_data == 'D':
gmm_proba = self.gm_model.predict_proba(
self.df_ks_test.D.values.reshape(-1,1))
df_gmm_proba = pd.DataFrame(gmm_proba, index= self.df_ks_test.index)
df_gmm_proba['max_comp'] = df_gmm_proba.idxmax(axis='columns')
# *Important comparison* - The max_comp must be inside the list
# for the day to be classified as a "non-discord day"
df_gmm_proba['is_discord'] = np.where(df_gmm_proba['max_comp'].isin(
special_gmm_comp.component), 0, 1)
else:
gmm_proba = self.gm_model.predict_proba(
self.df_ks_test.p.values.reshape(-1,1))
df_gmm_proba = pd.DataFrame(gmm_proba, index= self.df_ks_test.index)
df_gmm_proba['max_comp'] = df_gmm_proba.idxmax(axis='columns')
# *Important comparison* - The max_comp must be inside the list
# for the day to be classified as a "non-discord day"
df_gmm_proba['is_discord'] = np.where(df_gmm_proba['max_comp'].isin(
special_gmm_comp.component), 1, 0)
df_is_discord = pd.DataFrame(index=df_gmm_proba.index)
df_is_discord['is_discord'] = df_gmm_proba['is_discord']
# prepare index and column for resulting dataframes
all_bdg = self.list_all_bdg.copy()
if forecast_out:
columns = all_bdg
else:
columns = [f'is_discord_{x}' for x in all_bdg]
# hand waving specialization (caution) of discords for all bdgs
for col in columns:
df_is_discord[col] = df_is_discord['is_discord']
df_is_discord = df_is_discord.drop(['is_discord'],
axis=1)
if forecast_out:
if not self.hourly:
hourly_timestamps = self.base_timestamps
df_hourly_is_discord = pd.DataFrame(index=hourly_timestamps)
# copy daily dataframe to hourly dataframe
df_hourly_is_discord['day'] = df_hourly_is_discord.index.date
df_is_discord.index = df_is_discord.index.date
df_hourly_is_discord = df_hourly_is_discord.join(df_is_discord,
on='day',
how='left')
df_hourly_is_discord = df_hourly_is_discord.drop(['day'], axis=1)
df_is_discord_hourly = df_hourly_is_discord.astype('int8')
else:
df_is_discord_hourly = df_is_discord
df_is_discord_hourly['timestamp'] = df_is_discord_hourly.index
df_is_discord_hourly = df_is_discord_hourly.melt(
id_vars=['timestamp'],
var_name='building_id',
value_name='is_discord')
# Exportable variable
df_is_discord = df_is_discord_hourly
return df_is_discord
    def get_result_using_threshold(self,
                                   ks_type='D',
                                   threshold=0.6,
                                   forecast_out=False):
        """
        Method offers an additional possibility to get a discord
        classification (predicted). For this purpose, all time points
        at which the KS-test output falls below a predefined threshold
        are classified as discord (1) and vice versa.

        Parameters:
        ----------
        ks_type: str , required
            Describes which result type of the ks test should be used
            ('D' statistic or 'p' value)
        threshold: float , required
            Describes the threshold to be used to distinguish between
            discord and non-discord
        forecast_out: bool , required
            This parameter controls the formatting of the return type.
            If False, the resulting dataframe will have columns
            identifying the different buildings of the site. The index
            consists of timestamps.
            If True, the previously described results are formatted
            into a single column result. The building ID and timestamp
            are then keys and have their own columns.
        Returns:
        ----------
        df_result: pandas.DataFrame
            see description at parameter 'forecast_out'
        """
        if self.hourly:
            curr_freq = 'H'
        else:
            curr_freq = 'D'
        df_is_discord = pd.DataFrame(
            columns=['is_discord'],
            index=pd.date_range(start=self.base_timestamps[0],
                                end=self.base_timestamps[-1],
                                freq=curr_freq)
            )
        # Below-threshold KS output -> discord (1), otherwise 0.
        df_is_discord['is_discord'] = np.where(
            self.df_ks_test[ks_type] < threshold, 1, 0)
        # prepare index and column for resulting dataframes
        all_bdg = self.list_all_bdg.copy()
        if forecast_out:
            columns = all_bdg
        else:
            columns = [f'is_discord_{x}' for x in all_bdg]
        # hand waving specialization (caution) of discords for all bdgs:
        # the single site-level label is copied to every building column
        for col in columns:
            df_is_discord[col] = df_is_discord['is_discord']
        df_is_discord = df_is_discord.drop(['is_discord'],
                                           axis=1)
        if (forecast_out & (not self.hourly)):
            # Expand daily labels to hourly resolution by joining on date.
            df_hourly_is_discord = pd.DataFrame(
                index=pd.date_range(start=self.base_timestamps[0],
                                    end=self.base_timestamps[-1],
                                    freq='H')
                )
            # copy daily dataframe to hourly dataframe
            df_hourly_is_discord['day'] = df_hourly_is_discord.index.date
            df_is_discord.index = df_is_discord.index.date
            df_hourly_is_discord = df_hourly_is_discord.join(
                df_is_discord,
                on='day', how='left')
            df_hourly_is_discord = df_hourly_is_discord.drop(['day'], axis=1)
            df_result = df_hourly_is_discord.astype('int8')
        else:
            df_result = df_is_discord
        if forecast_out:
            # Long format: timestamp / building_id / is_discord.
            df_result['timestamp'] = df_result.index
            df_result = df_result.melt(id_vars=['timestamp'],
                                       var_name='building_id',
                                       value_name='is_discord')
        return df_result
    def plot_true_n_gmm(self, df_true_labels, df_ks_results=None, gmm=None):
        """
        Plot, per building, histograms of the daily KS D-values split into
        (true) discord and non-discord days, overlay the PDF of the trained
        GMM, and save the whole figure grid as a single PNG.

        Parameters:
        ----------
        df_true_labels: pandas.DataFrame , required
            ground-truth labels; one column per building
            (aggregated here to one label per day via max)
        df_ks_results: pandas.DataFrame , optional
            KS test results with a 'D' column; defaults to self.df_ks_test
        gmm: sklearn.mixture.GaussianMixture , optional
            trained mixture model; defaults to self.gm_model

        Returns:
        ----------
        Method saves a plot.
        """
        if df_ks_results is None:
            df_ks_results = self.df_ks_test
        if gmm is None:
            gmm = self.gm_model
        # aggregate labels to one per day: any flagged hour marks the day
        df_true_labels_day = df_true_labels.groupby(df_true_labels.index.date).max()
        df_ks_results_D = df_ks_results[['D']]
        # broadcast the single site-wide D column to one column per building
        df_ks_results_D_spez = pd.DataFrame(index=df_ks_results_D.index,
                                            columns=df_true_labels_day.columns)
        for col in df_ks_results_D_spez.columns:
            df_ks_results_D_spez[col] = df_ks_results_D['D']
        assert (df_true_labels_day.shape == df_ks_results_D_spez.shape)
        # split the D-values by true label; non-matching cells become NaN
        df_D_discord = pd.DataFrame(index=df_ks_results_D.index,
                                    columns=df_true_labels_day.columns)
        df_D_non_discord = pd.DataFrame(index=df_ks_results_D.index,
                                        columns=df_true_labels_day.columns)
        for col in df_D_discord.columns:
            df_D_discord[col] = np.where(df_true_labels_day[col] == 1,
                                         df_ks_results_D_spez[col],
                                         math.nan)
            df_D_non_discord[col] = np.where(df_true_labels_day[col] == 0,
                                             df_ks_results_D_spez[col],
                                             math.nan)
        #### HERE THE PLOTTING BEGINNS ###
        # evaluate the overall GMM PDF and the per-component PDFs on a
        # fixed grid over [0, 1]
        x_values = np.linspace(0, 1, 1000)
        logprob = gmm.score_samples(x_values.reshape(-1, 1))
        responsibilities = gmm.predict_proba(x_values.reshape(-1, 1))
        pdf = np.exp(logprob)
        pdf_individual = responsibilities * pdf[:, np.newaxis]
        # two buildings per subplot row
        number_plot_rows = math.ceil(df_D_discord.shape[1]/2)
        figure, axes = plt.subplots(nrows=number_plot_rows,
                                    ncols=2,
                                    figsize=(22, 4*number_plot_rows))
        figure.patch.set_facecolor('white')
        figure.subplots_adjust(top=0.97)
        figure.suptitle(f'D-values of discord and non-discord days from \
                        site {self.site_id}',
                        fontsize=20)
        # alternate between the left (0) and right (1) subplot column
        next_odd = True
        for num, df_col in enumerate(df_D_discord.columns):
            if next_odd:
                plot_col = 0
                next_odd = False
            else:
                plot_col = 1
                next_odd = True
            plot_row_num = math.floor(num/2)
            try:
                axes[plot_row_num, plot_col].hist([df_D_non_discord[df_col], df_D_discord[df_col]],
                                                  100,
                                                  density=True,
                                                  histtype='stepfilled',
                                                  alpha=0.7,
                                                  label=['non-discord','discord'])
            except AttributeError:
                # NOTE(review): presumably guards against a 1-row axes array
                # where 2D indexing fails — confirm intent
                print('ooouw that hurts')
            axes[plot_row_num,plot_col].plot(x_values, pdf, '-k')
            axes[plot_row_num,plot_col].plot(x_values, pdf_individual)
            axes[plot_row_num,plot_col].set_title(f'Information about {df_col}')
            axes[plot_row_num,plot_col].legend(loc='upper right')
        figure.savefig(f'img/D_visualization/{self.aldi_name}/site_{self.site_id}.png',
                       format='PNG')
        plt.clf()
    def plot_true_one_gmm(self,
                          df_true_labels,
                          agg_type=None,
                          gmm_data='D',
                          df_ks_results=None,
                          gmm=None):
        """
        method creates a plot. Two histograms are shown on the plot.
        The first histogram shows the distribution of the D-values of
        the (true) discords. The second histogram shows the distribution
        of the D-values of the (true) non-discords.
        Furthermore, the components of the GMM are also visualized.
        Four PNG files are written below img/D_visualization/.

        Parameters:
        ----------
        df_true_labels: pandas.DataFrame , required
            ground-truth labels; one column per building, rows aligned
            with df_ks_results
        agg_type: str , optional
            subfolder name for the output images; None writes directly
            into the aldi_name folder
        gmm_data : str , required
            column of the KS results to visualize (e.g. 'D' or 'p')
        df_ks_results: pandas.DataFrame , optional
            KS test results; defaults to self.df_ks_test
        gmm: sklearn.mixture.GaussianMixture , optional
            trained mixture model; defaults to self.gm_model

        Returns:
        ----------
        Method saves a plot.
        """
        if df_ks_results is None:
            df_ks_results = self.df_ks_test
        if gmm is None:
            gmm = self.gm_model
        if agg_type is None:
            path_prefix = f'img/D_visualization/{self.aldi_name}/'
        else:
            path_prefix = f'img/D_visualization/{self.aldi_name}/{agg_type}/'
        assert (df_true_labels.shape[0] == df_ks_results.shape[0]), 'same length please'
        df_ks_results_D = df_ks_results[[gmm_data]]
        # broadcast the site-wide value column to one column per building
        df_ks_results_D_spez = pd.DataFrame(index=df_ks_results_D.index,
                                            columns=df_true_labels.columns)
        for col in df_ks_results_D_spez.columns:
            df_ks_results_D_spez[col] = df_ks_results_D[gmm_data]
        assert (df_true_labels.shape == df_ks_results_D_spez.shape)
        # split the values by true label; non-matching cells become NaN
        df_D_discord = pd.DataFrame(index=df_ks_results_D.index,
                                    columns=df_true_labels.columns)
        df_D_non_discord = pd.DataFrame(index=df_ks_results_D.index,
                                        columns=df_true_labels.columns)
        for col in df_D_discord.columns:
            df_D_discord[col] = np.where(df_true_labels[col] == 1,
                                         df_ks_results_D_spez[col],
                                         math.nan)
            df_D_non_discord[col] = np.where(df_true_labels[col] == 0,
                                             df_ks_results_D_spez[col],
                                             math.nan)
        # flatten to 1D value lists and drop the NaN placeholders
        list_D_non_discord = df_D_non_discord.values.flatten()
        list_D_discord = df_D_discord.values.flatten()
        cleaned_list_D_non_discord = \
            [x for x in list_D_non_discord if str(x) != 'nan']
        cleaned_list_D_discord = \
            [x for x in list_D_discord if str(x) != 'nan']
        #### HERE THE PLOTTING BEGINNS ###
        fontsize=22
        # first # ONLY HISTOGRAMMS
        # evaluate overall and per-component GMM PDFs on a grid over [0, 1]
        x_values = np.linspace(0, 1, 1000)
        logprob = gmm.score_samples(x_values.reshape(-1, 1))
        responsibilities = gmm.predict_proba(x_values.reshape(-1, 1))
        pdf = np.exp(logprob)
        pdf_individual = responsibilities * pdf[:, np.newaxis]
        figure, axes = plt.subplots(nrows=1,
                                    ncols=1,
                                    figsize=(18, 6))
        figure.patch.set_facecolor('white')
        figure.suptitle(f'Histogram of the Distance Values from the KS Test'
                        f' (Site {self.site_id})',
                        fontsize=fontsize+4)
        try:
            axes.hist([cleaned_list_D_non_discord,
                       cleaned_list_D_discord],
                      100,
                      density=False,
                      histtype='stepfilled',
                      alpha=0.7,
                      range=(0,1),
                      stacked=True,
                      label=['non-discord','discord'])
        except AttributeError:
            print('ooouw that hurts')
        #axes.plot(x_values, pdf, '-k')
        #axes.plot(x_values, pdf_individual)
        axes.legend(loc='upper right', prop={'size': fontsize})
        axes.tick_params(labelsize=fontsize)
        plt.xlabel('Distance Value', fontsize=fontsize+2)
        plt.ylabel('Frequency', fontsize=fontsize+2)
        figure.savefig(f'{path_prefix}site_{self.site_id}.png',
                       format='PNG')
        plt.clf()
        # second # COMBINED PLOT
        figure, axes = plt.subplots(nrows=1,
                                    ncols=1,
                                    figsize=(18, 6))
        figure.patch.set_facecolor('white')
        figure.suptitle(f'Histogram and Trained GMM of the Distance Values from the KS Test'
                        f' (site {self.site_id})',
                        fontsize=fontsize+4)
        try:
            axes.hist([cleaned_list_D_non_discord,
                       cleaned_list_D_discord],
                      100,
                      density=True,
                      histtype='stepfilled',
                      alpha=0.7,
                      range=(0,1),
                      stacked=True,
                      label=['non-discord','discord'])
        except AttributeError:
            print('ooouw that hurts')
        axes.plot(x_values, pdf, '-k')
        axes.plot(x_values, pdf_individual)
        axes.legend(loc='upper right', prop={'size': fontsize})
        axes.tick_params(labelsize=fontsize)
        plt.xlabel('Distance Value', fontsize=fontsize+2)
        plt.ylabel('Density', fontsize=fontsize+2)
        figure.savefig(f'{path_prefix}density_site_{self.site_id}.png',
                       format='PNG')
        plt.clf()
        # third # ONLY GMM
        figure, axes = plt.subplots(nrows=1,
                                    ncols=1,
                                    figsize=(18, 6))
        figure.patch.set_facecolor('white')
        figure.suptitle(f'Trained GMM of the Distance Values from the KS Test'
                        f' (site {self.site_id})',
                        fontsize=20)
        axes.plot(x_values, pdf, '-k')
        axes.plot(x_values, pdf_individual)
        # NOTE(review): no artist here carries a label, so legend() draws
        # nothing (matplotlib warns) — confirm whether labels were intended
        axes.legend(loc='upper right', prop={'size': fontsize})
        axes.tick_params(labelsize=fontsize)
        plt.xlabel('Distance Value', fontsize=fontsize+2)
        plt.ylabel('Density', fontsize=fontsize+2)
        figure.savefig(f'{path_prefix}gmm_site_{self.site_id}.png',
                       format='PNG')
        plt.clf()
        plt.close('all')
        # third # GMM + unlabeled Histo
        # NOTE(review): despite the section name no histogram is drawn here,
        # and the savefig below reuses the 'gmm_site_…' filename, overwriting
        # the figure saved just above — confirm which output is intended
        figure, axes = plt.subplots(nrows=1,
                                    ncols=1,
                                    figsize=(18, 6))
        figure.patch.set_facecolor('white')
        figure.suptitle(f'Trained GMM of the Distance Values from the KS Test'
                        f' (site {self.site_id})',
                        fontsize=fontsize+4)
        axes.plot(x_values, pdf, '-k')
        axes.plot(x_values, pdf_individual)
        axes.legend(loc='upper right', prop={'size': fontsize})
        axes.tick_params(labelsize=fontsize)
        plt.xlabel('Distance Value', fontsize=fontsize+2)
        plt.ylabel('Density', fontsize=fontsize+2)
        figure.savefig(f'{path_prefix}gmm_site_{self.site_id}.png',
                       format='PNG')
        plt.clf()
        # forth # COMBINED PLOT (UNLABELED)
        figure, axes = plt.subplots(nrows=1,
                                    ncols=1,
                                    figsize=(18, 6))
        figure.patch.set_facecolor('white')
        figure.suptitle(f'Histogram and Trained GMM of the Distance Values from the KS Test'
                        f' (site {self.site_id})',
                        fontsize=fontsize+4)
        try:
            axes.hist((cleaned_list_D_non_discord + cleaned_list_D_discord),
                      100,
                      density=True,
                      histtype='stepfilled',
                      alpha=0.7,
                      range=(0,1),
                      stacked=True)
        except AttributeError:
            print('ooouw that hurts')
        axes.plot(x_values, pdf, '-k')
        axes.plot(x_values, pdf_individual)
        axes.legend(loc='upper right', prop={'size': fontsize})
        axes.tick_params(labelsize=fontsize)
        plt.xlabel('Distance Value', fontsize=fontsize+2)
        plt.ylabel('Density', fontsize=fontsize+2)
        figure.savefig(f'{path_prefix}unlabeled_site_{self.site_id}.png',
                       format='PNG')
        plt.clf()
        plt.close('all')
def plot_common_pD( self,
df_true_labels,
agg_type='',
df_ks_results=None):
"""
method does something
Parameters:
----------
df_true_labels: pandas.DataFrame , required
text
agg_type: string , optional
text
df_ks_results: pandas.DataFrame , optional
text
gmm: sklearn.mixture.GaussianMixture , optional
text
Returns:
----------
Method saves a plot.
"""
if df_ks_results is None:
df_ks_results = self.df_ks_test
# columns = ['D','p']
# index = timestamps, DatetimeIndex, either hourly or daily
if agg_type is None:
path_prefix = f'img/D_visualization/{self.aldi_name}/'
else:
path_prefix = f'img/D_visualization/{self.aldi_name}/{agg_type}/'
assert (df_true_labels.shape[0] == df_ks_results.shape[0]), 'same length please'
df_all_dat = pd.DataFrame(columns=['date', 'D', 'p', 'label'])
# build one dataframe with following structure:
# columsn = ['date', 'D', 'p', 'label']
# index = range(N) (N = number of ks-results
# * number of buildings within the site)
# dataframe units all labesl & KS results within single columns
for label_col in df_true_labels.columns:
# Prepare true label df
df_label_tmp = df_true_labels[[label_col]].copy()
df_label_tmp['date'] = df_label_tmp.index
df_label_tmp = df_label_tmp.reset_index(drop=True)
# Prepare KS test result
df_ks_tmp = df_ks_results.copy()
df_ks_tmp['date'] = df_ks_tmp.index
df_ks_tmp['date'] = df_ks_tmp['date'].dt.date
df_ks_tmp = df_ks_tmp.reset_index(drop=True)
df_both_tmp = df_ks_tmp.merge(df_label_tmp, how='inner', on='date')
df_both_tmp = df_both_tmp.rename(columns={label_col: 'label'})
df_all_dat = df_all_dat.append(df_both_tmp, ignore_index=True)
# Create 2D plots
self._creat_single_pD_2D(df_all_dat, path_prefix)
self._creat_common_pD_2D(df_all_dat, path_prefix)
# Create 3D plots
self._creat_pD_3D(df_all_dat, path_prefix)
plt.close('all')
####################################################################
# ||| Support methods for access from outside ||| #
# VVV VVV #
####################################################################
def _creat_single_pD_2D(self, df_all_dat, path_prefix):
# Data preparation
df_ks_true_discord = df_all_dat.query('label == 1')
df_ks_true_non_discord = df_all_dat.query('label == 0')
# set plotting parameters
colors = ['red', 'blue']
markers = ['o', '^']
labels = ['true_discord', 'true_non_discord']
#### FIRST PLOT: DISCORD SCATTER
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111,
title=(f'Joint visualisation (2D) of D and p values; '
f'only discords (site {self.site_id})'))
ax.set_xlabel('D-value')
ax.set_ylabel('p-value')
ax.set_xlim(0, 1) #D
ax.set_ylim(0, 1) #p
scatter_dis = ax.scatter(x=df_ks_true_discord['D'],
y=df_ks_true_discord['p'],
color=colors[0],
alpha=0.3,
marker=markers[0])
ax.legend( [scatter_dis],
[labels[0]],
numpoints = 1)
fig.savefig(f'{path_prefix}pD_2D_discord_site_{self.site_id}.png',
format='PNG')
plt.clf()
#### SECOND PLOT: NON DISCORD SCATTER
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111,
title=(f'Joint visualisation (2D) of D and p values; '
f'only non-discords (site {self.site_id})'))
ax.set_xlabel('D-value')
ax.set_ylabel('p-value')
ax.set_xlim(0, 1) #D
ax.set_ylim(0, 1) #p
scatter_non_dis = ax.scatter(x=df_ks_true_non_discord['D'],
y=df_ks_true_non_discord['p'],
color=colors[1],
alpha=0.3,
marker=markers[1])
ax.legend( [scatter_non_dis],
[labels[1]],
numpoints = 1)
fig.savefig(f'{path_prefix}pD_2D_non_discord_site_{self.site_id}.png',
format='PNG')
plt.clf()
def _creat_common_pD_2D(self, df_all_dat, path_prefix):
# Data preparation
df_ks_true_discord = df_all_dat.query('label == 1')
df_ks_true_non_discord = df_all_dat.query('label == 0')
# set plotting parameters
colors = ['red', 'blue']
markers = ['o', '^']
labels = ['true_discord', 'true_non_discord']
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111,
title=(f'Joint visualisation (2D) of D and p values '
f'(site {self.site_id})'))
ax.set_xlabel('D-value')
ax.set_ylabel('p-value')
ax.set_xlim(0, 1) #D
ax.set_ylim(0, 1) #p
scatter_dis = ax.scatter(x=df_ks_true_discord['D'],
y=df_ks_true_discord['p'],
color=colors[0],
alpha=0.3,
marker=markers[0])
scatter_non_dis = ax.scatter(x=df_ks_true_non_discord['D'],
y=df_ks_true_non_discord['p'],
color=colors[1],
alpha=0.3,
marker=markers[1])
ax.legend( [scatter_dis, scatter_non_dis],
[labels[0], labels[1]],
numpoints = 1)
fig.savefig(f'{path_prefix}pD_2D_site_{self.site_id}.png',
format='PNG')
plt.clf()
    def _creat_pD_3D(self, df_all_dat, path_prefix):
        """Save a 3D scatter (label on x, D on y, p on z) of both label classes."""
        # Data preparation
        df_ks_true_discord = df_all_dat.query('label == 1')
        df_ks_true_non_discord = df_all_dat.query('label == 0')
        # set plotting parameters
        # NOTE: scale_* are only consumed by the commented-out projection
        # hack below; they are otherwise unused
        scale_x = 1 # true discord labels
        scale_y = 3 # D value
        scale_z = 3 # p value
        colors = ['red', 'blue']
        markers = ['o', '^']
        labels = ['true_discord', 'true_non_discord']
        fig = plt.figure(figsize=(12,10))
        ax = fig.add_subplot(111,
                             title=(f'Joint visualisation of D and p '
                                    f'values (site {self.site_id})'),
                             projection='3d')
        ax.set_xlim(0, 1)
        ax.set_xticks([0,1]) # true discord labels
        ax.set_ylim(0, 1) # D value
        ax.set_zlim(0, 1) # p value
        ax.set_xlabel('Discord Label')
        ax.set_ylabel('D-value')
        ax.set_zlabel('p-value')
        # scale the plot if wanted - did not look quite good
        #ax.get_proj = lambda: np.dot(Axes3D.get_proj(ax), np.diag([scale_x, scale_y, scale_z, 2]))
        # create discord scatter
        ax.scatter(ys=df_ks_true_discord['D'],
                   zs=df_ks_true_discord['p'],
                   xs=df_ks_true_discord['label'],
                   color=colors[0],
                   alpha=0.3,
                   marker=markers[0])
        # create non discord scatter
        ax.scatter(ys=df_ks_true_non_discord['D'],
                   zs=df_ks_true_non_discord['p'],
                   xs=df_ks_true_non_discord['label'],
                   color=colors[1],
                   alpha=0.3,
                   marker=markers[1])
        # Add legend - need some hidden plots -.-
        # (3D scatters are not legend-friendly, so invisible Line2D proxies
        # carry the legend markers instead)
        scatter1_proxy = matplotlib.lines.Line2D([0],[0], linestyle="none", c=colors[0], marker = markers[0])
        scatter2_proxy = matplotlib.lines.Line2D([0],[0], linestyle="none", c=colors[1], marker = markers[1])
        ax.legend([scatter1_proxy, scatter2_proxy], [labels[0], labels[1]], numpoints = 1)
        fig.savefig(f'{path_prefix}pD_3D_site_{self.site_id}.png',
                    format='PNG')
        plt.clf()
def _gmm_train(self):
"""
trains several GM models based on the given data (train_data)
and returns the best one (evaluated by AIC) (best_gmm)
Also returns a dataframe with a summary of the different
GMM components (gmm_components)
"""
train_data = self._data_for_gmm_training()
y_values = np.array([[val] for val in train_data])
N = np.arange(1, (self.gmm_max_comp + 1))
models = [None for i in range(len(N))]
for i in range(len(N)):
models[i] = GaussianMixture(N[i]).fit(y_values)
AIC = [m.aic(y_values) for m in models]
#BIC = [m.bic(y_values) for m in models]
best_gmm = models[np.argmin(AIC)]
gmm_components = pd.DataFrame(columns=['component',
'gauss_mean',
'gauss_covariance'])
gmm_components['component'] = list(range(0, best_gmm.n_components))
gmm_components['gauss_mean'] = best_gmm.means_
gmm_components['gauss_covariance'] = best_gmm.covariances_.reshape(-1,1)
if self.verbose:
print(f'calculated GMM')
print(f'components:\n {gmm_components}')
return best_gmm, gmm_components
    def _plot_gmm_results(self, gmm):
        """
        Method prepares a plot. On it you can see the PDF (Probability
        density function) trained by the given GMM (black line). In
        addition, the profiles of the individual components of the GMM
        are displayed (colored lines).
        If the original data on which the GMM was trained are also
        given, a histogram is shown in the background.
        The figure is saved as a PNG under img/pD_evaluation/.
        """
        # evaluation grid over [0, 1] (both KS statistics live there)
        x_values = np.linspace(0, 1, 1000)
        y_values = self._data_for_gmm_training()
        # overall log-density plus per-component responsibilities
        logprob = gmm.score_samples(x_values.reshape(-1, 1))
        responsibilities = gmm.predict_proba(x_values.reshape(-1, 1))
        pdf = np.exp(logprob)
        # component PDFs: overall PDF weighted by responsibilities
        pdf_individual = responsibilities * pdf[:, np.newaxis]
        figure, axes = plt.subplots(1, 1, figsize=(20, 10))
        figure.patch.set_facecolor('white')
        axes.set_title(f'Trained GMM on {self.gmm_data}-values from site {self.site_id}',
                       fontsize=18)
        # histogram of the training data behind the density curves
        axes.hist(y_values, 100, density=True,
                  histtype='stepfilled', alpha=0.4)
        axes.plot(x_values, pdf, '-k')
        axes.plot(x_values, pdf_individual)
        figure.savefig(f'img/pD_evaluation/{self.gmm_data}-value_aialdi_gmm_s{self.site_id}_m{self.meter_id}_data-{self.gmm_data}.png',
                       format='PNG')
        plt.clf()
def _data_for_gmm_training(self):
if self.gmm_data == 'D':
y_values = self.df_ks_test.D
else:
y_values = self.df_ks_test.p
return y_values
| 43,203 | 36.865031 | 135 | py |
aldiplusplus | aldiplusplus-main/vae.py | import torch
from torch import nn
from torch.utils.data import DataLoader
class VAE(nn.Module):
    """Fully connected variational autoencoder with Tanh activations.

    The encoder maps an input vector of size ``num_input`` through three
    hidden layers down to ``latent_dim`` features, from which two linear
    heads produce the posterior mean and log-variance.  The decoder
    mirrors the encoder back to ``num_input`` Tanh-bounded outputs.
    """

    def __init__(self, num_input, latent_dim, hidden_size=(300, 200, 100)):
        """
        :param num_input: size of the flattened input vector
        :param latent_dim: size of the latent space
        :param hidden_size: sizes of the three hidden layers, widest first
            (default is a tuple rather than a mutable list)
        """
        super().__init__()
        self.latent_dim = latent_dim
        self.num_input = num_input
        self.encoder = nn.Sequential(
            nn.Linear(num_input, hidden_size[0]),
            nn.Tanh(),
            nn.Linear(hidden_size[0], hidden_size[1]),
            nn.Tanh(),
            nn.Linear(hidden_size[1], hidden_size[2]),
            nn.Tanh(),
            nn.Linear(hidden_size[2], latent_dim),
            nn.Tanh(),
        )
        # separate linear heads for the posterior parameters
        self.mu = nn.Linear(latent_dim, latent_dim)
        self.log_var = nn.Linear(latent_dim, latent_dim)
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, hidden_size[2]),
            nn.Tanh(),
            nn.Linear(hidden_size[2], hidden_size[1]),
            nn.Tanh(),
            nn.Linear(hidden_size[1], hidden_size[0]),
            nn.Tanh(),
            nn.Linear(hidden_size[0], num_input),
            nn.Tanh(),
        )

    def reparameterize(self, mu, log_var):
        """Reparameterization trick for backprop.

        While training, samples z = mu + sigma * eps with eps ~ N(0, I);
        in eval mode, returns mu deterministically.
        """
        if self.training:
            std = torch.exp(0.5*log_var)
            eps = torch.randn_like(std)
            return eps*std + mu
        return mu

    def encode(self, x):
        """Transform input into latent dimension.

        :return: (mu, log_var) of the approximate posterior, each of
            shape [batch, latent_dim]
        """
        hidden = self.encoder(x)
        mu = self.mu(hidden)
        log_var = self.log_var(hidden)
        return mu, log_var

    def forward(self, x):
        """Encode, sample via the reparameterization trick, and decode.

        :return: (reconstruction, mu, log_var)
        """
        mu, log_var = self.encode(x)
        z = self.reparameterize(mu, log_var)
        return self.decoder(z), mu, log_var

    def loss_function(self, x_hat, x, mu, log_var, beta=1):
        """Beta-VAE loss: MSE reconstruction + beta * KL(q(z|x) || N(0, I)).

        NOTE: the reconstruction term uses MSELoss's mean reduction while
        the KL term is summed over the batch, matching the original
        training behaviour.
        """
        kl_loss = 0.5 * torch.sum(torch.exp(log_var) - log_var - 1 + mu**2)
        mse = nn.MSELoss()  # reconstruction loss
        recon_loss = mse(x_hat, x)
        return recon_loss + beta * kl_loss
| 1,984 | 30.015625 | 75 | py |
aldiplusplus | aldiplusplus-main/anomaly_detection.py | import warnings
import os
import sys
import logging
import yaml
import wandb
import torch
import pandas as pd
from sklearn.cluster import SpectralClustering, KMeans
from sklearn.metrics import silhouette_score
from datetime import timedelta
from collections import Counter
from matplotlib import pyplot as plt
from utils import load_variable, save_variable, get_data_chunks
from vae import VAE
from data_import_ashrae import DataImportAshrae
# Blunt global warning silencer: replace warnings.warn with a no-op so
# third-party libraries cannot spam the console during training.
def warn(*args, **kwargs):
    pass
warnings.warn = warn
# load config file from CLI
# usage: python <script>.py <config.yaml>
with open(str(sys.argv[1]), "r") as f:
    config = yaml.load(f, Loader=yaml.FullLoader)
# extract parameters from config file
name = config["name"]
seed = config["seed"]
meter_type = config["meter_type"]
data_folds = config["data_folds"]
num_input = config["num_input"]
latent_dim = config["latent_dim"]
batch_size = config["batch_size"]
hidden_size = config["hidden_size"]
learning_rate = config["learning_rate"]
epochs = config["epochs"]
# global variables
k_range = range(2, 11)  # candidate cluster counts for the silhouette sweep
# starting wandb
# NOTE: 'config' is rebound below from the YAML dict to the wandb config
# object; the YAML values were already copied into plain variables above.
wandb.init(project=name, entity="matiasqr")
config = wandb.config
config.data_folds = data_folds
config.latent_dim = latent_dim
config.batch_size = batch_size
config.hidden_size = hidden_size
config.learning_rate = learning_rate
config.epochs = epochs
# seed both CPU and GPU RNGs for reproducibility
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# load data
print("Loading data ...")
df_metadata = DataImportAshrae().get_meta_data()
site_list = df_metadata["site_id"].unique()
# NOTE(review): DEBUG override — only site 1 is processed; remove this
# line to run every site in the metadata
site_list = [1] # DEBUG
for site in site_list:
    print(f"Site {site} ...")
    df_all = DataImportAshrae().get_daily_profiles(meter_type, [site])
    # prepare site data
    train_folds, test_folds, scaler = get_data_chunks(df_all, folds=data_folds)
    df_exportable = {}
    for fold in range(0, data_folds):
        train_loader = torch.utils.data.DataLoader(
            train_folds[fold].to_numpy(),
            batch_size=batch_size,
            shuffle=True,
            worker_init_fn=seed,
            drop_last=True,
        )
        test_loader = torch.utils.data.DataLoader(
            test_folds[fold].to_numpy(),
            batch_size=batch_size,
            shuffle=True,
            worker_init_fn=seed,
            drop_last=True,
        )
        # model
        model = VAE(num_input, latent_dim, hidden_size).to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
        wandb.watch(model)
        # training
        print("Training model ...")
        codes = dict(mu=list(), log_sigma2=list())
        for epoch in range(0, epochs + 1):
            # Training (skipped at epoch 0 so the untrained model is evaluated once first)
            if epoch > 0:
                model.train()
                train_loss = 0
                for _, x in enumerate(train_loader):
                    # last column is dropped (presumably a building id /
                    # label column — TODO confirm against get_data_chunks)
                    x = x[:,0:-1].to(device)
                    # forward
                    x_hat, mu, logvar = model(x.float())
                    loss = model.loss_function(x_hat.float(), x.float(), mu, logvar)
                    train_loss += loss.item()
                    # backward
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                # log
                wandb.log({f"train_loss_site{site}_fold{fold}": train_loss / len(train_loader.dataset)})
            # Testing
            means, logvars, labels = list(), list(), list()
            with torch.no_grad():
                model.eval()
                test_loss = 0
                for _, x in enumerate(test_loader):
                    x = x[:,0:-1].to(device)
                    # forward
                    x_hat, mu, logvar = model(x.float())
                    test_loss += model.loss_function(
                        x_hat.float(), x.float(), mu, logvar
                    ).item()
                    # log
                    means.append(mu.detach())
                    logvars.append(logvar.detach())
                # log
                codes["mu"].append(torch.cat(means))
                codes["log_sigma2"].append(torch.cat(logvars))
                test_loss /= len(test_loader.dataset)
                wandb.log({f"test_loss_site{site}_fold{fold}": test_loss})
        # end of training loop
        # latent space clustering with different k
        print("Latent space clustering ...")
        mu, _ = model.encode(torch.from_numpy(test_folds[fold].iloc[:,0:-1].to_numpy()).float())
        ssi_list = []
        for k in k_range:
            clust_algo = KMeans(n_clusters=k, random_state=seed).fit(mu.detach())
            labels = clust_algo.predict(mu.detach())
            ssi = silhouette_score(mu.detach(), labels)
            ssi_list.append(ssi)
            wandb.log({f"ssi_site{site}_fold{fold}": ssi, 'k': k})
        # latent space clustering with unique k
        k = 2 # NOTE: replace accordingly
        clust_algo = KMeans(n_clusters=k, random_state=seed).fit(mu.detach())
        labels = clust_algo.predict(mu.detach())
        # find the cluster with the least number of members
        # (assumption: anomalies/discords form the minority cluster)
        print("Finding anomalies ...")
        dict_label_members = Counter(labels)
        min_cluster = min(dict_label_members, key=dict_label_members.get)
        # get indices of days that are members of this cluster
        test_data_label = test_folds[fold].copy()
        test_data_label['label'] = labels # append cluster label to data
        test_data_label = test_data_label[test_data_label['label'] == min_cluster]
        df_pred_labels = test_data_label.copy().reset_index(drop=False).rename(columns={"index":"timestamp"})[["timestamp","building_id"]]
        df_pred_labels["is_discord"] = 1
        df_pred_labels["meter"] = meter_type
        # use proper format following original train data
        df_left_keys = DataImportAshrae().get_train_data()
        df_left_keys["timestamp"] = df_left_keys["timestamp"].astype("datetime64[ns]")
        df_exportable[fold] = pd.merge(df_left_keys, df_pred_labels, how="left", on=["building_id", "meter", "timestamp"])
        df_exportable[fold]["is_discord"] = df_exportable[fold]["is_discord"].fillna(0) # NaNs are padded with 0
        df_exportable[fold]["is_discord"] = df_exportable[fold]["is_discord"].astype("int8")
        print(f"Transforming {(df_exportable[fold][df_exportable[fold]['is_discord'] == 1]).shape[0]} daily discords to hourly ...")
        # fill out remaining hours of a discord day as discords
        # NOTE(review): this rescans the whole frame for every discord hour
        # (O(discords * 23 * rows)) — slow for large sites; an indexed
        # lookup would be much faster
        for idx, row in df_exportable[fold][df_exportable[fold]["is_discord"] == 1].iterrows():
            for h in range(1, 24):
                new_time = row["timestamp"] + timedelta(hours=h)
                base_idx = df_exportable[fold].index[(df_exportable[fold]["timestamp"] == new_time) & (df_exportable[fold]["meter"] == row["meter"]) & (df_exportable[fold]["building_id"] == row["building_id"])]
                df_exportable[fold].loc[base_idx, "is_discord"] = 1
    # end of data_folds loop
    print("Merging all folds discords ...")
    # combine fold predictions with elementwise bitwise OR on the int8 flags
    df_final_exportable = df_exportable[0].copy()
    for fold in range(0, data_folds):
        df_final_exportable["is_discord"] = df_final_exportable["is_discord"] | df_exportable[fold]["is_discord"]
    # export here now the 'is_discord'
    df_final_exportable["is_discord"].to_csv(f'data/pred_discord/discords_{name}.csv', index=False)
    print(df_final_exportable[df_final_exportable["is_discord"] == 1])
    # TODO: only export site for each buildin_site
# end of site loop | 7,508 | 37.116751 | 210 | py |
pytorch_RVAE | pytorch_RVAE-master/sample.py | import argparse
import os
import numpy as np
import torch as t
from utils.batch_loader import BatchLoader
from utils.parameters import Parameters
from model.rvae import RVAE
# CLI entry point: sample sentences from a previously trained RVAE.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Sampler')
    # NOTE(review): argparse type=bool does not parse 'False' as False —
    # any non-empty string is truthy; confirm intended CLI behaviour
    parser.add_argument('--use-cuda', type=bool, default=True, metavar='CUDA',
                        help='use cuda (default: True)')
    parser.add_argument('--num-sample', type=int, default=10, metavar='NS',
                        help='num samplings (default: 10)')
    args = parser.parse_args()
    batch_loader = BatchLoader('')
    parameters = Parameters(batch_loader.max_word_len,
                            batch_loader.max_seq_len,
                            batch_loader.words_vocab_size,
                            batch_loader.chars_vocab_size)
    rvae = RVAE(parameters)
    rvae.load_state_dict(t.load('trained_RVAE'))
    if args.use_cuda:
        rvae = rvae.cuda()
    # draw num-sample latent vectors from N(0, I) and decode up to 50
    # words from each
    for iteration in range(args.num_sample):
        seed = np.random.normal(size=[1, parameters.latent_variable_size])
        result = rvae.sample(batch_loader, 50, seed, args.use_cuda)
        print(result)
print() | 1,265 | 31.461538 | 78 | py |
pytorch_RVAE | pytorch_RVAE-master/train_word_embeddings.py | import argparse
import numpy as np
import torch as t
from torch.autograd import Variable
from torch.optim import SGD
from utils.batch_loader import BatchLoader
from utils.parameters import Parameters
from selfModules.neg import NEG_loss
# CLI entry point: pre-train word embeddings with negative sampling
# (word2vec-style); the resulting matrix is consumed by the RVAE.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='word2vec')
    parser.add_argument('--num-iterations', type=int, default=1000000, metavar='NI',
                        help='num iterations (default: 1000000)')
    parser.add_argument('--batch-size', type=int, default=10, metavar='BS',
                        help='batch size (default: 10)')
    parser.add_argument('--num-sample', type=int, default=5, metavar='NS',
                        help='num sample (default: 5)')
    # NOTE(review): argparse type=bool treats any non-empty string as True
    parser.add_argument('--use-cuda', type=bool, default=True, metavar='CUDA',
                        help='use cuda (default: True)')
    args = parser.parse_args()
    batch_loader = BatchLoader('')
    params = Parameters(batch_loader.max_word_len,
                        batch_loader.max_seq_len,
                        batch_loader.words_vocab_size,
                        batch_loader.chars_vocab_size)
    neg_loss = NEG_loss(params.word_vocab_size, params.word_embed_size)
    if args.use_cuda:
        neg_loss = neg_loss.cuda()
    # NEG_loss is defined over two embedding matrixes with shape of [params.word_vocab_size, params.word_embed_size]
    optimizer = SGD(neg_loss.parameters(), 0.1)
    for iteration in range(args.num_iterations):
        input_idx, target_idx = batch_loader.next_embedding_seq(args.batch_size)
        # 'input'/'target' shadow builtins; kept as-is for compatibility
        input = Variable(t.from_numpy(input_idx).long())
        target = Variable(t.from_numpy(target_idx).long())
        if args.use_cuda:
            input, target = input.cuda(), target.cuda()
        out = neg_loss(input, target, args.num_sample).mean()
        optimizer.zero_grad()
        out.backward()
        optimizer.step()
        if iteration % 500 == 0:
            # NOTE(review): indexing [0] into the 0-dim result of .mean()
            # relies on legacy torch behaviour; newer torch needs .item()
            out = out.cpu().data.numpy()[0]
            print('iteration = {}, loss = {}'.format(iteration, out))
    # persist the trained input-embedding matrix for later reuse
    word_embeddings = neg_loss.input_embeddings()
    np.save('data/word_embeddings.npy', word_embeddings)
| 2,183 | 36.016949 | 116 | py |
pytorch_RVAE | pytorch_RVAE-master/train.py | import argparse
import os
import numpy as np
import torch as t
from torch.optim import Adam
from utils.batch_loader import BatchLoader
from utils.parameters import Parameters
from model.rvae import RVAE
# CLI entry point: train the RVAE; requires pre-trained word embeddings
# (see train_word_embeddings.py). Periodically prints train/valid metrics
# and a sampled sentence, then saves the model and metric histories.
if __name__ == "__main__":
    if not os.path.exists('data/word_embeddings.npy'):
        raise FileNotFoundError("word embeddings file was't found")
    parser = argparse.ArgumentParser(description='RVAE')
    parser.add_argument('--num-iterations', type=int, default=120000, metavar='NI',
                        help='num iterations (default: 120000)')
    parser.add_argument('--batch-size', type=int, default=32, metavar='BS',
                        help='batch size (default: 32)')
    # NOTE(review): argparse type=bool treats any non-empty string as True
    parser.add_argument('--use-cuda', type=bool, default=True, metavar='CUDA',
                        help='use cuda (default: True)')
    parser.add_argument('--learning-rate', type=float, default=0.00005, metavar='LR',
                        help='learning rate (default: 0.00005)')
    parser.add_argument('--dropout', type=float, default=0.3, metavar='DR',
                        help='dropout (default: 0.3)')
    parser.add_argument('--use-trained', type=bool, default=False, metavar='UT',
                        help='load pretrained model (default: False)')
    parser.add_argument('--ce-result', default='', metavar='CE',
                        help='ce result path (default: '')')
    parser.add_argument('--kld-result', default='', metavar='KLD',
                        help='ce result path (default: '')')
    args = parser.parse_args()
    batch_loader = BatchLoader('')
    parameters = Parameters(batch_loader.max_word_len,
                            batch_loader.max_seq_len,
                            batch_loader.words_vocab_size,
                            batch_loader.chars_vocab_size)
    rvae = RVAE(parameters)
    if args.use_trained:
        rvae.load_state_dict(t.load('trained_RVAE'))
    if args.use_cuda:
        rvae = rvae.cuda()
    optimizer = Adam(rvae.learnable_parameters(), args.learning_rate)
    train_step = rvae.trainer(optimizer, batch_loader)
    validate = rvae.validater(batch_loader)
    ce_result = []
    kld_result = []
    for iteration in range(args.num_iterations):
        cross_entropy, kld, coef = train_step(iteration, args.batch_size, args.use_cuda, args.dropout)
        # every 5 iterations: print training metrics
        if iteration % 5 == 0:
            print('\n')
            print('------------TRAIN-------------')
            print('----------ITERATION-----------')
            print(iteration)
            print('--------CROSS-ENTROPY---------')
            print(cross_entropy.data.cpu().numpy()[0])
            print('-------------KLD--------------')
            print(kld.data.cpu().numpy()[0])
            print('-----------KLD-coef-----------')
            print(coef)
            print('------------------------------')
        # every 10 iterations: validate and record metric histories
        if iteration % 10 == 0:
            cross_entropy, kld = validate(args.batch_size, args.use_cuda)
            cross_entropy = cross_entropy.data.cpu().numpy()[0]
            kld = kld.data.cpu().numpy()[0]
            print('\n')
            print('------------VALID-------------')
            print('--------CROSS-ENTROPY---------')
            print(cross_entropy)
            print('-------------KLD--------------')
            print(kld)
            print('------------------------------')
            ce_result += [cross_entropy]
            kld_result += [kld]
        # every 20 iterations: decode a sample from a random latent vector
        if iteration % 20 == 0:
            seed = np.random.normal(size=[1, parameters.latent_variable_size])
            sample = rvae.sample(batch_loader, 50, seed, args.use_cuda)
            print('\n')
            print('------------SAMPLE------------')
            print('------------------------------')
            print(sample)
            print('------------------------------')
    # persist the trained weights and the recorded metric curves
    t.save(rvae.state_dict(), 'trained_RVAE')
    np.save('ce_result_{}.npy'.format(args.ce_result), np.array(ce_result))
    np.save('kld_result_npy_{}'.format(args.kld_result), np.array(kld_result))
| 4,032 | 37.04717 | 102 | py |
pytorch_RVAE | pytorch_RVAE-master/selfModules/embedding.py | import numpy as np
import torch as t
import torch.nn as nn
from torch.nn import Parameter
from .tdnn import TDNN
class Embedding(nn.Module):
    """Combined word + character embedding layer.

    Word vectors come from a pre-trained matrix on disk and are frozen;
    character vectors are trainable (uniform(-1, 1) init) and are passed
    through a TDNN before being concatenated with the word vectors.
    """

    def __init__(self, params, path='../../../'):
        super(Embedding, self).__init__()
        self.params = params
        pretrained_words = np.load(path + 'data/word_embeddings.npy')
        self.word_embed = nn.Embedding(self.params.word_vocab_size, self.params.word_embed_size)
        self.char_embed = nn.Embedding(self.params.char_vocab_size, self.params.char_embed_size)
        # freeze the pre-trained word matrix; characters stay trainable
        self.word_embed.weight = Parameter(t.from_numpy(pretrained_words).float(), requires_grad=False)
        self.char_embed.weight = Parameter(
            t.Tensor(self.params.char_vocab_size, self.params.char_embed_size).uniform_(-1, 1))
        self.TDNN = TDNN(self.params)

    def forward(self, word_input, character_input):
        """
        :param word_input: [batch_size, seq_len] tensor of Long type
        :param character_input: [batch_size, seq_len, max_word_len] tensor of Long type
        :return: input embedding with shape of [batch_size, seq_len, word_embed_size + sum_depth]
        """
        assert word_input.size()[:2] == character_input.size()[:2], \
            'Word input and character input must have the same sizes, but {} and {} found'.format(
                word_input.size(), character_input.size())
        batch_size, seq_len = word_input.size()
        word_vectors = self.word_embed(word_input)
        # flatten words so every word's characters are embedded in one pass
        char_flat = character_input.view(-1, self.params.max_word_len)
        char_vectors = self.char_embed(char_flat)
        char_vectors = char_vectors.view(batch_size,
                                         seq_len,
                                         self.params.max_word_len,
                                         self.params.char_embed_size)
        char_features = self.TDNN(char_vectors)
        return t.cat([word_vectors, char_features], 2)
| 2,001 | 37.5 | 98 | py |
pytorch_RVAE | pytorch_RVAE-master/selfModules/highway.py | import torch.nn as nn
import torch.nn.functional as F
class Highway(nn.Module):
    """Multi-layer highway network (Srivastava et al., 2015).

    Each layer computes ``g * f(H(x)) + (1 - g) * Q(x)`` where ``g`` is a
    sigmoid gate and ``H``, ``Q`` are affine transforms of the input.

    NOTE: the sub-layers are intentionally kept in plain Python lists (not
    nn.ModuleList) and their parameters are registered manually under names
    such as ``gate_module_0-0`` so existing checkpoints keep loading.
    """

    def __init__(self, size, num_layers, f):
        """
        :param size: input/output feature dimension of every layer
        :param num_layers: number of stacked highway layers
        :param f: elementwise non-linearity applied to the transform path
        """
        super(Highway, self).__init__()
        self.num_layers = num_layers
        self.nonlinear = [nn.Linear(size, size) for _ in range(num_layers)]
        for i, module in enumerate(self.nonlinear):
            self._add_to_parameters(module.parameters(), 'nonlinear_module_{}'.format(i))
        self.linear = [nn.Linear(size, size) for _ in range(num_layers)]
        for i, module in enumerate(self.linear):
            self._add_to_parameters(module.parameters(), 'linear_module_{}'.format(i))
        self.gate = [nn.Linear(size, size) for _ in range(num_layers)]
        for i, module in enumerate(self.gate):
            self._add_to_parameters(module.parameters(), 'gate_module_{}'.format(i))
        self.f = f

    def forward(self, x):
        """
        :param x: tensor with shape of [batch_size, size]
        :return: tensor with shape of [batch_size, size]
        applies σ(x) ⨀ (f(G(x))) + (1 - σ(x)) ⨀ (Q(x)) transformation | G and Q is affine transformation,
        f is non-linear transformation, σ(x) is affine transformation with sigmoid non-linearition
        and ⨀ is element-wise multiplication
        """
        for layer in range(self.num_layers):
            # Tensor.sigmoid() replaces the deprecated F.sigmoid alias.
            gate = self.gate[layer](x).sigmoid()
            nonlinear = self.f(self.nonlinear[layer](x))
            linear = self.linear[layer](x)
            x = gate * nonlinear + (1 - gate) * linear
        return x

    def _add_to_parameters(self, parameters, name):
        """Register every tensor in *parameters* on this module as ``{name}-{i}``."""
        for i, parameter in enumerate(parameters):
            self.register_parameter(name='{}-{}'.format(name, i), param=parameter)
| 1,743 | 33.88 | 105 | py |
pytorch_RVAE | pytorch_RVAE-master/selfModules/neg.py | import torch as t
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import Parameter
from utils.functional import *
class NEG_loss(nn.Module):
    # Negative-sampling (NEG) loss for skip-gram style word embedding training.
    # Keeps two embedding tables: `in_embed` for center words and `out_embed`
    # for context/noise words.
    def __init__(self, num_classes, embed_size):
        """
        :param num_classes: An int. The number of possible classes.
        :param embed_size: An int. Embedding size
        """
        super(NEG_loss, self).__init__()
        self.num_classes = num_classes
        self.embed_size = embed_size
        # Both tables are uniformly initialized in [-1, 1].
        self.out_embed = nn.Embedding(self.num_classes, self.embed_size)
        self.out_embed.weight = Parameter(t.FloatTensor(self.num_classes, self.embed_size).uniform_(-1, 1))
        self.in_embed = nn.Embedding(self.num_classes, self.embed_size)
        self.in_embed.weight = Parameter(t.FloatTensor(self.num_classes, self.embed_size).uniform_(-1, 1))
    def forward(self, input_labes, out_labels, num_sampled):
        """
        :param input_labes: Tensor with shape of [batch_size] of Long type
        :param out_labels: Tensor with shape of [batch_size] of Long type
        :param num_sampled: An int. The number of sampled from noise examples
        :return: Loss estimation with shape of [batch_size]
        loss defined in Mikolov et al. Distributed Representations of Words and Phrases and their Compositionality
        papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf
        """
        # parameters_allocation_check comes from utils.functional (star import);
        # it verifies both tables live on the same device.
        assert parameters_allocation_check(self), \
            """
            Invalid CUDA options. out_embed and in_embed parameters both should be stored in the same memory
            got out_embed.is_cuda = {}, in_embed.is_cuda = {}
            """.format(self.out_embed.weight.is_cuda, self.in_embed.weight.is_cuda)
        use_cuda = self.out_embed.weight.is_cuda
        [batch_size] = input_labes.size()
        input = self.in_embed(input_labes)
        output = self.out_embed(out_labels)
        # Noise word ids drawn uniformly over the vocabulary.
        # NOTE(review): uniform_(0, num_classes - 1).long() truncates toward
        # zero, so the last class id is sampled with near-zero probability —
        # confirm whether this matches the intended noise distribution.
        noise = Variable(t.Tensor(batch_size, num_sampled).uniform_(0, self.num_classes - 1).long())
        if use_cuda:
            noise = noise.cuda()
        # Negate the noise embeddings so the dot products below enter the
        # sigmoid with a minus sign, as in the NEG objective.
        noise = self.out_embed(noise).neg()
        log_target = (input * output).sum(1).squeeze().sigmoid().log()
        ''' ∑[batch_size, num_sampled, embed_size] * [batch_size, embed_size, 1] ->
            ∑[batch_size, num_sampled] -> [batch_size] '''
        sum_log_sampled = t.bmm(noise, input.unsqueeze(2)).sigmoid().log().sum(1).squeeze()
        loss = log_target + sum_log_sampled
        # Return the negated objective so that minimizing it maximizes the
        # NEG log-likelihood; shape [batch_size].
        return -loss
    def input_embeddings(self):
        # Convenience accessor: center-word embedding matrix as a numpy array.
        return self.in_embed.weight.data.cpu().numpy()
| 2,619 | 37.529412 | 118 | py |
pytorch_RVAE | pytorch_RVAE-master/selfModules/tdnn.py | import torch as t
from torch.nn import Parameter
import torch.nn as nn
import torch.nn.functional as F
class TDNN(nn.Module):
    """Time-delay network: multi-width character 1d-convolutions with
    max-over-time pooling, producing a fixed-size feature per word.

    ``params.kernels`` is a list of ``(kW, out_dim)`` pairs; the output depth
    is the sum of all ``out_dim``.
    """

    def __init__(self, params):
        super(TDNN, self).__init__()
        self.params = params
        # One conv kernel per (width, depth) spec, uniformly initialized.
        self.kernels = [Parameter(t.Tensor(out_dim, self.params.char_embed_size, kW).uniform_(-1, 1))
                        for kW, out_dim in params.kernels]
        self._add_to_parameters(self.kernels, 'TDNN_kernel')

    def forward(self, x):
        """
        :param x: tensor with shape [batch_size, max_seq_len, max_word_len, char_embed_size]
        :return: tensor with shape [batch_size, max_seq_len, depth_sum]
        applies multikenrel 1d-conv layer along every word in input with max-over-time pooling
        to emit fixed-size output
        """
        input_size = x.size()
        input_size_len = len(input_size)
        assert input_size_len == 4, \
            'Wrong input rang, must be equal to 4, but {} found'.format(input_size_len)
        [batch_size, seq_len, _, embed_size] = input_size
        assert embed_size == self.params.char_embed_size, \
            'Wrong embedding size, must be equal to {}, but {} found'.format(self.params.char_embed_size, embed_size)
        # Fold batch and sequence dims: [batch * seq, char_embed, max_word_len].
        x = x.view(-1, self.params.max_word_len, self.params.char_embed_size).transpose(1, 2).contiguous()
        # t.tanh replaces the deprecated F.tanh alias.
        xs = [t.tanh(F.conv1d(x, kernel)) for kernel in self.kernels]
        # Max-over-time pooling. In modern PyTorch, Tensor.max(dim) already
        # drops the reduced dimension, so the legacy trailing `.squeeze(2)`
        # (a 0.x-era idiom that now raises "dimension out of range") is gone.
        xs = [x.max(dim=2)[0] for x in xs]
        x = t.cat(xs, 1)
        x = x.view(batch_size, seq_len, -1)
        return x

    def _add_to_parameters(self, parameters, name):
        """Register every tensor in *parameters* on this module as ``{name}-{i}``."""
        for i, parameter in enumerate(parameters):
            self.register_parameter(name='{}-{}'.format(name, i), param=parameter)
| 1,769 | 33.038462 | 117 | py |
def fold(f, l, a):
    """Left fold: reduce sequence *l* with binary function *f*, seeded with *a*."""
    acc = a
    for item in l:
        acc = f(acc, item)
    return acc
def f_and(x, y):
    """Short-circuit AND: returns *y* when *x* is truthy, otherwise *x*."""
    return y if x else x
def f_or(x, y):
    """Short-circuit OR: returns *x* when truthy, otherwise *y*."""
    return x if x else y
def parameters_allocation_check(module):
    # Intended to verify all parameters of *module* are allocated consistently
    # (all truthy or all falsy under the fold).
    # NOTE(review): folding `and`/`or` over parameter tensors calls bool() on
    # them; this worked on old-PyTorch Variables but raises for multi-element
    # tensors on modern PyTorch — confirm before reusing on new code.
    parameters = list(module.parameters())
    return fold(f_and, parameters, True) or not fold(f_or, parameters, False)
def handle_inputs(inputs, use_cuda):
    """Wrap each numpy array in *inputs* as a torch Variable, optionally moving it to GPU."""
    import torch as t
    from torch.autograd import Variable
    wrapped = []
    for array in inputs:
        tensor = Variable(t.from_numpy(array))
        if use_cuda:
            tensor = tensor.cuda()
        wrapped.append(tensor)
    return wrapped
def kld_coef(i):
    """KL-annealing weight: a tanh schedule rising from ~0 to 1, centered at step 3500."""
    import math
    return 0.5 * (1 + math.tanh((i - 3500) / 1000))
| 648 | 19.28125 | 77 | py |
pytorch_RVAE | pytorch_RVAE-master/model/rvae.py | import numpy as np
import torch as t
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from .decoder import Decoder
from .encoder import Encoder
from selfModules.embedding import Embedding
from utils.functional import kld_coef, parameters_allocation_check, fold
class RVAE(nn.Module):
    # Recurrent Variational Autoencoder (Bowman et al. style): a bidirectional
    # LSTM encoder produces a context, which is projected to (mu, logvar) of a
    # Gaussian latent z; an LSTM decoder conditioned on z reconstructs words.
    def __init__(self, params):
        super(RVAE, self).__init__()
        self.params = params
        self.embedding = Embedding(self.params, '')
        self.encoder = Encoder(self.params)
        # Encoder context is bidirectional, hence the * 2 input width.
        self.context_to_mu = nn.Linear(self.params.encoder_rnn_size * 2, self.params.latent_variable_size)
        self.context_to_logvar = nn.Linear(self.params.encoder_rnn_size * 2, self.params.latent_variable_size)
        self.decoder = Decoder(self.params)
    def forward(self, drop_prob,
                encoder_word_input=None, encoder_character_input=None,
                decoder_word_input=None, decoder_character_input=None,
                z=None, initial_state=None):
        """
        :param encoder_word_input: An tensor with shape of [batch_size, seq_len] of Long type
        :param encoder_character_input: An tensor with shape of [batch_size, seq_len, max_word_len] of Long type
        :param decoder_word_input: An tensor with shape of [batch_size, max_seq_len + 1] of Long type
        :param initial_state: initial state of decoder rnn in order to perform sampling
        :param drop_prob: probability of an element of decoder input to be zeroed in sense of dropout
        :param z: context if sampling is performing
        :return: unnormalized logits of sentence words distribution probabilities
                    with shape of [batch_size, seq_len, word_vocab_size]
                 final rnn state with shape of [num_layers, batch_size, decoder_rnn_size]
        """
        assert parameters_allocation_check(self), \
            'Invalid CUDA options. Parameters should be allocated in the same memory'
        use_cuda = self.embedding.word_embed.weight.is_cuda
        # Either z is None (training: all encoder/decoder inputs required) or
        # z is given (sampling: only decoder input required).
        assert z is None and fold(lambda acc, parameter: acc and parameter is not None,
                                  [encoder_word_input, encoder_character_input, decoder_word_input],
                                  True) \
            or (z is not None and decoder_word_input is not None), \
            "Invalid input. If z is None then encoder and decoder inputs should be passed as arguments"
        if z is None:
            ''' Get context from encoder and sample z ~ N(mu, std)
            '''
            [batch_size, _] = encoder_word_input.size()
            encoder_input = self.embedding(encoder_word_input, encoder_character_input)
            context = self.encoder(encoder_input)
            mu = self.context_to_mu(context)
            logvar = self.context_to_logvar(context)
            std = t.exp(0.5 * logvar)
            # Reparameterization trick: z = eps * std + mu.
            z = Variable(t.randn([batch_size, self.params.latent_variable_size]))
            if use_cuda:
                z = z.cuda()
            z = z * std + mu
            # Analytic KL divergence between N(mu, std) and N(0, I).
            kld = (-0.5 * t.sum(logvar - t.pow(mu, 2) - t.exp(logvar) + 1, 1)).mean().squeeze()
        else:
            kld = None
        # NOTE: decoder_character_input is accepted for interface symmetry but
        # only word embeddings are fed to the decoder here.
        decoder_input = self.embedding.word_embed(decoder_word_input)
        out, final_state = self.decoder(decoder_input, z, drop_prob, initial_state)
        return out, final_state, kld
    def learnable_parameters(self):
        # word_embedding is constant parameter thus it must be dropped from list of parameters for optimizer
        return [p for p in self.parameters() if p.requires_grad]
    def trainer(self, optimizer, batch_loader):
        # Returns a closure performing one optimization step on a train batch.
        def train(i, batch_size, use_cuda, dropout):
            input = batch_loader.next_batch(batch_size, 'train')
            input = [Variable(t.from_numpy(var)) for var in input]
            input = [var.long() for var in input]
            input = [var.cuda() if use_cuda else var for var in input]
            [encoder_word_input, encoder_character_input, decoder_word_input, decoder_character_input, target] = input
            logits, _, kld = self(dropout,
                                  encoder_word_input, encoder_character_input,
                                  decoder_word_input, decoder_character_input,
                                  z=None)
            logits = logits.view(-1, self.params.word_vocab_size)
            target = target.view(-1)
            cross_entropy = F.cross_entropy(logits, target)
            # 79 scales the (mean) cross entropy; kld_coef(i) anneals the KL
            # term in over training iterations.
            loss = 79 * cross_entropy + kld_coef(i) * kld
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            return cross_entropy, kld, kld_coef(i)
        return train
    def validater(self, batch_loader):
        # Returns a closure evaluating cross entropy / KL on a validation batch
        # (dropout disabled).
        def validate(batch_size, use_cuda):
            input = batch_loader.next_batch(batch_size, 'valid')
            input = [Variable(t.from_numpy(var)) for var in input]
            input = [var.long() for var in input]
            input = [var.cuda() if use_cuda else var for var in input]
            [encoder_word_input, encoder_character_input, decoder_word_input, decoder_character_input, target] = input
            logits, _, kld = self(0.,
                                  encoder_word_input, encoder_character_input,
                                  decoder_word_input, decoder_character_input,
                                  z=None)
            logits = logits.view(-1, self.params.word_vocab_size)
            target = target.view(-1)
            cross_entropy = F.cross_entropy(logits, target)
            return cross_entropy, kld
        return validate
    def sample(self, batch_loader, seq_len, seed, use_cuda):
        # Greedy-ish sampling: decode one word at a time from latent `seed`,
        # feeding each sampled word back in, until end token or seq_len.
        seed = Variable(t.from_numpy(seed).float())
        if use_cuda:
            seed = seed.cuda()
        decoder_word_input_np, decoder_character_input_np = batch_loader.go_input(1)
        decoder_word_input = Variable(t.from_numpy(decoder_word_input_np).long())
        decoder_character_input = Variable(t.from_numpy(decoder_character_input_np).long())
        if use_cuda:
            decoder_word_input, decoder_character_input = decoder_word_input.cuda(), decoder_character_input.cuda()
        result = ''
        initial_state = None
        for i in range(seq_len):
            logits, initial_state, _ = self(0., None, None,
                                            decoder_word_input, decoder_character_input,
                                            seed, initial_state)
            logits = logits.view(-1, self.params.word_vocab_size)
            prediction = F.softmax(logits)
            # Sample the next word from the distribution of the last position.
            word = batch_loader.sample_word_from_distribution(prediction.data.cpu().numpy()[-1])
            if word == batch_loader.end_token:
                break
            result += ' ' + word
            decoder_word_input_np = np.array([[batch_loader.word_to_idx[word]]])
            decoder_character_input_np = np.array([[batch_loader.encode_characters(word)]])
            decoder_word_input = Variable(t.from_numpy(decoder_word_input_np).long())
            decoder_character_input = Variable(t.from_numpy(decoder_character_input_np).long())
            if use_cuda:
                decoder_word_input, decoder_character_input = decoder_word_input.cuda(), decoder_character_input.cuda()
        return result
| 7,319 | 38.567568 | 119 | py |
pytorch_RVAE | pytorch_RVAE-master/model/encoder.py | import torch as t
import torch.nn as nn
import torch.nn.functional as F
from selfModules.highway import Highway
from utils.functional import parameters_allocation_check
class Encoder(nn.Module):
    # Sentence encoder: a 2-layer highway network followed by a bidirectional
    # LSTM whose final state (both directions concatenated) is the context.
    def __init__(self, params):
        super(Encoder, self).__init__()
        self.params = params
        self.hw1 = Highway(self.params.sum_depth + self.params.word_embed_size, 2, F.relu)
        self.rnn = nn.LSTM(input_size=self.params.word_embed_size + self.params.sum_depth,
                           hidden_size=self.params.encoder_rnn_size,
                           num_layers=self.params.encoder_num_layers,
                           batch_first=True,
                           bidirectional=True)
    def forward(self, input):
        """
        :param input: [batch_size, seq_len, embed_size] tensor
        :return: context of input sentenses with shape of [batch_size, latent_variable_size]
        """
        [batch_size, seq_len, embed_size] = input.size()
        # Highway operates per position, so flatten batch and time first.
        input = input.view(-1, embed_size)
        input = self.hw1(input)
        input = input.view(batch_size, seq_len, embed_size)
        assert parameters_allocation_check(self), \
            'Invalid CUDA options. Parameters should be allocated in the same memory'
        ''' Unfold rnn with zero initial state and get its final state from the last layer
        '''
        # NOTE(review): rnn(...) returns (output, (h_n, c_n)); this unpacking
        # takes c_n, i.e. the final *cell* state — confirm that is intended.
        _, (_, final_state) = self.rnn(input)
        final_state = final_state.view(self.params.encoder_num_layers, 2, batch_size, self.params.encoder_rnn_size)
        final_state = final_state[-1]
        # Concatenate forward and backward directions of the last layer.
        h_1, h_2 = final_state[0], final_state[1]
        final_state = t.cat([h_1, h_2], 1)
        return final_state
| 1,685 | 34.125 | 115 | py |
pytorch_RVAE | pytorch_RVAE-master/model/decoder.py | import torch as t
import torch.nn as nn
import torch.nn.functional as F
from utils.functional import parameters_allocation_check
class Decoder(nn.Module):
    """Conditional LSTM decoder: each input token is concatenated with the
    latent context z before being fed to the RNN, and the hidden states are
    projected to word-vocabulary logits."""

    def __init__(self, params):
        super(Decoder, self).__init__()
        self.params = params
        self.rnn = nn.LSTM(input_size=self.params.latent_variable_size + self.params.word_embed_size,
                           hidden_size=self.params.decoder_rnn_size,
                           num_layers=self.params.decoder_num_layers,
                           batch_first=True)
        self.fc = nn.Linear(self.params.decoder_rnn_size, self.params.word_vocab_size)

    def forward(self, decoder_input, z, drop_prob, initial_state=None):
        """
        :param decoder_input: tensor with shape of [batch_size, seq_len, embed_size]
        :param z: sequence context with shape of [batch_size, latent_variable_size]
        :param drop_prob: probability of an element of decoder input to be zeroed in sense of dropout
        :param initial_state: initial state of decoder rnn
        :return: unnormalized word logits [batch_size, seq_len, word_vocab_size]
                 and the final rnn state [num_layers, batch_size, decoder_rnn_size]
        """
        assert parameters_allocation_check(self), \
            'Invalid CUDA options. Parameters should be allocated in the same memory'
        batch_size, seq_len, _ = decoder_input.size()
        tokens = F.dropout(decoder_input, drop_prob)
        # Condition the rnn on z by tiling it across the time dimension.
        tiled_z = t.cat([z] * seq_len, 1).view(batch_size, seq_len, self.params.latent_variable_size)
        rnn_in = t.cat([tokens, tiled_z], 2)
        rnn_out, final_state = self.rnn(rnn_in, initial_state)
        flat = rnn_out.contiguous().view(-1, self.params.decoder_rnn_size)
        logits = self.fc(flat).view(batch_size, seq_len, self.params.word_vocab_size)
        return logits, final_state
| 2,142 | 39.433962 | 103 | py |
semantic-abstraction | semantic-abstraction-main/generate_relevancy.py | from typing import List
from pathlib import Path
import h5py
import torch
from tqdm import tqdm
import ray
from utils import write_to_hdf5
from filelock import FileLock
import numpy as np
from CLIP.clip import ClipWrapper, saliency_configs, imagenet_templates
from dataset import synonyms, deref_h5py
import typer
import imageio
from matplotlib import pyplot as plt
import cv2
from time import time
app = typer.Typer()
def resize_and_add_data(dataset, data):
    """Append *data* along axis 0 of a resizable hdf5 *dataset* and return a
    region reference for every newly added row."""
    new_shape = np.array(data.shape)
    old_shape = np.array(dataset.shape)
    # Trailing dimensions must agree; only the first axis grows.
    assert (old_shape[1:] == new_shape[1:]).all()
    old_len, added = old_shape[0], new_shape[0]
    dataset.resize(old_len + added, axis=0)
    dataset[-added:, ...] = data
    return [dataset.regionref[old_len + i, ...] for i in np.arange(0, added)]
def get_datastructure(image_shape, relevancy_shape, tsdf_dim, num_output_pts, **kwargs):
    """Describe the per-scene hdf5 datasets as ``{name: {dtype, item_shape}}``.

    Extra keyword arguments are accepted and ignored so callers can pass a
    whole config dict.
    """
    img = list(image_shape)
    rel = list(relevancy_shape)
    num_tsdf_pts = np.prod(tsdf_dim)
    return {
        "rgb": {"dtype": "uint8", "item_shape": img + [3]},
        "depth": {"dtype": "f", "item_shape": img},
        "seg": {"dtype": "i", "item_shape": img},
        "saliencies": {"dtype": "f", "item_shape": rel},
        "tsdf_value_pts": {"dtype": "f", "item_shape": [num_tsdf_pts]},
        "tsdf_xyz_pts": {"dtype": "f", "item_shape": [num_tsdf_pts, 3]},
        "full_xyz_pts": {"dtype": "f", "item_shape": [num_output_pts, 3]},
        "full_objid_pts": {"dtype": "i", "item_shape": [num_output_pts]},
    }
def init_dataset(file_path, data_structure):
    """Create a fresh hdf5 file at *file_path* with one empty, resizable,
    gzip-compressed dataset for every entry of *data_structure*."""
    with h5py.File(file_path, mode="w") as file:
        # setup
        for key, info in data_structure.items():
            item_shape = info["item_shape"]
            file.create_dataset(
                name=key,
                shape=tuple([0] + item_shape),
                dtype=info["dtype"],
                chunks=tuple([1] + item_shape),
                compression="gzip",
                compression_opts=9,
                maxshape=tuple([None] + item_shape),
            )
@ray.remote
def generate_saliency_helper(
    clip_wrapper, rgb_inputs, prompts, text_labels, scene_path, replace
):
    # Ray task: compute CLIP saliency maps for every rgb input under every
    # saliency config, then append them (plus a mean-over-labels channel) to
    # the scene's hdf5 file under file-lock. Returns the clip_wrapper actor so
    # the caller can reuse it.
    saliencies = {
        rgb_name: {
            saliency_config_name: ray.get(
                clip_wrapper.get_clip_saliency.remote(
                    img=rgb,
                    text_labels=text_labels,
                    # Use the imagenet prompt ensemble only when the config
                    # asks for it; otherwise fall back to the given prompts.
                    prompts=prompts
                    if "imagenet_prompt_ensemble"
                    not in saliency_config(img_dim=min(rgb.shape[:2]))
                    or not saliency_config(img_dim=min(rgb.shape[:2]))[
                        "imagenet_prompt_ensemble"
                    ]
                    else imagenet_templates,
                    **saliency_config(img_dim=min(rgb.shape[:2])),
                )
            )
            for saliency_config_name, saliency_config in saliency_configs.items()
        }
        for rgb_name, rgb in rgb_inputs.items()
    }
    # Serialize writers: multiple ray tasks may target the same scene file.
    with FileLock(scene_path + ".lock"):
        with h5py.File(scene_path, mode="a") as f:
            saliency_group = f["data"].create_group("saliencies")
            for rgb_name, rgb_saliencies in saliencies.items():
                for (
                    saliency_config_name,
                    (config_saliency, text_label_features),
                ) in rgb_saliencies.items():
                    # Resample each saliency map to the file's storage shape.
                    storage_dims = np.array(f["saliencies"].shape)[1:]
                    config_saliency = torch.nn.functional.interpolate(
                        config_saliency[:, None, :, :],
                        size=tuple(storage_dims),
                        mode="nearest-exact"
                        # mode='bilinear',
                        # align_corners=False
                    )[:, 0]
                    # Append a mean-over-labels map/feature as an extra entry.
                    config_saliency = torch.cat(
                        [config_saliency, config_saliency.mean(dim=0, keepdim=True)],
                        dim=0,
                    )
                    text_label_features = torch.cat(
                        [
                            text_label_features,
                            text_label_features.mean(dim=0, keepdim=True),
                        ],
                        dim=0,
                    )
                    text_label_features /= text_label_features.norm(
                        dim=-1, keepdim=True
                    )
                    write_to_hdf5(
                        saliency_group,
                        key=rgb_name
                        + "|"
                        + saliency_config_name
                        + "|saliency_text_labels",
                        value=np.array(text_labels + ["mean"]).astype("S"),
                        replace=replace,
                    )
                    write_to_hdf5(
                        saliency_group,
                        key=rgb_name
                        + "|"
                        + saliency_config_name
                        + "|saliency_text_label_features",
                        value=text_label_features,
                        replace=replace,
                    )
                    # Store the maps in the shared resizable dataset and keep
                    # only region references inside the scene's data group.
                    region_references = resize_and_add_data(
                        dataset=f["saliencies"], data=config_saliency
                    )
                    write_to_hdf5(
                        saliency_group,
                        key=rgb_name + "|" + saliency_config_name,
                        dtype=h5py.regionref_dtype,
                        value=region_references,
                        replace=replace,
                    )
    return clip_wrapper
@app.command()
def dataset(
    file_path: str,
    num_processes: int,
    local: bool,
    prompts: List[str] = ["a render of a {} in a game engine."],
    replace=False,
):
    # Generate CLIP relevancies for every *.hdf5 scene under `file_path`,
    # fanning out over `num_processes` ray actors (GPUs split evenly).
    # NOTE: the mutable list default for `prompts` is the typer CLI idiom;
    # it is rebound (never mutated) below.
    if "matterport" in file_path or "nyu" in file_path:
        prompts = ["a photograph of a {} in a home."]
    print(prompts)
    tasks = []
    ray.init(log_to_driver=True, local_mode=local)
    num_cuda_devices = torch.cuda.device_count()
    assert num_cuda_devices > 0
    print(f"[INFO] FOUND {num_cuda_devices} CUDA DEVICE")
    wrapper_actor_cls = ray.remote(ClipWrapper)
    available_clip_wrappers = [
        wrapper_actor_cls.options(num_gpus=num_cuda_devices / num_processes).remote(
            clip_model_type="ViT-B/32", device="cuda"
        )
        for _ in range(num_processes)
    ]
    scene_paths = list(reversed(sorted(map(str, Path(file_path).rglob("*.hdf5")))))
    if replace:
        # Destructive: wipe all previously stored relevancies first.
        if input("Replace = True. Delete existing relevancies? [y/n]") != "y":
            exit()
        for scene_path in tqdm(
            scene_paths, dynamic_ncols=True, desc="deleting existing relevancies"
        ):
            try:
                with h5py.File(scene_path, mode="a") as f:
                    for k in f["data"]:
                        if "salienc" in k:
                            del f[f"data/{k}"]
                    if "saliencies" in f:
                        # Recreate the shared dataset empty, same item shape.
                        data_shape = list(f["saliencies"].shape[1:])
                        del f["saliencies"]
                        f.create_dataset(
                            name="saliencies",
                            shape=tuple([0] + data_shape),
                            dtype="f",
                            chunks=tuple([1] + data_shape),
                            compression="gzip",
                            compression_opts=9,
                            maxshape=tuple([None] + data_shape),
                        )
            except Exception as e:
                print(e, scene_path)
                exit()
    for scene_path in tqdm(
        scene_paths, dynamic_ncols=True, desc="generating relevancies", smoothing=0.001
    ):
        assert len(available_clip_wrappers) > 0
        try:
            with h5py.File(scene_path, mode="a") as f:
                scene_already_done = "saliencies" in f["data"]
                if not scene_already_done or replace:
                    if scene_already_done:
                        for k in f["data"]:
                            if "salienc" in k:
                                del f[f"data/{k}"]
                        data_shape = f["saliencies"].shape[1:]
                        if "saliencies" in f:
                            del f["saliencies"]
                        f.create_dataset(
                            name="saliencies",
                            shape=tuple([0] + data_shape),
                            dtype="f",
                            chunks=tuple([1] + data_shape),
                            compression="gzip",
                            compression_opts=9,
                            maxshape=tuple([None] + data_shape),
                        )
                        if "data/visible_scene_obj_labels" in f:
                            del f["data/visible_scene_obj_labels"]
                    objid_to_class = np.array(f[f"data/objid_to_class"]).astype(str)
                    text_labels = objid_to_class.copy()
                    scene_has_groundtruth = (
                        "seg" in f["data"] and "full_objid_pts" in f["data"]
                    )
                    visible_scene_obj_labels = text_labels.copy()
                    if scene_has_groundtruth:
                        # Restrict labels to objects both present in the scene
                        # point cloud and visible in the segmentation image.
                        objids_in_scene = list(
                            set(
                                deref_h5py(
                                    dataset=f["full_objid_pts"],
                                    refs=f["data/full_objid_pts"],
                                )
                                .astype(int)
                                .reshape(-1)
                            )
                            - {-1}
                        ) # remove empty
                        scene_object_labels = text_labels.copy()[objids_in_scene]
                        # remove objects which are not in view
                        gt_seg = deref_h5py(dataset=f["seg"], refs=f["data"]["seg"])[0]
                        visible_obj_ids = list(map(int, set(np.unique(gt_seg)) - {-1}))
                        visible_obj_labels = text_labels[visible_obj_ids]
                        visible_scene_obj_labels = list(
                            set(visible_obj_labels).intersection(
                                set(scene_object_labels)
                            )
                        )
                        # Strip bracketed suffixes and deduplicate.
                        visible_scene_obj_labels = list(
                            sorted(
                                set(
                                    map(
                                        lambda c: c.split("[")[0].lstrip().rstrip(),
                                        visible_scene_obj_labels,
                                    )
                                )
                            )
                        )
                        # visible_scene_obj_labels used to filter
                        # objects both visible and in scene
                        text_labels = visible_obj_labels.copy()
                    text_labels = set(text_labels)
                    # create saliency maps necessary for descriptions
                    if (
                        "descriptions" in f["data"]
                        and len(np.array(f["data/descriptions/spatial_relation_name"]))
                        > 0
                    ):
                        target_obj_names = np.array(
                            f["data/descriptions/target_obj_name"]
                        ).astype(str)
                        reference_obj_names = np.array(
                            f["data/descriptions/reference_obj_name"]
                        ).astype(str)
                        spatial_relation_names = np.array(
                            f["data/descriptions/spatial_relation_name"]
                        ).astype(str)
                        text_labels = text_labels.union(
                            target_obj_names.tolist() + reference_obj_names.tolist()
                        )
                        # gradcam for clip spatial
                        # Build full "<target> <relation> a <reference>" phrases.
                        descriptions = ""
                        for desc_part in [
                            target_obj_names,
                            " ",
                            spatial_relation_names,
                            " a ",
                            reference_obj_names,
                        ]:
                            descriptions = np.char.add(descriptions, desc_part)
                        text_labels = text_labels.union(descriptions)
                        # descriptions with synonyms
                        descriptions = ""
                        for desc_part in [
                            np.array(
                                list(
                                    map(
                                        lambda x: x
                                        if x not in synonyms.keys()
                                        else synonyms[x],
                                        target_obj_names,
                                    )
                                )
                            ),
                            " ",
                            spatial_relation_names,
                            " a ",
                            np.array(
                                list(
                                    map(
                                        lambda x: x
                                        if x not in synonyms.keys()
                                        else synonyms[x],
                                        reference_obj_names,
                                    )
                                )
                            ),
                        ]:
                            descriptions = np.char.add(descriptions, desc_part)
                        text_labels = text_labels.union(descriptions)
                    text_labels = set(
                        map(lambda c: c.split("[")[0].lstrip().rstrip(), text_labels)
                    )
                    # do synonyms
                    text_labels = text_labels.union(
                        map(
                            lambda text_label: synonyms[text_label],
                            filter(
                                lambda text_label: text_label in synonyms, text_labels
                            ),
                        )
                    )
                    for remove_label in {"unlabelled", "empty", "out of bounds"}:
                        if remove_label in text_labels:
                            text_labels.remove(remove_label)
                    text_labels = list(sorted(text_labels))
                    rgb_inputs = {"rgb": np.array(f["rgb"][f["data"]["rgb"][0]][0])}
                    if (
                        "domain_randomized_rgb" in f["data"]
                        and len(np.array(f["data/domain_randomized_rgb"])[0].shape) > 1
                    ):
                        rgb_inputs["domain_randomized_rgb"] = np.array(
                            f["data/domain_randomized_rgb"]
                        )[0]
                    write_to_hdf5(
                        f["data"],
                        key="visible_scene_obj_labels",
                        value=np.array(visible_scene_obj_labels).astype("S"),
                        replace=replace,
                    )
                    # Dispatch the heavy lifting to a free ray actor.
                    clip_wrapper = available_clip_wrappers.pop()
                    tasks.append(
                        generate_saliency_helper.remote(
                            clip_wrapper=clip_wrapper,
                            scene_path=scene_path,
                            rgb_inputs=rgb_inputs,
                            text_labels=text_labels,
                            prompts=prompts,
                            replace=replace,
                        )
                    )
        except Exception as e:
            print(e)
            print(scene_path, "invalid hdf5 file")
        if len(available_clip_wrappers) == 0:
            # All actors busy: wait for one to finish, recycle (or respawn) it.
            readies, tasks = ray.wait(tasks, num_returns=1)
            num_readies = len(readies)
            try:
                available_clip_wrappers.extend(ray.get(readies))
            except Exception as e:
                print(e)
                available_clip_wrappers.extend(
                    [
                        wrapper_actor_cls.options(
                            num_gpus=num_cuda_devices / num_processes
                        ).remote(clip_model_type="ViT-B/32", device="cuda")
                        for _ in range(num_readies)
                    ]
                )
    ray.get(tasks)
@app.command()
def image(
    file_path: str = typer.Argument(
        default="matterport.png", help="path of image file"
    ),
    labels: List[str] = typer.Option(
        default=[
            "basketball jersey",
            "nintendo switch",
            "television",
            "ping pong table",
            "vase",
            "fireplace",
            "abstract painting of a vespa",
            "carpet",
            "wall",
        ],
        help='list of object categories (e.g.: "nintendo switch")',
    ),
    prompts: List[str] = typer.Option(
        default=["a photograph of a {} in a home."],
        help="prompt template to use with CLIP.",
    ),
    ):
    """
    Generates a multi-scale relevancy for image at `file_path`.
    """
    img = np.array(imageio.imread(file_path))
    assert img.dtype == np.uint8
    h, w, c = img.shape
    start = time()
    # Only the relevancy maps (index 0) are kept; text features are dropped.
    grads = ClipWrapper.get_clip_saliency(
        img=img,
        text_labels=np.array(labels),
        prompts=prompts,
        **saliency_configs["ours"](h),
    )[0]
    print(f"get gradcam took {float(time() - start)} seconds", grads.shape)
    # Subtract the mean over labels so each map highlights its own label.
    grads -= grads.mean(axis=0)
    grads = grads.cpu().numpy()
    fig, axes = plt.subplots(3, 3)
    axes = axes.flatten()
    vmin = 0.002
    cmap = plt.get_cmap("jet")
    vmax = 0.008
    for ax, label_grad, label in zip(axes, grads, labels):
        ax.axis("off")
        ax.imshow(img)
        ax.set_title(label, fontsize=12)
        # Normalize into [0, 1], colorize, and overlay with alpha weighted by
        # relevancy so strong regions stay opaque.
        grad = np.clip((label_grad - vmin) / (vmax - vmin), a_min=0.0, a_max=1.0)
        colored_grad = cmap(grad)
        grad = 1 - grad
        colored_grad[..., -1] = grad * 0.7
        ax.imshow(colored_grad)
    plt.tight_layout(pad=0)
    plt.savefig("grads.png")
    print("dumped relevancy to grads.png")
    plt.show()
if __name__ == "__main__":
app()
| 18,591 | 39.77193 | 88 | py |
semantic-abstraction | semantic-abstraction-main/train_vool.py | from typing import Dict, Tuple, Union
import numpy as np
from dataset import ObjectLocalizationDataset
from net import (
SemAbsVOOL,
ClipSpatialVOOL,
SemanticAwareVOOL,
)
import utils
from torch.nn.functional import binary_cross_entropy_with_logits
import torch
import pandas as pd
def get_detailed_stats(
    prediction,
    gt_label,
    xyz_pts,
    scene_ids,
    target_obj_names,
    reference_obj_names,
    spatial_relation_names,
    scene_bounds,
    ignore_pts,
    detailed_analysis=False,
    eval_device="cuda",
    **kwargs,
):
    # Build a per-(scene, description) DataFrame of point-level and voxelized
    # prediction metrics. Inputs are [num_scenes, num_descs, ...] tensors;
    # descriptions with relation "[pad]" get NaN metrics.
    num_scenes, num_descs = gt_label.shape[:2]
    retvals = {
        "scene_id": np.array([[scene_id] * num_descs for scene_id in scene_ids])
        .reshape(-1)
        .tolist(),
        "target_obj_name": np.array(target_obj_names).T.reshape(-1).tolist(),
        "reference_obj_name": np.array(reference_obj_names).T.reshape(-1).tolist(),
        "spatial_relation_name": np.array(spatial_relation_names)
        .T.reshape(-1)
        .tolist(),
    }
    # Point-level metrics.
    retvals.update(
        {
            f"point_{k}": v
            for k, v in utils.prediction_analysis(
                prediction=prediction.to(eval_device),
                label=gt_label.to(eval_device),
                ignore=ignore_pts.to(eval_device),
            ).items()
        }
    )
    # Voxelize in chunks of descriptions to bound memory usage.
    num_desc_b = 10
    outputs = []
    for i in np.arange(0, num_descs + num_desc_b + 1, num_desc_b):
        if np.prod(prediction[:, i : i + num_desc_b].shape) == 0:
            continue
        outputs.append(
            utils.voxelize_points(
                prediction=prediction[:, i : i + num_desc_b],
                label=gt_label[:, i : i + num_desc_b],
                xyz_pts=xyz_pts[:, i : i + num_desc_b],
                voxel_shape=(32, 32, 32),
                scene_bounds=scene_bounds,
                ignore_pts=ignore_pts[:, i : i + num_desc_b],
                device=eval_device,
            )
        )
    voxelized_pts = {
        k: torch.cat([output[k] for output in outputs], dim=1)
        for k in outputs[0].keys()
    }
    retvals.update(
        {
            "voxel32x32x32_" + k: v
            for k, v in utils.prediction_analysis(
                **{k: v.to(eval_device) for k, v in voxelized_pts.items()}
            ).items()
        }
    )
    if detailed_analysis:
        # Optional higher-resolution (64^3) voxel metrics.
        outputs = []
        for i in np.arange(0, num_descs + num_desc_b + 1, num_desc_b):
            if np.prod(prediction[:, i : i + num_desc_b].shape) == 0:
                continue
            outputs.append(
                utils.voxelize_points(
                    prediction=prediction[:, i : i + num_desc_b],
                    label=gt_label[:, i : i + num_desc_b],
                    xyz_pts=xyz_pts[:, i : i + num_desc_b],
                    voxel_shape=(64, 64, 64),
                    scene_bounds=scene_bounds,
                    ignore_pts=ignore_pts[:, i : i + num_desc_b],
                    device=eval_device,
                )
            )
        voxelized_pts = {
            k: torch.cat([output[k] for output in outputs], dim=1)
            for k in outputs[0].keys()
        }
        retvals.update(
            {
                "voxel64x64x64_" + k: v
                for k, v in utils.prediction_analysis(
                    **{k: v.to(eval_device) for k, v in voxelized_pts.items()}
                ).items()
            }
        )
    for i, spatial_relation in enumerate(
        np.array(spatial_relation_names).T.reshape(-1)
    ):
        if spatial_relation == "[pad]":  # skip padding classes
            for k in retvals.keys():
                if "voxel" in k or "point" in k:
                    retvals[k][i] = np.NAN
    return pd.DataFrame.from_dict(retvals)
def get_losses(
    net, batch: dict, cutoffs=[-2.0], balance_positive_negative: bool = False, **kwargs
) -> Tuple[Dict[str, Union[float, torch.Tensor]], pd.DataFrame]:
    # Forward the batch through `net`, compute BCE loss over per-point logits,
    # and return (summary stats, per-description detailed stats at each logit
    # cutoff). NOTE: the mutable default `cutoffs` is only iterated, never
    # mutated.
    stats = {}
    batch_size, total_num_descs, num_pts = batch["output_label_pts"].shape
    if num_pts <= 500000:
        outputs = net(**batch)
    else:
        num_descs = 1
        # probably CUDA OOM
        # Fall back to running one description at a time and concatenating.
        outputs = torch.cat(
            [
                net(
                    **{
                        **batch,
                        "input_target_saliency_pts": batch["input_target_saliency_pts"][
                            :, desc_i * num_descs : (desc_i + 1) * num_descs, ...
                        ],
                        "input_reference_saliency_pts": batch[
                            "input_reference_saliency_pts"
                        ][:, desc_i * num_descs : (desc_i + 1) * num_descs, ...],
                        "input_description_saliency_pts": batch[
                            "input_description_saliency_pts"
                        ][:, desc_i * num_descs : (desc_i + 1) * num_descs, ...],
                        "output_xyz_pts": batch["output_xyz_pts"][
                            :, desc_i * num_descs : (desc_i + 1) * num_descs, ...
                        ],
                        "spatial_relation_name": (
                            np.array(batch["spatial_relation_name"])
                            .T[:, desc_i * num_descs : (desc_i + 1) * num_descs]
                            .T
                        ),
                    }
                )
                for desc_i in range(total_num_descs // num_descs + 1)
                if np.prod(
                    batch["output_xyz_pts"][
                        :, desc_i * num_descs : (desc_i + 1) * num_descs, ...
                    ].shape
                )
                > 0
            ],
            dim=1,
        )
    padding_mask = torch.from_numpy(
        np.array(batch["spatial_relation_name"]).T == "[pad]"
    ).bool()
    ignore_pts_mask = torch.zeros_like(outputs).bool()
    # ignore all padding labels
    ignore_pts_mask[padding_mask] = True
    # ignore all points out of bounds
    ignore_pts_mask = torch.logical_or(ignore_pts_mask, batch["out_of_bounds_pts"])
    stats["loss"] = binary_cross_entropy_with_logits(
        outputs,
        batch["output_label_pts"],
        weight=utils.get_bce_weight(
            output_label_pts=batch["output_label_pts"],
            balance_positive_negative=balance_positive_negative,
        ),
    )
    with torch.no_grad():
        accuracy = ((outputs > 0.0).long() == batch["output_label_pts"]).float()[
            ~ignore_pts_mask
        ]
        stats["accuracy"] = accuracy.mean()
        # One detailed-stats table per logit cutoff, then concatenated.
        detailed_stats = [
            get_detailed_stats(
                prediction=outputs > cutoff,
                gt_label=batch["output_label_pts"].bool(),
                xyz_pts=batch["output_xyz_pts"],
                ignore_pts=ignore_pts_mask,
                target_obj_names=batch["target_obj_name"],
                reference_obj_names=batch["reference_obj_name"],
                spatial_relation_names=batch["spatial_relation_name"],
                scene_ids=batch["scene_id"],
                eval_device=net.device,
                **kwargs,
            )
            for cutoff in cutoffs
        ]
        for detailed_stat, cutoff in zip(detailed_stats, cutoffs):
            detailed_stat["cutoff"] = [cutoff] * len(detailed_stat)
        detailed_stats = pd.concat(detailed_stats)
        # Surface mean IoU columns into the summary stats.
        for k in detailed_stats.columns:
            if "iou" in k:
                stats[k] = detailed_stats[k].mean()
    return stats, detailed_stats
# Maps the CLI --approach choice to the VOOL network class to instantiate.
approach = {
    "semantic_abstraction": SemAbsVOOL,
    "semantic_aware": SemanticAwareVOOL,
    "clip_spatial": ClipSpatialVOOL,
}
if __name__ == "__main__":
parser = utils.config_parser()
parser.add_argument("--log", type=str, required=True)
parser.add_argument(
"--approach", choices=approach.keys(), default="semantic_abstraction"
)
args = parser.parse_args()
if args.approach == "semantic_aware":
args.network_inputs = ["rgb"]
utils.train(
get_losses_fn=get_losses,
**utils.setup_experiment(
args=args,
net_class=approach[args.approach],
dataset_class=ObjectLocalizationDataset,
split_file_path=args.file_path + "/vool_split.pkl",
),
**vars(args),
)
| 8,243 | 34.230769 | 88 | py |
semantic-abstraction | semantic-abstraction-main/utils.py | from __future__ import annotations
import os
import pickle
import signal
from typing import Optional, Tuple, Type
import numpy as np
import pandas as pd
import torch
from torch.backends import cudnn
from tqdm import tqdm
from transformers import get_scheduler
from argparse import ArgumentParser
import random
from CLIP.clip import saliency_configs
from net import VirtualGrid
from tensorboardX import SummaryWriter
from torch.nn.parallel import DistributedDataParallel
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from torchtyping import TensorType, patch_typeguard
from arm.optim.lamb import Lamb
from typeguard import typechecked
import logging
from dataset import SceneUnderstandDataset
from rich.logging import RichHandler
# Route all logging through rich for colored, readable console output.
logging.basicConfig(
    level=logging.INFO, format="%(message)s", datefmt="[%X]", handlers=[RichHandler()]
)
# Enable torchtyping shape checking; must run before any @typechecked definition.
patch_typeguard()  # use before @typechecked
def config_parser():
    """Build the ArgumentParser shared by all training/eval entrypoints.

    Only constructs and returns the parser; callers add script-specific
    flags (e.g. ``--log``, ``--approach``) before calling ``parse_args()``.

    Returns:
        argparse.ArgumentParser: parser with all shared experiment flags.
    """
    parser = ArgumentParser()
    parser.add_argument("--file_path", type=str, required=True)
    # BUGFIX: the default is a 3-element list, but without nargs=3 argparse
    # accepted only a single int for --voxel_shape, and the later
    # `tuple(kwargs["voxel_shape"])` call (see get_net) would then fail.
    # nargs=3 keeps the default untouched and makes the flag usable.
    parser.add_argument("--voxel_shape", type=int, nargs=3, default=[128, 128, 128])
    parser.add_argument("--load", type=str)
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--num_warmup_steps", type=int, default=1024)
    parser.add_argument("--save_freq", type=int, default=1)
    parser.add_argument("--eval_freq", type=int, default=5)
    # NOTE(review): default is the string "0" rather than ["0"]; downstream
    # code indexes it (args.gpus[0]), which happens to work for the
    # single-GPU default — confirm before changing.
    parser.add_argument("--gpus", type=str, nargs="+", default="0")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--epochs", type=int, default=200)
    parser.add_argument("--num_descs", type=int, default=4)
    parser.add_argument("--saliency_vmin", type=float, default=None)
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--weight_decay", type=float, default=0.00001)
    parser.add_argument("--grad_max_norm", type=float, default=2.0)
    parser.add_argument("--xyz_pts_noise", type=float, default=0.0)
    parser.add_argument("--num_input_pts", type=int, default=80000)
    parser.add_argument("--num_output_pts", type=int, default=400000)
    parser.add_argument("--pointing_dim", type=int, default=64)
    parser.add_argument("--unet_f_maps", type=int, default=16)
    parser.add_argument("--unet_num_channels", type=int, default=16)
    parser.add_argument("--unet_num_groups", type=int, default=8)
    parser.add_argument("--unet_num_levels", type=int, default=6)
    parser.add_argument("--num_patches", type=int, default=4)
    parser.add_argument("--patch_mask_cutoff", type=float, default=0.004)
    parser.add_argument("--domain_randomization", action="store_true", default=True)
    parser.add_argument("--use_pts_feat_extractor", action="store_true", default=True)
    parser.add_argument("--pts_feat_extractor_hidden_dim", type=int, default=128)
    parser.add_argument("--subtract_mean_relevancy", action="store_true", default=True)
    parser.add_argument("--offset_patch_mask", action="store_true", default=False)
    parser.add_argument(
        "--balance_positive_negative", action="store_true", default=False
    )
    parser.add_argument(
        "--balance_spatial_relations", action="store_true", default=True
    )
    parser.add_argument(
        "--always_replace_subsample_pts", action="store_true", default=False
    )
    parser.add_argument("--balance_spatial_sampling", action="store_true", default=True)
    parser.add_argument("--decoder_concat_xyz_pts", action="store_true", default=True)
    parser.add_argument("--use_amp", action="store_true", default=False)
    parser.add_argument("--num_workers", type=int, default=8)
    parser.add_argument("--dr_pos", type=float, default=0.1)
    parser.add_argument("--dr_orn", type=float, default=0.3)
    parser.add_argument("--dr_scale", type=float, default=0.1)
    # NOTE(review): type=list would split a CLI string into single
    # characters; this flag is effectively usable only via its default.
    parser.add_argument(
        "--scene_bounds", type=list, default=[[-1.0, -1.0, -0.1], [1.0, 1.0, 1.9]]
    )
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument(
        "--pointing_method",
        choices=["cosine_sim", "dot_product", "additive"],
        default="cosine_sim",
    )
    parser.add_argument(
        "--saliency_config", choices=saliency_configs.keys(), default="ours"
    )
    parser.add_argument(
        "--network_inputs",
        nargs="+",
        choices=["patch_masks", "saliency", "rgb", "tsdf"],
        default=["saliency"],
    )
    parser.add_argument(
        "--lr_scheduler_type",
        choices=[
            "constant",
            "linear",
            "cosine",
            "cosine_with_restarts",
            "constant_with_warmup",
        ],
        default="cosine_with_restarts",
    )
    parser.add_argument("--reduce_method", choices=["max", "mean"], default="max")
    return parser
def is_main_process():
    """Return True iff this process should do logging/checkpointing.

    Outside of distributed mode every process is "main"; under
    torch.distributed only rank 0 qualifies.
    """
    if not dist.is_initialized():
        return True
    return dist.get_rank() == 0
def setup_experiment(
    args,
    split_file_path: str,
    net_class: Type[torch.nn.Module],
    dataset_class,
    tsdf_shape: Optional[Tuple[int, int, int]] = None,
    return_vis: bool = False,
    **kwargs,
):
    """Prepare a full experiment: distributed setup, datasets, loggers, network.

    Loads the split pickle at `split_file_path`, builds one dataset and one
    tensorboard logger per split, persists/compares `args.pkl` in the log
    dir, and constructs net/optimizer/scheduler via `get_net`.

    Returns a dict with keys: splits, loggers, datasets, net, scaler,
    optimizer, lr_scheduler, start_epoch, training_detailed_stats —
    designed to be splatted straight into `train(...)`.
    """
    if len(args.gpus) > 1:
        # multi-GPU: one process per GPU via torch.distributed (NCCL);
        # SIGINT tears the process group down cleanly.
        os.environ["NCCL_P2P_DISABLE"] = "1"
        dist.init_process_group(backend="nccl", init_method="env://")
        signal.signal(signal.SIGINT, lambda sig, frame: dist.destroy_process_group())
        if args.device == "cuda":
            torch.cuda.set_device(int(args.gpus[dist.get_rank() % len(args.gpus)]))
    elif args.device == "cuda":
        torch.cuda.set_device(int(args.gpus[0]))
    # only rank 0 logs at INFO; other ranks stay quiet
    if not is_main_process():
        logging.getLogger().setLevel(logging.ERROR)
    else:
        logging.getLogger().setLevel(logging.INFO)
    if tsdf_shape is None:
        tsdf_shape = args.voxel_shape
    splits = pickle.load(open(split_file_path, "rb"))
    logging.info("DATASET AT" + args.file_path)
    logging.info(
        " | ".join(
            [
                f"{split_name}: {len(scene_paths)}"
                for split_name, scene_paths in splits.items()
            ]
        )
    )
    # one tensorboard writer per split (main process only)
    loggers = {
        k: SummaryWriter(args.log + f"/{k}") if is_main_process() else None
        for k in splits.keys()
    }
    if is_main_process():
        if os.path.exists(args.log + "/args.pkl"):
            # check if it's very different
            prev_args = pickle.load(open(args.log + "/args.pkl", "rb"))
            logging.warning(
                args.log + "/args.pkl" + " already exists. Differences are;"
            )
            # symmetric difference of stringified (key, value) pairs
            for arg in set(map(str, vars(prev_args).items())) ^ set(
                map(str, vars(args).items())
            ):
                logging.warning(arg)
        else:
            pickle.dump(args, open(args.log + "/args.pkl", "wb"))
    args.scene_bounds = torch.tensor(args.scene_bounds)
    # eval splits disable domain randomization and always return vis data;
    # the *_dr and *_synonyms splits toggle their respective dataset modes.
    datasets = {
        k: dataset_class(
            scene_paths=splits[k],
            tsdf_shape=tsdf_shape,
            domain_randomized_rgb=(k == "unseen_instances_dr"),
            use_synonyms=(k == "unseen_instances_synonyms"),
            **{
                **vars(args),
                **kwargs,
                **{
                    "domain_randomization": False
                    if k != "train"
                    else args.domain_randomization,
                    "return_vis": k != "train" or return_vis,
                },
            },
        )
        for k in splits.keys()
        if len(splits[k]) > 0
    }
    # resume per-batch stats dataframe if present (enables resumed runs)
    training_detailed_stats = None
    if os.path.exists(args.log + "/detailed_stats.pkl"):
        training_detailed_stats = pickle.load(
            open(args.log + "/detailed_stats.pkl", "rb")
        )
    net, optimizer, lr_scheduler, start_epoch, scaler = get_net(
        train_dataset=datasets.get("train", None), net_class=net_class, **vars(args)
    )
    return {
        "splits": splits,
        "loggers": loggers,
        "datasets": datasets,
        "net": net,
        "scaler": scaler,
        "optimizer": optimizer,
        "lr_scheduler": lr_scheduler,
        "start_epoch": start_epoch,
        "training_detailed_stats": training_detailed_stats,
    }
def seed_all(seed=0):
    """Seed every RNG in use (python, numpy, torch) and pin cuDNN.

    Forcing deterministic cuDNN kernels and disabling autotuning makes
    runs reproducible at some speed cost.
    """
    logging.debug(f"SEEDING WITH {seed}")
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = False
    cudnn.deterministic = True
def get_n_params(model):
    """Return the total number of scalar parameters in *model*."""
    return sum(p.numel() for p in model.parameters())
def get_net(
    load: str,
    lr: float,
    weight_decay: float,
    lr_scheduler_type: str,
    num_warmup_steps: int,
    epochs: int,
    seed: int,
    net_class: Type[torch.nn.Module],
    use_amp: bool,
    train_dataset: Optional[SceneUnderstandDataset] = None,
    **kwargs,
):
    """Build network, LAMB optimizer, LR scheduler and optional AMP scaler.

    Seeds all RNGs first, wraps the net in DistributedDataParallel when a
    process group is active, and restores net/optimizer/epoch counters
    from the checkpoint at `load` if given.

    Returns:
        (net, optimizer, lr_scheduler, start_epoch, scaler) where `scaler`
        is None unless `use_amp` is True.
    """
    seed_all(seed)
    device = kwargs["device"]
    batch_size = kwargs["batch_size"]
    kwargs["voxel_shape"] = tuple(kwargs["voxel_shape"])
    net = net_class(**kwargs).to(device)
    if dist.is_initialized():
        net = DistributedDataParallel(
            module=net, device_ids=[device], find_unused_parameters=True
        )
    logging.info(f"NUM PARAMS: {get_n_params(net)}")
    optimizer = Lamb(
        net.parameters(),
        lr=lr,
        betas=(0.9, 0.999),
        weight_decay=weight_decay,
        adam=False,
    )
    # without a training dataset (eval-only), a 1-step schedule is a no-op
    lr_scheduler = get_scheduler(
        lr_scheduler_type,
        optimizer=optimizer,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=epochs * (len(train_dataset) // batch_size)
        if train_dataset is not None
        else 1,
    )
    start_epoch = 0
    if load is not None:
        logging.info(f"loading from {load}")
        ckpt = torch.load(load, map_location=device)
        if dist.is_initialized():
            net.load_state_dict(ckpt["net"])
        else:
            # checkpoint was saved from a DDP-wrapped model: strip the
            # leading "module." prefix from every key (split/join keeps
            # any interior "module." substrings intact)
            net.load_state_dict(
                {
                    "module.".join(k.split("module.")[1:]): v
                    for k, v in ckpt["net"].items()
                }
            )
        # net.module.steps[...] = 0
        optimizer.load_state_dict(ckpt["optimizer"])
        start_epoch = ckpt["epochs"]
    scaler = None
    if use_amp:
        scaler = torch.cuda.amp.grad_scaler.GradScaler()
    return net, optimizer, lr_scheduler, start_epoch, scaler
def write_to_hdf5(group, key, value, dtype=None, replace=False):
    """Write `value` under `key` inside an h5py-style `group`.

    str/int/float become group attributes; dicts become nested subgroups
    (recursively); anything else becomes a gzip-compressed dataset.
    `value=None` is a no-op. Raises if `key` already exists as a dataset
    or subgroup and `replace` is False.

    NOTE(review): the existence check (`key in group`) only covers
    datasets/subgroups; scalar attributes are silently overwritten —
    confirm this is intended.
    """
    if value is None:
        return
    if key in group:
        if replace:
            del group[key]
        else:
            raise Exception(f"{key} already present")
    if type(value) == str or type(value) == int or type(value) == float:
        group.attrs[key] = value
    elif type(value) == dict:
        # `key` cannot exist at this point (it was deleted or we raised
        # above), so the old `if key in group` lookup here was dead code:
        # always create a fresh subgroup and recurse.
        subgroup = group.create_group(key)
        for subgroup_key, subgroup_value in value.items():
            write_to_hdf5(subgroup, subgroup_key, subgroup_value)
    else:
        group.create_dataset(
            name=key, data=value, dtype=dtype, compression="gzip", compression_opts=9
        )
def compute_grad_norm(net):
    """Global L2 norm of all gradients currently stored on net's parameters.

    Parameters without a gradient are skipped; returns 0.0 when no
    parameter has a gradient.
    """
    squared_sum = sum(
        p.grad.detach().data.norm(2).item() ** 2
        for p in net.parameters()
        if p.grad is not None
    )
    return squared_sum**0.5
@typechecked
def iou(
    prediction: TensorType[..., "num_points"], label: TensorType[..., "num_points"]
):
    """Intersection-over-union reduced over the trailing points dimension."""
    overlap = torch.logical_and(prediction, label)
    combined = torch.logical_or(prediction, label)
    return overlap.sum(dim=-1).float() / combined.sum(dim=-1).float()
@typechecked
def prediction_analysis(
    prediction: TensorType["batch", "num_patches", "num_points"],
    label: TensorType["batch", "num_patches", "num_points"],
    ignore: TensorType["batch", "num_patches", "num_points"],
):
    """Per-(batch, patch) classification metrics, skipping ignored points.

    Returns a dict of flat lists (one entry per batch*patch pair):
    precision, recall, false_negative, false_positive, iou. Precision and
    recall are NaN when their denominator is zero, so callers should
    aggregate with nan-aware reductions.
    """
    stats = {
        "precision": [],
        "recall": [],
        "false_negative": [],
        "false_positive": [],
        "iou": [],
    }
    for b_i in range(ignore.shape[0]):
        for p_i in range(ignore.shape[1]):
            # keep only points not marked as ignored for this instance
            mask = ~ignore.bool()[b_i, p_i]
            curr_label = label.bool()[b_i, p_i][mask]
            positive_labels = curr_label.bool().float().sum(dim=-1)
            curr_pred = prediction.bool()[b_i, p_i][mask]
            positive_preds = curr_pred.bool().float().sum(dim=-1)
            true_positives = (
                torch.logical_and(curr_label.bool(), curr_pred.bool())
                .float()
                .sum(dim=-1)
            )
            stats["iou"].append(iou(prediction=curr_pred, label=curr_label).item())
            # precision = TP / predicted positives (NaN if nothing predicted)
            stats["precision"].append(
                true_positives.item() / positive_preds.item()
                if positive_preds.item() != 0
                else np.NAN
            )
            # recall = TP / actual positives (NaN if no positives exist)
            stats["recall"].append(
                true_positives.item() / positive_labels.item()
                if positive_labels.item() != 0
                else np.NAN
            )
            # rates are means over the kept points, not counts
            stats["false_negative"].append(
                torch.logical_and(curr_label, ~curr_pred).float().mean(dim=-1).item()
            )
            stats["false_positive"].append(
                torch.logical_and(~curr_label, curr_pred).float().mean(dim=-1).item()
            )
    return stats
def loop(
    net,
    loader,
    pbar,
    get_losses_fn,
    logger: Optional[SummaryWriter] = None,
    optimizer: Optional[torch.optim.Optimizer] = None,
    lr_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
    scaler=None,
    grad_max_norm: float = 1e5,
    device: torch.device = torch.device("cuda"),
    **kwargs,
):
    """Run one full pass over `loader`.

    Trains when `optimizer` is provided (with optional AMP via `scaler`),
    otherwise evaluates under torch.no_grad(). Per-step scalars go to
    `logger` during training; epoch means are logged at the end. Returns
    the concatenated per-batch detailed-stats DataFrame (gathered across
    all ranks when torch.distributed is initialized).
    """
    epoch_stats = {}
    detailed_stat_df = pd.DataFrame()
    for batch in loader:
        # move tensor entries to the target device; leave everything else as-is
        batch = {
            k: (v.to(device) if type(v) == torch.Tensor else v)
            for k, v in batch.items()
        }
        if optimizer:
            stats, detailed_stat = get_losses_fn(net=net, batch=batch, **kwargs)
            optimizer.zero_grad()
            if scaler:
                # AMP path: scale loss, unscale before clipping, then step
                scaler.scale(stats["loss"]).backward()
                scaler.unscale_(optimizer)
                torch.nn.utils.clip_grad_norm_(net.parameters(), grad_max_norm)
                scaler.step(optimizer)
                scaler.update()
            else:
                stats["loss"].backward()
                torch.nn.utils.clip_grad_norm_(net.parameters(), grad_max_norm)
                optimizer.step()
            lr_scheduler.step()
            # the `steps` counter lives on the wrapped module under DDP
            if dist.is_initialized():
                net.module.steps += 1
            else:
                net.steps += 1
            stats["gradnorm"] = compute_grad_norm(net)
        else:
            with torch.no_grad():
                stats, detailed_stat = get_losses_fn(net=net, batch=batch, **kwargs)
        # sync stats and detailed_stat_df between different processes
        if dist.is_initialized():
            stats_vector = torch.tensor([stats[k] for k in sorted(stats.keys())]).cuda()
            dist.all_reduce(stats_vector)
            for k, v in zip(sorted(stats.keys()), stats_vector / dist.get_world_size()):
                stats[k] = v.item()
            detailed_stats = [None for _ in range(dist.get_world_size())]
            dist.all_gather_object(object_list=detailed_stats, obj=detailed_stat)
            detailed_stat_df = pd.concat([detailed_stat_df] + detailed_stats)
        else:
            detailed_stat_df = pd.concat([detailed_stat_df, detailed_stat])
        for k, v in stats.items():
            v = v.item() if type(v) != float else v
            if k not in epoch_stats:
                epoch_stats[k] = []
            epoch_stats[k].append(v)
            # per-step scalars are only logged while training
            if logger is not None and optimizer is not None:
                logger.add_scalar(
                    k, v, net.module.steps if dist.is_initialized() else net.steps
                )
        if pbar is not None:
            # ratio-style metrics shown as percentages, the rest in sci notation
            pbar.set_description(
                "|".join(
                    f" {k}: {v*100:.02f} "
                    if any(
                        _k in k
                        for _k in {
                            "iou",
                            "precision",
                            "recall",
                        }
                    )
                    else f" {k}: {v:.04e} "
                    for k, v in stats.items()
                )
            )
            pbar.update()
    # nanmean: precision/recall can legitimately be NaN for some batches
    epoch_stats = {k: np.nanmean(v) for k, v in epoch_stats.items()}
    if logger is not None and is_main_process():
        for k, v in epoch_stats.items():
            logger.add_scalar(
                f"{k}_mean", v, net.module.steps if dist.is_initialized() else net.steps
            )
    return detailed_stat_df
def train(
    log: str,
    net: torch.nn.Module,
    optimizer: torch.optim.Optimizer,
    lr_scheduler: torch.optim.lr_scheduler._LRScheduler,
    training_detailed_stats: pd.DataFrame,
    start_epoch: int,
    epochs: int,
    datasets: dict,
    loggers: dict,
    splits: dict,
    save_freq: int,
    eval_freq: int,
    num_workers: int,
    batch_size: int,
    get_losses_fn,
    use_amp: bool,
    **kwargs,
):
    """Main train/eval loop over epochs and dataset splits.

    For each epoch: trains on the "train" split, evaluates
    "unseen_instances" every `eval_freq` epochs, appends per-batch
    detailed stats to `detailed_stats.pkl`, prints best-cutoff metric
    summaries, and checkpoints to `{log}/latest.pth` plus periodic
    `{log}/ckpt_{epoch}.pth` files. Designed to be fed by
    `setup_experiment` via keyword splat.
    """
    for curr_epoch in range(start_epoch, epochs):
        if is_main_process():
            logging.info(f'{"="*10} EPOCH {curr_epoch} {"="*10}')
        for split, dataset in datasets.items():
            # non-train splits only run every eval_freq epochs
            if split != "train" and curr_epoch % eval_freq != 0:
                continue
            if split == "train":
                net.train()
            else:
                net.eval()
            # only "train" and "unseen_instances" are looped here; other
            # eval splits are skipped entirely
            if split != "train" and split != "unseen_instances":
                continue
            sampler = None
            if dist.is_initialized():
                sampler = DistributedSampler(
                    dataset=dataset,
                    shuffle=split == "train",
                    drop_last=split == "train",
                )
                sampler.set_epoch(curr_epoch)
            loader = DataLoader(
                dataset=dataset,
                sampler=sampler,
                num_workers=num_workers,
                shuffle=sampler is None and split == "train",
                batch_size=batch_size if split == "train" else 1,
                persistent_workers=num_workers > 0,
            )
            try:
                with torch.cuda.amp.autocast(enabled=use_amp):
                    detailed_stats = loop(
                        net=net,
                        loader=loader,
                        get_losses_fn=get_losses_fn,
                        **{
                            **kwargs,
                            "logger": loggers[split],
                            "optimizer": optimizer if split == "train" else None,
                            "lr_scheduler": lr_scheduler,
                            "pbar": tqdm(
                                total=len(loader),
                                dynamic_ncols=True,
                                unit="batch",
                                smoothing=0.01,
                                postfix=f"| {split.upper()} ",
                            )
                            if is_main_process()
                            else None,
                            "detailed_analysis": False,
                            # train uses a single cutoff; eval sweeps a range
                            "cutoffs": [-1.0]
                            if split == "train"
                            else np.arange(-2.7, 0, 0.3),
                        },
                    )
                if is_main_process():
                    ckpt_path = f"{log}/latest.pth"
                    torch.save(
                        {
                            "net": net.state_dict(),
                            "optimizer": optimizer.state_dict(),
                            "epochs": curr_epoch + 1,
                        },
                        ckpt_path,
                    )
                    detailed_stats["epoch"] = [curr_epoch] * len(detailed_stats)
                    detailed_stats["split"] = [split] * len(detailed_stats)
                    training_detailed_stats = pd.concat(
                        [training_detailed_stats, detailed_stats]
                    )
                    training_detailed_stats.to_pickle(log + "/detailed_stats.pkl")
            except Exception as e:
                # NOTE(review): this swallows *any* failure during the split
                # (OOM, data errors, ...) and moves on — confirm this
                # best-effort behavior is intended.
                print(e)
                continue
        epoch_stats = training_detailed_stats[
            training_detailed_stats.epoch == curr_epoch
        ]
        if not is_main_process():
            continue
        # report each metric at its best-performing cutoff per split
        for split in splits.keys():
            split_stats = epoch_stats[epoch_stats.split == split]
            if len(split_stats) == 0:
                continue
            logging.info(split.upper())
            for key in filter(
                lambda k: any(
                    metric in k
                    for metric in {
                        "iou",
                        "precision",
                        "recall",
                        "false_negative",
                        "false_positive",
                    }
                ),
                epoch_stats.columns,
            ):
                if len(split_stats) == 0:
                    continue
                best_cutoff = split_stats.groupby("cutoff").mean()[key].idxmax()
                score = split_stats[split_stats.cutoff == best_cutoff][key].mean() * 100
                if pd.isna(score):
                    continue
                logging.info(
                    " " * 4
                    + f"[{key.upper():<30}]:"
                    + f"{score:>6.02f}"
                    + str(best_cutoff).rjust(10)
                )
            logging.info("\n")
        # periodic (and final-epoch) numbered checkpoints
        if curr_epoch % save_freq != 0 and curr_epoch != epochs - 1:
            continue
        ckpt_path = f"{log}/ckpt_{curr_epoch}.pth"
        torch.save(
            {
                "net": net.state_dict(),
                "optimizer": optimizer.state_dict(),
                "epochs": curr_epoch + 1,
            },
            ckpt_path,
        )
        logging.info(f"Saved checkpoint to {ckpt_path}.\n")
def voxelize_points(
    prediction: TensorType["batch", "num_patches", "num_points"],
    label: TensorType["batch", "num_patches", "num_points"],
    xyz_pts: TensorType["batch", "num_patches", "num_points", 3],
    voxel_shape: Tuple[int, int, int],
    scene_bounds: TensorType[2, 3],
    ignore_pts: TensorType["batch", "num_patches", "num_points"],
    device="cuda",
):
    """Scatter per-point prediction/label/ignore masks into a voxel grid.

    Labels are remapped to {-1, +1} before scattering so that a voxel
    value of exactly 0 means "no point landed here"; such voxels are
    added to the ignore mask. Returns dict with boolean "prediction",
    float "label" and boolean "ignore", each flattened to
    (batch, num_patches, prod(voxel_shape)).
    """
    batch_size, num_patches, num_points = prediction.shape
    prediction = prediction.to(device).float()
    # remap {0,1} labels to {-1,+1}: 0 is reserved for empty voxels
    label = (label.to(device).float() - 0.5) * 2
    xyz_pts = xyz_pts.to(device)
    # NOTE(review): the [:, None] insert before the view is a no-op on the
    # element count; result is (batch*num_patches, num_points, 3)
    xyz_pts = xyz_pts[:, None, ...].view(batch_size * num_patches, num_points, 3)
    # voxelize
    vg = VirtualGrid(
        scene_bounds=scene_bounds,
        grid_shape=voxel_shape,
        batch_size=batch_size * num_patches,
        device=torch.device(device),
        reduce_method="max",
    )
    voxelized_prediction = vg.scatter_points(
        xyz_pts=xyz_pts, feature_pts=prediction.view(batch_size * num_patches, -1, 1)
    ).view(batch_size, num_patches, *voxel_shape)
    voxelized_label = vg.scatter_points(
        xyz_pts=xyz_pts, feature_pts=label.view(batch_size * num_patches, -1, 1)
    ).view(batch_size, num_patches, *voxel_shape)
    # voxels untouched by any point keep the scatter default of 0
    missing_label = voxelized_label == 0.0
    voxelized_label = (voxelized_label > 0).float()
    # a voxel is ignored if any point scattered into it was ignored
    ignore_vol_mask = (
        vg.scatter_points(
            xyz_pts=xyz_pts,
            feature_pts=ignore_pts.to(device)
            .float()
            .view(batch_size * num_patches, -1, 1),
        )
        .view(batch_size, num_patches, *voxel_shape)
        .bool()
    )
    ignore_vol_mask = torch.logical_or(ignore_vol_mask, missing_label)
    return {
        "prediction": (voxelized_prediction > 0).view(
            batch_size, num_patches, np.prod(voxel_shape)
        ),
        "label": voxelized_label.view(batch_size, num_patches, np.prod(voxel_shape)),
        "ignore": ignore_vol_mask.view(batch_size, num_patches, np.prod(voxel_shape)),
    }
@typechecked
def voxel_score(
    prediction: TensorType["batch", "num_patches", "num_points"],
    label: TensorType["batch", "num_patches", "num_points"],
    xyz_pts: TensorType["batch", "num_patches", "num_points", 3],
    voxel_shape: Tuple[int, int, int],
    scene_bounds: TensorType[2, 3],
    ignore_pts: TensorType["batch", "num_patches", "num_points"],
    out_of_frustum_pts_mask: TensorType["batch", "num_patches", "num_points"],
    score_fn=iou,
    device="cuda",
):
    """Voxelize point predictions/labels and score each (batch, patch) pair.

    Same voxelization scheme as `voxelize_points` (labels remapped to
    {-1,+1} so empty voxels read 0 and get ignored), except points outside
    the camera frustum are additionally folded into the ignore mask.
    Returns a (batch, num_patches) float tensor of `score_fn` values
    (IoU by default) computed over non-ignored voxels.
    """
    batch_size, num_patches, num_points = prediction.shape
    prediction = prediction.to(device).float()
    # remap {0,1} labels to {-1,+1}: 0 is reserved for empty voxels
    label = (label.to(device).float() - 0.5) * 2
    xyz_pts = xyz_pts.to(device)
    xyz_pts = xyz_pts[:, None, ...].view(batch_size * num_patches, num_points, 3)
    # voxelize
    vg = VirtualGrid(
        scene_bounds=scene_bounds,
        grid_shape=voxel_shape,
        batch_size=batch_size * num_patches,
        device=torch.device(device),
        reduce_method="max",
    )
    voxelized_prediction = vg.scatter_points(
        xyz_pts=xyz_pts, feature_pts=prediction.view(batch_size * num_patches, -1, 1)
    ).view(batch_size, num_patches, *voxel_shape)
    voxelized_label = vg.scatter_points(
        xyz_pts=xyz_pts, feature_pts=label.view(batch_size * num_patches, -1, 1)
    ).view(batch_size, num_patches, *voxel_shape)
    missing_label = voxelized_label == 0.0
    voxelized_label = (voxelized_label > 0).float()
    # ignore = explicitly-ignored points OR points outside the view frustum
    ignore_vol_mask = (
        vg.scatter_points(
            xyz_pts=xyz_pts,
            feature_pts=torch.logical_or(
                ignore_pts.bool(), out_of_frustum_pts_mask.bool()
            )
            .to(device)
            .float()
            .view(batch_size * num_patches, -1, 1),
        )
        .view(batch_size, num_patches, *voxel_shape)
        .bool()
    )
    ignore_vol_mask = torch.logical_or(ignore_vol_mask, missing_label)
    # score each (batch, patch) independently over its kept voxels
    result = torch.zeros((batch_size, num_patches)).float()
    for b in range(batch_size):
        for p in range(num_patches):
            result[b, p] = score_fn(
                (voxelized_prediction[b, p] > 0)[~ignore_vol_mask[b, p]].bool(),
                (voxelized_label[b, p] > 0)[~ignore_vol_mask[b, p]].bool(),
            )
    return result
@typechecked
def get_bce_weight(
    output_label_pts: TensorType["batch", "num_patches", "num_points"],
    balance_positive_negative: bool,
):
    """Per-point BCE weights, optionally balancing positives vs negatives.

    When `balance_positive_negative` is False, returns all-ones. Otherwise
    each point is weighted by the inverse frequency of its class *within
    its own (batch, patch) instance*, then globally rescaled so the total
    weight mass equals the unweighted total.
    """
    weight = torch.ones_like(output_label_pts).float()
    if balance_positive_negative:
        weight_total = weight.sum()
        # per instance
        positive_mask = output_label_pts.bool()
        # positive_mask.shape = BATCH x NUM PATCH x NUM PTS
        batch_size, num_patches, num_pts = positive_mask.shape
        percent_positive = positive_mask.float().mean(dim=2).view(-1)
        percent_negative = 1 - percent_positive
        positive_mask = positive_mask.view(-1, num_pts)
        # vectorized replacement of the previous per-instance python loop:
        # positives get 1/(p+eps), negatives 1/(1-p+eps), broadcast per row
        weight = torch.where(
            positive_mask,
            (1.0 / (percent_positive + 1e-10))[:, None],
            (1.0 / (percent_negative + 1e-10))[:, None],
        )
        weight = weight.view(output_label_pts.shape)
        # renormalize so the total weight matches the unweighted total
        weight *= weight_total / weight.sum()
    return weight
| 27,394 | 35.526667 | 88 | py |
semantic-abstraction | semantic-abstraction-main/dataset.py | import numpy as np
import torch
from torch.utils.data import Dataset
from fusion import TSDFVolume
from point_cloud import (
check_pts_in_frustum,
filter_pts_bounds,
get_pointcloud,
)
from typing import List, Optional, Tuple
import h5py
from transforms3d import affines, euler
from torchtyping import TensorType, patch_typeguard
from typeguard import typechecked
patch_typeguard() # use before @typechecked
def deref_h5py(dataset, refs):
    """Resolve h5py object references into a float32 array.

    Stacks ``dataset[ref][0]`` for every ref, in order.
    """
    resolved = [dataset[ref][0] for ref in refs]
    return np.asarray(resolved, dtype=np.float32)
# Object-name synonyms used by the "unseen_instances_synonyms" eval split:
# descriptions mentioning a key are kept, and the key is swapped for its
# value (see ObjectLocalizationDataset.get_descriptions).
# NOTE(review): "refridgerator" spelling is a runtime string and must stay
# as-is to match the stored saliency labels.
synonyms = {
    "television": "tv",
    "sofa": "couch",
    "house plant": "plant in a pot",
    "bookcase": "bookshelf",
    "baseball bat": "rawlings big stick maple bat",
    "pillow": "cushion",
    "arm chair": "recliner",
    "bread": "loaf of sourdough",
    "cell phone": "mobile phone",
    "desktop": "computer",
    "dresser": "wardrobe",
    "dumbbell": "gym weights",
    "fridge": "refridgerator",
    "garbage can": "trash can",
    "laptop": "computer",
    "outlet": "eletric plug",
    "stairs": "staircase",
}
class SceneUnderstandDataset(Dataset):
    """Base dataset over per-scene HDF5 files for 3D scene understanding.

    Stores configuration for point sampling, saliency extraction, and
    domain randomization; subclasses implement __getitem__.
    """

    def __init__(
        self,
        file_path: str,
        scene_bounds: TensorType[2, 3],
        network_inputs: List[str],
        domain_randomization: bool,
        num_input_pts: int,
        num_output_pts: int,
        return_vis: bool,
        scene_paths: List[str],
        tsdf_shape: Tuple[int, int, int],
        domain_randomized_rgb: bool,
        offset_patch_mask: bool,
        balance_spatial_relations: bool,
        saliency_config: str,
        use_synonyms: bool,
        subtract_mean_relevancy: bool,
        balance_spatial_sampling: bool,
        saliency_vmin: float,
        dr_pos: float,
        dr_orn: float,
        dr_scale: float,
        xyz_pts_noise: float,
        always_replace_subsample_pts: bool,
        patch_mask_cutoff: float = 0.004,
        load_gt: bool = True,
        **kwargs,
    ):
        """Store configuration; extra **kwargs (e.g. from CLI args) are ignored."""
        # setup
        self.file_path = file_path
        # sorted for deterministic indexing across runs/processes
        self.keys = list(sorted(scene_paths))
        self.num_input_pts = num_input_pts
        self.num_output_pts = num_output_pts
        self.network_inputs = network_inputs
        # 3D scene
        self.scene_bounds = np.array(scene_bounds)
        self.tsdf_shape = tsdf_shape
        # retval customization
        self.domain_randomized_rgb = domain_randomized_rgb
        self.return_vis = return_vis
        self.domain_randomization = domain_randomization
        self.subtract_mean_relevancy = subtract_mean_relevancy
        self.use_synonyms = use_synonyms
        self.offset_patch_mask = offset_patch_mask
        self.patch_mask_cutoff = patch_mask_cutoff
        self.saliency_config = saliency_config
        self.saliency_vmin = saliency_vmin
        self.xyz_pts_noise = xyz_pts_noise
        self.balance_spatial_relations = balance_spatial_relations
        self.balance_spatial_sampling = balance_spatial_sampling
        self.dr_pos = dr_pos
        self.dr_orn = dr_orn
        self.dr_scale = dr_scale
        self.load_gt = load_gt
        self.always_replace_subsample_pts = always_replace_subsample_pts

    def __len__(self):
        """Number of scenes."""
        return len(self.keys)

    @staticmethod
    @typechecked
    def transform_filter_subsample(
        xyz_pts,
        scene_bounds,
        num_subsample_pts,
        subsample_probabilities,
        alway_replace_pts: bool,
        transform_matrix=None,
        **kwargs,
    ):
        """Optionally transform points, drop out-of-bounds ones, subsample.

        `kwargs` carries per-point (or per-patch-per-point) feature arrays
        that are filtered/subsampled in lockstep with `xyz_pts`. Pass
        `num_subsample_pts=-1` to skip subsampling. Returns a dict with the
        same keys as `kwargs` plus "xyz_pts".
        """
        num_pts = len(xyz_pts)
        retval = {"xyz_pts": xyz_pts}
        retval.update(kwargs)
        if transform_matrix is not None:
            # turn into homogeneous coords
            xyz_pts = torch.cat((xyz_pts, torch.ones(num_pts)[:, None]), dim=-1)
            xyz_pts = torch.matmul(transform_matrix, xyz_pts.permute(1, 0)).permute(
                1, 0
            )[..., :3]
        # filter out of bounds points
        in_bounds_mask = filter_pts_bounds(xyz_pts, scene_bounds).bool()
        retval["xyz_pts"] = xyz_pts[in_bounds_mask, :]
        subsample_probabilities = subsample_probabilities[in_bounds_mask]
        subsample_probabilities /= subsample_probabilities.sum()
        # filter extras along whichever axis matches the point count
        for k, v in kwargs.items():
            if v is None:
                retval[k] = None
            elif v.shape[0] == len(in_bounds_mask):
                retval[k] = v[in_bounds_mask, ...]
            elif v.shape[1] == len(in_bounds_mask):
                retval[k] = v[:, in_bounds_mask, ...]
            else:
                raise Exception(k, v.shape, in_bounds_mask.shape)
        if num_subsample_pts == -1:
            return retval
        try:
            # bias based on description
            indices = np.random.choice(
                a=len(retval["xyz_pts"]),
                size=num_subsample_pts,
                p=subsample_probabilities,
                replace=alway_replace_pts,
            )
        except Exception as e:
            # fall back to sampling with replacement (e.g. when fewer
            # in-bounds points remain than requested)
            indices = np.random.choice(
                a=len(retval["xyz_pts"]),
                size=num_subsample_pts,
                p=subsample_probabilities,
                replace=True,
            )
        return {
            k: (
                v[indices, ...]
                if len(v) == len(retval["xyz_pts"])
                else v[:, indices, ...]
            )
            if v is not None
            else None
            for k, v in retval.items()
        }
class ObjectLocalizationDataset(SceneUnderstandDataset):
    def __init__(self, num_descs: int, **kwargs):
        """VOOL dataset; `num_descs` = spatial descriptions sampled per scene."""
        super().__init__(**kwargs)
        self.num_descs = num_descs
    @staticmethod
    def get_descriptions(
        scene_group,
        num_subsample_descs: int,
        saliency_config: str,
        rgb_key: str,
        use_synonyms: bool,
        balance_spatial_relations: bool = False,
        only_return_num_descs: bool = False,
    ):
        """Load, filter, subsample and pad spatial descriptions for a scene.

        Reads the "descriptions" group from the scene HDF5, builds
        "<target> <relation> a <reference>" sentences, optionally keeps
        only descriptions with a synonym (and later swaps in the synonym),
        subsamples `num_subsample_descs` of them (optionally balanced over
        relation types), resolves saliency-map references for target,
        reference and full sentence, and pads with "[pad]" entries up to
        `num_subsample_descs`. Pass -1 to keep all descriptions.

        NOTE(review): when `use_synonyms` and `balance_spatial_relations`
        are both active with subsampling, the balancing below reads the
        *unfiltered* relation list from `desc_group` while `p` has the
        filtered length — looks like a latent shape mismatch; confirm this
        combination is never exercised.
        """
        saliency_prefix = f"saliencies/{rgb_key}|{saliency_config}"
        descriptions = dict()
        desc_group = scene_group["descriptions"]
        num_descs = len(desc_group["spatial_relation_name"])
        descriptions["target_obj_name"] = np.array(
            desc_group["target_obj_name"]
        ).astype(str)
        descriptions["target_obj_id"] = np.array(desc_group["target_obj_id"])
        descriptions["reference_obj_name"] = np.array(
            desc_group["reference_obj_name"]
        ).astype(str)
        descriptions["spatial_relation_name"] = np.array(
            desc_group["spatial_relation_name"]
        ).astype(str)
        # build "<target> <relation> a <reference>" sentences elementwise
        description_sentences = ""
        for desc_part in [
            descriptions["target_obj_name"],
            " ",
            descriptions["spatial_relation_name"],
            " a ",
            descriptions["reference_obj_name"],
        ]:
            description_sentences = np.char.add(description_sentences, desc_part)
        if use_synonyms:
            # keep only descriptions that mention at least one synonym key
            has_synonym = list(
                map(
                    lambda sentence: any(x in sentence for x in synonyms.keys()),
                    description_sentences,
                )
            )
            descriptions["target_obj_name"] = descriptions["target_obj_name"][
                has_synonym
            ]
            descriptions["target_obj_id"] = descriptions["target_obj_id"][has_synonym]
            descriptions["reference_obj_name"] = descriptions["reference_obj_name"][
                has_synonym
            ]
            descriptions["spatial_relation_name"] = descriptions[
                "spatial_relation_name"
            ][has_synonym]
            description_sentences = np.array(description_sentences)[has_synonym]
            num_descs = sum(has_synonym)
        if only_return_num_descs:
            return num_descs
        desc_indices = np.arange(0, num_descs)
        if num_subsample_descs != -1 and num_subsample_descs < num_descs:
            p = np.ones(num_descs).astype(np.float64)
            if balance_spatial_relations:
                # weight each description inversely to its relation frequency
                spatial_relations = np.array(
                    desc_group["spatial_relation_name"]
                ).tolist()
                unique_relations = list(set(spatial_relations))
                spatial_relations_ids = np.array(
                    list(map(lambda r: unique_relations.index(r), spatial_relations))
                )
                for spatial_relations_id in range(len(unique_relations)):
                    mask = spatial_relations_ids == spatial_relations_id
                    p[mask] = 1 / mask.sum()
                p /= p.sum()
            desc_indices = np.random.choice(
                num_descs, num_subsample_descs, replace=False, p=p
            )
            desc_indices.sort()  # hdf5 indexing must be in order
        descriptions["target_obj_name"] = descriptions["target_obj_name"][desc_indices]
        descriptions["target_obj_id"] = descriptions["target_obj_id"][desc_indices]
        descriptions["reference_obj_name"] = descriptions["reference_obj_name"][
            desc_indices
        ]
        descriptions["spatial_relation_name"] = descriptions["spatial_relation_name"][
            desc_indices
        ]
        description_sentences = description_sentences[desc_indices]
        if use_synonyms:
            # swap each kept name for its synonym (sentences keep originals,
            # matching the stored saliency labels)
            descriptions["target_obj_name"] = np.array(
                list(
                    map(
                        lambda x: x if x not in synonyms.keys() else synonyms[x],
                        descriptions["target_obj_name"],
                    )
                )
            )
            descriptions["reference_obj_name"] = np.array(
                list(
                    map(
                        lambda x: x if x not in synonyms.keys() else synonyms[x],
                        descriptions["reference_obj_name"],
                    )
                )
            )
        # resolve saliency-map object references by label lookup
        saliency_text_labels = (
            np.array(scene_group[f"{saliency_prefix}|saliency_text_labels"])
            .astype(str)
            .tolist()
        )
        descriptions["target_obj_saliency_refs"] = [
            scene_group[f"{saliency_prefix}"][idx]
            for idx in map(
                lambda obj_name: saliency_text_labels.index(obj_name),
                descriptions["target_obj_name"],
            )
        ]
        descriptions["reference_obj_saliency_refs"] = [
            scene_group[f"{saliency_prefix}"][idx]
            for idx in map(
                lambda obj_name: saliency_text_labels.index(obj_name),
                descriptions["reference_obj_name"],
            )
        ]
        descriptions["description_saliency_refs"] = [
            scene_group[f"{saliency_prefix}"][idx]
            for idx in map(
                lambda desc: saliency_text_labels.index(desc), description_sentences
            )
        ]
        # pad up to num_subsample_descs so batch shapes stay constant;
        # padded entries use target_obj_id=-2 and the "[pad]" name
        num_missing_descs = num_subsample_descs - len(
            descriptions["spatial_relation_name"]
        )
        if num_missing_descs > 0 and num_subsample_descs != -1:
            descriptions["target_obj_id"] = np.array(
                descriptions["target_obj_id"].tolist() + [-2] * num_missing_descs
            )
            descriptions["spatial_relation_name"] = np.array(
                descriptions["spatial_relation_name"].tolist()
                + ["[pad]"] * num_missing_descs
            )
            descriptions["target_obj_name"] = np.array(
                descriptions["target_obj_name"].tolist() + ["[pad]"] * num_missing_descs
            )
            descriptions["reference_obj_name"] = np.array(
                descriptions["reference_obj_name"].tolist()
                + ["[pad]"] * num_missing_descs
            )
        descriptions["num_descs"] = len(descriptions["spatial_relation_name"])
        return descriptions
def __getitem__(self, idx):
retvals = dict()
scene_path = self.file_path + "/" + self.keys[idx]
with h5py.File(scene_path, "r") as f:
group = f["data"]
depth = deref_h5py(dataset=f["depth"], refs=group["depth"])[0]
cam_intr = np.array(group["cam_intr"])
cam_pose = np.array(group["cam_pose"])
if self.domain_randomized_rgb:
retvals["rgb"] = np.array(group["domain_randomized_rgb"]).astype(
np.float32
)[0]
else:
retvals["rgb"] = deref_h5py(dataset=f["rgb"], refs=group["rgb"])[0]
image_shape = retvals["rgb"].shape[:2]
retvals["rgb"] = torch.from_numpy(retvals["rgb"]) / 255.0
retvals["input_xyz_pts"] = torch.from_numpy(
get_pointcloud(depth, None, cam_intr, cam_pose)[0].astype(np.float32)
)
retvals["full_objid_pts"] = None
if "full_objid_pts" in group:
retvals["output_xyz_pts"] = torch.from_numpy(
deref_h5py(dataset=f["full_xyz_pts"], refs=group["full_xyz_pts"])[0]
)
retvals["full_objid_pts"] = torch.from_numpy(
deref_h5py(
dataset=f["full_objid_pts"], refs=group["full_objid_pts"]
)[0]
)
retvals["out_of_bounds_pts"] = torch.zeros(
len(retvals["full_objid_pts"])
).float()
descriptions = self.get_descriptions(
scene_group=group,
num_subsample_descs=self.num_descs if not self.return_vis else -1,
saliency_config=self.saliency_config,
rgb_key="domain_randomized_rgb"
if self.domain_randomized_rgb
else "rgb",
use_synonyms=self.use_synonyms,
balance_spatial_relations=self.balance_spatial_relations,
)
retvals["spatial_relation_name"] = descriptions[
"spatial_relation_name"
].tolist()
# gradcam values typically between -0.02 and 0.02
# so multiply by 50
retvals["input_target_saliency_pts"] = torch.from_numpy(
deref_h5py(
dataset=f["saliencies"],
refs=descriptions["target_obj_saliency_refs"],
)
)
retvals["input_reference_saliency_pts"] = torch.from_numpy(
deref_h5py(
dataset=f["saliencies"],
refs=descriptions["reference_obj_saliency_refs"],
)
)
retvals["input_description_saliency_pts"] = torch.from_numpy(
deref_h5py(
dataset=f["saliencies"],
refs=descriptions["description_saliency_refs"],
)
)
saliency_prefix = f'data/saliencies/{"domain_randomized_rgb" if self.domain_randomized_rgb else "rgb"}|{self.saliency_config}'
mean_idx = (
np.array(f[f"{saliency_prefix}|saliency_text_labels"])
.astype(str)
.tolist()
.index("mean")
)
mean_relevancy_map = (
torch.from_numpy(f["saliencies"][mean_idx]).float().squeeze()
)
for k in {
"input_target_saliency_pts",
"input_reference_saliency_pts",
"input_description_saliency_pts",
}:
if self.subtract_mean_relevancy:
retvals[k] -= mean_relevancy_map
if self.saliency_vmin is not None:
retvals[k] -= self.saliency_vmin
retvals[k][retvals[k] < 0] = 0
retvals[k] = (
torch.nn.functional.interpolate(
retvals[k][:, None, :, :],
size=tuple(image_shape),
mode="bilinear",
align_corners=False,
)
.squeeze()
.view(len(retvals[k]), -1, 1)
)
retvals[k] *= 50
if "patch_masks" in self.network_inputs:
assert "saliency" not in self.network_inputs
retvals["input_target_saliency_pts"] = (
retvals["input_target_saliency_pts"] > self.patch_mask_cutoff
).float()
retvals["input_reference_saliency_pts"] = (
retvals["input_reference_saliency_pts"] > self.patch_mask_cutoff
).float()
retvals["input_description_saliency_pts"] = (
retvals["input_description_saliency_pts"] > self.patch_mask_cutoff
).float()
retvals["input_rgb_pts"] = (
retvals["rgb"]
.view(-1, 3)[None, ...]
.repeat(len(descriptions["spatial_relation_name"]), 1, 1)
)
if len(retvals["input_target_saliency_pts"]) < len(
descriptions["spatial_relation_name"]
):
retvals["input_target_saliency_pts"] = torch.cat(
(
retvals["input_target_saliency_pts"],
torch.zeros(
len(descriptions["spatial_relation_name"])
- len(retvals["input_target_saliency_pts"]),
*list(retvals["input_target_saliency_pts"].shape)[1:],
),
),
dim=0,
)
retvals["input_reference_saliency_pts"] = torch.cat(
(
retvals["input_reference_saliency_pts"],
torch.zeros(
len(descriptions["spatial_relation_name"])
- len(retvals["input_reference_saliency_pts"]),
*list(retvals["input_reference_saliency_pts"].shape)[1:],
),
),
dim=0,
)
retvals["input_description_saliency_pts"] = torch.cat(
(
retvals["input_description_saliency_pts"],
torch.zeros(
len(descriptions["spatial_relation_name"])
- len(retvals["input_description_saliency_pts"]),
*list(retvals["input_description_saliency_pts"].shape)[1:],
),
),
dim=0,
)
retvals["output_label_pts"] = None
if "full_objid_pts" in retvals and retvals["full_objid_pts"] is not None:
output_label_pts = torch.zeros(
len(descriptions["target_obj_id"]),
len(retvals["full_objid_pts"]),
dtype=torch.float32,
)
for desc_i, target_obj_id in enumerate(descriptions["target_obj_id"]):
obj_mask = retvals["full_objid_pts"] == target_obj_id
output_label_pts[desc_i, :] = obj_mask
retvals["output_label_pts"] = output_label_pts
retvals["scene_id"] = self.keys[idx].split("/")[-1].split(".")[0]
retvals["target_obj_name"] = descriptions["target_obj_name"].tolist()
retvals["reference_obj_name"] = descriptions["reference_obj_name"].tolist()
if self.return_vis:
retvals["depth"] = depth
retvals["cam_intr"] = cam_intr
retvals["cam_pose"] = cam_pose
retvals["vis_gt_object_labels"] = (
np.array(group["objid_to_class"]).astype(str).tolist()
if "objid_to_class" in group
else []
)
if "matterport" in self.file_path or "arkit" in self.file_path:
vis_xyz_pts, vis_rgb_pts = get_pointcloud(
depth, retvals["rgb"].numpy(), cam_intr, cam_pose
)
retvals["vis_gt_objid_pts"] = torch.from_numpy(vis_rgb_pts)
retvals["vis_gt_xyz_pts"] = torch.from_numpy(vis_xyz_pts)
else:
retvals["vis_gt_objid_pts"] = retvals["full_objid_pts"]
retvals["vis_gt_xyz_pts"] = torch.from_numpy(
deref_h5py(
dataset=f["full_xyz_pts"], refs=group["full_xyz_pts"]
)[0]
)
transform_matrix = None
if self.domain_randomization:
scene_dims = self.scene_bounds[1, :] - self.scene_bounds[0, :]
assert (scene_dims >= 0).all()
translation = torch.randn(3) * scene_dims * self.dr_pos
rotation = euler.euler2mat(
(torch.rand(1)[0] - 0.5) * self.dr_orn,
(torch.rand(1)[0] - 0.5) * self.dr_orn,
(torch.rand(1)[0] - 0.5) * self.dr_orn
# full rotation around z axis
)
scale = torch.rand(3) * self.dr_scale + 1.0
transform_matrix = torch.from_numpy(
affines.compose(T=translation, R=rotation, Z=scale).astype(np.float32)
)
# PROCESS INPUTS
kwargs = {
"transform_matrix": transform_matrix,
"scene_bounds": self.scene_bounds,
"num_subsample_pts": self.num_input_pts,
"subsample_probabilities": np.ones(len(retvals["input_xyz_pts"])).astype(
np.float64
)
/ len(retvals["input_xyz_pts"]),
"alway_replace_pts": self.always_replace_subsample_pts,
}
try:
processed_pts = SceneUnderstandDataset.transform_filter_subsample(
xyz_pts=retvals["input_xyz_pts"],
input_target_saliency_pts=retvals["input_target_saliency_pts"],
input_reference_saliency_pts=retvals["input_reference_saliency_pts"],
input_description_saliency_pts=retvals[
"input_description_saliency_pts"
],
input_rgb_pts=retvals["input_rgb_pts"],
**kwargs,
)
except Exception as e:
kwargs["transform_matrix"] = None
processed_pts = SceneUnderstandDataset.transform_filter_subsample(
xyz_pts=retvals["input_xyz_pts"],
input_target_saliency_pts=retvals["input_target_saliency_pts"],
input_reference_saliency_pts=retvals["input_reference_saliency_pts"],
input_description_saliency_pts=retvals[
"input_description_saliency_pts"
],
input_rgb_pts=retvals["input_rgb_pts"],
**kwargs,
)
retvals["input_xyz_pts"] = processed_pts["xyz_pts"]
retvals["input_target_saliency_pts"] = processed_pts[
"input_target_saliency_pts"
]
retvals["input_reference_saliency_pts"] = processed_pts[
"input_reference_saliency_pts"
]
retvals["input_description_saliency_pts"] = processed_pts[
"input_description_saliency_pts"
]
retvals["input_rgb_pts"] = processed_pts["input_rgb_pts"]
if "tsdf" in self.network_inputs:
voxel_size = (
(self.scene_bounds[1] - self.scene_bounds[0]) / self.tsdf_shape
).min()
tsdf_vol = TSDFVolume(vol_bnds=self.scene_bounds.T, voxel_size=voxel_size)
final_transform = cam_pose
if kwargs["transform_matrix"] is not None:
final_transform = kwargs["transform_matrix"] @ cam_pose
tsdf_vol.integrate(
color_im=retvals["rgb"].numpy(),
depth_im=depth,
cam_intr=cam_intr,
cam_pose=final_transform,
)
retvals["tsdf_vol"] = torch.from_numpy(tsdf_vol.get_volume()[0])
else:
retvals["tsdf_vol"] = torch.ones(1)
# PROCESS OUTPUTS
if "output_label_pts" in retvals and retvals["output_label_pts"] != None:
kwargs["num_subsample_pts"] = (
self.num_output_pts if not self.return_vis else -1
)
if self.balance_spatial_sampling:
desc_output_xyz_pts = []
desc_output_label_pts = []
desc_ignore_pts = []
for desc_i in range(len(retvals["output_label_pts"])):
subsample_probabilities = np.ones(
len(retvals["output_xyz_pts"])
).astype(np.float64)
positive_mask = retvals["output_label_pts"][desc_i].bool()
if positive_mask.any() and (not positive_mask.all()):
subsample_probabilities[positive_mask] = (
len(retvals["output_xyz_pts"]) / positive_mask.sum()
)
subsample_probabilities[~positive_mask] = (
len(retvals["output_xyz_pts"]) / (~positive_mask).sum()
)
subsample_probabilities /= subsample_probabilities.sum()
kwargs["subsample_probabilities"] = subsample_probabilities
output_pts = SceneUnderstandDataset.transform_filter_subsample(
xyz_pts=retvals["output_xyz_pts"],
output_label_pts=retvals["output_label_pts"][desc_i][None, :],
out_of_bounds_pts=retvals["out_of_bounds_pts"],
**kwargs,
)
desc_output_xyz_pts.append(output_pts["xyz_pts"])
desc_output_label_pts.append(output_pts["output_label_pts"])
desc_ignore_pts.append(output_pts["out_of_bounds_pts"])
retvals["output_xyz_pts"] = torch.stack(desc_output_xyz_pts)
retvals["output_label_pts"] = torch.stack(
desc_output_label_pts
).squeeze(dim=-2)
retvals["out_of_bounds_pts"] = torch.stack(desc_ignore_pts)
else:
kwargs["subsample_probabilities"] = np.ones(
len(retvals["output_xyz_pts"])
).astype(np.float64)
kwargs["subsample_probabilities"] /= kwargs[
"subsample_probabilities"
].sum()
processed_pts = SceneUnderstandDataset.transform_filter_subsample(
xyz_pts=retvals["output_xyz_pts"],
output_label_pts=retvals["output_label_pts"],
out_of_bounds_pts=retvals["out_of_bounds_pts"],
**kwargs,
)
retvals["output_xyz_pts"] = processed_pts["xyz_pts"]
retvals["out_of_bounds_pts"] = processed_pts["out_of_bounds_pts"]
retvals["output_xyz_pts"] = retvals["output_xyz_pts"][None].repeat(
len(processed_pts["output_label_pts"]), 1, 1
)
retvals["output_label_pts"] = processed_pts["output_label_pts"]
if self.xyz_pts_noise > 0.0:
retvals["output_xyz_pts"] += (
torch.randn_like(retvals["output_xyz_pts"]) * self.xyz_pts_noise
)
retvals["input_xyz_pts"] += (
torch.randn_like(retvals["input_xyz_pts"]) * self.xyz_pts_noise
)
retvals["out_of_frustum_pts_mask"] = torch.from_numpy(
np.stack(
[
~check_pts_in_frustum(
xyz_pts=desc_xyz_pts,
depth=depth,
cam_pose=cam_pose,
cam_intr=cam_intr,
)
for desc_xyz_pts in retvals["output_xyz_pts"]
],
axis=0,
)
).bool()
return retvals
class SceneCompletionDataset(SceneUnderstandDataset):
    """Dataset variant for scene completion driven by per-class patch saliencies.

    Each sample pairs an RGB-D observation with one feature channel per
    object-class "patch" (either relevancy/saliency maps or, with
    ``use_gt_seg``, ground-truth segmentation masks), plus optional
    ground-truth label points for supervision.
    """

    def __init__(self, num_patches: int, **kwargs):
        """Forward ``kwargs`` to ``SceneUnderstandDataset`` and record the
        per-sample patch budget."""
        super().__init__(**kwargs)
        self.num_patches = num_patches

    @staticmethod
    def get_scene_patches(
        file,
        num_subsample_patches: int,
        rgb_key: str,
        saliency_config: str,
        use_synonyms: bool,
        subtract_mean_relevancy: bool,
        full_objid_pts: Optional[torch.Tensor] = None,
        out_of_frustum_pts_mask: Optional[torch.Tensor] = None,
        only_return_num_patches: bool = False,
        use_gt_seg: bool = False,
    ):
        """Collect per-object-class patch labels, saliency maps, and features
        from an open scene HDF5 ``file``.

        When ``full_objid_pts`` is given (ground truth available), patch
        labels are restricted to classes actually present (and visible) in
        the scene. With ``use_gt_seg`` the "saliencies" are built from the
        GT segmentation instead of stored relevancy maps. Returns either the
        patch dict, or just the patch count if ``only_return_num_patches``.
        """
        assert only_return_num_patches or saliency_config is not None
        saliency_prefix = f"data/saliencies/{rgb_key}|{saliency_config}"
        has_groundtruth = full_objid_pts is not None
        scene_patches = dict()
        # Object labels are stored as "classname[objid]"; keep the class part.
        scene_object_labels = np.array(file[f"data/objid_to_class"]).astype(str)
        scene_patches["patch_labels"] = np.array(
            list(map(lambda s: s.split("[")[0], scene_object_labels))
        )
        if has_groundtruth:
            # Keep only object ids that are both inside the frustum and
            # visible in the segmentation image.
            if out_of_frustum_pts_mask is not None:
                scene_obj_ids = set(
                    full_objid_pts[~out_of_frustum_pts_mask].view(-1).long().tolist()
                )
            else:
                scene_obj_ids = set(full_objid_pts.view(-1).long().tolist())
            visible_obj_ids = set(
                np.unique(
                    deref_h5py(dataset=file["seg"], refs=file["data/seg"])[0]
                ).astype(int)
            ) - {-1}
            scene_obj_ids = scene_obj_ids.intersection(visible_obj_ids)
            scene_patches["patch_labels"] = list(
                set(scene_patches["patch_labels"][list(scene_obj_ids)])
                - {"empty", "out of bounds", "unlabelled"}
            )
        if use_synonyms:
            scene_patches["patch_labels"] = list(
                map(
                    lambda x: x if x not in synonyms.keys() else synonyms[x],
                    scene_patches["patch_labels"],
                )
            )
        if use_gt_seg:
            # Oracle mode: build binary patch masks from GT segmentation.
            assert has_groundtruth
            assert not subtract_mean_relevancy
            assert not use_synonyms
            patch_objids = dict()
            for patch_label in scene_patches["patch_labels"]:
                # All object ids whose class name matches this patch label.
                patch_objids[patch_label] = set(
                    map(
                        lambda objid: int(objid.split("[")[1].split("]")[0]),
                        filter(
                            lambda objid: objid.split("[")[0] == patch_label,
                            scene_object_labels.tolist(),
                        ),
                    )
                )
            patch_labels = np.array(list(patch_objids.keys()))
            num_patches = len(patch_objids)
            if num_subsample_patches != -1 and num_patches > num_subsample_patches:
                indices = np.random.choice(
                    num_patches, num_subsample_patches, replace=False
                )
                patch_labels = patch_labels[indices]
                patch_objids = {k: patch_objids[k] for k in patch_labels}
                num_patches = len(patch_objids)
            seg = deref_h5py(dataset=file["seg"], refs=file["data/seg"])[0]
            scene_patches["patch_saliencies"] = []
            for patch_label, objids in patch_objids.items():
                # take or of all object segs
                mask = np.zeros_like(seg)
                for objid in objids:
                    mask = np.logical_or(mask, (seg == objid))
                scene_patches["patch_saliencies"].append(mask)
            scene_patches["patch_saliencies"] = (
                torch.from_numpy(np.stack(scene_patches["patch_saliencies"])).float()
                - 0.5
            ) / 50  # because it will be multiplied by 50 later
            scene_patches["patch_labels"] = patch_labels
            scene_patches["patch_objmatches"] = np.array(
                [
                    "|".join(map(str, patch_objids[patch_label]))
                    for patch_label in scene_patches["patch_labels"]
                ]
            )
            scene_patches["num_patches"] = num_patches
            # NOTE HARDCODED, only meant for testing ours, not semaware
            scene_patches["patch_label_features"] = torch.zeros(
                size=(num_patches, 512)
            ).float()
            return scene_patches
        saliency_text_labels = np.array(
            file[f"{saliency_prefix}|saliency_text_labels"]
        ).astype(str)
        # make sure saliencies for scene object labels have been generated
        assert set(scene_patches["patch_labels"]).issubset(saliency_text_labels)
        saliency_indices = np.array(
            list(
                map(
                    lambda l: l[0],
                    # only get index, not actual saliency label
                    filter(
                        lambda l: l[1] in scene_patches["patch_labels"],
                        # make sure saliency text label is in
                        # set of valid patch mask labels
                        enumerate(saliency_text_labels),
                    ),
                )
            )
        )
        num_patches = len(saliency_indices)
        if only_return_num_patches:
            return num_patches
        if num_subsample_patches != -1 and num_patches > num_subsample_patches:
            saliency_indices = np.random.choice(
                saliency_indices, num_subsample_patches, replace=False
            )
            num_patches = num_subsample_patches
        # hdf5 indexing must be in order
        saliency_indices.sort()
        scene_patches["patch_labels"] = np.array(
            file[f"{saliency_prefix}|saliency_text_labels"]
        ).astype(str)[saliency_indices]
        scene_patches["patch_saliencies"] = torch.from_numpy(
            deref_h5py(
                dataset=file[f"saliencies"],
                refs=file[saliency_prefix][saliency_indices],
            )
        ).float()
        if subtract_mean_relevancy:
            # Subtract the stored "mean" relevancy map as a per-pixel baseline.
            mean_idx = (
                np.array(file[f"{saliency_prefix}|saliency_text_labels"])
                .astype(str)
                .tolist()
                .index("mean")
            )
            mean_relevancy = (
                torch.from_numpy(file[f"saliencies"][mean_idx]).float().squeeze()
            )
            scene_patches["patch_saliencies"] -= mean_relevancy
        scene_patches["patch_label_features"] = torch.from_numpy(
            np.array(file[f"{saliency_prefix}|saliency_text_label_features"])
        ).float()[saliency_indices]
        scene_patches["num_patches"] = num_patches
        if has_groundtruth:
            original_patch_labels = scene_patches["patch_labels"]
            if use_synonyms:
                # Map synonym labels back to the original class names so that
                # object-id matching against objid_to_class still works.
                inv_synonyms = {v: k for k, v in synonyms.items()}
                original_patch_labels = map(
                    lambda l: l if l not in synonyms.values() else inv_synonyms[l],
                    original_patch_labels,
                )
            scene_patches["patch_objmatches"] = np.array(
                [
                    "|".join(
                        [
                            str(objid)
                            for objid, obj_label in enumerate(scene_object_labels)
                            if obj_label.split("[")[0] == patch_label
                        ]
                    )
                    for patch_label in original_patch_labels
                ]
            )
        else:
            # matterport
            scene_patches["patch_objmatches"] = np.array([""] * num_patches)
        # Upsample saliencies to the stored RGB resolution.
        image_shape = file["rgb"].shape[1:-1]
        scene_patches["patch_saliencies"] = torch.nn.functional.interpolate(
            scene_patches["patch_saliencies"][:, None, :, :],
            size=tuple(image_shape),
            mode="bilinear",
            align_corners=False,
        )[:, 0]
        return scene_patches

    @classmethod
    def transform_retvals(
        cls,
        retvals: dict,
        num_output_pts: int,
        balance_spatial_sampling: bool,
        scene_bounds: np.ndarray,
        tsdf_shape,
        rgb,
        depth,
        cam_intr,
        cam_pose,
        network_inputs,
        **kwargs,
    ):
        """Transform/filter/subsample input and output point clouds and build
        the TSDF volume.

        Mutates ``retvals`` in place (returns None). ``kwargs`` is forwarded
        to ``transform_filter_subsample`` and must contain at least
        ``transform_matrix`` and ``num_subsample_pts``.
        """
        input_pts = SceneUnderstandDataset.transform_filter_subsample(
            xyz_pts=retvals["input_xyz_pts"],
            input_feature_pts=retvals["input_feature_pts"],
            subsample_probabilities=np.ones(len(retvals["input_xyz_pts"])).astype(
                np.float64
            )
            / len(retvals["input_xyz_pts"]),
            scene_bounds=scene_bounds,
            **kwargs,
        )
        kwargs["num_subsample_pts"] = -1
        # PROCESS OUTPUTS
        if "output_label_pts" in retvals:
            kwargs["num_subsample_pts"] = num_output_pts
            if balance_spatial_sampling:
                # Per-patch resampling so positive and negative points get
                # equal total sampling probability.
                patch_output_xyz_pts = []
                patch_output_label_pts = []
                patch_ignore_pts = []
                for patch_i in range(len(retvals["output_label_pts"])):
                    subsample_probabilities = np.ones(
                        len(retvals["output_xyz_pts"])
                    ).astype(np.float64)
                    positive_mask = retvals["output_label_pts"][patch_i].bool()
                    if positive_mask.any() and (not positive_mask.all()):
                        subsample_probabilities[positive_mask] = (
                            len(retvals["output_xyz_pts"]) / positive_mask.sum()
                        )
                        subsample_probabilities[~positive_mask] = (
                            len(retvals["output_xyz_pts"]) / (~positive_mask).sum()
                        )
                    subsample_probabilities /= subsample_probabilities.sum()
                    output_pts = SceneUnderstandDataset.transform_filter_subsample(
                        xyz_pts=retvals["output_xyz_pts"],
                        out_of_bounds_pts=retvals["out_of_bounds_pts"],
                        output_label_pts=retvals["output_label_pts"][patch_i][None, :],
                        subsample_probabilities=subsample_probabilities,
                        scene_bounds=scene_bounds,
                        **kwargs,
                    )
                    patch_output_xyz_pts.append(output_pts["xyz_pts"])
                    patch_output_label_pts.append(output_pts["output_label_pts"])
                    patch_ignore_pts.append(output_pts["out_of_bounds_pts"])
                retvals["output_xyz_pts"] = torch.stack(patch_output_xyz_pts)
                retvals["out_of_bounds_pts"] = torch.stack(patch_ignore_pts)
                retvals["output_label_pts"] = torch.stack(
                    patch_output_label_pts
                ).squeeze(dim=-2)
            else:
                # Uniform subsampling; one shared point set for all patches.
                output_pts = SceneUnderstandDataset.transform_filter_subsample(
                    xyz_pts=retvals["output_xyz_pts"],
                    output_label_pts=retvals["output_label_pts"],
                    out_of_bounds_pts=retvals["out_of_bounds_pts"],
                    subsample_probabilities=np.ones(
                        len(retvals["output_xyz_pts"])
                    ).astype(np.float64)
                    / len(retvals["output_xyz_pts"]),
                    scene_bounds=scene_bounds,
                    **kwargs,
                )
                retvals["output_xyz_pts"] = output_pts["xyz_pts"][None, ...].repeat(
                    len(output_pts["output_label_pts"]), 1, 1
                )
                retvals["out_of_bounds_pts"] = output_pts["out_of_bounds_pts"][
                    None, ...
                ].repeat(len(output_pts["output_label_pts"]), 1, 1)
                retvals["output_label_pts"] = output_pts["output_label_pts"]
        retvals["input_xyz_pts"] = input_pts["xyz_pts"]
        retvals["input_feature_pts"] = input_pts["input_feature_pts"]
        # construct the tsdf vol
        if "tsdf" in network_inputs:
            voxel_size = ((scene_bounds[1] - scene_bounds[0]) / tsdf_shape).min()
            tsdf_vol = TSDFVolume(vol_bnds=scene_bounds.T, voxel_size=voxel_size)
            final_transform = cam_pose
            if kwargs["transform_matrix"] is not None:
                final_transform = kwargs["transform_matrix"] @ cam_pose
            tsdf_vol.integrate(
                color_im=rgb.numpy(),
                depth_im=depth,
                cam_intr=cam_intr,
                cam_pose=final_transform,
            )
            retvals["tsdf_vol"] = torch.from_numpy(tsdf_vol.get_volume()[0])
        else:
            retvals["tsdf_vol"] = torch.ones(1)

    def __getitem__(self, idx):
        """Assemble one scene-completion sample: per-patch input features,
        optional GT labels, and (optionally) visualization extras.

        Returns a dict with all tensor values cast to float and all
        ``None`` entries dropped.
        """
        retvals = dict()
        scene_path = self.file_path + "/" + self.keys[idx]
        with h5py.File(scene_path, "r") as f:
            group = f["data"]
            depth = deref_h5py(dataset=f["depth"], refs=group["depth"])[0]
            cam_intr = np.array(group["cam_intr"])
            cam_pose = np.array(group["cam_pose"])
            if self.domain_randomized_rgb:
                retvals["rgb"] = np.array(group["domain_randomized_rgb"][0])
            else:
                retvals["rgb"] = np.array(f["rgb"][group["rgb"][0]][0])
            retvals["rgb"] = torch.from_numpy(retvals["rgb"]).float()
            retvals["input_xyz_pts"] = torch.from_numpy(
                get_pointcloud(depth, None, cam_intr, cam_pose)[0]
            ).float()
            retvals["full_objid_pts"] = None
            if "full_objid_pts" in group:
                retvals["output_xyz_pts"] = torch.from_numpy(
                    deref_h5py(dataset=f["full_xyz_pts"], refs=group["full_xyz_pts"])[0]
                ).float()
                retvals["full_objid_pts"] = torch.from_numpy(
                    deref_h5py(
                        dataset=f["full_objid_pts"], refs=group["full_objid_pts"]
                    )[0]
                ).long()
                retvals["out_of_frustum_pts_mask"] = ~check_pts_in_frustum(
                    xyz_pts=retvals["output_xyz_pts"],
                    depth=depth,
                    cam_pose=cam_pose,
                    cam_intr=cam_intr,
                )
            scene_patches = self.get_scene_patches(
                file=f,
                num_subsample_patches=self.num_patches if not self.return_vis else -1,
                full_objid_pts=retvals["full_objid_pts"],
                out_of_frustum_pts_mask=retvals["out_of_frustum_pts_mask"]
                if "out_of_frustum_pts_mask" in retvals
                else None,
                saliency_config=self.saliency_config,
                subtract_mean_relevancy=self.subtract_mean_relevancy,
                use_synonyms=self.use_synonyms,
                rgb_key="domain_randomized_rgb"
                if self.domain_randomized_rgb
                else "rgb",
            )
            # Build per-point input features from the configured input modes.
            feature_pts = []
            feature_dim = 0
            if "rgb" in self.network_inputs:
                # if rgb is in network inputs, then approach must be semantic aware
                # therefore, no other inputs
                feature_pts.append(retvals["rgb"][None, ...] / 255.0)
                feature_dim += 3
            else:
                if "patch_masks" in self.network_inputs:
                    if self.offset_patch_mask:
                        # Masks in {-1, +1} instead of {0, 1}.
                        feature_pts.append(
                            (
                                scene_patches["patch_saliencies"][..., None]
                                > self.patch_mask_cutoff
                            )
                            * 2
                            - 1
                        )
                    else:
                        feature_pts.append(
                            (
                                scene_patches["patch_saliencies"][..., None]
                                > self.patch_mask_cutoff
                            )
                        )
                    feature_dim += 1
                if "saliency" in self.network_inputs:
                    patch_saliencies = scene_patches["patch_saliencies"][..., None]
                    if self.saliency_vmin is not None:
                        patch_saliencies -= self.saliency_vmin
                        patch_saliencies[patch_saliencies < 0] = 0
                    feature_pts.append(patch_saliencies * 50)
                    # gradcam values typically between -0.02 and 0.02
                    feature_dim += 1
            retvals["input_feature_pts"] = torch.cat(feature_pts, dim=-1)
            retvals["input_feature_pts"] = retvals["input_feature_pts"].view(
                len(retvals["input_feature_pts"]), -1, feature_dim
            )
            # Zero-pad patch features up to the fixed patch budget.
            if (
                self.num_patches > len(retvals["input_feature_pts"])
                and not self.return_vis
                and "rgb" not in self.network_inputs
            ):
                retvals["input_feature_pts"] = torch.cat(
                    (
                        retvals["input_feature_pts"],
                        torch.zeros(
                            self.num_patches - len(retvals["input_feature_pts"]),
                            *list(retvals["input_feature_pts"].shape[1:]),
                        ),
                    ),
                    dim=0,
                )
            retvals["semantic_class_features"] = scene_patches["patch_label_features"]
            # Pad class features with random vectors (not zeros) up to budget.
            if (
                self.num_patches > len(scene_patches["patch_label_features"])
                and not self.return_vis
            ):
                retvals["semantic_class_features"] = torch.cat(
                    (
                        retvals["semantic_class_features"],
                        torch.randn(
                            [self.num_patches - len(retvals["semantic_class_features"])]
                            + list(retvals["semantic_class_features"].shape[1:]),
                        ),
                    ),
                    dim=0,
                )
            if (
                self.load_gt
                and "full_objid_pts" in retvals
                and retvals["full_objid_pts"] is not None
            ):
                gt_seg = deref_h5py(dataset=f["seg"], refs=group["seg"])[0]
                retvals["seg"] = gt_seg
                # One binary label row per patch over the full scene points.
                output_label_pts = torch.zeros(
                    len(retvals["semantic_class_features"]),
                    len(retvals["full_objid_pts"]),
                    dtype=float,
                )
                for patch_i, patch_matches in enumerate(
                    scene_patches["patch_objmatches"]
                ):
                    for objid in patch_matches.split("|"):
                        if objid == "":
                            continue
                        output_label_pts[
                            patch_i, retvals["full_objid_pts"] == int(objid)
                        ] = 1.0
                retvals["output_label_pts"] = output_label_pts
                retvals["out_of_bounds_pts"] = torch.zeros(
                    len(retvals["full_objid_pts"])
                ).float()
                object_labels = np.array(group["objid_to_class"]).astype(str).tolist()
                if "out of bounds" in object_labels:
                    oob_idx = object_labels.index("out of bounds")
                    retvals["out_of_bounds_pts"] = (
                        retvals["full_objid_pts"] == oob_idx
                    ).float()
            retvals["patch_labels"] = scene_patches["patch_labels"].tolist()
            assert all(map(lambda l: l != "", retvals["patch_labels"]))
            # Pad labels with "" up to the patch budget (skip when return_vis).
            retvals["patch_labels"] += (
                [""]
                * max(self.num_patches - len(retvals["patch_labels"]), 0)
                * int(not self.return_vis)
            )
            retvals["scene_id"] = self.keys[idx].split("/")[-1].split(".")[0]
            if self.return_vis:
                retvals["depth"] = depth
                retvals["cam_intr"] = cam_intr
                retvals["cam_pose"] = cam_pose
                retvals["patch_objmatches"] = scene_patches["patch_objmatches"].tolist()
                retvals["vis_gt_object_labels"] = (
                    np.array(group["objid_to_class"]).astype(str).tolist()
                    if "objid_to_class" in group
                    else []
                )
                if "matterport" in self.file_path or "arkit" in self.file_path:
                    vis_xyz_pts, vis_rgb_pts = get_pointcloud(
                        depth, retvals["rgb"].numpy(), cam_intr, cam_pose
                    )
                    retvals["vis_gt_objid_pts"] = torch.from_numpy(vis_rgb_pts).float()
                    retvals["vis_gt_xyz_pts"] = torch.from_numpy(vis_xyz_pts).float()
                else:
                    retvals["vis_gt_objid_pts"] = retvals["full_objid_pts"]
                    retvals["vis_gt_xyz_pts"] = torch.from_numpy(
                        deref_h5py(
                            dataset=f["full_xyz_pts"], refs=group["full_xyz_pts"]
                        )[0]
                    ).float()
                    # Drop "empty" points for visualization.
                    # NOTE(review): assumes the max obj id is the "empty"
                    # class — confirm against objid_to_class ordering.
                    empty_mask = (
                        retvals["vis_gt_objid_pts"] == retvals["vis_gt_objid_pts"].max()
                    )
                    retvals["vis_gt_objid_pts"] = retvals["vis_gt_objid_pts"][
                        ~empty_mask
                    ]
                    retvals["vis_gt_xyz_pts"] = retvals["vis_gt_xyz_pts"][~empty_mask]
                retvals["patch_saliencies"] = scene_patches["patch_saliencies"]
        transform_matrix = None
        if self.domain_randomization:
            # Random similarity transform; note the randomization magnitudes
            # are hardcoded here (unlike SceneUnderstandDataset.__getitem__,
            # which reads dr_pos/dr_orn/dr_scale).
            scene_dims = self.scene_bounds[1, :] - self.scene_bounds[0, :]
            assert (scene_dims >= 0).all()
            translation = torch.randn(3) * scene_dims * 0.05
            rotation = euler.euler2mat(
                (torch.rand(1)[0] - 0.5) * 0.3,
                (torch.rand(1)[0] - 0.5) * 0.3,
                (torch.rand(1)[0] - 0.5) * 0.3
                # full rotation around z axis
            )
            scale = torch.rand(3) * 0.1 + 1.0
            transform_matrix = torch.tensor(
                affines.compose(T=translation, R=rotation, Z=scale)
            ).float()
        # filter out points with invalid depth
        if (depth == 0.0).any():
            invalid_depth_mask = (depth == 0.0).reshape(-1)
            for k in retvals.keys():
                if "input" in k:
                    # Drop invalid pixels along whichever axis matches the
                    # pixel count (points-first vs patches-first layouts).
                    if retvals[k].shape[0] == len(invalid_depth_mask):
                        retvals[k] = retvals[k][~invalid_depth_mask]
                    elif retvals[k].shape[1] == len(invalid_depth_mask):
                        retvals[k] = retvals[k][:, ~invalid_depth_mask]
                    else:
                        raise Exception()
        # PROCESS INPUTS
        kwargs = {
            "transform_matrix": transform_matrix,
            "scene_bounds": self.scene_bounds,
            "num_subsample_pts": self.num_input_pts,
            "alway_replace_pts": self.always_replace_subsample_pts,
            "depth": depth,
            "cam_intr": cam_intr,
            "cam_pose": cam_pose,
            "balance_spatial_sampling": self.balance_spatial_sampling,
            "tsdf_shape": self.tsdf_shape,
            "retvals": retvals,
            "num_output_pts": self.num_output_pts if not self.return_vis else -1,
            "rgb": retvals["rgb"],
            "network_inputs": self.network_inputs,
        }
        try:
            self.transform_retvals(**kwargs)
        except Exception as e:
            # The random transform can push all points out of bounds;
            # retry without it.
            kwargs["transform_matrix"] = None
            self.transform_retvals(**kwargs)
        if "output_xyz_pts" in retvals:
            retvals["out_of_frustum_pts_mask"] = ~torch.from_numpy(
                np.stack(
                    [
                        check_pts_in_frustum(
                            xyz_pts=xyz_pts,
                            depth=depth,
                            cam_pose=cam_pose,
                            cam_intr=cam_intr,
                        )
                        for xyz_pts in retvals["output_xyz_pts"].cpu().numpy()
                    ]
                )
            )
            if self.xyz_pts_noise > 0.0:
                retvals["output_xyz_pts"] += (
                    torch.randn_like(retvals["output_xyz_pts"]) * self.xyz_pts_noise
                )
                retvals["input_xyz_pts"] += (
                    torch.randn_like(retvals["input_xyz_pts"]) * self.xyz_pts_noise
                )
        # Cast tensors to float and drop None placeholders.
        return {
            k: v.float() if type(v) == torch.Tensor else v
            for k, v in retvals.items()
            if v is not None
        }
| 52,891 | 41.689266 | 138 | py |
semantic-abstraction | semantic-abstraction-main/net.py | from typing import List, Tuple
import torch
from torch.nn import (
Sequential,
LeakyReLU,
Linear,
Module,
Dropout,
ParameterDict,
)
from torch.nn.parameter import Parameter
from torch.nn.functional import grid_sample
from torch_scatter import scatter
import numpy as np
from unet3d import ResidualUNet3D
from CLIP.clip import ClipWrapper
from torchtyping import TensorType, patch_typeguard
from typeguard import typechecked
patch_typeguard()  # enable torchtyping/typeguard integration; must run before any @typechecked below
@typechecked
class VirtualGrid:
    """Axis-aligned voxel grid over a scene bounding box.

    Converts between world-space points, per-axis voxel indices, and
    flattened voxel indices, and scatter-pools per-point features into a
    dense feature volume.
    """

    def __init__(
        self,
        scene_bounds,
        grid_shape: Tuple[int, int, int] = (32, 32, 32),
        batch_size: int = 8,
        device: torch.device = torch.device("cpu"),
        int_dtype: torch.dtype = torch.int64,
        float_dtype: torch.dtype = torch.float32,
        reduce_method: str = "mean",
    ):
        # scene_bounds rows: [0] lower corner, [1] upper corner (xyz).
        self.lower_corner = tuple(scene_bounds[0])
        self.upper_corner = tuple(scene_bounds[1])
        self.grid_shape = tuple(grid_shape)
        self.batch_size = int(batch_size)
        self.device = device
        self.int_dtype = int_dtype
        self.float_dtype = float_dtype
        # Default pooling used by scatter_points when no override is given.
        self.reduce_method = reduce_method

    @property
    def num_grids(self):
        """Total number of voxel cells across the whole batch."""
        grid_shape = self.grid_shape
        batch_size = self.batch_size
        return int(np.prod((batch_size,) + grid_shape))

    def get_grid_idxs(self, include_batch=True):
        """Return integer index coordinates for every voxel.

        Shape is ``(B, X, Y, Z, 4)`` with a leading batch coordinate when
        ``include_batch``, else ``(X, Y, Z, 3)``.
        """
        batch_size = self.batch_size
        grid_shape = self.grid_shape
        device = self.device
        int_dtype = self.int_dtype
        dims = grid_shape
        if include_batch:
            dims = (batch_size,) + grid_shape
        axis_coords = [torch.arange(0, x, device=device, dtype=int_dtype) for x in dims]
        coords_per_axis = torch.meshgrid(*axis_coords, indexing="ij")
        grid_idxs = torch.stack(coords_per_axis, dim=-1)
        return grid_idxs

    def get_grid_points(self, include_batch=True):
        """Return the world-space xyz center of every voxel.

        The grid spans the bounds inclusively: index 0 maps to the lower
        corner and index ``grid_shape[i] - 1`` to the upper corner.
        """
        lower_corner = self.lower_corner
        upper_corner = self.upper_corner
        grid_shape = self.grid_shape
        float_dtype = self.float_dtype
        device = self.device
        grid_idxs = self.get_grid_idxs(include_batch=include_batch)
        lc = torch.tensor(lower_corner, dtype=float_dtype, device=device)
        uc = torch.tensor(upper_corner, dtype=float_dtype, device=device)
        idx_scale = torch.tensor(grid_shape, dtype=float_dtype, device=device) - 1
        scales = (uc - lc) / idx_scale
        offsets = lc
        grid_idxs_no_batch = grid_idxs
        if include_batch:
            # Drop the batch coordinate; it carries no spatial meaning.
            grid_idxs_no_batch = grid_idxs[:, :, :, :, 1:]
        grid_idxs_f = grid_idxs_no_batch.to(float_dtype)
        grid_points = grid_idxs_f * scales + offsets
        return grid_points

    def get_points_grid_idxs(self, points, cast_to_int=True, batch_idx=None):
        """Map world-space ``points`` to (clamped) voxel index coordinates.

        Args:
            points: (..., 3) tensor of xyz coordinates.
            cast_to_int: truncate to integer indices before clamping.
            batch_idx: optional per-point batch index to prepend, giving
                (..., 4) output.
        """
        lower_corner = self.lower_corner
        upper_corner = self.upper_corner
        grid_shape = self.grid_shape
        int_dtype = self.int_dtype
        float_dtype = self.float_dtype
        device = self.device
        lc = torch.tensor(lower_corner, dtype=float_dtype, device=device)
        uc = torch.tensor(upper_corner, dtype=float_dtype, device=device)
        idx_scale = torch.tensor(grid_shape, dtype=float_dtype, device=device) - 1
        offsets = -lc
        scales = idx_scale / (uc - lc)
        points_idxs_i = (points + offsets) * scales
        if cast_to_int:
            points_idxs_i = points_idxs_i.to(dtype=int_dtype)
        # Clamp per axis so out-of-bounds points land on the boundary voxel.
        points_idxs = torch.empty_like(points_idxs_i)
        for i in range(3):
            points_idxs[..., i] = torch.clamp(
                points_idxs_i[..., i], min=0, max=grid_shape[i] - 1
            )
        final_points_idxs = points_idxs
        if batch_idx is not None:
            final_points_idxs = torch.cat(
                [
                    batch_idx.view(*points.shape[:-1], 1).to(dtype=points_idxs.dtype),
                    points_idxs,
                ],
                dim=-1,
            )
        return final_points_idxs

    def flatten_idxs(self, idxs, keepdim=False):
        """Flatten (…, 3) or batched (…, 4) index coordinates to linear
        row-major indices."""
        grid_shape = self.grid_shape
        batch_size = self.batch_size
        coord_size = idxs.shape[-1]
        target_shape = None
        if coord_size == 4:
            # with batch
            target_shape = (batch_size,) + grid_shape
        elif coord_size == 3:
            # without batch
            target_shape = grid_shape
        else:
            raise RuntimeError("Invalid shape {}".format(str(idxs.shape)))
        # Row-major strides: product of trailing dims, then 1 for the last.
        target_stride = tuple(np.cumprod(np.array(target_shape)[::-1])[::-1])[1:] + (1,)
        flat_idxs = (
            idxs * torch.tensor(target_stride, dtype=idxs.dtype, device=idxs.device)
        ).sum(dim=-1, keepdim=keepdim, dtype=idxs.dtype)
        return flat_idxs

    def unflatten_idxs(self, flat_idxs, include_batch=True):
        """Invert :meth:`flatten_idxs`, recovering per-axis coordinates."""
        grid_shape = self.grid_shape
        batch_size = self.batch_size
        target_shape = grid_shape
        if include_batch:
            target_shape = (batch_size,) + grid_shape
        target_stride = tuple(np.cumprod(np.array(target_shape)[::-1])[::-1])[1:] + (1,)
        source_shape = tuple(flat_idxs.shape)
        if source_shape[-1] == 1:
            # Accept a trailing singleton dim (keepdim=True flatten output).
            source_shape = source_shape[:-1]
            flat_idxs = flat_idxs[..., 0]
        source_shape += (4,) if include_batch else (3,)
        idxs = torch.empty(
            size=source_shape, dtype=flat_idxs.dtype, device=flat_idxs.device
        )
        mod = flat_idxs
        for i in range(source_shape[-1]):
            # FIX: integer floor division keeps exact indices; the previous
            # true-division relied on float truncation on assignment, which
            # loses precision for large flat indices.
            idxs[..., i] = mod // target_stride[i]
            mod = mod % target_stride[i]
        return idxs

    def idxs_to_points(self,
                       idxs):
        """Map (…, 3) or batched (…, 4) index coordinates to world-space
        voxel centers (batch coordinate, if present, is dropped)."""
        lower_corner = self.lower_corner
        upper_corner = self.upper_corner
        grid_shape = self.grid_shape
        float_dtype = self.float_dtype
        device = idxs.device
        source_shape = idxs.shape
        point_idxs = None
        if source_shape[-1] == 4:
            # has batch idx
            point_idxs = idxs[..., 1:]
        elif source_shape[-1] == 3:
            point_idxs = idxs
        else:
            raise RuntimeError("Invalid shape {}".format(tuple(source_shape)))
        lc = torch.tensor(lower_corner, dtype=float_dtype, device=device)
        uc = torch.tensor(upper_corner, dtype=float_dtype, device=device)
        idx_scale = torch.tensor(grid_shape, dtype=float_dtype, device=device) - 1
        offsets = lc
        scales = (uc - lc) / idx_scale
        idxs_points = point_idxs * scales + offsets
        return idxs_points

    def scatter_points(self, xyz_pts, feature_pts, reduce_method=None, **kwargs):
        """Pool per-point features into a dense (B, C, X, Y, Z) volume.

        Args:
            xyz_pts: (B, num_pts, 3) world-space coordinates.
            feature_pts: (B, num_pts, C) per-point features.
            reduce_method: optional per-call override of the pooling mode
                configured at construction time.
        """
        if reduce_method is None:
            reduce_method = self.reduce_method
        batch_size = feature_pts.shape[0]
        idxs = self.get_points_grid_idxs(xyz_pts)
        # idxs.shape = [B, num_pts, 3]
        flat_idxs = self.flatten_idxs(idxs, keepdim=False)
        # flat_idxs.shape = [B, num_pts]
        vol_features = scatter(
            src=feature_pts,
            index=flat_idxs,
            dim=-2,
            dim_size=np.prod(self.grid_shape),
            # FIX: honor the per-call override — previously self.reduce_method
            # was always used, silently ignoring the parameter.
            reduce=reduce_method,
            **kwargs
        ).view(batch_size, *self.grid_shape, -1)
        return vol_features.permute(0, 4, 1, 2, 3).contiguous()
class ImplicitVolumetricDecoder(Module):
    """Decode per-point features from a dense feature volume.

    Queries a 3D feature volume at continuous world-space points via
    trilinear interpolation (grid_sample), then maps each sampled feature
    through a small point-wise MLP.
    """

    def __init__(self, hidden_size: int, output_dim: int, concat_xyz_pts: bool = False):
        # concat_xyz_pts: if True, the normalized query coordinates are
        # appended to each sampled feature before the MLP (hence the +3).
        super().__init__()
        self.concat_xyz_pts = concat_xyz_pts
        self.mlp = Sequential(
            Linear(hidden_size + int(self.concat_xyz_pts) * 3, hidden_size),
            LeakyReLU(),
            Linear(hidden_size, output_dim),
        )
        self.output_dim = output_dim

    def forward(
        self,
        features_vol: TensorType["batch", "channel", "width", "height", "length"],
        virtual_grid: VirtualGrid,
        query_points: TensorType["batch", "num_points", 3],
    ) -> TensorType["batch", "num_points", "channel"]:
        """Sample `features_vol` at world-space `query_points`, run the MLP."""
        # world coordinates -> fractional grid indices in [0, grid_shape - 1]
        query_points = virtual_grid.get_points_grid_idxs(
            query_points, cast_to_int=False
        ).float()
        for i in range(len(virtual_grid.grid_shape)):
            # NOTE(review): dividing by grid_shape[i] (not grid_shape[i] - 1)
            # maps indices to [0, (g-1)/g] rather than exactly [0, 1]; with
            # align_corners=True below this slightly offsets the samples —
            # confirm this is intentional.
            query_points[..., i] = query_points[..., i] / virtual_grid.grid_shape[i]
        # query_points now between 0 and 1
        # normalize query points to (-1, 1), which is
        # required by grid_sample
        query_points_normalized = 2.0 * query_points - 1.0
        # grid_sample wants a [B, D, H, W, 3] grid; use [B, num_points, 1, 1, 3]
        query_points = query_points_normalized.view(
            *(query_points_normalized.shape[:2] + (1, 1, 3))
        )
        sampled_features = grid_sample(
            input=features_vol,
            grid=query_points,
            mode="bilinear",
            padding_mode="border",
            align_corners=True,
        )
        # [B, C, num_points, 1, 1] -> [B, num_points, C]
        sampled_features = (
            sampled_features.view(sampled_features.shape[:3])
            .permute(0, 2, 1)
            .contiguous()
        )
        B, L, C = sampled_features.shape
        # return sampled_features
        # flatten batch and point dims so the MLP sees one feature per row
        sampled_features = sampled_features.view(B * L, C).contiguous()
        if self.concat_xyz_pts:
            # append the normalized (grid_sample-space) query coordinates
            sampled_features = torch.cat(
                (sampled_features, query_points.view(B * L, 3)), dim=-1
            )
        out_features = (
            self.mlp(sampled_features).view(B, L, self.output_dim).contiguous()
        )
        return out_features
class PointingAttention(Module):
    """Score (key, query) feature pairs with a configurable attention kernel.

    Supported methods:
      - "dot_product": scaled dot-product (scaled by sqrt(pointing_dim)).
      - "cosine_sim": cosine similarity divided by `pointing_temperature`.
      - "additive": Bahdanau-style tanh(key + query) projected to a scalar.

    The selected scoring function is bound to ``forward`` at construction.
    """

    def __init__(self, pointing_dim, method="dot_product", pointing_temperature=0.07):
        super().__init__()
        self.method = method
        self.pointing_dim = pointing_dim
        if method == "dot_product":
            self.forward = self.dot_product
        elif method == "cosine_sim":
            self.cosine_sim_temp = pointing_temperature
            self.forward = self.cosine_sim
        elif method == "additive":
            # learned projection of the combined key/query to a scalar score
            self.pointer_v = Linear(pointing_dim, 1, bias=False)
            self.forward = self.additive
        else:
            # BUG FIX: was a bare `raise Exception()` with no message
            raise ValueError(f"Unsupported pointing method '{method}'")

    @staticmethod
    def prep_input(key, query):
        """
        key.shape = BxKx[ABC]xD
        query.shape = BxQx[XYZ]xD
        output attention should be: Bx[ABC]x[XYZ]xD
        """
        # fast path: identically-shaped inputs need no broadcasting setup
        if key.shape == query.shape:
            return key, query
        for _ in range(len(key.shape) - 3):
            query = query.unsqueeze(2)
        # Now, query.shape = BxQx[1,1,1]x[XYZ]xD
        for _ in range(len(query.shape) - len(key.shape)):
            key = key.unsqueeze(-2)
        # Now, key.shape = BxKx[ABC]x[1,1,1]xD
        key = key.unsqueeze(dim=2)
        query = query.unsqueeze(dim=1)
        return key, query

    def dot_product(self, key, query):
        """Scaled dot-product attention scores."""
        key, query = self.prep_input(key, query)
        dotprod = (query * key).sum(dim=-1)
        pointing_attn = dotprod / np.sqrt(self.pointing_dim)
        return pointing_attn

    def cosine_sim(self, key, query):
        """
        key.shape = BxDxKx...
        query.shape = BxDxQx...
        """
        key, query = self.prep_input(key, query)
        pointing_attn = (
            torch.cosine_similarity(key, query, dim=-1) / self.cosine_sim_temp
        )
        return pointing_attn

    def additive(self, key, query):
        """Additive (Bahdanau-style) attention scores."""
        key, query = self.prep_input(key, query)
        additive_kq = query + key
        additive_kq = torch.tanh(additive_kq)
        pointing_attn = self.pointer_v(additive_kq).squeeze(dim=-1)
        return pointing_attn
class SemAbs3D(Module):
    """Semantic-abstraction 3D completion network.

    Scatters per-point input features (saliency / rgb / patch masks) into a
    voxel grid, optionally concatenates a TSDF volume, runs a 3D residual
    U-Net, and decodes per-query-point outputs with an implicit decoder.
    """

    def __init__(
        self,
        voxel_shape: Tuple[int, int, int],
        scene_bounds: Tuple[Tuple[float, float, float], Tuple[float, float, float]],
        unet_num_channels: int,
        unet_f_maps: int,
        unet_num_groups: int,
        unet_num_levels: int,
        network_inputs: List[str],
        use_pts_feat_extractor: bool,
        pts_feat_extractor_hidden_dim: int,
        reduce_method: str,
        output_dim=1,
        device: str = "cuda",
        decoder_concat_xyz_pts: bool = False,
        **kwargs
    ):
        super().__init__()
        self.device = device
        # maps world-space points to/from a voxel grid over scene_bounds
        self.vg = VirtualGrid(
            scene_bounds=np.array(scene_bounds),
            batch_size=kwargs["batch_size"],
            grid_shape=voxel_shape,
            device=torch.device(device),
        )
        # persisted training-step counter
        self.register_buffer("steps", torch.zeros(1))
        self.network_inputs = network_inputs
        self.use_pts_feat_extractor = use_pts_feat_extractor
        self.reduce_method = reduce_method
        # per-point input channels: 1 (saliency) + 3 (rgb) + 1 (patch mask),
        # depending on which inputs are enabled
        self.pts_feature_dim = (
            ("saliency" in self.network_inputs)
            + ("rgb" in self.network_inputs) * 3
            + ("patch_masks" in self.network_inputs)
        )
        # +1 channel if a TSDF volume is concatenated to the scattered grid
        vol_feature_extractor_input_channels = self.pts_feature_dim + (
            "tsdf" in self.network_inputs
        )
        if self.use_pts_feat_extractor:
            # per-point MLP lifting (xyz, features) to unet_num_channels
            # (minus the TSDF channel, which is appended at volume level)
            self.pts_feat_extractor = Sequential(
                Linear(self.pts_feature_dim + 3, pts_feat_extractor_hidden_dim),
                LeakyReLU(),
                Linear(pts_feat_extractor_hidden_dim, pts_feat_extractor_hidden_dim),
                LeakyReLU(),
                Linear(
                    pts_feat_extractor_hidden_dim,
                    unet_num_channels - int("tsdf" in self.network_inputs),
                ),
            )
            vol_feature_extractor_input_channels = unet_num_channels
            assert self.reduce_method == "max"
        self.vol_feature_extractor = ResidualUNet3D(
            in_channels=vol_feature_extractor_input_channels,
            out_channels=unet_num_channels,
            f_maps=unet_f_maps,
            num_groups=unet_num_groups,
            num_levels=unet_num_levels,
        )
        # implicit decoder producing `output_dim` values per query point
        self.visual_sampler = ImplicitVolumetricDecoder(
            hidden_size=unet_num_channels,
            output_dim=output_dim,
            concat_xyz_pts=decoder_concat_xyz_pts,
        )

    def forward(
        self, input_xyz_pts, input_feature_pts, tsdf_vol, output_xyz_pts, **kwargs
    ):
        """Return per-output-point predictions, shape
        [batch, num_patches, num_output_pts(, output_dim)].

        Also caches the U-Net output as self.visual_volumetric_features for
        callers (e.g. SemAbsVOOL) that reuse the volume directly.
        """
        batch_size, num_patches, input_num_pts = input_feature_pts.shape[:3]
        # replicate the shared point cloud for every patch and fold the patch
        # dimension into the batch dimension
        input_xyz_pts = (
            input_xyz_pts.unsqueeze(dim=1)
            .repeat(1, num_patches, 1, 1)
            .view(batch_size * num_patches, input_num_pts, 3)
        )
        input_feature_pts = input_feature_pts.view(
            batch_size * num_patches, input_num_pts, self.pts_feature_dim
        )
        if self.use_pts_feat_extractor:
            input_feature_pts = self.pts_feat_extractor(
                torch.cat(
                    (
                        input_xyz_pts,
                        input_feature_pts,
                    ),
                    dim=-1,
                )
            )
        visual_volumetric_features = self.vg.scatter_points(
            xyz_pts=input_xyz_pts,
            feature_pts=input_feature_pts,
            reduce_method=self.reduce_method,
        )
        batch_size, num_patches, num_output_pts = output_xyz_pts.shape[:3]
        # if the scatter produced fewer volumes than batch*patches, expand
        # per-sample volumes across patches (repeat-interleaved per batch)
        if visual_volumetric_features.shape[0] < batch_size * num_patches:
            visual_volumetric_features = (
                visual_volumetric_features[:, None, ...]
                .repeat(1, num_patches, 1, 1, 1, 1)
                .view(batch_size * num_patches, *visual_volumetric_features.shape[1:])
            )
        if "tsdf" in self.network_inputs:
            # NOTE(review): `repeat(num_patches, 1, 1, 1, 1)` tiles the batch
            # dimension (b0..bN, b0..bN), whereas the feature expansion above
            # repeat-interleaves per sample — for batch_size > 1 these
            # orderings differ; confirm tsdf_vol's intended layout.
            visual_volumetric_features = torch.cat(
                (
                    tsdf_vol.unsqueeze(dim=1).repeat(num_patches, 1, 1, 1, 1),
                    visual_volumetric_features,
                ),
                dim=1,
            )
        # cached for callers that consume the raw volume
        self.visual_volumetric_features = self.vol_feature_extractor(
            visual_volumetric_features
        )
        output_xyz_pts = output_xyz_pts.view(
            batch_size * num_patches, num_output_pts, 3
        )
        return (
            self.visual_sampler(
                features_vol=self.visual_volumetric_features,
                virtual_grid=self.vg,
                query_points=output_xyz_pts,
            )
            .view(batch_size, num_patches, num_output_pts, -1)
            .squeeze(dim=-1)
        )
class SemanticAwareOVSSC(SemAbs3D):
    """OVSSC variant that scores completed point features against per-patch
    semantic class embeddings (e.g. CLIP text features)."""

    def __init__(self, pointing_method: str, clip_hidden_dim: int = 512, **kwargs):
        # decode clip_hidden_dim-sized features so they can be compared
        # against the class embeddings
        super().__init__(output_dim=clip_hidden_dim, **kwargs)
        self.semantic_class_pointer = PointingAttention(
            pointing_dim=clip_hidden_dim, method=pointing_method
        )

    def forward(self, semantic_class_features, **kwargs):
        """Return per-point class scores, shape [batch, num_patches, num_pts]."""
        sampled_features = super().forward(**kwargs)
        assert sampled_features.shape[1] == semantic_class_features.shape[1]
        num_patches = semantic_class_features.shape[1]
        # score each patch's sampled features against its class embedding
        per_patch_scores = []
        for patch_i in range(num_patches):
            score = self.semantic_class_pointer(
                key=semantic_class_features[:, patch_i, ...][:, None, ...],
                query=sampled_features[:, patch_i, ...][:, None, ...],
            )
            per_patch_scores.append(score)
        stacked = torch.stack(per_patch_scores, dim=1)
        return stacked.squeeze(dim=2).squeeze(dim=2)
class SemAbsVOOL(Module):
    """Semantic-abstraction visual object-oriented localization (VOOL).

    Completes separate feature volumes from target- and reference-object
    saliency, concatenates them, decodes per-point features, and scores them
    against learned spatial-relation embeddings.
    """

    def __init__(
        self,
        pointing_method: str,
        pointing_dim: int,
        device: str,
        decoder_concat_xyz_pts: bool,
        **kwargs
    ):
        super().__init__()
        # persisted training-step counter
        self.register_buffer("steps", torch.zeros(1))
        self.device = device
        self.completion_net = SemAbs3D(device=device, **kwargs).to(device)
        # decodes the concatenated target+reference volume (hence 2x channels)
        self.spatial_sampler = ImplicitVolumetricDecoder(
            hidden_size=2 * kwargs["unet_num_channels"],
            output_dim=pointing_dim,
            concat_xyz_pts=decoder_concat_xyz_pts,
        )
        self.pointer = PointingAttention(
            method=pointing_method, pointing_dim=pointing_dim
        )
        # one learned embedding per supported spatial relation
        self.relation_embeddings = ParameterDict(
            {
                k: Parameter(torch.randn(pointing_dim))
                for k in [
                    "in",
                    "behind",
                    "in front of",
                    "on the left of",
                    "on the right of",
                    "on",
                    "[pad]",
                ]
            }
        )

    def get_region_pointing_features(self, spatial_relation_name, **kwargs):
        # spatial_relation_name.shape NUMDESCxBATCHxWORD
        # look up each description's relation embedding, then reorder to
        # [batch, num_descs, pointing_dim]
        region_pointing_features = (
            torch.stack(
                [
                    torch.stack(
                        [
                            self.relation_embeddings[
                                spatial_relation_name[desc_i][batch_i]
                            ]
                            for batch_i in range(len(spatial_relation_name[desc_i]))
                        ],
                        dim=0,
                    )
                    for desc_i in range(len(spatial_relation_name))
                ],
                dim=0,
            )
            .permute(1, 0, 2)
            .contiguous()
        )
        return region_pointing_features

    def get_feature_vol(
        self,
        input_xyz_pts,
        input_target_saliency_pts,
        input_reference_saliency_pts,
        tsdf_vol,
        num_descs,
        **kwargs
    ):
        """Run the completion net twice (target then reference saliency) and
        return the two cached feature volumes concatenated channel-wise.

        The completion net's return value is discarded; only its cached
        `visual_volumetric_features` side effect is used, so the placeholder
        query points are irrelevant.
        """
        place_holder_output_xyz_pts = torch.zeros_like(input_xyz_pts)[
            ..., None, 0:1, :
        ].repeat(1, num_descs, 1, 1)
        self.completion_net(
            input_xyz_pts=input_xyz_pts,
            input_feature_pts=input_target_saliency_pts,
            tsdf_vol=tsdf_vol,
            # placeholder
            output_xyz_pts=place_holder_output_xyz_pts,
        )
        target_feature_vol = self.completion_net.visual_volumetric_features
        self.completion_net(
            input_xyz_pts=input_xyz_pts,
            input_feature_pts=input_reference_saliency_pts,
            tsdf_vol=tsdf_vol,
            # placeholder
            output_xyz_pts=place_holder_output_xyz_pts,
        )
        reference_feature_vol = self.completion_net.visual_volumetric_features
        feature_vol = torch.cat((target_feature_vol, reference_feature_vol), dim=1)
        return feature_vol

    def forward(self, output_xyz_pts, spatial_relation_name, **kwargs):
        """Return per-point localization scores,
        shape [batch, num_descs, num_output_pts]."""
        batch_size, num_descs = np.array(spatial_relation_name).T.shape
        feature_vol = self.get_feature_vol(num_descs=num_descs, **kwargs)
        num_output_pts = output_xyz_pts.shape[-2]
        sampled_locator_feature_pts = self.spatial_sampler(
            features_vol=feature_vol,
            virtual_grid=self.completion_net.vg,
            query_points=output_xyz_pts.view(batch_size * num_descs, num_output_pts, 3),
        )
        # region_pointing_features.shape BATCH x NUMDESC x WORD
        region_pointing_features = self.get_region_pointing_features(
            spatial_relation_name=spatial_relation_name
        )
        return self.pointer(
            key=sampled_locator_feature_pts,
            query=region_pointing_features.contiguous().view(
                batch_size * num_descs, 1, -1
            ),
        ).view(batch_size, num_descs, num_output_pts)
class SemanticAwareVOOL(SemAbsVOOL):
    """VOOL variant that conditions the relation embedding on CLIP text
    features of the target and reference object names."""

    def __init__(self, pointing_dim: int, clip_hidden_dim=512, **kwargs):
        super().__init__(output_dim=pointing_dim, pointing_dim=pointing_dim, **kwargs)
        # fuses (target CLIP feat, reference CLIP feat, relation embedding)
        self.mlp = Linear(clip_hidden_dim * 2 + pointing_dim, pointing_dim)

    def get_region_pointing_features(
        self, target_obj_name, reference_obj_name, **kwargs
    ):
        """Build per-description pointing features from object-name CLIP
        embeddings fused with the base relation embeddings."""
        # NOTE(review): the relation-embedding lookup (via super()) happens
        # inside this no_grad block, so relation_embeddings receive no
        # gradients through this path — confirm that is intended.
        with torch.no_grad():
            target_obj_name = np.array(target_obj_name).T
            reference_obj_name = np.array(reference_obj_name).T
            batch_size, num_descs = target_obj_name.shape
            target_obj_feature_names = torch.from_numpy(
                ClipWrapper.get_clip_text_feature(target_obj_name.reshape(-1))
            ).to(self.device)
            target_obj_feature_names = target_obj_feature_names.view(
                batch_size, num_descs, -1
            )
            reference_obj_feature_names = torch.from_numpy(
                ClipWrapper.get_clip_text_feature(reference_obj_name.reshape(-1))
            ).to(self.device)
            reference_obj_feature_names = reference_obj_feature_names.view(
                batch_size, num_descs, -1
            )
            region_pointing_features = super().get_region_pointing_features(**kwargs)
        return self.mlp(
            torch.cat(
                (
                    target_obj_feature_names,
                    reference_obj_feature_names,
                    region_pointing_features,
                ),
                dim=-1,
            )
        )

    def forward(self, input_rgb_pts, spatial_relation_name, **kwargs):
        """Return per-point localization scores,
        shape [batch, num_descs, num_output_pts]."""
        # prepare inputs
        batch_size, num_desc, _, _ = input_rgb_pts.shape
        num_output_pts = kwargs["output_xyz_pts"].shape[-2]
        # completion net decodes locator features directly from RGB points
        sampled_locator_feature_pts = self.completion_net(
            input_feature_pts=input_rgb_pts, **kwargs
        )
        region_pointing_features = self.get_region_pointing_features(
            spatial_relation_name=spatial_relation_name, **kwargs
        )
        return self.pointer(
            key=sampled_locator_feature_pts.view(
                batch_size * num_desc, num_output_pts, -1
            ),
            query=region_pointing_features.contiguous().view(
                batch_size * num_desc, 1, -1
            ),
        ).view(batch_size, num_desc, -1)
class ClipSpatialVOOL(Module):
    """VOOL baseline that completes a single description-level saliency
    volume and decodes localization logits from it directly."""

    def __init__(self, device: str, decoder_concat_xyz_pts: bool, **kwargs):
        super().__init__()
        # persisted training-step counter
        self.register_buffer("steps", torch.zeros(1))
        self.device = device
        self.completion_net = SemAbs3D(device=device, **kwargs).to(device)
        self.spatial_sampler = ImplicitVolumetricDecoder(
            hidden_size=kwargs["unet_num_channels"],
            output_dim=1,
            concat_xyz_pts=decoder_concat_xyz_pts,
        )

    def get_feature_vol(
        self,
        input_xyz_pts,
        input_description_saliency_pts,
        tsdf_vol,
        num_descs,
        **kwargs
    ):
        """Run the completion net for its cached feature-volume side effect
        and return that volume. The query points are placeholders."""
        placeholder_pts = torch.zeros_like(input_xyz_pts)[..., None, 0:1, :].repeat(
            1, num_descs, 1, 1
        )
        self.completion_net(
            input_xyz_pts=input_xyz_pts,
            input_feature_pts=input_description_saliency_pts,
            tsdf_vol=tsdf_vol,
            output_xyz_pts=placeholder_pts,
        )
        return self.completion_net.visual_volumetric_features

    def forward(self, output_xyz_pts, spatial_relation_name, **kwargs):
        """Return per-point logits, shape [batch, num_descs, num_output_pts]."""
        batch_size, num_descs = np.array(spatial_relation_name).T.shape
        feature_vol = self.get_feature_vol(num_descs=num_descs, **kwargs)
        num_output_pts = output_xyz_pts.shape[-2]
        queries = output_xyz_pts.view(batch_size * num_descs, num_output_pts, 3)
        logits = self.spatial_sampler(
            features_vol=feature_vol,
            virtual_grid=self.completion_net.vg,
            query_points=queries,
        )
        return logits.view(batch_size, num_descs, num_output_pts)
| 25,154 | 36.047128 | 88 | py |
semantic-abstraction | semantic-abstraction-main/eval.py | import pandas as pd
import numpy as np
from tqdm import tqdm
import torch
import os
import pickle
from dataset import ObjectLocalizationDataset, SceneCompletionDataset
from train_vool import get_losses as vool_get_losses, approach as vool_approaches
from train_ovssc import get_losses as ovssc_get_losses, approach as ovssc_approaches
import utils
from torch.utils.data import DataLoader
import pandas as pd
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
if __name__ == "__main__":
    # Evaluate a trained OVSSC / VOOL checkpoint on the non-train splits and
    # dump per-sample statistics next to the checkpoint file.
    parser = utils.config_parser()
    parser.add_argument("--task", choices=["ovssc", "vool"], required=True)
    args = parser.parse_args()
    # restore the training-time configuration saved alongside the checkpoint,
    # keeping eval-specific settings from the command line
    with open(os.path.dirname(args.load) + "/args.pkl", "rb") as file:
        exp_args = pickle.load(file)
    for arg in vars(exp_args):
        if any(arg == s for s in ["device", "file_path", "load", "gpus", "task"]):
            continue
        setattr(args, arg, getattr(exp_args, arg))
    # force evaluation-mode settings
    args.domain_randomization = False
    args.scene_bounds = torch.tensor(args.scene_bounds)
    args.batch_size = 1
    args.num_workers = 8
    args.balance_spatial_sampling = False
    args.detailed_analysis = True
    ddp = len(args.gpus) > 1
    approaches = ovssc_approaches if args.task == "ovssc" else vool_approaches
    dataset_class = (
        SceneCompletionDataset if args.task == "ovssc" else ObjectLocalizationDataset
    )
    exp_dict = utils.setup_experiment(
        args=args,
        net_class=approaches[args.approach],
        dataset_class=dataset_class,
        split_file_path=args.file_path
        + ("/vool_split.pkl" if args.task == "vool" else "/ssc_split.pkl"),
        return_vis=True,
        ddp=ddp,
    )
    net = exp_dict["net"]
    net.eval()
    # BUG FIX: `net.requires_grad = False` only set a plain Python attribute
    # on the module and did not freeze anything; requires_grad_(False)
    # actually disables gradients on all parameters.
    net.requires_grad_(False)
    epoch = exp_dict["start_epoch"]
    eval_detailed_stats = pd.DataFrame()
    with torch.no_grad():
        for split, dataset in exp_dict["datasets"].items():
            if split == "train":
                continue
            sampler = None
            if ddp:
                sampler = DistributedSampler(
                    dataset=dataset, shuffle=False, drop_last=False
                )
                sampler.set_epoch(0)
            loader = DataLoader(
                dataset=dataset,
                num_workers=args.num_workers,
                batch_size=1,
                sampler=sampler,
            )
            detailed_stats = utils.loop(
                net=net,
                loader=loader,
                get_losses_fn=ovssc_get_losses
                if args.task == "ovssc"
                else vool_get_losses,
                **{
                    **vars(args),
                    "optimizer": None,
                    "lr_scheduler": None,
                    "cutoffs": np.arange(-2.5, -0.0, 0.1),
                    "pbar": tqdm(
                        total=len(loader),
                        dynamic_ncols=True,
                        unit="batch",
                        postfix=f"| {split.upper()} ",
                    ),
                    "detailed_analysis": True,
                },
            )
            detailed_stats["epoch"] = [epoch] * len(detailed_stats)
            detailed_stats["split"] = [split] * len(detailed_stats)
            eval_detailed_stats = pd.concat([eval_detailed_stats, detailed_stats])
    # only rank 0 writes the aggregated stats under DDP
    if (ddp and dist.get_rank() == 0) or not ddp:
        stats_path = os.path.splitext(args.load)[0] + "_eval_stats.pkl"
        eval_detailed_stats.to_pickle(stats_path)
        print("dumped stats to ", stats_path)
| 3,625 | 37.574468 | 86 | py |
semantic-abstraction | semantic-abstraction-main/unet3d.py | """
Code from the 3D UNet implementation:
https://github.com/wolny/pytorch-3dunet/
"""
import importlib
import torch
import torch.nn as nn
from torch.nn import functional as F
from functools import partial
def number_of_features_per_level(init_channel_number, num_levels):
    """Return per-level feature-map counts, doubling at each level:
    [c, 2c, 4c, ...] for `num_levels` entries."""
    feature_counts = []
    channels = init_channel_number
    for _ in range(num_levels):
        feature_counts.append(channels)
        channels = channels * 2
    return feature_counts
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1):
    """Build a Conv3d; padding defaults to 1 so 3x3x3 kernels preserve
    spatial size."""
    return nn.Conv3d(
        in_channels,
        out_channels,
        kernel_size,
        padding=padding,
        bias=bias,
    )
def create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=1):
    """
    Assemble the (name, module) pairs making up one conv layer with optional
    non-linearity and batchnorm/groupnorm.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        order (string): layer order, e.g.
            'cr' -> conv + ReLU
            'gcr' -> groupnorm + conv + ReLU
            'cl' -> conv + LeakyReLU
            'ce' -> conv + ELU
            'bcr' -> batchnorm + conv + ReLU
        num_groups (int): number of groups for the GroupNorm
        padding (int): zero-padding added to the input

    Return:
        list of (name, module) tuples
    """
    assert "c" in order, "Conv layer MUST be present"
    assert (
        order[0] not in "rle"
    ), "Non-linearity cannot be the first operation in the layer"
    layers = []
    for position, token in enumerate(order):
        if token == "r":
            layers.append(("ReLU", nn.ReLU(inplace=True)))
        elif token == "l":
            layers.append(
                ("LeakyReLU", nn.LeakyReLU(negative_slope=0.1, inplace=True))
            )
        elif token == "e":
            layers.append(("ELU", nn.ELU(inplace=True)))
        elif token == "c":
            # learnable bias only in the absence of batchnorm/groupnorm
            bias = not ("g" in order or "b" in order)
            layers.append(
                (
                    "conv",
                    conv3d(
                        in_channels, out_channels, kernel_size, bias, padding=padding
                    ),
                )
            )
        elif token == "g":
            # a norm before the conv sees in_channels, after it out_channels
            if position < order.index("c"):
                num_channels = in_channels
            else:
                num_channels = out_channels
            # use only one group if the given number of groups is greater than the number of channels
            if num_channels < num_groups:
                num_groups = 1
            assert (
                num_channels % num_groups == 0
            ), f"Expected number of channels in input to be divisible by num_groups. num_channels={num_channels}, num_groups={num_groups}"
            layers.append(
                (
                    "groupnorm",
                    nn.GroupNorm(num_groups=num_groups, num_channels=num_channels),
                )
            )
        elif token == "b":
            bn_channels = in_channels if position < order.index("c") else out_channels
            layers.append(("batchnorm", nn.BatchNorm3d(bn_channels)))
        else:
            raise ValueError(
                f"Unsupported layer type '{token}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
            )
    return layers
class SingleConv(nn.Sequential):
    """One Conv3d plus optional non-linearity / normalization, assembled in
    the order given by `order` (see `create_conv`).

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size (int): size of the convolving kernel
        order (string): layer order, e.g. 'crg' -> conv + ReLU + groupnorm
        num_groups (int): number of groups for the GroupNorm
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=3,
        order="crg",
        num_groups=8,
        padding=1,
    ):
        super(SingleConv, self).__init__()
        modules = create_conv(
            in_channels, out_channels, kernel_size, order, num_groups, padding=padding
        )
        for name, module in modules:
            self.add_module(name, module)
class DoubleConv(nn.Sequential):
    """Two consecutive SingleConv layers.

    On the encoder path the first conv expands to
    max(in_channels, out_channels // 2); on the decoder path both convs use
    out_channels. Padded convolutions keep (H, W, D) unchanged so no
    cropping is needed on the decoder path.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        encoder (bool): True on the encoder path, False on the decoder path
        kernel_size (int): size of the convolving kernel
        order (string): layer order passed through to SingleConv
        num_groups (int): number of groups for the GroupNorm
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        encoder,
        kernel_size=3,
        order="crg",
        num_groups=8,
    ):
        super(DoubleConv, self).__init__()
        if encoder:
            # grow channels in two steps, never shrinking below the input
            mid_channels = max(in_channels, out_channels // 2)
            first = (in_channels, mid_channels)
            second = (mid_channels, out_channels)
        else:
            # decoder: drop to out_channels in the first conv already
            first = (in_channels, out_channels)
            second = (out_channels, out_channels)
        self.add_module(
            "SingleConv1",
            SingleConv(first[0], first[1], kernel_size, order, num_groups),
        )
        self.add_module(
            "SingleConv2",
            SingleConv(second[0], second[1], kernel_size, order, num_groups),
        )
class ExtResNetBlock(nn.Module):
    """SingleConv followed by a residual pair of SingleConvs.

    The first conv adapts the channel count; the residual branch keeps it.
    The non-linearity of the last conv is stripped and applied after the
    residual addition instead (order 'cge' puts ELU after groupnorm).
    Motivated by: https://arxiv.org/pdf/1706.00120.pdf
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=3,
        order="cge",
        num_groups=8,
        **kwargs,
    ):
        super(ExtResNetBlock, self).__init__()
        # channel-adapting convolution
        self.conv1 = SingleConv(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            order=order,
            num_groups=num_groups,
        )
        # first half of the residual branch
        self.conv2 = SingleConv(
            out_channels,
            out_channels,
            kernel_size=kernel_size,
            order=order,
            num_groups=num_groups,
        )
        # strip all non-linearity tokens ('r', 'e', 'l') so conv3 ends
        # without an activation; it is applied after the residual addition
        stripped_order = "".join(ch for ch in order if ch not in "rel")
        self.conv3 = SingleConv(
            out_channels,
            out_channels,
            kernel_size=kernel_size,
            order=stripped_order,
            num_groups=num_groups,
        )
        # standalone activation applied after the residual sum
        if "l" in order:
            self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        elif "e" in order:
            self.non_linearity = nn.ELU(inplace=True)
        else:
            self.non_linearity = nn.ReLU(inplace=True)

    def forward(self, x):
        # channel adaptation; its output is also the residual
        out = self.conv1(x)
        residual = out
        out = self.conv3(self.conv2(out))
        out += residual
        return self.non_linearity(out)
class Encoder(nn.Module):
    """Optional pooling followed by a basic conv module on the encoder path.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        conv_kernel_size (int): size of the convolving kernel
        apply_pooling (bool): if True pool before the conv module
        pool_kernel_size (tuple): pooling window size
        pool_type (str): 'max' or 'avg'
        basic_module (nn.Module): DoubleConv or ExtResNetBlock
        conv_layer_order (string): layer order for the conv module
        num_groups (int): number of groups for the GroupNorm
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        conv_kernel_size=3,
        apply_pooling=True,
        pool_kernel_size=(2, 2, 2),
        pool_type="max",
        basic_module=DoubleConv,
        conv_layer_order="crg",
        num_groups=8,
    ):
        super(Encoder, self).__init__()
        assert pool_type in ["max", "avg"]
        self.pooling = None
        if apply_pooling:
            pool_cls = nn.MaxPool3d if pool_type == "max" else nn.AvgPool3d
            self.pooling = pool_cls(kernel_size=pool_kernel_size)
        self.basic_module = basic_module(
            in_channels=in_channels,
            out_channels=out_channels,
            encoder=True,
            kernel_size=conv_kernel_size,
            order=conv_layer_order,
            num_groups=num_groups,
        )

    def forward(self, x):
        if self.pooling is not None:
            x = self.pooling(x)
        return self.basic_module(x)
class Decoder(nn.Module):
    """Upsampling followed by a basic conv module on the decoder path.

    DoubleConv -> interpolation upsampling + concatenation joining;
    any other basic module (e.g. ExtResNetBlock) -> transposed-conv
    upsampling + summation joining.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size (int): size of the convolving kernel
        scale_factor (tuple): upsampling factor; must reverse the matching
            encoder's MaxPool3d
        basic_module (nn.Module): DoubleConv or ExtResNetBlock
        conv_layer_order (string): layer order for the conv module
        num_groups (int): number of groups for the GroupNorm
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=3,
        scale_factor=(2, 2, 2),
        basic_module=DoubleConv,
        conv_layer_order="crg",
        num_groups=8,
        mode="nearest",
    ):
        super(Decoder, self).__init__()
        use_transposed_conv = basic_module != DoubleConv
        self.upsampling = Upsampling(
            transposed_conv=use_transposed_conv,
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            scale_factor=scale_factor,
            mode=mode,
        )
        # DoubleConv joins by channel concatenation, residual blocks by sum
        self.joining = partial(self._joining, concat=not use_transposed_conv)
        if use_transposed_conv:
            # summation joining keeps the channel count at out_channels
            in_channels = out_channels
        self.basic_module = basic_module(
            in_channels=in_channels,
            out_channels=out_channels,
            encoder=False,
            kernel_size=kernel_size,
            order=conv_layer_order,
            num_groups=num_groups,
        )

    def forward(self, encoder_features, x):
        x = self.upsampling(encoder_features=encoder_features, x=x)
        x = self.joining(encoder_features, x)
        return self.basic_module(x)

    @staticmethod
    def _joining(encoder_features, x, concat):
        if concat:
            return torch.cat((encoder_features, x), dim=1)
        return encoder_features + x
class Upsampling(nn.Module):
    """Upsample `x` to the spatial size of the matching encoder features,
    via either interpolation or a learned ConvTranspose3d.

    Args:
        transposed_conv (bool): True -> ConvTranspose3d, False -> interpolation
        in_channels (int): input channels for the transposed conv
        out_channels (int): output channels for the transposed conv
        kernel_size (int or tuple): size of the convolving kernel
        scale_factor (int or tuple): stride of the transposed conv
        mode (str): interpolation algorithm:
            'nearest' | 'linear' | 'bilinear' | 'trilinear' | 'area'
    """

    def __init__(
        self,
        transposed_conv,
        in_channels=None,
        out_channels=None,
        kernel_size=3,
        scale_factor=(2, 2, 2),
        mode="nearest",
    ):
        super(Upsampling, self).__init__()
        if transposed_conv:
            # output size reverses the encoder's MaxPool3d:
            # D_out = (D_in - 1)*stride - 2*padding + kernel_size + output_padding
            self.upsample = nn.ConvTranspose3d(
                in_channels,
                out_channels,
                kernel_size=kernel_size,
                stride=scale_factor,
                padding=1,
            )
        else:
            self.upsample = partial(self._interpolate, mode=mode)

    def forward(self, encoder_features, x):
        # target only the spatial dims of the encoder features
        target_size = encoder_features.size()[2:]
        return self.upsample(x, target_size)

    @staticmethod
    def _interpolate(x, size, mode):
        return F.interpolate(x, size=size, mode=mode)
class FinalConv(nn.Sequential):
    """SingleConv followed by a 1x1x1 conv that maps to `out_channels`.

    The SingleConv keeps `in_channels`; only the final 1x1x1 convolution
    changes the channel count.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size (int): size of the convolving kernel
        order (string): layer order, e.g. 'crg' -> conv + ReLU + groupnorm
        num_groups (int): number of groups for the GroupNorm
    """

    def __init__(
        self, in_channels, out_channels, kernel_size=3, order="crg", num_groups=8
    ):
        super(FinalConv, self).__init__()
        self.add_module(
            "SingleConv",
            SingleConv(in_channels, in_channels, kernel_size, order, num_groups),
        )
        # 1x1x1 conv reduces the channel count to out_channels
        self.add_module("final_conv", nn.Conv3d(in_channels, out_channels, 1))
class Abstract3DUNet(nn.Module):
    """
    Base class for standard and residual UNet.
    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output segmentation masks;
            Note that that the of out_channels might correspond to either
            different semantic classes or to different binary segmentation mask.
            It's up to the user of the class to interpret the out_channels and
            use the proper loss criterion during training (i.e. CrossEntropyLoss (multi-class)
            or BCEWithLogitsLoss (two-class) respectively)
        f_maps (int, tuple): number of feature maps at each level of the encoder; if it's an integer the number
            of feature maps is given by the geometric progression: f_maps ^ k, k=1,2,3,4
        final_sigmoid (bool): if True apply element-wise nn.Sigmoid after the
            final 1x1 convolution, otherwise apply nn.Softmax. MUST be True if nn.BCELoss (two-class) is used
            to train the model. MUST be False if nn.CrossEntropyLoss (multi-class) is used to train the model.
        basic_module: basic model for the encoder/decoder (DoubleConv, ExtResNetBlock, ....)
        layer_order (string): determines the order of layers
            in `SingleConv` module. e.g. 'crg' stands for Conv3d+ReLU+GroupNorm3d.
            See `SingleConv` for more info
        f_maps (int, tuple): if int: number of feature maps in the first conv layer of the encoder (default: 64);
            if tuple: number of feature maps at each level
        num_groups (int): number of groups for the GroupNorm
        num_levels (int): number of levels in the encoder/decoder path (applied only if f_maps is an int)
        is_segmentation (bool): if True (semantic segmentation problem) Sigmoid/Softmax normalization is applied
            after the final convolution; if False (regression problem) the normalization layer is skipped at the end
        testing (bool): if True (testing mode) the `final_activation` (if present, i.e. `is_segmentation=true`)
            will be applied as the last operation during the forward pass; if False the model is in training mode
            and the `final_activation` (even if present) won't be applied; default: False
    """
    def __init__(
        self,
        in_channels,
        out_channels,
        final_sigmoid,
        basic_module,
        f_maps=64,
        layer_order="gcr",
        num_groups=8,
        num_levels=4,
        is_segmentation=False,
        testing=False,
        **kwargs,
    ):
        super(Abstract3DUNet, self).__init__()
        self.testing = testing
        if isinstance(f_maps, int):
            # Expand a scalar into one feature count per level (geometric progression).
            f_maps = number_of_features_per_level(f_maps, num_levels=num_levels)
        # create encoder path consisting of Encoder modules. Depth of the encoder is equal to `len(f_maps)`
        encoders = []
        for i, out_feature_num in enumerate(f_maps):
            if i == 0:
                # First encoder does not downsample the input.
                encoder = Encoder(
                    in_channels,
                    out_feature_num,
                    apply_pooling=False,
                    basic_module=basic_module,
                    conv_layer_order=layer_order,
                    num_groups=num_groups,
                )
            else:
                # TODO: adapt for anisotropy in the data, i.e. use proper pooling kernel to make the data isotropic after 1-2 pooling operations
                # currently pools with a constant kernel: (2, 2, 2)
                encoder = Encoder(
                    f_maps[i - 1],
                    out_feature_num,
                    basic_module=basic_module,
                    conv_layer_order=layer_order,
                    num_groups=num_groups,
                )
            encoders.append(encoder)
        self.encoders = nn.ModuleList(encoders)
        # create decoder path consisting of the Decoder modules. The length of the decoder is equal to `len(f_maps) - 1`
        decoders = []
        reversed_f_maps = list(reversed(f_maps))
        for i in range(len(reversed_f_maps) - 1):
            if basic_module == DoubleConv:
                # Concatenation joining: decoder input sees skip + upsampled channels.
                in_feature_num = reversed_f_maps[i] + reversed_f_maps[i + 1]
            else:
                # Summation joining (residual variant): channel count unchanged.
                in_feature_num = reversed_f_maps[i]
            out_feature_num = reversed_f_maps[i + 1]
            # TODO: if non-standard pooling was used, make sure to use correct striding for transpose conv
            # currently strides with a constant stride: (2, 2, 2)
            decoder = Decoder(
                in_feature_num,
                out_feature_num,
                basic_module=basic_module,
                conv_layer_order=layer_order,
                num_groups=num_groups,
            )
            decoders.append(decoder)
        self.decoders = nn.ModuleList(decoders)
        # in the last layer a 1×1 convolution reduces the number of output
        # channels to the number of labels
        self.final_conv = nn.Conv3d(f_maps[0], out_channels, 1)
        if is_segmentation:
            # semantic segmentation problem
            if final_sigmoid:
                self.final_activation = nn.Sigmoid()
            else:
                self.final_activation = nn.Softmax(dim=1)
        else:
            # regression problem
            self.final_activation = None
    def forward(self, x):
        # encoder part
        encoders_features = []
        for encoder in self.encoders:
            x = encoder(x)
            # reverse the encoder outputs to be aligned with the decoder
            encoders_features.insert(0, x)
        # remove the last encoder's output from the list
        # !!remember: it's the 1st in the list
        encoders_features = encoders_features[1:]
        # decoder part
        for decoder, encoder_features in zip(self.decoders, encoders_features):
            # pass the output from the corresponding encoder and the output
            # of the previous decoder
            x = decoder(encoder_features, x)
        x = self.final_conv(x)
        # apply final_activation (i.e. Sigmoid or Softmax) only during prediction. During training the network outputs
        # logits and it's up to the user to normalize it before visualising with tensorboard or computing validation metric
        if self.testing and self.final_activation is not None:
            x = self.final_activation(x)
        return x
class UNet3D(Abstract3DUNet):
    """
    Standard 3D U-Net from
    `"3D U-Net: Learning Dense Volumetric Segmentation from Sparse Annotation"
    <https://arxiv.org/pdf/1606.06650.pdf>`.
    Thin wrapper over :class:`Abstract3DUNet` that fixes the building block to
    ``DoubleConv`` (the decoder upsamples with nearest-neighbor interpolation).
    """
    def __init__(
        self,
        in_channels,
        out_channels,
        final_sigmoid=True,
        f_maps=64,
        layer_order="gcr",
        num_groups=8,
        num_levels=4,
        is_segmentation=True,
        **kwargs,
    ):
        # Forward everything to the base class, pinning basic_module=DoubleConv.
        super().__init__(
            basic_module=DoubleConv,
            in_channels=in_channels,
            out_channels=out_channels,
            final_sigmoid=final_sigmoid,
            f_maps=f_maps,
            layer_order=layer_order,
            num_groups=num_groups,
            num_levels=num_levels,
            is_segmentation=is_segmentation,
            **kwargs,
        )
class ResidualUNet3D(Abstract3DUNet):
    """
    Residual 3D U-Net based on https://arxiv.org/pdf/1706.00120.pdf.
    Fixes the building block to ``ExtResNetBlock``; skip connections are
    joined by summation instead of concatenation, and the decoder upsamples
    with transposed convolutions (watch out for block artifacts). Since the
    network is effectively residual, it can in theory be made deeper — note
    the default of 5 levels rather than 4.
    """
    def __init__(
        self,
        in_channels,
        out_channels,
        f_maps=64,
        num_groups=8,
        num_levels=5,
        final_sigmoid=False,
        layer_order="gcr",
        is_segmentation=False,
        **kwargs,
    ):
        # Delegate construction to the base class with basic_module pinned.
        super().__init__(
            basic_module=ExtResNetBlock,
            in_channels=in_channels,
            out_channels=out_channels,
            final_sigmoid=final_sigmoid,
            f_maps=f_maps,
            layer_order=layer_order,
            num_groups=num_groups,
            num_levels=num_levels,
            is_segmentation=is_segmentation,
            **kwargs,
        )
| 25,729 | 36.289855 | 144 | py |
semantic-abstraction | semantic-abstraction-main/visualize.py | import io
import logging
from pathlib import Path
import textwrap
from typing import Any, Dict, List, Tuple
from skimage.measure import marching_cubes
import numpy as np
import torch
import os
import pickle
from net import SemAbs3D, SemAbsVOOL
from point_cloud import (
check_pts_in_frustum,
filter_pts_bounds,
get_pointcloud,
meshwrite,
)
import utils
import os
from utils import config_parser
from CLIP.clip import ClipWrapper, saliency_configs
from fusion import TSDFVolume
import typer
from matplotlib import pyplot as plt
from rich.progress import Progress
import open3d as o3d
from transforms3d import affines, euler
import imageio
from PIL import Image
import cv2
# Typer CLI application; subcommands are registered below via @app.command().
app = typer.Typer(pretty_exceptions_enable=False)
# An (x, y, z) world-space coordinate.
Point3D = Tuple[float, float, float]
def visualize_relevancies(
    rgb: np.ndarray,
    relevancies: np.ndarray,
    obj_classes: List[str],
    dump_path: str,
):
    """Save a grid of per-class relevancy overlays to ``dump_path``.

    Each panel shows ``rgb`` with that class's relevancy map blended on top
    using the ``jet`` colormap; low-relevancy pixels get a more opaque
    overlay, so salient regions show through to the raw image.
    """
    num_cols = int(np.ceil(len(obj_classes) / 4))
    fig, axes = plt.subplots(4, num_cols, figsize=(15, 15))
    axes = axes.flatten()
    for ax in axes:
        ax.axis("off")
    cmap = plt.get_cmap("jet")
    # Fixed normalization range so panels are comparable across classes.
    vmin, vmax = 0.000, 0.01
    for ax, heat, name in zip(axes, relevancies, obj_classes):
        ax.imshow(rgb)
        ax.set_title(name, fontsize=12)
        normed = np.clip((heat - vmin) / (vmax - vmin), a_min=0.0, a_max=1.0)
        overlay = cmap(normed)
        # Alpha decreases with relevancy (up to 0.7 where relevancy is lowest).
        overlay[..., -1] = (1 - normed) * 0.7
        ax.imshow(overlay)
    plt.tight_layout(pad=0)
    plt.savefig(dump_path)
    plt.close(fig)
def prep_data(
    data_pickle_path: str,
    scene_bounds: Tuple[Point3D, Point3D],
    subtract_mean: bool,
    dump_path: str,
):
    """Load a scene pickle and build the network input batch.

    Computes CLIP relevancy maps for every object class mentioned in the
    OVSSC classes or the VOOL descriptions, unprojects the depth image into
    an in-bounds point cloud, and gathers per-point relevancy features.
    Also dumps a relevancy visualization grid under ``dump_path/<scene_id>/``.

    Returns:
        dict: batch with point clouds, per-class/per-description relevancy
        features, camera parameters and human-readable descriptions.
    """
    scene_id = data_pickle_path.split("/")[-1].split(".pkl")[0]
    data = pickle.load(open(data_pickle_path, "rb"))
    rgb = data["rgb"]
    assert rgb.dtype == np.uint8
    depth = data["depth"]
    assert depth.dtype == np.float32
    cam_intr = data["cam_intr"]
    # NOTE(review): these two asserts re-check depth.dtype; presumably they
    # were meant to validate cam_intr / cam_extr — confirm before changing.
    assert depth.dtype == np.float32
    cam_extr = data["cam_extr"]
    assert depth.dtype == np.float32
    scene_dump_path = f"{dump_path}/{scene_id}"
    if not os.path.exists(scene_dump_path):
        Path(scene_dump_path).mkdir(parents=True, exist_ok=True)
    if "img_shape" in data:
        # Optional per-scene resize stored in the pickle.
        rgb = cv2.resize(rgb, data["img_shape"])
        depth = cv2.resize(depth, data["img_shape"])
    # Each description is a (target, relation, reference) triple.
    descriptions = data["descriptions"]
    target_obj_classes = [d[0] for d in descriptions]
    spatial_relation_names = [d[1] for d in descriptions]
    reference_obj_classes = [d[2] for d in descriptions]
    ovssc_obj_classes = data["ovssc_obj_classes"]
    # Deduplicated set of every class we need a relevancy map for.
    relevancy_keys = list(
        set(ovssc_obj_classes).union(target_obj_classes).union(reference_obj_classes)
    )
    h, w, c = rgb.shape
    # CLIP saliency maps, scaled by 50 (magnitude expected by the network).
    relevancies = (
        ClipWrapper.get_clip_saliency(
            img=rgb,
            text_labels=np.array(relevancy_keys),
            prompts=["a photograph of a {} in a home."],
            **saliency_configs["ours"](h),
        )[0]
        * 50
    )
    assert len(relevancy_keys) == len(relevancies)
    # Unproject depth to world-space points and keep only in-bounds ones.
    input_xyz_pts = torch.from_numpy(
        get_pointcloud(depth, None, cam_intr, cam_extr)[0].astype(np.float32)
    )
    in_bounds_mask = filter_pts_bounds(input_xyz_pts, np.array(scene_bounds)).bool()
    input_xyz_pts = input_xyz_pts[in_bounds_mask]
    input_rgb_pts = rgb.reshape(-1, 3)[in_bounds_mask.cpu().numpy()]
    if subtract_mean:
        # Remove the per-pixel mean over classes to sharpen class contrast.
        relevancies -= relevancies.mean(dim=0, keepdim=True)
    visualize_relevancies(
        rgb=rgb,
        relevancies=relevancies.cpu().numpy() / 50,
        obj_classes=relevancy_keys,
        dump_path=scene_dump_path + "/relevancies.png",
    )
    # Per-point relevancy features: one row per OVSSC class ...
    ovssc_input_feature_pts = torch.stack(
        [
            relevancies[relevancy_keys.index(obj_class)].view(-1)[in_bounds_mask]
            for obj_class in ovssc_obj_classes
        ]
    )
    # ... and one row per description for the target / reference objects.
    input_target_saliency_pts = torch.stack(
        [
            relevancies[relevancy_keys.index(obj_class)].view(-1)[in_bounds_mask]
            for obj_class in target_obj_classes
        ]
    )
    input_reference_saliency_pts = torch.stack(
        [
            relevancies[relevancy_keys.index(obj_class)].view(-1)[in_bounds_mask]
            for obj_class in reference_obj_classes
        ]
    )
    batch = {
        "input_xyz_pts": input_xyz_pts,
        "input_rgb_pts": input_rgb_pts,
        "relevancies": relevancies,
        "input_feature_pts": ovssc_input_feature_pts,
        "ovssc_obj_classes": ovssc_obj_classes,
        "rgb": rgb,
        "depth": depth,
        "cam_intr": cam_intr,
        "cam_extr": cam_extr,
        "scene_id": scene_id,
        "input_target_saliency_pts": input_target_saliency_pts,
        "input_reference_saliency_pts": input_reference_saliency_pts,
        "spatial_relation_name": spatial_relation_names,
        "tsdf_vol": None,
        "descriptions": [f"the {d[0]} {d[1]} the {d[2]}" for d in data["descriptions"]],
    }
    return batch
def process_batch_ovssc(
    net: SemAbs3D,
    batch: Dict[str, Any],
    scene_bounds: Tuple[Point3D, Point3D],
    device: str,
    num_input_pts: int,
    sampling_shape: Tuple[int, int, int] = (240, 240, 240),
    num_pts_per_pass: int = int(2**20),
    cutoff: float = -3.0,
) -> Dict[str, torch.Tensor]:
    """Run OVSSC completion over a dense query grid, one class at a time.

    Queries the network in chunks of ``num_pts_per_pass`` grid points (to
    bound memory), then post-processes: a point is labeled with the argmax
    class unless all class logits fall below ``cutoff`` (treated as empty),
    lies outside the camera frustum, or is observed free space per a TSDF
    fused from the input view.

    Returns:
        dict mapping class label -> binary occupancy volume of shape
        ``sampling_shape`` (numpy float 0/1).
    """
    grid_points = get_sample_points(
        sampling_shape=sampling_shape, scene_bounds=scene_bounds, device=device
    )
    assert filter_pts_bounds(
        grid_points.cpu().numpy(), bounds=np.array(scene_bounds)
    ).all()
    label_outputs = {}
    with Progress() as progress:
        inference_task = progress.add_task(
            "Running completion", total=len(batch["ovssc_obj_classes"])
        )
        for class_idx, obj_class in enumerate(batch["ovssc_obj_classes"]):
            label_outputs[obj_class] = []
            # Chunk the query grid to avoid running out of GPU memory.
            for j in np.arange(
                0,
                ((len(grid_points) // num_pts_per_pass) + 1) * num_pts_per_pass,
                num_pts_per_pass,
            ):
                if len(grid_points[j : j + num_pts_per_pass, :]) == 0:
                    break
                output_xyz_pts = grid_points[j : j + num_pts_per_pass, :][
                    None, None, ...
                ]
                input_xyz_pts = batch["input_xyz_pts"]
                # Random subsample of the input cloud (with replacement).
                indices = np.random.choice(input_xyz_pts.shape[-2], size=num_input_pts)
                label_outputs[obj_class].append(
                    net(
                        **{
                            **batch,
                            **{
                                "output_xyz_pts": output_xyz_pts.float().to(device),
                                "input_feature_pts": batch["input_feature_pts"][
                                    None, None, [class_idx], indices, None
                                ].to(device),
                                "input_xyz_pts": input_xyz_pts[..., indices, :]
                                .float()
                                .to(device),
                            },
                        }
                    )
                    .detach()
                    .cpu()
                )
            progress.update(inference_task, advance=1)
    # Stitch the chunked outputs back into one logit volume per class.
    label_outputs = {
        class_idx: torch.cat(patch_output, dim=-1).squeeze().view(*sampling_shape)
        for class_idx, patch_output in label_outputs.items()
    }
    # Fuse the observed view into a TSDF to identify known-free space.
    tsdf_vol = TSDFVolume(
        vol_bnds=np.array(scene_bounds).T,
        voxel_size=(scene_bounds[1][0] - scene_bounds[0][0]) / sampling_shape[0],
    )
    tsdf_vol.integrate(
        color_im=batch["rgb"],
        depth_im=batch["depth"],
        cam_intr=batch["cam_intr"],
        cam_pose=batch["cam_extr"],
    )
    tsdf_vol = tsdf_vol.get_volume()[0]
    logprobs = torch.stack(
        [label_outputs[label] for label in batch["ovssc_obj_classes"]], dim=-1
    )
    prediction = logprobs.argmax(dim=-1)
    # Empty wherever no class clears the cutoff.
    empty_mask = (logprobs < cutoff).all(dim=-1)
    empty_mask = empty_mask.view(*sampling_shape)
    in_frustum_mask = check_pts_in_frustum(
        xyz_pts=grid_points.cpu().numpy(),
        depth=batch["depth"],
        cam_pose=batch["cam_extr"],
        cam_intr=batch["cam_intr"],
    )
    in_frustum_mask = torch.from_numpy(in_frustum_mask).view(*sampling_shape)
    prediction_volumes = {}
    for class_idx, class_label in enumerate(batch["ovssc_obj_classes"]):
        patch_prediction = (prediction == class_idx).float().view(*sampling_shape)
        # Zero out empty, out-of-frustum, and observed-free (TSDF > 0) voxels.
        patch_prediction[empty_mask] = 0.0
        patch_prediction[~in_frustum_mask] = 0.0
        patch_prediction[tsdf_vol > 0.0] = 0.0
        prediction_volumes[class_label] = patch_prediction.cpu().numpy()
    return prediction_volumes
def export_obj(vol, filename, level=0.5):
    """Extract an iso-surface from ``vol`` and write it as a Wavefront OBJ.

    The volume's six boundary faces are set to ``-inf`` so marching cubes
    closes any surface touching the border. Vertices are re-centered about
    the volume midpoint and normalized by the volume extent. If no voxel
    reaches ``level``, nothing is written.

    Args:
        vol (np.ndarray): 3D scalar field (e.g. a per-class occupancy volume).
        filename (str): destination ``.obj`` path.
        level (float): marching-cubes iso-surface threshold.
    """
    # Work on a copy so the caller's array is not clobbered by the -inf border
    # (the original implementation mutated `vol` in place).
    vol = np.asarray(vol).copy()
    vol[:, :, -1] = -np.inf
    vol[:, :, 0] = -np.inf
    vol[:, -1, :] = -np.inf
    vol[:, 0, :] = -np.inf
    vol[-1, :, :] = -np.inf
    vol[0, :, :] = -np.inf
    if (vol < level).all():
        # Nothing above the iso-level: empty prediction, write no file.
        return
    verts, faces, norms, _ = marching_cubes(vol, level=level)
    vol_shape = np.array(vol.shape)
    # Re-center about the volume midpoint, then normalize by the extent.
    verts = (verts - vol_shape / 2) / vol_shape
    # OBJ face indices are 1-based; copy to avoid mutating marching_cubes output.
    faces = faces + 1
    # Context manager guarantees the handle is closed even on a write error.
    with open(filename, "w") as obj_file:
        obj_file.writelines(
            "v %f %f %f\n" % (v[0], v[1], v[2]) for v in verts
        )
        obj_file.writelines(
            "vn %f %f %f\n" % (n[0], n[1], n[2]) for n in norms
        )
        obj_file.writelines(
            "f %d %d %d\n" % (f[0], f[1], f[2]) for f in faces
        )
def get_sample_points(
    sampling_shape: Tuple[int, int, int],
    scene_bounds: Tuple[Point3D, Point3D],
    device: str,
):
    """Return a flat ``(prod(sampling_shape), 3)`` tensor of query points.

    The points form a regular grid spanning ``scene_bounds`` inclusively on
    every axis (``sampling_shape[d]`` samples along axis ``d``), ordered with
    the last axis varying fastest.
    """
    per_axis_idx = [torch.arange(0, n, device=device) for n in sampling_shape]
    idx_grid = torch.stack(
        torch.meshgrid(*per_axis_idx, indexing="ij"), dim=-1
    ).to(device)
    lower = torch.tensor(scene_bounds[0], device=device, dtype=torch.float32)
    upper = torch.tensor(scene_bounds[1], device=device, dtype=torch.float32)
    # Map integer indices [0, n-1] linearly onto [lower, upper] per axis.
    steps = torch.tensor(sampling_shape, device=device, dtype=torch.float32) - 1
    world_pts = idx_grid.to(torch.float32) * ((upper - lower) / steps) + lower
    return world_pts.view(-1, 3)
@app.command()
def ovssc_inference(
    data_pickle_path: str,
    model_ckpt_path: str,
    dump_path: str = "visualization/",
):
    """Run open-vocabulary scene completion on one scene and dump OBJ meshes."""
    # Rebuild the training-time args from the checkpoint's saved args.pkl,
    # keeping only device / file_path / load from the command line.
    args = config_parser().parse_args(
        args=["--load", model_ckpt_path, "--file_path", data_pickle_path]
    )
    with open(os.path.dirname(args.load) + "/args.pkl", "rb") as file:
        exp_args = pickle.load(file)
    for arg in vars(exp_args):
        if any(arg == s for s in ["device", "file_path", "load"]):
            continue
        setattr(args, arg, getattr(exp_args, arg))
    args.domain_randomization = False
    scene_bounds = tuple(args.scene_bounds)
    logging.info("Preparing batch")
    batch = prep_data(
        data_pickle_path=data_pickle_path,
        scene_bounds=scene_bounds,
        subtract_mean=args.subtract_mean_relevancy,
        dump_path=dump_path,
    )
    logging.info(
        f"Fetched {len(batch['ovssc_obj_classes'])} classes: "
        + ", ".join(batch["ovssc_obj_classes"])
    )
    # Round-trip the batch through disk (also leaves a debugging artifact).
    pickle.dump(batch, open("new-input.pkl", "wb"))
    batch = pickle.load(open("new-input.pkl", "rb"))
    if not os.path.exists(f"{dump_path}/{batch['scene_id']}"):
        Path(f"{dump_path}/{batch['scene_id']}").mkdir(parents=True, exist_ok=True)
    net = utils.get_net(net_class=SemAbs3D, **vars(args))[0]
    net.eval()
    prediction_volumes = process_batch_ovssc(
        net=net,
        batch=batch,
        scene_bounds=scene_bounds,
        device=args.device,
        num_input_pts=args.num_input_pts,
    )
    logging.info(f"Dumping meshes to {dump_path}/{batch['scene_id']}")
    for obj_class, vol in prediction_volumes.items():
        try:
            export_obj(
                vol=vol,
                filename=f"{dump_path}/{batch['scene_id']}/{obj_class}.obj",
                level=0.5,
            )
        except RuntimeError as e:
            # marching cubes raises when the level set is empty; keep going.
            print(f"{obj_class} probably empty: {e}")
def process_batch_vool(
    net: SemAbs3D,
    batch: Dict[str, Any],
    scene_bounds: Tuple[Point3D, Point3D],
    device: str,
    num_input_pts: int,
    sampling_shape: Tuple[int, int, int] = (240, 240, 240),
    num_pts_per_pass: int = int(2**20),
) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
    """Run VOOL localization over a dense query grid, per description.

    Mirrors :func:`process_batch_ovssc`, but conditions the network on the
    description's target/reference saliency maps and spatial relation name.

    Returns:
        (dict mapping description -> logit volume of shape ``sampling_shape``,
         the flat ``(N, 3)`` grid of query points).
    """
    grid_points = get_sample_points(
        sampling_shape=sampling_shape, scene_bounds=scene_bounds, device=device
    )
    assert filter_pts_bounds(
        grid_points.cpu().numpy(), bounds=np.array(scene_bounds)
    ).all()
    desc_predictions = {}
    with Progress() as progress:
        inference_task = progress.add_task(
            "Running localization", total=len(batch["descriptions"])
        )
        for desc_idx, desc in enumerate(batch["descriptions"]):
            desc_predictions[desc] = []
            # Chunk the query grid to bound GPU memory per forward pass.
            for j in np.arange(
                0,
                ((len(grid_points) // num_pts_per_pass) + 1) * num_pts_per_pass,
                num_pts_per_pass,
            ):
                if len(grid_points[j : j + num_pts_per_pass, :]) == 0:
                    break
                output_xyz_pts = grid_points[j : j + num_pts_per_pass, :][
                    None, None, ...
                ]
                input_xyz_pts = batch["input_xyz_pts"]
                # Random subsample of the input cloud (with replacement).
                indices = np.random.choice(input_xyz_pts.shape[-2], size=num_input_pts)
                desc_predictions[desc].append(
                    net(
                        **{
                            **batch,
                            **{
                                "output_xyz_pts": output_xyz_pts.float().to(device),
                                "input_target_saliency_pts": batch[
                                    "input_target_saliency_pts"
                                ][None, None, [desc_idx], indices, None].to(device),
                                "input_reference_saliency_pts": batch[
                                    "input_reference_saliency_pts"
                                ][None, None, [desc_idx], indices, None].to(device),
                                "spatial_relation_name": [
                                    [batch["spatial_relation_name"][desc_idx]]
                                ],
                                "input_xyz_pts": input_xyz_pts[..., indices, :]
                                .float()
                                .to(device),
                            },
                        }
                    )
                    .detach()
                    .cpu()
                )
            progress.update(inference_task, advance=1)
    # Stitch the chunked outputs back into one volume per description.
    desc_predictions = {
        desc: torch.cat(patch_output, dim=-1).squeeze().view(*sampling_shape)
        for desc, patch_output in desc_predictions.items()
    }
    return desc_predictions, grid_points
@app.command()
def vool_inference(
    data_pickle_path: str,
    model_ckpt_path: str,
    dump_path: str = "visualization/",
):
    """Run visual object localization on one scene and dump colored PLYs."""
    # Rebuild the training-time args from the checkpoint's saved args.pkl,
    # keeping only device / file_path / load from the command line.
    args = config_parser().parse_args(
        args=["--load", model_ckpt_path, "--file_path", data_pickle_path]
    )
    with open(os.path.dirname(args.load) + "/args.pkl", "rb") as file:
        exp_args = pickle.load(file)
    for arg in vars(exp_args):
        if any(arg == s for s in ["device", "file_path", "load"]):
            continue
        setattr(args, arg, getattr(exp_args, arg))
    args.domain_randomization = False
    scene_bounds = tuple(args.scene_bounds)
    logging.info("Preparing batch")
    batch = prep_data(
        data_pickle_path=data_pickle_path,
        scene_bounds=scene_bounds,
        subtract_mean=args.subtract_mean_relevancy,
        dump_path=dump_path,
    )
    logging.info(
        f"Fetched {len(batch['descriptions'])} descriptions: "
        + ", ".join(batch["descriptions"])
    )
    # Round-trip the batch through disk (also leaves a debugging artifact).
    pickle.dump(batch, open("new-input.pkl", "wb"))
    batch = pickle.load(open("new-input.pkl", "rb"))
    net = utils.get_net(net_class=SemAbsVOOL, **vars(args))[0]
    net.eval()
    desc_predictions, grid_points = process_batch_vool(
        net=net,
        batch=batch,
        scene_bounds=scene_bounds,
        device=args.device,
        num_input_pts=args.num_input_pts,
    )
    logging.info(f"Dumping pointclouds to {dump_path}/{batch['scene_id']}")
    cmap = plt.get_cmap("jet")
    for desc, prediction in desc_predictions.items():
        prediction = prediction.squeeze().view(-1)
        # Keep only points within 0.15 logits of the per-description maximum.
        keep_mask = prediction > prediction.max() - 0.15
        desc_points = grid_points[keep_mask]
        logprobs = prediction[keep_mask]
        logprobs = logprobs.exp().numpy()
        # Min-max normalize kept probabilities before colormapping.
        vmin = logprobs.min()
        vmax = logprobs.max()
        logprobs = (logprobs - vmin) / (vmax - vmin)
        colors = cmap(logprobs)[..., :3]
        meshwrite(
            filename=f"{dump_path}/{batch['scene_id']}/{desc}.ply",
            verts=desc_points.cpu().numpy(),
            colors=(colors * 255).astype(np.uint8),
        )
    # Also dump the (possibly subsampled) RGB scene cloud for context.
    indices = np.arange(len(batch["input_xyz_pts"]))
    if len(batch["input_xyz_pts"]) > 100000:
        indices = np.random.choice(
            len(batch["input_xyz_pts"]), size=100000, replace=False
        )
    meshwrite(
        filename=f"{dump_path}/{batch['scene_id']}/scene_rgb.ply",
        verts=batch["input_xyz_pts"].cpu().numpy()[indices],
        colors=batch["input_rgb_pts"][indices],
    )
# color palette from https://sashamaps.net/docs/resources/20-colors/
# 22 rows (20 distinct colors plus white and black), normalized to [0, 1];
# consumers index it modulo 20 (see ovssc_visualize).
twenty_color_palette = (
    np.array(
        [
            [230, 25, 75],
            [60, 180, 75],
            [255, 225, 25],
            [0, 130, 200],
            [245, 130, 48],
            [145, 30, 180],
            [70, 240, 240],
            [240, 50, 230],
            [210, 245, 60],
            [250, 190, 212],
            [0, 128, 128],
            [220, 190, 255],
            [170, 110, 40],
            [255, 250, 200],
            [128, 0, 0],
            [170, 255, 195],
            [128, 128, 0],
            [255, 215, 180],
            [0, 0, 128],
            [128, 128, 128],
            [255, 255, 255],
            [0, 0, 0],
        ]
    )
    / 255
)
def render_animation(geometries, n_frames=220, point_size=6, **kwargs):
    """Render a turntable animation of ``geometries`` with Open3D.

    Rotates the view by a fixed increment each frame and grabs a screenshot.
    Extra keyword arguments are accepted (and ignored) for call-site
    compatibility.

    Returns:
        list of ``(1024, 1024, 3)`` uint8 frames.
    """
    visualizer = o3d.visualization.Visualizer()
    visualizer.create_window(width=1024, height=1024)
    visualizer.get_render_option().point_size = point_size
    for geometry in geometries:
        visualizer.add_geometry(geometry)
    frames = []
    with Progress() as progress:
        task = progress.add_task("Rendering", total=n_frames)
        for _ in range(n_frames):
            # Nudge the camera around the scene, then re-render and capture.
            view_control = visualizer.get_view_control()
            view_control.rotate(10.0, 0.0)
            visualizer.update_renderer()
            buffer = visualizer.capture_screen_float_buffer(do_render=True)
            frame = (np.asarray(buffer) * 255).astype(np.uint8)
            frames.append(frame)
            progress.update(task, advance=1)
    visualizer.destroy_window()
    return frames
def generate_legend(legend):
    """Render a standalone legend image for a {label: color} mapping.

    Draws a matplotlib legend with one square marker per entry, saves just
    the legend's bounding box to an in-memory PNG, and returns it as a
    uint8 RGBA numpy array.
    """
    def square_handle(color):
        # Invisible plot whose marker supplies the legend swatch.
        return plt.plot([], [], marker="s", color=color, ls="none")[0]
    handles = [square_handle(color) for color in legend.values()]
    drawn = plt.legend(
        handles, list(legend.keys()), loc=3, framealpha=0, frameon=False
    )
    fig = drawn.figure
    fig.canvas.draw()
    # Crop the saved figure to the legend's extent (in inches).
    bbox = drawn.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    buf = io.BytesIO()
    plt.savefig(buf, format="png", dpi=200, bbox_inches=bbox)
    buf.seek(0)
    return np.array(Image.open(buf)).astype(np.uint8)
@app.command()
def ovssc_visualize(output_path: str):
    """Render a turntable video of all completed OBJ meshes with a legend."""
    geometries = []
    # Rotate -90 degrees about x so the scene is upright in the viewer.
    rotate = affines.compose(
        T=[0, 0, 0], R=euler.euler2mat(-np.pi / 2, 0, 0), Z=[1, 1, 1]
    )
    legend = {}
    for idx, path in enumerate(Path(output_path).rglob("*.obj")):
        path = str(path)
        mesh = o3d.io.read_triangle_mesh(path)
        mesh = mesh.transform(rotate)
        # Class name comes from the file name, wrapped for legend display.
        class_name = "\n".join(textwrap.wrap(path.split("/")[-1].split(".obj")[0], 30))
        # color mesh
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(mesh.vertices)
        pcd.paint_uniform_color(twenty_color_palette[idx % 20])
        legend[class_name] = twenty_color_palette[idx % 20]
        geometries.append(pcd)
    output_path = f"{output_path}/completion.mp4"
    legend_img = generate_legend(legend)[:, :, :3]
    h, w, _ = legend_img.shape
    # Only paste non-white legend pixels onto each frame's top-left corner.
    mask = (legend_img != 255).any(axis=2)
    with imageio.get_writer(output_path, fps=24) as writer:
        for img in render_animation(geometries=geometries, point_size=4):
            img[:h, :w, :][mask] = legend_img[mask]
            writer.append_data(img)
    print(output_path)
@app.command()
def vool_visualize(output_path: str):
    """Render a turntable video per VOOL localization over the scene cloud."""
    pointclouds = {}
    for ply_path in Path(output_path).rglob("*.ply"):
        desc = str(ply_path).split("/")[-1].split(".ply")[0]
        pointclouds[desc] = o3d.io.read_point_cloud(str(ply_path))
    # Rotate -90 degrees about x so the scene is upright in the viewer.
    rotate = affines.compose(
        T=[0, 0, 0], R=euler.euler2mat(-np.pi / 2, 0, 0), Z=[1, 1, 1]
    )
    scene = pointclouds["scene_rgb"].voxel_down_sample(voxel_size=0.03)
    scene = scene.transform(rotate)
    for desc, localization in pointclouds.items():
        if desc == "scene_rgb":
            # The context cloud is rendered under every localization, not alone.
            continue
        localization = localization.transform(rotate)
        video_path = f"{output_path}/{desc}.mp4"
        with imageio.get_writer(video_path, fps=24) as writer:
            for frame in render_animation(geometries=[scene, localization]):
                writer.append_data(frame)
        print(video_path)
if __name__ == "__main__":
    # Typer dispatches to the @app.command() subcommands defined above.
    app()
"""
### scene_4_living-room-1.pkl (NO, VOOL messed up for some reason..., should look into this)
python visualize.py ovssc-inference matterport/scene_4_living-room-1.pkl models/ours/ovssc/ovssc.pth
python visualize.py ovssc-visualize visualization/scene_4_living-room-1
python visualize.py vool-inference matterport/scene_4_living-room-1.pkl models/ours/vool/vool.pth
python visualize.py vool-visualize visualization/scene_4_living-room-1
### scene_1_kitchen-5.pkl (YES)
python visualize.py ovssc-inference matterport/scene_1_kitchen-5.pkl models/ours/ovssc/ovssc.pth
python visualize.py ovssc-visualize visualization/scene_1_kitchen-5
python visualize.py vool-inference matterport/scene_1_kitchen-5.pkl models/ours/vool/vool.pth
python visualize.py vool-visualize visualization/scene_1_kitchen-5
### 00754-EqZacbtdApE_living-room-1 (YES)
python visualize.py ovssc-inference matterport/00754-EqZacbtdApE_living-room-1.pkl models/ours/ovssc/ovssc.pth
python visualize.py ovssc-visualize visualization/00754-EqZacbtdApE_living-room-1
python visualize.py vool-inference matterport/00754-EqZacbtdApE_living-room-1.pkl models/ours/vool/vool.pth
python visualize.py vool-visualize visualization/00754-EqZacbtdApE_living-room-1
scene_2_hallway-2 (YES)
310_kitchen-6 (BAD OVSSC)
scene_2_bedroom-8 (COMPLETION AND LOCALIZATION MESSED UP)
vn_poster (Good completion)
"""
| 23,057 | 35.084507 | 110 | py |
semantic-abstraction | semantic-abstraction-main/train_ovssc.py | import numpy as np
import torch
from torch.nn.functional import binary_cross_entropy_with_logits
from net import SemAbs3D, SemanticAwareOVSSC
import utils
import pandas as pd
from dataset import SceneCompletionDataset
from typing import Dict, Tuple, Union
def get_detailed_stats(
    prediction,
    gt_label,
    xyz_pts,
    patch_labels,
    scene_ids,
    scene_bounds,
    ignore_pts,
    detailed_analysis=False,
    eval_device="cuda",
    **kwargs,
):
    """Build a per-(scene, class) metrics DataFrame for OVSSC predictions.

    Computes point-level metrics plus metrics on a 32^3 voxelization (and a
    64^3 one when ``detailed_analysis`` is set) via ``utils``. Rows whose
    class label is the empty string are padding; their metric cells are set
    to NaN so downstream aggregation ignores them.

    Args:
        prediction / gt_label: boolean per-point tensors.
        xyz_pts: per-point coordinates used for voxelization.
        patch_labels: ``(num_scenes, num_patches)`` array of class names.
        scene_ids: one id per scene.
        scene_bounds: bounds used for voxelization.
        ignore_pts: per-point mask of points excluded from evaluation.
        detailed_analysis (bool): also compute 64^3 voxel metrics.
        eval_device (str): device metrics are computed on.

    Returns:
        pd.DataFrame: one row per (scene, class) pair.
    """
    num_scenes, num_patches = patch_labels.shape
    retvals = {
        "scene_id": np.array([[scene_id] * num_patches for scene_id in scene_ids])
        .reshape(-1)
        .tolist(),
        "label": patch_labels.reshape(-1).tolist(),
    }
    # Point-level metrics.
    retvals.update(
        {
            f"point_{k}": v
            for k, v in utils.prediction_analysis(
                prediction=prediction.to(eval_device),
                label=gt_label.to(eval_device),
                ignore=ignore_pts.to(eval_device),
            ).items()
        }
    )
    # Metrics on a coarse 32^3 voxelization of the points.
    voxelized_pts = utils.voxelize_points(
        prediction=prediction,
        label=gt_label,
        xyz_pts=xyz_pts,
        voxel_shape=(32, 32, 32),
        scene_bounds=scene_bounds,
        ignore_pts=ignore_pts,
    )
    retvals.update(
        {
            "voxel32x32x32_" + k: v
            for k, v in utils.prediction_analysis(
                **{k: v.to(eval_device) for k, v in voxelized_pts.items()}
            ).items()
        }
    )
    if detailed_analysis:
        # Optional finer 64^3 voxelization.
        voxelized_pts = utils.voxelize_points(
            prediction=prediction,
            label=gt_label,
            xyz_pts=xyz_pts,
            voxel_shape=(64, 64, 64),
            scene_bounds=scene_bounds,
            ignore_pts=ignore_pts,
        )
        retvals.update(
            {
                "voxel64x64x64_" + k: v
                for k, v in utils.prediction_analysis(
                    **{k: v.to(eval_device) for k, v in voxelized_pts.items()}
                ).items()
            }
        )
    # Hoist the metric-column filter out of the per-row loop.
    metric_keys = [k for k in retvals.keys() if "voxel" in k or "point" in k]
    for i, label in enumerate(patch_labels.reshape(-1).tolist()):
        if label == "":  # skip padding classes
            for k in metric_keys:
                # np.nan: the np.NAN alias was removed in NumPy 2.0.
                retvals[k][i] = np.nan
    return pd.DataFrame.from_dict(retvals)
def get_losses(
    net,
    batch: dict,
    cutoffs=[0],
    balance_positive_negative: bool = False,
    **kwargs,
) -> Tuple[Dict[str, Union[float, torch.Tensor]], pd.DataFrame]:
    """Forward a batch through ``net`` and compute BCE loss plus metrics.

    Points that are padding, out of scene bounds, or out of the camera
    frustum are excluded from both the loss and the metrics. Detailed
    per-class stats are computed once per entry in ``cutoffs``.

    NOTE: mutates ``batch`` in place (``patch_labels`` is transposed and
    ``out_of_bounds_pts`` reshaped) — callers should not reuse the batch.

    Returns:
        (scalar stats dict incl. "loss" and "accuracy", detailed DataFrame).
    """
    stats = {}
    num_pts = batch["output_xyz_pts"].shape[2]
    if num_pts <= 500000:
        outputs = net(**batch)
    else:
        # Too many query points for a single forward pass; process the batch
        # patch-by-patch and concatenate along the patch dimension.
        num_patches = 1
        # probably CUDA OOM
        outputs = torch.cat(
            [
                net(
                    **{
                        **batch,
                        "input_feature_pts": batch["input_feature_pts"][
                            :, patch_i * num_patches : (patch_i + 1) * num_patches, ...
                        ]
                        if batch["input_feature_pts"].shape[1]
                        == batch["output_xyz_pts"].shape[1]
                        else batch["input_feature_pts"],
                        "output_xyz_pts": batch["output_xyz_pts"][
                            :, patch_i * num_patches : (patch_i + 1) * num_patches, ...
                        ],
                        "semantic_class_features": batch["semantic_class_features"][
                            :, patch_i * num_patches : (patch_i + 1) * num_patches, ...
                        ],
                    }
                )
                for patch_i in range(len(batch["patch_labels"]) // num_patches + 1)
                if np.prod(
                    batch["output_xyz_pts"][
                        :, patch_i * num_patches : (patch_i + 1) * num_patches, ...
                    ].shape
                )
                > 0
            ],
            dim=1,
        )
    batch["patch_labels"] = np.array(batch["patch_labels"]).T
    padding_mask = torch.from_numpy(batch["patch_labels"] == "").bool()
    batch["out_of_bounds_pts"] = batch["out_of_bounds_pts"].view(outputs.shape)
    ignore_pts_mask = torch.zeros_like(outputs).bool()
    # ignore all padding labels
    ignore_pts_mask[padding_mask] = True
    # ignore all points out of bounds
    ignore_pts_mask = torch.logical_or(ignore_pts_mask, batch["out_of_bounds_pts"])
    # don't eval on points outside of frustum
    ignore_pts_mask = torch.logical_or(
        ignore_pts_mask, batch["out_of_frustum_pts_mask"]
    )
    stats["loss"] = binary_cross_entropy_with_logits(
        outputs[~ignore_pts_mask],
        batch["output_label_pts"][~ignore_pts_mask],
        weight=utils.get_bce_weight(
            output_label_pts=batch["output_label_pts"],
            balance_positive_negative=balance_positive_negative,
        )[~ignore_pts_mask],
    )
    with torch.no_grad():
        # Accuracy thresholds logits at 0 (i.e. probability 0.5).
        vision_accuracy_mask = (
            (outputs > 0.0).long() == batch["output_label_pts"]
        ).float()
        stats["accuracy"] = vision_accuracy_mask[~ignore_pts_mask].mean()
    detailed_stats = [
        get_detailed_stats(
            prediction=outputs > cutoff,
            gt_label=batch["output_label_pts"].bool(),
            xyz_pts=batch["output_xyz_pts"],
            ignore_pts=ignore_pts_mask,
            patch_labels=batch["patch_labels"],
            scene_ids=batch["scene_id"],
            eval_device=net.device,
            **kwargs,
        )
        for cutoff in cutoffs
    ]
    for detailed_stat, cutoff in zip(detailed_stats, cutoffs):
        detailed_stat["cutoff"] = [cutoff] * len(detailed_stat)
    detailed_stats = pd.concat(detailed_stats)
    # Surface mean IoU columns as scalar stats for logging.
    for k in detailed_stats.columns:
        if "iou" in k:
            stats[k] = detailed_stats[k].mean()
    return stats, detailed_stats
# Maps the --approach CLI choice to the network class to train.
approach = {
    "semantic_abstraction": SemAbs3D,
    "semantic_aware": SemanticAwareOVSSC,
}
if __name__ == "__main__":
    parser = utils.config_parser()
    parser.add_argument("--log", type=str, required=True)
    parser.add_argument(
        "--approach", choices=approach.keys(), default="semantic_abstraction"
    )
    args = parser.parse_args()
    if args.approach == "semantic_aware":
        # The semantic-aware baseline consumes raw RGB instead of relevancies.
        args.network_inputs = ["rgb"]
    utils.train(
        get_losses_fn=get_losses,
        **utils.setup_experiment(
            args=args,
            # Use DistributedDataParallel only when more than one GPU is given.
            ddp=len(args.gpus) > 1,
            net_class=approach[args.approach],
            dataset_class=SceneCompletionDataset,
            split_file_path=args.file_path + "/ssc_split.pkl",
        ),
        **vars(args),
    )
| 6,693 | 32.808081 | 87 | py |
semantic-abstraction | semantic-abstraction-main/generate_thor_data.py | import logging
import re
from copy import deepcopy
import shutil
from argparse import ArgumentParser
from typing import List
import ray
from ai2thor.controller import Controller
from ai2thor.platform import CloudRendering
from matplotlib import pyplot as plt
import numpy as np
import torch
from transforms3d import affines, euler
from fusion import TSDFVolume, rigid_transform
from generate_relevancy import get_datastructure, init_dataset, resize_and_add_data
from net import VirtualGrid
import pickle
import os
from tqdm import tqdm
from point_cloud import filter_pts_bounds, get_pointcloud
from utils import write_to_hdf5
import h5py
from numba import njit, prange
# Camera / rendering configuration for AI2-THOR data generation.
fov_w = 80.0  # horizontal field of view, degrees
width = 224 * 4
height = 224 * 4
num_output_pts = 1000000
# Axis-aligned crop: [[x_min, y_min, z_min], [x_max, y_max, z_max]]
# (presumably meters, agent-relative — TODO confirm against the exporter).
scene_bounds = np.array([[-1, -1, -0.1], [1, 1, 1.9]])
# Pinhole intrinsics derived from the horizontal FOV (square pixels).
focal_length = (width / 2) / np.tan((np.pi * fov_w / 180) / 2)
cam_intr = np.array(
    [[focal_length, 0, height / 2], [0, focal_length, width / 2], [0, 0, 1]]
)
# iTHOR scene names grouped by room type; the last 5 of each group are held
# out as the test split.
kitchens = [f"FloorPlan{i}_physics" for i in range(1, 31)]
living_rooms = [f"FloorPlan{200 + i}_physics" for i in range(1, 31)]
bedrooms = [f"FloorPlan{300 + i}_physics" for i in range(1, 31)]
bathrooms = [f"FloorPlan{400 + i}_physics" for i in range(1, 31)]
test_scenes = kitchens[-5:] + living_rooms[-5:] + bedrooms[-5:] + bathrooms[-5:]
def parse_gt(scene_name: str, path_to_exported_scenes: str):
    """Load (or build and cache) ground-truth annotations for one scene.

    Reads the Unity-exported text files (point cloud, per-point object ids,
    receptacle transforms), reduces object ids to class labels, and computes
    per-receptacle containment masks. The result is cached as a pickle next
    to the exported scene; a corrupt cache is logged and rebuilt.

    Returns:
        dict with full_xyz_pts, full_objid_pts, semantic (int class index per
        point), labels, unique_labels, and receptacle_masks.
    """
    pickle_path = f"{path_to_exported_scenes}/{scene_name}.pkl"
    scene_gt = None
    if os.path.exists(pickle_path):
        try:
            scene_gt = pickle.load(open(pickle_path, "rb"))
        except Exception as e:
            # Corrupt/partial cache file: fall through and regenerate it.
            logging.error(e)
            logging.error(pickle_path)
    # cache this pre-processing
    if scene_gt is None:
        labels = []
        semantic = []
        # One "x|y|z" line per point.
        full_xyz_pts = np.array(
            list(
                map(
                    lambda l: list(map(float, l.rstrip().split("|"))),
                    open(
                        f"{path_to_exported_scenes}/{scene_name}/full_xyz_pts.txt"
                    ).readlines(),
                )
            )
        )
        # One object-id string per point, parallel to full_xyz_pts.
        full_objid_pts = list(
            map(
                lambda l: l.rstrip(),
                open(
                    f"{path_to_exported_scenes}/{scene_name}/full_objid_pts.txt"
                ).readlines(),
            )
        )
        receptacle_infos = list(
            map(
                process_receptacle_line,
                open(
                    f"{path_to_exported_scenes}/{scene_name}_receptacles.txt"
                ).readlines(),
            )
        )
        # For every receptacle, which points fall inside its oriented bbox.
        receptacle_masks = {
            receptacle_info["receptacle_name"]: check_inside_receptacle(
                xyz_pts=full_xyz_pts, receptacle_info=receptacle_info
            )
            for receptacle_info in receptacle_infos
        }
        unique_obj_ids = list(set(full_objid_pts))
        unique_labels = list(set(map(class_reduction_rule, unique_obj_ids)))
        # Map each point's object id to a class label and its index.
        for objid in full_objid_pts:
            label = class_reduction_rule(objid)
            labels.append(label)
            semantic.append(unique_labels.index(label))
        semantic = np.array(semantic).astype(int)
        scene_gt = {
            "full_xyz_pts": full_xyz_pts,
            "full_objid_pts": full_objid_pts,
            "semantic": semantic,
            "labels": labels,
            "unique_labels": unique_labels,
            "receptacle_masks": receptacle_masks,
        }
        pickle.dump(scene_gt, open(pickle_path, "wb"))
    return scene_gt
def check_inside_receptacle(xyz_pts, receptacle_info):
    """Return a boolean mask of which points lie inside a receptacle's bbox.

    Points are transformed into the receptacle's local frame via the inverse
    of its transform matrix, then tested (inclusively) against the axis-
    aligned box of size ``bbox_size`` centered at the local origin.
    """
    homogeneous = np.concatenate(
        (xyz_pts, np.ones(len(xyz_pts))[:, None]), axis=1
    )
    world_to_local = np.linalg.inv(receptacle_info["transform_matrix"])
    local_pts = (world_to_local @ homogeneous.T).T[:, :3]
    # Box spans [-size/2, +size/2] on every axis in the local frame.
    half_extent = receptacle_info["bbox_size"] / 2
    above_min = (local_pts >= -half_extent).all(axis=-1)
    below_max = (local_pts <= half_extent).all(axis=-1)
    return np.logical_and(above_min, below_max)
def process_receptacle_line(line):
    """Parse one exported receptacle record into a dict.

    Expected format (fields separated by ``|``):
    ``name|(r00,r01,..)(..)(..)(..)|(sx,sy,sz)|(cx,cy,cz)`` where the second
    field is a row-major 4x4 transform split into four parenthesized rows.
    """
    def parse_vec3(text):
        # Strip the surrounding parentheses and split on commas.
        return np.array(text[1:-1].split(",")).astype(float)
    receptacle_name, matrix_text, size_text, center_text = (
        line.strip().split("|")
    )
    flat_matrix = (
        matrix_text.replace(")(", ",").replace(")", "").replace("(", "")
    )
    transform_matrix = np.array(flat_matrix.split(",")).astype(float)
    return {
        "receptacle_name": receptacle_name,
        "transform_matrix": transform_matrix.reshape(4, 4),
        "bbox_size": parse_vec3(size_text),
        "bbox_center": parse_vec3(center_text),
    }
@njit(parallel=True)
def cam2pix(cam_pts, intr):
    # from https://github.com/andyzeng/tsdf-fusion-python/blob/master/fusion.py#L181-L193
    """Convert camera coordinates to pixel coordinates.

    Args:
        cam_pts: (N, 3) array of points in the camera frame.
        intr: (3, 3) camera intrinsics matrix.

    Returns:
        (N, 2) int64 array of rounded (x, y) pixel coordinates.
    """
    intr = intr.astype(np.float32)
    # Focal lengths and principal point from the intrinsics matrix.
    fx, fy = intr[0, 0], intr[1, 1]
    cx, cy = intr[0, 2], intr[1, 2]
    pix = np.empty((cam_pts.shape[0], 2), dtype=np.int64)
    # prange: numba parallelizes this loop across threads.
    for i in prange(cam_pts.shape[0]):
        pix[i, 0] = int(np.round((cam_pts[i, 0] * fx / cam_pts[i, 2]) + cx))
        pix[i, 1] = int(np.round((cam_pts[i, 1] * fy / cam_pts[i, 2]) + cy))
    return pix
def xyz_pts_to_cam_pix(xyz_pts, cam_pose, cam_intr):
    """Project world-frame points to pixel coordinates plus camera-frame depth."""
    # World frame -> camera frame, then camera frame -> pixels.
    world_to_cam = np.linalg.inv(cam_pose)
    cam_pts = rigid_transform(xyz_pts, world_to_cam)
    pix = cam2pix(cam_pts, cam_intr)
    return pix[:, 0], pix[:, 1], cam_pts[:, 2]
def get_all_relations(
    scene_data,
    receptacle_masks,
    objects_info,
    remapped_visible_obj_ids,
    all_remapped_obj_ids,
    visibility_pts_mask,
    # NOTE(review): both default sets below are mutable default arguments;
    # they are only read (membership tests), never mutated, so this is benign.
    container_obj_classes={
        "cabinet",
        "fridge",
        "drawer",
        "bathtub basin",
        "bowl",
        "box",
        "cup",
        "desk",
        "garbage can",
        "laundry hamper",
        "microwave",
        "mug",
        "pot",
        "safe",
        "sink basin",
        "toaster",
    },
    no_localization_obj_classes={
        "wall",
        "ceiling",
        "floor",
        "empty",
        "countertop",
        "drawer",
        "counter",
        "banana",
    },
    direction_dot_threshold=0.6,
):
    """Extract spatial relations (in/on/behind/left/right/in-front) between
    objects in the scene and return them as parallel lists in a dict.

    Side effects: mutates ``scene_data["full_objid_pts"]`` (relabels hidden
    regions to the target/hidden object id) and may extend
    ``scene_data["objid_to_class"]`` with a synthetic hidden "banana" class.
    """
    objects_in_scene = set(np.unique(scene_data["full_objid_pts"]))
    descriptions = set()
    unfiltered_descriptions = list()
    def should_add_relation(target_obj_name, spatial_relation, reference_obj_name):
        # Filter out self-relations, unhelpful reference classes, duplicates,
        # and directional relations that conflict with an existing in/on.
        if target_obj_name == reference_obj_name:
            # unhelpful
            return False
        if (
            "ceiling" in reference_obj_name
            or reference_obj_name
            in {"floor", "rug", "baseboard", "light fixture", "decal"}
            or target_obj_name
            in {"floor", "rug", "baseboard", "light fixture", "decal"}
        ):
            # people don't localize objects in reference to these objects
            return False
        if (
            f"{target_obj_name} {spatial_relation} a {reference_obj_name}"
            in descriptions
        ):
            # duplicate
            return False
        if spatial_relation not in {"in", "on"} and (
            (f"{target_obj_name} in a {reference_obj_name}" in descriptions)
            or (f"{target_obj_name} on a {reference_obj_name}" in descriptions)
            or (f"{reference_obj_name} on a {target_obj_name}" in descriptions)
            or (f"{reference_obj_name} in a {target_obj_name}" in descriptions)
        ):
            # if target obj is on or in reference obj, then it shouldn't also be
            # left of, right of, behind, or in front of
            return False
        return True
    # Parallel lists; one entry per accepted relation.
    retval = {
        "target_obj_name": [],
        "target_obj_material": [],
        "target_obj_id": [],
        "reference_obj_name": [],
        "reference_obj_material": [],
        "spatial_relation_name": [],
    }
    # map from object id to obj class name
    for target_obj_id, obj_info in objects_info.items():
        target_obj_name = " ".join(
            map(lambda c: c.lower(), camel_case_split(obj_info["objectType"]))
        )
        # "in"/"on" relations come from the simulator's parent-receptacle data.
        if obj_info["parentReceptacles"] is not None:
            for reference_obj_id in obj_info["parentReceptacles"]:
                if reference_obj_id not in remapped_visible_obj_ids.keys():
                    # parent obj not visible
                    continue
                if target_obj_id not in all_remapped_obj_ids:
                    logging.warning(
                        target_obj_id + " not in mapped objids " + reference_obj_id
                    )
                    continue
                if (
                    all_remapped_obj_ids[target_obj_id] not in objects_in_scene
                    or all_remapped_obj_ids[reference_obj_id] not in objects_in_scene
                ):
                    # target or reference object doesn't even appear in scene bounds
                    continue
                parent_obj_info = objects_info[reference_obj_id]
                if parent_obj_info["objectType"] == "Floor":
                    continue
                reference_obj_name = " ".join(
                    map(
                        lambda c: c.lower(),
                        camel_case_split(parent_obj_info["objectType"]),
                    )
                )
                spatial_relation_name = (
                    "in" if reference_obj_name in container_obj_classes else "on"
                )
                unfiltered_descriptions.append(
                    f"{target_obj_name} {spatial_relation_name} a {reference_obj_name}"
                )
                if should_add_relation(
                    target_obj_name=target_obj_name,
                    spatial_relation=spatial_relation_name,
                    reference_obj_name=reference_obj_name,
                ):
                    descriptions.add(
                        f"{target_obj_name} {spatial_relation_name} a {reference_obj_name}"
                    )
                    retval["target_obj_name"].append(target_obj_name)
                    retval["target_obj_id"].append(all_remapped_obj_ids[target_obj_id])
                    retval["target_obj_material"].append(
                        "|".join(obj_info["salientMaterials"])
                        if obj_info["salientMaterials"] is not None
                        else ""
                    )
                    retval["reference_obj_name"].append(reference_obj_name)
                    retval["reference_obj_material"].append(
                        "|".join(parent_obj_info["salientMaterials"])
                        if parent_obj_info["salientMaterials"] is not None
                        else ""
                    )
                    retval["spatial_relation_name"].append(spatial_relation_name)
                    target_obj_is_visible = (
                        target_obj_id in remapped_visible_obj_ids.keys()
                    )
                    if not target_obj_is_visible:
                        # if target obj not visible then should
                        # supervise entire region
                        matching_receptacle_masks = {
                            rk: rv
                            for rk, rv in receptacle_masks.items()
                            if " ".join(
                                map(
                                    lambda c: c.lower(),
                                    camel_case_split(rk.split("_")[0]),
                                )
                            )
                            == retval["reference_obj_name"][-1]
                        }
                        if len(matching_receptacle_masks) == 0:
                            continue
                        receptacle_mask = np.logical_or.reduce(
                            tuple(
                                receptacle_mask["mask"]
                                for receptacle_mask in matching_receptacle_masks.values()
                            )
                        )
                        # Relabel hidden (non-visible) receptacle points to the
                        # target object's id so the region is supervised.
                        scene_data["full_objid_pts"][
                            :, np.logical_and(receptacle_mask, ~visibility_pts_mask)
                        ] = all_remapped_obj_ids[target_obj_id]
        # augment with inside relation
        if target_obj_name in container_obj_classes:
            container_name = target_obj_name
            container_obj_id = target_obj_id
            if container_obj_id not in remapped_visible_obj_ids.keys():
                continue
            matching_receptacle_masks = {
                rk: rv
                for rk, rv in receptacle_masks.items()
                if " ".join(
                    map(lambda c: c.lower(), camel_case_split(rk.split("_")[0]))
                )
                == container_name
            }
            if len(matching_receptacle_masks) == 0:
                continue
            # Synthetic relation: an imagined "banana" hidden in the container.
            description = f"banana in a {container_name}"
            unfiltered_descriptions.append(description)
            if should_add_relation(
                target_obj_name="banana",
                spatial_relation="in",
                reference_obj_name=container_name,
            ):
                descriptions.add(description)
                receptacle_mask = np.logical_or.reduce(
                    tuple(
                        receptacle_mask["mask"]
                        for receptacle_mask in matching_receptacle_masks.values()
                    )
                )
                # New class id appended at the end of objid_to_class below.
                hidden_obj_id = len(scene_data["objid_to_class"])
                retval["reference_obj_name"].append(container_name)
                retval["reference_obj_material"].append(
                    "|".join(obj_info["salientMaterials"])
                    if obj_info["salientMaterials"] is not None
                    else ""
                )
                hidden_obj_name = "banana"
                retval["target_obj_name"].append(hidden_obj_name)
                retval["target_obj_id"].append(hidden_obj_id)
                retval["target_obj_material"].append("")
                retval["spatial_relation_name"].append("in")
                scene_data["objid_to_class"] = np.array(
                    scene_data["objid_to_class"].astype(str).tolist()
                    + [f"banana[{hidden_obj_id}]"]
                ).astype("S")
                scene_data["full_objid_pts"][
                    :, np.logical_and(receptacle_mask, ~visibility_pts_mask)
                ] = hidden_obj_id
    # FIND ALL SPATIAL RELATIONS IN SCENE
    for reference_obj_key, reference_obj_id in remapped_visible_obj_ids.items():
        for target_obj_id in set(scene_data["full_objid_pts"][0]):
            target_obj_name = (
                scene_data["objid_to_class"][target_obj_id]
                .decode("utf-8")
                .split("[")[0]
            )
            reference_obj_name = (
                scene_data["objid_to_class"][reference_obj_id]
                .decode("utf-8")
                .split("[")[0]
            )
            if reference_obj_id == target_obj_id:
                continue
            if (
                target_obj_name in no_localization_obj_classes
                or reference_obj_name in no_localization_obj_classes
            ):
                continue
            target_obj_mask = scene_data["full_objid_pts"][0] == target_obj_id
            target_obj_xyz_pts = scene_data["full_xyz_pts"][0][target_obj_mask, :]
            reference_obj_mask = scene_data["full_objid_pts"][0] == reference_obj_id
            if not reference_obj_mask.any() or not target_obj_mask.any():
                continue
            reference_obj_xyz_pts = scene_data["full_xyz_pts"][0][reference_obj_mask, :]
            # Direction from target centroid to reference centroid.
            displacement = reference_obj_xyz_pts.mean(axis=0) - target_obj_xyz_pts.mean(
                axis=0
            )
            distance = np.linalg.norm(displacement)
            direction = displacement / distance
            reference_obj_bounds = reference_obj_xyz_pts.max(
                axis=0
            ) - reference_obj_xyz_pts.min(axis=0)
            # Scale the accepted distance by the reference object's footprint,
            # clamped to [0.1, 1.0].
            distance_threshold = min(
                max(max(reference_obj_bounds[0], reference_obj_bounds[1]) * 2.0, 0.1),
                1.0,
            )
            if distance > distance_threshold:
                # too far away, probably not an actual spatial relation
                continue
            reference_material = (
                "|".join(objects_info[reference_obj_key]["salientMaterials"])
                if reference_obj_key in objects_info
                and objects_info[reference_obj_key]["salientMaterials"] is not None
                else ""
            )
            target_obj_is_visible = target_obj_id in scene_data["seg"]
            unfiltered_descriptions.append(
                f"{target_obj_name} behind a {reference_obj_name}"
            )
            if np.dot(
                direction, [-1, 0, 0]
            ) > direction_dot_threshold and should_add_relation(
                target_obj_name=target_obj_name,
                spatial_relation="behind",
                reference_obj_name=reference_obj_name,
            ):
                descriptions.add(f"{target_obj_name} behind a {reference_obj_name}")
                retval["target_obj_name"].append(target_obj_name)
                retval["target_obj_material"].append("")
                retval["target_obj_id"].append(target_obj_id)
                retval["reference_obj_name"].append(reference_obj_name)
                retval["reference_obj_material"].append(reference_material)
                retval["spatial_relation_name"].append("behind")
                if not target_obj_is_visible:
                    # Supervise the occluded region behind the reference class:
                    # find "empty" points deeper than the back of the reference
                    # object (per coarse pixel cell) and relabel them.
                    empty_id = list(
                        map(
                            lambda c: c.split("[")[0],
                            scene_data["objid_to_class"].astype(str),
                        )
                    ).index("empty")
                    empty_mask = scene_data["full_objid_pts"][0] == empty_id
                    reference_class_mask_pts = np.logical_or.reduce(
                        tuple(
                            scene_data["full_objid_pts"][0] == objid
                            for objid, objclass in enumerate(
                                scene_data["objid_to_class"].astype(str)
                            )
                            if objclass.split("[")[0] == reference_obj_name
                        )
                    )
                    im_h, im_w = scene_data["depth"][0].shape
                    resize_scale = 10
                    pix_x, pix_y, pix_z = xyz_pts_to_cam_pix(
                        xyz_pts=scene_data["full_xyz_pts"][0],
                        cam_pose=scene_data["cam_pose"],
                        cam_intr=scene_data["cam_intr"],
                    )
                    # effectively resize
                    ref_pix_x, ref_pix_y, ref_pix_z = xyz_pts_to_cam_pix(
                        xyz_pts=scene_data["full_xyz_pts"][0][
                            reference_class_mask_pts, :
                        ],
                        cam_pose=scene_data["cam_pose"],
                        cam_intr=scene_data["cam_intr"],
                    )
                    full_pix_xy = np.stack((pix_x, pix_y), axis=1)
                    corner = full_pix_xy.min(axis=0)
                    full_pix_xy -= corner
                    ref_pix_xy = np.stack((ref_pix_x, ref_pix_y), axis=1)
                    ref_pix_xy -= corner
                    # Bucket pixel coords into resize_scale-sized cells.
                    full_pix_xy[:, 0] = np.digitize(
                        full_pix_xy[:, 0], bins=np.arange(0, im_w, resize_scale)
                    )
                    full_pix_xy[:, 1] = np.digitize(
                        full_pix_xy[:, 1], bins=np.arange(0, im_h, resize_scale)
                    )
                    ref_pix_xy[:, 0] = np.digitize(
                        ref_pix_xy[:, 0], bins=np.arange(0, im_w, resize_scale)
                    )
                    ref_pix_xy[:, 1] = np.digitize(
                        ref_pix_xy[:, 1], bins=np.arange(0, im_h, resize_scale)
                    )
                    # -1 marks cells with no reference-object points.
                    ref_backsize = -np.ones(
                        (full_pix_xy[:, 0].max() + 1, full_pix_xy[:, 1].max() + 1)
                    ).astype(float)
                    # get back side of object in each pixel
                    for pix_xy in np.unique(ref_pix_xy, axis=0):
                        mask = (ref_pix_xy == pix_xy).all(axis=1)
                        ref_backsize[pix_xy[0], pix_xy[1]] = ref_pix_z[mask].max()
                    accessed_depth = ref_backsize[full_pix_xy[:, 0], full_pix_xy[:, 1]]
                    behind_mask = np.logical_and(
                        accessed_depth < pix_z, accessed_depth != -1
                    )
                    target_obj_mask = np.logical_and.reduce(
                        (behind_mask, ~visibility_pts_mask, empty_mask)
                    )
                    scene_data["full_objid_pts"][:, target_obj_mask] = target_obj_id
            # some objects shouldn't allow behind
            if reference_obj_name in {"cabinet"}:
                continue
            # if in front of, left of, or right of, then target object
            # should be visible
            if target_obj_id not in remapped_visible_obj_ids.values():
                continue
            if np.dot(direction, [0, 1, 0]) > direction_dot_threshold:
                unfiltered_descriptions.append(
                    f"{target_obj_name} on the right of a {reference_obj_name}"
                )
            elif np.dot(direction, [0, -1, 0]) > direction_dot_threshold:
                unfiltered_descriptions.append(
                    f"{target_obj_name} on the left of a {reference_obj_name}"
                )
            elif np.dot(direction, [1, 0, 0]) > direction_dot_threshold:
                unfiltered_descriptions.append(
                    f"{target_obj_name} in front of a {reference_obj_name}"
                )
            if np.dot(
                direction, [0, 1, 0]
            ) > direction_dot_threshold and should_add_relation(
                target_obj_name=target_obj_name,
                spatial_relation="on the right of",
                reference_obj_name=reference_obj_name,
            ):
                descriptions.add(
                    f"{target_obj_name} on the right of a {reference_obj_name}"
                )
                retval["target_obj_name"].append(target_obj_name)
                retval["target_obj_material"].append("")
                retval["target_obj_id"].append(target_obj_id)
                retval["reference_obj_name"].append(reference_obj_name)
                retval["reference_obj_material"].append(reference_material)
                retval["spatial_relation_name"].append("on the right of")
            elif np.dot(
                direction, [0, -1, 0]
            ) > direction_dot_threshold and should_add_relation(
                target_obj_name=target_obj_name,
                spatial_relation="on the left of",
                reference_obj_name=reference_obj_name,
            ):
                descriptions.add(
                    f"{target_obj_name} on the left of a {reference_obj_name}"
                )
                retval["target_obj_name"].append(target_obj_name)
                retval["target_obj_material"].append("")
                retval["target_obj_id"].append(target_obj_id)
                retval["reference_obj_name"].append(reference_obj_name)
                retval["reference_obj_material"].append(reference_material)
                retval["spatial_relation_name"].append("on the left of")
            elif np.dot(
                direction, [1, 0, 0]
            ) > direction_dot_threshold and should_add_relation(
                target_obj_name=target_obj_name,
                spatial_relation="in front of",
                reference_obj_name=reference_obj_name,
            ):
                descriptions.add(
                    f"{target_obj_name} in front of a {reference_obj_name}"
                )
                retval["target_obj_name"].append(target_obj_name)
                retval["target_obj_material"].append("")
                retval["target_obj_id"].append(target_obj_id)
                retval["reference_obj_name"].append(reference_obj_name)
                retval["reference_obj_material"].append(reference_material)
                retval["spatial_relation_name"].append("in front of")
    return retval
def camel_case_split(s):
    """Split a CamelCase identifier into its component words.

    Consecutive capitals are kept together (e.g. "HDMICable" ->
    ["HDMI", "Cable"]); lowercase-only input yields an empty list.

    Args:
        s: the CamelCase string to split.

    Returns:
        List of word substrings.
    """
    # Each match starts at an uppercase letter followed by either a lowercase
    # run ("Word") or an all-caps run that ends at another capital or the end.
    # (Parameter renamed from `str`, which shadowed the builtin.)
    return re.findall(r"[A-Z](?:[a-z]+|[A-Z]*(?=[A-Z]|$))", s)
def class_reduction_rule(raw_class_name):
    """Reduce a raw simulator object identifier to a canonical class name
    (e.g. "CounterTop|+01.2" -> "countertop").

    Applies, in order: prefix stripping, camel-case splitting to lowercase
    words, typo/synonym fixes, de-pluralization of known plural classes, and
    keyword-based class merging (e.g. any "*cabinet*" becomes "cabinet").

    Args:
        raw_class_name: identifier string as exported by the simulator.

    Returns:
        Canonical lowercase class name (all-caps acronyms are kept verbatim).
    """
    # Strip the procedural-scene prefix if present.
    if "FP326:PS_326_" in raw_class_name:
        raw_class_name = raw_class_name.split("FP326:PS_326_")[1]
    # Drop common suffix/metadata tokens baked into the identifier.
    class_name = (
        raw_class_name.split("_")[0]
        .split("Height")[0]
        .split("Standard")[-1]
        .split("|")[0]
        .split("Size")[0]
        .split("Done")[0]
    )
    if class_name.upper() == class_name:
        # All-caps identifiers (acronyms) are kept as-is.
        return class_name
    if len(camel_case_split(class_name)):
        class_name = " ".join(c.lower() for c in camel_case_split(class_name))
    class_name = "".join(class_name.split("mesh")).rstrip().lstrip()
    if "f " == class_name[:2]:
        class_name = class_name[2:]
    # Typo and synonym fixes.
    if "ladel" in class_name or "ladle" in class_name:
        return "ladle"
    if class_name == "towl":
        return "towel"
    if class_name == "plate stack":
        return "plate"
    if (
        "deco" in class_name
        and "decor" not in class_name
        and "decorative" not in class_name
        and "decoration" not in class_name
    ):
        class_name = class_name.replace("deco", "decoration")
    elif (
        "decor" in class_name
        and "decorative" not in class_name
        and "decoration" not in class_name
    ):
        class_name = class_name.replace("decor", "decoration")
    # Normalize two-word spellings. (Deduplicated: "dish washer" was replaced
    # three times and "base board" twice — replace() is idempotent here.)
    class_name = class_name.replace("counter top", "countertop")
    class_name = class_name.replace("fire place", "fireplace")
    class_name = class_name.replace("base board", "baseboard")
    class_name = class_name.replace("dish washer", "dishwasher")
    class_name = class_name.replace("bath tub", "bathtub")
    if "book" == class_name or "book stack" == class_name:
        return "book"
    if "rug" == class_name[-3:]:
        return "rug"
    # De-pluralize known plural classes (strip trailing "s").
    if (
        class_name[-len("bottles") :] == "bottles"
        or class_name[-len("wires") :] == "wires"
        or class_name[-len("windows") :] == "windows"
        or class_name[-len("pans") :] == "pans"
        or class_name[-len("decals") :] == "decals"
        or class_name[-len("cups") :] == "cups"
        or class_name[-len("walls") :] == "walls"
        or class_name[-len("rods") :] == "rods"
        or class_name[-len("cans") :] == "cans"
        or class_name[-len("lights") :] == "lights"
    ):
        return class_name[:-1]
    if class_name[-len("glasses") :] == "glasses":
        return class_name[:-2]
    # Keyword-based class merging.
    if "cloth" in class_name:
        return "cloth"
    if "island" in class_name:
        return "kitchen island"
    if "ceiling" in class_name:
        return class_name
    if "cabinet" in class_name:
        return "cabinet"
    if "fridge" in class_name:
        return "fridge"
    if "shelf" in class_name or "shelving" in class_name or "shelves" in class_name:
        return "shelf"
    if "knife" in class_name:
        return "knife"
    if "stove" in class_name:
        return "stove"
    if "wall" in class_name:
        return "wall"
    if "window" in class_name:
        return "window"
    if "door" in class_name:
        return "door"
    return class_name
def process_class_name(c):
    """Strip position metadata ("|...") and keep only the first word."""
    head = c.split("|")[0]
    return head.split(" ")[0]
def run_simulator(
    scene_id: str,
    domain_randomization: bool,
    np_rand: np.random.RandomState,
    num_attempts: int = 10,
    dist: float = 3.0,
    debug: bool = False,
):
    """Launch the simulator for one scene, sample random viewpoints until one
    passes quality checks, and return a datapoint dict of rendered frames plus
    agent/object metadata. Returns None if the controller fails to start or
    all attempts are rejected.

    Args:
        scene_id: simulator scene identifier.
        domain_randomization: also capture a material-randomized RGB frame.
        np_rand: seeded RNG used for viewpoint sampling.
        num_attempts: max viewpoint samples before giving up.
        dist: preferred object distance (meters) for the depth check.
        debug: show matplotlib visualizations for rejected samples.
    """
    controller = None
    try:
        controller = Controller(
            agentMode="default",
            visibilityDistance=1.5,
            scene=scene_id,
            # step sizes
            gridSize=0.05,
            snapToGrid=False,
            rotateStepDegrees=5,
            # image modalities
            renderDepthImage=True,
            renderInstanceSegmentation=True,
            # camera properties
            width=width,
            height=height,
            fieldOfView=fov_w,
            # render headless
            platform=CloudRendering,
        )
    except Exception as e:
        logging.error(e)
        if controller is not None:
            controller.stop()
        return
    datapoint = None
    reachable_positions = controller.step(action="GetReachablePositions").metadata[
        "actionReturn"
    ]
    for _ in range(num_attempts):
        # Sample a random standing pose (position + yaw) for the agent.
        sampled_position = np_rand.choice(reachable_positions)
        sampled_rotation = dict(x=0, y=np_rand.uniform(0, 360), z=0)
        try:
            event = controller.step(
                action="Teleport",
                position=sampled_position,
                rotation=sampled_rotation,
                horizon=0,
                standing=True,
            )
        except Exception as e:
            logging.error(e)
            controller.stop()
            return
        # Build a semantic image by mapping instance colors to class indices.
        classes = list(set(map(process_class_name, event.color_to_object_id.values())))
        semantic_img = np.zeros(event.instance_segmentation_frame.shape[:2]).astype(int)
        for color, objname in event.color_to_object_id.items():
            objname = process_class_name(objname)
            obj_mask = (event.instance_segmentation_frame == color).all(axis=-1)
            semantic_img[obj_mask] = classes.index(objname)
        # reflective surfaces in Unity shows depth of reflection probe
        reflective_surface_mask = event.depth_frame > 10.0
        depth = deepcopy(event.depth_frame)
        # Inpaint bogus reflective depths by 1D interpolation over flat indices.
        depth[reflective_surface_mask] = np.interp(
            np.flatnonzero(reflective_surface_mask),
            np.flatnonzero(~reflective_surface_mask),
            depth[~reflective_surface_mask],
        )
        # Reject views dominated (>80%) by wall pixels.
        if "Wall" in classes and (semantic_img == classes.index("Wall")).mean() > 0.8:
            continue
        # ideally most objects are between 1.5 and 3.5 meters away
        pixel_in_good_range = np.logical_and(
            depth < dist + 1.0,
            depth > dist - 1.0,
        )
        if len(np.unique(semantic_img)) < 4:
            if debug:
                plt.imshow(semantic_img)
                plt.show()
            logging.debug("not enough interesting objects")
            continue
        if pixel_in_good_range.mean() < 0.2:
            if debug:
                logging.debug("not enough pixels in good range")
                fig, axes = plt.subplots(1, 3)
                axes[0].axis("off")
                axes[1].axis("off")
                axes[2].axis("off")
                axes[0].imshow(depth)
                axes[1].imshow(pixel_in_good_range.astype(int))
                axes[2].imshow(event.frame)
                plt.show()
            continue
        domain_randomized_rgb = np.zeros(1)
        if domain_randomization:
            # NOTE(review): RandomizeMaterials is stepped twice and only the
            # second frame is kept — presumably intentional (re-roll), confirm.
            controller.step(action="RandomizeMaterials")
            domain_randomized_rgb = controller.step(action="RandomizeMaterials").frame
        controller.stop()
        datapoint = {
            "scene_id": scene_id,
            "rgb": deepcopy(event.frame),
            "depth": depth,
            "instance": deepcopy(event.instance_segmentation_frame),
            "color_to_object_id": deepcopy(event.color_to_object_id),
            "semantic": semantic_img,
            "classes": classes,
            "position": list(event.metadata["agent"]["position"].values()),
            "camera_horizon": event.metadata["agent"]["cameraHorizon"],
            "rotation": list(event.metadata["agent"]["rotation"].values()),
            "objects_info": event.metadata["objects"],
            "sampled_position": sampled_position,
            "sampled_rotation": sampled_rotation,
            "domain_randomized_rgb": domain_randomized_rgb,
        }
        break
    if datapoint is None:
        controller.stop()
        logging.debug("attempts ran out")
        return
    return datapoint
def scene_data_from_thor_datapoint(
    np_rand,
    datapoint: dict,
    dist: float,
    path_to_exported_scenes: str,
    debug: bool = False,
):
    """Fuse a simulator datapoint with exported ground-truth geometry into one
    training sample: aligned point clouds, remapped instance segmentation,
    TSDF volume, and spatial-relation descriptions.

    Returns None when too few ground-truth points fall inside scene bounds.
    """
    # Camera pose from the agent's position/rotation (degrees -> radians).
    cam_pose = affines.compose(
        T=datapoint["position"],
        R=euler.euler2mat(
            datapoint["rotation"][2] * np.pi / 180,
            datapoint["rotation"][1] * np.pi / 180,
            datapoint["rotation"][0] * np.pi / 180,
        ),
        Z=np.ones(3),
    )
    xyz_pts, rgb_pts = get_pointcloud(
        depth_img=datapoint["depth"],
        color_img=datapoint["rgb"],
        cam_intr=cam_intr,
        cam_pose=cam_pose,
    )
    # compute transform to align ground truth with view
    transform = (
        affines.compose(T=[0, 0, 2], R=euler.euler2mat(0, 0, 0), Z=np.array([1, 1, 1]))
        @ affines.compose(
            T=[0, 0, 0], R=euler.euler2mat(0, 0, 0), Z=np.array([1, 1, -1])
        )
        @ affines.compose(
            T=[0, 0, 0], R=euler.euler2mat(np.pi / 2, 0, 0), Z=np.ones(3) * 0.6
        )
        @ affines.compose(T=[0, 0, 0], R=euler.euler2mat(0, np.pi, 0), Z=np.ones(3))
        @ affines.compose(
            T=[dist - 0.5, 2.0, 0], R=euler.euler2mat(0, np.pi / 2, 0), Z=np.ones(3)
        )
        @ affines.compose(
            T=[0, 0, 0], R=euler.euler2mat(0, -np.pi, -np.pi), Z=np.ones(3)
        )
        @ np.linalg.inv(cam_pose)
    )
    scene_gt = parse_gt(
        scene_name=datapoint["scene_id"],
        path_to_exported_scenes=path_to_exported_scenes,
    )
    # NOTE(review): "objids" and "objid_to_class" keys are read here but the
    # visible tail of parse_gt builds neither — verify parse_gt's full output.
    full_xyz_pts = scene_gt["full_xyz_pts"]
    remapped_full_objid_pts = scene_gt["full_objid_pts"]
    full_objid_unique = scene_gt["objids"]
    objid_to_class = scene_gt["objid_to_class"]
    receptacle_masks = scene_gt["receptacle_masks"]
    original_xyz_pts = full_xyz_pts.copy()
    # Apply the alignment transform to the ground-truth points (homogeneous).
    full_xyz_pts = (
        transform
        @ np.concatenate(
            (original_xyz_pts, np.ones(len(original_xyz_pts))[:, None]), axis=1
        ).T
    ).T[:, :3]
    if debug:
        from plot_utils import plot_pointcloud
        mask = filter_pts_bounds(xyz=full_xyz_pts, bounds=scene_bounds)
        fig, ax = plt.subplots(1)
        ax.imshow(datapoint["rgb"])
        plot_pointcloud(
            xyz=full_xyz_pts[mask],
            features=remapped_full_objid_pts[mask],
            object_labels=np.array(objid_to_class),
            show_plot=False,
            delete_fig=False,
        )
        xyz_pts, rgb_pts = get_pointcloud(
            depth_img=datapoint["depth"],
            color_img=datapoint["rgb"],
            cam_intr=cam_intr,
            cam_pose=transform @ cam_pose,
        )
        plot_pointcloud(
            xyz=xyz_pts,
            features=rgb_pts,
            show_plot=True,
        )
        plt.show()
    # process instance
    remapped_seg = -np.ones(datapoint["instance"].shape[:2]).astype(int)
    objects_in_view = {
        color: instance_key
        for color, instance_key in datapoint["color_to_object_id"].items()
        if (datapoint["instance"] == color).all(axis=-1).any()
    }
    remapped_visible_obj_ids = dict()
    for obj_color, instance_key in objects_in_view.items():
        obj_mask = (datapoint["instance"] == obj_color).all(axis=-1)
        if instance_key in full_objid_unique:
            remapped_objid = full_objid_unique.index(instance_key)
        else:
            # project out to 3D, then find class in gt which is spatially closest
            # to projected mask
            xyz_pts, _ = get_pointcloud(
                depth_img=datapoint["depth"],
                color_img=None,
                cam_intr=cam_intr,
                cam_pose=transform @ cam_pose,
            )
            partial_obj_xyz_pts = xyz_pts[obj_mask.reshape(-1), :]
            partial_to_full_distances = dict()
            for int_obj_id, gt_obj_id in enumerate(full_objid_unique):
                if gt_obj_id == "empty":
                    continue
                gt_obj_mask = remapped_full_objid_pts == int_obj_id
                full_obj_xyz_pts = full_xyz_pts[gt_obj_mask, :]
                if len(full_obj_xyz_pts) == 0:
                    continue
                elif len(full_obj_xyz_pts) > 100:
                    # Subsample to bound the pairwise-distance computation.
                    full_obj_xyz_pts = full_obj_xyz_pts[
                        np_rand.choice(len(full_obj_xyz_pts), 100, replace=False), :
                    ]
                distances = (
                    (full_obj_xyz_pts[None, ...] - partial_obj_xyz_pts[:, None, ...])
                    ** 2
                ).sum(axis=2)
                # Sum of each partial point's nearest-neighbor distance.
                all_distances = distances.min(axis=1).sum(axis=0)
                partial_to_full_distances[gt_obj_id] = all_distances
            gt_obj_id = min(partial_to_full_distances.items(), key=lambda v: v[1])[0]
            remapped_objid = full_objid_unique.index(gt_obj_id)
        remapped_visible_obj_ids[instance_key] = remapped_objid
        remapped_seg[obj_mask] = remapped_objid
    # Keep only ground-truth points inside the scene bounds.
    mask = filter_pts_bounds(xyz=full_xyz_pts, bounds=scene_bounds)
    full_xyz_pts = full_xyz_pts[mask, :]
    remapped_full_objid_pts = remapped_full_objid_pts[mask]
    logging.debug(f"NUM PTS: { len(full_xyz_pts)}")
    try:
        indices = np_rand.choice(len(full_xyz_pts), size=num_output_pts, replace=False)
    except Exception as e:
        logging.error("Not enough points")
        logging.error(e)
        return
    remapped_obj_ids = deepcopy(remapped_visible_obj_ids)
    for remapped_id, objid in enumerate(full_objid_unique):
        if objid not in remapped_obj_ids:
            remapped_obj_ids[objid] = remapped_id
    vox_size = 64
    # Fuse the single observed RGB-D frame into a TSDF volume.
    tsdf_vol = TSDFVolume(vol_bnds=scene_bounds.T, voxel_size=2.0 / vox_size)
    tsdf_vol.integrate(
        color_im=datapoint["rgb"],
        depth_im=datapoint["depth"],
        cam_intr=cam_intr,
        cam_pose=transform @ cam_pose,
    )
    tsdf_xyz_pts = tsdf_vol.vox2world(
        tsdf_vol._vol_origin, tsdf_vol.vox_coords, tsdf_vol._voxel_size
    )
    tsdf_value_pts = tsdf_vol.get_volume()[0].reshape(-1)
    # Suffix class names with their integer id, e.g. "mug" -> "mug[7]".
    for objid in range(len(objid_to_class)):
        objid_to_class[objid] = objid_to_class[objid] + f"[{objid}]"
    scene_data = {
        "rgb": datapoint["rgb"][None, ...],
        "domain_randomized_rgb": datapoint["domain_randomized_rgb"][None, ...],
        "depth": datapoint["depth"][None, ...],
        "seg": remapped_seg[None, ...],
        "cam_intr": cam_intr,
        "cam_pose": transform @ cam_pose,
        "scene_bounds": scene_bounds,
        "tsdf_value_pts": tsdf_value_pts[None, ...],
        "tsdf_xyz_pts": tsdf_xyz_pts[None, ...],
        "full_xyz_pts": full_xyz_pts[indices, :][None, ...],
        "full_objid_pts": remapped_full_objid_pts[indices][None, ...],
        "objid_to_class": np.array(objid_to_class).astype("S"),
    }
    vg = VirtualGrid(
        scene_bounds=scene_bounds, grid_shape=tuple([vox_size] * 3), batch_size=1
    )
    query_points = torch.from_numpy(scene_data["full_xyz_pts"])
    grid_indices = (
        vg.get_points_grid_idxs(query_points, cast_to_int=True)[0].cpu().numpy()
    )
    tsdf_vol = tsdf_vol.get_volume()[0]
    # Points in voxels with positive TSDF are considered directly visible.
    visibility_pts_mask = (
        tsdf_vol[grid_indices[:, 0], grid_indices[:, 1], grid_indices[:, 2]] > 0.0
    )
    scene_data["descriptions"] = get_all_relations(
        scene_data=scene_data,
        receptacle_masks={
            receptacle_name: {
                "mask": receptacle_mask[mask][indices],
                "xyz_pts": original_xyz_pts[receptacle_mask],
            }
            for receptacle_name, receptacle_mask in receptacle_masks.items()
        },
        objects_info={
            obj_info["objectId"]: obj_info for obj_info in datapoint["objects_info"]
        },
        remapped_visible_obj_ids=remapped_visible_obj_ids,
        all_remapped_obj_ids=remapped_obj_ids,
        visibility_pts_mask=visibility_pts_mask,
    )
    return scene_data
@ray.remote(num_cpus=1, num_gpus=0.05)
def generate_datapoint(
    scene_ids,
    dataset_dir_path: str,
    seed: int,
    path_to_exported_scenes: str,
    dist: float = 3.0,
    **kwargs,
):
    """Ray worker: render one datapoint for a seed-chosen scene and append it
    to a per-seed HDF5 file. No-op if the output file already exists or any
    stage (simulation / scene-data construction) fails.

    Args:
        scene_ids: candidate scene ids; one is chosen by the seeded RNG.
        dataset_dir_path: directory receiving the HDF5 output.
        seed: RNG seed; also determines the output filename.
        path_to_exported_scenes: directory of exported ground-truth scenes.
        dist: preferred object distance, forwarded to run_simulator.
        **kwargs: extra run_simulator options (e.g. num_attempts, debug).
    """
    np_rand = np.random.RandomState(seed=seed)
    scene_id = np_rand.choice(scene_ids)
    output_path = f"{dataset_dir_path}/{seed:05d}|{scene_id}.hdf5"
    if os.path.exists(output_path):
        return
    # Material randomization is only applied to held-out test scenes.
    domain_randomization = scene_id in test_scenes
    datapoint = run_simulator(
        scene_id=scene_id,
        dist=dist,
        np_rand=np_rand,
        domain_randomization=domain_randomization,
        **kwargs,
    )
    if datapoint is None:
        return
    scene_data = scene_data_from_thor_datapoint(
        datapoint=datapoint,
        dist=dist,
        np_rand=np_rand,
        path_to_exported_scenes=path_to_exported_scenes,
    )
    if scene_data is None:
        return
    init_dataset(output_path, data_structure=data_structure)
    with h5py.File(output_path, "a") as file:
        group = file.create_group(f"data")
        for key, value in scene_data.items():
            if key in data_structure.keys():
                # Large arrays live in shared datasets; the group only keeps
                # region references into them.
                region_references = resize_and_add_data(dataset=file[key], data=value)
                write_to_hdf5(group, key, region_references, dtype=h5py.regionref_dtype)
            else:
                write_to_hdf5(group, key, value)
def generate_gt_scenes(
    scene_ids: List[str], path_to_exported_scenes: str, path_to_custom_unity: str
):
    """Launch the custom Unity build for one scene missing ground-truth
    exports, then terminate the process.

    NOTE(review): the `finally` block runs even on success, so this processes
    at most ONE un-exported scene and then calls exit() — presumably the
    custom Unity executable writes full_xyz_pts.txt/full_objid_pts.txt as a
    side effect of loading the scene; confirm this is the intended workflow.
    """
    np.random.shuffle(scene_ids)
    for scene_id in scene_ids:
        # Skip scenes whose ground-truth files already exist.
        if os.path.exists(
            f"{path_to_exported_scenes}/{scene_id}/full_xyz_pts.txt"
        ) and os.path.exists(
            f"{path_to_exported_scenes}/{scene_id}/full_objid_pts.txt"
        ):
            continue
        controller = None
        try:
            controller = Controller(
                local_executable_path=path_to_custom_unity,
                agentMode="default",
                visibilityDistance=1.5,
                scene=scene_id,
                # step sizes
                gridSize=0.25,
                snapToGrid=True,
                rotateStepDegrees=90,
                # image modalities
                renderDepthImage=True,
                renderInstanceSegmentation=True,
                # camera properties
                width=width,
                height=height,
                fieldOfView=fov_w,
                # render headless
                platform=CloudRendering,
            )
        except Exception as e:
            logging.error(e)
        finally:
            if controller is not None:
                controller.stop()
            exit()
def generate_datapoints(
    dataset_dir_path: str,
    path_to_custom_unity: str,
    path_to_exported_scenes: str,
    num_processes: int,
    num_pts: int,
    start_seed: int,
    local: bool,
):
    """Drive dataset generation: ensure ground-truth exports exist, then keep
    a pool of `num_processes` ray workers busy until `num_pts` seeds
    (starting at `start_seed`) have been consumed.

    Args:
        dataset_dir_path: output directory for per-seed HDF5 files.
        path_to_custom_unity: Unity build used to export missing GT scenes.
        path_to_exported_scenes: directory holding exported GT scenes.
        num_processes: number of concurrent ray worker tasks.
        num_pts: total number of datapoints (seeds) to generate.
        start_seed: first seed value.
        local: run ray in local (single-process, debuggable) mode.
    """
    ray.init(
        log_to_driver=True,
        local_mode=local,
    )
    tasks = []
    scene_ids = sorted(kitchens + living_rooms + bathrooms + bedrooms)
    # Scenes whose ground-truth point files have not been exported yet.
    not_gt_scene_ids = list(
        filter(
            lambda scene_id: not (
                os.path.exists(f"{path_to_exported_scenes}/{scene_id}/full_xyz_pts.txt")
                and os.path.exists(
                    f"{path_to_exported_scenes}/{scene_id}/full_objid_pts.txt"
                )
            ),
            scene_ids,
        )
    )
    logging.info("scenes without gts: " + ", ".join(not_gt_scene_ids))
    if (
        len(not_gt_scene_ids) > 0
        and input(f"There are {len(not_gt_scene_ids)} scenes without gt. Generate?")
        == "y"
    ):
        generate_gt_scenes(
            not_gt_scene_ids, path_to_exported_scenes, path_to_custom_unity
        )
    # Restrict generation to scenes that do have ground truth.
    scene_ids = list(
        filter(
            lambda scene_id: (
                os.path.exists(f"{path_to_exported_scenes}/{scene_id}/full_xyz_pts.txt")
                and os.path.exists(
                    f"{path_to_exported_scenes}/{scene_id}/full_objid_pts.txt"
                )
            ),
            scene_ids,
        )
    )
    seed = start_seed
    # Prime the worker pool with one task per process.
    tasks = [
        generate_datapoint.remote(
            scene_ids=scene_ids,
            dataset_dir_path=dataset_dir_path,
            path_to_exported_scenes=path_to_exported_scenes,
            seed=seed + i,
        )
        for i in range(num_processes)
    ]
    seed += num_processes
    pbar = tqdm(total=num_pts, smoothing=0.001)
    offset = 0
    while seed < start_seed + num_pts:
        # Refill the pool as tasks finish, one replacement per completion.
        readies, tasks = ray.wait(tasks, num_returns=1)
        pbar.update((seed - start_seed) - pbar.n)
        offset += len(readies)
        tasks.extend(
            [
                generate_datapoint.remote(
                    scene_ids=scene_ids,
                    dataset_dir_path=dataset_dir_path,
                    path_to_exported_scenes=path_to_exported_scenes,
                    seed=seed + i,
                )
                for i in range(len(readies))
            ]
        )
        seed += len(readies)
        pbar.set_description(f"CURR SEED: {seed:06d}")
        try:
            ray.get(readies)
        except Exception as e:
            # Worker failures are logged and skipped; generation continues.
            logging.error(e)
            pass
# Module-level HDF5 dataset schema, resolved once at import time so the ray
# workers and the dataset writer agree on array shapes.
data_structure = get_datastructure(
    image_shape=(width, height),
    relevancy_shape=(128, 128),
    clip_hidden_dim=512,
    tsdf_dim=(64, 64, 64),
    num_output_pts=num_output_pts,
)
if __name__ == "__main__":
    # CLI entry point: parse arguments, prepare the output directory, and run
    # the generation loop.
    parser = ArgumentParser()
    parser.add_argument("--dataset_dir_path", type=str, required=True)
    parser.add_argument("--num_processes", type=int, default=1)
    parser.add_argument("--num_pts", type=int, default=50000)
    parser.add_argument("--start_seed", type=int, default=0)
    parser.add_argument("--local", action="store_true", default=False)
    parser.add_argument("--path_to_custom_unity", type=str)
    parser.add_argument("--path_to_exported_scenes", type=str)
    args = parser.parse_args()
    # Recreate the output directory only after interactive confirmation.
    if os.path.exists(args.dataset_dir_path) and (
        input(f"{args.dataset_dir_path} exists. replace?") == "y"
    ):
        shutil.rmtree(args.dataset_dir_path)
        os.mkdir(args.dataset_dir_path)
    elif not os.path.exists(args.dataset_dir_path):
        os.mkdir(args.dataset_dir_path)
    data = generate_datapoints(**vars(args))
| 46,662 | 37.405761 | 91 | py |
semantic-abstraction | semantic-abstraction-main/arm/utils.py | # Adapted from: https://github.com/stepjam/ARM/blob/main/arm/utils.py
import torch
import numpy as np
from scipy.spatial.transform import Rotation
import pyrender
import trimesh
from pyrender.trackball import Trackball
def normalize_quaternion(quat):
    """Return *quat* scaled to unit length (normalized along the last axis)."""
    arr = np.array(quat)
    norm = np.linalg.norm(quat, axis=-1, keepdims=True)
    return arr / norm
def quaternion_to_discrete_euler(quaternion, resolution):
    """Quantize a quaternion into integer euler-angle bins of *resolution* degrees."""
    # Shift xyz euler angles from [-180, 180] into [0, 360].
    shifted = Rotation.from_quat(quaternion).as_euler("xyz", degrees=True) + 180
    assert np.min(shifted) >= 0 and np.max(shifted) <= 360
    bins = np.around(shifted / resolution).astype(int)
    # 360 degrees wraps around to bin 0.
    wrap_bin = int(360 / resolution)
    bins[bins == wrap_bin] = 0
    return bins
def discrete_euler_to_quaternion(discrete_euler, resolution):
    """Invert `quaternion_to_discrete_euler`: map euler bins back to a quaternion.

    Args:
        discrete_euler: integer bin indices for the xyz euler angles.
        resolution: bin width in degrees.

    Returns:
        Quaternion (x, y, z, w) as produced by scipy's `as_quat`.
    """
    # Undo the +180-degree shift applied during discretization.
    # (Also fixes the local-variable typo "euluer".)
    euler = (discrete_euler * resolution) - 180
    return Rotation.from_euler("xyz", euler, degrees=True).as_quat()
def point_to_voxel_index(
    point: np.ndarray, voxel_size: np.ndarray, coord_bounds: np.ndarray
):
    """Map a 3D point to its integer voxel index inside a cubic grid.

    Args:
        point: (3,) xyz coordinates.
        voxel_size: number of voxels per axis (the grid is voxel_size^3).
        coord_bounds: (6,) array [x_min, y_min, z_min, x_max, y_max, z_max].

    Returns:
        (3,) int32 voxel indices, clamped to the last voxel on each axis.
    """
    bb_mins = np.array(coord_bounds[0:3])
    bb_maxs = np.array(coord_bounds[3:])
    dims_m_one = np.array([voxel_size] * 3) - 1
    bb_ranges = bb_maxs - bb_mins
    # Per-axis metric size of one voxel; epsilons guard against division by
    # zero for degenerate bounds.
    res = bb_ranges / (np.array([voxel_size] * 3) + 1e-12)
    # Clamp so points at/above the max bound land in the last voxel.
    # (Local variable renamed from the typo "voxel_indicy".)
    voxel_index = np.minimum(
        np.floor((point - bb_mins) / (res + 1e-12)).astype(np.int32), dims_m_one
    )
    return voxel_index
def stack_on_channel(x):
    """Fold the time dimension of a (B, T, C, ...) tensor into channels,
    producing (B, T*C, ...)."""
    per_step = torch.split(x, 1, dim=1)  # T tensors of shape (B, 1, C, ...)
    stacked = torch.cat(per_step, dim=2)
    return stacked.squeeze(1)
def _compute_initial_camera_pose(scene):
# Adapted from:
# https://github.com/mmatl/pyrender/blob/master/pyrender/viewer.py#L1032
centroid = scene.centroid
scale = scene.scale
# if scale == 0.0:
# scale = DEFAULT_SCENE_SCALE
scale = 4.0
s2 = 1.0 / np.sqrt(2.0)
cp = np.eye(4)
cp[:3, :3] = np.array([[0.0, -s2, s2], [1.0, 0.0, 0.0], [0.0, s2, s2]])
hfov = np.pi / 6.0
dist = scale / (2.0 * np.tan(hfov))
cp[:3, 3] = dist * np.array([1.0, 0.0, 1.0]) + centroid
return cp
def _from_trimesh_scene(trimesh_scene, bg_color=None, ambient_light=None):
    """Build a pyrender.Scene mirroring the geometry of a trimesh scene."""
    # Convert every trimesh geometry into a (non-smoothed) pyrender mesh.
    meshes = {
        name: pyrender.Mesh.from_trimesh(geom, smooth=False)
        for name, geom in trimesh_scene.geometry.items()
    }
    out_scene = pyrender.Scene(bg_color=bg_color, ambient_light=ambient_light)
    # Re-attach each geometry node at its original pose.
    for node in trimesh_scene.graph.nodes_geometry:
        node_pose, geom_key = trimesh_scene.graph[node]
        out_scene.add(meshes[geom_key], pose=node_pose)
    return out_scene
def create_voxel_scene(
    voxel_grid: np.ndarray,
    q_attention: np.ndarray = None,
    highlight_coordinate: np.ndarray = None,
    highlight_gt_coordinate: np.ndarray = None,
    highlight_alpha: float = 1.0,
    voxel_size: float = 0.1,
    show_bb: bool = False,
    alpha: float = 0.5,
):
    """Build a trimesh scene of colored boxes from a (C, D, H, W) voxel grid.

    Occupancy is taken from the last channel; RGB from channels 3:6 (assumed
    to be in [-1, 1] — TODO confirm against the voxelizer). Optional overlays:
    a red-tinted attention heatmap, a red predicted-coordinate voxel and a
    blue ground-truth voxel.
    """
    _, d, h, w = voxel_grid.shape
    # Channels-last view so per-voxel fields can be indexed with [..., c].
    v = voxel_grid.transpose((1, 2, 3, 0))
    occupancy = v[:, :, :, -1] != 0
    # NOTE: `alpha` is rebound from a float to a (D, H, W, 1) array here.
    alpha = np.expand_dims(np.full_like(occupancy, alpha, dtype=np.float32), -1)
    rgb = np.concatenate([(v[:, :, :, 3:6] + 1) / 2.0, alpha], axis=-1)
    if q_attention is not None:
        # Collapse attention channels, normalize to [0, 1], and show only
        # high-attention voxels (threshold 0.75) in red.
        q = np.max(q_attention, 0)
        q = q / np.max(q)
        show_q = q > 0.75
        occupancy = (show_q + occupancy).astype(bool)
        q = np.expand_dims(q - 0.5, -1)  # Max q can be is 0.9
        q_rgb = np.concatenate(
            [q, np.zeros_like(q), np.zeros_like(q), np.clip(q, 0, 1)], axis=-1
        )
        rgb = np.where(np.expand_dims(show_q, -1), q_rgb, rgb)
    if highlight_coordinate is not None:
        # Predicted coordinate: force-occupy and paint red.
        x, y, z = highlight_coordinate
        occupancy[x, y, z] = True
        rgb[x, y, z] = [1.0, 0.0, 0.0, highlight_alpha]
    if highlight_gt_coordinate is not None:
        # Ground-truth coordinate: force-occupy and paint blue.
        x, y, z = highlight_gt_coordinate
        occupancy[x, y, z] = True
        rgb[x, y, z] = [0.0, 0.0, 1.0, highlight_alpha]
    # Scale voxel indices into world units; origin stays at (0, 0, 0).
    transform = trimesh.transformations.scale_and_translate(
        scale=voxel_size, translate=(0.0, 0.0, 0.0)
    )
    trimesh_voxel_grid = trimesh.voxel.VoxelGrid(
        encoding=occupancy, transform=transform
    )
    geometry = trimesh_voxel_grid.as_boxes(colors=rgb)
    scene = trimesh.Scene()
    scene.add_geometry(geometry)
    if show_bb:
        # Bounding-box overlay only supports cubic grids.
        # NOTE(review): _create_bounding_box is defined elsewhere in this
        # module (not visible here) — confirm it exists before enabling.
        assert d == h == w
        _create_bounding_box(scene, voxel_size, d)
    return scene
def visualise_voxel(
    voxel_grid: np.ndarray,
    q_attention: np.ndarray = None,
    highlight_coordinate: np.ndarray = None,
    highlight_gt_coordinate: np.ndarray = None,
    highlight_alpha: float = 1.0,
    rotation_amount: float = 0.0,
    show: bool = False,
    voxel_size: float = 0.1,
    offscreen_renderer: pyrender.OffscreenRenderer = None,
    show_bb: bool = False,
    alpha: float = 0.5,
    render_gripper=False,
    gripper_pose=None,
    gripper_mesh_scale=1.0,
):
    """Render a voxel grid (plus optional overlays) to an RGB image.

    If show=True an interactive trimesh viewer is opened instead and None is
    returned. Otherwise the scene is rendered offscreen at 1920x1080 (or via
    the caller-supplied renderer) and the color image is returned; the depth
    buffer is discarded.
    """
    scene = create_voxel_scene(
        voxel_grid,
        q_attention,
        highlight_coordinate,
        highlight_gt_coordinate,
        highlight_alpha,
        voxel_size,
        show_bb,
        alpha,
    )
    if show:
        scene.show()
    else:
        r = offscreen_renderer or pyrender.OffscreenRenderer(
            viewport_width=1920, viewport_height=1080, point_size=1.0
        )
        s = _from_trimesh_scene(
            scene, ambient_light=[0.8, 0.8, 0.8], bg_color=[1.0, 1.0, 1.0]
        )
        cam = pyrender.PerspectiveCamera(
            yfov=np.pi / 4.0, aspectRatio=r.viewport_width / r.viewport_height
        )
        # Start from the canonical diagonal view, then spin about +z.
        p = _compute_initial_camera_pose(s)
        t = Trackball(p, (r.viewport_width, r.viewport_height), s.scale, s.centroid)
        t.rotate(rotation_amount, np.array([0.0, 0.0, 1.0]))
        s.add(cam, pose=t.pose)
        if render_gripper:
            # Load the gripper mesh and color vertices by distance from the
            # center of mass for visual contrast.
            gripper_trimesh = trimesh.load("peract_colab/meshes/hand.dae", force="mesh")
            gripper_trimesh.vertices *= gripper_mesh_scale
            radii = np.linalg.norm(
                gripper_trimesh.vertices - gripper_trimesh.center_mass, axis=1
            )
            gripper_trimesh.visual.vertex_colors = trimesh.visual.interpolate(
                radii * gripper_mesh_scale, color_map="winter"
            )
            gripper_mesh = pyrender.Mesh.from_trimesh(
                gripper_trimesh, poses=np.array([gripper_pose]), smooth=False
            )
            s.add(gripper_mesh)
        # depth is computed by the renderer but intentionally unused here.
        color, depth = r.render(s)
        return color.copy()
def get_gripper_render_pose(
    voxel_scale, scene_bound_origin, continuous_trans, continuous_quat
):
    """Compose a 4x4 world pose for rendering the gripper mesh in voxel space.

    The fingertip-to-gripper-base offset (0.1 * voxel_scale along local z) is
    rotated into world frame and subtracted so the fingertip, not the base,
    lands on the target translation.
    """
    # Fingertip offset expressed as a homogeneous transform.
    tip_offset = np.eye(4)
    tip_offset[2, 3] = 0.1 * voxel_scale
    # Translation relative to the scene origin, scaled into voxel units.
    shifted = (continuous_trans - np.array(scene_bound_origin[:3])) * voxel_scale
    pose = np.eye(4, 4)
    pose[:3, :3] = Rotation.from_quat(
        [continuous_quat[0], continuous_quat[1], continuous_quat[2], continuous_quat[3]]
    ).as_matrix()
    rotated_offset = pose @ tip_offset
    pose[:3, 3] = shifted - rotated_offset[:3, 3]
    return pose
| 7,072 | 32.842105 | 88 | py |
semantic-abstraction | semantic-abstraction-main/arm/network_utils.py | # Adapted from https://github.com/stepjam/ARM/blob/main/arm/network_utils.py
import copy
from typing import List, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Negative-slope coefficient shared by every LeakyReLU (and its kaiming init)
# in this module.
LRELU_SLOPE = 0.02
def act_layer(act):
    """Instantiate the activation module named by *act*.

    Supported names: relu, lrelu, elu, tanh, prelu.
    Raises ValueError for anything else.
    """
    factories = {
        "relu": nn.ReLU,
        "lrelu": lambda: nn.LeakyReLU(LRELU_SLOPE),
        "elu": nn.ELU,
        "tanh": nn.Tanh,
        "prelu": nn.PReLU,
    }
    factory = factories.get(act) if isinstance(act, str) else None
    if factory is None:
        raise ValueError("%s not recognized." % act)
    return factory()
def norm_layer2d(norm, channels):
    """Instantiate a 2D normalization layer by name.

    "layer" and "group" both map onto GroupNorm (1 and 4 groups respectively).
    Raises ValueError for unknown names.
    """
    builders = {
        "batch": lambda c: nn.BatchNorm2d(c),
        "instance": lambda c: nn.InstanceNorm2d(c, affine=True),
        "layer": lambda c: nn.GroupNorm(1, c, affine=True),
        "group": lambda c: nn.GroupNorm(4, c, affine=True),
    }
    builder = builders.get(norm) if isinstance(norm, str) else None
    if builder is None:
        raise ValueError("%s not recognized." % norm)
    return builder(channels)
def norm_layer1d(norm, num_channels):
    """Instantiate a 1D normalization layer by name.

    Raises ValueError for unknown names.
    """
    builders = {
        "batch": lambda c: nn.BatchNorm1d(c),
        "instance": lambda c: nn.InstanceNorm1d(c, affine=True),
        "layer": lambda c: nn.LayerNorm(c),
    }
    builder = builders.get(norm) if isinstance(norm, str) else None
    if builder is None:
        raise ValueError("%s not recognized." % norm)
    return builder(num_channels)
class FiLMBlock(nn.Module):
    """Feature-wise linear modulation: y = gamma * x + beta, per channel."""

    def __init__(self):
        super(FiLMBlock, self).__init__()

    def forward(self, x, gamma, beta):
        batch, channels = x.size(0), x.size(1)
        # Broadcast the per-channel scale/shift over the spatial dims.
        scale = gamma.view(batch, channels, 1, 1)
        shift = beta.view(batch, channels, 1, 1)
        return scale * x + shift
class Conv2DBlock(nn.Module):
    """Conv2d followed by optional normalization and optional activation.

    Weights are initialized to match the chosen activation (Xavier for
    linear/tanh, Kaiming for relu/lrelu); biases start at zero.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_sizes,
        strides,
        norm=None,
        activation=None,
        padding_mode="replicate",
    ):
        super(Conv2DBlock, self).__init__()
        # "Same"-style padding derived from the kernel size(s).
        if isinstance(kernel_sizes, int):
            padding = kernel_sizes // 2
        else:
            padding = (kernel_sizes[0] // 2, kernel_sizes[1] // 2)
        self.conv2d = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_sizes,
            strides,
            padding=padding,
            padding_mode=padding_mode,
        )
        self._init_conv(self.conv2d, activation)
        self.activation = None
        self.norm = None
        if norm is not None:
            self.norm = norm_layer2d(norm, out_channels)
        if activation is not None:
            self.activation = act_layer(activation)

    @staticmethod
    def _init_conv(conv, activation):
        # Pick an init scheme matching the nonlinearity that follows.
        if activation is None:
            nn.init.xavier_uniform_(conv.weight, gain=nn.init.calculate_gain("linear"))
        elif activation == "tanh":
            nn.init.xavier_uniform_(conv.weight, gain=nn.init.calculate_gain("tanh"))
        elif activation == "lrelu":
            nn.init.kaiming_uniform_(
                conv.weight, a=LRELU_SLOPE, nonlinearity="leaky_relu"
            )
        elif activation == "relu":
            nn.init.kaiming_uniform_(conv.weight, nonlinearity="relu")
        else:
            raise ValueError()
        nn.init.zeros_(conv.bias)

    def forward(self, x):
        out = self.conv2d(x)
        if self.norm is not None:
            out = self.norm(out)
        if self.activation is not None:
            out = self.activation(out)
        return out
class Conv2DFiLMBlock(Conv2DBlock):
    """Conv2DBlock with FiLM conditioning applied between norm and activation."""

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_sizes,
        strides,
        norm=None,
        activation=None,
        padding_mode="replicate",
    ):
        super(Conv2DFiLMBlock, self).__init__(
            in_channels,
            out_channels,
            kernel_sizes,
            strides,
            norm,
            activation,
            padding_mode,
        )
        self.film = FiLMBlock()

    def forward(self, x, gamma, beta):
        out = self.conv2d(x)
        if self.norm is not None:
            out = self.norm(out)
        # FiLM modulation happens before the activation, mirroring the base
        # block's conv -> norm -> activation ordering.
        out = self.film(out, gamma, beta)
        if self.activation is not None:
            out = self.activation(out)
        return out
class Conv3DBlock(nn.Module):
    """Conv3d followed by an optional activation.

    Normalization is not supported and raises NotImplementedError.
    Weights are initialized to match the chosen activation; biases are zero.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_sizes: Union[int, list] = 3,
        strides=1,
        norm=None,
        activation=None,
        padding_mode="replicate",
        padding=None,
    ):
        super(Conv3DBlock, self).__init__()
        if padding is None:
            # Default to "same"-style padding for odd kernels.
            padding = kernel_sizes // 2
        self.conv3d = nn.Conv3d(
            in_channels,
            out_channels,
            kernel_sizes,
            strides,
            padding=padding,
            padding_mode=padding_mode,
        )
        self._init_conv(self.conv3d, activation)
        self.activation = None
        self.norm = None
        if norm is not None:
            raise NotImplementedError("Norm not implemented.")
        if activation is not None:
            self.activation = act_layer(activation)
        self.out_channels = out_channels

    @staticmethod
    def _init_conv(conv, activation):
        # Pick an init scheme matching the nonlinearity that follows.
        if activation is None:
            nn.init.xavier_uniform_(conv.weight, gain=nn.init.calculate_gain("linear"))
        elif activation == "tanh":
            nn.init.xavier_uniform_(conv.weight, gain=nn.init.calculate_gain("tanh"))
        elif activation == "lrelu":
            nn.init.kaiming_uniform_(
                conv.weight, a=LRELU_SLOPE, nonlinearity="leaky_relu"
            )
        elif activation == "relu":
            nn.init.kaiming_uniform_(conv.weight, nonlinearity="relu")
        else:
            raise ValueError()
        nn.init.zeros_(conv.bias)

    def forward(self, x):
        out = self.conv3d(x)
        if self.norm is not None:
            out = self.norm(out)
        if self.activation is not None:
            out = self.activation(out)
        return out
class ConvTranspose3DBlock(nn.Module):
    """ConvTranspose3d followed by an optional activation.

    Normalization is not supported: the previous implementation called the
    undefined ``norm_layer3d`` (only 2D/1D factories exist in this module),
    which raised NameError whenever ``norm`` was passed. It now raises
    NotImplementedError, consistent with Conv3DBlock.

    Weights are initialized to match the chosen activation (Xavier for
    linear/tanh, Kaiming for relu/lrelu); biases start at zero.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_sizes: Union[int, list],
        strides,
        norm=None,
        activation=None,
        padding_mode="zeros",
        padding=None,
    ):
        super(ConvTranspose3DBlock, self).__init__()
        padding = kernel_sizes // 2 if padding is None else padding
        self.conv3d = nn.ConvTranspose3d(
            in_channels,
            out_channels,
            kernel_sizes,
            strides,
            padding=padding,
            padding_mode=padding_mode,
        )
        # Initialization scheme matched to the activation, as elsewhere.
        if activation is None:
            nn.init.xavier_uniform_(
                self.conv3d.weight, gain=nn.init.calculate_gain("linear")
            )
        elif activation == "tanh":
            nn.init.xavier_uniform_(
                self.conv3d.weight, gain=nn.init.calculate_gain("tanh")
            )
        elif activation == "lrelu":
            nn.init.kaiming_uniform_(
                self.conv3d.weight, a=LRELU_SLOPE, nonlinearity="leaky_relu"
            )
        elif activation == "relu":
            nn.init.kaiming_uniform_(self.conv3d.weight, nonlinearity="relu")
        else:
            raise ValueError()
        nn.init.zeros_(self.conv3d.bias)
        self.activation = None
        self.norm = None
        if norm is not None:
            # BUG FIX: previously called undefined norm_layer3d -> NameError.
            raise NotImplementedError("Norm not implemented.")
        if activation is not None:
            self.activation = act_layer(activation)

    def forward(self, x):
        x = self.conv3d(x)
        x = self.norm(x) if self.norm is not None else x
        x = self.activation(x) if self.activation is not None else x
        return x
class Conv2DUpsampleBlock(nn.Module):
    """Conv block -> (optional bilinear upsample by `strides`) -> conv block."""

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_sizes,
        strides,
        norm=None,
        activation=None,
    ):
        super(Conv2DUpsampleBlock, self).__init__()
        stages = [
            Conv2DBlock(in_channels, out_channels, kernel_sizes, 1, norm, activation)
        ]
        if strides > 1:
            stages.append(
                nn.Upsample(scale_factor=strides, mode="bilinear", align_corners=False)
            )
        stages.append(
            Conv2DBlock(out_channels, out_channels, kernel_sizes, 1, norm, activation)
        )
        self.conv_up = nn.Sequential(*stages)

    def forward(self, x):
        return self.conv_up(x)
class Conv3DUpsampleBlock(nn.Module):
    """Conv block -> (optional trilinear upsample by `strides`) -> conv block."""

    def __init__(
        self,
        in_channels,
        out_channels,
        strides,
        kernel_sizes=3,
        norm=None,
        activation=None,
    ):
        super(Conv3DUpsampleBlock, self).__init__()
        stages = [
            Conv3DBlock(in_channels, out_channels, kernel_sizes, 1, norm, activation)
        ]
        if strides > 1:
            stages.append(
                nn.Upsample(scale_factor=strides, mode="trilinear", align_corners=False)
            )
        stages.append(
            Conv3DBlock(out_channels, out_channels, kernel_sizes, 1, norm, activation)
        )
        self.conv_up = nn.Sequential(*stages)

    def forward(self, x):
        return self.conv_up(x)
class DenseBlock(nn.Module):
    """Linear layer + optional 1D normalization + optional activation.

    Weights are initialized to match the chosen activation; biases are zero.
    """

    def __init__(self, in_features, out_features, norm=None, activation=None):
        super(DenseBlock, self).__init__()
        self.linear = nn.Linear(in_features, out_features)
        self._init_linear(self.linear, activation)
        self.activation = None
        self.norm = None
        if norm is not None:
            self.norm = norm_layer1d(norm, out_features)
        if activation is not None:
            self.activation = act_layer(activation)

    @staticmethod
    def _init_linear(linear, activation):
        # Pick an init scheme matching the nonlinearity that follows.
        if activation is None:
            nn.init.xavier_uniform_(
                linear.weight, gain=nn.init.calculate_gain("linear")
            )
        elif activation == "tanh":
            nn.init.xavier_uniform_(linear.weight, gain=nn.init.calculate_gain("tanh"))
        elif activation == "lrelu":
            nn.init.kaiming_uniform_(
                linear.weight, a=LRELU_SLOPE, nonlinearity="leaky_relu"
            )
        elif activation == "relu":
            nn.init.kaiming_uniform_(linear.weight, nonlinearity="relu")
        else:
            raise ValueError()
        nn.init.zeros_(linear.bias)

    def forward(self, x):
        out = self.linear(x)
        if self.norm is not None:
            out = self.norm(out)
        if self.activation is not None:
            out = self.activation(out)
        return out
class SiameseNet(nn.Module):
    """Per-input CNN streams with a shared architecture, fused by a 1x1 conv.

    build() must be called before the first forward pass; forward expects a
    list of tensors, one per configured input stream.
    """

    def __init__(
        self,
        input_channels: List[int],
        filters: List[int],
        kernel_sizes: List[int],
        strides: List[int],
        norm: str = None,
        activation: str = "relu",
    ):
        super(SiameseNet, self).__init__()
        self._input_channels = input_channels
        self._filters = filters
        self._kernel_sizes = kernel_sizes
        self._strides = strides
        self._norm = norm
        self._activation = activation
        self.output_channels = filters[-1]  # * len(input_channels)

    def build(self):
        self._siamese_blocks = nn.ModuleList()
        for ch in self._input_channels:
            stream_layers = []
            # NOTE(review): every conv in a stream takes `ch` input channels,
            # which only chains correctly when len(filters) == 1 (or all
            # filters equal ch) -- confirm this is intended.
            for filt, ksize, stride in zip(
                self._filters, self._kernel_sizes, self._strides
            ):
                stream_layers.append(
                    Conv2DBlock(ch, filt, ksize, stride, self._norm, self._activation)
                )
            self._siamese_blocks.append(nn.Sequential(*stream_layers))
        # Concatenate all stream outputs and fuse back down with a 1x1 conv.
        self._fuse = Conv2DBlock(
            self._filters[-1] * len(self._siamese_blocks),
            self._filters[-1],
            1,
            1,
            self._norm,
            self._activation,
        )

    def forward(self, x):
        if len(x) != len(self._siamese_blocks):
            raise ValueError(
                "Expected a list of tensors of size %d." % len(self._siamese_blocks)
            )
        # Kept on self so callers can inspect per-stream activations.
        self.streams = [stream(y) for y, stream in zip(x, self._siamese_blocks)]
        return self._fuse(torch.cat(self.streams, 1))
class CNNAndFcsNet(nn.Module):
    """Siamese CNN trunk + proprioception-conditioned CNN head + MLP.

    The low-dimensional state vector is tiled spatially and concatenated to
    the siamese features before the conv stack; features are globally
    max-pooled before the fully-connected layers. build() must be called
    before the first forward pass.
    """

    def __init__(
        self,
        siamese_net: SiameseNet,
        low_dim_state_len: int,
        input_resolution: List[int],
        filters: List[int],
        kernel_sizes: List[int],
        strides: List[int],
        norm: str = None,
        fc_layers: List[int] = None,
        activation: str = "relu",
    ):
        super(CNNAndFcsNet, self).__init__()
        # Deep copy so this network owns (and independently builds) its trunk.
        self._siamese_net = copy.deepcopy(siamese_net)
        self._input_channels = self._siamese_net.output_channels + low_dim_state_len
        self._filters = filters
        self._kernel_sizes = kernel_sizes
        self._strides = strides
        self._norm = norm
        self._activation = activation
        self._fc_layers = [] if fc_layers is None else fc_layers
        self._input_resolution = input_resolution

    def build(self):
        self._siamese_net.build()
        layers = []
        channels = self._input_channels
        # All conv layers except the last share norm/activation settings.
        for i, (filt, ksize, stride) in enumerate(
            list(zip(self._filters, self._kernel_sizes, self._strides))[:-1]
        ):
            layers.append(
                Conv2DBlock(channels, filt, ksize, stride, self._norm, self._activation)
            )
            channels = filt
        # Final conv layer has no norm/activation (linear output features).
        layers.append(
            Conv2DBlock(
                channels, self._filters[-1], self._kernel_sizes[-1], self._strides[-1]
            )
        )
        self._cnn = nn.Sequential(*layers)
        self._maxp = nn.AdaptiveMaxPool2d(1)
        channels = self._filters[-1]
        dense_layers = []
        # Hidden FC layers use the activation; the last FC layer is linear.
        for n in self._fc_layers[:-1]:
            dense_layers.append(DenseBlock(channels, n, activation=self._activation))
            channels = n
        dense_layers.append(DenseBlock(channels, self._fc_layers[-1]))
        self._fcs = nn.Sequential(*dense_layers)

    def forward(self, observations, low_dim_ins):
        x = self._siamese_net(observations)
        _, _, h, w = x.shape
        # Tile the low-dim state over the spatial grid so it can be
        # concatenated channel-wise with the visual features.
        low_dim_latents = low_dim_ins.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, h, w)
        combined = torch.cat([x, low_dim_latents], dim=1)
        x = self._cnn(combined)
        # Global max-pool to a (B, C) vector for the MLP head.
        x = self._maxp(x).squeeze(-1).squeeze(-1)
        return self._fcs(x)
class CNNLangAndFcsNet(nn.Module):
    """Language-conditioned variant of CNNAndFcsNet.

    Three FiLM-modulated conv layers condition the visual+proprioceptive
    features on a 1024-d language goal embedding (per-layer gamma/beta come
    from learned linear projections). build() must be called before the
    first forward pass.
    """

    def __init__(
        self,
        siamese_net: SiameseNet,
        low_dim_state_len: int,
        input_resolution: List[int],
        filters: List[int],
        kernel_sizes: List[int],
        strides: List[int],
        norm: str = None,
        fc_layers: List[int] = None,
        activation: str = "relu",
    ):
        super(CNNLangAndFcsNet, self).__init__()
        # Deep copy so this network owns (and independently builds) its trunk.
        self._siamese_net = copy.deepcopy(siamese_net)
        self._input_channels = self._siamese_net.output_channels + low_dim_state_len
        self._filters = filters
        self._kernel_sizes = kernel_sizes
        self._strides = strides
        self._norm = norm
        self._activation = activation
        self._fc_layers = [] if fc_layers is None else fc_layers
        self._input_resolution = input_resolution
        # Dimensionality of the language goal embedding (e.g. CLIP features).
        self._lang_feat_dim = 1024

    def build(self):
        self._siamese_net.build()
        # NOTE(review): `layers` is assigned but never used here.
        layers = []
        channels = self._input_channels
        # Three FiLM conv stages; each has its own gamma/beta projections
        # from the language embedding.
        self.conv1 = Conv2DFiLMBlock(
            channels, self._filters[0], self._kernel_sizes[0], self._strides[0]
        )
        self.gamma1 = nn.Linear(self._lang_feat_dim, self._filters[0])
        self.beta1 = nn.Linear(self._lang_feat_dim, self._filters[0])
        self.conv2 = Conv2DFiLMBlock(
            self._filters[0], self._filters[1], self._kernel_sizes[1], self._strides[1]
        )
        self.gamma2 = nn.Linear(self._lang_feat_dim, self._filters[1])
        self.beta2 = nn.Linear(self._lang_feat_dim, self._filters[1])
        self.conv3 = Conv2DFiLMBlock(
            self._filters[1], self._filters[2], self._kernel_sizes[2], self._strides[2]
        )
        self.gamma3 = nn.Linear(self._lang_feat_dim, self._filters[2])
        self.beta3 = nn.Linear(self._lang_feat_dim, self._filters[2])
        self._maxp = nn.AdaptiveMaxPool2d(1)
        channels = self._filters[-1]
        dense_layers = []
        # Hidden FC layers use the activation; the last FC layer is linear.
        for n in self._fc_layers[:-1]:
            dense_layers.append(DenseBlock(channels, n, activation=self._activation))
            channels = n
        dense_layers.append(DenseBlock(channels, self._fc_layers[-1]))
        self._fcs = nn.Sequential(*dense_layers)

    def forward(self, observations, low_dim_ins, lang_goal_feats):
        x = self._siamese_net(observations)
        _, _, h, w = x.shape
        # Tile the low-dim state spatially and concatenate channel-wise.
        low_dim_latents = low_dim_ins.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, h, w)
        combined = torch.cat([x, low_dim_latents], dim=1)
        g1 = self.gamma1(lang_goal_feats)
        b1 = self.beta1(lang_goal_feats)
        x = self.conv1(combined, g1, b1)
        g2 = self.gamma2(lang_goal_feats)
        b2 = self.beta2(lang_goal_feats)
        x = self.conv2(x, g2, b2)
        g3 = self.gamma3(lang_goal_feats)
        b3 = self.beta3(lang_goal_feats)
        x = self.conv3(x, g3, b3)
        # Global max-pool to a (B, C) vector for the MLP head.
        x = self._maxp(x).squeeze(-1).squeeze(-1)
        return self._fcs(x)
class Conv3DInceptionBlockUpsampleBlock(nn.Module):
    """Inception block -> (optional trilinear upsample) -> inception block."""

    def __init__(
        self,
        in_channels,
        out_channels,
        scale_factor,
        norm=None,
        activation=None,
        residual=False,
    ):
        super(Conv3DInceptionBlockUpsampleBlock, self).__init__()
        # NOTE(review): `residual` is accepted but never forwarded -- confirm.
        stages = [Conv3DInceptionBlock(in_channels, out_channels, norm, activation)]
        if scale_factor > 1:
            stages.append(
                nn.Upsample(
                    scale_factor=scale_factor, mode="trilinear", align_corners=False
                )
            )
        stages.append(
            Conv3DInceptionBlock(out_channels, out_channels, norm, activation)
        )
        self.conv_up = nn.Sequential(*stages)

    def forward(self, x):
        return self.conv_up(x)
class Conv3DInceptionBlock(nn.Module):
    """Inception-style 3D block with parallel 1x1, 3x3 and 5x5-via-two-3x3 paths.

    The paths emit out_channels//4 * {2, 1, 1} channels respectively so their
    concatenation has exactly out_channels; with residual=True the input is
    concatenated as well (reflected in self.out_channels).
    """

    def __init__(
        self, in_channels, out_channels, norm=None, activation=None, residual=False
    ):
        super(Conv3DInceptionBlock, self).__init__()
        self._residual = residual
        quarter = out_channels // 4
        assert out_channels % 4 == 0
        # Bottleneck width feeding the 3x3 / 5x5 paths.
        latent = 32
        # Submodules are created in the same order as registered buffers/params
        # elsewhere expect (also keeps RNG-driven init reproducible).
        self._1x1conv = Conv3DBlock(
            in_channels,
            quarter * 2,
            kernel_sizes=1,
            strides=1,
            norm=norm,
            activation=activation,
        )
        self._1x1conv_a = Conv3DBlock(
            in_channels,
            latent,
            kernel_sizes=1,
            strides=1,
            norm=norm,
            activation=activation,
        )
        self._3x3conv = Conv3DBlock(
            latent, quarter, kernel_sizes=3, strides=1, norm=norm, activation=activation
        )
        self._1x1conv_b = Conv3DBlock(
            in_channels,
            latent,
            kernel_sizes=1,
            strides=1,
            norm=norm,
            activation=activation,
        )
        self._5x5_via_3x3conv_a = Conv3DBlock(
            latent, latent, kernel_sizes=3, strides=1, norm=norm, activation=activation
        )
        self._5x5_via_3x3conv_b = Conv3DBlock(
            latent, quarter, kernel_sizes=3, strides=1, norm=norm, activation=activation
        )
        self.out_channels = out_channels + (in_channels if residual else 0)

    def forward(self, x):
        paths = [
            self._1x1conv(x),
            self._3x3conv(self._1x1conv_a(x)),
            self._5x5_via_3x3conv_b(self._5x5_via_3x3conv_a(self._1x1conv_b(x))),
        ]
        if self._residual:
            paths = [x] + paths
        return torch.cat(paths, 1)
class ConvTransposeUp3DBlock(nn.Module):
    """1x1 conv -> strided transposed conv (upsample) -> 1x1 conv."""

    def __init__(
        self,
        in_channels,
        out_channels,
        strides=2,
        padding=0,
        norm=None,
        activation=None,
        residual=False,
    ):
        super(ConvTransposeUp3DBlock, self).__init__()
        # NOTE(review): `_residual` is stored but never used -- confirm.
        self._residual = residual
        self._1x1conv = Conv3DBlock(
            in_channels,
            out_channels,
            kernel_sizes=1,
            strides=1,
            norm=norm,
            activation=activation,
        )
        self._3x3conv = ConvTranspose3DBlock(
            out_channels,
            out_channels,
            kernel_sizes=2,
            strides=strides,
            norm=norm,
            activation=activation,
            padding=padding,
        )
        # Final projection has no activation.
        self._1x1conv_a = Conv3DBlock(
            out_channels,
            out_channels,
            kernel_sizes=1,
            strides=1,
            norm=norm,
        )
        self.out_channels = out_channels

    def forward(self, x):
        stage = self._1x1conv(x)
        stage = self._3x3conv(stage)
        return self._1x1conv_a(stage)
class SpatialSoftmax3D(torch.nn.Module):
    """Soft-argmax over a 3D feature volume.

    For each channel, a softmax with a fixed low temperature over all voxels
    produces expected (x, y, z) coordinates in [-1, 1], yielding channel * 3
    keypoint values per sample.
    """

    def __init__(self, depth, height, width, channel):
        super(SpatialSoftmax3D, self).__init__()
        self.depth = depth
        self.height = height
        self.width = width
        self.channel = channel
        # Low temperature sharpens the softmax toward a hard argmax.
        self.temperature = 0.01
        coord_grids = np.meshgrid(
            np.linspace(-1.0, 1.0, self.depth),
            np.linspace(-1.0, 1.0, self.height),
            np.linspace(-1.0, 1.0, self.width),
        )
        n_voxels = self.depth * self.height * self.width
        # Flattened coordinate lookup tables, one per axis, stored as buffers
        # so they follow the module across devices.
        for buf_name, grid in zip(("pos_x", "pos_y", "pos_z"), coord_grids):
            self.register_buffer(
                buf_name, torch.from_numpy(grid.reshape(n_voxels)).float()
            )

    def forward(self, feature):
        flat = feature.view(-1, self.height * self.width * self.depth)  # (B, c*d*h*w)
        weights = F.softmax(flat / self.temperature, dim=-1)
        ex = torch.sum(self.pos_x * weights, dim=1, keepdim=True)
        ey = torch.sum(self.pos_y * weights, dim=1, keepdim=True)
        ez = torch.sum(self.pos_z * weights, dim=1, keepdim=True)
        coords = torch.cat([ex, ey, ez], 1)
        return coords.view(-1, self.channel * 3)
| 23,208 | 30.363514 | 88 | py |
semantic-abstraction | semantic-abstraction-main/arm/optim/lamb.py | # From https://github.com/cybertronai/pytorch-lamb/blob/master/pytorch_lamb/lamb.py
"""Lamb optimizer."""
import collections
import math
import torch
from torch.optim import Optimizer
# def log_lamb_rs(optimizer: Optimizer, event_writer: SummaryWriter, token_count: int):
# """Log a histogram of trust ratio scalars in across layers."""
# results = collections.defaultdict(list)
# for group in optimizer.param_groups:
# for p in group['params']:
# state = optimizer.state[p]
# for i in ('weight_norm', 'adam_norm', 'trust_ratio'):
# if i in state:
# results[i].append(state[i])
#
# for k, v in results.items():
# event_writer.add_histogram(f'lamb/{k}', torch.tensor(v), token_count)
class Lamb(Optimizer):
    r"""Implements Lamb algorithm.
    It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-6)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        adam (bool, optional): always use trust ratio = 1, which turns this into
            Adam. Useful for comparison purposes.
    .. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
        https://arxiv.org/abs/1904.00962
    """

    def __init__(
        self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0, adam=False
    ):
        # Validate hyperparameters before handing them to the base Optimizer.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        self.adam = adam
        super(Lamb, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        "Lamb does not support sparse gradients, consider SparseAdam instad."
                    )

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state["step"] = 0
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                beta1, beta2 = group["betas"]

                state["step"] += 1

                # Decay the first and second moment running average coefficient
                # m_t
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                # v_t
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

                # Paper v3 does not use debiasing.
                # bias_correction1 = 1 - beta1 ** state['step']
                # bias_correction2 = 1 - beta2 ** state['step']
                # Apply bias to lr to avoid broadcast.
                step_size = group[
                    "lr"
                ]  # * math.sqrt(bias_correction2) / bias_correction1

                # Layer-wise weight norm, clipped to [0, 10] per the paper.
                weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)

                # Adam-style update direction (optionally with decoupled
                # weight decay folded in before computing the trust ratio).
                adam_step = exp_avg / exp_avg_sq.sqrt().add(group["eps"])
                if group["weight_decay"] != 0:
                    adam_step.add_(p.data, alpha=group["weight_decay"])

                adam_norm = adam_step.pow(2).sum().sqrt()
                # Trust ratio rescales the update per layer; degenerate norms
                # fall back to 1 (plain Adam step).
                if weight_norm == 0 or adam_norm == 0:
                    trust_ratio = 1
                else:
                    trust_ratio = weight_norm / adam_norm
                # Stash diagnostics for external logging.
                state["weight_norm"] = weight_norm
                state["adam_norm"] = adam_norm
                state["trust_ratio"] = trust_ratio
                if self.adam:
                    trust_ratio = 1

                p.data.add_(adam_step, alpha=-step_size * trust_ratio)

        return loss
| 5,163 | 39.34375 | 103 | py |
semantic-abstraction | semantic-abstraction-main/CLIP/clip/clip_explainability.py | # modified from: https://github.com/hila-chefer/Transformer-MM-Explainability/blob/main/CLIP/clip/clip.py
import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
from pkg_resources import packaging
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model_explainability import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
# Prefer torchvision's InterpolationMode enum; fall back to the PIL constant
# on older torchvision versions where it does not exist.
try:
    from torchvision.transforms import InterpolationMode

    BICUBIC = InterpolationMode.BICUBIC
except ImportError:
    BICUBIC = Image.BICUBIC

# CLIP relies on features introduced in PyTorch 1.7.1; older versions are
# not blocked, only warned about.
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
    warnings.warn("PyTorch version 1.7.1 or higher is recommended")

__all__ = ["available_models", "load", "tokenize"]
# Shared BPE tokenizer instance used by tokenize().
_tokenizer = _Tokenizer()

# Official OpenAI CLIP checkpoint URLs. The second-to-last path component is
# the file's SHA256 digest, which _download() verifies.
_MODELS = {
    "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
    "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
    "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
    "RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
    "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
    "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
    "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
    "ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt",
}
def _download(url: str, root: str):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if (
hashlib.sha256(open(download_target, "rb").read()).hexdigest()
== expected_sha256
):
return download_target
else:
warnings.warn(
f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file"
)
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(
total=int(source.info().get("Content-Length")),
ncols=80,
unit="iB",
unit_scale=True,
unit_divisor=1024,
) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if (
hashlib.sha256(open(download_target, "rb").read()).hexdigest()
!= expected_sha256
):
raise RuntimeError(
f"Model has been downloaded but the SHA256 checksum does not not match"
)
return download_target
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform(n_px, overload_resolution=False):
    """Build the CLIP image preprocessing pipeline.

    When overload_resolution is False, images are resized to 224 and
    center-cropped to n_px before conversion/normalization; otherwise the
    caller is expected to supply images at the desired resolution already.
    """
    core = [
        _convert_image_to_rgb,
        ToTensor(),
        Normalize(
            (0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)
        ),
    ]
    if overload_resolution:
        return Compose(core)
    return Compose([Resize(224, interpolation=BICUBIC), CenterCrop(n_px)] + core)
def available_models() -> List[str]:
    """Returns the names of available CLIP models"""
    return [model_name for model_name in _MODELS]
def load(
    name: str,
    device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
    jit: bool = False,
    download_root: str = None,
    overload_resolution=False,
):
    """Load a CLIP model
    Parameters
    ----------
    name : str
        A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
    device : Union[str, torch.device]
        The device to put the loaded model
    jit : bool
        Whether to load the optimized JIT model or more hackable non-JIT model (default).
    download_root: str
        path to download the model files; by default, it uses "~/.cache/clip"
    overload_resolution : bool
        passed through to the preprocessing transform; when True, resizing
        and center-cropping are skipped
    Returns
    -------
    model : torch.nn.Module
        The CLIP model
    preprocess : Callable[[PIL.Image], torch.Tensor]
        A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
    """
    # Resolve the model name to a local checkpoint, downloading if needed.
    if name in _MODELS:
        model_path = _download(
            _MODELS[name], download_root or os.path.expanduser("~/.cache/clip")
        )
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(
            f"Model {name} not found; available models = {available_models()}"
        )

    try:
        # loading JIT archive
        model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
        state_dict = None
    except RuntimeError:
        # loading saved state dict
        if jit:
            warnings.warn(
                f"File {model_path} is not a JIT archive. Loading as a state dict instead"
            )
            jit = False
        state_dict = torch.load(model_path, map_location="cpu")

    if not jit:
        # Non-JIT path: rebuild the hackable Python model from the weights
        # (either the loaded state dict or the JIT archive's parameters).
        model = build_model(state_dict or model.state_dict()).to(device)
        if str(device) == "cpu":
            model.float()
        return model, _transform(model.visual.input_resolution, overload_resolution)

    # JIT path: the archive bakes in device constants; rewrite them so the
    # graph runs on the requested device.
    # patch the device names
    device_holder = torch.jit.trace(
        lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]
    )
    device_node = [
        n
        for n in device_holder.graph.findAllNodes("prim::Constant")
        if "Device" in repr(n)
    ][-1]

    def patch_device(module):
        # Rewrite every hard-coded "cuda" device constant in the module's
        # TorchScript graph(s) to the target device.
        try:
            graphs = [module.graph] if hasattr(module, "graph") else []
        except RuntimeError:
            graphs = []
        if hasattr(module, "forward1"):
            graphs.append(module.forward1.graph)

        for graph in graphs:
            for node in graph.findAllNodes("prim::Constant"):
                if "value" in node.attributeNames() and str(node["value"]).startswith(
                    "cuda"
                ):
                    node.copyAttributes(device_node)

    model.apply(patch_device)
    patch_device(model.encode_image)
    patch_device(model.encode_text)

    # patch dtype to float32 on CPU
    if str(device) == "cpu":
        float_holder = torch.jit.trace(
            lambda: torch.ones([]).float(), example_inputs=[]
        )
        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
        float_node = float_input.node()

        def patch_float(module):
            # Rewrite half-precision casts (aten::to with dtype 5 == float16)
            # into float32 casts so the model can run on CPU.
            try:
                graphs = [module.graph] if hasattr(module, "graph") else []
            except RuntimeError:
                graphs = []
            if hasattr(module, "forward1"):
                graphs.append(module.forward1.graph)

            for graph in graphs:
                for node in graph.findAllNodes("aten::to"):
                    inputs = list(node.inputs())
                    for i in [
                        1,
                        2,
                    ]:  # dtype can be the second or third argument to aten::to()
                        if inputs[i].node()["value"] == 5:
                            inputs[i].node().copyAttributes(float_node)

        model.apply(patch_float)
        patch_float(model.encode_image)
        patch_float(model.encode_text)

        model.float()

    return model, _transform(model.input_resolution.item(), overload_resolution)
def tokenize(
    texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False
) -> torch.LongTensor:
    """
    Returns the tokenized representation of given input string(s)
    Parameters
    ----------
    texts : Union[str, List[str]]
        An input string or a list of input strings to tokenize
    context_length : int
        The context length to use; all CLIP models use 77 as the context length
    truncate: bool
        Whether to truncate the text in case its encoding is longer than the context length
    Returns
    -------
    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
    """
    # Accept a bare string as a one-element batch.
    if isinstance(texts, str):
        texts = [texts]
    # Every encoded text is wrapped in start-of-text / end-of-text marker tokens.
    sot_token = _tokenizer.encoder["<|startoftext|>"]
    eot_token = _tokenizer.encoder["<|endoftext|>"]
    all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
    # Rows shorter than context_length remain zero-padded on the right.
    result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
    for i, tokens in enumerate(all_tokens):
        if len(tokens) > context_length:
            if truncate:
                # Keep the first context_length tokens, but force the final
                # slot back to the end-of-text marker.
                tokens = tokens[:context_length]
                tokens[-1] = eot_token
            else:
                raise RuntimeError(
                    f"Input {texts[i]} is too long for context length {context_length}"
                )
        result[i, : len(tokens)] = torch.tensor(tokens)
    return result
| 9,663 | 34.270073 | 154 | py |
semantic-abstraction | semantic-abstraction-main/CLIP/clip/auxiliary.py | # adding hooks, copied from: https://github.com/hila-chefer/Transformer-MM-Explainability/blob/e63b4ab0d0722faa11ff2f7549c4f88074e7edd7/CLIP/clip/auxilary.py
import torch
import warnings
from typing import Tuple, Optional
import torch
from torch import Tensor
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
from torch.nn import functional as F
from math import ceil, floor
# We define this function as _pad because it takes an argument
# named pad, which clobbers the recursive reference to the pad
# function needed for __torch_function__ support
pad = F.pad
# This class exists solely for Transformer; it has an annotation stating
# that bias is never None, which appeases TorchScript
def interpolate_positional_emb(positional_embedding, target_seq_len, source_seq_len=50):
    """Linearly resample a learned positional embedding to a new sequence length.

    Parameters
    ----------
    positional_embedding : Tensor, shape (num_positions, dim)
        The learned positional-embedding table to resample.
    target_seq_len : int
        Number of positions wanted in the output.
    source_seq_len : int
        Nominal sequence length the table was trained for.  Defaults to 50,
        matching CLIP's attention pool (7*7 spatial tokens + 1 mean token);
        previously this value was hard-coded.

    Returns
    -------
    Tensor, shape (target_seq_len, dim)
        Row ``i`` is the linear interpolation of the two source rows bracketing
        the fractional position ``i * source_seq_len / target_seq_len``;
        positions past the end of the table clamp to the last source row.
    """
    interpolated = torch.zeros_like(positional_embedding[0])[None, :].repeat(
        target_seq_len, 1
    )
    for i in range(target_seq_len):
        # Fractional source index corresponding to target position i.
        src_pos = float(i) / (target_seq_len / source_seq_len)
        lo = floor(src_pos)
        hi = ceil(src_pos)
        if hi < len(positional_embedding):
            interpolated[i] = torch.lerp(
                positional_embedding[lo, :], positional_embedding[hi, :], src_pos - lo
            )
        else:
            # Clamp positions beyond the source table to its final row.
            interpolated[i] = positional_embedding[-1, :]
    return interpolated
class _LinearWithBias(torch.nn.Linear):
    """Linear layer whose ``bias`` is guaranteed to exist.

    The non-Optional ``bias`` annotation appeases TorchScript, which otherwise
    treats ``nn.Linear.bias`` as ``Optional[Tensor]``.
    """

    # Always a Tensor (never None) because bias=True is forced below.
    bias: Tensor

    def __init__(self, in_features: int, out_features: int) -> None:
        super().__init__(in_features, out_features, bias=True)
def multi_head_attention_forward(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    embed_dim_to_check: int,
    num_heads: int,
    in_proj_weight: Tensor,
    in_proj_bias: Tensor,
    bias_k: Optional[Tensor],
    bias_v: Optional[Tensor],
    add_zero_attn: bool,
    dropout_p: float,
    out_proj_weight: Tensor,
    out_proj_bias: Tensor,
    training: bool = True,
    key_padding_mask: Optional[Tensor] = None,
    need_weights: bool = True,
    attn_mask: Optional[Tensor] = None,
    use_separate_proj_weight: bool = False,
    q_proj_weight: Optional[Tensor] = None,
    k_proj_weight: Optional[Tensor] = None,
    v_proj_weight: Optional[Tensor] = None,
    static_k: Optional[Tensor] = None,
    static_v: Optional[Tensor] = None,
    attention_probs_forward_hook=None,
    attention_probs_backwards_hook=None,
) -> Tuple[Tensor, Optional[Tensor]]:
    """Functional multi-head attention, extended with attention-probability hooks.

    This mirrors ``torch.nn.functional.multi_head_attention_forward`` (inputs are
    laid out as (seq_len, batch, embed_dim)); the additions are
    ``attention_probs_forward_hook`` / ``attention_probs_backwards_hook``.  When
    both are supplied, the forward hook is called with the post-softmax,
    post-dropout attention weights of shape (batch * num_heads, tgt_len, src_len).

    Returns ``(attn_output, attn_output_weights)``; the weights are averaged over
    heads when ``need_weights`` is True, otherwise ``None``.
    """
    # Defer to __torch_function__ overrides when any input is a Tensor subclass.
    if not torch.jit.is_scripting():
        tens_ops = (
            query,
            key,
            value,
            in_proj_weight,
            in_proj_bias,
            bias_k,
            bias_v,
            out_proj_weight,
            out_proj_bias,
        )
        if any([type(t) is not Tensor for t in tens_ops]) and F.has_torch_function(
            tens_ops
        ):
            return F.handle_torch_function(
                multi_head_attention_forward,
                tens_ops,
                query,
                key,
                value,
                embed_dim_to_check,
                num_heads,
                in_proj_weight,
                in_proj_bias,
                bias_k,
                bias_v,
                add_zero_attn,
                dropout_p,
                out_proj_weight,
                out_proj_bias,
                training=training,
                key_padding_mask=key_padding_mask,
                need_weights=need_weights,
                attn_mask=attn_mask,
                use_separate_proj_weight=use_separate_proj_weight,
                q_proj_weight=q_proj_weight,
                k_proj_weight=k_proj_weight,
                v_proj_weight=v_proj_weight,
                static_k=static_k,
                static_v=static_v,
            )
    # Shapes follow nn.MultiheadAttention conventions: (seq_len, batch, embed_dim).
    tgt_len, bsz, embed_dim = query.size()
    assert embed_dim == embed_dim_to_check
    # allow MHA to have different sizes for the feature dimension
    assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
    head_dim = embed_dim // num_heads
    assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
    # Scaled dot-product attention: queries are pre-scaled by 1/sqrt(head_dim).
    scaling = float(head_dim) ** -0.5
    # Input projections.  The packed in_proj_weight stacks the q, k, v blocks
    # row-wise; the separate-weight path is used when k/v dims differ.
    if not use_separate_proj_weight:
        if torch.equal(query, key) and torch.equal(key, value):
            # self-attention
            q, k, v = F.linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
        elif torch.equal(key, value):
            # encoder-decoder attention
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = F.linear(query, _w, _b)
            if key is None:
                assert value is None
                k = None
                v = None
            else:
                # This is inline in_proj function with in_proj_weight and in_proj_bias
                _b = in_proj_bias
                _start = embed_dim
                _end = None
                _w = in_proj_weight[_start:, :]
                if _b is not None:
                    _b = _b[_start:]
                k, v = F.linear(key, _w, _b).chunk(2, dim=-1)
        else:
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = F.linear(query, _w, _b)
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = embed_dim
            _end = embed_dim * 2
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            k = F.linear(key, _w, _b)
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = embed_dim * 2
            _end = None
            _w = in_proj_weight[_start:, :]
            if _b is not None:
                _b = _b[_start:]
            v = F.linear(value, _w, _b)
    else:
        q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
        len1, len2 = q_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == query.size(-1)
        k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
        len1, len2 = k_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == key.size(-1)
        v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
        len1, len2 = v_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == value.size(-1)
        if in_proj_bias is not None:
            # The packed bias is sliced into q/k/v thirds.
            q = F.linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
            k = F.linear(
                key, k_proj_weight_non_opt, in_proj_bias[embed_dim : (embed_dim * 2)]
            )
            v = F.linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2) :])
        else:
            q = F.linear(query, q_proj_weight_non_opt, in_proj_bias)
            k = F.linear(key, k_proj_weight_non_opt, in_proj_bias)
            v = F.linear(value, v_proj_weight_non_opt, in_proj_bias)
    q = q * scaling
    # Validate / normalize attn_mask to a 3D (broadcastable) shape.
    if attn_mask is not None:
        assert (
            attn_mask.dtype == torch.float32
            or attn_mask.dtype == torch.float64
            or attn_mask.dtype == torch.float16
            or attn_mask.dtype == torch.uint8
            or attn_mask.dtype == torch.bool
        ), "Only float, byte, and bool types are supported for attn_mask, not {}".format(
            attn_mask.dtype
        )
        if attn_mask.dtype == torch.uint8:
            warnings.warn(
                "Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
            )
            attn_mask = attn_mask.to(torch.bool)
        if attn_mask.dim() == 2:
            attn_mask = attn_mask.unsqueeze(0)
            if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
                raise RuntimeError("The size of the 2D attn_mask is not correct.")
        elif attn_mask.dim() == 3:
            if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
                raise RuntimeError("The size of the 3D attn_mask is not correct.")
        else:
            raise RuntimeError(
                "attn_mask's dimension {} is not supported".format(attn_mask.dim())
            )
        # attn_mask's dim is 3 now.
    # convert ByteTensor key_padding_mask to bool
    if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
        warnings.warn(
            "Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
        )
        key_padding_mask = key_padding_mask.to(torch.bool)
    # Optional learned bias tokens appended to the key/value sequences; masks
    # are padded by one column to cover the extra position.
    if bias_k is not None and bias_v is not None:
        if static_k is None and static_v is None:
            k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = pad(attn_mask, (0, 1))
            if key_padding_mask is not None:
                key_padding_mask = pad(key_padding_mask, (0, 1))
        else:
            assert static_k is None, "bias cannot be added to static key."
            assert static_v is None, "bias cannot be added to static value."
    else:
        assert bias_k is None
        assert bias_v is None
    # Split heads: (seq, batch, embed) -> (batch * heads, seq, head_dim).
    q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    if k is not None:
        k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    if v is not None:
        v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    # Pre-computed (cached) key/value tensors override the projections above.
    if static_k is not None:
        assert static_k.size(0) == bsz * num_heads
        assert static_k.size(2) == head_dim
        k = static_k
    if static_v is not None:
        assert static_v.size(0) == bsz * num_heads
        assert static_v.size(2) == head_dim
        v = static_v
    src_len = k.size(1)
    if key_padding_mask is not None:
        assert key_padding_mask.size(0) == bsz
        assert key_padding_mask.size(1) == src_len
    # Optionally append an all-zero attention slot to key/value.
    if add_zero_attn:
        src_len += 1
        k = torch.cat(
            [
                k,
                torch.zeros(
                    (k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device
                ),
            ],
            dim=1,
        )
        v = torch.cat(
            [
                v,
                torch.zeros(
                    (v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device
                ),
            ],
            dim=1,
        )
        if attn_mask is not None:
            attn_mask = pad(attn_mask, (0, 1))
        if key_padding_mask is not None:
            key_padding_mask = pad(key_padding_mask, (0, 1))
    # Raw attention logits: (batch * heads, tgt_len, src_len).
    attn_output_weights = torch.bmm(q, k.transpose(1, 2))
    assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
    if attn_mask is not None:
        # Boolean masks exclude positions; float masks are additive.
        if attn_mask.dtype == torch.bool:
            attn_output_weights.masked_fill_(attn_mask, float("-inf"))
        else:
            attn_output_weights += attn_mask
    if key_padding_mask is not None:
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        attn_output_weights = attn_output_weights.masked_fill(
            key_padding_mask.unsqueeze(1).unsqueeze(2),
            float("-inf"),
        )
        attn_output_weights = attn_output_weights.view(
            bsz * num_heads, tgt_len, src_len
        )
    attn_output_weights = F.softmax(attn_output_weights, dim=-1)
    attn_output_weights = F.dropout(attn_output_weights, p=dropout_p, training=training)
    # use hooks for the attention weights if necessary
    if (
        attention_probs_forward_hook is not None
        and attention_probs_backwards_hook is not None
    ):
        attention_probs_forward_hook(attn_output_weights)
        # attn_output_weights.register_hook(attention_probs_backwards_hook)
    attn_output = torch.bmm(attn_output_weights, v)
    assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
    # Merge heads back and apply the output projection.
    attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
    attn_output = F.linear(attn_output, out_proj_weight, out_proj_bias)
    if need_weights:
        # average attention weights over heads
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        return attn_output, attn_output_weights.sum(dim=1) / num_heads
    else:
        return attn_output, None
class MultiheadAttention(torch.nn.Module):
    r"""Allows the model to jointly attend to information
    from different representation subspaces.
    See reference: Attention Is All You Need
    .. math::
        \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
        \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
    Args:
        embed_dim: total dimension of the model.
        num_heads: parallel attention heads.
        dropout: a Dropout layer on attn_output_weights. Default: 0.0.
        bias: add bias as module parameter. Default: True.
        add_bias_kv: add bias to the key and value sequences at dim=0.
        add_zero_attn: add a new batch of zeros to the key and
            value sequences at dim=1.
        kdim: total number of features in key. Default: None.
        vdim: total number of features in value. Default: None.
        Note: if kdim and vdim are None, they will be set to embed_dim such that
        query, key, and value have the same number of features.
    Examples::
        >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
        >>> attn_output, attn_output_weights = multihead_attn(query, key, value)
    """

    # Learned bias tokens (set in __init__); None unless add_bias_kv=True.
    bias_k: Optional[torch.Tensor]
    bias_v: Optional[torch.Tensor]

    def __init__(
        self,
        embed_dim,
        num_heads,
        dropout=0.0,
        bias=True,
        add_bias_kv=False,
        add_zero_attn=False,
        kdim=None,
        vdim=None,
    ):
        super(MultiheadAttention, self).__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert (
            self.head_dim * num_heads == self.embed_dim
        ), "embed_dim must be divisible by num_heads"
        # Separate q/k/v projection weights when key/value dims differ from
        # embed_dim; otherwise one packed (3*embed_dim, embed_dim) weight.
        if self._qkv_same_embed_dim is False:
            self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
            self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
            self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
            self.register_parameter("in_proj_weight", None)
        else:
            self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
            self.register_parameter("q_proj_weight", None)
            self.register_parameter("k_proj_weight", None)
            self.register_parameter("v_proj_weight", None)
        if bias:
            self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
        else:
            self.register_parameter("in_proj_bias", None)
        # Output projection whose bias is never None (TorchScript-friendly).
        self.out_proj = _LinearWithBias(embed_dim, embed_dim)
        if add_bias_kv:
            self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
            self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier-uniform init for projections, zeros for biases,
        # Xavier-normal for the optional bias tokens.
        if self._qkv_same_embed_dim:
            xavier_uniform_(self.in_proj_weight)
        else:
            xavier_uniform_(self.q_proj_weight)
            xavier_uniform_(self.k_proj_weight)
            xavier_uniform_(self.v_proj_weight)
        if self.in_proj_bias is not None:
            constant_(self.in_proj_bias, 0.0)
            constant_(self.out_proj.bias, 0.0)
        if self.bias_k is not None:
            xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            xavier_normal_(self.bias_v)

    def __setstate__(self, state):
        # Support loading old MultiheadAttention checkpoints generated by v1.1.0
        if "_qkv_same_embed_dim" not in state:
            state["_qkv_same_embed_dim"] = True
        super(MultiheadAttention, self).__setstate__(state)

    def forward(
        self,
        query,
        key,
        value,
        key_padding_mask=None,
        need_weights=True,
        attn_mask=None,
        attention_probs_forward_hook=None,
        attention_probs_backwards_hook=None,
    ):
        r"""
        Args:
            query, key, value: map a query and a set of key-value pairs to an output.
                See "Attention Is All You Need" for more details.
            key_padding_mask: if provided, specified padding elements in the key will
                be ignored by the attention. When given a binary mask and a value is True,
                the corresponding value on the attention layer will be ignored. When given
                a byte mask and a value is non-zero, the corresponding value on the attention
                layer will be ignored
            need_weights: output attn_output_weights.
            attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
                the batches while a 3D mask allows to specify a different mask for the entries of each batch.
        Shape:
            - Inputs:
            - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
              the embedding dimension.
            - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
              the embedding dimension.
            - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
              the embedding dimension.
            - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
              If a ByteTensor is provided, the non-zero positions will be ignored while the position
              with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the
              value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
            - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
              3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
              S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked
              positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
              while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
              is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
              is provided, it will be added to the attention weight.
            - Outputs:
            - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
              E is the embedding dimension.
            - attn_output_weights: :math:`(N, L, S)` where N is the batch size,
              L is the target sequence length, S is the source sequence length.
        """
        # Dispatch to the hook-aware functional attention defined in this
        # module; the only difference between the branches is whether separate
        # q/k/v projection weights are used.
        if not self._qkv_same_embed_dim:
            return multi_head_attention_forward(
                query,
                key,
                value,
                self.embed_dim,
                self.num_heads,
                self.in_proj_weight,
                self.in_proj_bias,
                self.bias_k,
                self.bias_v,
                self.add_zero_attn,
                self.dropout,
                self.out_proj.weight,
                self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask,
                need_weights=need_weights,
                attn_mask=attn_mask,
                use_separate_proj_weight=True,
                q_proj_weight=self.q_proj_weight,
                k_proj_weight=self.k_proj_weight,
                v_proj_weight=self.v_proj_weight,
                attention_probs_forward_hook=attention_probs_forward_hook,
                attention_probs_backwards_hook=attention_probs_backwards_hook,
            )
        else:
            return multi_head_attention_forward(
                query,
                key,
                value,
                self.embed_dim,
                self.num_heads,
                self.in_proj_weight,
                self.in_proj_bias,
                self.bias_k,
                self.bias_v,
                self.add_zero_attn,
                self.dropout,
                self.out_proj.weight,
                self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask,
                need_weights=need_weights,
                attn_mask=attn_mask,
                attention_probs_forward_hook=attention_probs_forward_hook,
                attention_probs_backwards_hook=attention_probs_backwards_hook,
            )
| 21,829 | 38.981685 | 157 | py |
semantic-abstraction | semantic-abstraction-main/CLIP/clip/clip.py | import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
# if [int(i) for i in torch.__version__.split(".")] < [1, 7, 1]:
# warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize", "tokenizer"]
tokenizer = _Tokenizer()
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
"RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
"ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt",
}
def _download(url: str, root: str):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if (
hashlib.sha256(open(download_target, "rb").read()).hexdigest()
== expected_sha256
):
return download_target
else:
warnings.warn(
f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file"
)
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(
total=int(source.info().get("Content-Length")),
ncols=80,
unit="iB",
unit_scale=True,
unit_divisor=1024,
) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if (
hashlib.sha256(open(download_target, "rb").read()).hexdigest()
!= expected_sha256
):
raise RuntimeError(
f"Model has been downloaded but the SHA256 checksum does not not match"
)
return download_target
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform(n_px, overload_resolution=False):
    """Build the CLIP image preprocessing pipeline.

    When ``overload_resolution`` is True, the resize/center-crop stages are
    skipped so callers can feed images at their native resolution; otherwise
    the image is resized and center-cropped to ``n_px`` before normalization.

    NOTE(review): the resize target is hard-coded to 224 while the crop uses
    ``n_px``; upstream CLIP resizes to ``n_px`` — confirm this is intentional.
    """
    pipeline = [
        _convert_image_to_rgb,
        ToTensor(),
        Normalize(
            (0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)
        ),
    ]
    if overload_resolution:
        return Compose(pipeline)
    return Compose([Resize(224, interpolation=BICUBIC), CenterCrop(n_px)] + pipeline)
def available_models() -> List[str]:
    """Return the names of the CLIP models this module can download."""
    return [model_name for model_name in _MODELS]
def load(
    name: str,
    device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
    jit: bool = False,
    download_root: str = None,
    overload_resolution=False,
):
    """Load a CLIP model
    Parameters
    ----------
    name : str
        A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
    device : Union[str, torch.device]
        The device to put the loaded model
    jit : bool
        Whether to load the optimized JIT model or more hackable non-JIT model (default).
    download_root: str
        path to download the model files; by default, it uses "~/.cache/clip"
    Returns
    -------
    model : torch.nn.Module
        The CLIP model
    preprocess : Callable[[PIL.Image], torch.Tensor]
        A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
    """
    # Resolve `name` to a local checkpoint path (downloading if it is a known model).
    if name in _MODELS:
        model_path = _download(
            _MODELS[name], download_root or os.path.expanduser("~/.cache/clip")
        )
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(
            f"Model {name} not found; available models = {available_models()}"
        )
    # Prefer opening the file as a TorchScript archive; fall back to a raw
    # state dict when that fails.
    try:
        # loading JIT archive
        model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
        state_dict = None
    except RuntimeError:
        # loading saved state dict
        if jit:
            warnings.warn(
                f"File {model_path} is not a JIT archive. Loading as a state dict instead"
            )
            jit = False
        state_dict = torch.load(model_path, map_location="cpu")
    # Eager (non-JIT) path: rebuild the Python model from the weights.
    if not jit:
        model = build_model(state_dict or model.state_dict()).to(device)
        if str(device) == "cpu":
            model.float()
        return model, _transform(model.visual.input_resolution, overload_resolution)
    # patch the device names
    # Trace a trivial graph just to obtain a prim::Constant node that holds
    # the requested device, then copy its attributes over any "cuda" constants
    # baked into the archived graphs.
    device_holder = torch.jit.trace(
        lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]
    )
    device_node = [
        n
        for n in device_holder.graph.findAllNodes("prim::Constant")
        if "Device" in repr(n)
    ][-1]

    def patch_device(module):
        try:
            # Some submodules raise when their graph is accessed; skip them.
            graphs = [module.graph] if hasattr(module, "graph") else []
        except RuntimeError:
            graphs = []
        if hasattr(module, "forward1"):
            graphs.append(module.forward1.graph)
        for graph in graphs:
            for node in graph.findAllNodes("prim::Constant"):
                if "value" in node.attributeNames() and str(node["value"]).startswith(
                    "cuda"
                ):
                    node.copyAttributes(device_node)

    model.apply(patch_device)
    patch_device(model.encode_image)
    patch_device(model.encode_text)
    # patch dtype to float32 on CPU
    if str(device) == "cpu":
        # Same trick as above: grab an aten::to node whose dtype argument is
        # float32, then rewrite any fp16 casts (dtype constant 5) to use it.
        float_holder = torch.jit.trace(
            lambda: torch.ones([]).float(), example_inputs=[]
        )
        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
        float_node = float_input.node()

        def patch_float(module):
            try:
                graphs = [module.graph] if hasattr(module, "graph") else []
            except RuntimeError:
                graphs = []
            if hasattr(module, "forward1"):
                graphs.append(module.forward1.graph)
            for graph in graphs:
                for node in graph.findAllNodes("aten::to"):
                    inputs = list(node.inputs())
                    for i in [
                        1,
                        2,
                    ]:  # dtype can be the second or third argument to aten::to()
                        if inputs[i].node()["value"] == 5:
                            inputs[i].node().copyAttributes(float_node)

        model.apply(patch_float)
        patch_float(model.encode_image)
        patch_float(model.encode_text)
        model.float()
    return model, _transform(model.input_resolution.item(), overload_resolution)
def tokenize(
    texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False
) -> torch.LongTensor:
    """Tokenize one string or a batch of strings for CLIP's text encoder.

    Each text is encoded and wrapped in start/end-of-text markers, then placed
    in a zero-padded row of length ``context_length`` (77 for all CLIP models).
    Over-long texts are truncated when ``truncate`` is True (the last slot is
    forced back to the end-of-text marker), otherwise a RuntimeError is raised.

    Returns a LongTensor of shape [number of input strings, context_length].
    """
    if isinstance(texts, str):
        texts = [texts]
    sot = tokenizer.encoder["<|startoftext|>"]
    eot = tokenizer.encoder["<|endoftext|>"]
    encoded = [[sot, *tokenizer.encode(text), eot] for text in texts]
    result = torch.zeros(len(encoded), context_length, dtype=torch.long)
    for row, tokens in enumerate(encoded):
        if len(tokens) > context_length:
            if not truncate:
                raise RuntimeError(
                    f"Input {texts[row]} is too long for context length {context_length}"
                )
            tokens = tokens[:context_length]
            tokens[-1] = eot
        result[row, : len(tokens)] = torch.tensor(tokens)
    return result
| 9,497 | 33.791209 | 154 | py |
semantic-abstraction | semantic-abstraction-main/CLIP/clip/model.py | from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from .auxiliary import interpolate_positional_emb
class Bottleneck(nn.Module):
    """ResNet bottleneck block with CLIP's modification: downsampling is done
    by an average pool followed by a stride-1 convolution (anti-aliasing)
    rather than a strided convolution."""

    # Channel expansion factor of the final 1x1 convolution.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()
        # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        # Projection shortcut, created below only when shapes change.
        self.downsample = None
        self.stride = stride
        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
            self.downsample = nn.Sequential(
                OrderedDict(
                    [
                        ("-1", nn.AvgPool2d(stride)),
                        (
                            "0",
                            nn.Conv2d(
                                inplanes,
                                planes * self.expansion,
                                1,
                                stride=1,
                                bias=False,
                            ),
                        ),
                        ("1", nn.BatchNorm2d(planes * self.expansion)),
                    ]
                )
            )

    def forward(self, x: torch.Tensor):
        # Identity shortcut; replaced by the projection when shapes differ.
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.avgpool(out)
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class AttentionPool2d(nn.Module):
    """Global attention pooling over a 2D feature map.

    Flattens an NCHW feature map to a token sequence, prepends the mean token,
    adds (optionally interpolated) positional embeddings, runs one multi-head
    attention pass, and returns the output at the mean-token position.
    """

    def __init__(
        self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None
    ):
        super().__init__()
        # One position per spatial location plus one for the prepended mean token.
        self.positional_embedding = nn.Parameter(
            torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5
        )
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads

    def forward(self, x):
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
            2, 0, 1
        )  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        # NOTE(review): the literal 50 assumes the embedding was built with
        # spacial_dim == 7 (7*7 + 1 positions) -- confirm for other configs.
        assert len(x) >= 50
        if len(x) > 50:
            # Feature map larger than trained for: stretch the positional table.
            target_seq_len = len(x)
            pe = interpolate_positional_emb(self.positional_embedding, target_seq_len)
            x = x + pe[:, None, :]  # (HW+1)NC
        else:
            x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        x, _ = F.multi_head_attention_forward(
            query=x,
            key=x,
            value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat(
                [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]
            ),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False,
        )
        # Only the output at the mean-token position is returned.
        return x[0]
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """

    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution
        # the 3-layer stem
        self.conv1 = nn.Conv2d(
            3, width // 2, kernel_size=3, stride=2, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.conv2 = nn.Conv2d(
            width // 2, width // 2, kernel_size=3, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.avgpool = nn.AvgPool2d(2)
        self.relu = nn.ReLU(inplace=True)
        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
        embed_dim = width * 32  # the ResNet feature dimension
        # Attention pooling runs on the 32x-downsampled feature map.
        self.attnpool = AttentionPool2d(
            input_resolution // 32, embed_dim, heads, output_dim
        )

    def _make_layer(self, planes, blocks, stride=1):
        # Only the first block of a stage may downsample; the rest keep stride 1.
        layers = [Bottleneck(self._inplanes, planes, stride)]
        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        def stem(x):
            # Three conv-bn-relu stages followed by a 2x average pool.
            for conv, bn in [
                (self.conv1, self.bn1),
                (self.conv2, self.bn2),
                (self.conv3, self.bn3),
            ]:
                x = self.relu(bn(conv(x)))
            x = self.avgpool(x)
            return x

        # Cast input to the stem weights' dtype before running the network.
        x = x.type(self.conv1.weight.dtype)
        x = stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.attnpool(x)
        return x
class LayerNorm(nn.LayerNorm):
    """LayerNorm that normalizes in fp32, then casts back to the input dtype.

    Subclasses torch's LayerNorm so fp16 activations can pass through without
    changing the module's external dtype contract.
    """

    def forward(self, x: torch.Tensor):
        input_dtype = x.dtype
        normalized = super().forward(x.float())
        return normalized.to(input_dtype)
class QuickGELU(nn.Module):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    def forward(self, x: torch.Tensor):
        return torch.sigmoid(x * 1.702) * x
class ResidualAttentionBlock(nn.Module):
    """Pre-norm transformer block: x + attn(ln_1(x)), then x + mlp(ln_2(x))."""
    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        # 4x expansion MLP with the QuickGELU activation used by CLIP.
        self.mlp = nn.Sequential(
            OrderedDict(
                [
                    ("c_fc", nn.Linear(d_model, d_model * 4)),
                    ("gelu", QuickGELU()),
                    ("c_proj", nn.Linear(d_model * 4, d_model)),
                ]
            )
        )
        self.ln_2 = LayerNorm(d_model)
        # Optional additive attention mask (e.g. causal mask for text).
        self.attn_mask = attn_mask
    def attention(self, x: torch.Tensor):
        # Move the mask to the activation dtype/device; the converted mask is
        # written back so later calls reuse it without another transfer.
        self.attn_mask = (
            self.attn_mask.to(dtype=x.dtype, device=x.device)
            if self.attn_mask is not None
            else None
        )
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
    def forward(self, x: torch.Tensor):
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x
class Transformer(nn.Module):
    """A stack of residual attention blocks.

    ``forward`` optionally accepts ``tile_attn_mask``, which temporarily
    replaces every block's attention mask for that single pass and then
    restores the originals.
    """
    def __init__(
        self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None
    ):
        super().__init__()
        self.width = width
        self.layers = layers
        blocks = [ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]
        self.resblocks = nn.Sequential(*blocks)
    def _attention_blocks(self):
        # Yield every ResidualAttentionBlock nested under the sequential stack.
        for module in self.resblocks.modules():
            if isinstance(module, ResidualAttentionBlock):
                yield module
    def forward(self, x: torch.Tensor, tile_attn_mask: torch.Tensor = None):
        if tile_attn_mask is None:
            return self.resblocks(x)
        # Save the current masks, swap in the tile mask, run, then restore.
        saved_masks = [
            None if block.attn_mask is None else block.attn_mask.clone()
            for block in self._attention_blocks()
        ]
        for block in self._attention_blocks():
            block.attn_mask = tile_attn_mask
        out = self.resblocks(x)
        for block, previous_mask in zip(self._attention_blocks(), saved_masks):
            block.attn_mask = previous_mask
        return out
class VisionTransformer(nn.Module):
    """CLIP's ViT image encoder: patchify -> prepend CLS -> transformer -> project."""
    def __init__(
        self,
        input_resolution: int,
        patch_size: int,
        width: int,
        layers: int,
        heads: int,
        output_dim: int,
    ):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        # Non-overlapping patch embedding (stride == kernel == patch_size).
        self.conv1 = nn.Conv2d(
            in_channels=3,
            out_channels=width,
            kernel_size=patch_size,
            stride=patch_size,
            bias=False,
        )
        scale = width**-0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        # One positional embedding per patch plus one for the CLS token.
        self.positional_embedding = nn.Parameter(
            scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)
        )
        self.ln_pre = LayerNorm(width)
        self.transformer = Transformer(width, layers, heads)
        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
    def forward(self, x: torch.Tensor, **kwargs):
        """Encode images [*, 3, H, W] into [*, output_dim] CLS embeddings.

        Extra kwargs (e.g. tile_attn_mask) are forwarded to the transformer.
        """
        x = self.conv1(x) # shape = [*, width, grid, grid]
        # shape = [*, width, grid ** 2]
        x = x.reshape(x.shape[0], x.shape[1], -1)
        x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
        x = torch.cat(
            [
                self.class_embedding.to(x.dtype)
                + torch.zeros(
                    x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device
                ),
                x,
            ],
            dim=1,
        ) # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)
        x = x.permute(1, 0, 2) # NLD -> LND
        x = self.transformer(x, **kwargs)
        x = x.permute(1, 0, 2) # LND -> NLD
        # Only the CLS token is used as the image representation.
        x = self.ln_post(x[:, 0, :])
        if self.proj is not None:
            x = x @ self.proj
        return x
class CLIP(nn.Module):
    """Contrastive image/text model: a visual encoder (ModifiedResNet or ViT)
    and a causal text transformer projected into a shared embedding space."""
    def __init__(
        self,
        embed_dim: int,
        # vision
        image_resolution: int,
        vision_layers: Union[Tuple[int, int, int, int], int],
        vision_width: int,
        vision_patch_size: int,
        # text
        context_length: int,
        vocab_size: int,
        transformer_width: int,
        transformer_heads: int,
        transformer_layers: int,
    ):
        super().__init__()
        self.context_length = context_length
        # A tuple/list of layer counts selects the ResNet variant; an int
        # selects the ViT variant. Head counts assume 64 dims per head.
        if isinstance(vision_layers, (tuple, list)):
            vision_heads = vision_width * 32 // 64
            self.visual = ModifiedResNet(
                layers=vision_layers,
                output_dim=embed_dim,
                heads=vision_heads,
                input_resolution=image_resolution,
                width=vision_width,
            )
        else:
            vision_heads = vision_width // 64
            self.visual = VisionTransformer(
                input_resolution=image_resolution,
                patch_size=vision_patch_size,
                width=vision_width,
                layers=vision_layers,
                heads=vision_heads,
                output_dim=embed_dim,
            )
        self.transformer = Transformer(
            width=transformer_width,
            layers=transformer_layers,
            heads=transformer_heads,
            attn_mask=self.build_attention_mask(),
        )
        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(
            torch.empty(self.context_length, transformer_width)
        )
        self.ln_final = LayerNorm(transformer_width)
        self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
        # Learnable softmax temperature, initialized to 1/0.07 (in log space).
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
        self.initialize_parameters()
    def initialize_parameters(self):
        """Initialize embeddings/projections following the original CLIP recipe."""
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)
        if isinstance(self.visual, ModifiedResNet):
            if self.visual.attnpool is not None:
                std = self.visual.attnpool.c_proj.in_features**-0.5
                nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
            # Zero the final BN gain of each bottleneck so blocks start as identity.
            for resnet_block in [
                self.visual.layer1,
                self.visual.layer2,
                self.visual.layer3,
                self.visual.layer4,
            ]:
                for name, param in resnet_block.named_parameters():
                    if name.endswith("bn3.weight"):
                        nn.init.zeros_(param)
        proj_std = (self.transformer.width**-0.5) * (
            (2 * self.transformer.layers) ** -0.5
        )
        attn_std = self.transformer.width**-0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.transformer.width**-0.5)
    def build_attention_mask(self):
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1) # zero out the lower diagonal
        return mask
    @property
    def dtype(self):
        # Weight dtype of the visual stem (fp16 after convert_weights).
        return self.visual.conv1.weight.dtype
    def encode_image(self, image, **kwargs):
        """Encode images into the shared embedding space (kwargs reach the visual tower)."""
        return self.visual(image.type(self.dtype), **kwargs)
    def encode_text(self, text, return_transformer_outputs=False):
        """Encode token ids [batch, n_ctx] into text embeddings.

        If return_transformer_outputs is True, also return the per-token
        final-LayerNorm outputs.
        """
        x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
        x = x + self.positional_embedding.type(self.dtype)[: x.shape[1], :]
        x = x.permute(1, 0, 2) # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2) # LND -> NLD
        transformer_output = self.ln_final(x).type(self.dtype)
        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = (
            transformer_output[
                torch.arange(transformer_output.shape[0]), text.argmax(dim=-1)
            ]
            @ self.text_projection
        )
        if return_transformer_outputs:
            return x, transformer_output
        return x
    def forward(self, image, text):
        """Return (logits_per_image, logits_per_text) cosine-similarity logits."""
        image_features = self.encode_image(image)
        text_features = self.encode_text(text)
        # normalized features
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)
        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logits_per_image.t()
        # shape = [global_batch_size, global_batch_size]
        return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
    """Convert applicable model parameters to fp16, in place.

    Covers conv/linear weights and biases, MultiheadAttention projection
    tensors, and the bare ``text_projection`` / ``proj`` parameters.
    """
    def _to_half(module):
        if isinstance(module, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            module.weight.data = module.weight.data.half()
            if module.bias is not None:
                module.bias.data = module.bias.data.half()
        if isinstance(module, nn.MultiheadAttention):
            attr_names = [
                "in_proj_weight",
                "q_proj_weight",
                "k_proj_weight",
                "v_proj_weight",
                "in_proj_bias",
                "bias_k",
                "bias_v",
            ]
            for attr_name in attr_names:
                tensor = getattr(module, attr_name)
                if tensor is not None:
                    tensor.data = tensor.data.half()
        for proj_name in ("text_projection", "proj"):
            proj = getattr(module, proj_name, None)
            if proj is not None:
                proj.data = proj.data.half()
    model.apply(_to_half)
def build_model(state_dict: dict):
    """Instantiate a CLIP model whose architecture is inferred from a state dict.

    Hyperparameters (widths, layer counts, resolution, patch size, context
    length, vocab size) are recovered purely from parameter shapes/key names,
    then the weights are loaded (as fp16) and the model returned in eval mode.
    """
    # A ViT checkpoint has a top-level "visual.proj"; ResNet checkpoints don't.
    vit = "visual.proj" in state_dict
    if vit:
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        vision_layers = len(
            [
                k
                for k in state_dict.keys()
                if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")
            ]
        )
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        # Positional embedding has grid**2 + 1 rows (patches + CLS).
        grid_size = round(
            (state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5
        )
        image_resolution = vision_patch_size * grid_size
    else:
        # Count distinct block indices in each of the four ResNet stages.
        counts: list = [
            len(
                set(
                    k.split(".")[2]
                    for k in state_dict
                    if k.startswith(f"visual.layer{b}")
                )
            )
            for b in [1, 2, 3, 4]
        ]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round(
            (state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5
        )
        vision_patch_size = None
        assert (
            output_width**2 + 1
            == state_dict["visual.attnpool.positional_embedding"].shape[0]
        )
        # The ResNet downsamples by a total factor of 32.
        image_resolution = output_width * 32
    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(
        set(
            k.split(".")[2]
            for k in state_dict
            if k.startswith(f"transformer.resblocks")
        )
    )
    model = CLIP(
        embed_dim,
        image_resolution,
        vision_layers,
        vision_width,
        vision_patch_size,
        context_length,
        vocab_size,
        transformer_width,
        transformer_heads,
        transformer_layers,
    )
    # Drop bookkeeping entries that are not module parameters.
    for key in ["input_resolution", "context_length", "vocab_size"]:
        if key in state_dict:
            del state_dict[key]
    convert_weights(model)
    model.load_state_dict(state_dict)
    return model.eval()
| 20,260 | 33.457483 | 112 | py |
semantic-abstraction | semantic-abstraction-main/CLIP/clip/clip_gradcam.py | from typing import List
import torch
import torch.nn as nn
from .clip_explainability import load
from .clip import tokenize
from torch import device
import numpy as np
import torch.nn.functional as nnf
import itertools
def zeroshot_classifier(clip_model, classnames, templates, device):
    """Embed every (classname, template) prompt with CLIP's text encoder and
    average the normalized embeddings per class.

    Returns a tensor of shape [dim, n classes].
    """
    with torch.no_grad():
        # Classname-major order: all templates of class 0, then class 1, ...
        prompts = [template.format(name) for name in classnames for template in templates]
        tokens = tokenize(prompts).to(device)
        embeddings = clip_model.encode_text(tokens)
        embeddings = embeddings.view(len(classnames), len(templates), -1)
        embeddings /= embeddings.norm(dim=-1, keepdim=True)
        per_class = embeddings.mean(dim=1)
    return per_class.T # shape: [dim, n classes]
class ClipGradcam(nn.Module):
    """Relevancy-map (gradcam-style) wrapper around a CLIP ViT.

    Computes per-prompt image relevance by accumulating gradient-weighted
    attention maps across the visual transformer's residual blocks
    (Chefer et al. style transformer explainability).
    """
    def __init__(
        self,
        clip_model_name: str,
        classes: List[str],
        templates: List[str],
        device: device,
        num_layers=10,
        positive_attn_only=False,
        **kwargs
    ):
        super(ClipGradcam, self).__init__()
        self.clip_model_name = clip_model_name
        self.model, self.preprocess = load(clip_model_name, device=device, **kwargs)
        self.templates = templates
        self.device = device
        self.target_classes = None
        self.set_classes(classes)
        # Only attention blocks with index > num_layers contribute relevance.
        self.num_layers = num_layers
        self.positive_attn_only = positive_attn_only
        # NOTE(review): despite the name, these values match the attention
        # *head* counts of the ViT variants and are used as the head dimension
        # when reshaping attn_probs below — confirm against the loaded model.
        self.num_res_attn_blocks = {
            "ViT-B/32": 12,
            "ViT-B/16": 12,
            "ViT-L/14": 16,
            "ViT-L/14@336px": 16,
        }[clip_model_name]
    def forward(self, x: torch.Tensor, o: List[str]):
        """
        non-standard hack around an nn, really should be more principled here
        """
        # x: image batch; o: prompt names previously registered via set_classes.
        image_features = self.model.encode_image(x.to(self.device))
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        zeroshot_weights = torch.cat(
            [self.class_to_language_feature[prompt] for prompt in o], dim=1
        )
        logits_per_image = 100.0 * image_features @ zeroshot_weights
        return self.interpret(logits_per_image, self.model, self.device)
    def interpret(self, logits_per_image, model, device):
        # modified from: https://colab.research.google.com/github/hila-chefer/Transformer-MM-Explainability/blob/main/CLIP_explainability.ipynb#scrollTo=fWKGyu2YAeSV
        # Returns relevance maps of shape [num_prompts, batch, grid, grid].
        batch_size = logits_per_image.shape[0]
        num_prompts = logits_per_image.shape[1]
        # One scalar objective per prompt (summed over the batch).
        one_hot = [logit for logit in logits_per_image.sum(dim=0)]
        model.zero_grad()
        image_attn_blocks = list(
            dict(model.visual.transformer.resblocks.named_children()).values()
        )
        num_tokens = image_attn_blocks[0].attn_probs.shape[-1]
        # R starts as the identity relevance between all tokens.
        R = torch.eye(
            num_tokens, num_tokens, dtype=image_attn_blocks[0].attn_probs.dtype
        ).to(device)
        R = R[None, None, :, :].repeat(num_prompts, batch_size, 1, 1)
        for i, block in enumerate(image_attn_blocks):
            if i <= self.num_layers:
                continue
            # TODO try scaling block.attn_probs by value magnitude
            # TODO actual parallelized prompt gradients
            grad = torch.stack(
                [
                    torch.autograd.grad(logit, [block.attn_probs], retain_graph=True)[
                        0
                    ].detach()
                    for logit in one_hot
                ]
            )
            grad = grad.view(
                num_prompts,
                batch_size,
                self.num_res_attn_blocks,
                num_tokens,
                num_tokens,
            )
            cam = (
                block.attn_probs.view(
                    1, batch_size, self.num_res_attn_blocks, num_tokens, num_tokens
                )
                .detach()
                .repeat(num_prompts, 1, 1, 1, 1)
            )
            cam = cam.reshape(num_prompts, batch_size, -1, cam.shape[-1], cam.shape[-1])
            grad = grad.reshape(
                num_prompts, batch_size, -1, grad.shape[-1], grad.shape[-1]
            )
            # Gradient-weighted attention, optionally clamped to positive values.
            cam = grad * cam
            cam = cam.reshape(
                num_prompts * batch_size, -1, cam.shape[-1], cam.shape[-1]
            )
            if self.positive_attn_only:
                cam = cam.clamp(min=0)
            # average of all heads
            cam = cam.mean(dim=-3)
            # Propagate relevance: R <- R + cam @ R.
            R = R + torch.bmm(
                cam, R.view(num_prompts * batch_size, num_tokens, num_tokens)
            ).view(num_prompts, batch_size, num_tokens, num_tokens)
        # CLS-row relevance to all patch tokens (drop the CLS column itself).
        image_relevance = R[:, :, 0, 1:]
        img_dim = int(np.sqrt(num_tokens - 1))
        image_relevance = image_relevance.reshape(
            num_prompts, batch_size, img_dim, img_dim
        )
        return image_relevance
    def set_classes(self, classes):
        """Precompute one language feature column per class for later forward calls."""
        self.target_classes = classes
        language_features = zeroshot_classifier(
            self.model, self.target_classes, self.templates, self.device
        )
        self.class_to_language_feature = {}
        for i, c in enumerate(self.target_classes):
            # Keep the column dimension so features can be concatenated later.
            self.class_to_language_feature[c] = language_features[:, [i]]
| 5,420 | 36.909091 | 165 | py |
semantic-abstraction | semantic-abstraction-main/CLIP/clip/__init__.py | from .clip import *
from .clip_gradcam import ClipGradcam
import torch
import numpy as np
from PIL import Image
import torchvision
from functools import reduce
def factors(n):
    """Return the set of all positive divisors of n (assumes n >= 1)."""
    divisors = set()
    for candidate in range(1, int(n**0.5) + 1):
        if n % candidate == 0:
            divisors.add(candidate)
            divisors.add(n // candidate)
    return divisors
# Saliency-extraction presets. Each value is a lambda taking the image side
# length and returning the kwargs consumed by ClipWrapper.get_clip_saliency:
# "ours" adds multi-scale crops, jitter augmentations and horizontal flipping
# on top of the single-scale "chefer_et_al" baseline.
saliency_configs = {
    "ours": lambda img_dim: {
        "distractor_labels": {},
        "horizontal_flipping": True,
        "augmentations": 5,
        "imagenet_prompt_ensemble": False,
        "positive_attn_only": True,
        "cropping_augmentations": [
            {"tile_size": img_dim, "stride": img_dim // 4},
            {"tile_size": int(img_dim * 2 / 3), "stride": int(img_dim * 2 / 3) // 4},
            {"tile_size": img_dim // 2, "stride": (img_dim // 2) // 4},
            {"tile_size": img_dim // 4, "stride": (img_dim // 4) // 4},
        ],
    },
    "chefer_et_al": lambda img_dim: {
        "distractor_labels": {},
        "horizontal_flipping": False,
        "augmentations": 0,
        "imagenet_prompt_ensemble": False,
        "positive_attn_only": True,
        "cropping_augmentations": [{"tile_size": img_dim, "stride": img_dim // 4}],
    },
}
class ClipWrapper:
    # SINGLETON WRAPPER
    """Process-wide singleton holding a CLIP model, its preprocess transform,
    and a ClipGradcam instance; all state lives in class attributes and all
    public entry points are classmethods that lazily initialize on first use."""
    clip_model = None
    clip_preprocess = None
    clip_gradcam = None
    lavt = None
    device = None
    jittering_transforms = None
    def __init__(self, clip_model_type, device, **kwargs):
        # Constructing an instance populates the *class* attributes.
        ClipWrapper.device = device
        ClipWrapper.jittering_transforms = torchvision.transforms.ColorJitter(
            brightness=0.6, contrast=0.6, saturation=0.6, hue=0.1
        )
        ClipWrapper.clip_model, ClipWrapper.clip_preprocess = load(
            clip_model_type, ClipWrapper.device, **kwargs
        )
        ClipWrapper.clip_gradcam = ClipGradcam(
            clip_model_name=clip_model_type,
            classes=[""],
            templates=["{}"],
            device=ClipWrapper.device,
            **kwargs
        )
    @classmethod
    def check_initialized(cls, clip_model_type="ViT-B/32", **kwargs):
        # Lazy one-time initialization; prefers CUDA when available.
        if cls.clip_gradcam is None:
            ClipWrapper(
                clip_model_type=clip_model_type,
                device="cuda" if torch.cuda.is_available() else "cpu",
                **kwargs
            )
    @classmethod
    def get_clip_text_feature(cls, string):
        """Encode a single string into a CLIP text feature (numpy array)."""
        ClipWrapper.check_initialized()
        with torch.no_grad():
            return (
                cls.clip_model.encode_text(
                    tokenize(string, context_length=77).to(cls.device)
                )
                .squeeze()
                .cpu()
                .numpy()
            )
    @classmethod
    def get_visual_feature(cls, rgb, tile_attn_mask, device=None):
        """Encode an HWC uint8 image into a CLIP visual feature, optionally
        restricting attention with tile_attn_mask."""
        if device is None:
            device = ClipWrapper.device
        ClipWrapper.check_initialized()
        rgb = ClipWrapper.clip_preprocess(Image.fromarray(rgb)).unsqueeze(0)
        with torch.no_grad():
            clip_feature = ClipWrapper.clip_model.encode_image(
                rgb.to(ClipWrapper.device), tile_attn_mask=tile_attn_mask
            ).squeeze()
        return clip_feature.to(device)
    @classmethod
    def get_clip_saliency(
        cls,
        img,
        text_labels,
        prompts,
        # Mutable default is safe here: it is only rebound below, never mutated.
        distractor_labels=set(),
        use_lavt=False,
        **kwargs
    ):
        """Compute per-label saliency maps (and the label text features).

        Optionally subtracts the mean saliency of distractor labels.
        """
        cls.check_initialized()
        if use_lavt:
            # NOTE(review): cls.lavt is never assigned anywhere in this file;
            # this path would fail unless lavt is injected externally.
            return cls.lavt.localize(img=img, prompts=text_labels)
        cls.clip_gradcam.templates = prompts
        cls.clip_gradcam.set_classes(text_labels)
        text_label_features = torch.stack(
            list(cls.clip_gradcam.class_to_language_feature.values()), dim=0
        )
        text_label_features = text_label_features.squeeze(dim=-1).cpu()
        text_maps = cls.get_clip_saliency_convolve(
            img=img, text_labels=text_labels, **kwargs
        )
        if len(distractor_labels) > 0:
            distractor_labels = set(distractor_labels) - set(text_labels)
            cls.clip_gradcam.set_classes(list(distractor_labels))
            distractor_maps = cls.get_clip_saliency_convolve(
                img=img, text_labels=list(distractor_labels), **kwargs
            )
            text_maps -= distractor_maps.mean(dim=0)
        text_maps = text_maps.cpu()
        return text_maps, text_label_features.squeeze(dim=-1)
    @classmethod
    def get_clip_saliency_convolve(
        cls,
        text_labels,
        horizontal_flipping=False,
        positive_attn_only: bool = False,
        tile_batch_size=32,
        prompt_batch_size=32,
        tile_interpolate_batch_size=32,
        **kwargs
    ):
        """Run gradcam over all image tiles (batched over tiles and prompts),
        then splat the per-tile relevance back onto full-resolution maps and
        average across crop scales."""
        cls.clip_gradcam.positive_attn_only = positive_attn_only
        tiles, tile_imgs, counts, tile_sizes = cls.create_tiles(**kwargs)
        # One full-resolution accumulator per tile size.
        outputs = {
            k: torch.zeros(
                [len(text_labels)] + list(count.shape), device=cls.device
            ).half()
            for k, count in counts.items()
        }
        # [num_prompts, num_tiles, grid, grid] relevance for every tile.
        tile_gradcams = torch.cat(
            [
                torch.cat(
                    [
                        cls.clip_gradcam(
                            x=tile_imgs[tile_idx : tile_idx + tile_batch_size],
                            o=text_labels[prompt_idx : prompt_idx + prompt_batch_size],
                        )
                        for tile_idx in np.arange(0, len(tile_imgs), tile_batch_size)
                    ],
                    dim=1,
                )
                for prompt_idx in np.arange(0, len(text_labels), prompt_batch_size)
            ],
            dim=0,
        )
        if horizontal_flipping:
            # Average relevance with the mirrored image (flipped back after).
            flipped_tile_imgs = tile_imgs[
                ..., torch.flip(torch.arange(0, tile_imgs.shape[-1]), dims=[0])
            ]
            flipped_tile_gradcams = torch.cat(
                [
                    torch.cat(
                        [
                            cls.clip_gradcam(
                                x=flipped_tile_imgs[
                                    tile_idx : tile_idx + tile_batch_size
                                ],
                                o=text_labels[
                                    prompt_idx : prompt_idx + prompt_batch_size
                                ],
                            )
                            for tile_idx in np.arange(
                                0, len(tile_imgs), tile_batch_size
                            )
                        ],
                        dim=1,
                    )
                    for prompt_idx in np.arange(0, len(text_labels), prompt_batch_size)
                ],
                dim=0,
            )
            with torch.no_grad():
                flipped_tile_gradcams = flipped_tile_gradcams[
                    ...,
                    torch.flip(
                        torch.arange(0, flipped_tile_gradcams.shape[-1]), dims=[0]
                    ),
                ]
                tile_gradcams = (tile_gradcams + flipped_tile_gradcams) / 2
            del flipped_tile_gradcams
        with torch.no_grad():
            torch.cuda.empty_cache()
            for tile_size in np.unique(tile_sizes):
                tile_size_mask = tile_sizes == tile_size
                curr_size_grads = tile_gradcams[:, tile_size_mask]
                curr_size_tiles = tiles[tile_size_mask]
                for tile_idx in np.arange(
                    0, curr_size_grads.shape[1], tile_interpolate_batch_size
                ):
                    # Upsample tile relevance grids back to tile resolution.
                    resized_tiles = torch.nn.functional.interpolate(
                        curr_size_grads[
                            :, tile_idx : tile_idx + tile_interpolate_batch_size
                        ],
                        size=tile_size,
                        mode="bilinear",
                        align_corners=False,
                    )
                    # NOTE(review): this inner loop re-binds "tile_idx" (the
                    # slice above is evaluated with the outer value first, so
                    # behavior is correct), but renaming would be clearer.
                    for tile_idx, tile_slice in enumerate(
                        curr_size_tiles[
                            tile_idx : tile_idx + tile_interpolate_batch_size
                        ]
                    ):
                        outputs[tile_size][tile_slice] += resized_tiles[
                            :, tile_idx, ...
                        ]
            # Normalize each scale by its coverage count, then average scales.
            output = sum(
                output.float() / count
                for output, count in zip(outputs.values(), counts.values())
            ) / len(counts)
            del outputs, counts, tile_gradcams
            output = output.cpu()
        return output
    @classmethod
    def create_tiles(cls, img, augmentations, cropping_augmentations, **kwargs):
        """Generate (jittered) copies of img and multi-scale sliding-window
        tiles of each copy; returns (tile slices, preprocessed tile batch,
        per-scale coverage counts, per-tile sizes)."""
        assert type(img) == np.ndarray
        images = []
        cls.check_initialized()
        # compute image crops
        img_pil = Image.fromarray(img)
        images.append(np.array(img_pil))
        for _ in range(augmentations):
            images.append(np.array(cls.jittering_transforms(img_pil)))
        # for taking average
        counts = {
            crop_aug["tile_size"]: torch.zeros(img.shape[:2], device=cls.device).float()
            + 1e-5
            for crop_aug in cropping_augmentations
        }
        tiles = []
        tile_imgs = []
        tile_sizes = []
        # NOTE: this loop variable shadows the "img" parameter (counts above
        # were already built from the original image's shape).
        for img in images:
            for crop_aug in cropping_augmentations:
                tile_size = crop_aug["tile_size"]
                stride = crop_aug["stride"]
                # NOTE(review): y ranges over shape[1] but is guarded against
                # shape[0] (and vice versa for x), while the slices use x for
                # rows and y for columns. Harmless for square images — confirm
                # correctness for non-square inputs.
                for y in np.arange(0, img.shape[1] - tile_size + 1, stride):
                    if y >= img.shape[0]:
                        continue
                    for x in np.arange(0, img.shape[0] - tile_size + 1, stride):
                        if x >= img.shape[1]:
                            continue
                        tile = (
                            slice(None, None),
                            slice(x, x + tile_size),
                            slice(y, y + tile_size),
                        )
                        tiles.append(tile)
                        counts[tile_size][tile[1:]] += 1
                        tile_sizes.append(tile_size)
                        # this is currently biggest bottle neck
                        tile_imgs.append(
                            cls.clip_gradcam.preprocess(
                                Image.fromarray(img[tiles[-1][1:]])
                            )
                        )
        tile_imgs = torch.stack(tile_imgs).to(cls.device)
        return np.array(tiles), tile_imgs, counts, np.array(tile_sizes)
# Prompt-ensembling templates from CLIP's ImageNet zero-shot evaluation;
# each is formatted with a class name via str.format.
imagenet_templates = [
    "a bad photo of a {}.",
    "a photo of many {}.",
    "a sculpture of a {}.",
    "a photo of the hard to see {}.",
    "a low resolution photo of the {}.",
    "a rendering of a {}.",
    "graffiti of a {}.",
    "a bad photo of the {}.",
    "a cropped photo of the {}.",
    "a tattoo of a {}.",
    "the embroidered {}.",
    "a photo of a hard to see {}.",
    "a bright photo of a {}.",
    "a photo of a clean {}.",
    "a photo of a dirty {}.",
    "a dark photo of the {}.",
    "a drawing of a {}.",
    "a photo of my {}.",
    "the plastic {}.",
    "a photo of the cool {}.",
    "a close-up photo of a {}.",
    "a black and white photo of the {}.",
    "a painting of the {}.",
    "a painting of a {}.",
    "a pixelated photo of the {}.",
    "a sculpture of the {}.",
    "a bright photo of the {}.",
    "a cropped photo of a {}.",
    "a plastic {}.",
    "a photo of the dirty {}.",
    "a jpeg corrupted photo of a {}.",
    "a blurry photo of the {}.",
    "a photo of the {}.",
    "a good photo of the {}.",
    "a rendering of the {}.",
    "a {} in a video game.",
    "a photo of one {}.",
    "a doodle of a {}.",
    "a close-up photo of the {}.",
    "a photo of a {}.",
    "the origami {}.",
    "the {} in a video game.",
    "a sketch of a {}.",
    "a doodle of the {}.",
    "a origami {}.",
    "a low resolution photo of a {}.",
    "the toy {}.",
    "a rendition of the {}.",
    "a photo of the clean {}.",
    "a photo of a large {}.",
    "a rendition of a {}.",
    "a photo of a nice {}.",
    "a photo of a weird {}.",
    "a blurry photo of a {}.",
    "a cartoon {}.",
    "art of a {}.",
    "a sketch of the {}.",
    "a embroidered {}.",
    "a pixelated photo of a {}.",
    "itap of the {}.",
    "a jpeg corrupted photo of the {}.",
    "a good photo of a {}.",
    "a plushie {}.",
    "a photo of the nice {}.",
    "a photo of the small {}.",
    "a photo of the weird {}.",
    "the cartoon {}.",
    "art of the {}.",
    "a drawing of the {}.",
    "a photo of the large {}.",
    "a black and white photo of a {}.",
    "the plushie {}.",
    "a dark photo of a {}.",
    "itap of a {}.",
    "graffiti of the {}.",
    "a toy {}.",
    "itap of my {}.",
    "a photo of a cool {}.",
    "a photo of a small {}.",
    "a tattoo of the {}.",
]
# Public API of this module.
__all__ = ["ClipWrapper", "imagenet_templates"]
| 12,890 | 33.934959 | 88 | py |
semantic-abstraction | semantic-abstraction-main/CLIP/clip/model_explainability.py | # modified from: https://github.com/hila-chefer/Transformer-MM-Explainability/blob/main/CLIP/clip/model.py
from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from .auxiliary import (
multi_head_attention_forward,
MultiheadAttention,
interpolate_positional_emb,
)
import sys
class Bottleneck(nn.Module):
    """CLIP's anti-aliased ResNet bottleneck: 1x1 -> 3x3 -> (avgpool if
    stride>1) -> 1x1, with an avgpool-prepended downsample on the shortcut."""
    # Output channels = planes * expansion.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1):
        super().__init__()
        # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = None
        self.stride = stride
        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
            # Key names ("-1", "0", "1") match the original CLIP checkpoints.
            self.downsample_modules = OrderedDict(
                [
                    ("-1", nn.AvgPool2d(stride)),
                    (
                        "0",
                        nn.Conv2d(
                            inplanes, planes * self.expansion, 1, stride=1, bias=False
                        ),
                    ),
                    ("1", nn.BatchNorm2d(planes * self.expansion)),
                ]
            )
            self.downsample = nn.Sequential(self.downsample_modules)
    def forward(self, x: torch.Tensor):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.avgpool(out)
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class AttentionPool2d(nn.Module):
    """QKV attention pooling over a spatial feature map: the mean token
    attends to all spatial tokens and its output is the pooled embedding."""
    def __init__(
        self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None
    ):
        super().__init__()
        # One positional embedding per spatial token plus one for the mean token.
        self.positional_embedding = nn.Parameter(
            torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5
        )
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads
    def forward(self, x):
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
            2, 0, 1
        ) # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
        # Functional multi-head attention with separate q/k/v projections;
        # this is the explainability-aware variant from .auxiliary.
        x, _ = multi_head_attention_forward(
            query=x,
            key=x,
            value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat(
                [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]
            ),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False,
        )
        # Return only the pooled (mean) token's output.
        return x[0]
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """
    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
        """layers: blocks per stage (4-tuple); width: base channels, stages
        use width * (1, 2, 4, 8); heads/output_dim feed the attention pool."""
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution
        # the 3-layer stem
        self.conv1 = nn.Conv2d(
            3, width // 2, kernel_size=3, stride=2, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.conv2 = nn.Conv2d(
            width // 2, width // 2, kernel_size=3, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.avgpool = nn.AvgPool2d(2)
        self.relu = nn.ReLU(inplace=True)
        # residual layers
        self._inplanes = width # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
        embed_dim = width * 32 # the ResNet feature dimension
        self.attnpool = AttentionPool2d(
            input_resolution // 32, embed_dim, heads, output_dim
        )
    def _make_layer(self, planes, blocks, stride=1):
        # One (possibly strided) Bottleneck followed by blocks-1 unit-stride ones.
        layers = [Bottleneck(self._inplanes, planes, stride)]
        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Stem -> 4 residual stages -> attention pooling; returns [N, output_dim]."""
        def stem(x):
            for conv, bn in [
                (self.conv1, self.bn1),
                (self.conv2, self.bn2),
                (self.conv3, self.bn3),
            ]:
                x = self.relu(bn(conv(x)))
            x = self.avgpool(x)
            return x
        # Match input dtype to the weights (supports fp16 inference).
        x = x.type(self.conv1.weight.dtype)
        x = stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.attnpool(x)
        return x
class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16: normalize in fp32, then
    cast the result back to the caller's dtype."""
    def forward(self, x: torch.Tensor):
        caller_dtype = x.dtype
        result = super().forward(x.to(torch.float32))
        return result.type(caller_dtype)
class QuickGELU(nn.Module):
    """GELU approximated as x * sigmoid(1.702 * x) (as used in CLIP)."""
    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(1.702 * x)
        return x * gate
class ResidualAttentionBlock(nn.Module):
    """Pre-norm transformer block that, for visual blocks, records attention
    probabilities and their gradients via hooks so relevance maps can be
    computed (Transformer-MM-Explainability)."""
    def __init__(
        self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, is_visual=True
    ):
        super().__init__()
        # Custom MultiheadAttention (from .auxiliary) that supports
        # attention-prob forward/backward hooks.
        self.attn = MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp_modules = OrderedDict(
            [
                ("c_fc", nn.Linear(d_model, d_model * 4)),
                ("gelu", QuickGELU()),
                ("c_proj", nn.Linear(d_model * 4, d_model)),
            ]
        )
        self.mlp = nn.Sequential(self.mlp_modules)
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask
        # Populated by the hooks below during forward/backward.
        self.attn_probs = None
        self.attn_grad = None
        self.is_visual = is_visual
    def set_attn_probs(self, attn_probs):
        # Forward hook: stash the attention probabilities.
        self.attn_probs = attn_probs
    def set_attn_grad(self, attn_grad):
        # Backward hook: stash the attention-prob gradients.
        self.attn_grad = attn_grad
    def attention(self, x: torch.Tensor):
        # Keep the mask on the activation dtype/device (written back once moved).
        self.attn_mask = (
            self.attn_mask.to(dtype=x.dtype, device=x.device)
            if self.attn_mask is not None
            else None
        )
        if self.is_visual:
            # Only visual blocks register the explainability hooks.
            return self.attn(
                x,
                x,
                x,
                need_weights=False,
                attn_mask=self.attn_mask,
                attention_probs_forward_hook=self.set_attn_probs,
                attention_probs_backwards_hook=self.set_attn_grad,
            )[0]
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
    def forward(self, x: torch.Tensor):
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x
class Transformer(nn.Module):
    """Stack of ResidualAttentionBlocks.

    ``forward`` optionally accepts ``tile_attn_mask``, which temporarily
    replaces every block's attention mask for that single pass and then
    restores the originals afterwards.
    """
    def __init__(
        self,
        width: int,
        layers: int,
        heads: int,
        attn_mask: torch.Tensor = None,
        is_visual=False,
    ):
        super().__init__()
        self.width = width
        self.layers = layers
        self.resblocks_modules = [
            ResidualAttentionBlock(width, heads, attn_mask, is_visual=is_visual)
            for _ in range(layers)
        ]
        self.resblocks = nn.Sequential(*self.resblocks_modules)
    def forward(self, x: torch.Tensor, tile_attn_mask: torch.Tensor = None):
        # BUG FIX: the previous version iterated over *all* submodules of the
        # Sequential (including the container itself, MultiheadAttention,
        # LayerNorm, Linear, ...), which have no `attn_mask` attribute, and
        # called `.clone()` on masks that are None (the default for visual
        # blocks) -- both raised whenever tile_attn_mask was supplied. We now
        # restrict the swap to ResidualAttentionBlock instances and guard
        # against None masks, mirroring the reference Transformer in model.py.
        prev_attn_masks = []
        if tile_attn_mask is not None:
            for resblock in filter(
                lambda module: isinstance(module, ResidualAttentionBlock),
                self.resblocks.modules(),
            ):
                prev_attn_masks.append(
                    resblock.attn_mask.clone()
                    if resblock.attn_mask is not None
                    else None
                )
                resblock.attn_mask = tile_attn_mask
        x = self.resblocks(x)
        if tile_attn_mask is not None:
            # Restore the original masks so the override is scoped to this call.
            for resblock, prev_attn_mask in zip(
                filter(
                    lambda module: isinstance(module, ResidualAttentionBlock),
                    self.resblocks.modules(),
                ),
                prev_attn_masks,
            ):
                resblock.attn_mask = prev_attn_mask
        return x
class VisionTransformer(nn.Module):
    """Explainability-enabled ViT image encoder (visual blocks record attention
    probs); supports variable token counts via positional-embedding interpolation."""
    def __init__(
        self,
        input_resolution: int,
        patch_size: int,
        width: int,
        layers: int,
        heads: int,
        output_dim: int,
    ):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        # Non-overlapping patch embedding (stride == kernel == patch_size).
        self.conv1 = nn.Conv2d(
            in_channels=3,
            out_channels=width,
            kernel_size=patch_size,
            stride=patch_size,
            bias=False,
        )
        scale = width**-0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(
            scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)
        )
        self.ln_pre = LayerNorm(width)
        self.transformer = Transformer(width, layers, heads, is_visual=True)
        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
    def forward(self, x: torch.Tensor, **kwargs):
        """Encode [*, 3, H, W] into [*, output_dim]; kwargs (e.g. tile_attn_mask)
        are forwarded to the transformer."""
        x = self.conv1(x) # shape = [*, width, grid, grid]
        # shape = [*, width, grid ** 2]
        x = x.reshape(x.shape[0], x.shape[1], -1)
        x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
        x = torch.cat(
            [
                self.class_embedding.to(x.dtype)
                + torch.zeros(
                    x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device
                ),
                x,
            ],
            dim=1,
        ) # shape = [*, grid ** 2 + 1, width]
        # NOTE(review): 50 hard-codes ViT-B/32 at 224px (7*7 patches + CLS);
        # other token counts trigger positional-embedding interpolation.
        if len(x[0]) != 50:
            pe = interpolate_positional_emb(self.positional_embedding, len(x[0]))
            x += pe.to(x.dtype)
        else:
            x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)
        x = x.permute(1, 0, 2) # NLD -> LND
        x = self.transformer(x, **kwargs)
        x = x.permute(1, 0, 2) # LND -> NLD
        # Only the CLS token is used as the image representation.
        x = self.ln_post(x[:, 0, :])
        if self.proj is not None:
            x = x @ self.proj
        return x
class CLIP(nn.Module):
    """Contrastive Language-Image Pretraining model.

    Pairs an image encoder (modified ResNet or ViT, chosen by the type of
    ``vision_layers``) with a causal text transformer, projecting both into a
    shared ``embed_dim``-dimensional space for cosine-similarity matching.
    """
    def __init__(
        self,
        embed_dim: int,
        # vision
        image_resolution: int,
        vision_layers: Union[Tuple[int, int, int, int], int],
        vision_width: int,
        vision_patch_size: int,
        # text
        context_length: int,
        vocab_size: int,
        transformer_width: int,
        transformer_heads: int,
        transformer_layers: int,
    ):
        super().__init__()
        self.context_length = context_length
        # A tuple/list of per-stage layer counts selects the ResNet backbone;
        # a plain int selects the Vision Transformer.
        if isinstance(vision_layers, (tuple, list)):
            vision_heads = vision_width * 32 // 64
            self.visual = ModifiedResNet(
                layers=vision_layers,
                output_dim=embed_dim,
                heads=vision_heads,
                input_resolution=image_resolution,
                width=vision_width,
            )
        else:
            vision_heads = vision_width // 64
            self.visual = VisionTransformer(
                input_resolution=image_resolution,
                patch_size=vision_patch_size,
                width=vision_width,
                layers=vision_layers,
                heads=vision_heads,
                output_dim=embed_dim,
            )
        # Text transformer uses a causal mask so each token attends left-only.
        self.transformer = Transformer(
            width=transformer_width,
            layers=transformer_layers,
            heads=transformer_heads,
            attn_mask=self.build_attention_mask(),
            is_visual=False,
        )
        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(
            torch.empty(self.context_length, transformer_width)
        )
        self.ln_final = LayerNorm(transformer_width)
        self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
        # Learnable softmax temperature, initialized to log(1/0.07).
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
        self.initialize_parameters()
    def initialize_parameters(self):
        """Initialize embeddings, attention/MLP weights, and projections in place."""
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)
        if isinstance(self.visual, ModifiedResNet):
            if self.visual.attnpool is not None:
                std = self.visual.attnpool.c_proj.in_features**-0.5
                nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
            # Zero the final BN scale of each bottleneck so residual branches
            # start as identity.
            for resnet_block in [
                self.visual.layer1,
                self.visual.layer2,
                self.visual.layer3,
                self.visual.layer4,
            ]:
                for name, param in resnet_block.named_parameters():
                    if name.endswith("bn3.weight"):
                        nn.init.zeros_(param)
        # Depth-scaled init for the text transformer.
        proj_std = (self.transformer.width**-0.5) * (
            (2 * self.transformer.layers) ** -0.5
        )
        attn_std = self.transformer.width**-0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.transformer.width**-0.5)
    def build_attention_mask(self):
        """Return a [context_length, context_length] additive causal mask."""
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask
    @property
    def dtype(self):
        # Both visual backbones expose a ``conv1`` stem, so this reflects the
        # model-wide parameter dtype (fp32 or fp16 after convert_weights).
        return self.visual.conv1.weight.dtype
    def encode_image(self, image, **kwargs):
        """Encode images into the joint embedding space (unnormalized)."""
        return self.visual(image.type(self.dtype), **kwargs)
    def encode_text(self, text):
        """Encode token-id tensors into the joint embedding space (unnormalized)."""
        x = self.token_embedding(text).type(self.dtype)  # [batch_size, n_ctx, d_model]
        x = x + self.positional_embedding.type(self.dtype)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x).type(self.dtype)
        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
        return x
    def forward(self, image, text):
        """Return temperature-scaled image-text and text-image similarity logits."""
        image_features = self.encode_image(image)
        text_features = self.encode_text(text)
        # normalized features
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)
        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logits_per_image.t()
        # shape = [global_batch_size, global_batch_size]
        return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
    """Convert applicable model parameters to fp16"""

    def _convert_weights_to_fp16(l):
        # Conv / linear layers: halve the weight and the optional bias.
        if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            l.weight.data = l.weight.data.half()
            if l.bias is not None:
                l.bias.data = l.bias.data.half()
        # Attention modules expose several projection tensors by attribute name.
        if isinstance(l, MultiheadAttention):
            attr_names = [
                "in_proj_weight",
                "q_proj_weight",
                "k_proj_weight",
                "v_proj_weight",
                "in_proj_bias",
                "bias_k",
                "bias_v",
            ]
            for attr in attr_names:
                tensor = getattr(l, attr)
                if tensor is not None:
                    tensor.data = tensor.data.half()
        # Bare projection parameters hung directly on a module.
        for name in ["text_projection", "proj"]:
            if hasattr(l, name):
                attr = getattr(l, name)
                if attr is not None:
                    attr.data = attr.data.half()

    model.apply(_convert_weights_to_fp16)
def build_model(state_dict: dict):
    """Construct a CLIP model whose architecture matches ``state_dict``.

    Detects the visual backbone (ViT vs. ResNet) from the checkpoint keys,
    infers every hyperparameter from tensor shapes, converts the model to
    fp16, loads the weights, and returns the model in eval mode.
    """
    is_vit = "visual.proj" in state_dict
    if is_vit:
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        # One attention in-projection per transformer block.
        attn_keys = [
            k
            for k in state_dict.keys()
            if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")
        ]
        vision_layers = len(attn_keys)
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        # Positional embedding holds grid**2 patch slots + 1 CLS slot.
        grid_size = round(
            (state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5
        )
        image_resolution = vision_patch_size * grid_size
    else:
        # ResNet: count distinct block indices per stage.
        counts: list = [
            len(
                set(
                    k.split(".")[2]
                    for k in state_dict
                    if k.startswith(f"visual.layer{b}")
                )
            )
            for b in [1, 2, 3, 4]
        ]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round(
            (state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5
        )
        vision_patch_size = None
        assert (
            output_width**2 + 1
            == state_dict["visual.attnpool.positional_embedding"].shape[0]
        )
        image_resolution = output_width * 32
    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(
        set(
            k.split(".")[2]
            for k in state_dict
            if k.startswith("transformer.resblocks")
        )
    )
    model = CLIP(
        embed_dim,
        image_resolution,
        vision_layers,
        vision_width,
        vision_patch_size,
        context_length,
        vocab_size,
        transformer_width,
        transformer_heads,
        transformer_layers,
    )
    # These bookkeeping entries are not parameters; drop them before loading.
    for key in ["input_resolution", "context_length", "vocab_size"]:
        if key in state_dict:
            del state_dict[key]
    convert_weights(model)
    model.load_state_dict(state_dict)
    return model.eval()
| 20,409 | 32.84743 | 112 | py |
semantic-abstraction | semantic-abstraction-main/CLIP/tests/test_consistency.py | import numpy as np
import pytest
import torch
from PIL import Image
import clip
@pytest.mark.parametrize("model_name", clip.available_models())
def test_consistency(model_name):
    """The TorchScript and eager CLIP variants must produce matching probabilities."""
    device = "cpu"
    jit_model, transform = clip.load(model_name, device=device, jit=True)
    py_model, _ = clip.load(model_name, device=device, jit=False)
    image = transform(Image.open("CLIP.png")).unsqueeze(0).to(device)
    text = clip.tokenize(["a diagram", "a dog", "a cat"]).to(device)

    def _probs(model):
        logits, _ = model(image, text)
        return logits.softmax(dim=-1).cpu().numpy()

    with torch.no_grad():
        jit_probs = _probs(jit_model)
        py_probs = _probs(py_model)
    assert np.allclose(jit_probs, py_probs, atol=0.01, rtol=0.1)
| 812 | 30.269231 | 73 | py |
UniVL | UniVL-main/main_task_retrieval.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import torch
from torch.utils.data import (SequentialSampler)
import numpy as np
import random
import os
from metrics import compute_metrics
import time
import argparse
from modules.tokenization import BertTokenizer
from modules.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from modules.modeling import UniVL
from modules.optimization import BertAdam
from torch.utils.data import DataLoader
from util import parallel_apply, get_logger
from dataloaders.dataloader_youcook_retrieval import Youcook_DataLoader
from dataloaders.dataloader_msrvtt_retrieval import MSRVTT_DataLoader
from dataloaders.dataloader_msrvtt_retrieval import MSRVTT_TrainDataLoader
# Join the NCCL process group at import time; every torch.distributed call
# below (world size, samplers, DDP wrapping) relies on it being initialized.
torch.distributed.init_process_group(backend="nccl")
# Module-level ``global`` is a no-op; kept to signal that ``logger`` is shared state.
global logger
def get_args(description='UniVL on Retrieval Task'):
    """Parse and validate command-line arguments for retrieval fine-tuning.

    Returns the parsed namespace with ``batch_size`` already divided by
    ``gradient_accumulation_steps``.
    """
    parser = argparse.ArgumentParser(description=description)
    # Run-mode flags.
    parser.add_argument("--do_pretrain", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
    # Dataset paths.
    parser.add_argument('--train_csv', type=str, default='data/youcookii_singlef_train.csv', help='')
    parser.add_argument('--val_csv', type=str, default='data/youcookii_singlef_val.csv', help='')
    parser.add_argument('--data_path', type=str, default='data/youcookii_caption.pickle', help='data pickle file path')
    parser.add_argument('--features_path', type=str, default='data/youcookii_videos_feature.pickle', help='feature path')
    # Optimization / schedule hyperparameters.
    parser.add_argument('--num_thread_reader', type=int, default=1, help='')
    parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
    parser.add_argument('--epochs', type=int, default=20, help='upper epoch limit')
    parser.add_argument('--batch_size', type=int, default=256, help='batch size')
    parser.add_argument('--batch_size_val', type=int, default=3500, help='batch size eval')
    parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate exp epoch decay')
    parser.add_argument('--n_display', type=int, default=100, help='Information display frequence')
    parser.add_argument('--video_dim', type=int, default=1024, help='video feature dimension')
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--max_words', type=int, default=20, help='')
    parser.add_argument('--max_frames', type=int, default=100, help='')
    parser.add_argument('--feature_framerate', type=int, default=1, help='')
    parser.add_argument('--margin', type=float, default=0.1, help='margin for loss')
    parser.add_argument('--hard_negative_rate', type=float, default=0.5, help='rate of intra negative sample')
    parser.add_argument('--negative_weighting', type=int, default=1, help='Weight the loss for intra negative')
    parser.add_argument('--n_pair', type=int, default=1, help='Num of pair to output from data loader')
    # Model / checkpoint configuration.
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--bert_model", default="bert-base-uncased", type=str, required=True,
                        help="Bert pre-trained model")
    parser.add_argument("--visual_model", default="visual-base", type=str, required=False, help="Visual module")
    parser.add_argument("--cross_model", default="cross-base", type=str, required=False, help="Cross module")
    parser.add_argument("--decoder_model", default="decoder-base", type=str, required=False, help="Decoder module")
    parser.add_argument("--init_model", default=None, type=str, required=False, help="Initial model.")
    parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--n_gpu', type=int, default=1, help="Changed in the execute process.")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    # Task / distributed-training options.
    parser.add_argument("--task_type", default="retrieval", type=str, help="Point the task `retrieval` to finetune.")
    parser.add_argument("--datatype", default="youcook", type=str, help="Point the dataset `youcook` to finetune.")
    parser.add_argument("--world_size", default=0, type=int, help="distribted training")
    parser.add_argument("--local_rank", default=0, type=int, help="distribted training")
    parser.add_argument('--coef_lr', type=float, default=0.1, help='coefficient for bert branch.')
    parser.add_argument('--use_mil', action='store_true', help="Whether use MIL as Miech et. al. (2020).")
    parser.add_argument('--sampled_use_mil', action='store_true', help="Whether MIL, has a high priority than use_mil.")
    # Layer counts for each sub-module.
    parser.add_argument('--text_num_hidden_layers', type=int, default=12, help="Layer NO. of text.")
    parser.add_argument('--visual_num_hidden_layers', type=int, default=6, help="Layer NO. of visual.")
    parser.add_argument('--cross_num_hidden_layers', type=int, default=2, help="Layer NO. of cross.")
    parser.add_argument('--decoder_num_hidden_layers', type=int, default=3, help="Layer NO. of decoder.")
    parser.add_argument('--train_sim_after_cross', action='store_true', help="Test retrieval after cross encoder.")
    parser.add_argument('--expand_msrvtt_sentences', action='store_true', help="")
    args = parser.parse_args()
    # Check parameters.
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")
    # The CLI batch size is the effective size; divide by accumulation steps
    # to get the per-step micro-batch.
    args.batch_size = int(args.batch_size / args.gradient_accumulation_steps)
    return args
def set_seed_logger(args):
    """Seed every RNG, pin the CUDA device for this rank, and open the log file."""
    global logger
    # Seed Python, NumPy and torch (all devices) identically for reproducibility.
    random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)  # if you are using multi-GPU.
    # Deterministic cuDNN kernels (disables autotuning).
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    args.world_size = torch.distributed.get_world_size()
    torch.cuda.set_device(args.local_rank)
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)
    logger = get_logger(os.path.join(args.output_dir, "log.txt"))
    if args.local_rank == 0:
        logger.info("Effective parameters:")
        for key in sorted(args.__dict__):
            logger.info("  <<< {}: {}".format(key, args.__dict__[key]))
    return args
def init_device(args, local_rank):
    """Select the device for this rank and validate batch sizes against GPU count."""
    global logger
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu", local_rank)
    n_gpu = torch.cuda.device_count()
    logger.info("device: {} n_gpu: {}".format(device, n_gpu))
    args.n_gpu = n_gpu
    # Batches are split evenly across GPUs, so both sizes must divide cleanly.
    if args.batch_size % args.n_gpu != 0 or args.batch_size_val % args.n_gpu != 0:
        raise ValueError("Invalid batch_size/batch_size_val and n_gpu parameter: {}%{} and {}%{}, should be == 0".format(
            args.batch_size, args.n_gpu, args.batch_size_val, args.n_gpu))
    return device, n_gpu
def init_model(args, device, n_gpu, local_rank):
    """Build a UniVL model, optionally warm-started from ``args.init_model``."""
    # Load an initial checkpoint on CPU if one was supplied.
    model_state_dict = torch.load(args.init_model, map_location='cpu') if args.init_model else None
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
    model = UniVL.from_pretrained(args.bert_model, args.visual_model, args.cross_model, args.decoder_model,
                                  cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
    model.to(device)
    return model
def prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, local_rank, coef_lr=1.):
    """Build a BertAdam optimizer with decay/no-decay, BERT/non-BERT parameter
    groups, and wrap the model in DistributedDataParallel.

    NOTE: locals renamed for clarity — the original ``no_decay_param_tp`` /
    ``decay_param_tp`` names were swapped relative to their contents; the
    grouping and weight-decay values here are byte-identical in effect.
    """
    if hasattr(model, 'module'):
        model = model.module
    named_params = list(model.named_parameters())
    # Parameter names containing any of these never receive weight decay.
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    decayable = [(n, p) for n, p in named_params if not any(nd in n for nd in no_decay)]
    non_decayable = [(n, p) for n, p in named_params if any(nd in n for nd in no_decay)]
    decayable_bert = [(n, p) for n, p in decayable if "bert." in n]
    decayable_other = [(n, p) for n, p in decayable if "bert." not in n]
    non_decayable_bert = [(n, p) for n, p in non_decayable if "bert." in n]
    non_decayable_other = [(n, p) for n, p in non_decayable if "bert." not in n]
    # BERT-branch groups run at a scaled learning rate (coef_lr).
    optimizer_grouped_parameters = [
        {'params': [p for n, p in decayable_bert], 'weight_decay': 0.01, 'lr': args.lr * coef_lr},
        {'params': [p for n, p in decayable_other], 'weight_decay': 0.01},
        {'params': [p for n, p in non_decayable_bert], 'weight_decay': 0.0, 'lr': args.lr * coef_lr},
        {'params': [p for n, p in non_decayable_other], 'weight_decay': 0.0}
    ]
    scheduler = None
    optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=args.warmup_proportion,
                         schedule='warmup_linear', t_total=num_train_optimization_steps, weight_decay=0.01,
                         max_grad_norm=1.0)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank],
                                                      output_device=local_rank, find_unused_parameters=True)
    return optimizer, scheduler, model
def dataloader_youcook_train(args, tokenizer):
    """Create the distributed YoucookII training dataloader.

    Returns (dataloader, dataset length, distributed sampler).
    """
    dataset = Youcook_DataLoader(
        csv=args.train_csv,
        data_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
    )
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    loader = DataLoader(
        dataset,
        batch_size=args.batch_size // args.n_gpu,  # per-GPU micro-batch
        num_workers=args.num_thread_reader,
        pin_memory=False,
        shuffle=(sampler is None),  # the distributed sampler handles shuffling
        sampler=sampler,
        drop_last=True,
    )
    return loader, len(dataset), sampler
def dataloader_youcook_test(args, tokenizer):
    """Create the sequential YoucookII validation dataloader.

    Returns (dataloader, dataset length).
    """
    dataset = Youcook_DataLoader(
        csv=args.val_csv,
        data_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
    )
    sampler = SequentialSampler(dataset)
    loader = DataLoader(
        dataset,
        sampler=sampler,
        batch_size=args.batch_size_val,
        num_workers=args.num_thread_reader,
        pin_memory=False,
    )
    logger.info('YoucookII validation pairs: {}'.format(len(dataset)))
    return loader, len(dataset)
def dataloader_msrvtt_train(args, tokenizer):
    """Create the distributed MSR-VTT training dataloader.

    Returns (dataloader, dataset length, distributed sampler).
    """
    dataset = MSRVTT_TrainDataLoader(
        csv_path=args.train_csv,
        json_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
        unfold_sentences=args.expand_msrvtt_sentences,
    )
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    loader = DataLoader(
        dataset,
        batch_size=args.batch_size // args.n_gpu,  # per-GPU micro-batch
        num_workers=args.num_thread_reader,
        pin_memory=False,
        shuffle=(sampler is None),  # the distributed sampler handles shuffling
        sampler=sampler,
        drop_last=True,
    )
    return loader, len(dataset), sampler
def dataloader_msrvtt_test(args, tokenizer):
    """Create the (non-distributed) MSR-VTT evaluation dataloader.

    Returns (dataloader, dataset length).
    """
    dataset = MSRVTT_DataLoader(
        csv_path=args.val_csv,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
    )
    loader = DataLoader(
        dataset,
        batch_size=args.batch_size_val,
        num_workers=args.num_thread_reader,
        shuffle=False,
        drop_last=False,
    )
    return loader, len(dataset)
def save_model(epoch, args, model, type_name=""):
    """Persist the (unwrapped) model's state dict for ``epoch``; return the path."""
    # Unwrap DataParallel/DDP so only the raw weights are saved.
    model_to_save = model.module if hasattr(model, 'module') else model
    suffix = "" if type_name == "" else type_name + "."
    output_model_file = os.path.join(
        args.output_dir, "pytorch_model.bin.{}{}".format(suffix, epoch))
    torch.save(model_to_save.state_dict(), output_model_file)
    logger.info("Model saved to %s", output_model_file)
    return output_model_file
def load_model(epoch, args, n_gpu, device, model_file=None):
    """Restore a UniVL checkpoint written by ``save_model``.

    Returns the loaded model on ``device``, or None if the file is missing.
    """
    if model_file is None or len(model_file) == 0:
        model_file = os.path.join(args.output_dir, "pytorch_model.bin.{}".format(epoch))
    # Guard clause: silently return None when the checkpoint does not exist.
    if not os.path.exists(model_file):
        return None
    model_state_dict = torch.load(model_file, map_location='cpu')
    if args.local_rank == 0:
        logger.info("Model loaded from %s", model_file)
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
    model = UniVL.from_pretrained(args.bert_model, args.visual_model, args.cross_model, args.decoder_model,
                                  cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
    model.to(device)
    return model
def train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, local_rank=0):
    """Run one training epoch with gradient accumulation.

    Returns (mean loss over the dataloader, updated global_step).
    """
    global logger
    torch.cuda.empty_cache()
    model.train()
    log_step = args.n_display
    start_time = time.time()
    total_loss = 0
    for step, batch in enumerate(train_dataloader):
        if n_gpu == 1:
            # multi-gpu does scattering it-self
            batch = tuple(t.to(device=device, non_blocking=True) for t in batch)
        input_ids, input_mask, segment_ids, video, video_mask, \
        pairs_masked_text, pairs_token_labels, masked_video, video_labels_index = batch
        loss = model(input_ids, segment_ids, input_mask, video, video_mask,
                     pairs_masked_text=pairs_masked_text, pairs_token_labels=pairs_token_labels,
                     masked_video=masked_video, video_labels_index=video_labels_index)
        if n_gpu > 1:
            loss = loss.mean() # mean() to average on multi-gpu.
        # Scale the loss so accumulated gradients average over micro-batches.
        if args.gradient_accumulation_steps > 1:
            loss = loss / args.gradient_accumulation_steps
        loss.backward()
        total_loss += float(loss)
        # Only step the optimizer every ``gradient_accumulation_steps`` batches.
        if (step + 1) % args.gradient_accumulation_steps == 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            if scheduler is not None:
                scheduler.step()  # Update learning rate schedule
            optimizer.step()
            optimizer.zero_grad()
            global_step += 1
            if global_step % log_step == 0 and local_rank == 0:
                logger.info("Epoch: %d/%s, Step: %d/%d, Lr: %s, Loss: %f, Time/step: %f", epoch + 1,
                            args.epochs, step + 1,
                            len(train_dataloader), "-".join([str('%.6f'%itm) for itm in sorted(list(set(optimizer.get_lr())))]),
                            float(loss),
                            (time.time() - start_time) / (log_step * args.gradient_accumulation_steps))
                start_time = time.time()
    total_loss = total_loss / len(train_dataloader)
    return total_loss, global_step
def _run_on_single_gpu(model, batch_list_t, batch_list_v, batch_sequence_output_list, batch_visual_output_list):
    """Compute text-vs-video similarity rows for the given cached batch outputs.

    Returns a list of numpy arrays, one row-block per text batch.
    """
    sim_matrix = []
    for t_idx, t_batch in enumerate(batch_list_t):
        input_ids, input_mask, segment_ids, _, _, _, _, _, _ = t_batch
        sequence_output = batch_sequence_output_list[t_idx]
        row_blocks = []
        for v_idx, v_batch in enumerate(batch_list_v):
            _, _, _, video, video_mask, _, _, _, _ = v_batch
            visual_output = batch_visual_output_list[v_idx]
            logits = model.get_similarity_logits(sequence_output, visual_output, input_mask, video_mask)
            row_blocks.append(logits.cpu().detach().numpy())
        # Concatenate over the video axis to form one full row block.
        sim_matrix.append(np.concatenate(tuple(row_blocks), axis=-1))
    return sim_matrix
def eval_epoch(args, model, test_dataloader, device, n_gpu):
    """Evaluate retrieval: cache all encoder outputs, build the full
    text-video similarity matrix (optionally sharded over GPUs), and
    return the R@1 metric.
    """
    if hasattr(model, 'module'):
        model = model.module.to(device)
    else:
        model = model.to(device)
    model.eval()
    with torch.no_grad():
        batch_list = []
        batch_sequence_output_list, batch_visual_output_list = [], []
        # Pass 1: encode every batch once and cache the outputs.
        for bid, batch in enumerate(test_dataloader):
            batch = tuple(t.to(device) for t in batch)
            input_ids, input_mask, segment_ids, video, video_mask, _, _, _, _ = batch
            sequence_output, visual_output = model.get_sequence_visual_output(input_ids, segment_ids, input_mask, video, video_mask)
            batch_sequence_output_list.append(sequence_output)
            batch_visual_output_list.append(visual_output)
            batch_list.append(batch)
            print("{}/{}\r".format(bid, len(test_dataloader)), end="")
        # Pass 2: all-pairs similarity. With multiple GPUs the text batches are
        # sharded across devices, each scored against ALL video batches.
        if n_gpu > 1:
            device_ids = list(range(n_gpu))
            batch_list_t_splits = []
            batch_list_v_splits = []
            batch_t_output_splits = []
            batch_v_output_splits = []
            bacth_len = len(batch_list)  # (sic) original variable name kept
            split_len = (bacth_len + n_gpu - 1) // n_gpu
            for dev_id in device_ids:
                s_, e_ = dev_id * split_len, (dev_id + 1) * split_len
                if dev_id == 0:
                    # Rank 0 keeps the tensors where they already live.
                    batch_list_t_splits.append(batch_list[s_:e_])
                    batch_list_v_splits.append(batch_list)
                    batch_t_output_splits.append(batch_sequence_output_list[s_:e_])
                    batch_v_output_splits.append(batch_visual_output_list)
                else:
                    # Other ranks get device-local copies of their shard plus
                    # all video outputs.
                    devc = torch.device('cuda:{}'.format(str(dev_id)))
                    devc_batch_list = [tuple(t.to(devc) for t in b) for b in batch_list[s_:e_]]
                    batch_list_t_splits.append(devc_batch_list)
                    devc_batch_list = [tuple(t.to(devc) for t in b) for b in batch_list]
                    batch_list_v_splits.append(devc_batch_list)
                    devc_batch_list = [b.to(devc) for b in batch_sequence_output_list[s_:e_]]
                    batch_t_output_splits.append(devc_batch_list)
                    devc_batch_list = [b.to(devc) for b in batch_visual_output_list]
                    batch_v_output_splits.append(devc_batch_list)
            parameters_tuple_list = [(batch_list_t_splits[dev_id], batch_list_v_splits[dev_id],
                                      batch_t_output_splits[dev_id], batch_v_output_splits[dev_id]) for dev_id in device_ids]
            parallel_outputs = parallel_apply(_run_on_single_gpu, model, parameters_tuple_list, device_ids)
            sim_matrix = []
            for idx in range(len(parallel_outputs)):
                sim_matrix += parallel_outputs[idx]
            sim_matrix = np.concatenate(tuple(sim_matrix), axis=0)
        else:
            sim_matrix = _run_on_single_gpu(model, batch_list, batch_list, batch_sequence_output_list, batch_visual_output_list)
    metrics = compute_metrics(sim_matrix)
    logger.info('\t Length-T: {}, Length-V:{}'.format(len(sim_matrix), len(sim_matrix[0])))
    logger.info('\t>>> R@1: {:.4f} - R@5: {:.4f} - R@10: {:.4f} - Median R: {}'.
                format(metrics['R1'], metrics['R5'], metrics['R10'], metrics['MR']))
    R1 = metrics['R1']
    return R1
# Registry mapping ``args.datatype`` to its train/val dataloader builders.
DATALOADER_DICT = {}
DATALOADER_DICT["youcook"] = {"train":dataloader_youcook_train, "val":dataloader_youcook_test}
DATALOADER_DICT["msrvtt"] = {"train":dataloader_msrvtt_train, "val":dataloader_msrvtt_test}
def main():
    """Entry point: set up seeds/devices/model, then train (tracking the best
    R@1 checkpoint) and/or evaluate, depending on the CLI flags."""
    global logger
    args = get_args()
    args = set_seed_logger(args)
    device, n_gpu = init_device(args, args.local_rank)
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    assert  args.task_type == "retrieval"
    model = init_model(args, device, n_gpu, args.local_rank)
    assert args.datatype in DATALOADER_DICT
    test_dataloader, test_length = DATALOADER_DICT[args.datatype]["val"](args, tokenizer)
    if args.local_rank == 0:
        logger.info("***** Running test *****")
        logger.info("  Num examples = %d", test_length)
        logger.info("  Batch size = %d", args.batch_size_val)
        logger.info("  Num steps = %d", len(test_dataloader))
    if args.do_train:
        train_dataloader, train_length, train_sampler = DATALOADER_DICT[args.datatype]["train"](args, tokenizer)
        num_train_optimization_steps = (int(len(train_dataloader) + args.gradient_accumulation_steps - 1)
                                        / args.gradient_accumulation_steps) * args.epochs
        coef_lr = args.coef_lr
        # When warm-starting from a checkpoint, do not down-scale the BERT LR.
        if args.init_model:
            coef_lr = 1.0
        optimizer, scheduler, model = prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, args.local_rank, coef_lr=coef_lr)
        if args.local_rank == 0:
            logger.info("***** Running training *****")
            logger.info("  Num examples = %d", train_length)
            logger.info("  Batch size = %d", args.batch_size)
            logger.info("  Num steps = %d", num_train_optimization_steps * args.gradient_accumulation_steps)
        best_score = 0.00001
        best_output_model_file = None
        global_step = 0
        for epoch in range(args.epochs):
            # Reshuffle the distributed sampler each epoch.
            train_sampler.set_epoch(epoch)
            tr_loss, global_step = train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer,
                                               scheduler, global_step, local_rank=args.local_rank)
            if args.local_rank == 0:
                logger.info("Epoch %d/%s Finished, Train Loss: %f", epoch + 1, args.epochs, tr_loss)
                output_model_file = save_model(epoch, args, model, type_name="")
                # Track the checkpoint with the best R@1 on the validation set.
                R1 = eval_epoch(args, model, test_dataloader, device, n_gpu)
                if best_score <= R1:
                    best_score = R1
                    best_output_model_file = output_model_file
                logger.info("The best model is: {}, the R1 is: {:.4f}".format(best_output_model_file, best_score))
        # Final evaluation with the best checkpoint reloaded.
        if args.local_rank == 0:
            model = load_model(-1, args, n_gpu, device, model_file=best_output_model_file)
            eval_epoch(args, model, test_dataloader, device, n_gpu)
    elif args.do_eval:
        if args.local_rank == 0:
            eval_epoch(args, model, test_dataloader, device, n_gpu)
if __name__ == "__main__":
main() | 24,353 | 46.28932 | 144 | py |
UniVL | UniVL-main/main_pretrain.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import torch
from torch.utils.data import (SequentialSampler)
import numpy as np
import random
import os
from collections import OrderedDict
import pickle
import time
import argparse
from modules.tokenization import BertTokenizer
from modules.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from modules.modeling import UniVL
from modules.optimization import BertAdam
from dataloaders.dataloader_howto100m import Youtube_DataLoader
from torch.utils.data import DataLoader
from util import get_logger
# Join the NCCL process group at import time; every torch.distributed call
# below relies on it being initialized.
torch.distributed.init_process_group(backend="nccl")
# Module-level ``global`` is a no-op; kept to signal that ``logger`` is shared state.
global logger
def get_args(description='UniVL on Pretrain'):
    """Parse and validate command-line arguments for UniVL pretraining.

    Post-processing on the parsed namespace: `use_mil` is forced on when
    `sampled_use_mil` is set; `batch_size` is divided by
    `gradient_accumulation_steps` (per-step micro-batch size); the
    checkpoint file name is expanded with model/word/frame settings.

    Raises:
        ValueError: if `gradient_accumulation_steps` < 1 or `do_pretrain`
            is not given.
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("--do_pretrain", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
    parser.add_argument('--train_csv', type=str, default='data/HowTo100M_v1.csv', help='train csv')
    parser.add_argument('--features_path', type=str, default='feature', help='feature path for 2D features')
    parser.add_argument('--data_path', type=str, default='data/data.pickle', help='data pickle file path')
    parser.add_argument('--num_thread_reader', type=int, default=1, help='')
    parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
    parser.add_argument('--epochs', type=int, default=20, help='upper epoch limit')
    parser.add_argument('--batch_size', type=int, default=256, help='batch size')
    parser.add_argument('--batch_size_val', type=int, default=3500, help='batch size eval')
    parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate exp epoch decay')
    parser.add_argument('--n_display', type=int, default=100, help='Information display frequence')
    parser.add_argument('--video_dim', type=int, default=1024, help='video feature dimension')
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--max_words', type=int, default=20, help='')
    parser.add_argument('--max_frames', type=int, default=100, help='')
    parser.add_argument('--min_words', type=int, default=0, help='')
    parser.add_argument('--feature_framerate', type=int, default=1, help='')
    parser.add_argument('--min_time', type=float, default=5.0, help='Gather small clips')
    parser.add_argument('--margin', type=float, default=0.1, help='margin for loss')
    parser.add_argument('--hard_negative_rate', type=float, default=0.5, help='rate of intra negative sample')
    parser.add_argument('--negative_weighting', type=int, default=1, help='Weight the loss for intra negative')
    parser.add_argument('--n_pair', type=int, default=1, help='Num of pair to output from data loader')
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--bert_model", default="bert-base-uncased", type=str, required=True,
                        help="Bert pre-trained model")
    parser.add_argument("--visual_model", default="visual-base", type=str, required=False, help="Visual module")
    parser.add_argument("--cross_model", default="cross-base", type=str, required=False, help="Cross module")
    parser.add_argument("--decoder_model", default="decoder-base", type=str, required=False, help="Decoder module")
    parser.add_argument("--init_model", default=None, type=str, required=False, help="Initial model.")
    parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--n_gpu', type=int, default=1, help="Changed in the execute process.")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--world_size", default=0, type=int, help="distribted training")
    parser.add_argument("--local_rank", default=0, type=int, help="distribted training")
    parser.add_argument('--coef_lr', type=float, default=0.1, help='coefficient for bert branch.')
    parser.add_argument('--use_mil', action='store_true', help="Whether use MIL as Miech et. al. (2020).")
    parser.add_argument('--sampled_use_mil', action='store_true', help="Whether use MIL, has a high priority than use_mil.")
    parser.add_argument('--text_num_hidden_layers', type=int, default=12, help="Layer NO. of text.")
    parser.add_argument('--visual_num_hidden_layers', type=int, default=6, help="Layer NO. of visual.")
    parser.add_argument('--cross_num_hidden_layers', type=int, default=2, help="Layer NO. of cross.")
    parser.add_argument('--decoder_num_hidden_layers', type=int, default=3, help="Layer NO. of decoder.")
    parser.add_argument('--stage_two', action='store_true', help="Whether training with decoder.")
    parser.add_argument('--pretrain_enhance_vmodal', action='store_true', help="Enhance visual and other modalities when pretraining.")
    parser.add_argument("--load_checkpoint", action="store_true")
    parser.add_argument("--checkpoint_model", default="pytorch_model.bin.checkpoint", type=str, required=False,
                        help="Save the last model as a checkpoint.")
    args = parser.parse_args()
    if args.sampled_use_mil:  # sample from each video, has a higher priority than use_mil.
        args.use_mil = True
    # Check paramenters
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    if not args.do_pretrain:
        raise ValueError("`do_pretrain` must be True.")
    # Keep the effective global batch fixed: each optimizer step accumulates
    # `gradient_accumulation_steps` micro-batches of this size.
    args.batch_size = int(args.batch_size / args.gradient_accumulation_steps)
    args.checkpoint_model = '{}_{}_{}_{}.checkpoint'.format(args.checkpoint_model, args.bert_model, args.max_words, args.max_frames)
    return args
def set_seed_logger(args):
    """Seed every RNG for reproducibility and set up the global file logger.

    Also records the distributed world size on `args`, pins this process to
    its local CUDA device, creates `args.output_dir` if missing, and (on
    rank 0) logs the effective configuration. Returns the mutated `args`.
    """
    global logger
    random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)  # if you are using multi-GPU.
    # Deterministic cuDNN trades some speed for run-to-run reproducibility.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # Requires torch.distributed.init_process_group to have run at import time.
    world_size = torch.distributed.get_world_size()
    torch.cuda.set_device(args.local_rank)
    args.world_size = world_size
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)
    logger = get_logger(os.path.join(args.output_dir, "log.txt"))
    if args.local_rank == 0:
        logger.info("Effective parameters:")
        for key in sorted(args.__dict__):
            logger.info(" <<< {}: {}".format(key, args.__dict__[key]))
    return args
def init_device(args, local_rank):
    """Select the device for this rank and count visible GPUs.

    Stores the GPU count on `args.n_gpu` and validates that both batch sizes
    divide evenly across GPUs. Returns (device, n_gpu).
    """
    global logger
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", local_rank)
    n_gpu = torch.cuda.device_count()
    logger.info("device: {} n_gpu: {}".format(device, n_gpu))
    args.n_gpu = n_gpu
    if args.batch_size % args.n_gpu != 0 or args.batch_size_val % args.n_gpu != 0:
        raise ValueError("Invalid batch_size/batch_size_val and n_gpu parameter: {}%{} and {}%{}, should be == 0".format(
            args.batch_size, args.n_gpu, args.batch_size_val, args.n_gpu))
    return device, n_gpu
def init_model(args, device, n_gpu, local_rank):
    """Construct the UniVL model on `device`, optionally warm-started from
    the weight file given by `args.init_model`."""
    if args.init_model:
        model_state_dict = torch.load(args.init_model, map_location='cpu')
    else:
        model_state_dict = None
    # Prepare model
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
    model = UniVL.from_pretrained(args.bert_model, args.visual_model, args.cross_model, args.decoder_model,
                                  cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
    model.to(device)
    return model
def prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, local_rank, coef_lr=1.):
    """Build the BertAdam optimizer and wrap the model in DistributedDataParallel.

    Parameters are split into four groups — (bert vs. non-bert branch) x
    (decayed vs. decay-exempt). Bias and LayerNorm parameters are exempt from
    weight decay; bert-branch groups use a scaled learning rate
    (`args.lr * coef_lr`). Returns (optimizer, scheduler, ddp_model); the
    scheduler is always None because BertAdam handles warmup internally.
    """
    if hasattr(model, 'module'):
        model = model.module
    named_params = list(model.named_parameters())
    # Name fragments whose parameters should NOT receive weight decay.
    decay_exempt_fragments = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']

    def is_decay_exempt(param_name):
        return any(fragment in param_name for fragment in decay_exempt_fragments)

    decayed = [(n, p) for n, p in named_params if not is_decay_exempt(n)]
    exempt = [(n, p) for n, p in named_params if is_decay_exempt(n)]
    decayed_bert = [p for n, p in decayed if "bert." in n]
    decayed_other = [p for n, p in decayed if "bert." not in n]
    exempt_bert = [p for n, p in exempt if "bert." in n]
    exempt_other = [p for n, p in exempt if "bert." not in n]

    optimizer_grouped_parameters = [
        {'params': decayed_bert, 'weight_decay': 0.01, 'lr': args.lr * coef_lr},
        {'params': decayed_other, 'weight_decay': 0.01},
        {'params': exempt_bert, 'weight_decay': 0.0, 'lr': args.lr * coef_lr},
        {'params': exempt_other, 'weight_decay': 0.0}
    ]

    scheduler = None
    optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=args.warmup_proportion,
                         schedule='warmup_linear', t_total=num_train_optimization_steps, weight_decay=0.01,
                         max_grad_norm=1.0)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank],
                                                      output_device=local_rank, find_unused_parameters=True)
    return optimizer, scheduler, model
def dataloader_pretrain(args, tokenizer, only_sim=False):
    """Build the HowTo100M pretraining DataLoader with a DistributedSampler.

    `only_sim` tells the dataset to skip generation targets (stage-one,
    similarity-only training). Returns (dataloader, dataset_length, sampler);
    the sampler is exposed so callers can call `set_epoch` each epoch.

    Fix: the caption pickle is now opened with a context manager — the
    original `pickle.load(open(path, 'rb'))` leaked the file handle.
    """
    if args.local_rank == 0:
        logger.info('Loading captions: {}'.format(args.data_path))
    # Trusted local dataset file; pickle must never be used on untrusted input.
    with open(args.data_path, 'rb') as f:
        data_dict = pickle.load(f)
    if args.local_rank == 0:
        logger.info('Done, data_dict length: {}'.format(len(data_dict)))
    dataset = Youtube_DataLoader(
        csv=args.train_csv,
        features_path=args.features_path,
        data_dict=data_dict,
        min_time=args.min_time,
        max_words=args.max_words,
        min_words=args.min_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        n_pair=args.n_pair,
        max_frames=args.max_frames,
        use_mil=args.use_mil,
        only_sim=only_sim,
        sampled_use_mil=args.sampled_use_mil,
        pretrain_enhance_vmodal=args.pretrain_enhance_vmodal,
        video_dim=args.video_dim,
    )
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    dataloader = DataLoader(
        dataset,
        batch_size=args.batch_size // args.n_gpu,
        num_workers=args.num_thread_reader,
        pin_memory=False,
        shuffle=(sampler is None),
        sampler=sampler,
        drop_last=True,
    )
    return dataloader, len(dataset), sampler
def convert_state_dict_type(state_dict, ttype=torch.FloatTensor):
    """Recursively cast every tensor in a (possibly nested) state dict to `ttype`.

    Walks dicts (preserving key order via OrderedDict) and lists, converting
    tensors with `Tensor.type(ttype)` and returning non-tensor leaves as-is.

    Bug fix: the original recursion dropped the `ttype` argument, so any
    non-default `ttype` was silently replaced by torch.FloatTensor for all
    nested values.
    """
    if isinstance(state_dict, dict):
        cpu_dict = OrderedDict()
        for k, v in state_dict.items():
            cpu_dict[k] = convert_state_dict_type(v, ttype)
        return cpu_dict
    elif isinstance(state_dict, list):
        return [convert_state_dict_type(v, ttype) for v in state_dict]
    elif torch.is_tensor(state_dict):
        return state_dict.type(ttype)
    else:
        return state_dict
def save_model(epoch, args, model, local_rank, type_name="", global_step=-1, optimizer=None):
    """Persist the unwrapped model weights; optionally drop a resumable checkpoint.

    Returns the path of the saved weight file. When both `global_step` and
    `optimizer` are supplied, a full checkpoint (epoch / step / model /
    optimizer state) is also written to `args.checkpoint_model` under
    `args.output_dir`.
    """
    # Unwrap DataParallel / DistributedDataParallel before saving.
    core_model = getattr(model, 'module', model)
    name_prefix = "" if type_name == "" else type_name + "."
    output_model_file = os.path.join(
        args.output_dir, "pytorch_model.bin.{}{}".format(name_prefix, epoch))
    torch.save(core_model.state_dict(), output_model_file)
    logger.info("Model saved to %s", output_model_file)
    if global_step != -1 and optimizer is not None:
        checkpoint_state = {
            'epoch': epoch,
            'global_step': global_step,
            'model_state_dict': core_model.state_dict(),
            'last_optimizer_state': convert_state_dict_type(optimizer.state_dict()),
        }
        checkpoint_model_file = os.path.join(args.output_dir, args.checkpoint_model)
        torch.save(checkpoint_state, checkpoint_model_file)
        logger.info("Checkpoint is saved. use `load_checkpoint` to recovery it.")
    return output_model_file
def load_model(epoch, args, n_gpu, device, model, global_step=0, model_file=None):
    """Resume pretraining state from disk if available.

    Priority: when `epoch == -1`, `args.load_checkpoint` is set and the
    checkpoint file exists, restore epoch/step/model/optimizer state from the
    checkpoint; otherwise fall back to the plain per-epoch weight file if it
    exists; otherwise return the inputs unchanged.
    Returns (epoch, global_step, last_optim_state, model).
    """
    if model_file is None or len(model_file) == 0:
        model_file = os.path.join(args.output_dir, "pytorch_model.bin.{}".format(epoch))
    last_optim_state = None
    checkpoint_model_file = os.path.join(args.output_dir, args.checkpoint_model)
    if epoch == -1 and args.load_checkpoint and os.path.exists(checkpoint_model_file):
        # Full resume: epoch, step, weights and optimizer state.
        checkpoint_state = torch.load(checkpoint_model_file, map_location='cpu')
        epoch = checkpoint_state['epoch']
        global_step = checkpoint_state['global_step']
        model_state_dict = checkpoint_state['model_state_dict']
        last_optim_state = checkpoint_state['last_optimizer_state']
        cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
        model = UniVL.from_pretrained(args.bert_model, args.visual_model, args.cross_model, args.decoder_model,
                                      cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
        model.to(device)
        if args.local_rank == 0:
            logger.info("Checkpoint loaded from %s", checkpoint_model_file)
    elif os.path.exists(model_file):
        # Weights-only load; optimizer state is not recovered on this path.
        model_state_dict = torch.load(model_file, map_location='cpu')
        if args.local_rank == 0:
            logger.info("Model loaded from %s", model_file)
        cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
        model = UniVL.from_pretrained(args.bert_model, args.visual_model, args.cross_model, args.decoder_model,
                                      cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
        model.to(device)
    return epoch, global_step, last_optim_state, model
def train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, local_rank=0):
    """Run one pretraining epoch.

    Gradients are accumulated over `args.gradient_accumulation_steps` batches
    before each optimizer step; gradients are clipped to norm 1.0; rank 0
    logs every `args.n_display` optimizer steps.
    Returns (mean batch loss, updated global_step).
    """
    global logger
    torch.cuda.empty_cache()
    model.train()
    log_step = args.n_display
    start_time = time.time()
    total_loss = 0
    for step, batch in enumerate(train_dataloader):
        batch = tuple(t.to(device=device, non_blocking=True) for t in batch)
        input_ids, input_mask, segment_ids, video, video_mask, \
        pairs_masked_text, pairs_token_labels, masked_video, video_labels_index,\
        pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids = batch
        loss = model(input_ids, segment_ids, input_mask, video, video_mask,
                     pairs_masked_text=pairs_masked_text, pairs_token_labels=pairs_token_labels,
                     masked_video=masked_video, video_labels_index=video_labels_index,
                     input_caption_ids=pairs_input_caption_ids, decoder_mask=pairs_decoder_mask,
                     output_caption_ids=pairs_output_caption_ids)
        if n_gpu > 1:
            # Average the per-replica losses under DataParallel.
            loss = loss.mean()
        if args.gradient_accumulation_steps > 1:
            # Scale so the accumulated gradient matches a full-batch step.
            loss = loss / args.gradient_accumulation_steps
        loss.backward()
        total_loss += float(loss)
        if (step + 1) % args.gradient_accumulation_steps == 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            if scheduler is not None:
                scheduler.step()
            optimizer.step()
            optimizer.zero_grad()
            global_step += 1
            if global_step % log_step == 0 and local_rank == 0:
                logger.info("Epoch: %d/%s, Step: %d/%d, Lr: %s, Loss: %f, Time/step: %f", epoch + 1,
                            args.epochs, step + 1,
                            len(train_dataloader), "-".join([str('%.6f'%itm) for itm in sorted(list(set(optimizer.get_lr())))]),
                            float(loss),
                            (time.time() - start_time) / (log_step * args.gradient_accumulation_steps))
                start_time = time.time()
    total_loss = total_loss / len(train_dataloader)
    return total_loss, global_step
def main():
    """Entry point for distributed UniVL pretraining.

    Flow: parse args -> seed + logging -> device -> model -> dataloader ->
    optional checkpoint resume -> optimizer/DDP wrap -> epoch loop, saving a
    model file and resumable checkpoint after every epoch (rank 0 only).
    """
    global logger
    args = get_args()
    args = set_seed_logger(args)
    device, n_gpu = init_device(args, args.local_rank)
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    model = init_model(args, device, n_gpu, args.local_rank)
    # Stage one trains similarity only; the dataloader then skips caption targets.
    only_sim = model.module._stage_one if hasattr(model, 'module') else model._stage_one
    train_dataloader, train_length, sampler = dataloader_pretrain(args, tokenizer, only_sim=only_sim)
    # NOTE(review): int() wraps only the numerator, so this is a float
    # (ceil-division intent); BertAdam appears to tolerate it — confirm.
    num_train_optimization_steps = (int(len(train_dataloader) + args.gradient_accumulation_steps - 1)
                                    / args.gradient_accumulation_steps) * args.epochs
    global_step = 0
    epoch = -1
    last_optim_state = None
    if args.load_checkpoint:
        epoch, global_step, last_optim_state, model = load_model(epoch, args, n_gpu, device, model, global_step=global_step)
        epoch += 1
        if args.local_rank == 0:
            logger.warning("Will continue to epoch: {}".format(epoch))
    epoch = 0 if epoch < 0 else epoch
    coef_lr = args.coef_lr
    if args.init_model:
        # A warm-started bert branch uses the full learning rate.
        coef_lr = 1.0
    optimizer, scheduler, model = prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, args.local_rank, coef_lr=coef_lr)
    if last_optim_state is not None:
        optimizer.load_state_dict(last_optim_state)
    if args.local_rank == 0:
        logger.info("***** Running pretraining *****")
        logger.info("  Num examples = %d", train_length)
        logger.info("  Batch size = %d", args.batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps * args.gradient_accumulation_steps)
    # Skip epochs already completed when resuming from a checkpoint.
    iter_ls_ = [itm for itm in range(args.epochs) if itm >= epoch]
    for epoch in iter_ls_:
        sampler.set_epoch(epoch)
        tr_loss, global_step = train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer,
                                           scheduler, global_step, local_rank=args.local_rank)
        if args.local_rank == 0:
            logger.info("Epoch %d/%s Finished, Train Loss: %f", epoch + 1, args.epochs, tr_loss)
            save_model(epoch, args, model, args.local_rank, type_name="pretrain", global_step=global_step, optimizer=optimizer)
if __name__ == "__main__":
main() | 19,914 | 47.691932 | 140 | py |
UniVL | UniVL-main/main_task_caption.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import torch
from torch.utils.data import (SequentialSampler)
import numpy as np
import random
import os
from collections import OrderedDict
from nlgeval import NLGEval
import time
import argparse
from modules.tokenization import BertTokenizer
from modules.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from modules.modeling import UniVL
from modules.optimization import BertAdam
from modules.beam import Beam
from torch.utils.data import DataLoader
from dataloaders.dataloader_youcook_caption import Youcook_Caption_DataLoader
from dataloaders.dataloader_msrvtt_caption import MSRVTT_Caption_DataLoader
from util import get_logger
torch.distributed.init_process_group(backend="nccl")
global logger
def get_args(description='UniVL on Caption Task'):
    """Parse command-line arguments for caption fine-tuning (YoucookII / MSRVTT).

    Validates `gradient_accumulation_steps` >= 1 and that at least one of
    `do_train`/`do_eval` is set, then rescales `batch_size` to the per-step
    micro-batch size.
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("--do_pretrain", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
    parser.add_argument('--train_csv', type=str, default='data/youcookii_singlef_train.csv', help='')
    parser.add_argument('--val_csv', type=str, default='data/youcookii_singlef_val.csv', help='')
    parser.add_argument('--data_path', type=str, default='data/youcookii_caption_transcript.pickle',
                        help='caption and transcription pickle file path')
    parser.add_argument('--features_path', type=str, default='data/youcookii_videos_feature.pickle',
                        help='feature path for 2D features')
    parser.add_argument('--num_thread_reader', type=int, default=1, help='')
    parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
    parser.add_argument('--epochs', type=int, default=20, help='upper epoch limit')
    parser.add_argument('--batch_size', type=int, default=256, help='batch size')
    parser.add_argument('--batch_size_val', type=int, default=3500, help='batch size eval')
    parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate exp epoch decay')
    parser.add_argument('--n_display', type=int, default=100, help='Information display frequence')
    parser.add_argument('--video_dim', type=int, default=1024, help='video feature dimension')
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--max_words', type=int, default=20, help='')
    parser.add_argument('--max_frames', type=int, default=100, help='')
    parser.add_argument('--feature_framerate', type=int, default=1, help='')
    parser.add_argument('--min_time', type=float, default=5.0, help='Gather small clips')
    parser.add_argument('--margin', type=float, default=0.1, help='margin for loss')
    parser.add_argument('--hard_negative_rate', type=float, default=0.5, help='rate of intra negative sample')
    parser.add_argument('--negative_weighting', type=int, default=1, help='Weight the loss for intra negative')
    parser.add_argument('--n_pair', type=int, default=1, help='Num of pair to output from data loader')
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--bert_model", default="bert-base-uncased", type=str, required=True, help="Bert pre-trained model")
    parser.add_argument("--visual_model", default="visual-base", type=str, required=False, help="Visual module")
    parser.add_argument("--cross_model", default="cross-base", type=str, required=False, help="Cross module")
    parser.add_argument("--decoder_model", default="decoder-base", type=str, required=False, help="Decoder module")
    parser.add_argument("--init_model", default=None, type=str, required=False, help="Initial model.")
    parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--n_gpu', type=int, default=1, help="Changed in the execute process.")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--task_type", default="caption", type=str, help="Point the task `caption` to finetune.")
    parser.add_argument("--datatype", default="youcook", type=str, help="Point the dataset `youcook` to finetune.")
    parser.add_argument("--world_size", default=0, type=int, help="distribted training")
    parser.add_argument("--local_rank", default=0, type=int, help="distribted training")
    parser.add_argument('--coef_lr', type=float, default=0.1, help='coefficient for bert branch.')
    parser.add_argument('--use_mil', action='store_true', help="Whether use MIL as Miech et. al. (2020).")
    parser.add_argument('--sampled_use_mil', action='store_true', help="Whether use MIL, has a high priority than use_mil.")
    parser.add_argument('--text_num_hidden_layers', type=int, default=12, help="Layer NO. of text.")
    parser.add_argument('--visual_num_hidden_layers', type=int, default=6, help="Layer NO. of visual.")
    parser.add_argument('--cross_num_hidden_layers', type=int, default=2, help="Layer NO. of cross.")
    parser.add_argument('--decoder_num_hidden_layers', type=int, default=3, help="Layer NO. of decoder.")
    parser.add_argument('--stage_two', action='store_true', help="Whether training with decoder.")
    args = parser.parse_args()
    # Check paramenters
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")
    # Keep the effective global batch fixed across accumulation steps.
    args.batch_size = int(args.batch_size / args.gradient_accumulation_steps)
    return args
def set_seed_logger(args):
    """Seed every RNG for reproducibility and set up the global file logger.

    Also records the distributed world size on `args`, pins this process to
    its local CUDA device, creates `args.output_dir` if missing, and (on
    rank 0) logs the effective configuration. Returns the mutated `args`.
    """
    global logger
    # predefining random initial seeds
    random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)  # if you are using multi-GPU.
    # Deterministic cuDNN trades some speed for run-to-run reproducibility.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    world_size = torch.distributed.get_world_size()
    torch.cuda.set_device(args.local_rank)
    args.world_size = world_size
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)
    logger = get_logger(os.path.join(args.output_dir, "log.txt"))
    if args.local_rank == 0:
        logger.info("Effective parameters:")
        for key in sorted(args.__dict__):
            logger.info(" <<< {}: {}".format(key, args.__dict__[key]))
    return args
def init_device(args, local_rank):
    """Select the device for this rank and count visible GPUs.

    Stores the GPU count on `args.n_gpu` and validates that both batch sizes
    divide evenly across GPUs. Returns (device, n_gpu).
    """
    global logger
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", local_rank)
    n_gpu = torch.cuda.device_count()
    logger.info("device: {} n_gpu: {}".format(device, n_gpu))
    args.n_gpu = n_gpu
    if args.batch_size % args.n_gpu != 0 or args.batch_size_val % args.n_gpu != 0:
        raise ValueError("Invalid batch_size/batch_size_val and n_gpu parameter: {}%{} and {}%{}, should be == 0".format(
            args.batch_size, args.n_gpu, args.batch_size_val, args.n_gpu))
    return device, n_gpu
def init_model(args, device, n_gpu, local_rank):
    """Construct the UniVL model on `device`, optionally warm-started from
    the weight file given by `args.init_model`."""
    if args.init_model:
        model_state_dict = torch.load(args.init_model, map_location='cpu')
    else:
        model_state_dict = None
    # Prepare model
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
    model = UniVL.from_pretrained(args.bert_model, args.visual_model, args.cross_model, args.decoder_model,
                                  cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
    model.to(device)
    return model
def prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, local_rank, coef_lr=1.):
    """Build the BertAdam optimizer and wrap the model in DistributedDataParallel.

    Parameters are split into four groups — (bert vs. non-bert branch) x
    (decayed vs. decay-exempt). Bias and LayerNorm parameters are exempt from
    weight decay; bert-branch groups use a scaled learning rate
    (`args.lr * coef_lr`). Returns (optimizer, scheduler, ddp_model); the
    scheduler is always None because BertAdam handles warmup internally.
    """
    if hasattr(model, 'module'):
        model = model.module
    named_params = list(model.named_parameters())
    # Name fragments whose parameters should NOT receive weight decay.
    decay_exempt_fragments = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']

    def is_decay_exempt(param_name):
        return any(fragment in param_name for fragment in decay_exempt_fragments)

    decayed = [(n, p) for n, p in named_params if not is_decay_exempt(n)]
    exempt = [(n, p) for n, p in named_params if is_decay_exempt(n)]
    decayed_bert = [p for n, p in decayed if "bert." in n]
    decayed_other = [p for n, p in decayed if "bert." not in n]
    exempt_bert = [p for n, p in exempt if "bert." in n]
    exempt_other = [p for n, p in exempt if "bert." not in n]

    optimizer_grouped_parameters = [
        {'params': decayed_bert, 'weight_decay': 0.01, 'lr': args.lr * coef_lr},
        {'params': decayed_other, 'weight_decay': 0.01},
        {'params': exempt_bert, 'weight_decay': 0.0, 'lr': args.lr * coef_lr},
        {'params': exempt_other, 'weight_decay': 0.0}
    ]

    scheduler = None
    optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=args.warmup_proportion,
                         schedule='warmup_linear', t_total=num_train_optimization_steps, weight_decay=0.01,
                         max_grad_norm=1.0)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank],
                                                      output_device=local_rank, find_unused_parameters=True)
    return optimizer, scheduler, model
def dataloader_youcook_train(args, tokenizer):
    """Build the YoucookII caption training DataLoader with a DistributedSampler.

    Returns (dataloader, dataset_length, sampler); the sampler is exposed so
    the caller can call `set_epoch` for per-epoch shuffling.
    """
    youcook_dataset = Youcook_Caption_DataLoader(
        csv=args.train_csv,
        data_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
    )
    train_sampler = torch.utils.data.distributed.DistributedSampler(youcook_dataset)
    dataloader = DataLoader(
        youcook_dataset,
        batch_size=args.batch_size // args.n_gpu,
        num_workers=args.num_thread_reader,
        pin_memory=False,
        shuffle=(train_sampler is None),
        sampler=train_sampler,
        drop_last=True,
    )
    return dataloader, len(youcook_dataset), train_sampler
def dataloader_youcook_test(args, tokenizer):
    """Build the YoucookII validation DataLoader (sequential, no shuffling).

    Returns (dataloader, dataset_length).
    """
    youcook_testset = Youcook_Caption_DataLoader(
        csv=args.val_csv,
        data_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
    )
    test_sampler = SequentialSampler(youcook_testset)
    dataloader_youcook = DataLoader(
        youcook_testset,
        sampler=test_sampler,
        batch_size=args.batch_size_val,
        num_workers=args.num_thread_reader,
        pin_memory=False,
    )
    if args.local_rank == 0:
        logger.info('YoucookII validation pairs: {}'.format(len(youcook_testset)))
    return dataloader_youcook, len(youcook_testset)
def dataloader_msrvtt_train(args, tokenizer):
    """Build the MSRVTT caption training DataLoader with a DistributedSampler.

    Returns (dataloader, dataset_length, sampler); the sampler is exposed so
    the caller can call `set_epoch` for per-epoch shuffling.
    """
    msrvtt_dataset = MSRVTT_Caption_DataLoader(
        csv_path=args.train_csv,
        json_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
        split_type="train",
    )
    train_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_dataset)
    dataloader = DataLoader(
        msrvtt_dataset,
        batch_size=args.batch_size // args.n_gpu,
        num_workers=args.num_thread_reader,
        pin_memory=False,
        shuffle=(train_sampler is None),
        sampler=train_sampler,
        drop_last=True,
    )
    return dataloader, len(msrvtt_dataset), train_sampler
def dataloader_msrvtt_test(args, tokenizer, split_type="test",):
    """Build the MSRVTT evaluation DataLoader (sequential, no shuffling).

    Returns (dataloader, dataset_length).
    """
    msrvtt_testset = MSRVTT_Caption_DataLoader(
        csv_path=args.val_csv,
        json_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
        split_type=split_type,
    )
    test_sampler = SequentialSampler(msrvtt_testset)
    dataloader_msrvtt = DataLoader(
        msrvtt_testset,
        sampler=test_sampler,
        batch_size=args.batch_size_val,
        num_workers=args.num_thread_reader,
        pin_memory=False,
        drop_last=False,
    )
    return dataloader_msrvtt, len(msrvtt_testset)
def convert_state_dict_type(state_dict, ttype=torch.FloatTensor):
    """Recursively cast every tensor in a (possibly nested) state dict to `ttype`.

    Walks dicts (preserving key order via OrderedDict) and lists, converting
    tensors with `Tensor.type(ttype)` and returning non-tensor leaves as-is.

    Bug fix: the original recursion dropped the `ttype` argument, so any
    non-default `ttype` was silently replaced by torch.FloatTensor for all
    nested values.
    """
    if isinstance(state_dict, dict):
        cpu_dict = OrderedDict()
        for k, v in state_dict.items():
            cpu_dict[k] = convert_state_dict_type(v, ttype)
        return cpu_dict
    elif isinstance(state_dict, list):
        return [convert_state_dict_type(v, ttype) for v in state_dict]
    elif torch.is_tensor(state_dict):
        return state_dict.type(ttype)
    else:
        return state_dict
def save_model(epoch, args, model, type_name=""):
    """Save the underlying model's weights for `epoch`; return the file path."""
    # Only save the model it-self (unwrap DataParallel/DDP).
    core_model = getattr(model, 'module', model)
    name_prefix = "" if type_name == "" else type_name + "."
    output_model_file = os.path.join(
        args.output_dir, "pytorch_model.bin.{}{}".format(name_prefix, epoch))
    torch.save(core_model.state_dict(), output_model_file)
    logger.info("Model saved to %s", output_model_file)
    return output_model_file
def load_model(epoch, args, n_gpu, device, model_file=None):
    """Load saved weights for `epoch` (or from `model_file`) into a fresh UniVL.

    Returns the model on `device`, or None when the weight file does not exist.
    """
    if model_file is None or len(model_file) == 0:
        model_file = os.path.join(args.output_dir, "pytorch_model.bin.{}".format(epoch))
    if os.path.exists(model_file):
        model_state_dict = torch.load(model_file, map_location='cpu')
        if args.local_rank == 0:
            logger.info("Model loaded from %s", model_file)
        # Prepare model
        cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
        model = UniVL.from_pretrained(args.bert_model, args.visual_model, args.cross_model, args.decoder_model,
                                      cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
        model.to(device)
    else:
        model = None
    return model
def train_epoch(epoch, args, model, train_dataloader, tokenizer, device, n_gpu, optimizer, scheduler,
                global_step, nlgEvalObj=None, local_rank=0):
    """Run one caption-finetuning epoch.

    Gradients are accumulated over `args.gradient_accumulation_steps` batches
    before each optimizer step; gradients are clipped to norm 1.0; rank 0
    logs every `args.n_display` optimizer steps.
    Returns (mean batch loss, updated global_step).

    NOTE(review): `tokenizer` and `nlgEvalObj` are accepted but never used in
    this function — presumably kept for signature parity with the eval path;
    confirm before removing.
    """
    global logger
    torch.cuda.empty_cache()
    model.train()
    log_step = args.n_display
    start_time = time.time()
    total_loss = 0
    for step, batch in enumerate(train_dataloader):
        batch = tuple(t.to(device=device, non_blocking=True) for t in batch)
        input_ids, input_mask, segment_ids, video, video_mask, \
        pairs_masked_text, pairs_token_labels, masked_video, video_labels_index,\
        pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids = batch
        loss = model(input_ids, segment_ids, input_mask, video, video_mask,
                     pairs_masked_text=pairs_masked_text, pairs_token_labels=pairs_token_labels,
                     masked_video=masked_video, video_labels_index=video_labels_index,
                     input_caption_ids=pairs_input_caption_ids, decoder_mask=pairs_decoder_mask,
                     output_caption_ids=pairs_output_caption_ids)
        if n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu.
        if args.gradient_accumulation_steps > 1:
            loss = loss / args.gradient_accumulation_steps
        loss.backward()
        total_loss += float(loss)
        if (step + 1) % args.gradient_accumulation_steps == 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            if scheduler is not None:
                scheduler.step()  # Update learning rate schedule
            optimizer.step()
            optimizer.zero_grad()
            global_step += 1
            if global_step % log_step == 0 and local_rank == 0:
                logger.info("Epoch: %d/%s, Step: %d/%d, Lr: %s, Loss: %f, Time/step: %f", epoch + 1,
                            args.epochs, step + 1,
                            len(train_dataloader), "-".join([str('%.6f'%itm) for itm in sorted(list(set(optimizer.get_lr())))]),
                            float(loss),
                            (time.time() - start_time) / (log_step * args.gradient_accumulation_steps))
                start_time = time.time()
    total_loss = total_loss / len(train_dataloader)
    return total_loss, global_step
# ---------------------------------------->
def get_inst_idx_to_tensor_position_map(inst_idx_list):
    """Map each instance index to its row position in the packed beam tensor."""
    return dict(zip(inst_idx_list, range(len(inst_idx_list))))
def collect_active_part(beamed_tensor, curr_active_inst_idx, n_prev_active_inst, n_bm):
    """Keep only the rows of `beamed_tensor` belonging to still-active instances.

    The tensor packs `n_prev_active_inst` instances, each replicated `n_bm`
    times; rows selected by `curr_active_inst_idx` are kept, the rest dropped.
    """
    _, *trailing_dims = beamed_tensor.size()
    n_active = len(curr_active_inst_idx)
    # Flatten all beams of one instance into a single row, select, re-expand.
    flat = beamed_tensor.view(n_prev_active_inst, -1)
    kept = flat.index_select(0, curr_active_inst_idx)
    return kept.view(n_active * n_bm, *trailing_dims)
def collate_active_info(input_tuples, inst_idx_to_position_map, active_inst_idx_list, n_bm, device):
    """Filter all beam-search tensors down to the instances that are still active.

    Completed sentences are dropped so the decoder does not run on them again.
    Returns the filtered 5-tuple of tensors and a fresh index->position map.
    """
    assert isinstance(input_tuples, tuple)
    sequence_output_rpt, visual_output_rpt, input_ids_rpt, input_mask_rpt, video_mask_rpt = input_tuples
    n_prev_active_inst = len(inst_idx_to_position_map)
    positions = torch.LongTensor(
        [inst_idx_to_position_map[k] for k in active_inst_idx_list]).to(device)
    filtered = tuple(
        collect_active_part(t, positions, n_prev_active_inst, n_bm)
        for t in (sequence_output_rpt, visual_output_rpt, input_ids_rpt,
                  input_mask_rpt, video_mask_rpt))
    return filtered, get_inst_idx_to_tensor_position_map(active_inst_idx_list)
def beam_decode_step(decoder, inst_dec_beams, len_dec_seq,
                     inst_idx_to_position_map, n_bm, device, input_tuples, decoder_length=None):
    ''' Decode and update beam status, and then return active beam idx'''
    assert isinstance(input_tuples, tuple)

    def prepare_beam_dec_seq(inst_dec_beams, len_dec_seq):
        # Gather the partial hypotheses of every unfinished beam and flatten
        # them to [n_active_inst * n_bm, len_dec_seq] for one decoder call.
        dec_partial_seq = [b.get_current_state() for b in inst_dec_beams if not b.done]
        dec_partial_seq = torch.stack(dec_partial_seq).to(device)
        dec_partial_seq = dec_partial_seq.view(-1, len_dec_seq)
        return dec_partial_seq

    def predict_word(next_decoder_ids, n_active_inst, n_bm, device, input_tuples):
        # Single decoder forward pass; only the last position's distribution
        # is needed to extend each hypothesis by one token.
        sequence_output_rpt, visual_output_rpt, input_ids_rpt, input_mask_rpt, video_mask_rpt = input_tuples
        # NOTE(review): uint8 masks are deprecated in newer PyTorch in favor of
        # bool — confirm the decoder's expected mask dtype before changing.
        next_decoder_mask = torch.ones(next_decoder_ids.size(), dtype=torch.uint8).to(device)
        dec_output = decoder(sequence_output_rpt, visual_output_rpt, input_ids_rpt, input_mask_rpt,
                             video_mask_rpt, next_decoder_ids, next_decoder_mask, shaped=True, get_logits=True)
        dec_output = dec_output[:, -1, :]
        word_prob = torch.nn.functional.log_softmax(dec_output, dim=1)
        # Regroup per instance: [n_active_inst, n_bm, vocab]
        word_prob = word_prob.view(n_active_inst, n_bm, -1)
        return word_prob

    def collect_active_inst_idx_list(inst_beams, word_prob, inst_idx_to_position_map, decoder_length=None):
        # Advance each beam with its own probability slice; keep the indices of
        # instances whose beam reports it is not complete yet.
        active_inst_idx_list = []
        for inst_idx, inst_position in inst_idx_to_position_map.items():
            if decoder_length is None:
                is_inst_complete = inst_beams[inst_idx].advance(word_prob[inst_position])
            else:
                is_inst_complete = inst_beams[inst_idx].advance(word_prob[inst_position], word_length=decoder_length[inst_idx])
            if not is_inst_complete:
                active_inst_idx_list += [inst_idx]
        return active_inst_idx_list

    n_active_inst = len(inst_idx_to_position_map)
    dec_seq = prepare_beam_dec_seq(inst_dec_beams, len_dec_seq)
    word_prob = predict_word(dec_seq, n_active_inst, n_bm, device, input_tuples)
    # Update the beam with predicted word prob information and collect incomplete instances
    active_inst_idx_list = collect_active_inst_idx_list(inst_dec_beams, word_prob, inst_idx_to_position_map,
                                                        decoder_length=decoder_length)
    return active_inst_idx_list
def collect_hypothesis_and_scores(inst_dec_beams, n_best):
    """Gather the `n_best` hypotheses and their scores from every beam.

    Returns two parallel lists (one entry per instance): a list of token-id
    hypotheses sorted best-first, and the matching list of scores.
    """
    all_hyp, all_scores = [], []
    for beam in inst_dec_beams:
        scores, tail_idxs = beam.sort_scores()
        all_scores.append(scores[:n_best])
        all_hyp.append([beam.get_hypothesis(i) for i in tail_idxs[:n_best]])
    return all_hyp, all_scores
# >----------------------------------------
def eval_epoch(args, model, test_dataloader, tokenizer, device, n_gpu, nlgEvalObj=None, test_set=None):
    """Caption the eval split with beam search, dump hypothesis/reference files
    under args.output_dir, and return the corpus BLEU-4 score.

    Returns 0. immediately for stage-one models (no caption decoder).
    """
    if hasattr(model, 'module'):
        # Unwrap DataParallel/DDP so decoding runs on a single replica.
        model = model.module.to(device)
    if model._stage_one:
        return 0.
    all_result_lists = []
    all_caption_lists = []
    model.eval()
    for batch in test_dataloader:
        batch = tuple(t.to(device, non_blocking=True) for t in batch)
        input_ids, input_mask, segment_ids, video, video_mask, \
        pairs_masked_text, pairs_token_labels, masked_video, video_labels_index, \
        pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids = batch
        with torch.no_grad():
            sequence_output, visual_output = model.get_sequence_visual_output(input_ids, segment_ids, input_mask, video, video_mask)
            # -- Repeat data for beam search
            n_bm = 5  # beam_size
            device = sequence_output.device  # NOTE: rebinds the `device` argument
            n_inst, len_s, d_h = sequence_output.size()
            _, len_v, v_h = visual_output.size()
            decoder = model.decoder_caption
            # Note: shaped first, then decoder need the parameter shaped=True
            input_ids = input_ids.view(-1, input_ids.shape[-1])
            input_mask = input_mask.view(-1, input_mask.shape[-1])
            video_mask = video_mask.view(-1, video_mask.shape[-1])
            # Replicate each instance n_bm times so all beams decode in parallel.
            sequence_output_rpt = sequence_output.repeat(1, n_bm, 1).view(n_inst * n_bm, len_s, d_h)
            visual_output_rpt = visual_output.repeat(1, n_bm, 1).view(n_inst * n_bm, len_v, v_h)
            input_ids_rpt = input_ids.repeat(1, n_bm).view(n_inst * n_bm, len_s)
            input_mask_rpt = input_mask.repeat(1, n_bm).view(n_inst * n_bm, len_s)
            video_mask_rpt = video_mask.repeat(1, n_bm).view(n_inst * n_bm, len_v)
            # -- Prepare beams
            inst_dec_beams = [Beam(n_bm, device=device, tokenizer=tokenizer) for _ in range(n_inst)]
            # -- Bookkeeping for active or not
            active_inst_idx_list = list(range(n_inst))
            inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)
            # -- Decode one token per iteration, shrinking the batch as beams finish.
            for len_dec_seq in range(1, args.max_words + 1):
                active_inst_idx_list = beam_decode_step(decoder, inst_dec_beams,
                                                        len_dec_seq, inst_idx_to_position_map, n_bm, device,
                                                        (sequence_output_rpt, visual_output_rpt, input_ids_rpt, input_mask_rpt, video_mask_rpt))
                if not active_inst_idx_list:
                    break  # all instances have finished their path to <EOS>
                (sequence_output_rpt, visual_output_rpt, input_ids_rpt, input_mask_rpt, video_mask_rpt), \
                inst_idx_to_position_map = collate_active_info((sequence_output_rpt, visual_output_rpt, input_ids_rpt, input_mask_rpt, video_mask_rpt),
                                                               inst_idx_to_position_map, active_inst_idx_list, n_bm, device)
            batch_hyp, batch_scores = collect_hypothesis_and_scores(inst_dec_beams, 1)
            result_list = [batch_hyp[i][0] for i in range(n_inst)]
            pairs_output_caption_ids = pairs_output_caption_ids.view(-1, pairs_output_caption_ids.shape[-1])
            caption_list = pairs_output_caption_ids.cpu().detach().numpy()
            # Detokenize hypotheses: truncate at [SEP]/[PAD], undo WordPiece "##" joins.
            for re_idx, re_list in enumerate(result_list):
                decode_text_list = tokenizer.convert_ids_to_tokens(re_list)
                if "[SEP]" in decode_text_list:
                    SEP_index = decode_text_list.index("[SEP]")
                    decode_text_list = decode_text_list[:SEP_index]
                if "[PAD]" in decode_text_list:
                    PAD_index = decode_text_list.index("[PAD]")
                    decode_text_list = decode_text_list[:PAD_index]
                decode_text = ' '.join(decode_text_list)
                decode_text = decode_text.replace(" ##", "").strip("##").strip()
                all_result_lists.append(decode_text)
            # Detokenize the ground-truth captions the same way.
            for re_idx, re_list in enumerate(caption_list):
                decode_text_list = tokenizer.convert_ids_to_tokens(re_list)
                if "[SEP]" in decode_text_list:
                    SEP_index = decode_text_list.index("[SEP]")
                    decode_text_list = decode_text_list[:SEP_index]
                if "[PAD]" in decode_text_list:
                    PAD_index = decode_text_list.index("[PAD]")
                    decode_text_list = decode_text_list[:PAD_index]
                decode_text = ' '.join(decode_text_list)
                decode_text = decode_text.replace(" ##", "").strip("##").strip()
                all_caption_lists.append(decode_text)
    # Save full results (video id + start time + caption) when the dataset exposes the mapping.
    if test_set is not None and hasattr(test_set, 'iter2video_pairs_dict'):
        hyp_path = os.path.join(args.output_dir, "hyp_complete_results.txt")
        with open(hyp_path, "w", encoding='utf-8') as writer:
            writer.write("{}\t{}\t{}\n".format("video_id", "start_time", "caption"))
            for idx, pre_txt in enumerate(all_result_lists):
                video_id, sub_id = test_set.iter2video_pairs_dict[idx]
                start_time = test_set.data_dict[video_id]['start'][sub_id]
                writer.write("{}\t{}\t{}\n".format(video_id, start_time, pre_txt))
        logger.info("File of complete results is saved in {}".format(hyp_path))
    # Save pure results (caption text only), hypotheses and references side by side.
    hyp_path = os.path.join(args.output_dir, "hyp.txt")
    with open(hyp_path, "w", encoding='utf-8') as writer:
        for pre_txt in all_result_lists:
            writer.write(pre_txt+"\n")
    ref_path = os.path.join(args.output_dir, "ref.txt")
    with open(ref_path, "w", encoding='utf-8') as writer:
        for ground_txt in all_caption_lists:
            writer.write(ground_txt + "\n")
    if args.datatype == "msrvtt":
        # MSR-VTT has multiple references per video: rebuild the reference
        # lists from the dataset and transpose to the per-reference layout
        # expected by nlg-eval.
        all_caption_lists = []
        sentences_dict = test_dataloader.dataset.sentences_dict
        video_sentences_dict = test_dataloader.dataset.video_sentences_dict
        for idx in range(len(sentences_dict)):
            video_id, _ = sentences_dict[idx]
            sentences = video_sentences_dict[video_id]
            all_caption_lists.append(sentences)
        all_caption_lists = [list(itms) for itms in zip(*all_caption_lists)]
    else:
        all_caption_lists = [all_caption_lists]
    # Evaluate
    metrics_nlg = nlgEvalObj.compute_metrics(ref_list=all_caption_lists, hyp_list=all_result_lists)
    logger.info(">>> BLEU_1: {:.4f}, BLEU_2: {:.4f}, BLEU_3: {:.4f}, BLEU_4: {:.4f}".
                format(metrics_nlg["Bleu_1"], metrics_nlg["Bleu_2"], metrics_nlg["Bleu_3"], metrics_nlg["Bleu_4"]))
    logger.info(">>> METEOR: {:.4f}, ROUGE_L: {:.4f}, CIDEr: {:.4f}".format(metrics_nlg["METEOR"], metrics_nlg["ROUGE_L"], metrics_nlg["CIDEr"]))
    Bleu_4 = metrics_nlg["Bleu_4"]
    return Bleu_4
# Map dataset name -> factory functions building its train/val dataloaders.
DATALOADER_DICT = {
    "youcook": {"train": dataloader_youcook_train, "val": dataloader_youcook_test},
    "msrvtt": {"train": dataloader_msrvtt_train, "val": dataloader_msrvtt_test},
}
def main():
    """Entry point: build tokenizer/model/dataloaders, then train and/or evaluate
    the captioning task according to args.do_train / args.do_eval."""
    global logger
    args = get_args()
    args = set_seed_logger(args)
    device, n_gpu = init_device(args, args.local_rank)
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    model = init_model(args, device, n_gpu, args.local_rank)
    assert args.task_type == "caption"
    nlgEvalObj = NLGEval(no_overlap=False, no_skipthoughts=True, no_glove=True, metrics_to_omit=None)
    assert args.datatype in DATALOADER_DICT
    test_dataloader, test_length = DATALOADER_DICT[args.datatype]["val"](args, tokenizer)
    if args.local_rank == 0:
        logger.info("***** Running test *****")
        logger.info("  Num examples = %d", test_length)
        logger.info("  Batch size = %d", args.batch_size_val)
        logger.info("  Num steps = %d", len(test_dataloader))
    if args.do_train:
        train_dataloader, train_length, train_sampler = DATALOADER_DICT[args.datatype]["train"](args, tokenizer)
        # NOTE(review): int() is applied before the division, so this yields a
        # float (approximate ceil) rather than int(ceil(...)) — confirm the
        # optimizer schedule tolerates a float t_total.
        num_train_optimization_steps = (int(len(train_dataloader) + args.gradient_accumulation_steps - 1)
                                        / args.gradient_accumulation_steps) * args.epochs
        coef_lr = args.coef_lr
        if args.init_model:
            # When warm-starting from a checkpoint, do not rescale the LR.
            coef_lr = 1.0
        optimizer, scheduler, model = prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, args.local_rank, coef_lr=coef_lr)
        if args.local_rank == 0:
            logger.info("***** Running training *****")
            logger.info("  Num examples = %d", train_length)
            logger.info("  Batch size = %d", args.batch_size)
            logger.info("  Num steps = %d", num_train_optimization_steps * args.gradient_accumulation_steps)
        best_score = 0.00001
        best_output_model_file = None
        global_step = 0
        for epoch in range(args.epochs):
            train_sampler.set_epoch(epoch)  # reshuffle for distributed sampling
            tr_loss, global_step = train_epoch(epoch, args, model, train_dataloader, tokenizer, device, n_gpu, optimizer,
                                               scheduler, global_step, nlgEvalObj=nlgEvalObj, local_rank=args.local_rank)
            if args.local_rank == 0:
                logger.info("Epoch %d/%s Finished, Train Loss: %f", epoch + 1, args.epochs, tr_loss)
                output_model_file = save_model(epoch, args, model, type_name="")
                if epoch > 0:
                    # Track the checkpoint with the best BLEU-4 on the val split.
                    Bleu_4 = eval_epoch(args, model, test_dataloader, tokenizer, device, n_gpu, nlgEvalObj=nlgEvalObj)
                    if best_score <= Bleu_4:
                        best_score = Bleu_4
                        best_output_model_file = output_model_file
                    logger.info("The best model is: {}, the Bleu_4 is: {:.4f}".format(best_output_model_file, best_score))
                else:
                    logger.warning("Skip the evaluation after {}-th epoch.".format(epoch+1))
        if args.local_rank == 0:
            # Reload the best checkpoint and run a final evaluation.
            model = load_model(-1, args, n_gpu, device, model_file=best_output_model_file)
            eval_epoch(args, model, test_dataloader, tokenizer, device, n_gpu, nlgEvalObj=nlgEvalObj)
    elif args.do_eval:
        if args.local_rank == 0:
            eval_epoch(args, model, test_dataloader, tokenizer, device, n_gpu, nlgEvalObj=nlgEvalObj)
if __name__ == "__main__":
    main()
UniVL | UniVL-main/util.py | import torch
import torch.nn as nn
import threading
from torch._utils import ExceptionWrapper
import logging
def get_a_var(obj):
    """Return the first torch.Tensor found in `obj` (searched recursively
    through lists, tuples and dict items), or None if there is none."""
    if isinstance(obj, torch.Tensor):
        return obj
    if isinstance(obj, (list, tuple)):
        candidates = obj
    elif isinstance(obj, dict):
        candidates = obj.items()
    else:
        return None
    for item in candidates:
        found = get_a_var(item)
        if isinstance(found, torch.Tensor):
            return found
    return None
def parallel_apply(fct, model, inputs, device_ids):
    """Replicate `model` onto `device_ids` and run `fct(replica, *input)` on
    each replica in its own thread (single-threaded when there is one replica).

    Exceptions raised inside a worker are captured and re-raised in the caller.
    Returns the list of per-replica outputs in input order.
    """
    modules = nn.parallel.replicate(model, device_ids)
    assert len(modules) == len(inputs)
    lock = threading.Lock()
    results = {}
    # Capture the caller's grad mode: new threads do not inherit it.
    grad_enabled = torch.is_grad_enabled()

    def _worker(i, module, input):
        torch.set_grad_enabled(grad_enabled)
        # The replica's device is inferred from the first tensor in its input.
        device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                # this also avoids accidental slicing of `input` if it is a Tensor
                if not isinstance(input, (list, tuple)):
                    input = (input,)
                output = fct(module, *input)
            with lock:
                results[i] = output
        except Exception:
            # Store the wrapped exception; it is re-raised on the main thread.
            with lock:
                results[i] = ExceptionWrapper(where="in replica {} on device {}".format(i, device))

    if len(modules) > 1:
        threads = [threading.Thread(target=_worker, args=(i, module, input))
                   for i, (module, input) in enumerate(zip(modules, inputs))]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        _worker(0, modules[0], inputs[0])
    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, ExceptionWrapper):
            output.reraise()
        outputs.append(output)
    return outputs
def get_logger(filename=None):
    """Configure and return the shared 'logger' instance.

    Console output goes through the root logger at INFO level; when `filename`
    is given, a DEBUG-level file handler is attached to the root logger as well.
    """
    log = logging.getLogger('logger')
    log.setLevel(logging.DEBUG)
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(levelname)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S')
    if filename is not None:
        file_handler = logging.FileHandler(filename)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
        logging.getLogger().addHandler(file_handler)
    return log
UniVL | UniVL-main/modules/module_visual.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import torch
from torch import nn
import torch.nn.functional as F
from .file_utils import cached_path
from .until_config import PretrainedConfig
from .until_module import PreTrainedModel, LayerNorm, ACT2FN
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {}
CONFIG_NAME = 'visual_config.json'
WEIGHTS_NAME = 'visual_pytorch_model.bin'
class VisualConfig(PretrainedConfig):
    """Configuration class to store the configuration of a `VisualModel`."""
    pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
    config_name = CONFIG_NAME
    weights_name = WEIGHTS_NAME

    def __init__(self,
                 vocab_size_or_config_json_file=4096,
                 hidden_size=768,
                 num_hidden_layers=3,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 initializer_range=0.02):
        """Constructs VisualConfig.

        Args:
            vocab_size_or_config_json_file: Dimensionality of the raw video
                features (int), or the path to a JSON config file whose
                key/value pairs are loaded verbatim onto this instance (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads per attention layer.
            intermediate_size: Size of the "intermediate" (feed-forward) layer.
            hidden_act: Activation function name ("gelu", "relu", "swish") or a callable.
            hidden_dropout_prob: Dropout for all fully connected layers.
            attention_probs_dropout_prob: Dropout for the attention probabilities.
            max_position_embeddings: Maximum sequence length the model supports.
            initializer_range: Stddev of the truncated-normal weight initializer.

        Raises:
            ValueError: if the first argument is neither an int nor a str path.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every key/value pair from the JSON file onto the instance.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.initializer_range = initializer_range
        else:
            # Bug fix: original message ran "(int)" and "or" together with no space.
            raise ValueError("First argument must be either a vocabulary size (int) "
                             "or the path to a pretrained model config file (str)")
class VisualEmbeddings(nn.Module):
    """Project raw video features to the hidden size and add learned position embeddings."""

    def __init__(self, config):
        super(VisualEmbeddings, self).__init__()
        self.word_embeddings = nn.Linear(config.vocab_size, config.hidden_size)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        # Name `LayerNorm` kept TensorFlow-style so pretrained checkpoints map 1:1.
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_embeddings):
        batch_size = input_embeddings.size(0)
        seq_length = input_embeddings.size(1)
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_embeddings.device)
        position_ids = position_ids.unsqueeze(0).expand(batch_size, -1)
        projected = self.word_embeddings(input_embeddings)
        combined = projected + self.position_embeddings(position_ids)
        return self.dropout(self.LayerNorm(combined))
class VisualSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention over video hidden states (BERT-style)."""

    def __init__(self, config):
        super(VisualSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # [batch, seq, hidden] -> [batch, heads, seq, head_size]
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        # `attention_mask` is the additive mask precomputed by VisualModel.forward
        # (0 for keep, -10000 for masked), broadcastable to the score tensor.
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask is (precomputed for all layers in VisualModel forward() function)
        attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        # [batch, heads, seq, head_size] -> [batch, seq, hidden]
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer
class VisualSelfOutput(nn.Module):
    """Post-attention projection: dense -> dropout -> residual add -> LayerNorm."""

    def __init__(self, config):
        super(VisualSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class VisualAttention(nn.Module):
    """Full attention sublayer: self-attention followed by the residual output block."""

    def __init__(self, config):
        super(VisualAttention, self).__init__()
        self.self = VisualSelfAttention(config)
        self.output = VisualSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        attended = self.self(input_tensor, attention_mask)
        return self.output(attended, input_tensor)
class VisualIntermediate(nn.Module):
    """Position-wise feed-forward expansion: hidden_size -> intermediate_size + activation."""

    def __init__(self, config):
        super(VisualIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class VisualOutput(nn.Module):
    """Feed-forward contraction: intermediate_size -> hidden_size with residual LayerNorm."""

    def __init__(self, config):
        super(VisualOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        contracted = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(contracted + input_tensor)
class VisualLayer(nn.Module):
    """One transformer encoder block: attention sublayer + feed-forward sublayer."""

    def __init__(self, config):
        super(VisualLayer, self).__init__()
        self.attention = VisualAttention(config)
        self.intermediate = VisualIntermediate(config)
        self.output = VisualOutput(config)

    def forward(self, hidden_states, attention_mask):
        attention_output = self.attention(hidden_states, attention_mask)
        return self.output(self.intermediate(attention_output), attention_output)
class VisualEncoder(nn.Module):
    """Stack of `num_hidden_layers` identical VisualLayer blocks."""

    def __init__(self, config):
        super(VisualEncoder, self).__init__()
        # Deep-copy a single prototype (weights are re-initialized later by
        # VisualModel via apply(init_weights)).
        prototype = VisualLayer(config)
        self.layer = nn.ModuleList(
            copy.deepcopy(prototype) for _ in range(config.num_hidden_layers))

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        outputs = []
        for block in self.layer:
            hidden_states = block(hidden_states, attention_mask)
            if output_all_encoded_layers:
                outputs.append(hidden_states)
        if not output_all_encoded_layers:
            # Only the final layer's states are returned (still wrapped in a list).
            outputs.append(hidden_states)
        return outputs
class VisualPooler(nn.Module):
    """Pool the sequence by passing the first token's hidden state through a
    tanh-activated dense layer."""

    def __init__(self, config):
        super(VisualPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        first_token = hidden_states[:, 0]
        return self.activation(self.dense(first_token))
class VisualPredictionHeadTransform(nn.Module):
    """Dense -> activation -> LayerNorm transform applied before the LM prediction head."""

    def __init__(self, config):
        super(VisualPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)

    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
class VisualLMPredictionHead(nn.Module):
    """Predict visual tokens by projecting transformed hidden states against the
    (tied) embedding matrix."""

    def __init__(self, config, visual_model_embedding_weights):
        super(VisualLMPredictionHead, self).__init__()
        self.transform = VisualPredictionHeadTransform(config)
        # Weight tying: reuse the embedding matrix; only the bias is head-specific.
        self.weight = visual_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(visual_model_embedding_weights.size(1)))

    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        return transformed.matmul(self.weight) + self.bias
class VisualOnlyMLMHead(nn.Module):
    """Masked-frame-modeling head: thin wrapper around VisualLMPredictionHead."""

    def __init__(self, config, visual_model_embedding_weights):
        super(VisualOnlyMLMHead, self).__init__()
        self.predictions = VisualLMPredictionHead(config, visual_model_embedding_weights)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class VisualOnlyNSPHead(nn.Module):
    """Binary (NSP-style) relationship classifier over the pooled output."""

    def __init__(self, config):
        super(VisualOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
class VisualPreTrainingHeads(nn.Module):
    """Joint pretraining heads: frame prediction plus pairwise (NSP-style) classification."""

    def __init__(self, config, visual_model_embedding_weights):
        super(VisualPreTrainingHeads, self).__init__()
        self.predictions = VisualLMPredictionHead(config, visual_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class VisualModel(PreTrainedModel):
    """BERT-style transformer encoder over sequences of video features.

    Params:
        config: a VisualConfig instance describing the architecture.

    Inputs:
        `video`: float tensor of video features, [batch_size, sequence_length, vocab_size].
        `attention_mask`: optional [batch_size, sequence_length] mask with 1 for
            real positions and 0 for padding; defaults to all ones.
        `output_all_encoded_layers`: when True, return the hidden states of every
            layer; when False, only the last layer's.

    Outputs: tuple (encoded_layers, pooled_output)
        `encoded_layers`: list of per-layer hidden states (or the final
            [batch_size, sequence_length, hidden_size] tensor when
            `output_all_encoded_layers=False`).
        `pooled_output`: [batch_size, hidden_size] tanh-pooled first-token state.

    Example usage:
    ```python
    config = modeling.VisualConfig(vocab_size_or_config_json_file=4096, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = modeling.VisualModel(config=config)
    all_encoder_layers, pooled_output = model(video, video_mask)
    ```
    """

    def __init__(self, config):
        super(VisualModel, self).__init__(config)
        self.embeddings = VisualEmbeddings(config)
        self.encoder = VisualEncoder(config)
        self.pooler = VisualPooler(config)
        self.apply(self.init_weights)

    def forward(self, video, attention_mask=None, output_all_encoded_layers=True):
        if attention_mask is None:
            attention_mask = torch.ones(video.size(0), video.size(1))
        # Build a broadcastable additive mask [batch, 1, 1, seq]: 0.0 where we
        # attend, -10000.0 where masked. Added to raw attention scores before
        # softmax, which effectively removes masked positions; cast to the
        # model dtype for fp16 compatibility.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        embedding_output = self.embeddings(video)
        encoded_layers = self.encoder(embedding_output,
                                      extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
UniVL | UniVL-main/modules/optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
import logging
logger = logging.getLogger(__name__)
def warmup_cosine(x, warmup=0.002):
    """Cosine learning-rate schedule with linear warmup.

    Ramps linearly from 0 to 1 over the first `warmup` fraction of training,
    then follows a half-cosine decay of the training progress.

    Args:
        x: training progress in [0, 1] (fraction of total steps completed).
        warmup: fraction of total steps used for the linear warmup phase.

    Returns:
        float multiplier applied to the base learning rate.
    """
    if x < warmup:
        return x/warmup
    # Bug fix: the original called torch.cos on a plain Python float, which
    # raises a TypeError (torch.cos expects a Tensor). math.cos is correct
    # here since x is a scalar.
    return 0.5 * (1.0 + math.cos(math.pi * x))
def warmup_constant(x, warmup=0.002):
    """Linear warmup to 1.0, then a constant learning-rate multiplier.

    During the first `warmup` fraction of training the multiplier grows
    linearly from 0 to 1; afterwards it stays fixed at 1.0.
    """
    if x >= warmup:
        return 1.0
    return x / warmup
def warmup_linear(x, warmup=0.002):
    """Triangular schedule: linear ramp up over `warmup`, then linear decay.

    The multiplier peaks at 1.0 when the progress `x` reaches `warmup` and
    falls back to 0 by the end of training (x == 1).
    """
    if x < warmup:
        return x / warmup
    # Linear interpolation from 1.0 (at x == warmup) down to 0.0 (at x == 1),
    # clamped so the multiplier never goes negative.
    slope = (x - 1.) / (warmup - 1.)
    return max(slope, 0)
# Registry mapping schedule names (the values accepted by BertAdam's
# `schedule` argument) to the warmup/decay functions defined above.
SCHEDULES = {
    'warmup_cosine': warmup_cosine,
    'warmup_constant': warmup_constant,
    'warmup_linear': warmup_linear,
}
class BertAdam(Optimizer):
    """Implements BERT version of Adam algorithm with weight decay fix.

    Differences from stock Adam: weight decay is decoupled (added to the
    update rather than folded into the gradient) and there is *no* bias
    correction of the moment estimates.

    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate. Default: -1
        schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """
    def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
                 b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
                 max_grad_norm=1.0):
        # Validate hyper-parameters up front so misconfiguration fails fast.
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if schedule not in SCHEDULES:
            raise ValueError("Invalid schedule parameter: {}".format(schedule))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
        if not 0.0 <= b1 < 1.0:
            raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
        if not 0.0 <= b2 < 1.0:
            raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
        if not e >= 0.0:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
                        b1=b1, b2=b2, e=e, weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)
    def get_lr(self):
        """Return the scheduled learning rate for each parameter with a grad.

        Returns [0] if `step()` has not run yet (optimizer state is empty).
        """
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                state = self.state[p]
                if len(state) == 0:
                    # No state yet: step() has never been called.
                    return [0]
                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['next_m'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['next_v'] = torch.zeros_like(p.data)
                next_m, next_v = state['next_m'], state['next_v']
                beta1, beta2 = group['b1'], group['b2']
                # Add grad clipping.  NOTE(review): this clips each parameter
                # tensor's norm independently, not the global gradient norm.
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Decay the first and second moment running average coefficient
                # In-place operations to update the averages at the same time
                # next_m.mul_(beta1).add_(1 - beta1, grad) --> pytorch 1.7
                next_m.mul_(beta1).add_(grad, alpha=1 - beta1)
                # next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad) --> pytorch 1.7
                next_v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                # No bias correction of next_m / next_v (intentional for BERT).
                update = next_m / (next_v.sqrt() + group['e'])
                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                if group['weight_decay'] > 0.0:
                    update += group['weight_decay'] * p.data
                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    # Uses the pre-increment step count (step is bumped below),
                    # so the first update is scheduled at progress 0.
                    progress = state['step']/group['t_total']
                    lr_scheduled = group['lr'] * schedule_fct(progress, group['warmup'])
                else:
                    lr_scheduled = group['lr']
                update_with_lr = lr_scheduled * update
                p.data.add_(-update_with_lr)
                state['step'] += 1
        return loss
UniVL | UniVL-main/modules/module_decoder.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import numpy as np
import torch
from torch import nn
from .file_utils import cached_path
from .until_config import PretrainedConfig
from .until_module import PreTrainedModel, LayerNorm, ACT2FN
logger = logging.getLogger(__name__)
# No hosted pretrained decoder checkpoints; config/weight file names below are
# used when loading a decoder from a local directory.
PRETRAINED_MODEL_ARCHIVE_MAP = {}
CONFIG_NAME = 'decoder_config.json'
WEIGHTS_NAME = 'decoder_pytorch_model.bin'
class DecoderConfig(PretrainedConfig):
    """Configuration container for `DecoderModel`.

    Can be built either from an integer vocabulary size (plus keyword
    overrides) or from the path of a JSON config file.
    """
    pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
    config_name = CONFIG_NAME
    weights_name = WEIGHTS_NAME
    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 max_target_embeddings=128,
                 num_decoder_layers=1):
        """Constructs DecoderConfig.

        Args:
            vocab_size_or_config_json_file: vocabulary size (int) or the path
                of a JSON config file whose keys become attributes here.
            hidden_size: size of the encoder layers and the pooler layer.
            num_hidden_layers: number of hidden layers in the Transformer encoder.
            num_attention_heads: attention heads per attention layer.
            intermediate_size: size of the feed-forward layer.
            hidden_act: non-linear activation (callable or string name).
            hidden_dropout_prob: dropout for fully connected layers.
            attention_probs_dropout_prob: dropout for attention probabilities.
            type_vocab_size: vocabulary size of `token_type_ids`.
            initializer_range: stddev of the truncated-normal initializer.
            max_target_embeddings: maximum target sequence length supported.
            num_decoder_layers: number of decoder layers to stack.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Path given: every key/value from the JSON file becomes an attribute.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            # Integer given: take the explicit keyword arguments as-is.
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.max_target_embeddings = max_target_embeddings
            self.num_decoder_layers = num_decoder_layers
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")
class BertSelfOutput(nn.Module):
    """Projects attention output, then applies dropout and a residual LayerNorm."""
    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalisation.
        return self.LayerNorm(projected + input_tensor)
class BertIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size with activation."""
    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # Resolve the activation by name when it is given as a string.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Projects the feed-forward output back to hidden_size with residual LayerNorm."""
    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        contracted = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalisation.
        return self.LayerNorm(contracted + input_tensor)
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied before the LM output projection."""
    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # Resolve the activation by name when it is given as a string.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
    def forward(self, hidden_states):
        transformed = self.transform_act_fn(self.dense(hidden_states))
        return self.LayerNorm(transformed)
class BertLMPredictionHead(nn.Module):
    """Language-model head whose projection weights are tied to the embeddings."""
    def __init__(self, config, decoder_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        vocab_size = decoder_model_embedding_weights.size(0)
        embed_dim = decoder_model_embedding_weights.size(1)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token (the tied weight carries no bias).
        self.decoder = nn.Linear(embed_dim, vocab_size, bias=False)
        self.decoder.weight = decoder_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(vocab_size))
    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states)) + self.bias
class BertOnlyMLMHead(nn.Module):
    """Thin wrapper exposing only the masked-LM prediction head."""
    def __init__(self, config, decoder_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, decoder_model_embedding_weights)
    def forward(self, sequence_output):
        # Delegate straight to the tied-weight LM head.
        return self.predictions(sequence_output)
class MultiHeadAttention(nn.Module):
    ''' Multi-Head Attention module '''
    def __init__(self, config):
        super(MultiHeadAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # Separate projections for queries, keys and values.
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        """Reshape (B, L, all_head) -> (B, heads, L, head_size)."""
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)
    def forward(self, q, k, v, attention_mask):
        query_heads = self.transpose_for_scores(self.query(q))
        key_heads = self.transpose_for_scores(self.key(k))
        value_heads = self.transpose_for_scores(self.value(v))
        # Scaled dot-product scores; the caller supplies an *additive* mask
        # (0 for attended positions, large negative for masked ones).
        scores = torch.matmul(query_heads, key_heads.transpose(-1, -2))
        scores = scores / math.sqrt(self.attention_head_size)
        scores = scores + attention_mask
        probs = nn.Softmax(dim=-1)(scores)
        # Dropping whole attention distributions, as in the original
        # Transformer paper.
        probs = self.dropout(probs)
        context = torch.matmul(probs, value_heads)
        context = context.permute(0, 2, 1, 3).contiguous()
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        context = context.view(*merged_shape)
        return context, scores
class PositionwiseFeedForward(nn.Module):
    ''' A two-feed-forward-layer module '''
    def __init__(self, d_in, d_hid, dropout=0.1):
        super().__init__()
        self.w_1 = nn.Conv1d(d_in, d_hid, 1) # position-wise
        self.w_2 = nn.Conv1d(d_hid, d_in, 1) # position-wise
        self.layer_norm = nn.LayerNorm(d_in)
        self.dropout = nn.Dropout(dropout)
    def forward(self, x):
        # Conv1d wants (batch, channels, length), so swap the last two dims
        # around the two convolutions.
        transformed = self.w_1(x.transpose(1, 2))
        transformed = self.w_2(ACT2FN["gelu"](transformed)).transpose(1, 2)
        # Residual connection around the dropped-out transformation.
        return self.layer_norm(self.dropout(transformed) + x)
class DecoderAttention(nn.Module):
    """Multi-head attention followed by the standard residual output block."""
    def __init__(self, config):
        super(DecoderAttention, self).__init__()
        self.att = MultiHeadAttention(config)
        self.output = BertSelfOutput(config)
    def forward(self, q, k, v, attention_mask):
        context, scores = self.att(q, k, v, attention_mask)
        # The query stream is the residual input of the output sub-layer.
        return self.output(context, q), scores
class DecoderLayer(nn.Module):
    """One decoder layer: masked self-attention, encoder attention, feed-forward."""
    def __init__(self, config):
        super(DecoderLayer, self).__init__()
        self.slf_attn = DecoderAttention(config)
        self.enc_attn = DecoderAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)
    def forward(self, dec_input, enc_output, slf_attn_mask=None, dec_enc_attn_mask=None):
        # Masked self-attention over the target sequence.
        self_attended, _ = self.slf_attn(dec_input, dec_input, dec_input, slf_attn_mask)
        # Cross-attention: queries from the decoder, keys/values from the encoder.
        cross_attended, cross_scores = self.enc_attn(self_attended, enc_output, enc_output, dec_enc_attn_mask)
        # Position-wise feed-forward; `output` adds the residual + LayerNorm.
        layer_out = self.output(self.intermediate(cross_attended), cross_attended)
        return layer_out, cross_scores
class DecoderEmbeddings(nn.Module):
    """Word + position embeddings for the decoder, tied to externally
    supplied weight tensors.
    """
    def __init__(self, config, decoder_word_embeddings_weight, decoder_position_embeddings_weight):
        super(DecoderEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = nn.Embedding(config.max_target_embeddings, config.hidden_size)
        # Share the tables with the weights provided by the caller.
        self.word_embeddings.weight = decoder_word_embeddings_weight
        self.position_embeddings.weight = decoder_position_embeddings_weight
        # self.LayerNorm is not snake-cased to stick with TensorFlow model
        # variable names, so TF checkpoints load cleanly.
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids):
        positions = torch.arange(input_ids.size(1), dtype=torch.long, device=input_ids.device)
        positions = positions.unsqueeze(0).expand_as(input_ids)
        combined = self.word_embeddings(input_ids) + self.position_embeddings(positions)
        return self.dropout(self.LayerNorm(combined))
class Decoder(nn.Module):
    """Stack of `num_decoder_layers` identical `DecoderLayer` modules."""
    def __init__(self, config):
        super(Decoder, self).__init__()
        template = DecoderLayer(config)
        self.layer = nn.ModuleList([copy.deepcopy(template) for _ in range(config.num_decoder_layers)])
    def forward(self, hidden_states, encoder_outs, self_attn_mask, attention_mask, output_all_encoded_layers=False):
        dec_att_scores = None
        all_encoder_layers = []
        all_dec_att_probs = []
        for layer_module in self.layer:
            hidden_states, dec_att_scores = layer_module(hidden_states, encoder_outs, self_attn_mask, attention_mask)
            if output_all_encoded_layers:
                # Record every intermediate layer when requested.
                all_encoder_layers.append(hidden_states)
                all_dec_att_probs.append(dec_att_scores)
        if not output_all_encoded_layers:
            # Otherwise keep only the final layer's states and scores.
            all_encoder_layers.append(hidden_states)
            all_dec_att_probs.append(dec_att_scores)
        return all_encoder_layers, all_dec_att_probs
class DecoderClassifier(nn.Module):
    """Maps decoder hidden states to vocabulary logits via the MLM head."""
    def __init__(self, config, embedding_weights):
        super(DecoderClassifier, self).__init__()
        self.cls = BertOnlyMLMHead(config, embedding_weights)
    def forward(self, hidden_states):
        return self.cls(hidden_states)
class DecoderModel(PreTrainedModel):
    """
    Transformer decoder consisting of *config.num_decoder_layers* layers.
    Each layer is a :class:`DecoderLayer`.

    The word/position embedding tables (and the classifier's projection)
    are tied to the externally supplied weight tensors.
    """
    def __init__(self, config, decoder_word_embeddings_weight, decoder_position_embeddings_weight):
        super(DecoderModel, self).__init__(config)
        self.config = config
        self.max_target_length = config.max_target_embeddings
        self.embeddings = DecoderEmbeddings(config, decoder_word_embeddings_weight, decoder_position_embeddings_weight)
        self.decoder = Decoder(config)
        # Classifier shares the word-embedding weights (tied LM head).
        self.classifier = DecoderClassifier(config, decoder_word_embeddings_weight)
        self.apply(self.init_weights)
    def forward(self, input_ids, encoder_outs=None, answer_mask=None, encoder_mask=None):
        """
        Args:
            input_ids (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing
            encoder_outs (Tensor, optional): output from the encoder, used for encoder-side attention
            answer_mask: padding mask over the target tokens (1 = keep)
            encoder_mask: padding mask over the encoder positions (1 = keep)
        Returns:
            vocabulary scores for each target position, shape `(batch, tgt_len, vocab)`
        """
        embedding_output = self.embeddings(input_ids)
        # Encoder-side padding mask, broadcastable over heads/queries and
        # converted to additive form (0 keeps, -10000 masks).
        extended_encoder_mask = encoder_mask.unsqueeze(1).unsqueeze(2)  # b x 1 x 1 x ls
        extended_encoder_mask = extended_encoder_mask.to(dtype=self.dtype) # fp16 compatibility
        extended_encoder_mask = (1.0 - extended_encoder_mask) * -10000.0
        extended_answer_mask = answer_mask.unsqueeze(1).unsqueeze(2)
        extended_answer_mask = extended_answer_mask.to(dtype=self.dtype)  # fp16 compatibility
        sz_b, len_s, _ = embedding_output.size()
        # Causal (strictly upper-triangular) mask: 1 on future positions.
        subsequent_mask = torch.triu(torch.ones((len_s, len_s), device=embedding_output.device, dtype=embedding_output.dtype), diagonal=1)
        self_attn_mask = subsequent_mask.unsqueeze(0).expand(sz_b, -1, -1).unsqueeze(1)  # b x 1 x ls x ls
        # A position is masked if it is padding OR in the future; `gt(0)`
        # merges the two binary masks before scaling to additive form.
        slf_attn_mask = ((1.0 - extended_answer_mask) + self_attn_mask).gt(0).to(dtype=self.dtype)
        self_attn_mask = slf_attn_mask * -10000.0
        decoded_layers, dec_att_scores = self.decoder(embedding_output,
                                                      encoder_outs,
                                                      self_attn_mask,
                                                      extended_encoder_mask,
                                                      )
        # Only the top layer feeds the classifier.
        sequence_output = decoded_layers[-1]
        cls_scores = self.classifier(sequence_output)
        return cls_scores
| 18,283 | 43.923833 | 138 | py |
UniVL | UniVL-main/modules/modeling.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss
from modules.until_module import PreTrainedModel, LayerNorm, CrossEn, MILNCELoss, MaxMarginRankingLoss
from modules.module_bert import BertModel, BertConfig, BertOnlyMLMHead
from modules.module_visual import VisualModel, VisualConfig, VisualOnlyMLMHead
from modules.module_cross import CrossModel, CrossConfig
from modules.module_decoder import DecoderModel, DecoderConfig
logger = logging.getLogger(__name__)
class UniVLPreTrainedModel(PreTrainedModel, nn.Module):
    """ An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.

    Concrete subclasses must populate `self.bert` and `self.visual` in their
    __init__ (asserted in `from_pretrained`); `self.cross` / `self.decoder`
    are optional.
    """
    def __init__(self, bert_config, visual_config, cross_config, decoder_config, *inputs, **kwargs):
        # utilize bert config as base config
        super(UniVLPreTrainedModel, self).__init__(bert_config)
        self.bert_config = bert_config
        self.visual_config = visual_config
        self.cross_config = cross_config
        self.decoder_config = decoder_config
        # Sub-modules are created by subclasses, not here.
        self.bert = None
        self.visual = None
        self.cross = None
        self.decoder = None
    @classmethod
    def from_pretrained(cls, pretrained_bert_name, visual_model_name, cross_model_name, decoder_model_name,
                        state_dict=None, cache_dir=None, type_vocab_size=2, *inputs, **kwargs):
        """Load the four sub-model configs, build the model, and initialise
        it from `state_dict` when one is available.

        Only the BERT config lookup may yield pretrained weights; the other
        three configs are loaded without a state dict.
        """
        task_config = None
        if "task_config" in kwargs.keys():
            task_config = kwargs["task_config"]
            # Normalise local_rank so rank-0-only logging works downstream.
            if not hasattr(task_config, "local_rank"):
                task_config.__dict__["local_rank"] = 0
            elif task_config.local_rank == -1:
                task_config.local_rank = 0
        bert_config, state_dict = BertConfig.get_config(pretrained_bert_name, cache_dir, type_vocab_size, state_dict, task_config=task_config)
        visual_config, _ = VisualConfig.get_config(visual_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config)
        cross_config, _ = CrossConfig.get_config(cross_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config)
        decoder_config, _ = DecoderConfig.get_config(decoder_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config)
        model = cls(bert_config, visual_config, cross_config, decoder_config, *inputs, **kwargs)
        # Subclasses are required to have built these two encoders.
        assert model.bert is not None
        assert model.visual is not None
        if state_dict is not None:
            model = cls.init_preweight(model, state_dict, task_config=task_config)
        return model
class NormalizeVideo(nn.Module):
    """Flattens leading dims of raw video features and layer-normalises them."""
    def __init__(self, task_config):
        super(NormalizeVideo, self).__init__()
        self.visual_norm2d = LayerNorm(task_config.video_dim)
    def forward(self, video):
        feats = torch.as_tensor(video).float()
        # Collapse any leading batch/pair dims into one: (-1, frames, video_dim).
        feats = feats.view(-1, feats.shape[-2], feats.shape[-1])
        return self.visual_norm2d(feats)
def show_log(task_config, info):
    """Log `info` once (rank 0 only) to avoid duplicate messages in DDP runs."""
    if task_config is not None and task_config.local_rank != 0:
        # Non-zero ranks stay silent.
        return
    logger.warning(info)
def update_attr(target_name, target_config, target_attr_name, source_config, source_attr_name, default_value=None):
    """Copy an attribute from source_config onto target_config.

    The copy only happens when the source defines the attribute and either no
    `default_value` was given or the source's value differs from it. Returns
    the (possibly updated) target_config.
    """
    if not hasattr(source_config, source_attr_name):
        return target_config
    source_value = getattr(source_config, source_attr_name)
    if default_value is None or source_value != default_value:
        setattr(target_config, target_attr_name, source_value)
        show_log(source_config, "Set {}.{}: {}.".format(target_name,
                                                        target_attr_name, getattr(target_config, target_attr_name)))
    return target_config
def check_attr(target_name, task_config):
    """Return the value stored for `target_name` in task_config's instance
    dict, or False when the attribute is missing entirely.

    Mirrors `hasattr(...) and task_config.__dict__[...]` exactly, including
    the lookup in `__dict__` rather than via getattr.
    """
    if not hasattr(task_config, target_name):
        return False
    return task_config.__dict__[target_name]
class UniVL(UniVLPreTrainedModel):
    def __init__(self, bert_config, visual_config, cross_config, decoder_config, task_config):
        """Assemble UniVL from its four sub-model configs.

        Always builds the text and video encoders; builds the cross encoder,
        decoder and pretraining heads conditionally based on `task_config`
        (stage_two / train_sim_after_cross / do_pretrain flags).
        """
        super(UniVL, self).__init__(bert_config, visual_config, cross_config, decoder_config)
        self.task_config = task_config
        # Label value marking video frames excluded from the MFM NCE loss.
        self.ignore_video_index = -1
        # Sequence/frame budgets must fit each sub-model's position tables.
        assert self.task_config.max_words <= bert_config.max_position_embeddings
        assert self.task_config.max_words <= decoder_config.max_target_embeddings
        assert self.task_config.max_frames <= visual_config.max_position_embeddings
        assert self.task_config.max_words + self.task_config.max_frames <= cross_config.max_position_embeddings
        # Stage one: dual-encoder similarity only.  Stage two adds the cross
        # encoder, decoder and pretraining objectives.
        self._stage_one = True
        self._stage_two = False
        if check_attr('stage_two', self.task_config):
            self._stage_one = False
            self._stage_two = self.task_config.stage_two
        show_log(task_config, "Stage-One:{}, Stage-Two:{}".format(self._stage_one, self._stage_two))
        self.train_sim_after_cross = False
        if self._stage_one and check_attr('train_sim_after_cross', self.task_config):
            self.train_sim_after_cross = True
            show_log(task_config, "Test retrieval after cross encoder.")
        # Text Encoder ===>
        bert_config = update_attr("bert_config", bert_config, "num_hidden_layers",
                                  self.task_config, "text_num_hidden_layers")
        self.bert = BertModel(bert_config)
        bert_word_embeddings_weight = self.bert.embeddings.word_embeddings.weight
        bert_position_embeddings_weight = self.bert.embeddings.position_embeddings.weight
        # <=== End of Text Encoder
        # Video Encoder ===>
        visual_config = update_attr("visual_config", visual_config, "num_hidden_layers",
                                    self.task_config, "visual_num_hidden_layers")
        self.visual = VisualModel(visual_config)
        visual_word_embeddings_weight = self.visual.embeddings.word_embeddings.weight
        # <=== End of Video Encoder
        if self._stage_one is False or self.train_sim_after_cross:
            # Cross Encoder ===>
            cross_config = update_attr("cross_config", cross_config, "num_hidden_layers",
                                       self.task_config, "cross_num_hidden_layers")
            self.cross = CrossModel(cross_config)
            # <=== End of Cross Encoder
            if self.train_sim_after_cross is False:
                # Decoder ===>
                decoder_config = update_attr("decoder_config", decoder_config, "num_decoder_layers",
                                             self.task_config, "decoder_num_hidden_layers")
                # The decoder ties its embeddings to the text encoder's tables.
                self.decoder = DecoderModel(decoder_config, bert_word_embeddings_weight, bert_position_embeddings_weight)
                # <=== End of Decoder
            if self.task_config.do_pretrain:
                # Masked LM / masked-frame heads share the encoder embeddings.
                self.cls = BertOnlyMLMHead(bert_config, bert_word_embeddings_weight)
                self.cls_visual = VisualOnlyMLMHead(visual_config, visual_word_embeddings_weight)
                self.alm_loss_fct = CrossEntropyLoss(ignore_index=-1)
            self.similarity_dense = nn.Linear(bert_config.hidden_size, 1)
            self.decoder_loss_fct = CrossEntropyLoss(ignore_index=-1)
        self.normalize_video = NormalizeVideo(task_config)
        mILNCELoss = MILNCELoss(batch_size=task_config.batch_size // task_config.n_gpu, n_pair=task_config.n_pair, )
        maxMarginRankingLoss = MaxMarginRankingLoss(margin=task_config.margin,
                                                    negative_weighting=task_config.negative_weighting,
                                                    batch_size=task_config.batch_size // task_config.n_gpu,
                                                    n_pair=task_config.n_pair,
                                                    hard_negative_rate=task_config.hard_negative_rate, )
        # Stage two scores pairs via the cross encoder (CrossEn); stage one
        # uses a MIL-NCE or max-margin ranking objective, per `use_mil`.
        if task_config.use_mil:
            self.loss_fct = CrossEn() if self._stage_two else mILNCELoss
            self._pretrain_sim_loss_fct = mILNCELoss
        else:
            self.loss_fct = CrossEn() if self._stage_two else maxMarginRankingLoss
            self._pretrain_sim_loss_fct = maxMarginRankingLoss
        self.apply(self.init_weights)
    def forward(self, input_ids, token_type_ids, attention_mask, video, video_mask=None,
                pairs_masked_text=None, pairs_token_labels=None, masked_video=None, video_labels_index=None,
                input_caption_ids=None, decoder_mask=None, output_caption_ids=None):
        """Compute the combined training loss, or return None at eval time.

        In training mode the loss accumulates, depending on configuration:
        stage-one similarity loss; stage-two masked-LM, masked-frame NCE and
        joint similarity losses (pretraining); caption decoder loss; and a
        text-visual retrieval loss.
        """
        # Flatten any pair dimension into the batch dimension.
        input_ids = input_ids.view(-1, input_ids.shape[-1])
        token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
        attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
        video_mask = video_mask.view(-1, video_mask.shape[-1])
        video = self.normalize_video(video)
        if input_caption_ids is not None:
            input_caption_ids = input_caption_ids.view(-1, input_caption_ids.shape[-1])
            decoder_mask = decoder_mask.view(-1, decoder_mask.shape[-1])
        sequence_output, visual_output = self.get_sequence_visual_output(input_ids, token_type_ids, attention_mask,
                                                                         video, video_mask, shaped=True)
        if self.training:
            loss = 0.
            if self._stage_one:
                # Dual-encoder similarity objective.
                sim_matrix = self.get_similarity_logits(sequence_output, visual_output, attention_mask,
                                                        video_mask, shaped=True)
                sim_loss = self.loss_fct(sim_matrix)
                loss += sim_loss
            if self._stage_two:
                if self.task_config.do_pretrain:
                    # Re-encode the masked text/video pair for the masked-LM
                    # (ALM) and masked-frame (MFM/NCE) objectives.
                    pairs_masked_text = pairs_masked_text.view(-1, pairs_masked_text.shape[-1])
                    pairs_token_labels = pairs_token_labels.view(-1, pairs_token_labels.shape[-1])
                    masked_video = self.normalize_video(masked_video)
                    video_labels_index = video_labels_index.view(-1, video_labels_index.shape[-1])
                    sequence_output_alm, visual_output_alm = self.get_sequence_visual_output(pairs_masked_text, token_type_ids,
                                                                                             attention_mask, masked_video, video_mask, shaped=True)
                    cross_output, pooled_output, concat_mask = self._get_cross_output(sequence_output_alm, visual_output_alm, attention_mask, video_mask)
                    # The cross output concatenates text then video positions.
                    sequence_cross_output, visual_cross_output = torch.split(cross_output, [attention_mask.size(-1), video_mask.size(-1)], dim=1)
                    alm_loss = self._calculate_mlm_loss(sequence_cross_output, pairs_token_labels)
                    loss += alm_loss
                    nce_loss = self._calculate_mfm_loss(visual_cross_output, video, video_mask, video_labels_index)
                    loss += nce_loss
                    # Joint similarity loss on the *unmasked* encodings.
                    sim_matrix = self.get_similarity_logits(sequence_output, visual_output, attention_mask, video_mask,
                                                            shaped=True, _pretrain_joint=True)
                    sim_loss_joint = self._pretrain_sim_loss_fct(sim_matrix)
                    loss += sim_loss_joint
                if (input_caption_ids is not None) and \
                        (self.task_config.do_pretrain
                         or (self.task_config.do_pretrain is False and self.task_config.task_type == "caption")):
                    # Caption decoding: pretraining decodes from the masked
                    # encodings, fine-tuning from the clean ones.
                    if self.task_config.do_pretrain:
                        decoder_scores, res_tuples = self._get_decoder_score(sequence_output_alm, visual_output_alm,
                                                                             input_ids, attention_mask, video_mask,
                                                                             input_caption_ids, decoder_mask, shaped=True)
                    elif self.task_config.task_type == "caption":
                        decoder_scores, res_tuples = self._get_decoder_score(sequence_output, visual_output,
                                                                             input_ids, attention_mask, video_mask,
                                                                             input_caption_ids, decoder_mask, shaped=True)
                    else:
                        raise NotImplementedError
                    output_caption_ids = output_caption_ids.view(-1, output_caption_ids.shape[-1])
                    decoder_loss = self.decoder_loss_fct(decoder_scores.view(-1, self.bert_config.vocab_size), output_caption_ids.view(-1))
                    loss += decoder_loss
                if self.task_config.do_pretrain or self.task_config.task_type == "retrieval":
                    # Retrieval similarity loss (masked encodings in pretraining).
                    if self.task_config.do_pretrain:
                        sim_matrix_text_visual = self.get_similarity_logits(sequence_output_alm, visual_output_alm,
                                                                            attention_mask, video_mask, shaped=True)
                    elif self.task_config.task_type == "retrieval":
                        sim_matrix_text_visual = self.get_similarity_logits(sequence_output, visual_output,
                                                                            attention_mask, video_mask, shaped=True)
                    else:
                        raise NotImplementedError
                    sim_loss_text_visual = self.loss_fct(sim_matrix_text_visual)
                    loss += sim_loss_text_visual
            return loss
        else:
            # Evaluation paths call the dedicated helpers instead.
            return None
def _calculate_mlm_loss(self, sequence_output_alm, pairs_token_labels):
alm_scores = self.cls(sequence_output_alm)
alm_loss = self.alm_loss_fct(alm_scores.view(-1, self.bert_config.vocab_size), pairs_token_labels.view(-1))
return alm_loss
    def _calculate_mfm_loss(self, visual_output_alm, video, video_mask, video_labels_index):
        """Masked-frame-model NCE loss over cross-encoded visual features.

        Each predicted frame feature is scored against every raw frame feature
        in the batch via one matmul; the matching (diagonal) entry is the
        positive. Assumes `video` is (batch, frames, feat) — TODO confirm
        against the caller.
        """
        afm_scores = self.cls_visual(visual_output_alm)
        afm_scores_tr = afm_scores.view(-1, afm_scores.shape[-1])
        # Arrange raw frame features as (feat, batch*frames) so a single matmul
        # scores every prediction against every frame in the batch.
        video_tr = video.permute(2, 0, 1)
        video_tr = video_tr.view(video_tr.shape[0], -1)
        logits_matrix = torch.mm(afm_scores_tr, video_tr)
        video_mask_float = video_mask.to(dtype=torch.float)
        # Outer product of the flattened mask: entry (i, j) is 1 only when both
        # frame i and frame j are valid.
        mask_matrix = torch.mm(video_mask_float.view(-1, 1), video_mask_float.view(1, -1))
        # Push logits of invalid pairs to -1e8 so softmax effectively ignores them.
        masked_logits = logits_matrix + (1. - mask_matrix) * -1e8
        logpt = F.log_softmax(masked_logits, dim=-1)
        logpt = torch.diag(logpt)  # log-prob of the positive (matching) frame
        nce_loss = -logpt
        # Average only over frames that were actually masked for prediction.
        video_labels_index_mask = (video_labels_index != self.ignore_video_index)
        nce_loss = nce_loss.masked_select(video_labels_index_mask.view(-1))
        nce_loss = nce_loss.mean()
        return nce_loss
def get_sequence_visual_output(self, input_ids, token_type_ids, attention_mask, video, video_mask, shaped=False):
if shaped is False:
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
video = self.normalize_video(video)
encoded_layers, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=True)
sequence_output = encoded_layers[-1]
visual_layers, _ = self.visual(video, video_mask, output_all_encoded_layers=True)
visual_output = visual_layers[-1]
return sequence_output, visual_output
def _get_cross_output(self, sequence_output, visual_output, attention_mask, video_mask):
concat_features = torch.cat((sequence_output, visual_output), dim=1) # concatnate tokens and frames
concat_mask = torch.cat((attention_mask, video_mask), dim=1)
text_type_ = torch.zeros_like(attention_mask)
video_type_ = torch.ones_like(video_mask)
concat_type = torch.cat((text_type_, video_type_), dim=1)
cross_layers, pooled_output = self.cross(concat_features, concat_type, concat_mask, output_all_encoded_layers=True)
cross_output = cross_layers[-1]
return cross_output, pooled_output, concat_mask
def _mean_pooling_for_similarity(self, sequence_output, visual_output, attention_mask, video_mask,):
attention_mask_un = attention_mask.to(dtype=torch.float).unsqueeze(-1)
attention_mask_un[:, 0, :] = 0.
sequence_output = sequence_output * attention_mask_un
text_out = torch.sum(sequence_output, dim=1) / torch.sum(attention_mask_un, dim=1, dtype=torch.float)
video_mask_un = video_mask.to(dtype=torch.float).unsqueeze(-1)
visual_output = visual_output * video_mask_un
video_mask_un_sum = torch.sum(video_mask_un, dim=1, dtype=torch.float)
video_mask_un_sum[video_mask_un_sum == 0.] = 1.
video_out = torch.sum(visual_output, dim=1) / video_mask_un_sum
return text_out, video_out
    def _cross_similarity(self, sequence_output, visual_output, attention_mask, video_mask):
        """Pairwise text-video similarity via the cross encoder.

        Every text row is paired with every video row, each pair is run through
        the cross encoder, and a dense layer maps the pooled output to one
        score. Text rows are processed in fixed-size chunks to bound peak
        memory. Returns a (b_text, b_visual) logit matrix.
        """
        b_text, s_text, h_text = sequence_output.size()
        b_visual, s_visual, h_visual = visual_output.size()
        retrieve_logits_list = []
        # Chunk the text batch: step_size rows at a time, plus a remainder chunk.
        step_size = 5
        split_size = [step_size] * (b_text // step_size)
        release_size = b_text - sum(split_size)
        if release_size > 0:
            split_size += [release_size]
        sequence_output_splits = torch.split(sequence_output, split_size, dim=0)
        attention_mask_splits = torch.split(attention_mask, split_size, dim=0)
        for i in range(len(split_size)):
            sequence_output_row = sequence_output_splits[i]
            attention_mask_row = attention_mask_splits[i]
            # Tile each text row once per video: (chunk*b_visual, s_text, h_text).
            sequence_output_l = sequence_output_row.unsqueeze(1).repeat(1, b_visual, 1, 1)
            sequence_output_l = sequence_output_l.view(-1, s_text, h_text)
            attention_mask_l = attention_mask_row.unsqueeze(1).repeat(1, b_visual, 1)
            attention_mask_l = attention_mask_l.view(-1, s_text)
            step_truth = sequence_output_row.size(0)
            # Tile all videos once per text row in the chunk.
            visual_output_r = visual_output.unsqueeze(0).repeat(step_truth, 1, 1, 1)
            visual_output_r = visual_output_r.view(-1, s_visual, h_visual)
            video_mask_r = video_mask.unsqueeze(0).repeat(step_truth, 1, 1)
            video_mask_r = video_mask_r.view(-1, s_visual)
            cross_output, pooled_output, concat_mask = \
                self._get_cross_output(sequence_output_l, visual_output_r, attention_mask_l, video_mask_r)
            # One scalar score per (text, video) pair in this chunk.
            retrieve_logits_row = self.similarity_dense(pooled_output).squeeze(-1).view(step_truth, b_visual)
            retrieve_logits_list.append(retrieve_logits_row)
        retrieve_logits = torch.cat(retrieve_logits_list, dim=0)
        return retrieve_logits
def get_similarity_logits(self, sequence_output, visual_output, attention_mask, video_mask, shaped=False, _pretrain_joint=False):
if shaped is False:
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
if (self._stage_two and _pretrain_joint is False) or self.train_sim_after_cross:
retrieve_logits = self._cross_similarity(sequence_output, visual_output, attention_mask, video_mask)
else:
text_out, video_out = self._mean_pooling_for_similarity(sequence_output, visual_output, attention_mask, video_mask)
if self.task_config.use_mil is False:
text_out = F.normalize(text_out, dim=-1)
video_out = F.normalize(video_out, dim=-1)
retrieve_logits = torch.matmul(text_out, video_out.t())
return retrieve_logits
def _get_decoder_score(self, sequence_output, visual_output, input_ids, attention_mask, video_mask, input_caption_ids, decoder_mask, shaped=False):
if shaped is False:
input_ids = input_ids.view(-1, input_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
input_caption_ids = input_caption_ids.view(-1, input_caption_ids.shape[-1])
decoder_mask = decoder_mask.view(-1, decoder_mask.shape[-1])
res_tuples = ()
cross_output, pooled_output, concat_mask = self._get_cross_output(sequence_output, visual_output, attention_mask, video_mask)
decoder_scores = self.decoder(input_caption_ids, encoder_outs=cross_output, answer_mask=decoder_mask, encoder_mask=concat_mask)
return decoder_scores, res_tuples
def decoder_caption(self, sequence_output, visual_output, input_ids, attention_mask, video_mask, input_caption_ids, decoder_mask,
shaped=False, get_logits=False):
if shaped is False:
input_ids = input_ids.view(-1, input_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
input_caption_ids = input_caption_ids.view(-1, input_caption_ids.shape[-1])
decoder_mask = decoder_mask.view(-1, decoder_mask.shape[-1])
decoder_scores, _ = self._get_decoder_score(sequence_output, visual_output,
input_ids, attention_mask, video_mask,
input_caption_ids, decoder_mask, shaped=True)
if get_logits:
return decoder_scores
_, decoder_scores_result = torch.max(decoder_scores, -1)
return decoder_scores_result | 22,558 | 51.707944 | 153 | py |
UniVL | UniVL-main/modules/until_module.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
import logging
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import math
from modules.until_config import PretrainedConfig
logger = logging.getLogger(__name__)
def gelu(x):
    """Gaussian Error Linear Unit, exact erf form.

    For information: OpenAI GPT's gelu is a slightly different tanh
    approximation:
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class LayerNorm(nn.Module):
    """TF-style layer normalization: epsilon is added inside the square root."""

    def __init__(self, hidden_size, eps=1e-12):
        super(LayerNorm, self).__init__()
        # Attribute names match TF checkpoints ('weight'/'bias' after key rename).
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        centered = x - mean
        variance = centered.pow(2).mean(-1, keepdim=True)
        normalized = centered / torch.sqrt(variance + self.variance_epsilon)
        return normalized * self.weight + self.bias
class PreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        super(PreTrainedModel, self).__init__()
        if not isinstance(config, PretrainedConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config

    def init_weights(self, module):
        """ Initialize the weights of a single sub-module (used with model.apply).
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, LayerNorm):
            # Older checkpoints expose TF-style 'beta'/'gamma' attributes;
            # newer ones use 'bias'/'weight'.
            if 'beta' in dir(module) and 'gamma' in dir(module):
                module.beta.data.zero_()
                module.gamma.data.fill_(1.0)
            else:
                module.bias.data.zero_()
                module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def resize_token_embeddings(self, new_num_tokens=None):
        raise NotImplementedError

    @classmethod
    def init_preweight(cls, model, state_dict, prefix=None, task_config=None):
        """Load `state_dict` into `model`, renaming TF-style keys and
        optionally prepending `prefix` to every key. Logs (on rank 0) any
        missing/unexpected keys instead of raising.
        """
        # Rename legacy TF keys: gamma -> weight, beta -> bias.
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        if prefix is not None:
            # Re-root every key under the given prefix (e.g. 'bert.').
            old_keys = []
            new_keys = []
            for key in state_dict.keys():
                old_keys.append(key)
                new_keys.append(prefix + key)
            for old_key, new_key in zip(old_keys, new_keys):
                state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        # Recursively load each sub-module's slice of the state dict.
        def load(module, prefix=''):
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        load(model, prefix='')
        if prefix is None and (task_config is None or task_config.local_rank == 0):
            logger.info("-" * 20)
            if len(missing_keys) > 0:
                logger.info("Weights of {} not initialized from pretrained model: {}"
                            .format(model.__class__.__name__, "\n   " + "\n   ".join(missing_keys)))
            if len(unexpected_keys) > 0:
                logger.info("Weights from pretrained model not used in {}: {}"
                            .format(model.__class__.__name__, "\n   " + "\n   ".join(unexpected_keys)))
            if len(error_msgs) > 0:
                logger.error("Weights from pretrained model cause errors in {}: {}"
                             .format(model.__class__.__name__, "\n   " + "\n   ".join(error_msgs)))
        return model

    @property
    def dtype(self):
        """
        :obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
        """
        try:
            return next(self.parameters()).dtype
        except StopIteration:
            # For nn.DataParallel compatibility in PyTorch 1.5
            def find_tensor_attributes(module: nn.Module):
                tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
                return tuples
            gen = self._named_members(get_members_fn=find_tensor_attributes)
            first_tuple = next(gen)
            return first_tuple[1].dtype

    @classmethod
    def from_pretrained(cls, config, state_dict=None, *inputs, **kwargs):
        """
        Instantiate a PreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.
        """
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None:
            return model
        model = cls.init_preweight(model, state_dict)
        return model
##################################
###### LOSS FUNCTION #############
##################################
class CrossEn(nn.Module):
    """Cross-entropy over a similarity matrix.

    The diagonal of ``sim_matrix`` holds the positive pairs; every other
    column in the same row acts as a negative.
    """

    def __init__(self,):
        super(CrossEn, self).__init__()

    def forward(self, sim_matrix):
        log_probs = F.log_softmax(sim_matrix, dim=-1)
        positive_logpt = torch.diag(log_probs)
        return (-positive_logpt).mean()
class MILNCELoss(nn.Module):
    """MIL-NCE loss over a similarity matrix with ``n_pair`` positives per sample.

    Positives are the block-diagonal (batch x n_pair) entries of the
    (batch*n_pair, batch*n_pair) similarity matrix.
    """

    def __init__(self, batch_size=1, n_pair=1,):
        super(MILNCELoss, self).__init__()
        self.batch_size = batch_size
        self.n_pair = n_pair
        # BUGFIX: the old check float(".".join(version.split(".")[:2])) parses
        # torch "1.13" as 1.13, which compares < 1.3 and wrongly selects
        # torch.uint8 on modern torch. Compare (major, minor) int tuples instead.
        major_minor = tuple(int(p) for p in torch.__version__.split(".")[:2])
        self.bool_dtype = torch.bool if major_minor >= (1, 3) else torch.uint8

    def forward(self, sim_matrix):
        """Compute the MIL-NCE loss for a (batch*n_pair, batch*n_pair) matrix."""
        # Block-diagonal mask marking all positive (same-sample) pairs.
        mm_mask = np.eye(self.batch_size)
        mm_mask = np.kron(mm_mask, np.ones((self.n_pair, self.n_pair)))
        mm_mask = torch.tensor(mm_mask).float().to(sim_matrix.device)
        # Text->video direction: suppress positives so they only act as
        # candidates on the video->text side of the concatenated matrix.
        from_text_matrix = sim_matrix + mm_mask * -1e12
        from_video_matrix = sim_matrix.transpose(1, 0)
        new_sim_matrix = torch.cat([from_video_matrix, from_text_matrix], dim=-1)
        logpt = F.log_softmax(new_sim_matrix, dim=-1)
        mm_mask_logpt = torch.cat([mm_mask, torch.zeros_like(mm_mask)], dim=-1)
        # Keep only positive-pair log-probs, then aggregate them per row.
        masked_logpt = logpt + (torch.ones_like(mm_mask_logpt) - mm_mask_logpt) * -1e12
        new_logpt = -torch.logsumexp(masked_logpt, dim=-1)
        logpt_choice = torch.zeros_like(new_logpt)
        # One representative (middle) pair per sample contributes to the mean.
        mark_ind = torch.arange(self.batch_size).to(sim_matrix.device) * self.n_pair + (self.n_pair//2)
        logpt_choice[mark_ind] = 1
        sim_loss = new_logpt.masked_select(logpt_choice.to(dtype=self.bool_dtype)).mean()
        return sim_loss
class MaxMarginRankingLoss(nn.Module):
    """Max-margin ranking loss over a similarity matrix.

    The diagonal holds positive-pair scores; the hinge penalizes any
    off-diagonal score that comes within ``margin`` of its row/column
    positive. Optional negative weighting down-weights easy negatives.
    """

    def __init__(self,
                 margin=1.0,
                 negative_weighting=False,
                 batch_size=1,
                 n_pair=1,
                 hard_negative_rate=0.5,
                 ):
        super(MaxMarginRankingLoss, self).__init__()
        self.margin = margin
        self.n_pair = n_pair
        self.batch_size = batch_size
        easy_negative_rate = 1 - hard_negative_rate
        self.easy_negative_rate = easy_negative_rate
        self.negative_weighting = negative_weighting
        if n_pair > 1 and batch_size > 1:
            # Precompute the weighting mask once; it only depends on shapes.
            alpha = easy_negative_rate / ((batch_size - 1) * (1 - easy_negative_rate))
            mm_mask = (1 - alpha) * np.eye(self.batch_size) + alpha
            mm_mask = np.kron(mm_mask, np.ones((n_pair, n_pair)))
            mm_mask = torch.tensor(mm_mask) * (batch_size * (1 - easy_negative_rate))
            self.mm_mask = mm_mask.float()

    def forward(self, x):
        diagonal = torch.diag(x)
        # Hinge against the row positive and against the column positive.
        column_margin = F.relu(self.margin + x - diagonal.view(-1, 1))
        row_margin = F.relu(self.margin + x - diagonal.view(1, -1))
        max_margin = column_margin + row_margin
        if self.negative_weighting and self.n_pair > 1 and self.batch_size > 1:
            max_margin = max_margin * self.mm_mask.to(max_margin.device)
        return max_margin.mean()
| 10,299 | 39.873016 | 114 | py |
UniVL | UniVL-main/modules/beam.py | """
Manage beam search info structure.
Heavily borrowed from OpenNMT-py.
For code in OpenNMT-py, please check the following link (maybe in oldest version):
https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/Beam.py
"""
import torch
class Constants():
    """Special-token ids and surface forms used by beam search."""

    def __init__(self):
        # Default ids follow the conventional ordering; surface forms are
        # BERT-style special tokens ([CLS] as BOS, [SEP] as EOS).
        self.PAD = 0
        self.UNK = 1
        self.BOS = 2
        self.EOS = 3
        self.PAD_WORD = '[PAD]'
        self.UNK_WORD = '[UNK]'
        self.BOS_WORD = '[CLS]'
        self.EOS_WORD = '[SEP]'

    @classmethod
    def from_tokenizer(cls, tokenizer):
        """Build constants whose ids come from the tokenizer's vocabulary."""
        instance = cls()
        vocab = tokenizer.vocab
        instance.PAD = vocab[instance.PAD_WORD]
        instance.UNK = vocab[instance.UNK_WORD]
        instance.BOS = vocab[instance.BOS_WORD]
        instance.EOS = vocab[instance.EOS_WORD]
        return instance
class Beam():
    ''' Beam search bookkeeping: scores, backpointers and emitted tokens
    for one sentence, one decoding step at a time. '''
    def __init__(self, size, device=False, tokenizer=None):
        # Token-id constants come from the tokenizer when supplied, otherwise
        # the BERT-style defaults in Constants are used.
        if tokenizer is None:
            self.constants = Constants()
        else:
            self.constants = Constants.from_tokenizer(tokenizer)
        self.size = size
        self._done = False
        # The cumulative score for each hypothesis on the beam.
        self.scores = torch.zeros((size,), dtype=torch.float, device=device)
        self.all_scores = []
        # The backpointers at each time-step.
        self.prev_ks = []
        # The outputs at each time-step; step 0 is BOS for every beam slot.
        self.next_ys = [torch.full((size,), self.constants.BOS, dtype=torch.long, device=device)]
    def get_current_state(self):
        "Get the outputs for the current timestep."
        return self.get_tentative_hypothesis()
    def get_current_origin(self):
        "Get the backpointers for the current timestep."
        return self.prev_ks[-1]
    @property
    def done(self):
        # True once the top-of-beam hypothesis has emitted EOS.
        return self._done
    def advance(self, word_prob, word_length=None):
        "Update beam status and check if finished or not."
        num_words = word_prob.size(1)
        # Sum the previous scores.
        if len(self.prev_ks) > 0:
            beam_lk = word_prob + self.scores.unsqueeze(1).expand_as(word_prob)
        else:
            # First step: all beam slots are identical, so only row 0 matters.
            beam_lk = word_prob[0]
        flat_beam_lk = beam_lk.view(-1)
        best_scores, best_scores_id = flat_beam_lk.topk(self.size, 0, True, True) # 1st sort
        self.all_scores.append(self.scores)
        self.scores = best_scores
        # bestScoresId is flattened as a (beam x word) array,
        # so we need to calculate which word and beam each score came from
        prev_k = best_scores_id // num_words
        self.prev_ks.append(prev_k)
        self.next_ys.append(best_scores_id - prev_k * num_words)
        # End condition is when top-of-beam is EOS.
        if self.next_ys[-1][0].item() == self.constants.EOS:
            self._done = True
        return self._done
    def sort_scores(self):
        "Sort the scores."
        return torch.sort(self.scores, 0, True)
    def get_the_best_score_and_idx(self):
        "Get the score of the best in the beam."
        scores, ids = self.sort_scores()
        return scores[1], ids[1]
    def get_tentative_hypothesis(self):
        "Get the decoded sequence for the current timestep."
        if len(self.next_ys) == 1:
            dec_seq = self.next_ys[0].unsqueeze(1)
        else:
            _, keys = self.sort_scores()
            hyps = [self.get_hypothesis(k) for k in keys]
            hyps = [[self.constants.BOS] + h for h in hyps]
            dec_seq = torch.LongTensor(hyps)
        return dec_seq
    def get_hypothesis(self, k):
        """ Walk back to construct the full hypothesis. """
        hyp = []
        # Follow backpointers from the last step to the first, then reverse.
        for j in range(len(self.prev_ks) - 1, -1, -1):
            hyp.append(self.next_ys[j+1][k])
            k = self.prev_ks[j][k]
        return list(map(lambda x: x.item(), hyp[::-1]))
| 3,840 | 31.82906 | 97 | py |
UniVL | UniVL-main/modules/module_bert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import torch
from torch import nn
import torch.nn.functional as F
from .file_utils import cached_path
from .until_config import PretrainedConfig
from .until_module import PreTrainedModel, LayerNorm, ACT2FN
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
class BertConfig(PretrainedConfig):
    """Configuration object holding the hyper-parameters of a `BertModel`."""
    pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
    config_name = CONFIG_NAME
    weights_name = WEIGHTS_NAME
    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02):
        """Build a config from a JSON file path or from explicit values.

        Args:
            vocab_size_or_config_json_file: vocabulary size (int), or the
                path (str) of a JSON config file whose keys become attributes.
            hidden_size: width of the encoder layers and the pooler layer.
            num_hidden_layers: number of Transformer encoder layers.
            num_attention_heads: attention heads per encoder layer.
            intermediate_size: width of the feed-forward ("intermediate") layer.
            hidden_act: activation name ("gelu", "relu", "swish") or callable.
            hidden_dropout_prob: dropout for fully-connected layers.
            attention_probs_dropout_prob: dropout on attention probabilities.
            max_position_embeddings: maximum supported sequence length.
            type_vocab_size: vocabulary size of `token_type_ids`.
            initializer_range: stddev of the weight initializer.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Treat the argument as a path: every JSON key becomes an attribute.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                config_dict = json.loads(reader.read())
            for key, value in config_dict.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")
class BertEmbeddings(nn.Module):
    """Sum of word, position and token-type embeddings, then LayerNorm + dropout."""
    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased so TF checkpoint variable names
        # still map onto this module when loading pretrained weights.
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        seq_length = input_ids.size(1)
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        embeddings = (self.word_embeddings(input_ids)
                      + self.position_embeddings(position_ids)
                      + self.token_type_embeddings(token_type_ids))
        return self.dropout(self.LayerNorm(embeddings))
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertSelfOutput(nn.Module):
    """Projects attention output, then dropout + residual LayerNorm."""
    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Self-attention sub-layer: attention followed by its output projection."""
    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        attended = self.self(input_tensor, attention_mask)
        return self.output(attended, input_tensor)
class BertIntermediate(nn.Module):
    """Position-wise feed-forward expansion with the configured activation."""
    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # Accept either an activation name or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Feed-forward contraction back to hidden size, dropout + residual LayerNorm."""
    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        contracted = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(contracted + input_tensor)
class BertLayer(nn.Module):
    """One Transformer encoder block: attention then feed-forward."""
    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask):
        attention_output = self.attention(hidden_states, attention_mask)
        return self.output(self.intermediate(attention_output), attention_output)
class BertEncoder(nn.Module):
    """Stack of ``num_hidden_layers`` identical BertLayer blocks."""
    def __init__(self, config):
        super(BertEncoder, self).__init__()
        prototype = BertLayer(config)
        self.layer = nn.ModuleList([copy.deepcopy(prototype) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        """Return a list of hidden states: one per layer, or only the last."""
        collected = []
        for block in self.layer:
            hidden_states = block(hidden_states, attention_mask)
            if output_all_encoded_layers:
                collected.append(hidden_states)
        if not output_all_encoded_layers:
            collected.append(hidden_states)
        return collected
class BertPooler(nn.Module):
    """Pools the sequence by transforming the first ([CLS]) token's hidden state."""
    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pooling" here is simply dense + tanh on the first token's state.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied before the LM decoder."""
    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # Accept either an activation name or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)

    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
class BertLMPredictionHead(nn.Module):
    """LM head whose decoder weights are tied to the input embeddings."""
    def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        vocab_size = bert_model_embedding_weights.size(0)
        embed_dim = bert_model_embedding_weights.size(1)
        # Weight tying: reuse the embedding matrix; only the output bias is new.
        self.decoder = nn.Linear(embed_dim, vocab_size, bias=False)
        self.decoder.weight = bert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(vocab_size))

    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        return self.decoder(transformed) + self.bias
class BertOnlyMLMHead(nn.Module):
    """Wraps the tied LM prediction head for MLM-only pretraining."""
    def __init__(self, config, bert_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """Binary next-sentence-prediction classifier over the pooled output."""
    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
class BertPreTrainingHeads(nn.Module):
    # Combined pre-training heads: masked-LM logits over the full sequence and
    # next-sentence logits over the pooled [CLS] representation.
    def __init__(self, config, bert_model_embedding_weights):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        # Returns ([..., vocab_size] MLM logits, [batch, 2] NSP logits).
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score
class BertModel(PreTrainedModel):
    """BERT model ("Bidirectional Embedding Representations from a Transformer").
    Params:
        config: a BertConfig class instance with the configuration to build a new model
    Inputs:
        `type`: a str, indicates which masking will be used in the attention, choice from [`bi`, `seq`, `gen`]
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
    Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
            - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
                of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
                encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
            - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
                to the last attention block of shape [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first character of the
            input (`CLF`) to train on the Next-Sentence task (see BERT's paper).
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = modeling.BertModel(config=config)
    all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertModel, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        # Recursively initialize weights on every submodule.
        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
        # Defaults: attend everywhere, and treat every token as sentence A.
        # ones_like/zeros_like inherit input_ids's device, so this is GPU-safe.
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(embedding_output,
                                      extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            # Collapse the list to just the final layer's hidden states.
            encoded_layers = encoded_layers[-1]
        # NOTE(review): the source line here carried trailing "| 21,157 | ... | py |"
        # dataset-export residue, which is not program text and has been dropped.
        return encoded_layers, pooled_output
UniVL | UniVL-main/modules/module_cross.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import torch
from torch import nn
import torch.nn.functional as F
from .file_utils import cached_path
from .until_config import PretrainedConfig
from .until_module import PreTrainedModel, LayerNorm, ACT2FN
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {}
CONFIG_NAME = 'cross_config.json'
WEIGHTS_NAME = 'cross_pytorch_model.bin'
class CrossConfig(PretrainedConfig):
    """Configuration class to store the configuration of a `CrossModel`.
    """
    # Hooks consumed by PretrainedConfig.get_config to locate archives/weights.
    pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
    config_name = CONFIG_NAME
    weights_name = WEIGHTS_NAME

    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02):
        """Constructs CrossConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `CrossModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probabilitiy for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `CrossModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # A path was given: load every field verbatim from the JSON file.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            # An int was given: treat it as the vocab size and take the keyword defaults.
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")
class CrossEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.

    The word/feature embedding is supplied by the caller as `concat_embeddings`;
    this module only adds position and token-type embeddings on top of it.
    """
    def __init__(self, config):
        super(CrossEmbeddings, self).__init__()
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, concat_embeddings, concat_type=None):
        """Add position/type embeddings to `concat_embeddings`, then LayerNorm+dropout.

        Args:
            concat_embeddings: [batch, seq_len, hidden] input embeddings.
            concat_type: optional [batch, seq_len] integer token-type ids;
                defaults to all zeros (type 0 everywhere).
        """
        batch_size, seq_length = concat_embeddings.size(0), concat_embeddings.size(1)
        if concat_type is None:
            # BUGFIX: the original called torch.zeros(batch_size, concat_type),
            # passing None as a size argument (TypeError at runtime). Default to
            # type id 0 for every position; nn.Embedding requires long indices,
            # and the tensor must live on the same device as the input.
            concat_type = torch.zeros(batch_size, seq_length, dtype=torch.long,
                                      device=concat_embeddings.device)
        position_ids = torch.arange(seq_length, dtype=torch.long, device=concat_embeddings.device)
        position_ids = position_ids.unsqueeze(0).expand(concat_embeddings.size(0), -1)

        token_type_embeddings = self.token_type_embeddings(concat_type)
        position_embeddings = self.position_embeddings(position_ids)

        embeddings = concat_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class CrossSelfAttention(nn.Module):
    # Standard multi-head scaled-dot-product self-attention.
    def __init__(self, config):
        super(CrossSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # [batch, seq, all_head] -> [batch, heads, seq, head_size]
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask is (precomputed for all layers in CrossModel forward() function)
        # (additive mask: 0 where attended, -10000 where masked)
        attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)
        # [batch, heads, seq, head_size] -> [batch, seq, all_head]
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer
class CrossSelfOutput(nn.Module):
    """Output projection of the attention sub-layer:
    dense -> dropout -> residual add -> LayerNorm."""

    def __init__(self, config):
        super(CrossSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        """Project attention output, then add the residual `input_tensor` and normalize."""
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class CrossAttention(nn.Module):
    """Self-attention followed by its output projection (residual + LayerNorm)."""

    def __init__(self, config):
        super(CrossAttention, self).__init__()
        self.self = CrossSelfAttention(config)
        self.output = CrossSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        """Attend over `input_tensor` and fuse the result with the residual path."""
        attended = self.self(input_tensor, attention_mask)
        return self.output(attended, input_tensor)
class CrossIntermediate(nn.Module):
    """Position-wise feed-forward expansion: hidden_size -> intermediate_size + activation."""

    def __init__(self, config):
        super(CrossIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # hidden_act may be a name (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        """Expand and apply the configured non-linearity."""
        return self.intermediate_act_fn(self.dense(hidden_states))
class CrossOutput(nn.Module):
    # Feed-forward output projection: intermediate_size -> hidden_size,
    # then dropout, residual add and LayerNorm.
    def __init__(self, config):
        super(CrossOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # input_tensor is the residual branch (the attention sub-layer output).
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
class CrossLayer(nn.Module):
    """One transformer block: attention sub-layer, then feed-forward sub-layer."""

    def __init__(self, config):
        super(CrossLayer, self).__init__()
        self.attention = CrossAttention(config)
        self.intermediate = CrossIntermediate(config)
        self.output = CrossOutput(config)

    def forward(self, hidden_states, attention_mask):
        """Apply attention, then the position-wise feed-forward network."""
        attended = self.attention(hidden_states, attention_mask)
        expanded = self.intermediate(attended)
        return self.output(expanded, attended)
class CrossEncoder(nn.Module):
    """Stack of `num_hidden_layers` independent CrossLayer blocks."""

    def __init__(self, config):
        super(CrossEncoder, self).__init__()
        # deepcopy gives each layer its own parameters (no weight sharing).
        prototype = CrossLayer(config)
        self.layer = nn.ModuleList(
            copy.deepcopy(prototype) for _ in range(config.num_hidden_layers))

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        """Return per-layer hidden states, or a one-element list with only the last."""
        collected = []
        for block in self.layer:
            hidden_states = block(hidden_states, attention_mask)
            if output_all_encoded_layers:
                collected.append(hidden_states)
        if not output_all_encoded_layers:
            collected.append(hidden_states)
        return collected
class CrossPooler(nn.Module):
    """Pool the sequence: the first token's hidden state through a tanh-activated dense layer."""

    def __init__(self, config):
        super(CrossPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        """Return the [batch, hidden] pooled representation of token 0."""
        first_token = hidden_states[:, 0]
        return self.activation(self.dense(first_token))
class CrossPredictionHeadTransform(nn.Module):
    # Shape-preserving transform before the LM decoder: dense -> activation -> LayerNorm.
    def __init__(self, config):
        super(CrossPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # hidden_act may be a name (looked up in ACT2FN) or a callable.
        self.transform_act_fn = ACT2FN[config.hidden_act] \
            if isinstance(config.hidden_act, str) else config.hidden_act
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states
class CrossLMPredictionHead(nn.Module):
    # Masked-LM head for the cross encoder; decoder weights are tied to the
    # embedding matrix passed in by the caller.
    def __init__(self, config, cross_model_embedding_weights):
        super(CrossLMPredictionHead, self).__init__()
        self.transform = CrossPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(cross_model_embedding_weights.size(1),
                                 cross_model_embedding_weights.size(0),
                                 bias=False)
        self.decoder.weight = cross_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(cross_model_embedding_weights.size(0)))

    def forward(self, hidden_states):
        # [..., hidden_size] -> [..., vocab_size] per-token logits.
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states) + self.bias
        return hidden_states
class CrossOnlyMLMHead(nn.Module):
    # Pre-training head exposing only the masked-LM predictions (no NSP).
    def __init__(self, config, cross_model_embedding_weights):
        super(CrossOnlyMLMHead, self).__init__()
        self.predictions = CrossLMPredictionHead(config, cross_model_embedding_weights)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores
class CrossOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: pooled vector -> 2-way logits."""

    def __init__(self, config):
        super(CrossOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        """Return [batch, 2] relationship logits."""
        return self.seq_relationship(pooled_output)
class CrossPreTrainingHeads(nn.Module):
    # Combined pre-training heads: masked-LM logits over the sequence and
    # 2-way relationship logits over the pooled representation.
    def __init__(self, config, cross_model_embedding_weights):
        super(CrossPreTrainingHeads, self).__init__()
        self.predictions = CrossLMPredictionHead(config, cross_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score
class CrossModel(PreTrainedModel):
    """Transformer encoder applied to a pre-embedded, concatenated input.

    Inputs:
        concat_input: [batch, seq_len, hidden] embeddings supplied by the caller.
        concat_type: optional [batch, seq_len] integer token-type ids (default all 0).
        attention_mask: optional [batch, seq_len] mask, 1 = attend, 0 = masked
            (default all 1).
        output_all_encoded_layers: if True, return hidden states of every layer.

    Outputs: (encoded_layers, pooled_output), same convention as BertModel.
    """
    def __init__(self, config):
        super(CrossModel, self).__init__(config)
        self.embeddings = CrossEmbeddings(config)
        self.encoder = CrossEncoder(config)
        self.pooler = CrossPooler(config)
        self.apply(self.init_weights)

    def forward(self, concat_input, concat_type=None, attention_mask=None, output_all_encoded_layers=True):
        if attention_mask is None:
            # BUGFIX: build the default mask on the input's device; the original
            # allocated it on the CPU, which breaks when concat_input is on GPU.
            attention_mask = torch.ones(concat_input.size(0), concat_input.size(1),
                                        device=concat_input.device)
        if concat_type is None:
            # BUGFIX: token-type ids feed an nn.Embedding and must be long;
            # zeros_like(attention_mask) alone inherits attention_mask's float dtype.
            concat_type = torch.zeros_like(attention_mask, dtype=torch.long)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        # 0.0 where we may attend, -10000.0 where masked; added to the raw scores
        # before softmax, which effectively removes masked positions.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        embedding_output = self.embeddings(concat_input, concat_type)
        encoded_layers = self.encoder(embedding_output,
                                      extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            # Collapse the list to just the final layer's hidden states.
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
| 17,516 | 43.346835 | 108 | py |
UniVL | UniVL-main/modules/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import os
import logging
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Optional, Tuple, Union, IO, Callable, Set
from hashlib import sha256
from functools import wraps
from tqdm import tqdm
import boto3
from botocore.exceptions import ClientError
import requests
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
def url_to_filename(url: str, etag: str = None) -> str:
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the url's, delimited
    by a period.
    """
    filename = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        filename = filename + '.' + sha256(etag.encode('utf-8')).hexdigest()
    return filename
def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]:
    """
    Return the url and etag (which may be ``None``) stored for `filename`.
    Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    # Normalize a possible Path to str; str(str) is a no-op.
    cache_path = os.path.join(str(cache_dir), filename)
    if not os.path.exists(cache_path):
        raise FileNotFoundError("file {} not found".format(cache_path))

    # The sidecar <file>.json records the source url and etag.
    meta_path = cache_path + '.json'
    if not os.path.exists(meta_path):
        raise FileNotFoundError("file {} not found".format(meta_path))

    with open(meta_path) as meta_file:
        metadata = json.load(meta_file)
    return metadata['url'], metadata['etag']
def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str:
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    # Normalize possible Path objects; str(str) is a no-op.
    url_or_filename = str(url_or_filename)
    cache_dir = str(cache_dir)

    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # Remote resource: fetch through the local cache (downloading if necessary).
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        # Existing local file: hand the path straight back.
        return url_or_filename
    if scheme == '':
        # Looks like a local path, but nothing is there.
        raise FileNotFoundError("file {} not found".format(url_or_filename))
    # Something unknown (unsupported scheme).
    raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url: str) -> Tuple[str, str]:
    """Split a full s3 path into the bucket name and path."""
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError("bad s3 path {}".format(url))
    # Drop exactly one leading '/' that urlparse keeps on the key.
    key = parsed.path
    if key.startswith("/"):
        key = key[1:]
    return parsed.netloc, key
def s3_request(func: Callable):
    """
    Wrapper function for s3 requests in order to create more helpful error
    messages.
    """
    @wraps(func)
    def guarded(url: str, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            # A 404 from S3 means the object is missing -> raise the same
            # FileNotFoundError local paths produce; re-raise anything else.
            if int(exc.response["Error"]["Code"]) == 404:
                raise FileNotFoundError("file {} not found".format(url))
            raise
    return guarded
@s3_request
def s3_etag(url: str) -> Optional[str]:
    """Check ETag on S3 object."""
    # NOTE(review): requires boto3 credentials/config at call time; S3 errors
    # are translated into FileNotFoundError by the s3_request decorator.
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    s3_object = s3_resource.Object(bucket_name, s3_path)
    return s3_object.e_tag
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
    """Pull a file directly from S3."""
    # Streams the object body into the caller-provided (binary) file handle.
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url: str, temp_file: IO) -> None:
    """Stream `url` into `temp_file` (binary mode), showing a tqdm progress bar.

    Raises:
        requests.HTTPError: on a non-2xx response.
    """
    req = requests.get(url, stream=True)
    try:
        # BUGFIX: without this check a 4xx/5xx error page would be written into
        # the cache as if it were the requested file.
        req.raise_for_status()
        content_length = req.headers.get('Content-Length')
        total = int(content_length) if content_length is not None else None
        progress = tqdm(unit="B", total=total)
        try:
            for chunk in req.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    progress.update(len(chunk))
                    temp_file.write(chunk)
        finally:
            progress.close()
    finally:
        # Release the connection even on error (the original leaked it).
        req.close()
def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str:
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    # Get eTag to add to filename, if it exists: the cache key is
    # sha256(url) plus sha256(etag), so a changed remote file re-downloads.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        response = requests.head(url, allow_redirects=True)
        if response.status_code != 200:
            raise IOError("HEAD request failed for url {} with status code {}"
                          .format(url, response.status_code))
        etag = response.headers.get("ETag")

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)

            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)

            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)

            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)

            # Sidecar <file>.json is what filename_to_url reads back.
            logger.info("creating metadata file for %s", cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w') as meta_file:
                json.dump(meta, meta_file)

            # NamedTemporaryFile deletes itself when the `with` block exits.
            logger.info("removing temp file %s", temp_file.name)

    return cache_path
def read_set_from_file(filename: str) -> Set[str]:
    '''
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line.
    '''
    with open(filename, 'r', encoding='utf-8') as file_:
        # rstrip() also removes the trailing newline on each line.
        return {line.rstrip() for line in file_}
def get_file_extension(path: str, dot=True, lower: bool = True):
    """Return `path`'s extension, optionally without the leading dot and/or lower-cased."""
    _, ext = os.path.splitext(path)
    if not dot:
        ext = ext[1:]
    return ext.lower() if lower else ext
| 8,021 | 32.425 | 98 | py |
UniVL | UniVL-main/modules/until_config.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import logging
import tarfile
import tempfile
import shutil
import torch
from .file_utils import cached_path
logger = logging.getLogger(__name__)
class PretrainedConfig(object):
    # Base class for model configurations. Subclasses set these three class
    # attributes to point at their archive map and config/weights file names.
    pretrained_model_archive_map = {}
    config_name = ""
    weights_name = ""

    @classmethod
    def get_config(cls, pretrained_model_name, cache_dir, type_vocab_size, state_dict, task_config=None):
        # Resolve `pretrained_model_name` to an archive: first a path next to
        # this module, then the class's archive map, finally as a raw path/URL.
        archive_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), pretrained_model_name)
        if os.path.exists(archive_file) is False:
            if pretrained_model_name in cls.pretrained_model_archive_map:
                archive_file = cls.pretrained_model_archive_map[pretrained_model_name]
            else:
                archive_file = pretrained_model_name

        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
        except FileNotFoundError:
            # Only rank 0 logs in distributed runs (same pattern below).
            if task_config is None or task_config.local_rank == 0:
                logger.error(
                    "Model name '{}' was not found in model name list. "
                    "We assumed '{}' was a path or url but couldn't find any file "
                    "associated to this path or url.".format(
                        pretrained_model_name,
                        archive_file))
            return None
        if resolved_archive_file == archive_file:
            if task_config is None or task_config.local_rank == 0:
                logger.info("loading archive file {}".format(archive_file))
        else:
            if task_config is None or task_config.local_rank == 0:
                logger.info("loading archive file {} from cache at {}".format(
                    archive_file, resolved_archive_file))
        tempdir = None
        if os.path.isdir(resolved_archive_file):
            serialization_dir = resolved_archive_file
        else:
            # Extract archive to temp dir
            tempdir = tempfile.mkdtemp()
            if task_config is None or task_config.local_rank == 0:
                logger.info("extracting archive file {} to temp dir {}".format(
                    resolved_archive_file, tempdir))
            # NOTE(review): extractall on an untrusted tarball can write outside
            # the target dir (path traversal) — confirm archives are trusted.
            with tarfile.open(resolved_archive_file, 'r:gz') as archive:
                archive.extractall(tempdir)
            serialization_dir = tempdir
        # Load config
        config_file = os.path.join(serialization_dir, cls.config_name)
        config = cls.from_json_file(config_file)
        config.type_vocab_size = type_vocab_size
        if task_config is None or task_config.local_rank == 0:
            logger.info("Model config {}".format(config))

        if state_dict is None:
            weights_path = os.path.join(serialization_dir, cls.weights_name)
            if os.path.exists(weights_path):
                state_dict = torch.load(weights_path, map_location='cpu')
            else:
                if task_config is None or task_config.local_rank == 0:
                    logger.info("Weight doesn't exsits. {}".format(weights_path))

        if tempdir:
            # Clean up temp dir
            shutil.rmtree(tempdir)

        return config, state_dict

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        # NOTE(review): the source line here carried trailing "| 5,036 | ... | py |"
        # dataset-export residue, which is not program text and has been dropped.
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
UniVL | UniVL-main/dataloaders/dataloader_msrvtt_caption.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import os
from torch.utils.data import Dataset
import numpy as np
import pickle
import pandas as pd
from collections import defaultdict
import json
import random
class MSRVTT_Caption_DataLoader(Dataset):
    """MSRVTT caption dataset loader.

    Produces, per sample: encoder text tensors (the text side is intentionally
    empty — just [CLS]/[SEP]), video feature tensors, their masked variants
    for the masked-LM / masked-frame objectives, and decoder input/output
    caption ids for caption generation.
    """
    def __init__(
            self,
            csv_path,
            json_path,
            features_path,
            tokenizer,
            max_words=30,
            feature_framerate=1.0,
            max_frames=100,
            split_type=""
    ):
        self.csv = pd.read_csv(csv_path)
        self.data = json.load(open(json_path, 'r'))
        self.feature_dict = pickle.load(open(features_path, 'rb'))
        self.feature_framerate = feature_framerate
        self.max_words = max_words
        self.max_frames = max_frames
        self.tokenizer = tokenizer
        # Feature dimensionality, inferred from the first video listed in the csv.
        self.feature_size = self.feature_dict[self.csv['video_id'].values[0]].shape[-1]
        assert split_type in ["train", "val", "test"]
        # Train: video0 : video6512 (6513)
        # Val: video6513 : video7009 (497)
        # Test: video7010 : video9999 (2990)
        video_ids = [self.data['videos'][idx]['video_id'] for idx in range(len(self.data['videos']))]
        split_dict = {"train": video_ids[:6513], "val": video_ids[6513:6513 + 497], "test": video_ids[6513 + 497:]}
        choiced_video_ids = split_dict[split_type]
        # Membership is tested once per sentence; a set keeps that O(1)
        # instead of a linear scan over thousands of ids.
        choiced_video_set = set(choiced_video_ids)
        self.sample_len = 0
        self.sentences_dict = {}
        self.video_sentences_dict = defaultdict(list)
        if split_type == "train":  # expand all sentences for training
            for itm in self.data['sentences']:
                if itm['video_id'] in choiced_video_set:
                    self.sentences_dict[len(self.sentences_dict)] = (itm['video_id'], itm['caption'])
                    self.video_sentences_dict[itm['video_id']].append(itm['caption'])
        elif split_type == "val" or split_type == "test":
            # Keep every caption for evaluation, but iterate only one sample
            # per video (its first caption) via `sentences_dict`.
            for itm in self.data['sentences']:
                if itm['video_id'] in choiced_video_set:
                    self.video_sentences_dict[itm['video_id']].append(itm['caption'])
            for vid in choiced_video_ids:
                self.sentences_dict[len(self.sentences_dict)] = (vid, self.video_sentences_dict[vid][0])
        else:
            raise NotImplementedError
        self.sample_len = len(self.sentences_dict)

    def __len__(self):
        return self.sample_len

    def _get_text(self, video_id, caption=None):
        """Build encoder text tensors (empty text: [CLS]/[SEP] only) plus
        decoder caption tensors; each array is shaped (1, max_words).

        Returns (text, mask, segment, masked_text, token_labels,
        input_caption_ids, decoder_mask, output_caption_ids, video_ids).
        """
        k = 1
        choice_video_ids = [video_id]
        # np.long / np.float were removed in NumPy 1.24; use the concrete
        # dtypes they aliased.
        pairs_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_mask = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_segment = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_masked_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_token_labels = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_input_caption_ids = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_output_caption_ids = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_decoder_mask = np.zeros((k, self.max_words), dtype=np.int64)
        for i, video_id in enumerate(choice_video_ids):
            # The encoder-side text is intentionally empty for captioning.
            words = []
            words = ["[CLS]"] + words
            total_length_with_CLS = self.max_words - 1
            if len(words) > total_length_with_CLS:
                words = words[:total_length_with_CLS]
            words = words + ["[SEP]"]
            # Mask Language Model <-----
            token_labels = []
            masked_tokens = words.copy()
            for token_id, token in enumerate(masked_tokens):
                if token_id == 0 or token_id == len(masked_tokens) - 1:
                    # Never mask [CLS]/[SEP]; -1 is ignored by the loss.
                    token_labels.append(-1)
                    continue
                prob = random.random()
                # mask token with 15% probability
                if prob < 0.15:
                    prob /= 0.15
                    # 80% randomly change token to mask token
                    if prob < 0.8:
                        masked_tokens[token_id] = "[MASK]"
                    # 10% randomly change token to random token
                    elif prob < 0.9:
                        masked_tokens[token_id] = random.choice(list(self.tokenizer.vocab.items()))[0]
                    # -> rest 10% randomly keep current token
                    # record the original token id (predicted later)
                    try:
                        token_labels.append(self.tokenizer.vocab[token])
                    except KeyError:
                        # For unknown words (should not occur with BPE vocab)
                        token_labels.append(self.tokenizer.vocab["[UNK]"])
                else:
                    # no masking token (will be ignored by loss function later)
                    token_labels.append(-1)
            # -----> Mask Language Model
            input_ids = self.tokenizer.convert_tokens_to_ids(words)
            input_mask = [1] * len(input_ids)
            segment_ids = [0] * len(input_ids)
            masked_token_ids = self.tokenizer.convert_tokens_to_ids(masked_tokens)
            # Zero-pad everything out to max_words.
            while len(input_ids) < self.max_words:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
                masked_token_ids.append(0)
                token_labels.append(-1)
            assert len(input_ids) == self.max_words
            assert len(input_mask) == self.max_words
            assert len(segment_ids) == self.max_words
            assert len(masked_token_ids) == self.max_words
            assert len(token_labels) == self.max_words
            pairs_text[i] = np.array(input_ids)
            pairs_mask[i] = np.array(input_mask)
            pairs_segment[i] = np.array(segment_ids)
            pairs_masked_text[i] = np.array(masked_token_ids)
            pairs_token_labels[i] = np.array(token_labels)
            # For generate captions
            if caption is not None:
                caption_words = self.tokenizer.tokenize(caption)
            else:
                caption_words = self._get_single_text(video_id)
            if len(caption_words) > total_length_with_CLS:
                caption_words = caption_words[:total_length_with_CLS]
            # Decoder input is shifted right ([CLS] prefix), output is shifted
            # left ([SEP] suffix) — standard teacher forcing.
            input_caption_words = ["[CLS]"] + caption_words
            output_caption_words = caption_words + ["[SEP]"]
            input_caption_ids = self.tokenizer.convert_tokens_to_ids(input_caption_words)
            output_caption_ids = self.tokenizer.convert_tokens_to_ids(output_caption_words)
            decoder_mask = [1] * len(input_caption_ids)
            while len(input_caption_ids) < self.max_words:
                input_caption_ids.append(0)
                output_caption_ids.append(0)
                decoder_mask.append(0)
            assert len(input_caption_ids) == self.max_words
            assert len(output_caption_ids) == self.max_words
            assert len(decoder_mask) == self.max_words
            pairs_input_caption_ids[i] = np.array(input_caption_ids)
            pairs_output_caption_ids[i] = np.array(output_caption_ids)
            pairs_decoder_mask[i] = np.array(decoder_mask)
        return pairs_text, pairs_mask, pairs_segment, pairs_masked_text, pairs_token_labels, \
               pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids, choice_video_ids

    def _get_single_text(self, video_id):
        """Pick a random caption of `video_id` and tokenize it."""
        # Fix: this class stores captions in `video_sentences_dict`;
        # `self.sentences` was never defined here (AttributeError before).
        rind = random.randint(0, len(self.video_sentences_dict[video_id]) - 1)
        caption = self.video_sentences_dict[video_id][rind]
        words = self.tokenizer.tokenize(caption)
        return words

    def _get_video(self, choice_video_ids):
        """Load features (truncated to max_frames) for each video id and build
        the masked-frame-model variants.

        Returns (video, video_mask, masked_video, video_labels_index).
        """
        video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.int64)
        max_video_length = [0] * len(choice_video_ids)
        video = np.zeros((len(choice_video_ids), self.max_frames, self.feature_size), dtype=np.float64)
        for i, video_id in enumerate(choice_video_ids):
            video_slice = self.feature_dict[video_id]
            if self.max_frames < video_slice.shape[0]:
                video_slice = video_slice[:self.max_frames]
            slice_shape = video_slice.shape
            max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_shape[0] else slice_shape[0]
            if len(video_slice) < 1:
                # Empty feature array: leave zeros and report.
                print("video_id: {}".format(video_id))
            else:
                video[i][:slice_shape[0]] = video_slice
        for i, v_length in enumerate(max_video_length):
            video_mask[i][:v_length] = [1] * v_length
        # Mask Frame Model <-----
        video_labels_index = [[] for _ in range(len(choice_video_ids))]
        masked_video = video.copy()
        for i, video_pair_ in enumerate(masked_video):
            for j, _ in enumerate(video_pair_):
                if j < max_video_length[i]:
                    prob = random.random()
                    # mask frame with 15% probability (zero it out)
                    if prob < 0.15:
                        masked_video[i][j] = [0.] * video.shape[-1]
                        video_labels_index[i].append(j)
                    else:
                        video_labels_index[i].append(-1)
                else:
                    video_labels_index[i].append(-1)
        video_labels_index = np.array(video_labels_index, dtype=np.int64)
        # -----> Mask Frame Model
        return video, video_mask, masked_video, video_labels_index

    def __getitem__(self, idx):
        video_id, caption = self.sentences_dict[idx]
        pairs_text, pairs_mask, pairs_segment, \
        pairs_masked_text, pairs_token_labels, \
        pairs_input_caption_ids, pairs_decoder_mask, \
        pairs_output_caption_ids, choice_video_ids = self._get_text(video_id, caption)
        video, video_mask, masked_video, video_labels_index = self._get_video(choice_video_ids)
        return pairs_text, pairs_mask, pairs_segment, video, video_mask, \
               pairs_masked_text, pairs_token_labels, masked_video, video_labels_index, \
               pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids
| 10,371 | 44.095652 | 115 | py |
UniVL | UniVL-main/dataloaders/dataloader_youcook_retrieval.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from torch.utils.data import Dataset
import pandas as pd
import os
import numpy as np
import pickle
import random
class Youcook_DataLoader(Dataset):
    """Youcook retrieval dataset loader."""
    def __init__(
            self,
            csv,
            data_path,
            features_path,
            tokenizer,
            feature_framerate=1.0,
            max_words=30,
            max_frames=100,
    ):
        """
        Args:
            csv: csv file with `video_id` and `feature_file` columns.
            data_path: pickle mapping video_id -> dict with 'start'/'end'/'text' clip lists.
            features_path: pickle mapping feature_file -> [T, D] feature array.
            tokenizer: BERT-style tokenizer (`tokenize`, `convert_tokens_to_ids`, `vocab`).
            feature_framerate: features per second; converts clip seconds to frame indices.
            max_words: maximum token length, including [CLS]/[SEP].
            max_frames: maximum number of feature frames kept per clip.
        """
        self.csv = pd.read_csv(csv)
        self.data_dict = pickle.load(open(data_path, 'rb'))
        self.feature_dict = pickle.load(open(features_path, 'rb'))
        self.feature_framerate = feature_framerate
        self.max_words = max_words
        self.max_frames = max_frames
        self.tokenizer = tokenizer
        # Get iterator video ids
        video_id_list = [itm for itm in self.csv['video_id'].values]
        self.video_id2idx_dict = {video_id: id for id, video_id in enumerate(video_id_list)}
        # Map a flat sample index to a (video_id, clip index) pair.
        self.iter2video_pairs_dict = {}
        iter_idx_ = 0
        for video_id in video_id_list:
            data_dict = self.data_dict[video_id]
            n_caption = len(data_dict['start'])
            for sub_id in range(n_caption):
                self.iter2video_pairs_dict[iter_idx_] = (video_id, sub_id)
                iter_idx_ += 1

    def __len__(self):
        return len(self.iter2video_pairs_dict)

    def _get_text(self, video_id, sub_id):
        """Tokenize clip `sub_id` of `video_id` and build masked-LM tensors.

        Returns (text, mask, segment, masked_text, token_labels, starts, ends);
        text arrays are shaped (1, max_words), starts/ends are clip seconds.
        """
        data_dict = self.data_dict[video_id]
        k, r_ind = 1, [sub_id]
        starts = np.zeros(k)
        ends = np.zeros(k)
        # np.long was removed in NumPy 1.24; np.int64 is the dtype it aliased.
        pairs_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_mask = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_segment = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_masked_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_token_labels = np.zeros((k, self.max_words), dtype=np.int64)
        for i in range(k):
            ind = r_ind[i]
            words = self.tokenizer.tokenize(data_dict['text'][ind])
            start_, end_ = data_dict['start'][ind], data_dict['end'][ind]
            starts[i], ends[i] = start_, end_
            words = ["[CLS]"] + words
            total_length_with_CLS = self.max_words - 1
            if len(words) > total_length_with_CLS:
                words = words[:total_length_with_CLS]
            words = words + ["[SEP]"]
            # Mask Language Model <-----
            token_labels = []
            masked_tokens = words.copy()
            for token_id, token in enumerate(masked_tokens):
                if token_id == 0 or token_id == len(masked_tokens) - 1:
                    # Never mask [CLS]/[SEP]; -1 is ignored by the loss.
                    token_labels.append(-1)
                    continue
                prob = random.random()
                # mask token with 15% probability
                if prob < 0.15:
                    prob /= 0.15
                    # 80% randomly change token to mask token
                    if prob < 0.8:
                        masked_tokens[token_id] = "[MASK]"
                    # 10% randomly change token to random token
                    elif prob < 0.9:
                        masked_tokens[token_id] = random.choice(list(self.tokenizer.vocab.items()))[0]
                    # -> rest 10% randomly keep current token
                    # record the original token id (predicted later)
                    try:
                        token_labels.append(self.tokenizer.vocab[token])
                    except KeyError:
                        # For unknown words (should not occur with BPE vocab)
                        token_labels.append(self.tokenizer.vocab["[UNK]"])
                else:
                    # no masking token (will be ignored by loss function later)
                    token_labels.append(-1)
            # -----> Mask Language Model
            input_ids = self.tokenizer.convert_tokens_to_ids(words)
            input_mask = [1] * len(input_ids)
            segment_ids = [0] * len(input_ids)
            masked_token_ids = self.tokenizer.convert_tokens_to_ids(masked_tokens)
            # Zero-pad everything out to max_words.
            while len(input_ids) < self.max_words:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
                masked_token_ids.append(0)
                token_labels.append(-1)
            assert len(input_ids) == self.max_words
            assert len(input_mask) == self.max_words
            assert len(segment_ids) == self.max_words
            assert len(masked_token_ids) == self.max_words
            assert len(token_labels) == self.max_words
            pairs_text[i] = np.array(input_ids)
            pairs_mask[i] = np.array(input_mask)
            pairs_segment[i] = np.array(segment_ids)
            pairs_masked_text[i] = np.array(masked_token_ids)
            pairs_token_labels[i] = np.array(token_labels)
        return pairs_text, pairs_mask, pairs_segment, pairs_masked_text, pairs_token_labels, starts, ends

    def _get_video(self, idx, s, e):
        """Slice features for clips [s[i], e[i]] (seconds) of csv row `idx` and
        build the masked-frame-model variants."""
        video_mask = np.zeros((len(s), self.max_frames), dtype=np.int64)
        max_video_length = [0] * len(s)
        video_features = self.feature_dict[self.csv["feature_file"].values[idx]]
        # np.float was removed in NumPy 1.24; np.float64 is the dtype it aliased.
        video = np.zeros((len(s), self.max_frames, video_features.shape[-1]), dtype=np.float64)
        for i in range(len(s)):
            start = int(s[i] * self.feature_framerate)
            end = int(e[i] * self.feature_framerate) + 1
            video_slice = video_features[start:end]
            if self.max_frames < video_slice.shape[0]:
                video_slice = video_slice[:self.max_frames]
            slice_shape = video_slice.shape
            max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_shape[0] else slice_shape[0]
            if len(video_slice) < 1:
                # Empty clip: leave zeros and report.
                print("video_id: {}, start: {}, end: {}".format(self.csv["video_id"].values[idx], start, end))
            else:
                video[i][:slice_shape[0]] = video_slice
        for i, v_length in enumerate(max_video_length):
            video_mask[i][:v_length] = [1] * v_length
        # Mask Frame Model <-----
        video_labels_index = [[] for _ in range(len(s))]
        masked_video = video.copy()
        for i, video_pair_ in enumerate(masked_video):
            for j, _ in enumerate(video_pair_):
                if j < max_video_length[i]:
                    prob = random.random()
                    # mask frame with 15% probability (zero it out)
                    if prob < 0.15:
                        masked_video[i][j] = [0.] * video.shape[-1]
                        video_labels_index[i].append(j)
                    else:
                        video_labels_index[i].append(-1)
                else:
                    video_labels_index[i].append(-1)
        video_labels_index = np.array(video_labels_index, dtype=np.int64)
        # -----> Mask Frame Model
        return video, video_mask, masked_video, video_labels_index

    def __getitem__(self, feature_idx):
        video_id, sub_id = self.iter2video_pairs_dict[feature_idx]
        idx = self.video_id2idx_dict[video_id]
        pairs_text, pairs_mask, pairs_segment, \
        pairs_masked_text, pairs_token_labels, starts, ends = self._get_text(video_id, sub_id)
        video, video_mask, masked_video, video_labels_index = self._get_video(idx, starts, ends)
        return pairs_text, pairs_mask, pairs_segment, video, video_mask, \
               pairs_masked_text, pairs_token_labels, masked_video, video_labels_index
| 7,820 | 40.163158 | 113 | py |
UniVL | UniVL-main/dataloaders/dataloader_youcook_caption.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from torch.utils.data import Dataset
import pandas as pd
import os
import numpy as np
import pickle
import re
import random
import io
class Youcook_Caption_DataLoader(Dataset):
    """Youcook caption dataset loader.

    The encoder text comes from the ASR transcript of a clip; the decoder
    target is the human-written caption ('text') of the same clip.
    """
    def __init__(
            self,
            csv,
            data_path,
            features_path,
            tokenizer,
            feature_framerate=1.0,
            max_words=30,
            max_frames=100,
    ):
        """
        Args:
            csv: csv file with `video_id` and `feature_file` columns.
            data_path: pickle mapping video_id -> dict with 'start'/'end'/'transcript'/'text'.
            features_path: pickle mapping feature_file -> [T, D] feature array.
            tokenizer: BERT-style tokenizer (`tokenize`, `convert_tokens_to_ids`, `vocab`).
            feature_framerate: features per second; converts clip seconds to frame indices.
            max_words: maximum token length, including [CLS]/[SEP].
            max_frames: maximum number of feature frames kept per clip.
        """
        self.csv = pd.read_csv(csv)
        self.data_dict = pickle.load(open(data_path, 'rb'))
        self.feature_dict = pickle.load(open(features_path, 'rb'))
        self.feature_framerate = feature_framerate
        self.max_words = max_words
        self.max_frames = max_frames
        self.tokenizer = tokenizer
        # Feature dimensionality, inferred from the first feature file listed.
        self.feature_size = self.feature_dict[self.csv["feature_file"].values[0]].shape[-1]
        # Get iterator video ids
        video_id_list = [itm for itm in self.csv['video_id'].values]
        self.video_id2idx_dict = {video_id: id for id, video_id in enumerate(video_id_list)}
        # Map a flat sample index to a (video_id, clip index) pair.
        self.iter2video_pairs_dict = {}
        iter_idx_ = 0
        for video_id in video_id_list:
            data_dict = self.data_dict[video_id]
            n_caption = len(data_dict['start'])
            for sub_id in range(n_caption):
                self.iter2video_pairs_dict[iter_idx_] = (video_id, sub_id)
                iter_idx_ += 1

    def __len__(self):
        return len(self.iter2video_pairs_dict)

    def _get_text(self, video_id, sub_id):
        """Build masked-LM tensors from the clip transcript and decoder
        tensors from the clip caption; arrays shaped (1, max_words).

        Returns (text, mask, segment, masked_text, token_labels,
        input_caption_ids, decoder_mask, output_caption_ids, starts, ends).
        """
        data_dict = self.data_dict[video_id]
        k = 1
        r_ind = [sub_id]
        starts = np.zeros(k)
        ends = np.zeros(k)
        # np.long was removed in NumPy 1.24; np.int64 is the dtype it aliased.
        pairs_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_mask = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_segment = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_masked_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_token_labels = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_input_caption_ids = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_output_caption_ids = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_decoder_mask = np.zeros((k, self.max_words), dtype=np.int64)
        for i in range(k):
            ind = r_ind[i]
            start_, end_ = data_dict['start'][ind], data_dict['end'][ind]
            starts[i], ends[i] = start_, end_
            total_length_with_CLS = self.max_words - 1
            # Encoder input: ASR transcript of the clip.
            words = self.tokenizer.tokenize(data_dict['transcript'][ind])
            words = ["[CLS]"] + words
            if len(words) > total_length_with_CLS:
                words = words[:total_length_with_CLS]
            words = words + ["[SEP]"]
            # Mask Language Model <-----
            token_labels = []
            masked_tokens = words.copy()
            for token_id, token in enumerate(masked_tokens):
                if token_id == 0 or token_id == len(masked_tokens) - 1:
                    # Never mask [CLS]/[SEP]; -1 is ignored by the loss.
                    token_labels.append(-1)
                    continue
                prob = random.random()
                # mask token with 15% probability
                if prob < 0.15:
                    prob /= 0.15
                    # 80% randomly change token to mask token
                    if prob < 0.8:
                        masked_tokens[token_id] = "[MASK]"
                    # 10% randomly change token to random token
                    elif prob < 0.9:
                        masked_tokens[token_id] = random.choice(list(self.tokenizer.vocab.items()))[0]
                    # -> rest 10% randomly keep current token
                    # record the original token id (predicted later)
                    try:
                        token_labels.append(self.tokenizer.vocab[token])
                    except KeyError:
                        # For unknown words (should not occur with BPE vocab)
                        token_labels.append(self.tokenizer.vocab["[UNK]"])
                else:
                    # no masking token (will be ignored by loss function later)
                    token_labels.append(-1)
            # -----> Mask Language Model
            input_ids = self.tokenizer.convert_tokens_to_ids(words)
            masked_token_ids = self.tokenizer.convert_tokens_to_ids(masked_tokens)
            input_mask = [1] * len(input_ids)
            segment_ids = [0] * len(input_ids)
            # Zero-pad everything out to max_words.
            while len(input_ids) < self.max_words:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
                masked_token_ids.append(0)
                token_labels.append(-1)
            assert len(input_ids) == self.max_words
            assert len(input_mask) == self.max_words
            assert len(segment_ids) == self.max_words
            assert len(masked_token_ids) == self.max_words
            assert len(token_labels) == self.max_words
            pairs_text[i] = np.array(input_ids)
            pairs_mask[i] = np.array(input_mask)
            pairs_segment[i] = np.array(segment_ids)
            pairs_masked_text[i] = np.array(masked_token_ids)
            pairs_token_labels[i] = np.array(token_labels)
            # For generate captions
            caption_words = self.tokenizer.tokenize(data_dict['text'][ind])
            if len(caption_words) > total_length_with_CLS:
                caption_words = caption_words[:total_length_with_CLS]
            # Decoder input is shifted right ([CLS] prefix), output is shifted
            # left ([SEP] suffix) — standard teacher forcing.
            input_caption_words = ["[CLS]"] + caption_words
            output_caption_words = caption_words + ["[SEP]"]
            input_caption_ids = self.tokenizer.convert_tokens_to_ids(input_caption_words)
            output_caption_ids = self.tokenizer.convert_tokens_to_ids(output_caption_words)
            decoder_mask = [1] * len(input_caption_ids)
            while len(input_caption_ids) < self.max_words:
                input_caption_ids.append(0)
                output_caption_ids.append(0)
                decoder_mask.append(0)
            assert len(input_caption_ids) == self.max_words
            assert len(output_caption_ids) == self.max_words
            assert len(decoder_mask) == self.max_words
            pairs_input_caption_ids[i] = np.array(input_caption_ids)
            pairs_output_caption_ids[i] = np.array(output_caption_ids)
            pairs_decoder_mask[i] = np.array(decoder_mask)
        return pairs_text, pairs_mask, pairs_segment, pairs_masked_text, pairs_token_labels,\
               pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids, starts, ends

    def _get_video(self, idx, s, e):
        """Slice features for clips [s[i], e[i]] (seconds) of csv row `idx` and
        build the masked-frame-model variants."""
        video_mask = np.zeros((len(s), self.max_frames), dtype=np.int64)
        max_video_length = [0] * len(s)
        video_features = self.feature_dict[self.csv["feature_file"].values[idx]]
        # np.float was removed in NumPy 1.24; np.float64 is the dtype it aliased.
        video = np.zeros((len(s), self.max_frames, self.feature_size), dtype=np.float64)
        for i in range(len(s)):
            start = int(s[i] * self.feature_framerate)
            end = int(e[i] * self.feature_framerate) + 1
            video_slice = video_features[start:end]
            if self.max_frames < video_slice.shape[0]:
                video_slice = video_slice[:self.max_frames]
            slice_shape = video_slice.shape
            max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_shape[0] else slice_shape[0]
            if len(video_slice) < 1:
                # Empty clip: leave zeros and report.
                print("video_id: {}, start: {}, end: {}".format(self.csv["video_id"].values[idx], start, end))
            else:
                video[i][:slice_shape[0]] = video_slice
        for i, v_length in enumerate(max_video_length):
            video_mask[i][:v_length] = [1] * v_length
        # Mask Frame Model <-----
        video_labels_index = [[] for _ in range(len(s))]
        masked_video = video.copy()
        for i, video_pair_ in enumerate(masked_video):
            for j, _ in enumerate(video_pair_):
                if j < max_video_length[i]:
                    prob = random.random()
                    # mask frame with 15% probability (zero it out)
                    if prob < 0.15:
                        masked_video[i][j] = [0.] * video.shape[-1]
                        video_labels_index[i].append(j)
                    else:
                        video_labels_index[i].append(-1)
                else:
                    video_labels_index[i].append(-1)
        video_labels_index = np.array(video_labels_index, dtype=np.int64)
        # -----> Mask Frame Model
        return video, video_mask, masked_video, video_labels_index

    def __getitem__(self, feature_idx):
        video_id, sub_id = self.iter2video_pairs_dict[feature_idx]
        idx = self.video_id2idx_dict[video_id]
        pairs_text, pairs_mask, pairs_segment, \
        pairs_masked_text, pairs_token_labels, pairs_input_caption_ids, \
        pairs_decoder_mask, pairs_output_caption_ids, starts, ends = self._get_text(video_id, sub_id)
        video, video_mask, masked_video, video_labels_index = self._get_video(idx, starts, ends)
        return pairs_text, pairs_mask, pairs_segment, video, video_mask, \
               pairs_masked_text, pairs_token_labels, masked_video, video_labels_index, \
               pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids
| 9,662 | 41.756637 | 113 | py |
UniVL | UniVL-main/dataloaders/dataloader_msrvtt_retrieval.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import os
from torch.utils.data import Dataset
import numpy as np
import pickle
import pandas as pd
from collections import defaultdict
import json
import random
class MSRVTT_DataLoader(Dataset):
    """MSRVTT retrieval dataset loader (one (video_id, sentence) pair per csv row)."""
    def __init__(
            self,
            csv_path,
            features_path,
            tokenizer,
            max_words=30,
            feature_framerate=1.0,
            max_frames=100,
    ):
        self.data = pd.read_csv(csv_path)
        self.feature_dict = pickle.load(open(features_path, 'rb'))
        self.feature_framerate = feature_framerate
        self.max_words = max_words
        self.max_frames = max_frames
        self.tokenizer = tokenizer
        # Feature dimensionality, inferred from the first video listed.
        self.feature_size = self.feature_dict[self.data['video_id'].values[0]].shape[-1]

    def __len__(self):
        return len(self.data)

    def _get_text(self, video_id, sentence):
        """Tokenize `sentence` and build masked-LM tensors shaped (1, max_words).

        Returns (text, mask, segment, masked_text, token_labels, video_ids).
        """
        choice_video_ids = [video_id]
        n_caption = len(choice_video_ids)
        k = n_caption
        # np.long was removed in NumPy 1.24; np.int64 is the dtype it aliased.
        pairs_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_mask = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_segment = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_masked_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_token_labels = np.zeros((k, self.max_words), dtype=np.int64)
        for i, video_id in enumerate(choice_video_ids):
            words = self.tokenizer.tokenize(sentence)
            words = ["[CLS]"] + words
            total_length_with_CLS = self.max_words - 1
            if len(words) > total_length_with_CLS:
                words = words[:total_length_with_CLS]
            words = words + ["[SEP]"]
            # Mask Language Model <-----
            token_labels = []
            masked_tokens = words.copy()
            for token_id, token in enumerate(masked_tokens):
                if token_id == 0 or token_id == len(masked_tokens) - 1:
                    # Never mask [CLS]/[SEP]; -1 is ignored by the loss.
                    token_labels.append(-1)
                    continue
                prob = random.random()
                # mask token with 15% probability
                if prob < 0.15:
                    prob /= 0.15
                    # 80% randomly change token to mask token
                    if prob < 0.8:
                        masked_tokens[token_id] = "[MASK]"
                    # 10% randomly change token to random token
                    elif prob < 0.9:
                        masked_tokens[token_id] = random.choice(list(self.tokenizer.vocab.items()))[0]
                    # -> rest 10% randomly keep current token
                    # record the original token id (predicted later)
                    try:
                        token_labels.append(self.tokenizer.vocab[token])
                    except KeyError:
                        # For unknown words (should not occur with BPE vocab)
                        token_labels.append(self.tokenizer.vocab["[UNK]"])
                else:
                    # no masking token (will be ignored by loss function later)
                    token_labels.append(-1)
            # -----> Mask Language Model
            input_ids = self.tokenizer.convert_tokens_to_ids(words)
            input_mask = [1] * len(input_ids)
            segment_ids = [0] * len(input_ids)
            masked_token_ids = self.tokenizer.convert_tokens_to_ids(masked_tokens)
            # Zero-pad everything out to max_words.
            while len(input_ids) < self.max_words:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
                masked_token_ids.append(0)
                token_labels.append(-1)
            assert len(input_ids) == self.max_words
            assert len(input_mask) == self.max_words
            assert len(segment_ids) == self.max_words
            assert len(masked_token_ids) == self.max_words
            assert len(token_labels) == self.max_words
            pairs_text[i] = np.array(input_ids)
            pairs_mask[i] = np.array(input_mask)
            pairs_segment[i] = np.array(segment_ids)
            pairs_masked_text[i] = np.array(masked_token_ids)
            pairs_token_labels[i] = np.array(token_labels)
        return pairs_text, pairs_mask, pairs_segment, pairs_masked_text, pairs_token_labels, choice_video_ids

    def _get_video(self, choice_video_ids):
        """Load features (truncated to max_frames) for each video id and build
        the masked-frame-model variants."""
        video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.int64)
        max_video_length = [0] * len(choice_video_ids)
        # np.float was removed in NumPy 1.24; np.float64 is the dtype it aliased.
        video = np.zeros((len(choice_video_ids), self.max_frames, self.feature_size), dtype=np.float64)
        for i, video_id in enumerate(choice_video_ids):
            video_slice = self.feature_dict[video_id]
            if self.max_frames < video_slice.shape[0]:
                video_slice = video_slice[:self.max_frames]
            slice_shape = video_slice.shape
            max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_shape[0] else slice_shape[0]
            if len(video_slice) < 1:
                # Empty feature array: leave zeros and report.
                print("video_id: {}".format(video_id))
            else:
                video[i][:slice_shape[0]] = video_slice
        for i, v_length in enumerate(max_video_length):
            video_mask[i][:v_length] = [1] * v_length
        # Mask Frame Model <-----
        video_labels_index = [[] for _ in range(len(choice_video_ids))]
        masked_video = video.copy()
        for i, video_pair_ in enumerate(masked_video):
            for j, _ in enumerate(video_pair_):
                if j < max_video_length[i]:
                    prob = random.random()
                    # mask frame with 15% probability (zero it out)
                    if prob < 0.15:
                        masked_video[i][j] = [0.] * video.shape[-1]
                        video_labels_index[i].append(j)
                    else:
                        video_labels_index[i].append(-1)
                else:
                    video_labels_index[i].append(-1)
        video_labels_index = np.array(video_labels_index, dtype=np.int64)
        # -----> Mask Frame Model
        return video, video_mask, masked_video, video_labels_index

    def __getitem__(self, idx):
        video_id = self.data['video_id'].values[idx]
        sentence = self.data['sentence'].values[idx]
        pairs_text, pairs_mask, pairs_segment, \
        pairs_masked_text, pairs_token_labels, choice_video_ids = self._get_text(video_id, sentence)
        video, video_mask, masked_video, video_labels_index = self._get_video(choice_video_ids)
        return pairs_text, pairs_mask, pairs_segment, video, video_mask, \
               pairs_masked_text, pairs_token_labels, masked_video, video_labels_index
class MSRVTT_TrainDataLoader(Dataset):
    """MSRVTT train dataset loader.

    With `unfold_sentences=True` every (video, caption) pair is a sample;
    otherwise each csv row (video) is a sample and a random caption is drawn
    at access time.
    """
    def __init__(
            self,
            csv_path,
            json_path,
            features_path,
            tokenizer,
            max_words=30,
            feature_framerate=1.0,
            max_frames=100,
            unfold_sentences=False,
    ):
        self.csv = pd.read_csv(csv_path)
        self.data = json.load(open(json_path, 'r'))
        self.feature_dict = pickle.load(open(features_path, 'rb'))
        self.feature_framerate = feature_framerate
        self.max_words = max_words
        self.max_frames = max_frames
        self.tokenizer = tokenizer
        # Feature dimensionality, inferred from the first video listed.
        self.feature_size = self.feature_dict[self.csv['video_id'].values[0]].shape[-1]
        self.unfold_sentences = unfold_sentences
        self.sample_len = 0
        if self.unfold_sentences:
            # Set lookup keeps the per-sentence membership test O(1) instead
            # of a linear scan over the whole id list.
            train_video_ids = set(self.csv['video_id'].values)
            self.sentences_dict = {}
            for itm in self.data['sentences']:
                if itm['video_id'] in train_video_ids:
                    self.sentences_dict[len(self.sentences_dict)] = (itm['video_id'], itm['caption'])
            self.sample_len = len(self.sentences_dict)
        else:
            num_sentences = 0
            self.sentences = defaultdict(list)
            s_video_id_set = set()
            for itm in self.data['sentences']:
                self.sentences[itm['video_id']].append(itm['caption'])
                num_sentences += 1
                s_video_id_set.add(itm['video_id'])
            # Use to find the clips in the same video
            self.parent_ids = {}
            self.children_video_ids = defaultdict(list)
            for itm in self.data['videos']:
                vid = itm["video_id"]
                url_posfix = itm["url"].split("?v=")[-1]
                self.parent_ids[vid] = url_posfix
                self.children_video_ids[url_posfix].append(vid)
            self.sample_len = len(self.csv)

    def __len__(self):
        return self.sample_len

    def _get_text(self, video_id, caption=None):
        """Tokenize `caption` (or a random caption of `video_id` when None)
        and build masked-LM tensors shaped (1, max_words).

        Returns (text, mask, segment, masked_text, token_labels, video_ids).
        """
        k = 1
        choice_video_ids = [video_id]
        # np.long was removed in NumPy 1.24; np.int64 is the dtype it aliased.
        pairs_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_mask = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_segment = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_masked_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_token_labels = np.zeros((k, self.max_words), dtype=np.int64)
        for i, video_id in enumerate(choice_video_ids):
            if caption is not None:
                words = self.tokenizer.tokenize(caption)
            else:
                words = self._get_single_text(video_id)
            words = ["[CLS]"] + words
            total_length_with_CLS = self.max_words - 1
            if len(words) > total_length_with_CLS:
                words = words[:total_length_with_CLS]
            words = words + ["[SEP]"]
            # Mask Language Model <-----
            token_labels = []
            masked_tokens = words.copy()
            for token_id, token in enumerate(masked_tokens):
                if token_id == 0 or token_id == len(masked_tokens) - 1:
                    # Never mask [CLS]/[SEP]; -1 is ignored by the loss.
                    token_labels.append(-1)
                    continue
                prob = random.random()
                # mask token with 15% probability
                if prob < 0.15:
                    prob /= 0.15
                    # 80% randomly change token to mask token
                    if prob < 0.8:
                        masked_tokens[token_id] = "[MASK]"
                    # 10% randomly change token to random token
                    elif prob < 0.9:
                        masked_tokens[token_id] = random.choice(list(self.tokenizer.vocab.items()))[0]
                    # -> rest 10% randomly keep current token
                    # record the original token id (predicted later)
                    try:
                        token_labels.append(self.tokenizer.vocab[token])
                    except KeyError:
                        # For unknown words (should not occur with BPE vocab)
                        token_labels.append(self.tokenizer.vocab["[UNK]"])
                else:
                    # no masking token (will be ignored by loss function later)
                    token_labels.append(-1)
            # -----> Mask Language Model
            input_ids = self.tokenizer.convert_tokens_to_ids(words)
            input_mask = [1] * len(input_ids)
            segment_ids = [0] * len(input_ids)
            masked_token_ids = self.tokenizer.convert_tokens_to_ids(masked_tokens)
            # Zero-pad everything out to max_words.
            while len(input_ids) < self.max_words:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
                masked_token_ids.append(0)
                token_labels.append(-1)
            assert len(input_ids) == self.max_words
            assert len(input_mask) == self.max_words
            assert len(segment_ids) == self.max_words
            assert len(masked_token_ids) == self.max_words
            assert len(token_labels) == self.max_words
            pairs_text[i] = np.array(input_ids)
            pairs_mask[i] = np.array(input_mask)
            pairs_segment[i] = np.array(segment_ids)
            pairs_masked_text[i] = np.array(masked_token_ids)
            pairs_token_labels[i] = np.array(token_labels)
        return pairs_text, pairs_mask, pairs_segment, pairs_masked_text, pairs_token_labels, choice_video_ids

    def _get_single_text(self, video_id):
        """Pick a random caption of `video_id` and tokenize it.

        Only reachable when `unfold_sentences` is False (the only mode in
        which `self.sentences` is populated).
        """
        rind = random.randint(0, len(self.sentences[video_id]) - 1)
        caption = self.sentences[video_id][rind]
        words = self.tokenizer.tokenize(caption)
        return words

    def _get_video(self, choice_video_ids):
        """Load features (truncated to max_frames) for each video id and build
        the masked-frame-model variants."""
        video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.int64)
        max_video_length = [0] * len(choice_video_ids)
        # np.float was removed in NumPy 1.24; np.float64 is the dtype it aliased.
        video = np.zeros((len(choice_video_ids), self.max_frames, self.feature_size), dtype=np.float64)
        for i, video_id in enumerate(choice_video_ids):
            video_slice = self.feature_dict[video_id]
            if self.max_frames < video_slice.shape[0]:
                video_slice = video_slice[:self.max_frames]
            slice_shape = video_slice.shape
            max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_shape[0] else slice_shape[0]
            if len(video_slice) < 1:
                # Empty feature array: leave zeros and report.
                print("video_id: {}".format(video_id))
            else:
                video[i][:slice_shape[0]] = video_slice
        for i, v_length in enumerate(max_video_length):
            video_mask[i][:v_length] = [1] * v_length
        # Mask Frame Model <-----
        video_labels_index = [[] for _ in range(len(choice_video_ids))]
        masked_video = video.copy()
        for i, video_pair_ in enumerate(masked_video):
            for j, _ in enumerate(video_pair_):
                if j < max_video_length[i]:
                    prob = random.random()
                    # mask frame with 15% probability (zero it out)
                    if prob < 0.15:
                        masked_video[i][j] = [0.] * video.shape[-1]
                        video_labels_index[i].append(j)
                    else:
                        video_labels_index[i].append(-1)
                else:
                    video_labels_index[i].append(-1)
        video_labels_index = np.array(video_labels_index, dtype=np.int64)
        # -----> Mask Frame Model
        return video, video_mask, masked_video, video_labels_index

    def __getitem__(self, idx):
        if self.unfold_sentences:
            video_id, caption = self.sentences_dict[idx]
        else:
            video_id, caption = self.csv['video_id'].values[idx], None
        pairs_text, pairs_mask, pairs_segment, \
        pairs_masked_text, pairs_token_labels, choice_video_ids = self._get_text(video_id, caption)
        video, video_mask, masked_video, video_labels_index = self._get_video(choice_video_ids)
        return pairs_text, pairs_mask, pairs_segment, video, video_mask, \
               pairs_masked_text, pairs_token_labels, masked_video, video_labels_index
| 15,263 | 42.240793 | 113 | py |
UniVL | UniVL-main/dataloaders/dataloader_howto100m.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from torch.utils.data import Dataset
import pandas as pd
import os
import numpy as np
import random
class Youtube_DataLoader(Dataset):
    """
    Youtube dataset loader.
    Note: Use transcript as caption, for mask decoder pretrain task.
    """

    def __init__(
            self,
            csv,
            features_path,
            data_dict,
            tokenizer,
            min_time=10.0,
            feature_framerate=1.0,
            max_words=30,
            min_words=0,
            n_pair=-1,
            max_frames=100,
            with_long_context=True,
            use_mil=False,
            only_sim=False,  # set automatically from model choice
            sampled_use_mil=False,
            pretrain_enhance_vmodal=False,
            video_dim=1024,
    ):
        """
        :param csv: path of a csv file with a 'video_id' (and 'feature_file')
            column, one row per video.
        :param features_path: directory containing the .npy feature files.
        :param data_dict: dict video_id -> {'start': [...], 'end': [...],
            'text': [...]} transcript segments.
        :param tokenizer: BERT-style tokenizer with ``tokenize`` and
            ``convert_tokens_to_ids``.
        :param min_time: minimal clip duration (seconds) when extending context.
        :param n_pair: number of caption/video pairs sampled per video
            (-1 = all captions).
        :param use_mil / sampled_use_mil: multiple-instance-learning sampling
            modes; ``sampled_use_mil`` implies ``use_mil``.
        :param pretrain_enhance_vmodal: if True, occasionally blank all input
            text to force the model to rely on the video modality.
        :param video_dim: dimensionality of the per-frame feature vectors.
        """
        self.csv = pd.read_csv(csv)
        self.features_path = features_path
        self.data_dict = data_dict
        self.min_time = min_time
        self.feature_framerate = feature_framerate
        self.max_words = max_words
        self.max_frames = max_frames
        self.min_words = min_words
        self.tokenizer = tokenizer
        self.n_pair = n_pair
        self.with_long_context = with_long_context
        self.feature_size = video_dim
        self.only_sim = only_sim
        self.pretrain_enhance_vmodal = pretrain_enhance_vmodal
        self.iter_num = len(self.csv)

        self.use_mil = use_mil
        self.sampled_use_mil = sampled_use_mil
        if self.sampled_use_mil:  # sample from each video, has a higher priority than use_mil.
            self.use_mil = True

        if self.use_mil:
            positive_n_pair = self.n_pair
            # Get iterator video ids
            video_id_list = [itm for itm in self.csv['video_id'].values]
            self.video_id2idx_dict = {video_id: id for id, video_id in enumerate(video_id_list)}
            # Get all captions
            self.iter2video_pairs_dict = {}
            self.iter2video_pairslist_dict = {}
            iter_idx_mil_ = 0
            for video_id in video_id_list:
                data_dict = self.data_dict[video_id]
                n_caption = len(data_dict['start'])
                sub_list = []
                if self.n_pair < 0 or self.n_pair == 1:
                    # One caption per iteration item.
                    for sub_id in range(n_caption):
                        sub_list.append([sub_id])
                else:
                    sb_ls_ = list(range(n_caption))
                    if self.n_pair > n_caption:
                        # Repeat caption ids until n_pair of them are available.
                        sb_ls_ = sb_ls_ * (self.n_pair // n_caption + 1)
                        sb_ls_ = sb_ls_[:self.n_pair]
                        for sub_id in np.arange(0, len(sb_ls_), self.n_pair):
                            sub_list.append(sb_ls_[sub_id: sub_id + self.n_pair])
                    else:
                        # Pad so the list length is a multiple of positive_n_pair.
                        sb_ls_ = sb_ls_ + sb_ls_[:(((n_caption + positive_n_pair - 1) // positive_n_pair) * positive_n_pair - n_caption)]
                        for sub_id in np.arange(0, len(sb_ls_), positive_n_pair):
                            pos_ls = sb_ls_[sub_id: sub_id + positive_n_pair]
                            sub_list.append(pos_ls)

                for sub_e in sub_list:
                    self.iter2video_pairs_dict[iter_idx_mil_] = (video_id, sub_e)
                    iter_idx_mil_ += 1
                self.iter2video_pairslist_dict[video_id] = sub_list

        if self.use_mil and self.sampled_use_mil is False:
            self.iter_num = len(self.iter2video_pairs_dict)

    def __len__(self):
        return self.iter_num

    def _mask_tokens(self, words):
        """BERT-style token masking: 15% of non-special tokens are selected;
        of those 80% become [MASK], 10% a random vocab token, 10% unchanged.

        :return: (masked_tokens, token_labels) with -1 for unselected tokens.
        """
        token_labels = []
        masked_tokens = words.copy()

        for token_id, token in enumerate(masked_tokens):
            # First/last tokens are [CLS]/[SEP]; never mask them.
            if token_id == 0 or token_id == len(masked_tokens) - 1:
                token_labels.append(-1)
                continue
            prob = random.random()
            if prob < 0.15:
                prob /= 0.15
                if prob < 0.8:
                    masked_tokens[token_id] = "[MASK]"
                elif prob < 0.9:
                    masked_tokens[token_id] = random.choice(list(self.tokenizer.vocab.items()))[0]
                try:
                    token_labels.append(self.tokenizer.vocab[token])
                except KeyError:
                    token_labels.append(self.tokenizer.vocab["[UNK]"])
            else:
                token_labels.append(-1)

        return masked_tokens, token_labels

    def _get_text(self, video_id, n_pair_max, sub_ids=None, only_sim=False, enhance_vmodel=False):
        """Build padded/masked token id arrays for up to n_pair_max captions.

        :return: (pairs_text, pairs_mask, pairs_segment, pairs_masked_text,
            pairs_token_labels, pairs_input_caption_ids, pairs_decoder_mask,
            pairs_output_caption_ids, starts, ends)
        """
        data_dict = self.data_dict[video_id]
        if self.use_mil:
            k = len(sub_ids)
            r_ind = sub_ids
        else:
            n_caption = len(data_dict['start'])
            if n_pair_max == -1:
                k = n_caption
                r_ind = range(n_caption)
            else:
                k = n_pair_max
                if k <= n_caption:
                    r_ind = np.random.choice(range(n_caption), k, replace=False)
                else:
                    r_ind_must = np.array(range(n_caption))
                    r_ind_rand = np.random.choice(range(n_caption), k - n_caption, replace=True)
                    r_ind = np.concatenate((r_ind_must, r_ind_rand), axis=0)
                    np.random.shuffle(r_ind)

        starts = np.zeros(k)
        ends = np.zeros(k)
        # FIX: np.long was removed in NumPy >= 1.24; np.int64 is equivalent.
        pairs_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_mask = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_segment = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_masked_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_token_labels = np.zeros((k, self.max_words), dtype=np.int64)

        pairs_input_caption_ids = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_output_caption_ids = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_decoder_mask = np.zeros((k, self.max_words), dtype=np.int64)

        for i in range(k):
            ind = r_ind[i]
            words, start_, end_ = self._get_single_transcript(data_dict, ind, with_long_context=self.with_long_context)
            caption_words = words.copy()
            starts[i], ends[i] = start_, end_
            if enhance_vmodel:
                words = []  # mask all input text

            words = ["[CLS]"] + words
            total_length_with_CLS = self.max_words - 1
            if len(words) > total_length_with_CLS:
                words = words[:total_length_with_CLS]
            words = words + ["[SEP]"]

            input_ids = self.tokenizer.convert_tokens_to_ids(words)
            input_mask = [1] * len(input_ids)
            segment_ids = [0] * len(input_ids)
            while len(input_ids) < self.max_words:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
            assert len(input_ids) == self.max_words
            assert len(input_mask) == self.max_words
            assert len(segment_ids) == self.max_words

            pairs_text[i] = np.array(input_ids)
            pairs_mask[i] = np.array(input_mask)
            pairs_segment[i] = np.array(segment_ids)

            if only_sim is False:
                # For generate captions
                if len(caption_words) > total_length_with_CLS:
                    caption_words = caption_words[:total_length_with_CLS]
                input_caption_words = ["[CLS]"] + caption_words
                output_caption_words = caption_words + ["[SEP]"]

                masked_tokens, token_labels = self._mask_tokens(words)
                masked_token_ids = self.tokenizer.convert_tokens_to_ids(masked_tokens)
                masked_input_caption_words, input_token_labels = self._mask_tokens(input_caption_words)
                input_caption_words = masked_input_caption_words.copy()

                while len(masked_token_ids) < self.max_words:
                    masked_token_ids.append(0)
                    token_labels.append(-1)
                assert len(masked_token_ids) == self.max_words
                assert len(token_labels) == self.max_words

                # For generate captions
                input_caption_ids = self.tokenizer.convert_tokens_to_ids(input_caption_words)
                output_caption_ids = self.tokenizer.convert_tokens_to_ids(output_caption_words)
                decoder_mask = [1] * len(input_caption_ids)
                while len(input_caption_ids) < self.max_words:
                    input_caption_ids.append(0)
                    output_caption_ids.append(0)
                    decoder_mask.append(0)
                assert len(input_caption_ids) == self.max_words
                assert len(output_caption_ids) == self.max_words
                assert len(decoder_mask) == self.max_words

                pairs_masked_text[i] = np.array(masked_token_ids)
                pairs_token_labels[i] = np.array(token_labels)
                pairs_input_caption_ids[i] = np.array(input_caption_ids)
                pairs_output_caption_ids[i] = np.array(output_caption_ids)
                pairs_decoder_mask[i] = np.array(decoder_mask)

        return pairs_text, pairs_mask, pairs_segment, pairs_masked_text, pairs_token_labels, \
               pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids, starts, ends

    def _get_single_transcript(self, data_dict, ind, with_long_context=True):
        """Return the transcript at *ind*, greedily extended with neighboring
        segments until it has at least ``min_words`` words and spans at least
        ``min_time`` seconds (when with_long_context is True).

        :return: (words, start_time, end_time)
        """
        start, end = ind, ind
        words = self.tokenizer.tokenize(str(data_dict['text'][ind]))
        diff = data_dict['end'][end] - data_dict['start'][start]
        while with_long_context and (len(words) < self.min_words or diff < self.min_time):
            if start > 0 and end < len(data_dict['end']) - 1:
                next_words = self.tokenizer.tokenize(str(data_dict['text'][end + 1]))
                prev_words = self.tokenizer.tokenize(str(data_dict['text'][start - 1]))
                d1 = data_dict['end'][end + 1] - data_dict['start'][start]
                d2 = data_dict['end'][end] - data_dict['start'][start - 1]
                # Extend towards whichever side adds less duration (or fewer
                # words when no minimal time is required).
                if (self.min_time > 0 and d2 <= d1) or \
                        (self.min_time == 0 and len(next_words) <= len(prev_words)):
                    start -= 1
                    words = prev_words + words
                else:
                    end += 1
                    words.extend(next_words)
            elif start > 0:
                words = self.tokenizer.tokenize(str(data_dict['text'][start - 1])) + words
                start -= 1
            elif end < len(data_dict['end']) - 1:
                words.extend(self.tokenizer.tokenize(str(data_dict['text'][end + 1])))
                end += 1
            else:
                break
            diff = data_dict['end'][end] - data_dict['start'][start]
        return words, data_dict['start'][start], data_dict['end'][end]

    def _expand_video_slice(self, s, e, si, ei, fps, video_features):
        """Slice video_features between s[si] and e[ei] (in seconds),
        widening the [si, ei] window alternately left/right until the slice
        is non-empty, then truncate to ``max_frames``.

        :return: (video_slice, start_frame, end_frame)
        """
        start = int(s[si] * fps)
        end = int(e[ei] * fps) + 1

        if start > end:
            start, end = end, start
        video_slice = video_features[start:end]

        expand_left = True
        while len(video_slice) < 1:
            if si == 0 and ei == len(s) - 1:
                break
            if expand_left:
                expand_left = False
                si = si - 1 if si > 0 else si
            else:
                expand_left = True
                ei = ei + 1 if ei < len(e) - 1 else ei
            start = int(s[si] * fps)
            end = int(e[ei] * fps) + 1
            if start > end:
                start, end = end, start
            video_slice = video_features[start:end]

        if self.max_frames < video_slice.shape[0]:
            video_slice = video_slice[:self.max_frames]

        return video_slice, start, end

    def _get_video(self, idx, s, e, only_sim=False):
        """Load per-clip frame features for video row *idx* and optionally
        build a randomly masked copy for the masked-frame-model task.

        :param s, e: arrays of clip start/end times (seconds).
        :return: (video, video_mask, masked_video, video_labels_index)
        """
        # FIX: np.long / np.float were removed in NumPy >= 1.24.
        video_mask = np.zeros((len(s), self.max_frames), dtype=np.int64)
        max_video_length = [0] * len(s)

        video = np.zeros((len(s), self.max_frames, self.feature_size), dtype=np.float64)
        feature_file = os.path.join(self.features_path, self.csv["feature_file"].values[idx])
        try:
            video_features = np.load(feature_file)
            for i in range(len(s)):
                if len(video_features) < 1:
                    raise ValueError("{} is empty.".format(feature_file))
                video_slice, start, end = self._expand_video_slice(s, e, i, i, self.feature_framerate, video_features)
                slice_shape = video_slice.shape
                max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_shape[0] else slice_shape[0]
                if len(video_slice) < 1:
                    pass
                else:
                    video[i][:slice_shape[0]] = video_slice
        except Exception:
            # FIX: was `except Exception as e`, which shadowed the end-times
            # parameter `e`. Best-effort load: leave rows zero-padded.
            print("video_id: {} error.".format(feature_file))

        for i, v_length in enumerate(max_video_length):
            video_mask[i][:v_length] = [1] * v_length

        # Mask Frame Model <-----
        video_labels_index = [[] for _ in range(len(s))]
        masked_video = video.copy()
        if only_sim is False:
            for i, video_pair_ in enumerate(masked_video):
                for j, _ in enumerate(video_pair_):
                    if j < max_video_length[i]:
                        prob = random.random()
                        # mask token with 15% probability
                        if prob < 0.15:
                            masked_video[i][j] = [0.] * video.shape[-1]
                            video_labels_index[i].append(j)
                        else:
                            video_labels_index[i].append(-1)
                    else:
                        video_labels_index[i].append(-1)
        video_labels_index = np.array(video_labels_index, dtype=np.int64)
        # -----> Mask Frame Model

        return video, video_mask, masked_video, video_labels_index

    def second_to_stamp(self, in_seconds):
        """Convert seconds to an HH:MM:SS timestamp string."""
        m, s = divmod(in_seconds, 60)
        h, m2 = divmod(m, 60)
        return "%02d:%02d:%02d" % (h, m2, s)

    def __getitem__(self, feature_idx):
        """Return text and video tensors for one iteration item; which
        captions are used depends on the MIL sampling mode."""
        if self.sampled_use_mil:  # sample from each video, has a higher priority than use_mil.
            idx = feature_idx
            video_id = self.csv['video_id'].values[idx]
            sub_list = self.iter2video_pairslist_dict[video_id]
            ranint = np.random.randint(0, len(sub_list))
            sub_ids = sub_list[ranint]
        elif self.use_mil:
            video_id, sub_ids = self.iter2video_pairs_dict[feature_idx]
            idx = self.video_id2idx_dict[video_id]
        else:
            idx = feature_idx
            video_id = self.csv['video_id'].values[idx]
            sub_ids = None

        enhance_vmodel = False
        if self.only_sim is False and self.pretrain_enhance_vmodal:
            prob = random.random()
            if prob < 0.15:  # mask all text by rate 0.15
                enhance_vmodel = True

        pairs_text, pairs_mask, pairs_segment, \
        pairs_masked_text, pairs_token_labels, pairs_input_caption_ids, \
        pairs_decoder_mask, pairs_output_caption_ids, \
        starts, ends = self._get_text(video_id, self.n_pair, sub_ids, only_sim=self.only_sim, enhance_vmodel=enhance_vmodel)

        video, video_mask, masked_video, video_labels_index = self._get_video(idx, starts, ends, only_sim=self.only_sim)

        return pairs_text, pairs_mask, pairs_segment, video, video_mask, \
               pairs_masked_text, pairs_token_labels, masked_video, video_labels_index, \
               pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids
| 15,835 | 41.8 | 127 | py |
nuts-ml | nuts-ml-master/nutsml/network.py | """
.. module:: network
:synopsis: Wrapper around other network APIs such as Lasagne, Keras and
Pytorch to enable usage within nuts-flow/ml.
For instance, with a wrapped network one can write:
samples >> build_batch >> network.train() >> log_loss >> Consume()
"""
from __future__ import print_function
import numpy as np
from nutsflow.common import itemize
from nutsflow import (nut_processor, nut_sink, Collect, Map,
Flatten, Get)
@nut_processor
def TrainValNut(batches, func, **kwargs):
    """
    batches >> TrainValNut(func, **kwargs)

    Nut that feeds every incoming batch to a training or validation
    function of a network and yields its result.

    :param iterable over batches batches: Batches to train/validate.
    :param function func: Training or validation function of network.
    :param kwargs kwargs: Keyword arguments passed on to function.
    :return: Result(s) of training/validation function, e.g. loss, accuracy, ...
    :rtype: float or array/tuple of floats
    """
    for minibatch in batches:
        yield func(*minibatch, **kwargs)
@nut_processor
def PredictNut(batches, func, flatten=True):
    """
    batches >> PredictNut(func)

    Nut that runs a network prediction function over batches.

    :param iterable over batches batches: Batches to create predictions for.
    :param function func: Prediction function.
    :param bool flatten: True: yield the individual predictions within each
        batch instead of the whole prediction batch.
    :return: Result(s) of prediction.
    :rtype: typically array with class probabilities (softmax vector)
    """
    for minibatch in batches:
        batch_preds = func(minibatch)
        if not flatten:
            yield batch_preds
        else:
            yield from batch_preds
@nut_sink
def EvalNut(batches, network, metrics, compute, predcol=None):
    """
    batches >> EvalNut(network, metrics)

    Create nut to evaluate network performance for given metrics.
    Returned when network.evaluate() is called.

    :param iterable over batches batches: Batches to evaluate
    :param nutmsml.Network network:
    :param list of functions metrics: List of functions that compute
       some metric, e.g. accuracy, F1, kappa-score.
       Each metric function must take vectors with true and
       predicted classes/probabilities and must compute the
       metric over the entire input (not per sample/mini-batch).
    :param function compute: Function of the form f(metric, targets, preds)
       that computes the given metric (e.g. mean accuracy) for the given
       targets and predictions.
    :param int|None predcol: Index of column in prediction to extract
       for evaluation. If None a single prediction output is
       expected.
    :return: Result(s) of evaluation, e.g. accuracy, precision, ...
    :rtype: float or tuple of floats if there is more than one metric
    """
    targets = []

    def accumulate(batch):
        # Side effect: collect the target column of every batch so that
        # predictions can be matched against targets after the flow ran.
        inputs, outputs = batch
        target = outputs[0] if isinstance(outputs, list) else outputs
        targets.extend(target)
        return inputs

    preds = (batches >> Map(accumulate) >> network.predict(flatten=False) >>
             Get(predcol) >> Flatten() >> Collect())
    targets, preds = np.vstack(targets), np.vstack(preds)
    # FIX: np.float was removed in NumPy >= 1.24; np.float64 is the
    # equivalent explicit dtype.
    targets = targets.astype(np.float64)
    results = tuple(compute(m, targets, preds) for m in metrics)
    return results if len(results) > 1 else results[0]
class Network(object):
    """
    Abstract base class for networks. Allows to wrap existing network APIs
    such as Lasagne, Keras or Pytorch into an API that enables direct usage
    of the network as a Nut in a nuts flow.
    """

    def __init__(self, weightspath):
        """
        Constructs base wrapper for networks.

        :param string weightspath: Filepath where network weights are saved to
           and loaded from.
        """
        self.weightspath = weightspath
        # Score of best scoring network so far; None until the first call
        # to save_best(). Kept as None (not 0) so a legitimate score of
        # 0.0 is not mistaken for "no score yet".
        self.best_score = None

    def _weightspath(self, weightspath):
        """
        Return given weightspath if not None else return self.weightspath.

        :param string|None weightspath: Path to network weights or None.
        :return: Return weightspath
        """
        return self.weightspath if weightspath is None else weightspath

    def train(self):
        """
        Train network

        >>> train_losses = samples >> batcher >> network.train() >> Collect()  # doctest: +SKIP

        :return: Typically returns training loss per batch.
        """
        raise NotImplementedError('Implement train()!')

    def validate(self):
        """
        Validate network

        >>> val_losses = samples >> batcher >> network.validate() >> Collect()  # doctest: +SKIP

        :return: Typically returns validation loss per batch.
        """
        raise NotImplementedError('Implement validate()!')

    def predict(self, flatten=True):
        """
        Get network predictions

        >>> predictions = samples >> batcher >> network.predict() >> Collect()  # doctest: +SKIP

        :param bool flatten: True: return individual predictions instead
           of batched prediction
        :return: Typically returns softmax class probabilities.
        :rtype: ndarray
        """
        raise NotImplementedError('Implement predict()!')

    def evaluate(self, metrics, predcol=None, targetcol=-1):
        """
        Evaluate performance of network for given metrices

        >>> acc, f1 = samples >> batcher >> network.evaluate([accuracy, f1_score])  # doctest: +SKIP

        :param list metric: List of metrics. See EvalNut for details.
        :param int|None predcol: Index of column in prediction to extract
           for evaluation. If None a single prediction output is
           expected.
        :param int targetcol: Index of batch column that contain targets.
        :return: Result for each metric as a tuple or a single float if
           there is only one metric.
        """
        raise NotImplementedError('Implement evaluate()!')

    def save_best(self, score, isloss=True):
        """
        Save weights of best network

        :param float score: Score of the network, e.g. loss, accuracy
        :param bool isloss: True means lower score is better, e.g. loss,
           and the network with the lower score is saved.
        """
        # FIX: was `not self.best_score`, which is also True for a valid
        # best score of 0 (or 0.0) and caused spurious saves; compare
        # explicitly against None instead.
        if (self.best_score is None or
                (isloss is True and score <= self.best_score) or
                (isloss is False and score >= self.best_score)):
            self.best_score = score
            self.save_weights()

    def save_weights(self, weightspath=None):
        """
        Save network weights.

        | network.save_weights()

        :param string weightspath: Path to network weights.
           self.weightspath is used if weightspath is None.
        """
        raise NotImplementedError('Implement save_weights()!')

    def load_weights(self, weightspath=None):
        """
        Load network weights.

        | network.load_weights()

        :param string weightspath: Path to network weights.
           self.weightspath is used if weightspath is None.
        """
        raise NotImplementedError('Implement load_weights()!')

    def print_layers(self):
        """Print description of the network layers"""
        raise NotImplementedError('Implement print_layers()!')
class LasagneNetwork(Network):  # pragma no cover
    """
    Wrapper for Lasagne models: https://lasagne.readthedocs.io/en/latest/
    """

    def __init__(self, out_layer, train_fn, val_fn, pred_fn,
                 weightspath='weights_lasagne_net.npz'):
        """
        Construct wrapper around Lasagne network.

        :param Lasagne layer out_layer: Output layer of Lasagne network.
        :param Theano function train_fn: Training function
        :param Theano function val_fn: Validation function
        :param Theano function pred_fn: Prediction function
        :param string weightspath: Filepath to save/load model weights.
        """
        Network.__init__(self, weightspath)
        self.out_layer = out_layer
        self.train_fn = train_fn
        self.val_fn = val_fn
        self.pred_fn = pred_fn

    @staticmethod
    def _layers(layer, ret_input=False):
        """Return network layers. InputLayer is returned if ret_input==True."""
        # Walks backwards from the output layer via the input_layer
        # attribute chain (assumes a linear, non-branching architecture).
        while hasattr(layer, 'input_layer'):
            yield layer
            layer = layer.input_layer
        if ret_input:
            yield layer

    @staticmethod
    def _get_named_params(network):
        """Return layer parameters and names"""
        # Names are synthesized as '<layer_num>_<param_num>' so weights
        # can be written to / read from a flat npz archive by key.
        for l_num, layer in enumerate(LasagneNetwork._layers(network)):
            for p_num, param in enumerate(layer.get_params()):
                name = '{}_{}'.format(l_num, p_num)
                yield name, param

    def train(self, **kwargs):
        # kwargs are forwarded to the Theano training function per batch.
        return TrainValNut(self.train_fn, **kwargs)

    def validate(self, **kwargs):
        return TrainValNut(self.val_fn, **kwargs)

    def predict(self, flatten=True):
        return PredictNut(self.pred_fn, flatten)

    def evaluate(self, metrics, predcol=None):
        def compute(metric, targets, preds):
            # Theano-based metrics may return symbolic expressions that
            # must be eval()'d to get a concrete value.
            result = metric(targets, preds)
            return result.eval() if hasattr(result, 'eval') else result

        return EvalNut(self, metrics, compute, predcol)

    def save_weights(self, weightspath=None):
        # Collect all shared-variable values and store them in one npz file.
        weightspath = super(LasagneNetwork, self)._weightspath(weightspath)
        weights = {name: p.get_value() for name, p in
                   LasagneNetwork._get_named_params(self.out_layer)}
        np.savez_compressed(weightspath, **weights)

    def load_weights(self, weightspath=None):
        weightspath = super(LasagneNetwork, self)._weightspath(weightspath)
        weights = np.load(weightspath)
        for name, param in LasagneNetwork._get_named_params(self.out_layer):
            param.set_value(weights[name])

    def print_layers(self):
        # Prints one line per layer: index, class name, output shape, then
        # optional filter/pool size (suffixed '//'), dropout p in [..],
        # stride, learning-rate scale, and '[NT]' for non-trainable params.
        import lasagne as la
        layers = list(LasagneNetwork._layers(self.out_layer, ret_input=True))
        for i, layer in enumerate(reversed(layers)):
            name = layer.__class__.__name__
            shape = la.layers.get_output_shape(layer)
            print('{:3d} {:30s} {}'.format(i, name, shape), end=' ')
            if hasattr(layer, 'filter_size'):
                print('{}'.format(layer.filter_size[0]), end='//')
            elif hasattr(layer, 'pool_size'):
                # pool_size may be a plain int or a tuple.
                is_int = isinstance(layer.pool_size, int)
                size = layer.pool_size if is_int else layer.pool_size[0]
                print('{}'.format(size), end='//')
            if hasattr(layer, 'p'):
                print(' [{:.2f}]'.format(layer.p), end='')
            if hasattr(layer, 'stride'):
                print('{}'.format(layer.stride[0]), end='')
            if hasattr(layer, 'learning_rate_scale'):
                if layer.learning_rate_scale != 1.0:
                    lr_scale = layer.learning_rate_scale
                    print(' [lr_scale={:.2f}]'.format(lr_scale), end='')
            if hasattr(layer, 'params'):
                for param in layer.params:
                    if 'trainable' not in layer.params[param]:
                        print(' [NT]', end='')
            print()
class KerasNetwork(Network):  # pragma no cover
    """
    Wrapper for Keras models: https://keras.io/
    """

    def __init__(self, model, weightspath='weights_keras_net.hd5'):
        """
        Construct wrapper around Keras model.

        :param Keras model model: Keras model to wrap. See
           https://keras.io/models/sequential/
           https://keras.io/models/model/
        :param string weightspath: Filepath to save/load model weights.
        """
        Network.__init__(self, weightspath)
        self.model = model

    # Since Keras with tensorflow 2.x the function train_on_batch()
    # does not accept a batch format of [[inputs],[outputs]] anymore,
    # while other similar function such as test_on_batch, predict_on_batch
    # are still fine with it. Therefore only fixing for train_on_batch
    # where sublist are removed if inputs and/or outputs are single items.
    def _train_on_batch(self, x_batches, y_batches, **kwargs):
        x_batches, y_batches = itemize(x_batches), itemize(y_batches)
        # FIX: kwargs was passed as a positional dict, which landed in
        # train_on_batch's sample_weight parameter; expand it as keyword
        # arguments instead.
        return self.model.train_on_batch(x_batches, y_batches, **kwargs)

    def train(self, **kwargs):
        return TrainValNut(self._train_on_batch, **kwargs)

    def validate(self, **kwargs):
        return TrainValNut(self.model.test_on_batch, **kwargs)

    def predict(self, flatten=True):
        return PredictNut(self.model.predict_on_batch, flatten)

    def evaluate(self, metrics, predcol=None):
        def compute(metric, targets, preds):
            # Keras metrics return tensors; convert and reduce per-sample
            # vectors to a single mean value.
            result = metric(targets, preds).numpy()
            is_vector = hasattr(result, '__iter__')
            return float(np.mean(result) if is_vector else result)

        return EvalNut(self, metrics, compute, predcol)

    def save_weights(self, weightspath=None):
        weightspath = super(KerasNetwork, self)._weightspath(weightspath)
        self.model.save_weights(weightspath)

    def load_weights(self, weightspath=None):
        weightspath = super(KerasNetwork, self)._weightspath(weightspath)
        self.model.load_weights(weightspath)

    def print_layers(self):
        self.model.summary()
class PytorchNetwork(Network):  # pragma no cover
    """
    Wrapper for Pytorch models:
    https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html
    """

    def __init__(self, model, weightspath='weights_pytorch_net.pt'):
        """
        Construct wrapper around Pytorch model.

        :param Pytorch model model: Pytorch model to wrap.
           model needs to have three attributes:
           | model.device:, e.g 'cuda:0' or 'cpu'
           | model.optimizer: e.g. torch.optim.SGD
           | model.losses: (list of) loss functions, e.g. F.cross_entropy
        :param string weightspath: Filepath to save/load model weights.
        """
        Network.__init__(self, weightspath)
        # Fail early if the model lacks the attributes the wrapper relies on.
        assert hasattr(model, 'device')
        assert hasattr(model, 'optimizer')
        assert hasattr(model, 'losses')
        self.model = model
        model.to(model.device)

    def _to_tensor(self, batches, flatten):
        """
        Convert batches into Pytorch tensors.

        :param list|ndarray batches: Numpy array or list of arrays.
        :param bool flatten: If true and batch contains only one column
           return single tensor instead of list of tensors.
        :return: List of batches as PyTorch tensors or a single tensor
        :rtype: [tensors] or tensor
        """
        import torch
        T = lambda b: torch.as_tensor(b, device=self.model.device)
        batches = self._to_list(batches)
        # String columns (e.g. sample ids) cannot be converted; skip them.
        tensors = [T(b) for b in batches if not isinstance(b, str)]
        if flatten and len(tensors) == 1:
            return tensors[0]
        return tensors

    def _to_list(self, x):
        """
        Wraps x in a list if it is not already a list.

        :param object x: Any object.
        :return: x wrapped in list
        :rtype: list
        """
        return x if isinstance(x, list) else [x]

    def _train_batch(self, x_batches, y_batches, *args):
        """
        Performs a single gradient step on a batch.

        :param ndarray|[ndarray] x_batches: Input batch or list of batches
        :param ndarray|[ndarray] y_batches: Output batch or list of batches
        :return: losses. If there is multiple outputs then a list with
          the losses for each output and the mean over these losses
          is returned. Otherwise a single float with the loss is returned.
        :rtype: float|[float]
        """
        x_tensors = self._to_tensor(x_batches, True)
        y_tensors = self._to_tensor(y_batches, False)
        model = self.model
        model.optimizer.zero_grad()
        y_preds = self._to_list(model(x_tensors, *args))
        loss_fns = self._to_list(model.losses)
        losses = []
        for loss_fn, y_pred, y_true in zip(loss_fns, y_preds, y_tensors):
            loss = loss_fn(y_pred, y_true)
            # backward() per loss; gradients accumulate before the single
            # optimizer step below.
            loss.backward()
            losses.append(loss.item())
        model.optimizer.step()
        return [np.mean(losses)] + losses if len(losses) > 1 else losses[0]

    def _validate_batch(self, x_batches, y_batches, *args):
        """
        Performs a forward step to compute losses.

        :param [ndarray] x_batches: List of input batches
        :param [ndarray] y_batches: List of output/target batches
        :return: losses. If there is multiple outputs then a list with
          the losses for each output and the mean over these losses
          is returned. Otherwise a single float with the loss is returned.
        :rtype: float|[float]
        """
        import torch
        losses = []
        # no_grad: validation must not build the autograd graph.
        with torch.no_grad():
            x_tensors = self._to_tensor(x_batches, True)
            y_tensors = self._to_tensor(y_batches, False)
            model = self.model
            y_preds = self._to_list(model(x_tensors, *args))
            loss_fns = self._to_list(model.losses)
            for loss_fn, y_pred, y_true in zip(loss_fns, y_preds, y_tensors):
                loss = loss_fn(y_pred, y_true)
                losses.append(loss.item())
        return [np.mean(losses)] + losses if len(losses) > 1 else losses[0]

    def _predict_batch(self, x_batches, *args):
        """
        Performs a forward step to compute output.

        :param [ndarray] x_batches: List of input batches
        :return: network outputs
        :rtype: list
        """
        import torch
        with torch.no_grad():
            x_tensors = self._to_tensor(x_batches, True)
            y_preds = self.model(x_tensors, *args)
        # Move predictions back to the CPU for downstream numpy processing.
        return [p.cpu().numpy() for p in y_preds]

    def train(self, **kwargs):
        # Switch to training mode (enables dropout, batch-norm updates).
        self.model.train()
        return TrainValNut(self._train_batch, **kwargs)

    def validate(self, **kwargs):
        self.model.eval()
        return TrainValNut(self._validate_batch, **kwargs)

    def predict(self, flatten=True):
        self.model.eval()
        return PredictNut(self._predict_batch, flatten)

    def evaluate(self, metrics, predcol=None):
        def compute(metric, targets, preds):
            # Torch metrics return 0-dim tensors; item() extracts the float.
            result = metric(targets, preds)
            return result.item() if hasattr(result, 'item') else result

        self.model.eval()
        return EvalNut(self, metrics, compute, predcol)

    def save_weights(self, weightspath=None):
        import torch
        weightspath = super(PytorchNetwork, self)._weightspath(weightspath)
        torch.save(self.model.state_dict(), weightspath)

    def load_weights(self, weightspath=None):
        import torch
        weightspath = super(PytorchNetwork, self)._weightspath(weightspath)
        self.model.load_state_dict(torch.load(weightspath))

    def print_layers(self, input_shape=None):
        """
        Print network architecture (and layer dimensions).

        :param tuple|None input_shape: (C, H, W) or None
          If None, layer dimensions and param numbers are not printed.
        """
        if input_shape:
            from torchsummary import summary
            device = self.model.device[:4]  # remove GPU id, e.g. cuda:0
            summary(self.model, input_shape, device=device)
        else:
            print(str(self.model))
| 19,583 | 36.302857 | 100 | py |
nuts-ml | nuts-ml-master/nutsml/examples/pytorch_/mnist/cnn_train.py | """
.. module:: cnn_train
:synopsis: Example nuts-ml pipeline for training a CNN on MNIST
"""
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import nutsflow as nf
import nutsml as nm
import numpy as np
from nutsml.network import PytorchNetwork
from utils import download_mnist, load_mnist
BATCHSIZE = 64  # mini-batch size for training, validation and prediction
EPOCHS = 3  # number of passes over the training data
class Model(nn.Module):
    """CNN classifier for 28x28 MNIST digit images.

    Exposes the ``device``, ``losses`` and ``optimizer`` attributes that
    the nuts-ml PytorchNetwork wrapper requires.
    """

    def __init__(self, device='cpu'):
        """Construct model on given device, e.g. 'cpu' or 'cuda'"""
        super(Model, self).__init__()
        conv_stack = [
            nn.Conv2d(1, 10, kernel_size=5),
            nn.MaxPool2d(2),
            nn.ReLU(True),
            nn.BatchNorm2d(10),
            nn.Conv2d(10, 20, kernel_size=5),
            nn.MaxPool2d(2),
            nn.ReLU(True),
            nn.BatchNorm2d(20),
        ]
        classifier_head = [
            nn.Flatten(),
            nn.Linear(320, 50),
            nn.ReLU(True),
            nn.Linear(50, 10),
        ]
        self.layers = nn.Sequential(*conv_stack, *classifier_head)
        self.to(device)  # set device before constructing optimizer

        # Attributes required by nutsml PytorchNetwork:
        self.device = device  # 'cuda', 'cuda:0' or 'gpu'
        self.losses = F.cross_entropy  # can be list of loss functions
        self.optimizer = optim.Adam(self.parameters())

    def forward(self, x):
        """Return class logits for an input batch x of shape (N, 1, 28, 28)."""
        return self.layers(x)
# Batch builder for training/validation: column 0 is the image input
# (float32, with channel axis added), column 1 the target digit (int64).
build_batch = (nm.BuildBatch(BATCHSIZE)
               .input(0, 'image', 'float32', True)
               .output(1, 'number', 'int64'))

# Batch builder for prediction: image input only, no target column.
build_pred_batch = (nm.BuildBatch(BATCHSIZE)
                    .input(0, 'image', 'float32', True))

# Image augmentation on column 0; presumably each '.by' entry is applied
# with the given probability and parameter ranges (see nutsml AugmentImage
# docs to confirm). 'identical' passes unchanged images through.
augment = (nm.AugmentImage(0)
           .by('identical', 1)
           .by('translate', 0.2, [-3, +3], [-3, +3])
           .by('rotate', 0.2, [-30, +30])
           .by('shear', 0.2, [0, 0.2])
           .by('elastic', 0.2, [5, 5], [100, 100], [0, 100])
           )

# Reshape a flat 784-vector (assumed scaled to [0, 1] -- confirm against
# load_mnist) into a 28x28 uint8 grayscale image.
vec2img = nf.MapCol(0, lambda x: (x.reshape([28, 28]) * 255).astype('uint8'))
def accuracy(y_true, y_pred):
    """Compute accuracy in percent.

    :param y_true: iterable of true class labels.
    :param y_pred: iterable of predicted class probability vectors
        (softmax outputs); the argmax is the predicted class.
    :return: accuracy in percent [0..100].
    :rtype: float
    """
    # Plain numpy replaces the former function-level sklearn import;
    # accuracy_score on label vectors is just the fraction of matches.
    y_cls = [int(np.argmax(yp)) for yp in y_pred]
    correct = sum(1 for t, p in zip(y_true, y_cls) if t == p)
    return 100 * correct / len(y_cls)
def train(network, x, y, epochs):
    """Train network for given number of epochs.

    :param network: wrapped network providing a train() nut.
    :param x: flattened MNIST image vectors (presumably scaled to [0, 1];
        vec2img rescales by 255 -- confirm against load_mnist).
    :param y: digit class labels.
    :param int epochs: number of passes over the training data.
    """
    for epoch in range(epochs):
        print('epoch', epoch + 1)
        # reshape vectors to images, augment, shuffle, batch, then train
        losses = (zip(x, y) >> nf.PrintProgress(x) >> vec2img >>
                  augment >> nf.Shuffle(1000) >> build_batch >>
                  network.train() >> nf.Collect())
        print('train loss: %.4f' % np.mean(losses))
def validate(network, x, y):
    """Compute validation/test loss (= mean over batch losses).

    :param network: wrapped network providing a validate() nut.
    :param x: flattened MNIST image vectors.
    :param y: digit class labels.
    """
    losses = (zip(x, y) >> nf.PrintProgress(x) >> vec2img >>
              build_batch >> network.validate() >> nf.Collect())
    print('val loss: %.4f' % np.mean(losses))
def predict(network, x, y):
    """Compute network outputs and print accuracy.

    :param network: wrapped network providing a predict() nut.
    :param x: flattened MNIST image vectors.
    :param y: digit class labels (used only to compute accuracy).
    """
    # predict() flattens batches, so preds is one softmax vector per sample
    preds = (zip(x, y) >> nf.PrintProgress(x) >> vec2img >>
             build_pred_batch >> network.predict() >> nf.Collect())
    acc = accuracy(y, preds)
    print('test acc %.1f %%' % acc)
def evaluate(network, x, y):
    """Evaluate network performance (here accuracy).

    :param network: wrapped network providing an evaluate() sink.
    :param x: flattened MNIST image vectors.
    :param y: digit class labels.
    :return: accuracy in percent (single metric -> single float).
    """
    metrics = [accuracy]
    result = (zip(x, y) >> nf.PrintProgress(x) >> vec2img >>
              build_batch >> network.evaluate(metrics))
    return result
def view_misclassified_images(network, x, y):
    """Show misclassified images.

    Runs prediction over all samples, keeps only those where the predicted
    class differs from the true label, and displays each with a
    'true:<t> pred:<p>' annotation (1 second pause per image).
    """
    make_label = nf.Map(lambda s: (s[0], 'true:%d pred:%d' % (s[1], s[2])))
    filter_error = nf.Filter(lambda s: s[1] != s[2])
    view_image = nm.ViewImageAnnotation(0, 1, pause=1)
    # argmax converts softmax vectors to predicted class indices
    preds = (zip(x, y) >> vec2img >> build_pred_batch >>
             network.predict() >> nf.Map(np.argmax) >> nf.Collect())
    (zip(x, y, preds) >> vec2img >> filter_error >> make_label >>
     view_image >> nf.Consume())
def view_augmented_images(x, y, n=10):
    """Show n augmented images"""
    show = nm.ViewImageAnnotation(0, 1, pause=1)
    samples = zip(x, y) >> vec2img >> augment >> nf.Take(n)
    samples >> show >> nf.Consume()
if __name__ == '__main__':
    print('loading data...')
    mnist_file = download_mnist()
    x_train, y_train, x_test, y_test = load_mnist(mnist_file)

    print('creating model...')
    dev = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    network = PytorchNetwork(Model(dev))
    # network.load_weights()
    network.print_layers((1, 28, 28))

    print('training ...')
    train(network, x_train, y_train, EPOCHS)
    network.save_weights()

    print('evaluating ...')
    print('train acc:', evaluate(network, x_train, y_train))
    print('test acc:', evaluate(network, x_test, y_test))

    print('validating ...')
    validate(network, x_test, y_test)

    print('predicting ...')
    predict(network, x_test, y_test)

    # print('viewing images...')
    # view_augmented_images(x_test, y_test)
    # print('showing errors ...')
    # view_misclassified_images(network, x_test, y_test)
| 5,025 | 30.810127 | 79 | py |
nuts-ml | nuts-ml-master/nutsml/examples/pytorch_/mnist/mlp_train.py | """
.. module:: mlp_train
   :synopsis: Example nuts-ml pipeline for training an MLP on MNIST
"""
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import nutsflow as nf
import nutsml as nm
import numpy as np
from nutsml.network import PytorchNetwork
from utils import download_mnist, load_mnist
class Model(nn.Module):
    """Three-layer fully connected network (MLP) for MNIST digits."""

    def __init__(self, device):
        """Build the MLP and place it on `device`, e.g. 'cpu' or 'cuda'."""
        super(Model, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 500)
        self.fc2 = nn.Linear(500, 256)
        self.fc3 = nn.Linear(256, 10)
        # Move parameters first so the optimizer sees device tensors.
        self.to(device)

        # Attributes PytorchNetwork expects on a wrapped model.
        self.device = device  # 'cuda', 'cuda:0' or 'gpu'
        self.losses = nn.CrossEntropyLoss()  # can be list of loss functions
        self.optimizer = optim.Adam(self.parameters())

    def forward(self, x):
        """Map a batch of (flattened) images to 10 class logits."""
        flat = x.view(-1, 28 * 28)
        hidden = F.relu(self.fc2(F.relu(self.fc1(flat))))
        return self.fc3(hidden)
def accuracy(y_true, y_pred):
    """Return classification accuracy in percent.

    :param y_true: iterable of integer class labels.
    :param y_pred: iterable of per-class score vectors (e.g. network
        outputs); the predicted class is the argmax of each vector.
    :return: percentage (0..100) of samples whose argmax matches the label.
    :raise ValueError: if inputs are empty or have different lengths.
    """
    # Computed directly instead of via sklearn.metrics.accuracy_score to
    # avoid re-importing a heavy third-party package on every call.
    labels = list(y_true)
    predicted = [scores.argmax() for scores in y_pred]
    if len(labels) != len(predicted):
        raise ValueError('y_true and y_pred must have the same length')
    if not labels:
        raise ValueError('cannot compute accuracy of empty input')
    correct = sum(1 for t, p in zip(labels, predicted) if t == p)
    return 100 * correct / len(labels)
def evaluate(network, x, y):
    """Evaluate network performance (here accuracy)"""
    # Batch vectors (input) and integer labels (output) in groups of 64.
    batcher = (nm.BuildBatch(64)
               .input(0, 'vector', 'float32')
               .output(1, 'number', 'int64'))
    return zip(x, y) >> batcher >> network.evaluate([accuracy])
def train(network, epochs=3):
    """Train network for given number of epochs"""
    print('loading data...')
    filepath = download_mnist()
    x_train, y_train, x_test, y_test = load_mnist(filepath)

    loss_plot = nm.PlotLines(None, every_sec=0.2)
    batcher = (nm.BuildBatch(128)
               .input(0, 'vector', 'float32')
               .output(1, 'number', 'int64'))

    for e in range(epochs):
        print('epoch', e + 1)
        # Lazy pipeline: progress -> shuffle -> batch -> train -> plot.
        samples = (zip(x_train, y_train) >> nf.PrintProgress(x_train) >>
                   nf.Shuffle(1000))
        losses = (samples >> batcher >> network.train() >> loss_plot >>
                  nf.Collect())
        acc_te = evaluate(network, x_test, y_test)
        acc_tr = evaluate(network, x_train, y_train)
        print('train loss : {:.6f}'.format(np.mean(losses)))
        print('train acc  : {:.1f}'.format(acc_tr))
        print('test acc   : {:.1f}'.format(acc_te))
if __name__ == '__main__':
    print('creating model...')
    dev = 'cuda' if torch.cuda.is_available() else 'cpu'
    network = PytorchNetwork(Model(dev))
    # network.load_weights()
    network.print_layers((28 * 28,))

    print('training network...')
    train(network, epochs=3)
| 2,959 | 30.157895 | 76 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.