repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
PoseTriplet | PoseTriplet-main/hallucinator/code_rib/train.py | import torch
import sys, os
sys.path.insert(0, os.path.dirname(__file__))
from LaFan import LaFan1
from torch.utils.data import Dataset, DataLoader
from model import StateEncoder, \
OffsetEncoder, \
TargetEncoder, \
LSTM, \
Decoder, \
ShortMotionDiscriminator, \
LongMotionDiscriminator
from skeleton import Skeleton
import torch.optim as optim
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm
from functions import gen_ztta
import yaml
import time
import shutil
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='train-base.yaml')
args = parser.parse_args()
# opt = yaml.load(open('./config/test-base.yaml', 'r').read())
opt = yaml.load(open('./config/' + args.cfg, 'r').read())
# opt = yaml.load(open('.\\config\\train-base.yaml', 'r').read())
# opt = yaml.load(open('./config/train-base.yaml', 'r').read())
stamp = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
stamp = stamp + '-' + opt['train']['method']
# print(local_time)
# assert 0
if opt['train']['debug']:
stamp = 'debug'
log_dir = os.path.join('../log', stamp)
model_dir = os.path.join('../model', stamp)
if not os.path.exists(log_dir):
os.makedirs(log_dir, exist_ok=True)
if not os.path.exists(model_dir):
os.makedirs(model_dir, exist_ok=True)
def copydirs(from_file, to_file):
if not os.path.exists(to_file):
os.makedirs(to_file)
files = os.listdir(from_file)
for f in files:
if os.path.isdir(from_file + '/' + f):
copydirs(from_file + '/' + f, to_file + '/' + f)
else:
if '.git' not in f and '.zip' not in f and '.bvh' not in f:
# if '.py' in f or '.yaml' in f or '.yml' in f:
shutil.copy(from_file + '/' + f, to_file + '/' + f)
copydirs('./', log_dir + '/src')
## initilize the skeleton ##
skeleton_mocap = Skeleton(offsets=opt['data']['offsets'], parents=opt['data']['parents'])
skeleton_mocap.cuda()
skeleton_mocap.remove_joints(opt['data']['joints_to_remove'])
## load train data ##
lafan_data_train = LaFan1(opt['data']['data_dir'], \
seq_len = opt['model']['seq_length'], \
offset = opt['data']['offset'],\
train = True, debug=opt['train']['debug'])
x_mean = lafan_data_train.x_mean.cuda()
x_std = lafan_data_train.x_std.cuda().view(1, 1, opt['model']['num_joints'], 3)
if opt['train']['debug']:
opt['data']['num_workers'] = 1
lafan_loader_train = DataLoader(lafan_data_train, \
batch_size=opt['train']['batch_size'], \
shuffle=True, num_workers=opt['data']['num_workers'])
## load test data ##
# lafan_data_test = LaFan1(opt['data']['data_dir'], \
# seq_len = opt['model']['seq_length'], \
# train = False, debug=False)
# lafan_loader_test = DataLoader(lafan_data_test, \
# batch_size=opt['train']['batch_size'], \
# shuffle=True, num_workers=opt['data']['num_workers'])
## initialize model ##
state_encoder = StateEncoder(in_dim=opt['model']['state_input_dim'])
state_encoder = state_encoder.cuda()
offset_encoder = OffsetEncoder(in_dim=opt['model']['offset_input_dim'])
offset_encoder = offset_encoder.cuda()
target_encoder = TargetEncoder(in_dim=opt['model']['target_input_dim'])
target_encoder = target_encoder.cuda()
lstm = LSTM(in_dim=opt['model']['lstm_dim'], hidden_dim = opt['model']['lstm_dim'] * 2)
lstm = lstm.cuda()
decoder = Decoder(in_dim=opt['model']['lstm_dim'] * 2, out_dim=opt['model']['state_input_dim'])
decoder = decoder.cuda()
if len(opt['train']['pretrained']) > 0:
state_encoder.load_state_dict(torch.load(os.path.join(opt['train']['pretrained'], 'state_encoder.pkl')))
offset_encoder.load_state_dict(torch.load(os.path.join(opt['train']['pretrained'], 'offset_encoder.pkl')))
target_encoder.load_state_dict(torch.load(os.path.join(opt['train']['pretrained'], 'target_encoder.pkl')))
lstm.load_state_dict(torch.load(os.path.join(opt['train']['pretrained'], 'lstm.pkl')))
decoder.load_state_dict(torch.load(os.path.join(opt['train']['pretrained'], 'decoder.pkl')))
print('generator model loaded')
if opt['train']['use_adv']:
short_discriminator = ShortMotionDiscriminator(in_dim = (opt['model']['num_joints'] * 3 * 2))
short_discriminator = short_discriminator.cuda()
long_discriminator = LongMotionDiscriminator(in_dim = (opt['model']['num_joints'] * 3 * 2))
long_discriminator = long_discriminator.cuda()
if len(opt['train']['pretrained']) > 0:
short_discriminator.load_state_dict(torch.load(os.path.join(opt['train']['pretrained'], 'short_discriminator.pkl')))
long_discriminator.load_state_dict(torch.load(os.path.join(opt['train']['pretrained'], 'long_discriminator.pkl')))
print('discriminator model loaded')
# print('ztta:', ztta.size())
# assert 0
## initilize optimizer_g ##
optimizer_g = optim.Adam(lr = opt['train']['lr'], params = list(state_encoder.parameters()) +\
list(offset_encoder.parameters()) +\
list(target_encoder.parameters()) +\
list(lstm.parameters()) +\
list(decoder.parameters()), \
betas = (opt['train']['beta1'], opt['train']['beta2']), \
weight_decay = opt['train']['weight_decay'])
if len(opt['train']['pretrained']) > 0:
optimizer_g.load_state_dict(torch.load(os.path.join(opt['train']['pretrained'], 'optimizer_g.pkl')))
print('optimizer_g model loaded')
## initialize optimizer_d ##
if opt['train']['use_adv']:
optimizer_d = optim.Adam(lr = opt['train']['lr'] * 0.1, params = list(short_discriminator.parameters()) +\
list(long_discriminator.parameters()), \
betas = (opt['train']['beta1'], opt['train']['beta2']), \
weight_decay = opt['train']['weight_decay'])
if len(opt['train']['pretrained']) > 0:
optimizer_d.load_state_dict(torch.load(os.path.join(opt['train']['pretrained'], 'optimizer_d.pkl')))
print('optimizer_d model loaded')
writer = SummaryWriter(log_dir)
loss_total_min = 10000000.0
for epoch in range(opt['train']['num_epoch']):
state_encoder.train()
offset_encoder.train()
target_encoder.train()
lstm.train()
decoder.train()
loss_total_list = []
if opt['train']['progressive_training']:
## get positional code ##
if opt['train']['use_ztta']:
ztta = gen_ztta(length = lafan_data_train.cur_seq_length).cuda()
if (10 + (epoch // 2)) < opt['model']['seq_length']:
lafan_data_train.cur_seq_length = 10 + (epoch // 2)
else:
lafan_data_train.cur_seq_length = opt['model']['seq_length']
else:
## get positional code ##
if opt['train']['use_ztta']:
lafan_data_train.cur_seq_length = opt['model']['seq_length']
ztta = gen_ztta(length = opt['model']['seq_length']).cuda()
for i_batch, sampled_batch in tqdm(enumerate(lafan_loader_train)):
# print(i_batch, sample_batched['local_q'].size())
loss_pos = 0
loss_quat = 0
loss_contact = 0
loss_root = 0
# with torch.no_grad():
if True:
# state input
local_q = sampled_batch['local_q'].cuda()
root_v = sampled_batch['root_v'].cuda()
contact = sampled_batch['contact'].cuda()
# offset input
root_p_offset = sampled_batch['root_p_offset'].cuda()
local_q_offset = sampled_batch['local_q_offset'].cuda()
local_q_offset = local_q_offset.view(local_q_offset.size(0), -1)
# target input
target = sampled_batch['target'].cuda()
target = target.view(target.size(0), -1)
# root pos
root_p = sampled_batch['root_p'].cuda()
# X
X = sampled_batch['X'].cuda()
if False:
print('local_q:', local_q.size(), \
'root_v:', root_v.size(), \
'contact:', contact.size(), \
'root_p_offset:', root_p_offset.size(), \
'local_q_offset:', local_q_offset.size(), \
'target:', target.size())
lstm.init_hidden(local_q.size(0))
h_list = []
pred_list = []
pred_list.append(X[:,0])
# for t in range(opt['model']['seq_length'] - 1):
for t in range(lafan_data_train.cur_seq_length - 1):
# root pos
if t == 0:
root_p_t = root_p[:,t]
local_q_t = local_q[:,t]
local_q_t = local_q_t.view(local_q_t.size(0), -1)
contact_t = contact[:,t]
root_v_t = root_v[:,t]
else:
root_p_t = root_pred[0]
local_q_t = local_q_pred[0]
contact_t = contact_pred[0]
root_v_t = root_v_pred[0]
# state input
state_input = torch.cat([local_q_t, root_v_t, contact_t], -1)
# offset input
# print('root_p_offset:', root_p_offset.size(), 'root_p_t:', root_p_t.size())
# print('local_q_offset:', local_q_offset.size(), 'local_q_t:', local_q_t.size())
root_p_offset_t = root_p_offset - root_p_t
local_q_offset_t = local_q_offset - local_q_t
# print('root_p_offset_t:', root_p_offset_t.size(), 'local_q_offset_t:', local_q_offset_t.size())
offset_input = torch.cat([root_p_offset_t, local_q_offset_t], -1)
# target input
target_input = target
# print('state_input:',state_input.size())
h_state = state_encoder(state_input)
h_offset = offset_encoder(offset_input)
h_target = target_encoder(target_input)
if opt['train']['use_ztta']:
h_state += ztta[:, t]
h_offset += ztta[:, t]
h_target += ztta[:, t]
# print('h_state:', h_state.size(),\
# 'h_offset:', h_offset.size(),\
# 'h_target:', h_target.size())
if opt['train']['use_adv']:
tta = lafan_data_train.cur_seq_length - 2 - t
if tta < 5:
lambda_target = 0.0
elif tta >=5 and tta < 30:
lambda_target = (tta - 5) / 25.0
else:
lambda_target = 1.0
h_offset += 0.5 * lambda_target * torch.cuda.FloatTensor(h_offset.size()).normal_()
h_target += 0.5 * lambda_target * torch.cuda.FloatTensor(h_target.size()).normal_()
h_in = torch.cat([h_state, h_offset, h_target], -1).unsqueeze(0)
h_out = lstm(h_in)
# print('h_out:', h_out.size())
h_pred, contact_pred = decoder(h_out)
local_q_v_pred = h_pred[:,:,:opt['model']['target_input_dim']]
local_q_pred = local_q_v_pred + local_q_t
# print('q_pred:', q_pred.size())
local_q_pred_ = local_q_pred.view(local_q_pred.size(0), local_q_pred.size(1), -1, 4)
local_q_pred_ = local_q_pred_ / torch.norm(local_q_pred_, dim = -1, keepdim = True)
root_v_pred = h_pred[:,:,opt['model']['target_input_dim']:]
root_pred = root_v_pred + root_p_t
# print(''contact:'', contact_pred.size())
# print('root_pred:', root_pred.size())
pos_pred = skeleton_mocap.forward_kinematics(local_q_pred_, root_pred)
pos_next = X[:,t+1]
local_q_next = local_q[:,t+1]
local_q_next = local_q_next.view(local_q_next.size(0), -1)
root_p_next = root_p[:,t+1]
contact_next = contact[:,t+1]
# print(pos_pred.size(), x_std.size())
loss_pos += torch.mean(torch.abs(pos_pred[0] - pos_next) / x_std) / lafan_data_train.cur_seq_length #opt['model']['seq_length']
loss_quat += torch.mean(torch.abs(local_q_pred[0] - local_q_next)) / lafan_data_train.cur_seq_length #opt['model']['seq_length']
loss_root += torch.mean(torch.abs(root_pred[0] - root_p_next) / x_std[:,:,0]) / lafan_data_train.cur_seq_length #opt['model']['seq_length']
loss_contact += torch.mean(torch.abs(contact_pred[0] - contact_next)) / lafan_data_train.cur_seq_length #opt['model']['seq_length']
pred_list.append(pos_pred[0])
if opt['train']['use_adv']:
fake_input = torch.cat([x.reshape(x.size(0), -1).unsqueeze(-1) for x in pred_list], -1)
fake_v_input = torch.cat([fake_input[:,:,1:] - fake_input[:,:,:-1], torch.zeros_like(fake_input[:,:,0:1]).cuda()], -1)
fake_input = torch.cat([fake_input, fake_v_input], 1)
real_input = torch.cat([X[:, i].view(X.size(0), -1).unsqueeze(-1) for i in range(lafan_data_train.cur_seq_length)], -1)
real_v_input = torch.cat([real_input[:,:,1:] - real_input[:,:,:-1], torch.zeros_like(real_input[:,:,0:1]).cuda()], -1)
real_input = torch.cat([real_input, real_v_input], 1)
optimizer_d.zero_grad()
short_fake_logits = torch.mean(short_discriminator(fake_input.detach())[:,0], 1)
short_real_logits = torch.mean(short_discriminator(real_input)[:,0], 1)
short_d_fake_loss = torch.mean((short_fake_logits) ** 2)
short_d_real_loss = torch.mean((short_real_logits - 1) ** 2)
short_d_loss = (short_d_fake_loss + short_d_real_loss) / 2.0
long_fake_logits = torch.mean(long_discriminator(fake_input.detach())[:,0], 1)
long_real_logits = torch.mean(long_discriminator(real_input)[:,0], 1)
long_d_fake_loss = torch.mean((long_fake_logits) ** 2)
long_d_real_loss = torch.mean((long_real_logits - 1) ** 2)
long_d_loss = (long_d_fake_loss + long_d_real_loss) / 2.0
total_d_loss = opt['train']['loss_adv_weight'] * long_d_loss + \
opt['train']['loss_adv_weight'] * short_d_loss
total_d_loss.backward()
optimizer_d.step()
optimizer_g.zero_grad()
pred_pos = torch.cat([x.reshape(x.size(0), -1).unsqueeze(-1) for x in pred_list], -1)
pred_vel = (pred_pos[:,opt['data']['foot_index'],1:] - pred_pos[:,opt['data']['foot_index'],:-1])
pred_vel = pred_vel.view(pred_vel.size(0), 4, 3, pred_vel.size(-1))
loss_slide = torch.mean(torch.abs(pred_vel * contact[:,:-1].permute(0, 2, 1).unsqueeze(2)))
loss_total = opt['train']['loss_pos_weight'] * loss_pos + \
opt['train']['loss_quat_weight'] * loss_quat + \
opt['train']['loss_root_weight'] * loss_root + \
opt['train']['loss_slide_weight'] * loss_slide + \
opt['train']['loss_contact_weight'] * loss_contact
if opt['train']['use_adv']:
short_fake_logits = torch.mean(short_discriminator(fake_input)[:,0], 1)
short_g_loss = torch.mean((short_fake_logits - 1) ** 2)
long_fake_logits = torch.mean(long_discriminator(fake_input)[:,0], 1)
long_g_loss = torch.mean((long_fake_logits - 1) ** 2)
total_g_loss = opt['train']['loss_adv_weight'] * long_g_loss + \
opt['train']['loss_adv_weight'] * short_g_loss
loss_total += total_g_loss
loss_total.backward()
torch.nn.utils.clip_grad_norm_(state_encoder.parameters(), 0.5)
torch.nn.utils.clip_grad_norm_(offset_encoder.parameters(), 0.5)
torch.nn.utils.clip_grad_norm_(target_encoder.parameters(), 0.5)
torch.nn.utils.clip_grad_norm_(lstm.parameters(), 0.5)
torch.nn.utils.clip_grad_norm_(decoder.parameters(), 0.5)
optimizer_g.step()
# print("epoch: %03d, batch: %03d, pos: %.3f, quat: %.3f, root: %.3f, cont: %.3f"%\
# (epoch, \
# i_batch, \
# loss_pos.item(), \
# loss_quat.item(), \
# loss_root.item(), \
# loss_contact.item()))
writer.add_scalar('loss_pos', loss_pos.item(), global_step = epoch * 317 + i_batch)
writer.add_scalar('loss_quat', loss_quat.item(), global_step = epoch * 317 + i_batch)
writer.add_scalar('loss_root', loss_root.item(), global_step = epoch * 317 + i_batch)
writer.add_scalar('loss_slide', loss_slide.item(), global_step = epoch * 317 + i_batch)
writer.add_scalar('loss_contact', loss_contact.item(), global_step = epoch * 317 + i_batch)
writer.add_scalar('loss_total', loss_total.item(), global_step = epoch * 317 + i_batch)
if opt['train']['use_adv']:
writer.add_scalar('loss_short_g', short_g_loss.item(), global_step = epoch * 317 + i_batch)
writer.add_scalar('loss_long_g', long_g_loss.item(), global_step = epoch * 317 + i_batch)
writer.add_scalar('loss_short_d_real', short_d_real_loss.item(), global_step = epoch * 317 + i_batch)
writer.add_scalar('loss_short_d_fake', short_d_fake_loss.item(), global_step = epoch * 317 + i_batch)
writer.add_scalar('loss_long_d_real', long_d_real_loss.item(), global_step = epoch * 317 + i_batch)
writer.add_scalar('loss_long_d_fake', long_d_fake_loss.item(), global_step = epoch * 317 + i_batch)
loss_total_list.append(loss_total.item())
loss_total_cur = np.mean(loss_total_list)
if loss_total_cur < loss_total_min:
loss_total_min = loss_total_cur
torch.save(state_encoder.state_dict(), model_dir + '/state_encoder.pkl')
torch.save(target_encoder.state_dict(), model_dir + '/target_encoder.pkl')
torch.save(offset_encoder.state_dict(), model_dir + '/offset_encoder.pkl')
torch.save(lstm.state_dict(), model_dir + '/lstm.pkl')
torch.save(decoder.state_dict(), model_dir + '/decoder.pkl')
torch.save(optimizer_g.state_dict(), model_dir + '/optimizer_g.pkl')
if opt['train']['use_adv']:
torch.save(short_discriminator.state_dict(), model_dir + '/short_discriminator.pkl')
torch.save(long_discriminator.state_dict(), model_dir + '/long_discriminator.pkl')
torch.save(optimizer_d.state_dict(), model_dir + '/optimizer_d.pkl')
print("train epoch: %03d, cur total loss:%.3f, cur best loss:%.3f" % (epoch, loss_total_cur, loss_total_min))
| 21,083 | 55.074468 | 159 | py |
PoseTriplet | PoseTriplet-main/hallucinator/code_rib/probe/plot_clusters.py | import torch
import os
import sys
import argparse
import importlib
import numpy as np
from os.path import join as pjoin
BASEPATH = os.path.dirname(__file__)
sys.path.insert(0, pjoin(BASEPATH))
sys.path.insert(0, pjoin(BASEPATH, '..'))
from data_loader import get_dataloader
from latent_plot_utils import get_all_plots, get_demo_plots
from trainer import Trainer
from py_utils import to_float, ensure_dirs
def get_all_codes(cfg, output_path):
print(output_path)
if os.path.exists(output_path):
return np.load(output_path, allow_pickle=True)['data'].item()
ensure_dirs(os.path.dirname(output_path))
print("start over")
# Dataloader
train_loader = get_dataloader(cfg, 'train', shuffle=False)
test_loader = get_dataloader(cfg, 'test', shuffle=False)
# Trainer
trainer = Trainer(cfg)
trainer.to(cfg.device)
trainer.resume()
with torch.no_grad():
vis_dicts = {}
for phase, loader in [['train', train_loader],
['test', test_loader]]:
vis_dict = None
for t, data in enumerate(loader):
vis_codes = trainer.get_latent_codes(data)
if vis_dict is None:
vis_dict = {}
for key, value in vis_codes.items():
vis_dict[key] = [value]
else:
for key, value in vis_codes.items():
vis_dict[key].append(value)
for key, value in vis_dict.items():
if phase == "test" and key == "content_code":
continue
if key == "meta":
secondary_keys = value[0].keys()
num = len(value)
vis_dict[key] = {
secondary_key: [to_float(item) for i in range(num) for item in value[i][secondary_key]]
for secondary_key in secondary_keys}
else:
vis_dict[key] = torch.cat(vis_dict[key], 0)
vis_dict[key] = vis_dict[key].cpu().numpy()
vis_dict[key] = to_float(vis_dict[key].reshape(vis_dict[key].shape[0], -1))
vis_dicts[phase] = vis_dict
np.savez_compressed(output_path, data=vis_dicts)
return vis_dicts
def plot_all(cfg):
output_path = pjoin(cfg.main_dir, 'test_probe')
vis_dicts = get_all_codes(cfg, pjoin(output_path, 'output_codes.npz'))
get_all_plots(vis_dicts, output_path, {}, 0, summary=False,
style_cluster_protocols=('tsne'),
separate_compute=True)
def plot_demo(cfg):
BASEPATH = pjoin(os.path.dirname(__file__), '..')
output_path = pjoin(BASEPATH, "demo_results", "figures")
vis_dicts = get_all_codes(cfg, pjoin(output_path, 'output_codes.npz'))
get_demo_plots(vis_dicts, output_path)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str)
parser.add_argument('--batch_size', type=int)
parser.add_argument('--config', type=str, default='config')
return parser.parse_args()
def main(args):
config_module = importlib.import_module(args.config)
config = config_module.Config()
config.initialize(args)
plot_demo(config)
if __name__ == '__main__':
args = parse_args()
main(args)
| 3,359 | 31.307692 | 111 | py |
PoseTriplet | PoseTriplet-main/hallucinator/code_rib/probe/latent_plot_utils.py | import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import torch
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from matplotlib import cm
from matplotlib.legend_handler import HandlerLine2D, HandlerTuple
import tikzplotlib
from os.path import join as pjoin
BASEPATH = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, BASEPATH)
sys.path.insert(0, pjoin(BASEPATH, '..'))
from py_utils import ensure_dirs
def distinct_labels_and_indices(labels):
distinct_labels = list(set(labels))
distinct_labels.sort()
num_labels = len(distinct_labels)
indices_i = {label: [] for label in distinct_labels}
for i, label in enumerate(labels):
indices_i[label].append(i)
indices_i = {label: np.array(indices) for label, indices in indices_i.items()}
return num_labels, distinct_labels, indices_i
def plot2D(data, labels, title):
x_min, x_max = np.min(data, axis=0), np.max(data, axis=0)
data = (data - x_min) / (x_max - x_min)
fig, ax = plt.subplots(figsize=(8, 8))
cjet = cm.get_cmap("jet")
num_labels, distinct_labels, indices = distinct_labels_and_indices(labels)
for i, label in enumerate(distinct_labels):
index = indices[label]
ax.scatter(data[index, 0], data[index, 1], label=label, c=[cjet(1.0 * i / num_labels)], linewidths=0.)
ax.legend(loc="center left", bbox_to_anchor=(1, 0, 1, 1),
title=title.split('/')[-1])
fig.tight_layout()
tikzplotlib.save("%s.tex" % title, figure=fig, strict=True)
plt.savefig("%s.png" % title)
return fig
def plot2D_overlay(data_list, labels_list, alpha_list, title):
x_min, x_max = np.array((1e9, 1e9)), np.array((-1e9, -1e9))
for data in data_list:
x_min = np.minimum(x_min, np.min(data, axis=0))
x_max = np.maximum(x_max, np.max(data, axis=0))
for i in range(len(data_list)):
data_list[i] = (data_list[i] - x_min) / (x_max - x_min)
fig, ax = plt.subplots(figsize=(8, 8))
cjet = cm.get_cmap("jet")
indices_list = []
distinct_labels = []
for labels in labels_list:
_, cur_labels, indices = distinct_labels_and_indices(labels)
indices_list.append(indices)
for label in cur_labels:
if label not in distinct_labels:
distinct_labels.append(label)
num_labels = len(distinct_labels)
for i, label in enumerate(distinct_labels):
res = 0.0
for data, labels, indices, alpha in zip(data_list, labels_list, indices_list, alpha_list):
if label in indices.keys():
index = indices[label]
else:
index = np.array([])
c = cjet((1.0 * i + res) / (num_labels + 1))
ax.scatter(data[index, 0], data[index, 1], label=label, c=[c], alpha=alpha, linewidths=0.)
res += 0.3
handles, labels = ax.get_legend_handles_labels()
paired_handles = []
handles_tot = len(handles) // 2
for i in range(handles_tot):
paired_handles.append((handles[i * 2], handles[i * 2 + 1]))
ax.legend(handles=paired_handles, labels=distinct_labels, numpoints=1,
handler_map={tuple: HandlerTuple(ndivide=None)},
loc="center left", bbox_to_anchor=(1, 0, 1, 1),
title=title.split('/')[-1])
fig.tight_layout()
tikzplotlib.save("%s.tex" % title, figure=fig, strict=True)
plt.savefig("%s.png" % title)
return fig
def plot2D_phase(data, labels, title):
x_min, x_max = np.min(data, axis=0), np.max(data, axis=0)
data = (data - x_min) / (x_max - x_min)
figsize = (8, 8)
add_width = 2
new_width = figsize[0] + add_width
fig = plt.figure(figsize=(new_width, figsize[1]))
fac_l, fac_r = figsize[0] / new_width, add_width / new_width
rect_l = [0.1, 0.1, 0.8, 0.8]
rect_r = [0., 0.1, 0.2, 0.8]
ax = fig.add_axes(np.array(rect_l) * np.array([fac_l, 1, fac_l, 1]))
cax = fig.add_axes(np.array(rect_r) * np.array([fac_r, 1, fac_r, 1]) + np.array([fac_l, 0, 0, 0]))
sin_labels = list(map(lambda l: np.sin(float(l)), labels))
bla = ax.scatter(data[:, 0], data[:, 1], c=sin_labels, cmap="jet", alpha=1.0)
# plt.colorbar(bla, cax=cax) <- some problem with the color bar..
# fig.tight_layout()
tikzplotlib.save("%s.tex" % title, figure=fig, strict=True)
plt.savefig("%s.png" % title)
return fig
tsne = None
def calc_tsne(raw):
global tsne
if tsne is None:
tsne = TSNE(n_components=2, init='pca', random_state=7) # n_iter = xxx
result = tsne.fit_transform(raw)
return result
pca = None
def calc_pca(raw):
global pca
if pca is None:
pca = PCA(n_components=2)
return pca.fit_transform(raw)
def calc_pca_curve(raw):
pcan = PCA()
pcan.fit_transform(raw)
pct = pcan.explained_variance_ratio_
prefix = np.cumsum(pct / np.sum(pct))
fig = plt.figure(figsize=(4, 4))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6])
ax.plot(list(range(1, 6)), prefix[:5])
ax.plot(2, prefix[1], "ro")
ax.annotate("{:.3f}% of variation".format(prefix[1] * 100),
(2, prefix[1]),
textcoords="offset points",
xytext=(60, -20),
ha="center")
ax.set_xticks(list(range(1, 6)))
ax.set_yticks(list(np.arange(0.5, 1.01, 0.1)))
ax.set_xlabel("number of components")
ax.set_ylabel("explained variance ratio")
name = "pca_curve"
tikzplotlib.save(name + ".tex", figure=fig, strict=True)
plt.savefig("pca_curve.png")
return pct
def plot_tsne(raw, labels, title):
result = calc_tsne(raw)
return plot2D(result, labels, title)
def plot_content_tsne(raw, slabels, clabels, title):
name = title + "_tsne"
path = name + ".npz"
if os.path.exists(path):
print("%s already exists" % path)
result = np.load(path, allow_pickle=True)["result"]
else:
print("start to produce %s" % path)
result = calc_tsne(raw)
np.savez_compressed(name, result=result)
plot2D(result, slabels, title + "_style_labels")
plot2D(result, clabels, title + "_content_labels")
def calc_many_blas(raws, calc_single):
lens = list(map(lambda x: len(x), raws))
whole = np.concatenate(raws, axis=0)
proj = calc_single(whole)
ret = ()
suml = 0
for l in lens:
ret += (proj[suml: suml + l],)
suml += l
return ret
def get_all_plots(data, output_path, writers, iter, summary=True,
style_cluster_protocols=('pca'),
separate_compute=False):
"""
data: {"train": dict_train, "test": dict_test}
dict_train: {"style2d_code": blabla, etc.}
separate_compute: compute t-SNE for 2D & 3D separately
"""
ensure_dirs(output_path)
def fig_title(title):
return pjoin(output_path, title)
def add_fig(fig, title, phase):
if summary:
writers[phase].add_figure(title, fig, global_step=iter)
keys = data["train"].keys()
has2d = "style2d_code" in keys
has3d = "style3d_code" in keys
# style codes & adain params
for suffix in ["_code", "_adain"]:
codes_raw = []
titles = []
phases = []
data_keys = []
if has2d: data_keys.append("style2d" + suffix)
if has3d: data_keys.append("style3d" + suffix)
for key in data_keys:
for phase in ["train", "test"]:
codes_raw.append(data[phase][key])
titles.append(f'{phase}_{key}')
phases.append(phase)
# calc tsne with style2/3d, train/test altogether
for name, protocol in zip(['pca', 'tsne'], [calc_pca, calc_tsne]):
if name not in style_cluster_protocols:
continue
style_codes = calc_many_blas(codes_raw, protocol)
fig = plot2D_overlay([style_codes[0], style_codes[2]],
[data["train"]["meta"]["style"], data["train"]["meta"]["style"]],
[1.0, 0.5],
fig_title(f'joint_embedding_{name}{suffix}'))
add_fig(fig, f'joint_embedding_{name}{suffix}', "train")
for i, (code, phase, title) in enumerate(zip(style_codes, phases, titles)):
if separate_compute:
code = protocol(codes_raw[i])
for label_type in ["style", "content"]:
fig = plot2D(code, data[phase]["meta"][label_type], fig_title(f'{title}_{name}_{label_type}'))
add_fig(fig, f'{title}_{name}_{label_type}', phase)
# content codes (train only)
content_code_pca = calc_pca(data["train"]["content_code"])
for label in ["style", "content", "phase"]:
if label == "phase":
indices = [i for i in range(len(data["train"]["meta"]["content"])) if data["train"]["meta"]["content"][i] == "walk"]
walk_code = content_code_pca[np.array(indices)]
phase_labels = [data["train"]["meta"]["phase"][i] for i in indices]
fig = plot2D_phase(walk_code, phase_labels, fig_title(f'content_by_{label}'))
else:
fig = plot2D(content_code_pca, data["train"]["meta"][label], fig_title(f'content_by_{label}'))
add_fig(fig, f'content_by_{label}', "train")
"""
fig = show_images_from_disk("", all_titles, 2, output_path + "all_codes")
if summary:
writers["train"].add_figure("all codes", fig, global_step=iter)
"""
def get_demo_plots(data, output_path):
"""
data: {"train": dict_train, "test": dict_test}
dict_train: {"style2d_code": blabla, etc.}
"""
ensure_dirs(output_path)
def fig_title(title):
return pjoin(output_path, title)
style_labels = data["train"]["meta"]["style"]
adain_raw = []
for key in ["style2d_adain", "style3d_adain"]:
for phase in ["train", "test"]:
adain_raw.append(data[phase][key])
adain_tsne = calc_many_blas(adain_raw, calc_tsne)
plot2D_overlay([adain_tsne[0], adain_tsne[2]],
[style_labels, style_labels],
[1.0, 0.5],
fig_title(f'joint_embedding_adain_tsne'))
for key in ["style3d_code", "style3d_adain"]:
tsne_code = calc_tsne(data["train"][key])
plot2D(tsne_code, style_labels, fig_title(f'{key}_tsne'))
content_code_pca = calc_pca(data["train"]["content_code"])
indices = [i for i in range(len(data["train"]["meta"]["content"])) if data["train"]["meta"]["content"][i] == "walk"]
walk_code = content_code_pca[np.array(indices)]
phase_labels = [data["train"]["meta"]["phase"][i] for i in indices]
plot2D_phase(walk_code, phase_labels, fig_title(f'content_by_phase'))
plot2D(content_code_pca, style_labels, fig_title(f'content_by_style'))
def show_images_from_disk(path, titles, rows, this_title):
images = []
for title in titles:
name = "%s.png" % title
input_path = os.path.join(path, name)
images.append(plt.imread(input_path))
this_title = os.path.join(path, this_title)
return show_images(images, titles, this_title, rows)
def show_images(images, titles, this_title, rows=1):
"""Display a list of images in a single figure with matplotlib.
Parameters
---------
images: List of np.arrays compatible with plt.imshow.
cols (Default = 1): Number of columns in figure (number of rows is
set to np.ceil(n_images/float(cols))).
titles: List of titles corresponding to each image. Must have
the same length as titles.
"""
assert (len(images) == len(titles))
n_images = len(images)
cols = np.ceil(n_images / float(rows))
# if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]
size = np.array((8, 8)) * np.array(rows, cols)
fig = plt.figure(figsize=size)
for n, (image, title) in enumerate(zip(images, titles)):
a = fig.add_subplot(rows, cols, n + 1)
if image.ndim == 2:
plt.gray()
a.set_axis_off()
plt.imshow(image)
a.set_title(title)
fig.tight_layout(pad=0, w_pad=0, h_pad=0)
plt.subplots_adjust(wspace=0, hspace=0)
# plt.show()
plt.savefig("%s.png" % this_title, dpi=150, bbox_inches='tight', pad_inches=0)
return fig
| 12,382 | 32.833333 | 128 | py |
PoseTriplet | PoseTriplet-main/hallucinator/code_rib/probe/anim_view.py | import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patheffects as pe
from matplotlib import cm
import torch
import argparse
import sys
from os.path import join as pjoin
BASEPATH = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, BASEPATH)
sys.path.insert(0, pjoin(BASEPATH, '..'))
sys.path.insert(0, pjoin(BASEPATH, '..', '..'))
from py_utils import to_float
"""
Motion info:
joint parents, foot_idx
"""
J = 21
parents = np.array([-1, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 10, 13, 14, 15, 10, 17, 18, 19])
joint_foot_indices = [3, 4, 7, 8]
joint_sizes = [3 for i in range(J)]
head_index = 12
joint_sizes[head_index] = 7
"""
Anim info:
limb_colors
joint_colors
scale
centered
"""
cmap = cm.get_cmap("Pastel2")
limb_colors = [cmap(x) for x in np.arange(0, 1, 0.125)]
cmap = cm.get_cmap("Set2")
joint_colors = [cmap(x) for x in np.arange(0, 1, 0.125)]
scale = 0.75
centered = True
def init_2d_plot(fig, subplot_pos, scale):
ax = fig.add_subplot(subplot_pos)
ax.set_xlim(-scale*40, scale*40)
ax.set_ylim(-scale*40, scale*40)
ax.set_xticks([], [])
ax.set_yticks([], [])
return ax
def init_3d_plot(fig, subplot_pos, scale):
ax = fig.add_subplot(subplot_pos, projection='3d') # This projection type determines the #axes
rscale = scale * 20 # 15
ax.set_xlim3d(-rscale, rscale)
ax.set_zlim3d(-rscale, rscale)
ax.set_ylim3d(-rscale, rscale)
facec = (254, 254, 254)
linec = (240, 240, 240)
facec = list(np.array(facec) / 256.0) + [1.0]
linec = list(np.array(linec) / 256.0) + [1.0]
ax.w_zaxis.set_pane_color(facec)
ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
X = np.arange(-20, 25, 5)
Y = np.arange(-20, 25, 5)
xlen = len(X)
ylen = len(Y)
X, Y = np.meshgrid(X, Y)
Z = np.zeros(X.shape) - rscale # place it at a lower surface
colortuple = (facec, linec)
colors = np.zeros((Z.shape + (4, )))
for y in range(ylen):
for x in range(xlen):
colors[y, x] = colortuple[(x + y) % len(colortuple)]
# Plot the surface with face colors taken from the array we made.
surf = ax.plot_surface(X, Y, Z, facecolors=colors, linewidth=0., zorder=-1, shade=False)
ax.w_zaxis.line.set_lw(0.)
ax.w_yaxis.line.set_lw(0.)
ax.w_yaxis.line.set_color(linec)
ax.w_xaxis.line.set_lw(0.)
ax.w_xaxis.line.set_color(linec)
ax.set_xticks([], [])
ax.set_yticks([], [])
ax.set_zticks([], [])
ax.view_init(20, -60) # -40 for the other direction
return ax
def init_lines(ax, anim, dim, color=limb_colors[0], scale=1.0):
    """Create one empty limb line per joint (anim.shape[1]) with a black outline."""
    # `dim` coordinate lists so the same call works for 2D and 3D axes
    init_pos = [[0, 0] for i in range(dim)]
    return [ax.plot(*init_pos, color=color, zorder=3,
                    linewidth=2 * scale, solid_capstyle='round',
                    path_effects=[pe.Stroke(linewidth=3 * scale, foreground='black'),
                                  pe.Normal()])[0] for _ in range(anim.shape[1])]
def init_dots(ax, anim, dim, color='white', scale=1.0):
    """Create one empty joint marker per joint; marker size comes from joint_sizes."""
    init_pos = [[0] for i in range(dim)]
    return [ax.plot(*init_pos, color=color, zorder=3,
                    linewidth=2, linestyle='',
                    marker="o", markersize=joint_sizes[i] * scale,
                    path_effects=[pe.Stroke(linewidth=1.5 * scale, foreground='black'), pe.Normal()]
                    )[0] for i in range(anim.shape[1])]
def _anim_skel(lines, dots, anim, dim, i):
    """Move the limb lines and joint dots to frame i of `anim` ([T, J, dim])."""
    i = min(i, len(anim) - 1)  # clamp: shorter clips freeze on their last frame
    if dim == 3:
        # 3D convention here: (x, -z) span the ground plane, y is height
        for j in range(len(parents)):
            if parents[j] != -1:
                lines[j].set_data(
                    [ anim[i, j, 0], anim[i, parents[j], 0]],
                    [-anim[i, j, 2], -anim[i, parents[j], 2]])
                lines[j].set_3d_properties(
                    [ anim[i, j, 1], anim[i, parents[j], 1]])
            dots[j].set_data([anim[i, j, 0]], [-anim[i, j, 2]])
            dots[j].set_3d_properties([anim[i, j, 1]])
    else:
        for j in range(len(parents)):
            if parents[j] != -1:
                lines[j].set_data(
                    [anim[i, j, 0], anim[i, parents[j], 0]],
                    [anim[i, j, 1], anim[i, parents[j], 1]])
            dots[j].set_data([anim[i, j, 0]], [anim[i, j, 1]])
    return [lines, dots]
def _anim_foot_contact(dots, foot_contact, i):
    """Color foot markers red when in contact at frame i, blue otherwise.

    foot_contact: [T, 4] flags, one column per entry in joint_foot_indices.
    """
    i = min(i, len(foot_contact) - 1)  # clamp like _anim_skel
    for j, f_idx in enumerate(joint_foot_indices):
        color = 'red' if foot_contact[i, j] == 1.0 else 'blue'
        dots[f_idx].set_color(color)
    return [dots]
class Motion4Anim:
    """Bundle one motion clip with its plotting state (axes, limb lines, dots)."""
    def __init__(self, title, motion, foot, limb_color=limb_colors[0], joint_color=joint_colors[0]):
        # title: subplot title (or None); motion: [T, J, 2/3] positions;
        # foot: [T, 4] contact flags or None
        self.title = title
        self.motion = motion
        if centered:
            # NOTE(review): this in-place shift mutates `motion`, and the very
            # next line re-centers the same (already shifted) array with
            # glb2centered -- confirm the double centering is intentional.
            self.motion -= self.motion[0:1, 0:1, :]
            self.motion = glb2centered(motion)
        self.T = motion.shape[0]
        self.dims = motion.shape[-1]
        self.foot = foot
        # plotting handles, populated by set_anim()
        self.ax = None
        self.lines = None
        self.dots = None
        self.limb_color = limb_color
        self.joint_color = joint_color
    def set_anim(self, fig, pos, single=False):
        """Create the subplot (2D or 3D, by self.dims) and its line/dot artists."""
        if self.dims == 2:
            self.ax = init_2d_plot(fig, pos, scale)
        else:
            self.ax = init_3d_plot(fig, pos, scale)
        if self.title is not None:
            self.ax.set_title(self.title)
        plot_scale = 2.0 if single else 1.0  # draw thicker when shown alone
        self.lines = init_lines(self.ax, self.motion, self.dims, self.limb_color, scale=plot_scale)
        self.dots = init_dots(self.ax, self.motion, self.dims, self.joint_color, scale=plot_scale)
    def anim_skel(self, i):
        """Move the skeleton artists to frame i; returns the changed artists."""
        return _anim_skel(self.lines, self.dots, self.motion, self.dims, i)
    def anim_foot_contact(self, i):
        """Recolor foot markers for frame i (no-op when no contact labels)."""
        if self.foot is not None:
            return _anim_foot_contact(self.dots, self.foot, i)
        else:
            return []
    def anim_i(self, i):
        """Return all artists changed when stepping to frame i."""
        return self.anim_skel(i) + self.anim_foot_contact(i)
def plot_motions(motions, size=4, interval=26.67, fps=10, save=False, save_path=None):
    """Animate one or more Motion4Anim clips side by side.

    With save=True, writes an .mp4 to save_path (appending the extension if
    missing); otherwise shows the figure interactively and returns the user's
    answer to the "save?" prompt.
    """
    if not isinstance(motions, list):
        motions = [motions]
    N = len(motions)
    T = 0
    for mt in motions:
        T = max(T, mt.T)  # animate for the longest clip; shorter ones freeze
    fig = plt.figure(figsize=(N * size, size))
    init_pos = 100 + 10 * N + 1  # matplotlib 1xN subplot code, e.g. 131
    for i, mt in enumerate(motions):
        mt.set_anim(fig, init_pos + i)
    def animate(i):
        # collect every artist changed at frame i across all clips
        changed = []
        for mt in motions:
            changed += mt.anim_i(i)
        return changed
    plt.tight_layout()
    ani = animation.FuncAnimation(fig, animate, np.arange(T), interval=interval)
    if save:
        assert save_path is not None, "save_path is None!"
        print(f'Start saving motion to {save_path}')
        if not os.path.exists(os.path.dirname(save_path)):
            os.mkdir(os.path.dirname(save_path))
        if not save_path.endswith('.mp4'):
            save_path += '.mp4'
        ani.save(save_path, writer='ffmpeg', fps=fps)
        print(f'Motion saved to {save_path}')
    else:
        plt.show()
        opt = input("save? Yes/No/Exit")
        return opt
def glb2centered(glb):
    """
    input: positions - glb [T, J, (3/2)] -- single clip!
    output: motion with average root (x(, z)) = (0(, 0))
    """
    # average root position over time; keep the vertical (index 1) untouched
    offset = glb[:, 0:1, :].mean(axis=0, keepdims=True)
    offset[0, 0, 1] = 0
    return glb - offset
def rotate_motion(mt):
    """Canonicalize the travel direction of a motion clip.

    mt: [T, J, 2] or [T, J, 3] joint positions (the 3D case is modified
    in place). Returns the possibly flipped / axis-swapped motion.
    Raises AssertionError for any other trailing dimension.
    """
    def rotate_motion3d(mt):
        # Flip x so the root's net displacement is in +x.
        if mt[-1, 0, 0] - mt[0, 0, 0] < 0:
            mt[..., 0] = -mt[..., 0]
        # Flip z so the root's net displacement is in +z.
        if mt[-1, 0, 2] - mt[0, 0, 2] < 0:
            mt[..., 2] = -mt[..., 2]
        # If z displacement dominates, swap x/z so the motion travels along x.
        if mt[-1, 0, 0] - mt[0, 0, 0] < mt[-1, 0, 2] - mt[0, 0, 2]:  # move in z dir
            tmp = mt[..., 0].copy()
            mt[..., 0] = mt[..., 2].copy()
            mt[..., 2] = tmp
        return mt
    def rotate_motion2d(mt):
        # 2D canonicalization is intentionally disabled (kept for reference):
        """
        if mt[-1, 0, 0] > mt[0, 0, 0]:
            mt[..., 0] = -mt[..., 0]
        """
        return mt
    if mt.shape[-1] == 2:
        return rotate_motion2d(mt)
    elif mt.shape[-1] == 3:
        return rotate_motion3d(mt)
    else:
        # BUG FIX: the message previously lacked the f-prefix, so the
        # placeholder "{mt.shape[-1]}" was printed literally.
        assert 0, f"motion dimension is {mt.shape[-1]}"
def visualize(data, save=False, save_path=None):
    """data: dict {title: {motion:xxx, foot_contact:xxx}}

    Builds one Motion4Anim per entry (with its own limb/joint color pair,
    indexed by insertion order) and renders them via plot_motions.
    """
    motions = []
    for i, (title, motion_dict) in enumerate(data.items()):
        motion = to_float(motion_dict['motion']).copy()
        motion = rotate_motion(motion) # [T, J, 2/3]
        foot_contact = motion_dict['foot_contact'] # [T, 4]
        motions.append(Motion4Anim(title,
                                   motion,
                                   foot_contact,
                                   limb_colors[i],
                                   joint_colors[i]
                                   ))
    plot_motions(motions, save=save, save_path=save_path)
def to_numpy(data):
    """Convert every torch.Tensor in `data` to a numpy array; pass others through."""
    return [item.detach().numpy() if isinstance(item, torch.Tensor) else item
            for item in data]
def parse_args():
    """Parse command-line arguments: --file is the saved-output path to load."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--file', type=str)
    args = parser.parse_args()
    return args
def load_output(filename):
    """Load a torch-saved results dict onto CPU and print its keys."""
    data = torch.load(filename, map_location='cpu')
    print(list(data.keys()))
    return data
def main(args):
    """Visualize saved style-transfer outputs (style/trans/recon/content clips)."""
    from utils.animation_data import AnimationData
    from utils.animation_2d_data import AnimationData2D
    data = load_output(args.file)
    total = len(data["trans"])
    content, style, foot_contact, trans, recon = data["content"], data["style"], data["foot_contact"], data["trans"], data["recon"]
    content_meta, style_meta = data["content_meta"], data["style_meta"]
    selected = list(range(total))
    print(total)
    # for test, selected = [6, 12, 7, 11, 4]
    for i in selected:
        # fall back to the clip index when no metadata was stored (flag value 0)
        if style_meta[i] == 0:
            style_meta[i] = {"style": [str(i)]}
        if content_meta[i] == 0:
            content_meta[i] = {"style": [str(i)]}
        vis_dict = {}
        # presumably [4, T] -> [T, 4] contact flags -- TODO confirm upstream layout
        cur_foot_contact = foot_contact[i].transpose(1, 0)
        if style[i].shape[0] == content[i].shape[0]: # 3d
            cur_style = AnimationData.from_network_output(to_float(style[i])).get_global_positions()
        else: # 2d
            cur_style = AnimationData2D.from_style2d(to_float(style[i])).get_projection()
        raws = [trans[i], recon[i], content[i]]
        cur_trans, cur_recon, cur_content = [AnimationData.from_network_output(to_float(raw)).get_global_positions() for raw in raws]
        vis_dict[" ".join(("style", style_meta[i]["style"][0]))] = {"motion": cur_style, "foot_contact": None}
        vis_dict["trans"] = {"motion": cur_trans, "foot_contact": cur_foot_contact}
        vis_dict["recon"] = {"motion": cur_recon, "foot_contact": cur_foot_contact}
        vis_dict[" ".join(("content", content_meta[i]["style"][0]))] = {"motion": cur_content, "foot_contact": cur_foot_contact}
        visualize(vis_dict)
if __name__ == "__main__":
    # script entry point: load the saved outputs and visualize each clip
    args = parse_args()
    main(args)
| 11,180 | 30.764205 | 133 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/videopose-j16-wild-eval_run.py | import pickle
from common.arguments import parse_args
from common.camera import camera_to_world, normalize_screen_coordinates, image_coordinates
from common.generators import UnchunkedGenerator
from common.utils import evaluate, add_path
from tool.utils import *
import scipy.signal
import glob
add_path()
"""
inference code for in the wild case.
and save 3D poses for in the wild imitation.
"""
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# for test augmentation
metadata = {'layout_name': 'std', 'num_joints': 16,
# 'keypoints_symmetry': [[1, 3, 5, 7, 9, 11, 13, 15], [2, 4, 6, 8, 10, 12, 14, 16]]}
'keypoints_symmetry': [[4, 5, 6, 10, 11, 12], [1, 2, 3, 13, 14, 15]]}
class Visualization(object):
    """End-to-end wild-video pipeline: 2D keypoints -> 3D pose -> rendered video.

    Reads configuration from the module-level `args`/`metadata` and writes the
    rendered animation plus a 3D-pose pickle next to the source video tree.
    """
    def __init__(self, ckpt_path):
        # ckpt_path: path to the 2D->3D lifting checkpoint (.bin)
        self.current_time = time0
        self.ckpt_path = ckpt_path
        self.root_trajectory = None  # filled by get_prediction() when args.add_trajectory
        self.set_param()
        self.get_video_wh()
        self.get_keypoints()
    def redering(self):
        """Run the 3D lifter on the loaded keypoints and render the result."""
        # poseaug result
        # architecture = '3,3,3,1,1' # add for custom
        architecture = args.architecture
        result = self.get_prediction(architecture, self.ckpt_path)
        anim_output = {
            'result': result,
        }
        self.visalizatoin(anim_output)
    def get_video_wh(self):
        """Read the input video's width/height into args."""
        vid = cv2.VideoCapture(args.viz_video_path)
        height = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
        width = vid.get(cv2.CAP_PROP_FRAME_WIDTH)
        args.video_width = int(width) # cv2 read (height, width)
        args.video_height = int(height)
        self.update_time('prepare video clip')
    def set_param(self):
        """Derive output paths and symmetry/joint index lists from args/metadata."""
        dir_name = os.path.dirname(args.viz_video_path)
        basename = os.path.basename(args.viz_video_path)
        self.video_name = basename[:basename.rfind('.')]
        # output lands in a sibling folder named after the architecture + 2D scale
        self.viz_output_path = f'{dir_name}/{self.video_name}_{args.detector_2d}.mp4'.replace(
            'source_video', args.architecture.replace(',', '')+'_scale2D_{:0>3d}'.format(int(args.pose2dscale * 10)))
        # prepare folder
        mkd(args.viz_video_path, get_parent=True)
        mkd(self.viz_output_path, get_parent=True)
        # init some property
        keypoints_symmetry = metadata['keypoints_symmetry']
        self.kps_left, self.kps_right = list(keypoints_symmetry[0]), list(keypoints_symmetry[1])
        self.joints_left, self.joints_right = list([4, 5, 6, 10, 11, 12]), list([1, 2, 3, 13, 14, 15])
    def update_time(self, task):
        """Print and reset the elapsed time since the previous checkpoint."""
        time_cost, self.current_time = update_time(self.current_time)
        print('-------------- {} spends {:.2f} seconds'.format(task, time_cost))
    def keypoint_square_padding(self, keypoint):
        """
        square_padding
        the same as take the longer one as width.

        Shifts keypoints by half the width/height difference and updates the
        stored video dimensions so the frame becomes square.
        """
        tmp_keypoint = keypoint.copy()
        if args.video_width > args.video_height: # up down padding
            pad = int((args.video_width - args.video_height)*0.5)
            tmp_keypoint[:, :, 1] = tmp_keypoint[:, :, 1] + pad
            args.video_height = args.video_width
        elif args.video_width < args.video_height: # left right padding
            pad = int((args.video_height - args.video_width)*0.5)
            tmp_keypoint[:, :, 0] = tmp_keypoint[:, :, 0] + pad
            args.video_width = args.video_height
        else:
            print('image are square, no need padding')
        return tmp_keypoint
    def get_keypoints(self):
        """Load cached 2D detections or run the 2D detector, then normalize them."""
        # 2D kpts loads or generate
        tmp_npz_path = args.viz_video_path.replace('.mp4', '_det2D.npz').replace('source_video', 'det2D_'+args.detector_2d)
        args.input_npz = tmp_npz_path if os.path.isfile(tmp_npz_path) else None
        if not args.input_npz:
            # get detector for unlabeled video
            detector_2d = get_detector_2d(args.detector_2d)
            assert detector_2d, 'detector_2d should be in ({alpha, hr, open}_pose)'
            # detect keypoints
            self.keypoints = detector_2d(args.viz_video_path)
            # save for next time use
            mkd(tmp_npz_path)
            kpts = np.array(self.keypoints).astype(np.float32)
            print('kpts npz save in ', tmp_npz_path)
            np.savez_compressed(tmp_npz_path, kpts=kpts)
        else:
            # load keypoint
            npz = np.load(args.input_npz)
            self.keypoints = npz['kpts'] # (N, 17, 2) - coco format
        if args.pose2d_smoothing:
            self.keypoints = self.keypoint_smoothing(self.keypoints)
        # convert to standard 16 joint
        if args.detector_2d == 'alpha_pose': # for coco format -> std 16 j
            self.keypoints = convert_AlphaOpenposeCoco_to_standard16Joint(
                self.keypoints.copy()) # Nx16x2
        self.keypoints_imgunnorm = self.keypoint_square_padding(self.keypoints)
        # normlization keypoints
        self.keypoints_imgnorm = normalize_screen_coordinates(self.keypoints_imgunnorm[..., :2], w=args.video_width,
                                                              h=args.video_height)
        self.update_time('load keypoint')
        # analysis scale
        self.keypoints_imgnorm = self.keypoints_imgnorm * args.pose2dscale
    def keypoint_smoothing(self, keypoints):
        """Temporally smooth keypoints with a Savitzky-Golay filter along frames."""
        x = keypoints.copy()
        window_length = 5
        polyorder = 2
        out = scipy.signal.savgol_filter(x, window_length, polyorder, deriv=0, delta=1.0, axis=0, mode='interp', cval=0.0)
        return out
    def get_prediction(self, architecture, ckpt_path):
        """Run pose (and optionally root-trajectory) models; return 3D poses."""
        model_pos = self._get_model(architecture, ckpt_path)
        data_loader = self._get_dataloader(model_pos)
        prediction = self._evaluate(model_pos, data_loader)
        if args.add_trajectory:
            model_traj = self._get_modelTraj(architecture, ckpt_path)
            data_loader = self._get_dataloaderTraj(model_traj)
            self.root_trajectory = self._evaluate(model_traj, data_loader)
        return prediction
    def _get_model(self, architecture, ckpt_path):
        """Build the 16-joint TemporalModel and load 'model_pos' weights."""
        from common.model import TemporalModel
        filter_widths = [int(x) for x in architecture.split(',')]
        model_pos = TemporalModel(16, 2, 16, filter_widths=filter_widths, causal=args.causal, dropout=args.dropout,
                                  channels=args.channels,
                                  dense=args.dense).cuda()
        # load trained model
        print('Loading checkpoint', ckpt_path)
        checkpoint = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
        model_pos.load_state_dict(checkpoint['model_pos'])
        self.update_time('load 3D model')
        return model_pos
    def _get_modelTraj(self, architecture, ckpt_path):
        """Build the 1-output trajectory model and load 'model_traj' weights."""
        from common.model import TemporalModel
        filter_widths = [int(x) for x in architecture.split(',')]
        model_traj = TemporalModel(16, 2, 1, filter_widths=filter_widths, causal=args.causal, dropout=args.dropout,
                                   channels=args.channels,
                                   dense=args.dense).cuda()
        # load trained model
        print('Loading checkpoint', ckpt_path)
        checkpoint = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
        model_traj.load_state_dict(checkpoint['model_traj'])
        self.update_time('load 3D Traj model')
        return model_traj
    def _get_dataloader(self, model_pos):
        """Wrap the normalized keypoints in an UnchunkedGenerator (with flip aug)."""
        # Receptive field: 243 frames for args.arc [3, 3, 3, 3, 3]
        receptive_field = model_pos.receptive_field()
        pad = (receptive_field - 1) // 2 # Padding on each side
        causal_shift = 0
        data_loader = UnchunkedGenerator(None, None, [self.keypoints_imgnorm],
                                         pad=pad, causal_shift=causal_shift, augment=args.test_time_augmentation,
                                         kps_left=self.kps_left, kps_right=self.kps_right, joints_left=self.joints_left,
                                         joints_right=self.joints_right)
        return data_loader
    def _get_dataloaderTraj(self, model_pos):
        """Same as _get_dataloader but without test-time augmentation (trajectory)."""
        # Receptive field: 243 frames for args.arc [3, 3, 3, 3, 3]
        receptive_field = model_pos.receptive_field()
        pad = (receptive_field - 1) // 2 # Padding on each side
        causal_shift = 0
        data_loader = UnchunkedGenerator(None, None, [self.keypoints_imgnorm],
                                         pad=pad, causal_shift=causal_shift, augment=False,
                                         kps_left=self.kps_left, kps_right=self.kps_right, joints_left=self.joints_left,
                                         joints_right=self.joints_right)
        return data_loader
    def _evaluate(self, model_pos, data_loader):
        """Run inference over the loader and return the raw predictions."""
        # get result
        prediction = evaluate(data_loader, model_pos, return_predictions=True,
                              joints_leftright=(self.joints_left, self.joints_right))
        self.update_time('generate reconstruction 3D data')
        return prediction
    def _postprocess(self, prediction):
        """Add the root trajectory, rotate to world space, and rebase the height."""
        if args.add_trajectory:
            # add root trajectory
            prediction -= prediction[:, :1, :]
            prediction += self.root_trajectory
        # # camera rotation
        rot = np.array([0.14070565, -0.15007018, -0.7552408, 0.62232804], dtype=np.float32)
        prediction_world = camera_to_world(prediction, R=rot, t=0)
        # We don't have the trajectory, but at least we can rebase the height
        prediction_world[:, :, 2] -= np.min(prediction_world[:, :, 2])
        return prediction_world
    def visalizatoin(self, anim_output):
        """Save raw 3D poses, post-process them, and render the output video."""
        from common.visualization import render_animation
        # anim_output = {'Reconstruction': prediction}
        self.save_3d_prediction(anim_output)
        for tmp_key in anim_output:
            anim_output[tmp_key] = self._postprocess(anim_output[tmp_key])
        if args.pure_background:
            viz_video_path = None  # render on a blank background
        else:
            viz_video_path = args.viz_video_path
        print('Rendering... save to {}'.format(self.viz_output_path))
        render_animation(self.keypoints, anim_output,
                         Skeleton(), args.frame_rate, args.viz_bitrate, np.array(70., dtype=np.float32), self.viz_output_path,
                         limit=args.viz_limit, downsample=args.viz_downsample, size=args.viz_size,
                         input_video_path=viz_video_path, viewport=(args.video_width, args.video_height),
                         input_video_skip=args.viz_skip)
        self.update_time('render animation')
    def save_3d_prediction(self, anim_output):
        """Pickle camera-space 3D poses (with trajectory added) for later imitation use."""
        tmp_anim_output = {}
        for tmp_key in anim_output:
            prediction = anim_output[tmp_key] * 1.  # copy; keep caller's array intact
            if args.add_trajectory:
                # add root trajectory
                prediction -= prediction[:, :1, :]
                prediction += self.root_trajectory
            tmp_anim_output[tmp_key] = prediction * 1.
        # save 3D joint points
        tmp_pkl_path = args.viz_video_path.replace('.mp4', '_pred3D.pkl').replace('source_video', 'pred3D_pose') # rename to save
        mkd(tmp_pkl_path)
        with open(tmp_pkl_path, 'wb') as handle:
            pickle.dump(tmp_anim_output, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
    # entry point: configure args in code, then run the pipeline per video clip
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    time0 = update_time()  # global start checkpoint used by Visualization.update_time
    args = parse_args()
    # model 2d detection detail
    args.detector_2d = 'alpha_pose'
    # redering detail
    args.pure_background = False # False/True
    args.add_trajectory = True #False
    # args.viz_limit = 200
    ###########################
    # model 2d-3d detail
    ###########################
    args.architecture = '3,3,3' # model arch
    # args.architecture = '3,1,3,1,3' # model arch
    args.test_time_augmentation = True # False
    args.pose2d_smoothing = True
    ckpt_path = './checkpoint/ckpt_ep_045.bin'
    # ================================================================================================
    # seletcted clip run
    # ================================================================================================
    for args.architecture in ['3,3,3']:
        # for args.pose2dscale in [0.5, 0.7, 0.85, 1]:
        for args.pose2dscale in [1]:
            args.eval_data = 'bilibili-clip'
            eval_video_list = glob.glob('./wild_eval/source_video/{}/*.mp4'.format(args.eval_data))
            # only the first clip is processed; widen the slice to run more
            for path_name in eval_video_list[:1]:
                args.viz_video_path = path_name
                args.frame_rate = 30
                Vis = Visualization(ckpt_path)
                Vis.redering()
| 12,683 | 43.041667 | 129 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/matching.py | # -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com)
# -----------------------------------------------------
import numpy as np
import torch
from scipy.optimize import linear_sum_assignment
sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89])
def candidate_reselect(bboxes, bboxes_scores, pose_preds):
    """Refine multi-person pose proposals by grouping nearby keypoints and
    re-assigning each keypoint group to one human via Hungarian matching.

    Returns a list of dicts with 'keypoints' [17, 2], 'kp_score' [17, 1] and a
    combined 'proposal_score'; low-confidence or tiny proposals are dropped.
    """
    '''
    Grouping
    '''
    # Group same keypointns together
    kp_groups = grouping(bboxes, bboxes_scores, pose_preds)
    '''
    Re-select
    '''
    # Generate Matrix
    human_num = len(pose_preds.keys())
    costMatrix = []
    for k in range(17):
        kp_group = kp_groups[k]
        joint_num = len(kp_group.keys())
        costMatrix.append(np.zeros((human_num, joint_num)))
    group_size = {k: {} for k in range(17)}
    for n, person in pose_preds.items():
        h_id = n
        assert 0 <= h_id < human_num
        for k in range(17):
            g_id = person['group_id'][k]
            if g_id is not None:
                if g_id not in group_size[k].keys():
                    group_size[k][g_id] = 0
                group_size[k][g_id] += 1
                g_id = int(g_id) - 1  # group ids are 1-based; matrix columns 0-based
                _, _, score = person[k][0]
                h_score = person['human_score']
                if score < 0.05:
                    costMatrix[k][h_id][g_id] = 0
                else:
                    # negative so that linear_sum_assignment maximizes the product
                    costMatrix[k][h_id][g_id] = -(h_score * score)
    pose_preds = matching(pose_preds, costMatrix, kp_groups)
    # To JSON
    final_result = []
    for n, person in pose_preds.items():
        final_pose = torch.zeros(17, 2)
        final_score = torch.zeros(17, 1)
        max_score = 0
        mean_score = 0
        xmax, xmin = 0, 1e5
        ymax, ymin = 0, 1e5
        for k in range(17):
            assert len(person[k]) > 0
            x, y, s = person[k][0]
            xmax = max(xmax, x)
            xmin = min(xmin, x)
            ymax = max(ymax, y)
            ymin = min(ymin, y)
            final_pose[k][0] = x.item() - 0.3
            final_pose[k][1] = y.item() - 0.3
            final_score[k] = s.item()
            mean_score += (s.item() / 17)
            max_score = max(max_score, s.item())
        # drop proposals with no confident joint at all
        if torch.max(final_score).item() < 0.1:
            continue
        # drop proposals whose (1.5x scaled) bounding area is below 40x40 px
        if (1.5 ** 2 * (xmax - xmin) * (ymax - ymin) < 40 * 40):
            continue
        final_result.append({
            'keypoints': final_pose,
            'kp_score': final_score,
            'proposal_score': mean_score + max_score + person['bbox_score']
        })
    return final_result
def grouping(bboxes, bboxes_scores, pose_preds):
    """Cluster per-person keypoint candidates into spatial groups per joint.

    Mutates pose_preds in place (adds 'bbox', 'bbox_score', 'human_score' and a
    per-joint 'group_id'); returns kp_groups: {joint_k: {group_id: {kp_list,
    group_center, group_area}}} with score-weighted running centers.
    """
    kp_groups = {}
    for k in range(17):
        kp_groups[k] = {}
    ids = np.zeros(17)  # highest group id assigned so far, per joint
    for n, person in pose_preds.items():
        pose_preds[n]['bbox'] = bboxes[n]
        pose_preds[n]['bbox_score'] = bboxes_scores[n]
        pose_preds[n]['group_id'] = {}
        s = 0
        for k in range(17):
            pose_preds[n]['group_id'][k] = None
            pose_preds[n][k] = np.array(pose_preds[n][k])
            assert len(pose_preds[n][k]) > 0
            s += pose_preds[n][k][0][-1]
        s = s / 17  # mean top-candidate score over all joints
        pose_preds[n]['human_score'] = s
        for k in range(17):
            latest_id = ids[k]
            kp_group = kp_groups[k]
            assert len(person[k]) > 0
            x0, y0, s0 = person[k][0]
            if s0 < 0.05:
                continue  # too weak to participate in grouping
            for g_id, g in kp_group.items():
                x_c, y_c = kp_group[g_id]['group_center']
                '''
                Get Average Box Size
                '''
                group_area = kp_group[g_id]['group_area']
                group_area = group_area[0] * group_area[1] / (group_area[2] ** 2)
                '''
                Groupingn Criterion
                '''
                # Joint Group
                dist = np.sqrt(
                    ((x_c - x0) ** 2 + (y_c - y0) ** 2) / group_area)
                if dist <= 0.1 * sigmas[k]: # Small Distance
                    # join this group; strong candidates also update its stats
                    if s0 >= 0.3:
                        kp_group[g_id]['kp_list'][0] += x0 * s0
                        kp_group[g_id]['kp_list'][1] += y0 * s0
                        kp_group[g_id]['kp_list'][2] += s0
                        kp_group[g_id]['group_area'][0] += (person['bbox'][2] - person['bbox'][0]) * person['human_score']
                        kp_group[g_id]['group_area'][1] += (person['bbox'][3] - person['bbox'][1]) * person['human_score']
                        kp_group[g_id]['group_area'][2] += person['human_score']
                        x_c = kp_group[g_id]['kp_list'][0] / kp_group[g_id]['kp_list'][2]
                        y_c = kp_group[g_id]['kp_list'][1] / kp_group[g_id]['kp_list'][2]
                        kp_group[g_id]['group_center'] = (x_c, y_c)
                    pose_preds[n]['group_id'][k] = g_id
                    break
            else:
                # for-else: no existing group matched -> start a new keypoint group
                latest_id += 1
                kp_group[latest_id] = {
                    'kp_list': None,
                    'group_center': person[k][0].copy()[:2],
                    'group_area': None
                }
                x, y, s = person[k][0]
                kp_group[latest_id]['kp_list'] = np.array((x * s, y * s, s))
                # Ref Area
                ref_width = person['bbox'][2] - person['bbox'][0]
                ref_height = person['bbox'][3] - person['bbox'][1]
                ref_score = person['human_score']
                kp_group[latest_id]['group_area'] = np.array((
                    ref_width * ref_score, ref_height * ref_score, ref_score))
                pose_preds[n]['group_id'][k] = latest_id
                ids[k] = latest_id
    return kp_groups
def matching(pose_preds, matrix, kp_groups):
    """Assign keypoint groups to humans per joint and update pose_preds in place.

    For each joint, runs the Hungarian algorithm on the (human x group) cost
    matrix; unmatched candidates are either popped (if alternatives remain) or
    score-suppressed, matched ones snap to their group center.
    """
    index = []
    for k in range(17):
        human_ind, joint_ind = linear_sum_assignment(matrix[k])
        # human_ind, joint_ind = greedy_matching(matrix[k])
        index.append(list(zip(human_ind, joint_ind)))
    for n, person in pose_preds.items():
        for k in range(17):
            g_id = person['group_id'][k]
            if g_id is not None:
                g_id = int(g_id) - 1  # back to 0-based column index
                h_id = n
                x, y, s = pose_preds[n][k][0]
                if ((h_id, g_id) not in index[k]) and len(pose_preds[n][k]) > 1:
                    # unmatched: discard the top candidate, fall back to the next
                    pose_preds[n][k] = np.delete(pose_preds[n][k], 0, 0)
                elif ((h_id, g_id) not in index[k]) and len(person[k]) == 1:
                    # unmatched and no alternative: keep position, kill the score
                    x, y, _ = pose_preds[n][k][0]
                    pose_preds[n][k][0] = (x, y, 1e-5)
                    pass
                elif ((h_id, g_id) in index[k]):
                    # matched: snap the candidate to the group center
                    x, y = kp_groups[k][g_id + 1]['group_center']
                    s = pose_preds[n][k][0][2]
                    pose_preds[n][k][0] = (x, y, s)
    return pose_preds
def greedy_matching(matrix):
    """Greedy alternative to Hungarian matching.

    matrix: (num_human, num_joint) cost matrix. Assigns every joint group to
    the human with the lowest cost in its column, so (unlike
    linear_sum_assignment) several groups may map to the same human.
    Returns (human_ind, joint_ind) as plain lists.
    """
    num_human, num_joint = matrix.shape
    # NOTE: the original guarded this with `num_joint <= num_human or True`,
    # whose `or True` made the else branch dead code (and the else path only
    # contained `pass`, which would have crashed on the unbound `human_ind`).
    # The greedy path is always taken, so the dead branch is removed.
    human_ind = np.argmin(matrix, axis=0)
    joint_ind = np.arange(num_joint)
    return human_ind.tolist(), joint_ind.tolist()
| 7,289 | 30.695652 | 122 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/online_demo.py | import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.nn as nn
import torch.utils.data
import numpy as np
from opt import opt
from dataloader import WebcamLoader, DataWriter, crop_from_dets, Mscoco
from yolo.darknet import Darknet
from yolo.util import write_results, dynamic_write_results
from SPPE.src.main_fast_inference import *
from SPPE.src.utils.img import im_to_torch
import os
import sys
from tqdm import tqdm
import time
from fn import getTime
import cv2
from pPose_nms import write_json
args = opt
args.dataset = 'coco'
def loop():
    """Yield consecutive integers starting at 0, forever (webcam frame index)."""
    value = 0
    while True:
        yield value
        value = value + 1
if __name__ == "__main__":
    # Webcam demo: YOLO person detection + SPPE pose estimation, frame by frame,
    # until Ctrl+C; results stream through DataWriter and end in a JSON file.
    webcam = args.webcam
    mode = args.mode
    if not os.path.exists(args.outputpath):
        os.mkdir(args.outputpath)
    # Load input video
    fvs = WebcamLoader(webcam).start()
    (fourcc, fps, frameSize) = fvs.videoinfo()
    # Data writer
    save_path = os.path.join(args.outputpath, 'AlphaPose_webcam' + webcam + '.avi')
    writer = DataWriter(args.save_video, save_path, cv2.VideoWriter_fourcc(*'XVID'), fps, frameSize).start()
    # Load YOLO model
    print('Loading YOLO model..')
    sys.stdout.flush()
    det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
    det_model.load_weights('models/yolo/yolov3-spp.weights')
    det_model.net_info['height'] = args.inp_dim
    det_inp_dim = int(det_model.net_info['height'])
    assert det_inp_dim % 32 == 0
    assert det_inp_dim > 32
    det_model.cuda()
    det_model.eval()
    # Load pose model
    pose_dataset = Mscoco()
    if args.fast_inference:
        pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
    else:
        pose_model = InferenNet(4 * 1 + 1, pose_dataset)
    pose_model.cuda()
    pose_model.eval()
    # per-stage timing accumulators: load / detect / det-NMS / pose / post
    runtime_profile = {
        'ld': [],
        'dt': [],
        'dn': [],
        'pt': [],
        'pn': []
    }
    print('Starting webcam demo, press Ctrl + C to terminate...')
    sys.stdout.flush()
    im_names_desc = tqdm(loop())
    for i in im_names_desc:
        try:
            start_time = getTime()
            (img, orig_img, inp, im_dim_list) = fvs.read()
            ckpt_time, load_time = getTime(start_time)
            runtime_profile['ld'].append(load_time)
            with torch.no_grad():
                # Human Detection
                img = Variable(img).cuda()
                im_dim_list = im_dim_list.cuda()
                prediction = det_model(img, CUDA=True)
                ckpt_time, det_time = getTime(ckpt_time)
                runtime_profile['dt'].append(det_time)
                # NMS process
                dets = dynamic_write_results(prediction, opt.confidence,
                                             opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
                if isinstance(dets, int) or dets.shape[0] == 0:
                    # no detections this frame; still emit the raw frame
                    writer.save(None, None, None, None, None, orig_img, im_name=str(i) + '.jpg')
                    continue
                im_dim_list = torch.index_select(im_dim_list, 0, dets[:, 0].long())
                scaling_factor = torch.min(det_inp_dim / im_dim_list, 1)[0].view(-1, 1)
                # coordinate transfer: undo letterbox padding/scaling back to image space
                dets[:, [1, 3]] -= (det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
                dets[:, [2, 4]] -= (det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
                dets[:, 1:5] /= scaling_factor
                for j in range(dets.shape[0]):
                    dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
                    dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
                boxes = dets[:, 1:5].cpu()
                scores = dets[:, 5:6].cpu()
                ckpt_time, detNMS_time = getTime(ckpt_time)
                runtime_profile['dn'].append(detNMS_time)
                # Pose Estimation
                inps = torch.zeros(boxes.size(0), 3, opt.inputResH, opt.inputResW)
                pt1 = torch.zeros(boxes.size(0), 2)
                pt2 = torch.zeros(boxes.size(0), 2)
                inps, pt1, pt2 = crop_from_dets(inp, boxes, inps, pt1, pt2)
                inps = Variable(inps.cuda())
                hm = pose_model(inps)
                ckpt_time, pose_time = getTime(ckpt_time)
                runtime_profile['pt'].append(pose_time)
                writer.save(boxes, scores, hm.cpu(), pt1, pt2, orig_img, im_name=str(i) + '.jpg')
                ckpt_time, post_time = getTime(ckpt_time)
                runtime_profile['pn'].append(post_time)
            # TQDM
            im_names_desc.set_description(
                'load time: {ld:.4f} | det time: {dt:.4f} | det NMS: {dn:.4f} | pose time: {pt:.4f} | post process: {pn:.4f}'.format(
                    ld=np.mean(runtime_profile['ld']), dt=np.mean(runtime_profile['dt']), dn=np.mean(runtime_profile['dn']),
                    pt=np.mean(runtime_profile['pt']), pn=np.mean(runtime_profile['pn']))
            )
        except KeyboardInterrupt:
            break
    print(' ')
    print('===========================> Finish Model Running.')
    if (args.save_img or args.save_video) and not args.vis_fast:
        print('===========================> Rendering remaining images in the queue...')
        print('===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).')
    # drain the writer queue before collecting results
    while writer.running():
        pass
    writer.stop()
    final_result = writer.results()
    write_json(final_result, args.outputpath)
| 5,619 | 35.732026 | 144 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/video_demo.py | import os
from SPPE.src.main_fast_inference import *
from dataloader import ImageLoader, DetectionLoader, DetectionProcessor, DataWriter, Mscoco
from fn import getTime
from opt import opt
from pPose_nms import write_json
from tqdm import tqdm
def main(args):
    """Run AlphaPose over a folder (or list) of images and write JSON results.

    Pipeline: ImageLoader -> YOLO DetectionLoader/Processor -> SPPE pose model
    (batched) -> DataWriter; per-stage timings are shown when args.profile.
    """
    inputpath = args.inputpath
    inputlist = args.inputlist
    mode = args.mode
    if not os.path.exists(args.outputpath):
        os.mkdir(args.outputpath)
    if len(inputlist):
        im_names = open(inputlist, 'r').readlines()
    elif len(inputpath) and inputpath != '/':
        # NOTE(review): im_names is overwritten on every os.walk iteration, so
        # only the last directory's files survive (and an empty walk leaves it
        # unbound) -- confirm inputpath is always a flat, non-empty folder.
        for root, dirs, files in os.walk(inputpath):
            im_names = files
    else:
        raise IOError('Error: must contain either --indir/--list')
    # Load input images
    data_loader = ImageLoader(im_names, batchSize=args.detbatch, format='yolo').start()
    # Load detection loader
    print('Loading YOLO model..')
    sys.stdout.flush()
    det_loader = DetectionLoader(data_loader, batchSize=args.detbatch).start()
    det_processor = DetectionProcessor(det_loader).start()
    # Load pose model
    pose_dataset = Mscoco()
    if args.fast_inference:
        pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
    else:
        pose_model = InferenNet(4 * 1 + 1, pose_dataset)
    pose_model.cuda()
    pose_model.eval()
    # per-stage timing accumulators: detect / pose / post-process
    runtime_profile = {
        'dt': [],
        'pt': [],
        'pn': []
    }
    # Init data writer
    writer = DataWriter(args.save_video).start()
    data_len = data_loader.length()
    im_names_desc = tqdm(range(data_len))
    batchSize = args.posebatch
    for i in im_names_desc:
        start_time = getTime()
        with torch.no_grad():
            (inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()
            if boxes is None or boxes.nelement() == 0:
                # no person detected; still record the frame
                writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
                continue
            ckpt_time, det_time = getTime(start_time)
            runtime_profile['dt'].append(det_time)
            # Pose Estimation
            datalen = inps.size(0)
            leftover = 0
            if (datalen) % batchSize:
                leftover = 1
            num_batches = datalen // batchSize + leftover
            hm = []
            for j in range(num_batches):
                inps_j = inps[j * batchSize:min((j + 1) * batchSize, datalen)].cuda()
                hm_j = pose_model(inps_j)
                hm.append(hm_j)
            hm = torch.cat(hm)
            ckpt_time, pose_time = getTime(ckpt_time)
            runtime_profile['pt'].append(pose_time)
            hm = hm.cpu()
            writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])
            ckpt_time, post_time = getTime(ckpt_time)
            runtime_profile['pn'].append(post_time)
        if args.profile:
            # TQDM
            im_names_desc.set_description(
                'det time: {dt:.3f} | pose time: {pt:.2f} | post processing: {pn:.4f}'.format(
                    dt=np.mean(runtime_profile['dt']), pt=np.mean(runtime_profile['pt']), pn=np.mean(runtime_profile['pn']))
            )
    print('===========================> Finish Model Running.')
    if (args.save_img or args.save_video) and not args.vis_fast:
        print('===========================> Rendering remaining images in the queue...')
        print('===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).')
    # drain the writer queue before collecting results
    while (writer.running()):
        pass
    writer.stop()
    final_result = writer.results()
    write_json(final_result, args.outputpath)
if __name__ == "__main__":
    # entry point: configure the shared `opt` namespace in code, then run
    args = opt
    args.dataset = 'coco'
    args.sp = True  # single-process mode; the multiprocessing setup below is skipped
    if not args.sp:
        torch.multiprocessing.set_start_method('forkserver', force=True)
        torch.multiprocessing.set_sharing_strategy('file_system')
    video_name = 'kunkun'
    args.inputpath = f'data/split_{video_name}'
    args.outputpath = f'data/alphapose_{video_name}'
    args.save_img = True
    main(args)
| 4,055 | 32.520661 | 144 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/visualization_copy.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import time
import cv2
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.animation import FuncAnimation, writers
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from mpl_toolkits.mplot3d import Axes3D
from tqdm import tqdm
# from common.utils import read_video
import sys
def read_video(filename, fps=None, skip=0, limit=-1):
    """Yield RGB frames from a video file as numpy arrays.

    fps: unused here (kept for interface compatibility).
    skip: number of leading frames to drop (they still count toward limit).
    limit: stop after reading this many frames in total (-1 = no limit).
    """
    stream = cv2.VideoCapture(filename)
    i = 0
    while True:
        grabbed, frame = stream.read()
        # if the `grabbed` boolean is `False`, then we have
        # reached the end of the video file
        if not grabbed:
            print('===========================> This video get ' + str(i) + ' frames in total.')
            sys.stdout.flush()
            break
        i += 1
        if i > skip:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # cv2 reads BGR
            yield np.array(frame)
        if i == limit:
            break
#### extra
class Skeleton:
    """Minimal 16-joint skeleton description used by the renderer."""
    # parent index of each joint; -1 marks the root
    # (a 17-joint variant, np.array([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 9, 8, 11, 12, 8, 14, 15]), was used previously)
    _PARENTS = (-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 8, 10, 11, 8, 13, 14)
    _RIGHT_JOINTS = (1, 2, 3, 13, 14, 15)
    def parents(self):
        """Return the parent index of every joint as a numpy array."""
        return np.array(self._PARENTS)
    def joints_right(self):
        """Return the indices of the right-side joints."""
        return list(self._RIGHT_JOINTS)
import torch
def qrot(q, v):
    """
    Rotate vector(s) v by the rotation described by quaternion(s) q.
    Expects a tensor of shape (*, 4) for q (w, x, y, z order) and a tensor of
    shape (*, 3) for v, where * denotes any number of matching dimensions.
    Returns a tensor of shape (*, 3).
    """
    assert q.shape[-1] == 4
    assert v.shape[-1] == 3
    assert q.shape[:-1] == v.shape[:-1]

    cross_dim = len(q.shape) - 1
    q_real = q[..., :1]   # scalar (real) part
    q_imag = q[..., 1:]   # vector (imaginary) part
    uv = torch.cross(q_imag, v, dim=cross_dim)
    uuv = torch.cross(q_imag, uv, dim=cross_dim)
    # Standard quaternion-rotation identity: v' = v + 2*(w*uv + q_imag x uv)
    return v + 2 * (q_real * uv + uuv)
def camera_to_world_bynumpy(X, R, t):
    """Transform numpy points X from camera to world coordinates.

    R is a quaternion that gets tiled over all leading dimensions of X;
    t is the translation added after rotation. The rotation runs in torch
    via qrot and the result is converted back to numpy.
    """
    quat = torch.from_numpy(np.tile(R, (*X.shape[:-1], 1)).astype('float32'))
    points = torch.from_numpy(X.astype('float32'))
    world = qrot(quat, points) + t
    return world.numpy()
####
def ckpt_time(ckpt=None, display=0, desc=''):
    """Simple checkpoint timer.

    Called without `ckpt`, returns the current time (a new checkpoint).
    Given a previous checkpoint, returns (elapsed_seconds, new_checkpoint)
    and, when `display` is truthy, prints the elapsed time prefixed by `desc`.
    """
    now = time.time()
    if not ckpt:
        return now
    elapsed = now - float(ckpt)
    if display:
        print(desc + ' consume time {:0.4f}'.format(elapsed))
    return elapsed, time.time()
def set_equal_aspect(ax, data):
    """
    Create white cubic bounding box to make sure that 3d axis is in equal aspect.
    :param ax: 3D axis
    :param data: shape of(frames, 3), generated from BVH using convert_bvh2dataset.py
    """
    xs, ys, zs = data[..., 0], data[..., 1], data[..., 2]

    # Half the side of a cube large enough to hold the data in every axis.
    spans = np.array([xs.max() - xs.min(), ys.max() - ys.min(), zs.max() - zs.min()])
    half = 0.5 * spans.max()
    corners = np.mgrid[-1:2:2, -1:2:2, -1:2:2]
    box_x = half * corners[0].flatten() + 0.5 * (xs.max() + xs.min())
    box_y = half * corners[1].flatten() + 0.5 * (ys.max() + ys.min())
    box_z = half * corners[2].flatten() + 0.5 * (zs.max() + zs.min())
    # Plot invisible (white) corner points so matplotlib scales all axes equally.
    for cx, cy, cz in zip(box_x, box_y, box_z):
        ax.plot([cx], [cy], [cz], 'w')
def downsample_tensor(X, factor):
    """Temporally downsample X by averaging non-overlapping groups of
    `factor` consecutive entries along axis 0; trailing entries that do
    not fill a complete group are dropped."""
    usable = X.shape[0] // factor * factor
    grouped = X[:usable].reshape(-1, factor, *X.shape[1:])
    return np.mean(grouped, axis=1)
def render_animation(keypoints, poses, skeleton, fps, bitrate, azim, output, viewport,
                     limit=-1, downsample=1, size=6, input_video_path=None, input_video_skip=0):
    """
    Render an animation: the input video (or a black background) with the 2D
    keypoints overlaid on the left, and one 3D subplot per entry of `poses`
    on the right. Supported outputs:
     -- 'filename.mp4': export as an h264 video (requires ffmpeg).
     -- 'filename.gif': export as a gif file (requires imagemagick).

    :param keypoints: 2D keypoints, array of shape (frames, joints, 2)
    :param poses: dict mapping subplot title -> 3D pose array (frames, joints, 3)
    :param skeleton: object exposing parents() and joints_right()
    :param fps: output frame rate (divided by `downsample` when downsampling)
    :param bitrate: bitrate passed to the ffmpeg writer (mp4 output only)
    :param azim: azimuth angle for the 3D views
    :param output: output path; must end in '.mp4' or '.gif'
    :param viewport: (width, height) of the black background when no video is given
    :param limit: maximum number of frames to render (< 1 means all)
    :param downsample: temporal downsampling factor applied to all inputs
    :param size: per-subplot figure size in inches
    :param input_video_path: optional video to show in the left panel
    :param input_video_skip: number of leading video frames to skip
    """
    plt.ioff()
    fig = plt.figure(figsize=(size * (1 + len(poses)), size))
    ax_in = fig.add_subplot(1, 1 + len(poses), 1)
    ax_in.get_xaxis().set_visible(False)
    ax_in.get_yaxis().set_visible(False)
    ax_in.set_axis_off()
    ax_in.set_title('Input')
    # prevent wired error
    _ = Axes3D.__class__.__name__
    ax_3d = []
    lines_3d = []
    trajectories = []  # per-subplot root (joint 0) XY path, used to recenter each view
    radius = 1.7
    for index, (title, data) in enumerate(poses.items()):
        ax = fig.add_subplot(1, 1 + len(poses), index + 2, projection='3d')
        ax.view_init(elev=15., azim=azim)
        ax.set_xlim3d([-radius / 2, radius / 2])
        ax.set_zlim3d([0, radius])
        ax.set_ylim3d([-radius / 2, radius / 2])
        # ax.set_aspect('equal')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_zticklabels([])
        ax.dist = 12.5
        ax.set_title(title)  # , pad=35
        for line in ax.xaxis.get_ticklines():
            line.set_visible(False)
        for line in ax.yaxis.get_ticklines():
            line.set_visible(False)
        for line in ax.zaxis.get_ticklines():
            line.set_visible(False)
        ax_3d.append(ax)
        lines_3d.append([])
        trajectories.append(data[:, 0, [0, 1]])
    poses = list(poses.values())
    # Decode video
    if input_video_path is None:
        # Black background
        all_frames = np.zeros((keypoints.shape[0], viewport[1], viewport[0]), dtype='uint8')
    else:
        # Load video using ffmpeg
        all_frames = []
        for f in read_video(input_video_path, fps=None, skip=input_video_skip):
            all_frames.append(f)
        effective_length = min(keypoints.shape[0], len(all_frames))
        all_frames = all_frames[:effective_length]
    if downsample > 1:
        # Downsample every stream consistently so indices stay aligned.
        keypoints = downsample_tensor(keypoints, downsample)
        all_frames = downsample_tensor(np.array(all_frames), downsample).astype('uint8')
        for idx in range(len(poses)):
            poses[idx] = downsample_tensor(poses[idx], downsample)
            trajectories[idx] = downsample_tensor(trajectories[idx], downsample)
        fps /= downsample
    # Matplotlib artists are created on the first frame and mutated afterwards.
    initialized = False
    image = None
    lines = []
    points = None
    if limit < 1:
        limit = len(all_frames)
    else:
        limit = min(limit, len(all_frames))
    parents = skeleton.parents()
    pbar = tqdm(total=limit)
    def update_video(i):
        # Draw (or update) frame i of the animation.
        nonlocal initialized, image, lines, points
        # Keep each 3D view centered on its root trajectory.
        for n, ax in enumerate(ax_3d):
            ax.set_xlim3d([-radius / 2 + trajectories[n][i, 0], radius / 2 + trajectories[n][i, 0]])
            ax.set_ylim3d([-radius / 2 + trajectories[n][i, 1], radius / 2 + trajectories[n][i, 1]])
        # Update 2D poses
        if not initialized:
            image = ax_in.imshow(all_frames[i], aspect='equal')
            for j, j_parent in enumerate(parents):
                if j_parent == -1:
                    continue
                if len(parents) == keypoints.shape[1]:  # and 1 == 2:
                    # Draw skeleton only if keypoints match (otherwise we don't have the parents definition)
                    lines.append(ax_in.plot([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
                                            [keypoints[i, j, 1], keypoints[i, j_parent, 1]], color='pink'))
                col = 'red' if j in skeleton.joints_right() else 'black'
                for n, ax in enumerate(ax_3d):
                    pos = poses[n][i]
                    lines_3d[n].append(ax.plot([pos[j, 0], pos[j_parent, 0]],
                                               [pos[j, 1], pos[j_parent, 1]],
                                               [pos[j, 2], pos[j_parent, 2]], zdir='z', c=col))
            points = ax_in.scatter(*keypoints[i].T, 5, color='red', edgecolors='white', zorder=10)
            initialized = True
        else:
            image.set_data(all_frames[i])
            for j, j_parent in enumerate(parents):
                if j_parent == -1:
                    continue
                # NOTE: the j - 1 index assumes only the root (j == 0) has parent -1.
                if len(parents) == keypoints.shape[1]:  # and 1 == 2:
                    lines[j - 1][0].set_data([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
                                             [keypoints[i, j, 1], keypoints[i, j_parent, 1]])
                for n, ax in enumerate(ax_3d):
                    pos = poses[n][i]
                    lines_3d[n][j - 1][0].set_xdata([pos[j, 0], pos[j_parent, 0]])
                    lines_3d[n][j - 1][0].set_ydata([pos[j, 1], pos[j_parent, 1]])
                    lines_3d[n][j - 1][0].set_3d_properties([pos[j, 2], pos[j_parent, 2]], zdir='z')
            points.set_offsets(keypoints[i])
        pbar.update()
    fig.tight_layout()
    anim = FuncAnimation(fig, update_video, frames=limit, interval=1000.0 / fps, repeat=False)
    if output.endswith('.mp4'):
        Writer = writers['ffmpeg']
        writer = Writer(fps=fps, metadata={}, bitrate=bitrate)
        anim.save(output, writer=writer)
    elif output.endswith('.gif'):
        anim.save(output, dpi=60, writer='imagemagick')
    else:
        raise ValueError('Unsupported output format (only .mp4 and .gif are supported)')
    pbar.close()
    plt.close()
def render_animation_test(keypoints, poses, skeleton, fps, bitrate, azim, output, viewport, limit=-1, downsample=1, size=6, input_video_frame=None,
                          input_video_skip=0, num=None):
    """
    Render a single frame for quick visual testing: `input_video_frame` on the
    left and the last predicted 3D pose (poses['Reconstruction'][-1]) on the
    right, shown live via cv2.imshow.

    :param keypoints: 2D keypoints (frames, joints, 2); only the joint count is used
    :param poses: dict containing a 'Reconstruction' array of 3D poses
    :param skeleton: object exposing parents() and joints_right()
    :param azim: azimuth angle for the 3D view
    :param input_video_frame: image shown in the left subplot
    :return: rendered figure as an RGB uint8 image of shape (H, W, 3)

    The remaining parameters are kept for interface compatibility and are unused.
    """
    t0 = ckpt_time()
    fig = plt.figure(figsize=(12, 6))
    canvas = FigureCanvas(fig)
    fig.add_subplot(121)
    plt.imshow(input_video_frame)
    # 3D
    ax = fig.add_subplot(122, projection='3d')
    ax.view_init(elev=15., azim=azim)
    # set axis extents
    radius = 1.7
    ax.set_xlim3d([-radius / 2, radius / 2])
    ax.set_zlim3d([0, radius])
    ax.set_ylim3d([-radius / 2, radius / 2])
    # FIX: ax.set_aspect('equal') raises NotImplementedError on 3D axes in
    # matplotlib >= 3.1; the symmetric limits above already give an equal
    # aspect, and the other render functions in this file comment it out too.
    # ax.set_aspect('equal')

    # hide axis tick labels
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.set_zticklabels([])
    ax.dist = 7.5

    # lxy add
    ax.set_xlabel('X Label')
    ax.set_ylabel('Y Label')
    ax.set_zlabel('Z Label')

    # array([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 9, 8, 11, 12, 8, 14, 15])
    parents = skeleton.parents()
    pos = poses['Reconstruction'][-1]
    _, t1 = ckpt_time(t0, desc='1 ')

    for j, j_parent in enumerate(parents):
        if j_parent == -1:
            continue
        # (removed: a dead branch that computed an unused `color_pink`)
        col = 'red' if j in skeleton.joints_right() else 'black'
        # draw the 3D bone from joint j to its parent
        ax.plot([pos[j, 0], pos[j_parent, 0]],
                [pos[j, 1], pos[j_parent, 1]],
                [pos[j, 2], pos[j_parent, 2]], zdir='z', c=col)

    # plt.savefig('test/3Dimage_{}.png'.format(1000+num))
    width, height = fig.get_size_inches() * fig.get_dpi()
    _, t2 = ckpt_time(t1, desc='2 ')
    canvas.draw()  # draw the canvas, cache the renderer
    # FIX: np.fromstring on binary data is deprecated; np.frombuffer is the
    # documented replacement and yields identical pixel values.
    image = np.frombuffer(canvas.tostring_rgb(), dtype='uint8').reshape(int(height), int(width), 3)
    cv2.imshow('im', image)
    cv2.waitKey(5)
    _, t3 = ckpt_time(t2, desc='3 ')
    return image
def render_animation_double(keypoints, poses, skeleton, fps, bitrate, azim, output, viewport,
                            poses_gt=None, limit=-1, downsample=1, size=6, input_video_path=None, input_video_skip=0):
    """
    Variant of render_animation that additionally overlays a ground-truth
    stick figure in every 3D subplot (note 1107): `poses_gt` is a dict shaped
    exactly like `poses`; predictions are drawn in red, ground truth in green.

    Render an animation. The supported output modes are:
     -- 'filename.mp4': render and export the animation as an h264 video (requires ffmpeg).
     -- 'filename.gif': render and export the animation a gif file (requires imagemagick).

    :param keypoints: 2D keypoints, array of shape (frames, joints, 2)
    :param poses: dict mapping subplot title -> predicted 3D pose array (frames, joints, 3)
    :param poses_gt: dict shaped like `poses` holding the ground-truth 3D poses
    Other parameters are identical to render_animation.
    """
    plt.ioff()
    fig = plt.figure(figsize=(size * (1 + len(poses)), size))
    ax_in = fig.add_subplot(1, 1 + len(poses), 1)
    ax_in.get_xaxis().set_visible(False)
    ax_in.get_yaxis().set_visible(False)
    ax_in.set_axis_off()
    ax_in.set_title('Input')
    # prevent wired error
    _ = Axes3D.__class__.__name__
    ax_3d = []
    lines_3d = []
    lines_3d_gt = []  # ground-truth line artists, parallel to lines_3d
    trajectories = []  # per-subplot root (joint 0) XY path used to recenter views
    radius = 1.7
    for index, (title, data) in enumerate(poses.items()):
        ax = fig.add_subplot(1, 1 + len(poses), index + 2, projection='3d')
        ax.view_init(elev=15., azim=azim)
        ax.set_xlim3d([-radius / 2, radius / 2])
        ax.set_zlim3d([0, radius])
        ax.set_ylim3d([-radius / 2, radius / 2])
        # ax.set_aspect('equal')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_zticklabels([])
        ax.dist = 12.5
        ax.set_title(title)  # , pad=35
        for line in ax.xaxis.get_ticklines():
            line.set_visible(False)
        for line in ax.yaxis.get_ticklines():
            line.set_visible(False)
        for line in ax.zaxis.get_ticklines():
            line.set_visible(False)
        ax_3d.append(ax)
        lines_3d.append([])
        lines_3d_gt.append([])
        trajectories.append(data[:, 0, [0, 1]])
    poses = list(poses.values())
    poses_gt = list(poses_gt.values()) #1107
    # Decode video
    if input_video_path is None:
        # Black background
        all_frames = np.zeros((keypoints.shape[0], viewport[1], viewport[0]), dtype='uint8')
    else:
        # Load video using ffmpeg
        all_frames = []
        for f in read_video(input_video_path, fps=None, skip=input_video_skip):
            all_frames.append(f)
        effective_length = min(keypoints.shape[0], len(all_frames))
        all_frames = all_frames[:effective_length]
    if downsample > 1:
        # Downsample every stream consistently so frame indices stay aligned.
        keypoints = downsample_tensor(keypoints, downsample)
        all_frames = downsample_tensor(np.array(all_frames), downsample).astype('uint8')
        for idx in range(len(poses)):
            poses[idx] = downsample_tensor(poses[idx], downsample)
            trajectories[idx] = downsample_tensor(trajectories[idx], downsample)
        fps /= downsample
    # Artists are created once on the first frame, then mutated per frame.
    initialized = False
    image = None
    lines = []
    points = None
    if limit < 1:
        limit = len(all_frames)
    else:
        limit = min(limit, len(all_frames))
    parents = skeleton.parents()
    pbar = tqdm(total=limit)
    def update_video(i):
        # Draw (or update) frame i of the animation.
        nonlocal initialized, image, lines, points
        # Keep each 3D view centered on its root trajectory.
        for n, ax in enumerate(ax_3d):
            ax.set_xlim3d([-radius / 2 + trajectories[n][i, 0], radius / 2 + trajectories[n][i, 0]])
            ax.set_ylim3d([-radius / 2 + trajectories[n][i, 1], radius / 2 + trajectories[n][i, 1]])
        # Update 2D poses
        if not initialized:
            image = ax_in.imshow(all_frames[i], aspect='equal')
            for j, j_parent in enumerate(parents):
                if j_parent == -1:
                    continue
                if len(parents) == keypoints.shape[1]:  # and 1 == 2:
                    # Draw skeleton only if keypoints match (otherwise we don't have the parents definition)
                    lines.append(ax_in.plot([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
                                            [keypoints[i, j, 1], keypoints[i, j_parent, 1]], color='pink'))
                # col = 'red' if j in skeleton.joints_right() else 'black'
                col = 'red' if j in skeleton.joints_right() else 'red'
                for n, ax in enumerate(ax_3d):
                    pos = poses[n][i]
                    lines_3d[n].append(ax.plot([pos[j, 0], pos[j_parent, 0]],
                                               [pos[j, 1], pos[j_parent, 1]],
                                               [pos[j, 2], pos[j_parent, 2]], zdir='z', c=col))
                # for gt
                col = 'green' if j in skeleton.joints_right() else 'green'
                for n, ax in enumerate(ax_3d):
                    pos_gt = poses_gt[n][i]
                    lines_3d_gt[n].append(ax.plot([pos_gt[j, 0], pos_gt[j_parent, 0]],
                                               [pos_gt[j, 1], pos_gt[j_parent, 1]],
                                               [pos_gt[j, 2], pos_gt[j_parent, 2]], zdir='z', c=col))
            points = ax_in.scatter(*keypoints[i].T, 5, color='red', edgecolors='white', zorder=10)
            initialized = True
        else:
            image.set_data(all_frames[i])
            for j, j_parent in enumerate(parents):
                if j_parent == -1:
                    continue
                # NOTE: the j - 1 index assumes only the root (j == 0) has parent -1.
                if len(parents) == keypoints.shape[1]:  # and 1 == 2:
                    lines[j - 1][0].set_data([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
                                             [keypoints[i, j, 1], keypoints[i, j_parent, 1]])
                for n, ax in enumerate(ax_3d):
                    pos = poses[n][i]
                    lines_3d[n][j - 1][0].set_xdata([pos[j, 0], pos[j_parent, 0]])
                    lines_3d[n][j - 1][0].set_ydata([pos[j, 1], pos[j_parent, 1]])
                    lines_3d[n][j - 1][0].set_3d_properties([pos[j, 2], pos[j_parent, 2]], zdir='z')
                    # update the ground-truth overlay in lockstep
                    pos_gt = poses_gt[n][i]
                    lines_3d_gt[n][j - 1][0].set_xdata([pos_gt[j, 0], pos_gt[j_parent, 0]])
                    lines_3d_gt[n][j - 1][0].set_ydata([pos_gt[j, 1], pos_gt[j_parent, 1]])
                    lines_3d_gt[n][j - 1][0].set_3d_properties([pos_gt[j, 2], pos_gt[j_parent, 2]], zdir='z')
            points.set_offsets(keypoints[i])
        pbar.update()
    fig.tight_layout()
    anim = FuncAnimation(fig, update_video, frames=limit, interval=1000.0 / fps, repeat=False)
    if output.endswith('.mp4'):
        Writer = writers['ffmpeg']
        writer = Writer(fps=fps, metadata={}, bitrate=bitrate)
        anim.save(output, writer=writer)
    elif output.endswith('.gif'):
        anim.save(output, dpi=60, writer='imagemagick')
    else:
        raise ValueError('Unsupported output format (only .mp4 and .gif are supported)')
    pbar.close()
    plt.close()
| 18,486 | 36.883197 | 147 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/fn.py | import collections
import math
import re
import time
import cv2
import numpy as np
import torch
from torch._six import string_classes, int_classes
# BGR color constants (OpenCV channel order) used by the drawing helpers below.
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
CYAN = (255, 255, 0)
YELLOW = (0, 255, 255)
ORANGE = (0, 165, 255)
PURPLE = (255, 0, 255)

# Maps numpy dtype names to the matching CPU torch tensor constructors;
# used by collate_fn when batching numpy scalar elements.
numpy_type_map = {
    'float64': torch.DoubleTensor,
    'float32': torch.FloatTensor,
    'float16': torch.HalfTensor,
    'int64': torch.LongTensor,
    'int32': torch.IntTensor,
    'int16': torch.ShortTensor,
    'int8': torch.CharTensor,
    'uint8': torch.ByteTensor,
}

# When True, collate_fn stacks tensors into shared-memory storage
# (useful when collation happens inside DataLoader worker processes).
_use_shared_memory = True
def collate_fn(batch):
    r"""Puts each data field into a tensor with outer dimension batch size.

    Mirrors torch's default collate: supports tensors, numpy arrays/scalars,
    ints, floats, strings, mappings and sequences (recursively).

    :param batch: list of samples to collate
    :raises TypeError: if the elements are of an unsupported type (or are
        numpy arrays of strings/objects)
    """
    # Local import: collections.Mapping/Sequence aliases were removed in
    # Python 3.10; the abc module is the supported location.
    from collections.abc import Mapping, Sequence

    error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
    elem_type = type(batch[0])

    if isinstance(batch[0], torch.Tensor):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        elem = batch[0]
        if elem_type.__name__ == 'ndarray':
            # array of string classes and object
            if re.search('[SaUO]', elem.dtype.str) is not None:
                raise TypeError(error_msg.format(elem.dtype))
            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    # torch._six.int_classes / string_classes were removed from torch; on
    # Python 3 they were simply `int` and `(str, bytes)`.
    elif isinstance(batch[0], int):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], (str, bytes)):
        return batch
    elif isinstance(batch[0], Mapping):
        return {key: collate_fn([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], Sequence):
        transposed = zip(*batch)
        return [collate_fn(samples) for samples in transposed]

    raise TypeError((error_msg.format(type(batch[0]))))
def collate_fn_list(batch):
    """Collate (img, inp, im_name) triples: images and names are batched
    through collate_fn, while the `inp` entries are passed through as a tuple."""
    images, inps, names = zip(*batch)
    return collate_fn(images), inps, collate_fn(names)
def vis_frame_fast(frame, im_res, format='coco'):
    '''
    Fast skeleton renderer (plain circles and lines, no alpha blending).

    frame: frame image (BGR, drawn on in place)
    im_res: prediction dict with 'imgname' and a 'result' list; each entry
            holds 'keypoints' and 'kp_score' torch tensors
    format: coco or mpii
    return rendered image
    :raises NotImplementedError: for any other format
    '''
    if format == 'coco':
        l_pair = [
            (0, 1), (0, 2), (1, 3), (2, 4),  # Head
            (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
            (17, 11), (17, 12),  # Body
            (11, 13), (12, 14), (13, 15), (14, 16)
        ]
        p_color = [(0, 255, 255), (0, 191, 255), (0, 255, 102), (0, 77, 255), (0, 255, 0),  # Nose, LEye, REye, LEar, REar
                   (77, 255, 255), (77, 255, 204), (77, 204, 255), (191, 255, 77), (77, 191, 255), (191, 255, 77),
                   # LShoulder, RShoulder, LElbow, RElbow, LWrist, RWrist
                   (204, 77, 255), (77, 255, 204), (191, 77, 255), (77, 255, 191), (127, 77, 255), (77, 255, 127),
                   (0, 255, 255)]  # LHip, RHip, LKnee, Rknee, LAnkle, RAnkle, Neck
        line_color = [(0, 215, 255), (0, 255, 204), (0, 134, 255), (0, 255, 50),
                      (77, 255, 222), (77, 196, 255), (77, 135, 255), (191, 255, 77), (77, 255, 77),
                      (77, 222, 255), (255, 156, 127),
                      (0, 127, 255), (255, 127, 77), (0, 77, 255), (255, 77, 36)]
    elif format == 'mpii':
        l_pair = [
            (8, 9), (11, 12), (11, 10), (2, 1), (1, 0),
            (13, 14), (14, 15), (3, 4), (4, 5),
            (8, 7), (7, 6), (6, 2), (6, 3), (8, 12), (8, 13)
        ]
        p_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
        # FIX: line_color was missing for mpii, causing a NameError when
        # drawing limbs; use the same palette as vis_frame().
        line_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
    else:
        # FIX: the exception was previously created but never raised.
        raise NotImplementedError

    im_name = im_res['imgname'].split('/')[-1]
    img = frame
    for human in im_res['result']:
        part_line = {}
        kp_preds = human['keypoints']
        kp_scores = human['kp_score']
        # Append a synthetic joint 17 (midpoint of joints 5 and 6 — the Neck
        # in the COCO palette above), referenced by the (17, 11)/(17, 12) limbs.
        kp_preds = torch.cat((kp_preds, torch.unsqueeze((kp_preds[5, :] + kp_preds[6, :]) / 2, 0)))
        kp_scores = torch.cat((kp_scores, torch.unsqueeze((kp_scores[5, :] + kp_scores[6, :]) / 2, 0)))
        # Draw keypoints
        for n in range(kp_scores.shape[0]):
            if kp_scores[n] <= 0.05:
                continue
            cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])
            part_line[n] = (cor_x, cor_y)
            cv2.circle(img, (cor_x, cor_y), 4, p_color[n], -1)
        # Draw limbs
        for i, (start_p, end_p) in enumerate(l_pair):
            if start_p in part_line and end_p in part_line:
                start_xy = part_line[start_p]
                end_xy = part_line[end_p]
                # FIX: cv2.line requires an integer thickness; the score sum is
                # a float tensor, so cast explicitly (confidence-weighted width).
                cv2.line(img, start_xy, end_xy, line_color[i],
                         int(2 * (kp_scores[start_p] + kp_scores[end_p]) + 1))
    return img
def vis_frame(frame, im_res, format='coco'):
    '''
    Slower, prettier skeleton renderer: works at half resolution, keypoints
    and limbs alpha-blended with their confidence, limbs drawn as filled
    ellipses; the result is upscaled back to the original size.

    frame: frame image (BGR)
    im_res: im_res of predictions; dict with 'imgname' and a 'result' list,
            each entry holding 'keypoints', 'kp_score' and 'proposal_score'
            torch tensors
    format: coco or mpii
    return rendered image
    '''
    if format == 'coco':
        l_pair = [
            (0, 1), (0, 2), (1, 3), (2, 4),  # Head
            (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
            (17, 11), (17, 12),  # Body
            (11, 13), (12, 14), (13, 15), (14, 16)
        ]
        p_color = [(0, 255, 255), (0, 191, 255), (0, 255, 102), (0, 77, 255), (0, 255, 0),  # Nose, LEye, REye, LEar, REar
                   (77, 255, 255), (77, 255, 204), (77, 204, 255), (191, 255, 77), (77, 191, 255), (191, 255, 77),
                   # LShoulder, RShoulder, LElbow, RElbow, LWrist, RWrist
                   (204, 77, 255), (77, 255, 204), (191, 77, 255), (77, 255, 191), (127, 77, 255), (77, 255, 127),
                   (0, 255, 255)]  # LHip, RHip, LKnee, Rknee, LAnkle, RAnkle, Neck
        line_color = [(0, 215, 255), (0, 255, 204), (0, 134, 255), (0, 255, 50),
                      (77, 255, 222), (77, 196, 255), (77, 135, 255), (191, 255, 77), (77, 255, 77),
                      (77, 222, 255), (255, 156, 127),
                      (0, 127, 255), (255, 127, 77), (0, 77, 255), (255, 77, 36)]
    elif format == 'mpii':
        l_pair = [
            (8, 9), (11, 12), (11, 10), (2, 1), (1, 0),
            (13, 14), (14, 15), (3, 4), (4, 5),
            (8, 7), (7, 6), (6, 2), (6, 3), (8, 12), (8, 13)
        ]
        p_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
        line_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
    else:
        raise NotImplementedError

    im_name = im_res['imgname'].split('/')[-1]
    img = frame
    height, width = img.shape[:2]
    # Work at half resolution for speed; upscaled back at the end.
    img = cv2.resize(img, (int(width / 2), int(height / 2)))
    for human in im_res['result']:
        part_line = {}
        kp_preds = human['keypoints']
        kp_scores = human['kp_score']
        # Append a synthetic joint 17 (midpoint of joints 5 and 6 — the Neck
        # in the COCO palette above), referenced by the (17, 11)/(17, 12) limbs.
        kp_preds = torch.cat((kp_preds, torch.unsqueeze((kp_preds[5, :] + kp_preds[6, :]) / 2, 0)))
        kp_scores = torch.cat((kp_scores, torch.unsqueeze((kp_scores[5, :] + kp_scores[6, :]) / 2, 0)))
        # Draw keypoints
        for n in range(kp_scores.shape[0]):
            if kp_scores[n] <= 0.05:
                continue
            cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])
            part_line[n] = (int(cor_x / 2), int(cor_y / 2))
            bg = img.copy()
            cv2.circle(bg, (int(cor_x / 2), int(cor_y / 2)), 2, p_color[n], -1)
            # Now create a mask of logo and create its inverse mask also
            # transparency = max(0, min(1, kp_scores[n]))
            # Blend the keypoint into the image with its confidence as alpha.
            transparency = float(max(0, min(1, kp_scores[n])))
            img = cv2.addWeighted(bg, transparency, img, 1 - transparency, 0)
        # Draw proposal score on the head
        # (eye midpoint, already divided by 2 to match the half-size image)
        middle_eye = (kp_preds[1] + kp_preds[2]) / 4
        middle_cor = int(middle_eye[0]) - 10, int(middle_eye[1]) - 12
        cv2.putText(img, f"{human['proposal_score'].item():.2f}", middle_cor, cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 255))
        # Draw limbs
        for i, (start_p, end_p) in enumerate(l_pair):
            if start_p in part_line and end_p in part_line:
                start_xy = part_line[start_p]
                end_xy = part_line[end_p]
                bg = img.copy()

                # Limbs are drawn as filled ellipses oriented along the bone,
                # with width proportional to the joint confidences.
                X = (start_xy[0], end_xy[0])
                Y = (start_xy[1], end_xy[1])
                mX = np.mean(X)
                mY = np.mean(Y)
                length = ((Y[0] - Y[1]) ** 2 + (X[0] - X[1]) ** 2) ** 0.5
                angle = math.degrees(math.atan2(Y[0] - Y[1], X[0] - X[1]))
                stickwidth = (kp_scores[start_p] + kp_scores[end_p]) + 1
                polygon = cv2.ellipse2Poly((int(mX), int(mY)), (int(length / 2), int(stickwidth)), int(angle), 0, 360, 1)
                cv2.fillConvexPoly(bg, polygon, line_color[i])
                # cv2.line(bg, start_xy, end_xy, line_color[i], (2 * (kp_scores[start_p] + kp_scores[end_p])) + 1)
                transparency = max(0, min(1, 0.5 * (kp_scores[start_p] + kp_scores[end_p])))
                transparency = float(transparency)
                img = cv2.addWeighted(bg, transparency, img, 1 - transparency, 0)
    # Restore the original resolution.
    img = cv2.resize(img, (width, height), interpolation=cv2.INTER_CUBIC)
    return img
def getTime(time1=0):
    """With no argument, return the current time (a new checkpoint).
    Given a previous timestamp, return (current_time, seconds_elapsed)."""
    now = time.time()
    if not time1:
        return now
    return now, now - time1
| 9,863 | 40.79661 | 122 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/dataloader.py | import os
import sys
import time
from multiprocessing import Queue as pQueue
from threading import Thread
import cv2
import numpy as np
import torch
import torch.multiprocessing as mp
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image
from torch.autograd import Variable
from SPPE.src.utils.eval import getPrediction, getMultiPeakPrediction
from SPPE.src.utils.img import load_image, cropBox, im_to_torch
from matching import candidate_reselect as matching
from opt import opt
from pPose_nms import pose_nms
from yolo.darknet import Darknet
from yolo.preprocess import prep_image, prep_frame
from yolo.util import dynamic_write_results
# import the Queue class from Python 3
if sys.version_info >= (3, 0):
from queue import Queue, LifoQueue
# otherwise, import the Queue class for Python 2.7
else:
from Queue import Queue, LifoQueue
if opt.vis_fast:
from fn import vis_frame_fast as vis_frame
else:
from fn import vis_frame
class Image_loader(data.Dataset):
    """Dataset over images in opt.inputpath.

    Depending on `format`, each item is either an SSD-style sample
    (512x512, normalized tensor) or a YOLO-style sample (letterboxed via
    prep_image, plus the original image and dimensions).
    """

    def __init__(self, im_names, format='yolo'):
        super(Image_loader, self).__init__()
        self.img_dir = opt.inputpath
        self.imglist = im_names
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])
        self.format = format

    def getitem_ssd(self, index):
        # Strip any trailing newline/CR the name list may carry.
        name = self.imglist[index].rstrip('\n').rstrip('\r')
        name = os.path.join(self.img_dir, name)
        pil_img = Image.open(name)
        inp = load_image(name)
        if pil_img.mode == 'L':
            # Promote grayscale images to 3 channels.
            pil_img = pil_img.convert('RGB')
        side = 512
        pil_img = pil_img.resize((side, side))
        return self.transform(pil_img), inp, name

    def getitem_yolo(self, index):
        inp_dim = int(opt.inp_dim)
        name = self.imglist[index].rstrip('\n').rstrip('\r')
        name = os.path.join(self.img_dir, name)
        im, orig_img, im_dim = prep_image(name, inp_dim)
        # im_dim = torch.FloatTensor([im_dim]).repeat(1, 2)
        inp = load_image(name)
        return im, inp, orig_img, name, im_dim

    def __getitem__(self, index):
        if self.format == 'ssd':
            return self.getitem_ssd(index)
        if self.format == 'yolo':
            return self.getitem_yolo(index)
        raise NotImplementedError

    def __len__(self):
        return len(self.imglist)
class ImageLoader:
    """Background image loader.

    Reads images from opt.inputpath (in a thread when opt.sp, otherwise in a
    separate process) and pushes samples onto an internal queue. 'yolo'
    format queues batched (img, orig_img, im_name, im_dim_list) tuples;
    'ssd' format queues one (im, inp, im_name) tuple per image.
    """

    def __init__(self, im_names, batchSize=1, format='yolo', queueSize=50):
        self.img_dir = opt.inputpath
        self.imglist = im_names
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])
        self.format = format

        self.batchSize = batchSize
        self.datalen = len(self.imglist)
        # Round the batch count up when the dataset doesn't divide evenly.
        leftover = 0
        if (self.datalen) % batchSize:
            leftover = 1
        self.num_batches = self.datalen // batchSize + leftover

        # initialize the queue used to store data
        if opt.sp:
            self.Q = Queue(maxsize=queueSize)
        else:
            self.Q = mp.Queue(maxsize=queueSize)

    def start(self):
        # start a thread to read frames from the file video stream
        if self.format == 'ssd':
            if opt.sp:
                p = Thread(target=self.getitem_ssd, args=())
            else:
                p = mp.Process(target=self.getitem_ssd, args=())
        elif self.format == 'yolo':
            if opt.sp:
                p = Thread(target=self.getitem_yolo, args=())
            else:
                p = mp.Process(target=self.getitem_yolo, args=())
        else:
            raise NotImplementedError
        p.daemon = True
        p.start()
        return self

    def getitem_ssd(self):
        # Producer loop for SSD format: one normalized 512x512 image at a time.
        length = len(self.imglist)
        for index in range(length):
            im_name = self.imglist[index].rstrip('\n').rstrip('\r')
            im_name = os.path.join(self.img_dir, im_name)
            im = Image.open(im_name)
            inp = load_image(im_name)
            if im.mode == 'L':
                # Promote grayscale to 3 channels before normalization.
                im = im.convert('RGB')

            ow = oh = 512
            im = im.resize((ow, oh))
            im = self.transform(im)
            while self.Q.full():
                time.sleep(2)
            self.Q.put((im, inp, im_name))

    def getitem_yolo(self):
        # Producer loop for YOLO format: letterboxed image batches.
        for i in range(self.num_batches):
            img = []
            orig_img = []
            im_name = []
            im_dim_list = []
            for k in range(i * self.batchSize, min((i + 1) * self.batchSize, self.datalen)):
                inp_dim = int(opt.inp_dim)
                im_name_k = self.imglist[k].rstrip('\n').rstrip('\r')
                im_name_k = os.path.join(self.img_dir, im_name_k)
                img_k, orig_img_k, im_dim_list_k = prep_image(im_name_k, inp_dim)

                img.append(img_k)
                orig_img.append(orig_img_k)
                im_name.append(im_name_k)
                im_dim_list.append(im_dim_list_k)

            with torch.no_grad():
                # Human Detection
                img = torch.cat(img)
                # (w, h) repeated to (w, h, w, h) for box rescaling downstream.
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
                im_dim_list_ = im_dim_list  # (unused)

            while self.Q.full():
                time.sleep(2)
            self.Q.put((img, orig_img, im_name, im_dim_list))

    def getitem(self):
        # Blocking read of the next queued sample/batch.
        return self.Q.get()

    def length(self):
        # Total number of images.
        return len(self.imglist)

    def len(self):
        # Current queue size.
        return self.Q.qsize()
class VideoLoader:
    """Background video-frame loader.

    Reads frames from a video file in batches (thread when opt.sp, otherwise
    a separate process) and pushes (img, orig_img, im_name, im_dim_list)
    tuples onto an internal queue, where `img` holds letterboxed tensors
    produced by prep_frame. A (None, None, None, None) sentinel marks the
    end of the stream.
    """

    def __init__(self, path, batchSize=1, queueSize=50):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.path = path
        self.stream = cv2.VideoCapture(path)
        assert self.stream.isOpened(), 'Cannot capture source'
        self.stopped = False

        self.batchSize = batchSize
        self.datalen = int(self.stream.get(cv2.CAP_PROP_FRAME_COUNT))
        # Round the batch count up when frames don't divide evenly.
        leftover = 0
        if (self.datalen) % batchSize:
            leftover = 1
        self.num_batches = self.datalen // batchSize + leftover

        # initialize the queue used to store frames read from
        # the video file
        if opt.sp:
            self.Q = Queue(maxsize=queueSize)
        else:
            self.Q = mp.Queue(maxsize=queueSize)

    def length(self):
        # Frame count reported by the container.
        return self.datalen

    def start(self):
        # start a thread to read frames from the file video stream
        if opt.sp:
            t = Thread(target=self.update, args=())
            t.daemon = True
            t.start()
        else:
            p = mp.Process(target=self.update, args=())
            p.daemon = True
            p.start()
        return self

    def update(self):
        # Producer loop; re-opens the stream so the worker owns its own handle.
        stream = cv2.VideoCapture(self.path)
        assert stream.isOpened(), 'Cannot capture source'

        for i in range(self.num_batches):
            img = []
            orig_img = []
            im_name = []
            im_dim_list = []
            for k in range(i * self.batchSize, min((i + 1) * self.batchSize, self.datalen)):
                inp_dim = int(opt.inp_dim)
                (grabbed, frame) = stream.read()
                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file
                if not grabbed:
                    # Signal end-of-stream to consumers with a sentinel tuple.
                    self.Q.put((None, None, None, None))
                    print('===========================> This video get ' + str(k) + ' frames in total.')
                    sys.stdout.flush()
                    return
                # process and add the frame to the queue
                img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)

                img.append(img_k)
                orig_img.append(orig_img_k)
                im_name.append(str(k) + '.jpg')
                im_dim_list.append(im_dim_list_k)

            with torch.no_grad():
                # Human Detection
                img = torch.cat(img)
                # (w, h) repeated to (w, h, w, h) for box rescaling downstream.
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)

            while self.Q.full():
                time.sleep(2)
            self.Q.put((img, orig_img, im_name, im_dim_list))

    def videoinfo(self):
        # indicate the video info
        fourcc = int(self.stream.get(cv2.CAP_PROP_FOURCC))
        fps = self.stream.get(cv2.CAP_PROP_FPS)
        frameSize = (int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        return (fourcc, fps, frameSize)

    def getitem(self):
        # return next frame in the queue
        return self.Q.get()

    def len(self):
        # Current queue size.
        return self.Q.qsize()
class DetectionLoader:
    """Runs YOLOv3-SPP person detection on batches from `dataloder`.

    Consumes (img, orig_img, im_name, im_dim_list) batches, runs the
    detector plus NMS, rescales boxes back to original-image coordinates and
    queues per-image tuples (orig_img, im_name, boxes, scores, inps, pt1, pt2)
    for the pose estimator. Frames with no detections are queued with None
    placeholders; an all-None tuple marks end of stream.
    """

    def __init__(self, dataloder, batchSize=1, queueSize=1024):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        # NOTE: detector config/weight paths are hard-coded relative to the repo root.
        self.det_model = Darknet("joints_detectors/Alphapose/yolo/cfg/yolov3-spp.cfg")
        self.det_model.load_weights('joints_detectors/Alphapose/models/yolo/yolov3-spp.weights')
        self.det_model.net_info['height'] = opt.inp_dim
        self.det_inp_dim = int(self.det_model.net_info['height'])
        # Input side must be a positive multiple of 32 (network stride).
        assert self.det_inp_dim % 32 == 0
        assert self.det_inp_dim > 32
        self.det_model.cuda()
        self.det_model.eval()

        self.stopped = False
        self.dataloder = dataloder
        self.batchSize = batchSize
        self.datalen = self.dataloder.length()
        # Round the batch count up when the data doesn't divide evenly.
        leftover = 0
        if (self.datalen) % batchSize:
            leftover = 1
        self.num_batches = self.datalen // batchSize + leftover
        # initialize the queue used to store frames read from
        # the video file
        if opt.sp:
            self.Q = Queue(maxsize=queueSize)
        else:
            self.Q = mp.Queue(maxsize=queueSize)

    def start(self):
        # start a thread to read frames from the file video stream
        if opt.sp:
            t = Thread(target=self.update, args=())
            t.daemon = True
            t.start()
        else:
            p = mp.Process(target=self.update, args=(), daemon=True)
            # p = mp.Process(target=self.update, args=())
            # p.daemon = True
            p.start()
        return self

    def update(self):
        # keep looping the whole dataset
        for i in range(self.num_batches):
            img, orig_img, im_name, im_dim_list = self.dataloder.getitem()
            if img is None:
                # Upstream end-of-stream sentinel; forward it and stop.
                self.Q.put((None, None, None, None, None, None, None))
                return

            with torch.no_grad():
                # Human Detection
                img = img.cuda()
                prediction = self.det_model(img, CUDA=True)
                # NMS process
                dets = dynamic_write_results(prediction, opt.confidence,
                                             opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
                if isinstance(dets, int) or dets.shape[0] == 0:
                    # No detections in the whole batch: queue every frame empty.
                    for k in range(len(orig_img)):
                        if self.Q.full():
                            time.sleep(2)
                        self.Q.put((orig_img[k], im_name[k], None, None, None, None, None))
                    continue
                dets = dets.cpu()
                # Column 0 of dets is the batch index of each detection.
                im_dim_list = torch.index_select(im_dim_list, 0, dets[:, 0].long())
                scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)

                # coordinate transfer: undo the letterbox padding and scaling so
                # boxes land in original-image pixel coordinates
                dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
                dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2

                dets[:, 1:5] /= scaling_factor
                # Clamp boxes to the image bounds.
                for j in range(dets.shape[0]):
                    dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
                    dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
                boxes = dets[:, 1:5]
                scores = dets[:, 5:6]

            for k in range(len(orig_img)):
                boxes_k = boxes[dets[:, 0] == k]
                if isinstance(boxes_k, int) or boxes_k.shape[0] == 0:
                    if self.Q.full():
                        time.sleep(2)
                    self.Q.put((orig_img[k], im_name[k], None, None, None, None, None))
                    continue
                # Pre-allocate crop buffers filled later by crop_from_dets.
                inps = torch.zeros(boxes_k.size(0), 3, opt.inputResH, opt.inputResW)
                pt1 = torch.zeros(boxes_k.size(0), 2)
                pt2 = torch.zeros(boxes_k.size(0), 2)
                if self.Q.full():
                    time.sleep(2)
                self.Q.put((orig_img[k], im_name[k], boxes_k, scores[dets[:, 0] == k], inps, pt1, pt2))

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    def len(self):
        # return queue len
        return self.Q.qsize()
class DetectionProcessor:
    """Crops detected person boxes into pose-network input tensors.

    Reads tuples from a DetectionLoader and queues
    (inps, orig_img, im_name, boxes, scores, pt1, pt2) for the pose
    estimator, where `inps` are the per-box crops produced by crop_from_dets.
    An all-None tuple marks end of stream.
    """

    def __init__(self, detectionLoader, queueSize=1024):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.detectionLoader = detectionLoader
        self.stopped = False
        self.datalen = self.detectionLoader.datalen

        # initialize the queue used to store data
        if opt.sp:
            self.Q = Queue(maxsize=queueSize)
        else:
            self.Q = pQueue(maxsize=queueSize)

    def start(self):
        # start a thread to read frames from the file video stream
        if opt.sp:
            # t = Thread(target=self.update, args=(), daemon=True)
            t = Thread(target=self.update, args=())
            t.daemon = True
            t.start()
        else:
            p = mp.Process(target=self.update, args=(), daemon=True)
            # p = mp.Process(target=self.update, args=())
            # p.daemon = True
            p.start()
        return self

    def update(self):
        # keep looping the whole dataset
        for i in range(self.datalen):

            with torch.no_grad():
                (orig_img, im_name, boxes, scores, inps, pt1, pt2) = self.detectionLoader.read()
                if orig_img is None:
                    # Propagate the end-of-stream sentinel and stop.
                    self.Q.put((None, None, None, None, None, None, None))
                    return
                if boxes is None or boxes.nelement() == 0:
                    # Frame without detections: queue it with empty slots.
                    while self.Q.full():
                        time.sleep(0.2)
                    self.Q.put((None, orig_img, im_name, boxes, scores, None, None))
                    continue
                # Convert BGR frame to an RGB torch image, then crop each box.
                inp = im_to_torch(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))
                inps, pt1, pt2 = crop_from_dets(inp, boxes, inps, pt1, pt2)

                while self.Q.full():
                    time.sleep(0.2)
                self.Q.put((inps, orig_img, im_name, boxes, scores, pt1, pt2))

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    def len(self):
        # return queue len
        return self.Q.qsize()
class VideoDetectionLoader:
    """Threaded loader that reads a video file and runs YOLO person detection.

    Frames are read in batches of `batchSize`, pushed through the detector,
    and per-frame tuples (inp, orig_img, boxes, scores) are placed on an
    internal queue; boxes/scores are None for frames with no detection.
    """

    def __init__(self, path, batchSize=4, queueSize=256):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
        self.det_model.load_weights('models/yolo/yolov3-spp.weights')
        self.det_model.net_info['height'] = opt.inp_dim
        self.det_inp_dim = int(self.det_model.net_info['height'])
        # YOLO input side must be a multiple of 32 (network stride)
        assert self.det_inp_dim % 32 == 0
        assert self.det_inp_dim > 32
        self.det_model.cuda()
        self.det_model.eval()

        self.stream = cv2.VideoCapture(path)
        assert self.stream.isOpened(), 'Cannot capture source'
        self.stopped = False
        self.batchSize = batchSize
        self.datalen = int(self.stream.get(cv2.CAP_PROP_FRAME_COUNT))
        leftover = 0
        if (self.datalen) % batchSize:
            leftover = 1
        self.num_batches = self.datalen // batchSize + leftover
        # initialize the queue used to store frames read from
        # the video file
        self.Q = Queue(maxsize=queueSize)

    def length(self):
        # total number of frames reported by the source video
        return self.datalen

    def len(self):
        # number of processed frames currently waiting in the queue
        return self.Q.qsize()

    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        """Worker loop: detect people batch-by-batch over the whole video."""
        # keep looping the whole video
        for i in range(self.num_batches):
            img = []
            inp = []
            orig_img = []
            im_name = []
            im_dim_list = []
            for k in range(i * self.batchSize, min((i + 1) * self.batchSize, self.datalen)):
                (grabbed, frame) = self.stream.read()
                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file
                if not grabbed:
                    self.stop()
                    return
                # process and add the frame to the queue
                inp_dim = int(opt.inp_dim)
                img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
                inp_k = im_to_torch(orig_img_k)

                img.append(img_k)
                inp.append(inp_k)
                orig_img.append(orig_img_k)
                im_dim_list.append(im_dim_list_k)

            with torch.no_grad():
                ht = inp[0].size(1)  # NOTE(review): ht/wd are computed but never used
                wd = inp[0].size(2)
                # Human Detection
                img = Variable(torch.cat(img)).cuda()
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
                im_dim_list = im_dim_list.cuda()

                prediction = self.det_model(img, CUDA=True)
                # NMS process
                dets = dynamic_write_results(prediction, opt.confidence,
                                             opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
                if isinstance(dets, int) or dets.shape[0] == 0:
                    # no person in this batch: emit each frame with empty results
                    for k in range(len(inp)):
                        while self.Q.full():
                            time.sleep(0.2)
                        self.Q.put((inp[k], orig_img[k], None, None))
                    continue

                im_dim_list = torch.index_select(im_dim_list, 0, dets[:, 0].long())
                scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)

                # coordinate transfer: map letterboxed detector coordinates back
                # to original-frame pixels
                dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
                dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
                dets[:, 1:5] /= scaling_factor
                for j in range(dets.shape[0]):
                    dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
                    dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
                boxes = dets[:, 1:5].cpu()
                scores = dets[:, 5:6].cpu()

            for k in range(len(inp)):
                # dets[:, 0] holds the batch index of each detection
                while self.Q.full():
                    time.sleep(0.2)
                self.Q.put((inp[k], orig_img[k], boxes[dets[:, 0] == k], scores[dets[:, 0] == k]))

    def videoinfo(self):
        # indicate the video info
        fourcc = int(self.stream.get(cv2.CAP_PROP_FOURCC))
        fps = self.stream.get(cv2.CAP_PROP_FPS)
        frameSize = (int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        return (fourcc, fps, frameSize)

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    def more(self):
        # return True if there are still frames in the queue
        return self.Q.qsize() > 0

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
class WebcamLoader:
    """Threaded webcam reader.

    Continuously grabs camera frames into a LIFO queue so the consumer always
    sees the most recent frame; when the queue fills, the whole backlog is
    dropped (stale frames are useless for a live demo).
    """

    def __init__(self, webcam, queueSize=256):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.stream = cv2.VideoCapture(int(webcam))
        assert self.stream.isOpened(), 'Cannot capture source'
        self.stopped = False
        # initialize the queue used to store frames read from
        # the video file (LIFO: newest frame first)
        self.Q = LifoQueue(maxsize=queueSize)

    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # keep looping infinitely
        while True:
            # otherwise, ensure the queue has room in it
            if not self.Q.full():
                # read the next frame from the file
                (grabbed, frame) = self.stream.read()
                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file
                if not grabbed:
                    self.stop()
                    return
                # process and add the frame to the queue
                inp_dim = int(opt.inp_dim)
                img, orig_img, dim = prep_frame(frame, inp_dim)
                inp = im_to_torch(orig_img)
                im_dim_list = torch.FloatTensor([dim]).repeat(1, 2)

                self.Q.put((img, orig_img, inp, im_dim_list))
            else:
                # queue is full: drop the backlog so the consumer stays live
                # (reaches into Queue internals, guarded by its own mutex)
                with self.Q.mutex:
                    self.Q.queue.clear()

    def videoinfo(self):
        # indicate the video info
        fourcc = int(self.stream.get(cv2.CAP_PROP_FOURCC))
        fps = self.stream.get(cv2.CAP_PROP_FPS)
        frameSize = (int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        return (fourcc, fps, frameSize)

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    def len(self):
        # return queue size
        return self.Q.qsize()

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
class DataWriter:
    """Consumer stage: turns heatmaps into final poses and renders/saves them.

    Items arrive via save() as (boxes, scores, hm_data, pt1, pt2, orig_img,
    im_name); a worker thread runs pose post-processing (pose-NMS or
    multi-peak matching), accumulates results in final_result, and optionally
    visualises / writes images or a video. Note: rendering flags are read
    from the global `opt`, not from constructor arguments.
    """

    def __init__(self, save_video=False,
                 savepath='examples/res/1.avi', fourcc=cv2.VideoWriter_fourcc(*'XVID'), fps=25, frameSize=(640, 480),
                 queueSize=1024):
        # NOTE(review): the fourcc default is evaluated once at import time;
        # harmless here (it is an int), but worth confirming intent.
        if save_video:
            # initialize the file video stream along with the boolean
            # used to indicate if the thread should be stopped or not
            self.stream = cv2.VideoWriter(savepath, fourcc, fps, frameSize)
            assert self.stream.isOpened(), 'Cannot open video for writing'
        self.save_video = save_video
        self.stopped = False
        self.final_result = []
        # initialize the queue used to store frames read from
        # the video file
        self.Q = Queue(maxsize=queueSize)
        if opt.save_img:
            if not os.path.exists(opt.outputpath + '/vis'):
                os.mkdir(opt.outputpath + '/vis')

    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=(), daemon=True)
        # t = Thread(target=self.update, args=())
        # t.daemon = True
        t.start()
        return self

    def update(self):
        """Worker loop: post-process queued heatmaps until stop() is called."""
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty
            if not self.Q.empty():
                (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
                orig_img = np.array(orig_img, dtype=np.uint8)
                if boxes is None:
                    # no detection for this frame: pass the raw image through
                    if opt.save_img or opt.save_video or opt.vis:
                        img = orig_img
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        if opt.save_video:
                            self.stream.write(img)
                else:
                    # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                    if opt.matching:
                        preds = getMultiPeakPrediction(
                            hm_data, pt1.numpy(), pt2.numpy(), opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
                        result = matching(boxes, scores.numpy(), preds)
                    else:
                        preds_hm, preds_img, preds_scores = getPrediction(
                            hm_data, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
                        result = pose_nms(
                            boxes, scores, preds_img, preds_scores)
                    result = {
                        'imgname': im_name,
                        'result': result
                    }
                    self.final_result.append(result)
                    if opt.save_img or opt.save_video or opt.vis:
                        img = vis_frame(orig_img, result)
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        if opt.save_video:
                            self.stream.write(img)
            else:
                time.sleep(0.1)

    def running(self):
        # indicate that the thread is still running (i.e. work remains queued)
        time.sleep(0.2)
        return not self.Q.empty()

    def save(self, boxes, scores, hm_data, pt1, pt2, orig_img, im_name):
        # save next frame in the queue
        self.Q.put((boxes, scores, hm_data, pt1, pt2, orig_img, im_name))

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
        time.sleep(0.2)

    def results(self):
        # return final result
        return self.final_result

    def len(self):
        # return queue len
        return self.Q.qsize()
class Mscoco(data.Dataset):
    """Skeleton MS-COCO dataset definition.

    Only the joint-layout metadata is used at inference time (the object is
    passed to the pose network constructor — presumably it reads nJoints etc.;
    confirm against SPPE). __getitem__/__len__ are intentionally stubs.
    """

    def __init__(self, train=True, sigma=1,
                 scale_factor=(0.2, 0.3), rot_factor=40, label_type='Gaussian'):
        self.img_folder = '../data/coco/images'    # root image folders
        self.is_train = train           # training set or test set
        self.inputResH = opt.inputResH
        self.inputResW = opt.inputResW
        self.outputResH = opt.outputResH
        self.outputResW = opt.outputResW
        self.sigma = sigma
        self.scale_factor = scale_factor
        self.rot_factor = rot_factor
        self.label_type = label_type

        self.nJoints_coco = 17
        self.nJoints_mpii = 16
        self.nJoints = 33  # combined coco (17) + mpii (16) joint count

        # 1-based joint indices used for accuracy computation
        self.accIdxs = (1, 2, 3, 4, 5, 6, 7, 8,
                        9, 10, 11, 12, 13, 14, 15, 16, 17)
        # left/right joint pairs swapped under horizontal-flip augmentation
        self.flipRef = ((2, 3), (4, 5), (6, 7),
                        (8, 9), (10, 11), (12, 13),
                        (14, 15), (16, 17))

    def __getitem__(self, index):
        # not needed for inference; intentionally unimplemented
        pass

    def __len__(self):
        # not needed for inference; intentionally unimplemented
        pass
def crop_from_dets(img, boxes, inps, pt1, pt2):
    '''
    Crop human regions from the origin image according to detection results.

    img:   (3, H, W) float tensor
    boxes: (n, 4) person boxes as (x1, y1, x2, y2)
    inps:  (n, 3, opt.inputResH, opt.inputResW) output buffer for the crops
    pt1, pt2: (n, 2) output buffers for the (padded) crop corners

    Returns the filled (inps, pt1, pt2).
    '''
    imght = img.size(1)
    imgwidth = img.size(2)
    # Work on a copy: the previous code aliased `img` and normalised it with
    # in-place add_(), silently mutating the caller's tensor.
    tmp_img = img.clone()
    # Mean subtraction expected by the SPPE pose network.
    # NOTE(review): channel order/values (0.406, 0.457, 0.480) are kept exactly
    # as trained; do not "correct" them to the canonical ImageNet order.
    tmp_img[0].add_(-0.406)
    tmp_img[1].add_(-0.457)
    tmp_img[2].add_(-0.480)
    for i, box in enumerate(boxes):
        upLeft = torch.Tensor(
            (float(box[0]), float(box[1])))
        bottomRight = torch.Tensor(
            (float(box[2]), float(box[3])))

        ht = bottomRight[1] - upLeft[1]
        width = bottomRight[0] - upLeft[0]

        # Enlarge the box by 30% (15% per side), clamped to the image bounds
        # and kept at least 5 px wide/tall so cropBox gets a valid region.
        scaleRate = 0.3
        upLeft[0] = max(0, upLeft[0] - width * scaleRate / 2)
        upLeft[1] = max(0, upLeft[1] - ht * scaleRate / 2)
        bottomRight[0] = max(
            min(imgwidth - 1, bottomRight[0] + width * scaleRate / 2), upLeft[0] + 5)
        bottomRight[1] = max(
            min(imght - 1, bottomRight[1] + ht * scaleRate / 2), upLeft[1] + 5)

        try:
            inps[i] = cropBox(tmp_img.clone(), upLeft, bottomRight, opt.inputResH, opt.inputResW)
        except IndexError:
            # diagnostic dump for malformed boxes; the slot keeps its zeros
            print(tmp_img.shape)
            print(upLeft)
            print(bottomRight)
            print('===')
        pt1[i] = upLeft
        pt2[i] = bottomRight

    return inps, pt1, pt2
| 28,847 | 35.842912 | 124 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/webcam_demo.py | from opt import opt
import os
import numpy as np
import cv2
from tqdm import tqdm
from SPPE.src.main_fast_inference import *
from dataloader_webcam import WebcamLoader, DetectionLoader, DetectionProcessor, DataWriter, Mscoco
from fn import getTime
from opt import opt
from pPose_nms import write_json
# Alias the parsed CLI options and force the COCO joint layout for this demo.
args = opt
args.dataset = 'coco'
def loop():
    """Yield consecutive frame indices 0, 1, 2, ... forever.

    Drives the endless webcam loop below; equivalent to itertools.count(),
    which replaces the previous hand-rolled counter.
    """
    from itertools import count  # local import: keep module imports untouched
    yield from count()
if __name__ == "__main__":
    # Live webcam pipeline: camera -> YOLO detection -> crop -> SPPE pose ->
    # pose-NMS / rendering. Runs until Ctrl+C.
    webcam = args.webcam
    mode = args.mode
    if not os.path.exists(args.outputpath):
        os.mkdir(args.outputpath)

    # Load input video
    data_loader = WebcamLoader(webcam).start()
    (fourcc, fps, frameSize) = data_loader.videoinfo()

    # Load detection loader
    print('Loading YOLO model..')
    sys.stdout.flush()
    det_loader = DetectionLoader(data_loader, batchSize=args.detbatch).start()
    det_processor = DetectionProcessor(det_loader).start()

    # Load pose model
    pose_dataset = Mscoco()
    if args.fast_inference:
        pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
    else:
        pose_model = InferenNet(4 * 1 + 1, pose_dataset)
    pose_model.cuda()
    pose_model.eval()

    # Data writer (renders/saves results on its own thread)
    save_path = os.path.join(args.outputpath, 'AlphaPose_webcam' + webcam + '.avi')
    writer = DataWriter(args.save_video, save_path, cv2.VideoWriter_fourcc(*'XVID'), fps, frameSize).start()

    # per-stage timing accumulators: detection / pose / post-processing
    runtime_profile = {
        'dt': [],
        'pt': [],
        'pn': []
    }

    print('Starting webcam demo, press Ctrl + C to terminate...')
    sys.stdout.flush()
    im_names_desc = tqdm(loop())
    batchSize = args.posebatch
    for i in im_names_desc:
        try:
            start_time = getTime()

            with torch.no_grad():
                (inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()
                if boxes is None or boxes.nelement() == 0:
                    # no person in this frame: forward the raw image to the writer
                    writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
                    continue

                ckpt_time, det_time = getTime(start_time)
                runtime_profile['dt'].append(det_time)
                # Pose Estimation: run the crops through SPPE in mini-batches
                datalen = inps.size(0)
                leftover = 0
                if (datalen) % batchSize:
                    leftover = 1
                num_batches = datalen // batchSize + leftover
                hm = []
                for j in range(num_batches):
                    inps_j = inps[j * batchSize:min((j + 1) * batchSize, datalen)].cuda()
                    hm_j = pose_model(inps_j)
                    hm.append(hm_j)
                hm = torch.cat(hm)
                ckpt_time, pose_time = getTime(ckpt_time)
                runtime_profile['pt'].append(pose_time)

                hm = hm.cpu().data
                writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])

                ckpt_time, post_time = getTime(ckpt_time)
                runtime_profile['pn'].append(post_time)

            if args.profile:
                # TQDM
                im_names_desc.set_description(
                    'det time: {dt:.3f} | pose time: {pt:.2f} | post processing: {pn:.4f}'.format(
                        dt=np.mean(runtime_profile['dt']), pt=np.mean(runtime_profile['pt']), pn=np.mean(runtime_profile['pn']))
                )
        except KeyboardInterrupt:
            break

    print(' ')
    print('===========================> Finish Model Running.')
    if (args.save_img or args.save_video) and not args.vis_fast:
        print('===========================> Rendering remaining images in the queue...')
        print('===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).')
    # busy-wait until the writer has drained its queue, then collect results
    while (writer.running()):
        pass
    writer.stop()
    final_result = writer.results()
    write_json(final_result, args.outputpath)
| 3,903 | 32.655172 | 144 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/opt.py | import argparse
# AlphaPose command-line options. NOTE(review): arguments declared with
# type=bool (e.g. --debug, --addDPG) suffer the classic argparse pitfall —
# bool('False') is True — so any non-empty string enables them; consider
# action='store_true' in a future, interface-breaking change.
parser = argparse.ArgumentParser(description='PyTorch AlphaPose Training')

"----------------------------- General options -----------------------------"
parser.add_argument('--expID', default='default', type=str,
                    help='Experiment ID')
parser.add_argument('--dataset', default='coco', type=str,
                    help='Dataset choice: mpii | coco')
parser.add_argument('--nThreads', default=30, type=int,
                    help='Number of data loading threads')
parser.add_argument('--debug', default=False, type=bool,
                    help='Print the debug information')
parser.add_argument('--snapshot', default=1, type=int,
                    help='How often to take a snapshot of the model (0 = never)')

"----------------------------- AlphaPose options -----------------------------"
parser.add_argument('--addDPG', default=False, type=bool,
                    help='Train with data augmentation')
parser.add_argument('--sp', default=False, action='store_true',
                    help='Use single process for pytorch')
parser.add_argument('--profile', default=False, action='store_true',
                    help='add speed profiling at screen output')

"----------------------------- Model options -----------------------------"
parser.add_argument('--netType', default='hgPRM', type=str,
                    help='Options: hgPRM | resnext')
parser.add_argument('--loadModel', default=None, type=str,
                    help='Provide full path to a previously trained model')
parser.add_argument('--Continue', default=False, type=bool,
                    help='Pick up where an experiment left off')
parser.add_argument('--nFeats', default=256, type=int,
                    help='Number of features in the hourglass')
parser.add_argument('--nClasses', default=33, type=int,
                    help='Number of output channel')
parser.add_argument('--nStack', default=4, type=int,
                    help='Number of hourglasses to stack')

"----------------------------- Hyperparameter options -----------------------------"
parser.add_argument('--fast_inference', default=True, type=bool,
                    help='Fast inference')
parser.add_argument('--use_pyranet', default=True, type=bool,
                    help='use pyranet')

"----------------------------- Hyperparameter options -----------------------------"
parser.add_argument('--LR', default=2.5e-4, type=float,
                    help='Learning rate')
parser.add_argument('--momentum', default=0, type=float,
                    help='Momentum')
parser.add_argument('--weightDecay', default=0, type=float,
                    help='Weight decay')
parser.add_argument('--crit', default='MSE', type=str,
                    help='Criterion type')
parser.add_argument('--optMethod', default='rmsprop', type=str,
                    help='Optimization method: rmsprop | sgd | nag | adadelta')

"----------------------------- Training options -----------------------------"
parser.add_argument('--nEpochs', default=50, type=int,
                    help='Number of hourglasses to stack')
parser.add_argument('--epoch', default=0, type=int,
                    help='Current epoch')
parser.add_argument('--trainBatch', default=40, type=int,
                    help='Train-batch size')
parser.add_argument('--validBatch', default=20, type=int,
                    help='Valid-batch size')
parser.add_argument('--trainIters', default=0, type=int,
                    help='Total train iters')
parser.add_argument('--valIters', default=0, type=int,
                    help='Total valid iters')
parser.add_argument('--init', default=None, type=str,
                    help='Initialization')

"----------------------------- Data options -----------------------------"
parser.add_argument('--inputResH', default=320, type=int,
                    help='Input image height')
parser.add_argument('--inputResW', default=256, type=int,
                    help='Input image width')
parser.add_argument('--outputResH', default=80, type=int,
                    help='Output heatmap height')
parser.add_argument('--outputResW', default=64, type=int,
                    help='Output heatmap width')
parser.add_argument('--scale', default=0.25, type=float,
                    help='Degree of scale augmentation')
parser.add_argument('--rotate', default=30, type=float,
                    help='Degree of rotation augmentation')
parser.add_argument('--hmGauss', default=1, type=int,
                    help='Heatmap gaussian size')

"----------------------------- PyraNet options -----------------------------"
parser.add_argument('--baseWidth', default=9, type=int,
                    help='Heatmap gaussian size')
parser.add_argument('--cardinality', default=5, type=int,
                    help='Heatmap gaussian size')
parser.add_argument('--nResidual', default=1, type=int,
                    help='Number of residual modules at each location in the pyranet')

"----------------------------- Distribution options -----------------------------"
parser.add_argument('--dist', dest='dist', type=int, default=1,
                    help='distributed training or not')
parser.add_argument('--backend', dest='backend', type=str, default='gloo',
                    help='backend for distributed training')
parser.add_argument('--port', dest='port',
                    help='port of server')

"----------------------------- Detection options -----------------------------"
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16 res101]',
                    default='res152')
parser.add_argument('--indir', dest='inputpath',
                    help='image-directory', default="")
parser.add_argument('--list', dest='inputlist',
                    help='image-list', default="")
parser.add_argument('--mode', dest='mode',
                    help='detection mode, fast/normal/accurate', default="normal")
parser.add_argument('--outdir', dest='outputpath',
                    help='output-directory', default="examples/res/")
parser.add_argument('--inp_dim', dest='inp_dim', type=str, default='608',
                    help='inpdim')
parser.add_argument('--conf', dest='confidence', type=float, default=0.05,
                    help='bounding box confidence threshold')
parser.add_argument('--nms', dest='nms_thesh', type=float, default=0.6,
                    help='bounding box nms threshold')
parser.add_argument('--save_img', default=False, action='store_true',
                    help='save result as image')
parser.add_argument('--vis', default=False, action='store_true',
                    help='visualize image')
parser.add_argument('--matching', default=False, action='store_true',
                    help='use best matching')
parser.add_argument('--format', type=str,
                    help='save in the format of cmu or coco or openpose, option: coco/cmu/open')
parser.add_argument('--detbatch', type=int, default=1,
                    help='detection batch size')
parser.add_argument('--posebatch', type=int, default=80,
                    help='pose estimation maximum batch size')

"----------------------------- Video options -----------------------------"
parser.add_argument('--video', dest='video',
                    help='video-name', default="")
parser.add_argument('--webcam', dest='webcam', type=str,
                    help='webcam number', default='0')
parser.add_argument('--save_video', dest='save_video',
                    help='whether to save rendered video', default=False, action='store_true')
parser.add_argument('--vis_fast', dest='vis_fast',
                    help='use fast rendering', action='store_true', default=False)

# NOTE(review): parsing happens at import time, so importing this module from
# a program with its own CLI will swallow/clash with that program's argv.
opt = parser.parse_args()
opt.num_classes = 80  # COCO detection classes for the YOLO detector
| 7,755 | 51.761905 | 96 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/pPose_nms.py | # -*- coding: utf-8 -*-
import torch
import json
import os
import zipfile
import time
from multiprocessing.dummy import Pool as ThreadPool
import numpy as np
from opt import opt
''' Constant Configuration '''
# Hyper-parameters of the parametric pose-NMS below (presumably tuned for the
# released AlphaPose models — confirm before changing).
delta1 = 1            # tanh scale for the keypoint-confidence similarity term
mu = 1.7              # weight of the spatial-distance term in the similarity
delta2 = 2.65         # exp scale for the spatial-distance term
gamma = 22.48         # similarity threshold above which a pose is suppressed
scoreThreds = 0.3     # minimum keypoint score for a merged pose to be kept
matchThreds = 5       # minimum number of PCK-matched joints to suppress a pose
areaThres = 0#40 * 40.5  # minimum merged-pose area; 0 disables the filter
alpha = 0.1           # fraction of the box size used as the PCK reference distance
#pool = ThreadPool(4)
def pose_nms(bboxes, bbox_scores, pose_preds, pose_scores):
    '''
    Parametric Pose NMS algorithm
    bboxes: bbox locations list (n, 4)
    bbox_scores: bbox scores list (n,)
    pose_preds: pose locations list (n, 17, 2)
    pose_scores: pose scores list (n, 17, 1)

    Greedily picks the highest-scoring pose, suppresses near-duplicates
    (high parametric similarity or >= matchThreds PCK-matched joints), merges
    each pick with its suppressed cluster, and returns a list of dicts with
    'keypoints', 'kp_score' and 'proposal_score'.

    Side effect: zero entries of pose_scores are bumped to 1e-5 in place.
    NOTE(review): np.delete is applied to torch tensors — this relies on
    NumPy/torch interop behaviour of the pinned versions; verify on upgrade.
    '''
    #global ori_pose_preds, ori_pose_scores, ref_dists
    pose_scores[pose_scores == 0] = 1e-5

    final_result = []

    # keep pristine copies: merging uses the original (pre-deletion) arrays
    ori_bbox_scores = bbox_scores.clone()
    ori_pose_preds = pose_preds.clone()
    ori_pose_scores = pose_scores.clone()

    xmax = bboxes[:, 2]
    xmin = bboxes[:, 0]
    ymax = bboxes[:, 3]
    ymin = bboxes[:, 1]

    widths = xmax - xmin
    heights = ymax - ymin
    # per-person PCK reference distance = alpha * max(box side)
    ref_dists = alpha * np.maximum(widths, heights)

    nsamples = bboxes.shape[0]
    human_scores = pose_scores.mean(dim=1)

    human_ids = np.arange(nsamples)
    # Do pPose-NMS
    pick = []
    merge_ids = []
    while(human_scores.shape[0] != 0):
        # Pick the one with highest score
        pick_id = torch.argmax(human_scores)
        pick.append(human_ids[pick_id])
        # num_visPart = torch.sum(pose_scores[pick_id] > 0.2)

        # Get numbers of match keypoints by calling PCK_match
        ref_dist = ref_dists[human_ids[pick_id]]
        simi = get_parametric_distance(pick_id, pose_preds, pose_scores, ref_dist)
        num_match_keypoints = PCK_match(pose_preds[pick_id], pose_preds, ref_dist)

        # Delete humans who have more than matchThreds keypoints overlap and high similarity
        delete_ids = torch.from_numpy(np.arange(human_scores.shape[0]))[(simi > gamma) | (num_match_keypoints >= matchThreds)]

        if delete_ids.shape[0] == 0:
            # always delete at least the pick itself to guarantee progress
            delete_ids = pick_id
        #else:
        #    delete_ids = torch.from_numpy(delete_ids)

        merge_ids.append(human_ids[delete_ids])
        pose_preds = np.delete(pose_preds, delete_ids, axis=0)
        pose_scores = np.delete(pose_scores, delete_ids, axis=0)
        human_ids = np.delete(human_ids, delete_ids)
        human_scores = np.delete(human_scores, delete_ids, axis=0)
        bbox_scores = np.delete(bbox_scores, delete_ids, axis=0)

    assert len(merge_ids) == len(pick)

    preds_pick = ori_pose_preds[pick]
    scores_pick = ori_pose_scores[pick]
    bbox_scores_pick = ori_bbox_scores[pick]
    #final_result = pool.map(filter_result, zip(scores_pick, merge_ids, preds_pick, pick, bbox_scores_pick))
    #final_result = [item for item in final_result if item is not None]

    for j in range(len(pick)):
        ids = np.arange(17)
        max_score = torch.max(scores_pick[j, ids, 0])
        if max_score < scoreThreds:
            continue

        # Merge poses
        merge_id = merge_ids[j]
        merge_pose, merge_score = p_merge_fast(
            preds_pick[j], ori_pose_preds[merge_id], ori_pose_scores[merge_id], ref_dists[pick[j]])

        max_score = torch.max(merge_score[ids])
        if max_score < scoreThreds:
            continue

        xmax = max(merge_pose[:, 0])
        xmin = min(merge_pose[:, 0])
        ymax = max(merge_pose[:, 1])
        ymin = min(merge_pose[:, 1])

        # discard tiny poses (areaThres is 0 above, so this is currently a no-op)
        if (1.5 ** 2 * (xmax - xmin) * (ymax - ymin) < areaThres):
            continue

        final_result.append({
            'keypoints': merge_pose - 0.3,
            'kp_score': merge_score,
            'proposal_score': torch.mean(merge_score) + bbox_scores_pick[j] + 1.25 * max(merge_score)
        })

    return final_result
def filter_result(args):
    """Per-pick merge/filter step for the (commented-out) thread-pool path.

    Expects module globals ori_pose_preds / ori_pose_scores / ref_dists to be
    populated by the caller; returns a result dict or None when the candidate
    fails the score or area thresholds.
    """
    score_pick, merge_id, pred_pick, pick, bbox_score_pick = args
    global ori_pose_preds, ori_pose_scores, ref_dists
    joint_ids = np.arange(17)

    # Reject picks whose best keypoint is already below threshold.
    if torch.max(score_pick[joint_ids, 0]) < scoreThreds:
        return None

    # Merge the pick with its suppressed cluster (score-weighted average).
    merge_pose, merge_score = p_merge_fast(
        pred_pick, ori_pose_preds[merge_id], ori_pose_scores[merge_id], ref_dists[pick])

    if torch.max(merge_score[joint_ids]) < scoreThreds:
        return None

    xs = merge_pose[:, 0]
    ys = merge_pose[:, 1]
    # Drop poses whose (scaled) bounding area is too small.
    if 1.5 ** 2 * (max(xs) - min(xs)) * (max(ys) - min(ys)) < 40 * 40.5:
        return None

    return {
        'keypoints': merge_pose - 0.3,
        'kp_score': merge_score,
        'proposal_score': torch.mean(merge_score) + bbox_score_pick + 1.25 * max(merge_score)
    }
def p_merge(ref_pose, cluster_preds, cluster_scores, ref_dist):
    '''
    Score-weighted pose merging (per-joint loop version).

    INPUT:
        ref_pose:       reference pose          -- [17, 2]
        cluster_preds:  redundant poses         -- [n, 17, 2]
        cluster_scores: redundant pose scores   -- [n, 17, 1]
        ref_dist:       reference scale (capped at 15)
    OUTPUT:
        (merged_pose [17, 2], merged_score [17])
    '''
    kp_num = 17
    # Per-joint euclidean distance of every cluster pose to the reference.
    joint_dist = torch.sqrt(torch.sum(
        torch.pow(ref_pose[np.newaxis, :] - cluster_preds, 2),
        dim=2
    ))  # [n, 17]
    ref_dist = min(ref_dist, 15)
    close_enough = (joint_dist <= ref_dist)

    merged_pose = torch.zeros(kp_num, 2)
    merged_score = torch.zeros(kp_num)

    # Promote a lone pose to a batch of one (in place, as the original did).
    if cluster_preds.dim() == 2:
        cluster_preds.unsqueeze_(0)
        cluster_scores.unsqueeze_(0)
    if close_enough.dim() == 1:
        close_enough.unsqueeze_(0)

    for joint in range(kp_num):
        keep = close_enough[:, joint]
        joint_scores = cluster_scores[:, joint][keep]  # [k, 1]
        joint_coords = cluster_preds[:, joint, :][
            keep.unsqueeze(-1).repeat(1, 2)].view((torch.sum(keep), -1))
        # Normalised scores act as merge weights.
        weights = joint_scores / torch.sum(joint_scores)
        flat_weights = weights.squeeze(-1)
        merged_pose[joint, 0] = torch.dot(joint_coords[:, 0], flat_weights)
        merged_pose[joint, 1] = torch.dot(joint_coords[:, 1], flat_weights)
        merged_score[joint] = torch.dot(
            joint_scores.transpose(0, 1).squeeze(0), flat_weights)

    return merged_pose, merged_score
def p_merge_fast(ref_pose, cluster_preds, cluster_scores, ref_dist):
    '''
    Score-weighted pose merging (vectorised version of p_merge).

    INPUT:
        ref_pose:       reference pose          -- [17, 2]
        cluster_preds:  redundant poses         -- [n, 17, 2]
        cluster_scores: redundant pose scores   -- [n, 17, 1]
        ref_dist:       reference scale (capped at 15)
    OUTPUT:
        (final_pose [17, 2], final_score [17, 1])
    '''
    kp_num = 17
    joint_dist = torch.sqrt(torch.sum(
        torch.pow(ref_pose[np.newaxis, :] - cluster_preds, 2),
        dim=2
    ))
    ref_dist = min(ref_dist, 15)
    keep = (joint_dist <= ref_dist)

    final_pose = torch.zeros(kp_num, 2)
    final_score = torch.zeros(kp_num)

    # Promote a lone pose to a batch of one (in place, as the original did).
    if cluster_preds.dim() == 2:
        cluster_preds.unsqueeze_(0)
        cluster_scores.unsqueeze_(0)
    if keep.dim() == 1:
        keep.unsqueeze_(0)

    # Zero the scores of joints that are too far from the reference, then
    # renormalise per joint so the survivors act as merge weights.
    masked_scores = cluster_scores.mul(keep.float().unsqueeze(-1))
    weights = masked_scores / torch.sum(masked_scores, dim=0)

    # Broadcasting over the last axis replaces the original repeat(1, 1, 2).
    final_pose = (cluster_preds * weights).sum(dim=0)
    final_score = (masked_scores * weights).sum(dim=0)
    return final_pose, final_score
def get_parametric_distance(i, all_preds, keypoint_scores, ref_dist):
    """Parametric similarity of pose `i` to every pose in `all_preds`.

    Combines a soft keypoint-confidence term (tanh-scaled, only for joints
    within 1 px of the pick) with an exponential spatial-distance term,
    weighted by mu/delta1/delta2 module constants. Returns a [n] tensor;
    larger means more similar (compared against `gamma` by the caller).

    Side effect: keypoint_scores is squeezed IN PLACE (squeeze_), which can
    change the caller's tensor shape.
    """
    pick_preds = all_preds[i]
    pred_scores = keypoint_scores[i]
    dist = torch.sqrt(torch.sum(
        torch.pow(pick_preds[np.newaxis, :] - all_preds, 2),
        dim=2
    ))
    mask = (dist <= 1)

    # Define a keypoints distance
    score_dists = torch.zeros(all_preds.shape[0], 17)
    keypoint_scores.squeeze_()
    if keypoint_scores.dim() == 1:
        keypoint_scores.unsqueeze_(0)
    if pred_scores.dim() == 1:
        pred_scores.unsqueeze_(1)
    # The predicted scores are repeated up to do broadcast
    pred_scores = pred_scores.repeat(1, all_preds.shape[0]).transpose(0, 1)

    score_dists[mask] = torch.tanh(pred_scores[mask] / delta1) * torch.tanh(keypoint_scores[mask] / delta1)

    point_dist = torch.exp((-1) * dist / delta2)
    final_dist = torch.sum(score_dists, dim=1) + mu * torch.sum(point_dist, dim=1)

    return final_dist
def PCK_match(pick_pred, all_preds, ref_dist):
    """Count, per candidate pose, how many joints lie within the PCK radius.

    pick_pred: [17, 2] picked pose; all_preds: [n, 17, 2] candidates;
    ref_dist is capped at 7 px. Returns a [n] tensor of match counts.
    """
    deltas = pick_pred[np.newaxis, :] - all_preds
    dist = torch.sqrt(torch.sum(torch.pow(deltas, 2), dim=2))
    threshold = min(ref_dist, 7)
    return torch.sum(dist / threshold <= 1, dim=1)
def write_json(all_results, outputpath, for_eval=False):
    '''
    all_result: result dict of predictions
    outputpath: output directory
    for_eval:   when True, parse image names into COCO-style integer ids

    Serialises pose results to <outputpath>/alphapose-results.json, in one of
    three layouts selected by opt.format: 'cmu' (CMU-Pose), 'open' (OpenPose)
    — both of which also write one JSON per image under sep-json/ — or the
    default flat COCO-style list.
    '''
    form = opt.format
    json_results = []
    json_results_cmu = {}
    for im_res in all_results:
        im_name = im_res['imgname']
        for human in im_res['result']:
            keypoints = []
            result = {}
            if for_eval:
                result['image_id'] = int(im_name.split('/')[-1].split('.')[0].split('_')[-1])
            else:
                result['image_id'] = im_name.split('/')[-1]
            result['category_id'] = 1

            kp_preds = human['keypoints']
            kp_scores = human['kp_score']
            pro_scores = human['proposal_score']
            # flatten to [x0, y0, s0, x1, y1, s1, ...]
            for n in range(kp_scores.shape[0]):
                keypoints.append(float(kp_preds[n, 0]))
                keypoints.append(float(kp_preds[n, 1]))
                keypoints.append(float(kp_scores[n]))
            result['keypoints'] = keypoints
            result['score'] = float(pro_scores)

            if form == 'cmu': # the form of CMU-Pose
                if result['image_id'] not in json_results_cmu.keys():
                    json_results_cmu[result['image_id']]={}
                    json_results_cmu[result['image_id']]['version']="AlphaPose v0.2"
                    json_results_cmu[result['image_id']]['bodies']=[]
                tmp={'joints':[]}
                # append a synthetic joint: the average of flat entries 15-17
                # and 18-20 (x/y/score of joints 5 and 6 — presumably the neck
                # as the shoulder midpoint)
                result['keypoints'].append((result['keypoints'][15]+result['keypoints'][18])/2)
                result['keypoints'].append((result['keypoints'][16]+result['keypoints'][19])/2)
                result['keypoints'].append((result['keypoints'][17]+result['keypoints'][20])/2)
                # reorder COCO joints into the CMU joint order (flat x-indices)
                indexarr=[0,51,18,24,30,15,21,27,36,42,48,33,39,45,6,3,12,9]
                for i in indexarr:
                    tmp['joints'].append(result['keypoints'][i])
                    tmp['joints'].append(result['keypoints'][i+1])
                    tmp['joints'].append(result['keypoints'][i+2])
                json_results_cmu[result['image_id']]['bodies'].append(tmp)
            elif form == 'open': # the form of OpenPose
                if result['image_id'] not in json_results_cmu.keys():
                    json_results_cmu[result['image_id']]={}
                    json_results_cmu[result['image_id']]['version']="AlphaPose v0.2"
                    json_results_cmu[result['image_id']]['people']=[]
                tmp={'pose_keypoints_2d':[]}
                result['keypoints'].append((result['keypoints'][15]+result['keypoints'][18])/2)
                result['keypoints'].append((result['keypoints'][16]+result['keypoints'][19])/2)
                result['keypoints'].append((result['keypoints'][17]+result['keypoints'][20])/2)
                indexarr=[0,51,18,24,30,15,21,27,36,42,48,33,39,45,6,3,12,9]
                for i in indexarr:
                    tmp['pose_keypoints_2d'].append(result['keypoints'][i])
                    tmp['pose_keypoints_2d'].append(result['keypoints'][i+1])
                    tmp['pose_keypoints_2d'].append(result['keypoints'][i+2])
                json_results_cmu[result['image_id']]['people'].append(tmp)
            else:
                json_results.append(result)

    if form == 'cmu': # the form of CMU-Pose
        with open(os.path.join(outputpath,'alphapose-results.json'), 'w') as json_file:
            json_file.write(json.dumps(json_results_cmu))
        if not os.path.exists(os.path.join(outputpath,'sep-json')):
            os.mkdir(os.path.join(outputpath,'sep-json'))
        for name in json_results_cmu.keys():
            with open(os.path.join(outputpath,'sep-json',name.split('.')[0]+'.json'),'w') as json_file:
                json_file.write(json.dumps(json_results_cmu[name]))
    elif form == 'open': # the form of OpenPose
        with open(os.path.join(outputpath,'alphapose-results.json'), 'w') as json_file:
            json_file.write(json.dumps(json_results_cmu))
        if not os.path.exists(os.path.join(outputpath,'sep-json')):
            os.mkdir(os.path.join(outputpath,'sep-json'))
        for name in json_results_cmu.keys():
            with open(os.path.join(outputpath,'sep-json',name.split('.')[0]+'.json'),'w') as json_file:
                json_file.write(json.dumps(json_results_cmu[name]))
    else:
        with open(os.path.join(outputpath,'alphapose-results.json'), 'w') as json_file:
            json_file.write(json.dumps(json_results))
| 13,452 | 35.958791 | 126 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/detector_api.py | import ntpath
import os
import shutil
import numpy as np
import torch.utils.data
from tqdm import tqdm
from SPPE.src.main_fast_inference import *
from common.utils import calculate_area
from dataloader import DetectionLoader, DetectionProcessor, DataWriter, Mscoco, VideoLoader
from fn import getTime
from opt import opt
from pPose_nms import write_json
# ---------------------------------------------------------------------------
# Module-level configuration: override the CLI defaults parsed into `opt`.
# These assignments mutate the shared `opt` namespace, so they affect every
# module that imports `opt`.
# ---------------------------------------------------------------------------
args = opt
args.vis_fast = False # True # add for speed
args.dataset = 'coco'
args.fast_inference = False
args.save_img = False # save time and space.
args.sp = True
############################
########## FAST ###########
# NOTE: this overrides the False assigned above -- fast inference is enabled.
args.fast_inference = True
# --conf: Confidence threshold for human detection.
# Lower the value can improve the final accuracy but decrease the speed. Default is 0.1.
args.conf = 0.1
# --nms: Confidence threshold for human detection.
# Increase the value can improve the final accuracy but decrease the speed. Default is 0.6.
args.nms = 0.6
# --inp_dim: The input size of detection network. The inp_dim should be multiple of 32. Default is 608.
args.inp_dim = 32 * 2 # default 608
############################
if not args.sp:
    # Multi-process mode: use forkserver start method and file-system tensor
    # sharing for torch.multiprocessing (dead path here since args.sp is True).
    torch.multiprocessing.set_start_method('forkserver', force=True)
    torch.multiprocessing.set_sharing_strategy('file_system')
def model_load():
    """Placeholder kept for interface parity with other joint detectors.

    Returns:
        None -- this detector constructs its models inside handle_video().
    """
    return None
def image_interface(model, image):
    """Placeholder single-image API (not implemented for this detector)."""
    return None
def generate_kpts(video_file):
    """Run the 2D pose pipeline on *video_file* and return per-frame keypoints.

    Args:
        video_file: path to the input video.

    Returns:
        list with one numpy keypoint array per frame (the highest-scoring
        person in that frame). Frames without a detection are filled with the
        nearest preceding valid detection (leading gaps use the first valid
        one); entries stay None only if no frame at all had a detection.
    """
    final_result, _ = handle_video(video_file)

    # For every frame keep the person maximizing proposal_score * bbox area.
    kpts = []
    for frame in final_result:
        if not frame['result']:  # No people detected in this frame
            kpts.append(None)
            continue
        best = max(frame['result'],
                   key=lambda x: x['proposal_score'].data[0] * calculate_area(x['keypoints']))
        kpts.append(best['keypoints'].data.numpy())

    # Fill detection gaps with the nearest valid neighbour.
    # The previous code blindly assigned kpts[-1] (which can itself be None),
    # and its second fill loop was dead code (it ran after no_person.clear());
    # that loop would also have raised "truth value of an array is ambiguous"
    # on `if kpts[-1]` since the entries are numpy arrays.
    last_valid = next((k for k in kpts if k is not None), None)
    for idx, k in enumerate(kpts):
        if k is None:
            kpts[idx] = last_valid
        else:
            last_valid = k

    return kpts
def handle_video(video_file):
    """Run the full 2D pose pipeline (YOLO person detection + SPPE pose
    estimation) over every frame of *video_file*.

    Results and optional renderings are written under
    ``outputs/alpha_pose_<video_name>``.

    Returns:
        (final_result, video_name): per-frame pose results collected by
        DataWriter, and the video basename without extension.
    """
    # =========== common ===============
    args.video = video_file
    base_name = os.path.basename(args.video)
    video_name = base_name[:base_name.rfind('.')]
    # =========== end common ===============

    # =========== image ===============
    # img_path = f'outputs/alpha_pose_{video_name}/split_image/'
    # args.inputpath = img_path
    # args.outputpath = f'outputs/alpha_pose_{video_name}'
    # if os.path.exists(args.outputpath):
    #     shutil.rmtree(f'{args.outputpath}/vis', ignore_errors=True)
    # else:
    #     os.mkdir(args.outputpath)
    #
    # # if not len(video_file):
    # #     raise IOError('Error: must contain --video')
    #
    # if len(img_path) and img_path != '/':
    #     for root, dirs, files in os.walk(img_path):
    #         im_names = sorted([f for f in files if 'png' in f or 'jpg' in f])
    # else:
    #     raise IOError('Error: must contain either --indir/--list')
    #
    # # Load input images
    # data_loader = ImageLoader(im_names, batchSize=args.detbatch, format='yolo').start()
    # print(f'Totally {data_loader.datalen} images')
    # =========== end image ===============

    # =========== video ===============
    args.outputpath = f'outputs/alpha_pose_{video_name}'
    if os.path.exists(args.outputpath):
        # Existing run: only clear the rendered frames, keep other outputs.
        shutil.rmtree(f'{args.outputpath}/vis', ignore_errors=True)
    else:
        os.mkdir(args.outputpath)
    videofile = args.video
    mode = args.mode
    if not len(videofile):
        raise IOError('Error: must contain --video')
    # Load input video (threaded loader; start() spawns the reader thread)
    data_loader = VideoLoader(videofile, batchSize=args.detbatch).start()
    (fourcc, fps, frameSize) = data_loader.videoinfo()
    print('the video is {} f/s'.format(fps))
    print('the video frameSize: {}'.format(frameSize))
    # =========== end video ===============
    # Load detection loader
    print('Loading YOLO model..')
    sys.stdout.flush()
    det_loader = DetectionLoader(data_loader, batchSize=args.detbatch).start()
    # start a thread to read frames from the file video stream
    det_processor = DetectionProcessor(det_loader).start()
    # Load pose model
    pose_dataset = Mscoco()
    if args.fast_inference:
        pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
    else:
        pose_model = InferenNet(4 * 1 + 1, pose_dataset)
    pose_model.cuda()
    pose_model.eval()
    # Per-stage timing accumulators: detection / pose / post-processing.
    runtime_profile = {
        'dt': [],
        'pt': [],
        'pn': []
    }
    # Data writer
    save_path = os.path.join(args.outputpath, 'AlphaPose_' + ntpath.basename(video_file).split('.')[0] + '.avi')
    # writer = DataWriter(args.save_video, save_path, cv2.VideoWriter_fourcc(*'XVID'), fps, frameSize).start()
    writer = DataWriter(args.save_video).start()

    print('Start pose estimation...')
    im_names_desc = tqdm(range(data_loader.length()))
    batchSize = args.posebatch
    for i in im_names_desc:
        start_time = getTime()
        with torch.no_grad():
            (inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()
            if orig_img is None:
                # Stream exhausted earlier than expected: stop the loop.
                print(f'{i}-th image read None: handle_video')
                break
            if boxes is None or boxes.nelement() == 0:
                # No person detected: record an empty result for this frame.
                writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
                continue

            ckpt_time, det_time = getTime(start_time)
            runtime_profile['dt'].append(det_time)
            # Pose Estimation: run SPPE over the person crops in mini-batches.
            datalen = inps.size(0)
            leftover = 0
            if datalen % batchSize:
                leftover = 1
            num_batches = datalen // batchSize + leftover
            hm = []
            for j in range(num_batches):
                inps_j = inps[j * batchSize:min((j + 1) * batchSize, datalen)].cuda()
                hm_j = pose_model(inps_j)
                hm.append(hm_j)
            hm = torch.cat(hm)
            ckpt_time, pose_time = getTime(ckpt_time)
            runtime_profile['pt'].append(pose_time)

            hm = hm.cpu().data
            writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])

            ckpt_time, post_time = getTime(ckpt_time)
            runtime_profile['pn'].append(post_time)

        if args.profile:
            # TQDM
            im_names_desc.set_description(
                'det time: {dt:.4f} | pose time: {pt:.4f} | post processing: {pn:.4f}'.format(
                    dt=np.mean(runtime_profile['dt']), pt=np.mean(runtime_profile['pt']), pn=np.mean(runtime_profile['pn']))
            )
    if (args.save_img or args.save_video) and not args.vis_fast:
        print('===========================> Rendering remaining images in the queue...')
        print('===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).')
    # Busy-wait until the writer thread has drained its queue.
    while writer.running():
        pass
    writer.stop()
    final_result = writer.results()
    write_json(final_result, args.outputpath)

    return final_result, video_name
def mkd(target_dir, get_parent=True):
    """Ensure a directory exists for *target_dir*.

    Args:
        target_dir: a file or directory path.
        get_parent: when True (default) create target_dir's parent directory
            (useful when target_dir is a file path); otherwise create
            target_dir itself.
    """
    # get parent path and create
    if get_parent:
        savedir = os.path.abspath(os.path.join(target_dir, os.pardir))
    else:
        savedir = target_dir
    # os.makedirs(..., exist_ok=True) is idempotent; the previous
    # os.path.exists() pre-check was redundant and racy (TOCTOU).
    os.makedirs(savedir, exist_ok=True)
if __name__ == "__main__":
    # Smoke test: run from the repo root so the relative model/output paths
    # used by the pipeline resolve correctly.
    os.chdir('../..')
    print(os.getcwd())
    # handle_video(img_path='outputs/image/kobe')
    generate_kpts('outputs/dance.mp4')
| 8,005 | 34.114035 | 144 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/detector_api_realtime.py | import ntpath
import os
import shutil
import numpy as np
import torch.utils.data
from tqdm import tqdm
from SPPE.src.main_fast_inference import *
from common.utils import calculate_area
from dataloader import DetectionLoader, DetectionProcessor, DataWriter, Mscoco, VideoLoader
from fn import getTime
from opt import opt
from pPose_nms import write_json
##
from SPPE.src.utils.eval import getPrediction, getMultiPeakPrediction
from pPose_nms import pose_nms
from visualization_copy import render_animation, Skeleton, camera_to_world_bynumpy
##
# ---------------------------------------------------------------------------
# Module-level configuration: override the CLI defaults parsed into `opt`.
# These assignments mutate the shared `opt` namespace, so they affect every
# module that imports `opt`.
# ---------------------------------------------------------------------------
args = opt
args.vis_fast = False # True # add for speed
args.dataset = 'coco'
args.fast_inference = False
args.save_img = False # save time and space.
args.sp = True
############################
########## FAST ###########
# NOTE: this overrides the False assigned above -- fast inference is enabled.
args.fast_inference = True
# --conf: Confidence threshold for human detection.
# Lower the value can improve the final accuracy but decrease the speed. Default is 0.1.
args.conf = 0.1
# --nms: Confidence threshold for human detection.
# Increase the value can improve the final accuracy but decrease the speed. Default is 0.6.
args.nms = 0.6
# --inp_dim: The input size of detection network. The inp_dim should be multiple of 32. Default is 608.
args.inp_dim = 32 * 2 # default 608
############################
if not args.sp:
    # Multi-process mode: use forkserver start method and file-system tensor
    # sharing for torch.multiprocessing (dead path here since args.sp is True).
    torch.multiprocessing.set_start_method('forkserver', force=True)
    torch.multiprocessing.set_sharing_strategy('file_system')
# def model_load():
# model = None
# return model
#
#
# def image_interface(model, image):
# pass
# def generate_kpts(video_file):
# final_result, video_name = handle_video(video_file)
#
# # ============ Changing ++++++++++
#
# kpts = []
# no_person = []
# for i in range(len(final_result)):
# if not final_result[i]['result']: # No people
# no_person.append(i)
# kpts.append(None)
# continue
#
# kpt = max(final_result[i]['result'],
# key=lambda x: x['proposal_score'].data[0] * calculate_area(x['keypoints']), )['keypoints']
#
# kpts.append(kpt.data.numpy())
#
# for n in no_person:
# kpts[n] = kpts[-1]
# no_person.clear()
#
# for n in no_person:
# kpts[n] = kpts[-1] if kpts[-1] else kpts[n-1]
#
# # ============ Changing End ++++++++++
#
# # name = f'{args.outputpath}/{video_name}_det2D.npz'
# # npy_folder = os.path.abspath(os.path.join(video_file, os.pardir))
# # name = video_file.replace('.mp4', '_det2D.npz').replace('source_video', 'det2D')
# # mkd(name)
# # kpts = np.array(kpts).astype(np.float32)
# # print('kpts npz save in ', name)
# # np.savez_compressed(name, kpts=kpts) # move to main file for both detectors
#
# return kpts
# def handle_video(video_file):
# # =========== common ===============
# args.video = video_file
# base_name = os.path.basename(args.video)
# video_name = base_name[:base_name.rfind('.')]
# # =========== end common ===============
#
# # =========== video ===============
# args.outputpath = f'outputs/alpha_pose_{video_name}'
# if os.path.exists(args.outputpath):
# shutil.rmtree(f'{args.outputpath}/vis', ignore_errors=True)
# else:
# os.mkdir(args.outputpath)
# videofile = args.video
# mode = args.mode
# if not len(videofile):
# raise IOError('Error: must contain --video')
# # Load input video
# data_loader = VideoLoader(videofile, batchSize=args.detbatch).start()
# (fourcc, fps, frameSize) = data_loader.videoinfo()
# print('the video is {} f/s'.format(fps))
# print('the video frameSize: {}'.format(frameSize))
# # =========== end video ===============
# # Load detection loader
# print('Loading YOLO model..')
# sys.stdout.flush()
# det_loader = DetectionLoader(data_loader, batchSize=args.detbatch).start()
# # start a thread to read frames from the file video stream
# det_processor = DetectionProcessor(det_loader).start()
# # Load pose model
# pose_dataset = Mscoco()
# if args.fast_inference:
# pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
# else:
# pose_model = InferenNet(4 * 1 + 1, pose_dataset)
# pose_model.cuda()
# pose_model.eval()
# runtime_profile = {
# 'dt': [],
# 'pt': [],
# 'pn': []
# }
# # Data writer
# save_path = os.path.join(args.outputpath, 'AlphaPose_' + ntpath.basename(video_file).split('.')[0] + '.avi')
# # writer = DataWriter(args.save_video, save_path, cv2.VideoWriter_fourcc(*'XVID'), fps, frameSize).start()
# writer = DataWriter(args.save_video).start()
# print('Start pose estimation...')
# im_names_desc = tqdm(range(data_loader.length()))
# batchSize = args.posebatch
#
# final_result_kh = []
# for i in im_names_desc:
#
# start_time = getTime()
# with torch.no_grad():
# (inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()
# if orig_img is None:
# print(f'{i}-th image read None: handle_video')
# break
# if boxes is None or boxes.nelement() == 0:
# writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
# continue
#
# ckpt_time, det_time = getTime(start_time)
# runtime_profile['dt'].append(det_time)
# # Pose Estimation
#
# datalen = inps.size(0)
# leftover = 0
# if datalen % batchSize:
# leftover = 1
# num_batches = datalen // batchSize + leftover
# hm = []
# for j in range(num_batches):
# inps_j = inps[j * batchSize:min((j + 1) * batchSize, datalen)].cuda()
# hm_j = pose_model(inps_j)
# hm.append(hm_j)
# hm = torch.cat(hm)
# ckpt_time, pose_time = getTime(ckpt_time)
# runtime_profile['pt'].append(pose_time)
#
# hm = hm.cpu().data
# writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])
#
# ##>>>>>>>
# boxes, scores, hm_data, pt1, pt2, orig_img, im_name = boxes, scores, hm, pt1, pt2, orig_img, im_name
# preds_hm, preds_img, preds_scores = getPrediction(
# hm_data, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
# result = pose_nms(
# boxes, scores, preds_img, preds_scores)
# result = {
# 'imgname': im_name,
# 'result': result
# }
# final_result_kh.append(result)
# ##<<<<<<<<
#
# ckpt_time, post_time = getTime(ckpt_time)
# runtime_profile['pn'].append(post_time)
#
# if args.profile:
# # TQDM
# im_names_desc.set_description(
# 'det time: {dt:.4f} | pose time: {pt:.4f} | post processing: {pn:.4f}'.format(
# dt=np.mean(runtime_profile['dt']), pt=np.mean(runtime_profile['pt']), pn=np.mean(runtime_profile['pn']))
# )
# if (args.save_img or args.save_video) and not args.vis_fast:
# print('===========================> Rendering remaining images in the queue...')
# print('===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).')
# while writer.running():
# pass
# writer.stop()
# final_result = writer.results()
# write_json(final_result, args.outputpath)
#
# return final_result, video_name
class Detector(object):
    """Streaming 2D pose detector.

    Wraps threaded video loading, YOLO person detection and SPPE pose
    estimation so keypoints can be pulled one frame at a time via get_kpt().
    Also provides joint-format conversion and debug-video rendering helpers.
    """

    def __init__(self, video_file, cam_wh=None):
        # cam_wh: camera resolution; when None it is derived from the video
        # frame size in prepare_datastream().
        self.img_wh = cam_wh # camera resolution
        self.prepare_datastream(video_file)
        self.prepare_model()

    def prepare_datastream(self, video_file):
        """Start the threaded video loader and record the frame resolution."""
        # Load input video
        self.data_loader = VideoLoader(video_file, batchSize=args.detbatch).start()
        (fourcc, fps, frameSize) = self.data_loader.videoinfo()
        print('the video is {} f/s'.format(fps))
        print('the video frameSize: {}'.format(frameSize))
        if self.img_wh is None:
            # Fall back to the longer side of the video frame.
            self.img_wh = max(frameSize)
        # =========== end video ===============

    def prepare_model(self):
        """Start the detection pipeline threads and load the SPPE pose model."""
        # Load detection loader
        print('Loading YOLO model..')
        sys.stdout.flush()
        det_loader = DetectionLoader(self.data_loader, batchSize=args.detbatch).start()
        # start a thread to read frames from the file video stream
        self.det_processor = DetectionProcessor(det_loader).start()
        # Load pose model
        pose_dataset = Mscoco()
        if args.fast_inference:
            self.pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
        else:
            self.pose_model = InferenNet(4 * 1 + 1, pose_dataset)
        self.pose_model.cuda()
        self.pose_model.eval()
        # time measurement: detection / pose / post-processing stage timings
        self.runtime_profile = {
            'dt': [],
            'pt': [],
            'pn': []
        }

    def get_kpt(self):
        """Read the next frame from the pipeline and return the keypoints of
        the single most prominent person.

        Returns:
            numpy keypoint array for the best-scoring person, or None when no
            person was detected in the frame.

        Raises:
            AssertionError: when the stream returns no image.
        """
        batchSize = args.posebatch
        start_time = getTime()
        with torch.no_grad():
            (inps, orig_img, im_name, boxes, scores, pt1, pt2) = self.det_processor.read()
            if orig_img is None:
                assert False, f'image read None: handle_video'
            if boxes is None or boxes.nelement() == 0:
                # continue
                return None

            ckpt_time, det_time = getTime(start_time)
            self.runtime_profile['dt'].append(det_time)
            # Pose Estimation: run SPPE over the person crops in mini-batches.
            datalen = inps.size(0)
            leftover = 0
            if datalen % batchSize:
                leftover = 1
            num_batches = datalen // batchSize + leftover
            hm = []
            for j in range(num_batches):
                inps_j = inps[j * batchSize:min((j + 1) * batchSize, datalen)].cuda()
                hm_j = self.pose_model(inps_j)
                hm.append(hm_j)
            hm = torch.cat(hm)
            ckpt_time, pose_time = getTime(ckpt_time)
            self.runtime_profile['pt'].append(pose_time)

            hm = hm.cpu().data

            # Decode heatmaps to image coordinates, then NMS over proposals.
            boxes, scores, hm_data, pt1, pt2, orig_img, im_name = boxes, scores, hm, pt1, pt2, orig_img, im_name
            preds_hm, preds_img, preds_scores = getPrediction(
                hm_data, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
            result = pose_nms(
                boxes, scores, preds_img, preds_scores)

            ckpt_time, post_time = getTime(ckpt_time)
            self.runtime_profile['pn'].append(post_time)

            # assume the largest one person
            kpt = max(result, key=lambda x: x['proposal_score'].data[0] * calculate_area(x['keypoints']), )['keypoints']
            keypoint = kpt.data.numpy()
            # post process
            # keypoint = self.convert_AlphaOpenposeCoco_to_standard16Joint(keypoint.copy())  # Nx16x2
            return keypoint

    def post_process(self, pose17j):
        """
        Convert 17-joint image-space keypoints into the 16-joint format used
        downstream, plus a normalized-image-coordinate copy.
        :return: dict with keys 'pose16j' and 'keypoint_imgnorm'.
        """
        pose16j = self.convert_AlphaOpenposeCoco_to_standard16Joint(pose17j)
        # normlization keypoint
        keypoint_imgnorm = self.normalize_screen_coordinates(pose16j[..., :2], w=self.img_wh, h=self.img_wh)
        return {
            'pose16j': pose16j,
            'keypoint_imgnorm': keypoint_imgnorm,
        }

    def convert_AlphaOpenposeCoco_to_standard16Joint(self, pose_x):
        """
        Convert COCO-style 17-joint poses to a 16-joint layout by synthesizing
        hip, spine, neck and head joints and reordering.

        pose_x: nx17x2 (a single 17x2 pose is also accepted)
        https://zhuanlan.zhihu.com/p/367707179
        """
        single_pose = False
        if not len(pose_x.shape) == 3:
            single_pose = True
            pose_x = pose_x.reshape(1, 17, 2)
        # Synthesized joints from COCO landmarks.
        hip = 0.5 * (pose_x[:, 11] + pose_x[:, 12])
        neck = 0.5 * (pose_x[:, 5] + pose_x[:, 6])
        spine = 0.5 * (neck + hip)
        # head = 0.5 * (pose_x[:, 1] + pose_x[:, 2])
        head_0 = pose_x[:, 0] # by noise
        head_1 = (neck - hip) * 0.5 + neck # by backbone
        head_2 = 0.5 * (pose_x[:, 1] + pose_x[:, 2]) # by two eye
        head_3 = 0.5 * (pose_x[:, 3] + pose_x[:, 4]) # by two ear
        # Weighted blend of the four head estimates.
        head = head_0 * 0.1 + head_1 * 0.6 + head_2 * 0.1 + head_3 * 0.2
        combine = np.stack([hip, spine, neck, head]) # 0 1 2 3 ---> 17, 18, 19 ,20
        combine = np.transpose(combine, (1, 0, 2))
        combine = np.concatenate([pose_x, combine], axis=1)
        reorder = [17, 12, 14, 16, 11, 13, 15, 18, 19, 20, 5, 7, 9, 6, 8, 10]
        standart_16joint = combine[:, reorder]
        if single_pose:
            standart_16joint = standart_16joint[0]
        return standart_16joint

    def normalize_screen_coordinates(self, X, w, h):
        """Map pixel coordinates [0, w] to [-1, 1], preserving aspect ratio."""
        assert X.shape[-1] == 2
        # Normalize so that [0, w] is mapped to [-1, 1], while preserving the aspect ratio
        return X / w * 2 - [1, h / w]

    def image_coordinates(self, X, w, h):
        """Inverse of normalize_screen_coordinates: back to pixel coordinates."""
        assert X.shape[-1] == 2
        # Reverse camera frame normalization
        return (X + [1, h / w]) * w / 2

    def save_video(self, viz_video_path, viz_output_path, keypoints, prediction, camspace=True):
        """
        Render the 2D keypoints and 3D pose into a video for debugging.
        :param viz_video_path: path of the original input video
        :param viz_output_path: path for the rendered output video.
        :param keypoints: 2D keypoints, t x 16 x 2, in pixel coordinates
        :param prediction: 3D prediction, t x 16 x 3, in world or camera space
            (camera space when camspace=True, in which case it is converted)
        :return:
        """
        def _postprocess(prediction):
            # # camera rotation
            rot = np.array([0.14070565, -0.15007018, -0.7552408, 0.62232804], dtype=np.float32)
            prediction_world = camera_to_world_bynumpy(prediction, R=rot, t=0)
            # We don't have the trajectory, but at least we can rebase the height
            prediction_world[:, :, 2] -= np.min(prediction_world[:, :, 2])
            return prediction_world

        # Prepare the 3D pose sequence to visualize.
        if camspace:
            prediction = _postprocess(prediction)
        anim_output = {'Reconstruction': prediction}
        args.frame_rate = 25
        args.viz_bitrate = 30000 # bitrate for mp4 videos
        args.viz_limit = -1 # only render first N frames
        args.viz_downsample = 1 # downsample FPS by a factor N
        args.viz_size = 5 # image size
        args.viz_skip = 0 # skip first N frames of input video
        args.video_width, args.video_height = self.data_loader.videoinfo()[2]
        # Generate the visualization.
        print('Rendering... save to {}'.format(viz_output_path))
        render_animation(keypoints, anim_output,
                         Skeleton(), args.frame_rate, args.viz_bitrate, np.array(70., dtype=np.float32), viz_output_path,
                         limit=args.viz_limit, downsample=args.viz_downsample, size=args.viz_size,
                         input_video_path=viz_video_path, viewport=(args.video_width, args.video_height),
                         input_video_skip=args.viz_skip)
def generate_kpts_byclass(video_file):
    """Sample driver: build a Detector for *video_file* and collect the
    per-frame keypoints it produces.

    Args:
        video_file: path to the input video.

    Returns:
        list with one keypoint array (or None) per frame.
    """
    detector = Detector(video_file)
    frame_count = detector.data_loader.length()
    keypoints = [detector.get_kpt() for _ in range(frame_count)]
    print("---------------- finish kpts ............")
    return keypoints
# def mkd(target_dir, get_parent=True):
# # get parent path and create
# if get_parent:
# savedir = os.path.abspath(os.path.join(target_dir, os.pardir))
# else:
# savedir = target_dir
# if not os.path.exists(savedir):
# os.makedirs(savedir, exist_ok=True)
if __name__ == "__main__":
    # Run from the repo root so relative model/output paths resolve; the
    # actual smoke test calls are currently disabled.
    os.chdir('../..')
    print(os.getcwd())
    # handle_video(img_path='outputs/image/kobe')
    # generate_kpts('outputs/dance.mp4')
| 15,919 | 36.725118 | 146 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/demo.py | import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import numpy as np
from tqdm import tqdm
from SPPE.src.main_fast_inference import *
from dataloader import ImageLoader, DetectionLoader, DetectionProcessor, DataWriter, Mscoco
from fn import getTime
from opt import opt
from pPose_nms import write_json
from in_the_wild_data import split_frame
def main(args):
    """Run AlphaPose 2D pose estimation over a folder (or list) of images.

    Image names come from ``args.inputlist`` (one name per line) or from
    walking ``args.inputpath``. For each image, YOLO proposes person boxes,
    SPPE estimates per-person heatmaps, and DataWriter collects the results,
    which are finally dumped as JSON into ``args.outputpath``.

    Raises:
        IOError: when neither an input list nor a usable input path is given.
    """
    inputpath = args.inputpath
    inputlist = args.inputlist
    mode = args.mode
    if not os.path.exists(args.outputpath):
        os.makedirs(args.outputpath, exist_ok=True)

    if len(inputlist):
        im_names = open(inputlist, 'r').readlines()
    elif len(inputpath) and inputpath != '/':
        for root, dirs, files in os.walk(inputpath):
            # FIX: sort the names so frames are processed in a deterministic
            # order -- os.walk yields files in arbitrary order, and the video
            # pipeline in this package already uses a sorted listing.
            im_names = sorted(f for f in files if 'png' in f or 'jpg' in f)
    else:
        raise IOError('Error: must contain either --indir/--list')

    # Load input images
    data_loader = ImageLoader(im_names, batchSize=args.detbatch, format='yolo').start()

    # Load detection loader
    print('Loading YOLO model..')
    sys.stdout.flush()
    det_loader = DetectionLoader(data_loader, batchSize=args.detbatch).start()
    det_processor = DetectionProcessor(det_loader).start()

    # Load pose model
    pose_dataset = Mscoco()
    if args.fast_inference:
        pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
    else:
        pose_model = InferenNet(4 * 1 + 1, pose_dataset)
    pose_model.cuda()
    pose_model.eval()

    # Per-stage timing accumulators: detection / pose / post-processing.
    runtime_profile = {
        'dt': [],
        'pt': [],
        'pn': []
    }

    # Init data writer
    writer = DataWriter(args.save_video).start()

    data_len = data_loader.length()
    im_names_desc = tqdm(range(data_len))
    batchSize = args.posebatch
    for i in im_names_desc:
        start_time = getTime()
        with torch.no_grad():
            (inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()
            if boxes is None or boxes.nelement() == 0:
                # No person detected: record an empty result for this frame.
                writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
                continue

            ckpt_time, det_time = getTime(start_time)
            runtime_profile['dt'].append(det_time)
            # Pose Estimation: run SPPE over the person crops in mini-batches.
            datalen = inps.size(0)
            leftover = 0
            if datalen % batchSize:
                leftover = 1
            num_batches = datalen // batchSize + leftover
            hm = []
            for j in range(num_batches):
                inps_j = inps[j * batchSize:min((j + 1) * batchSize, datalen)].cuda()
                hm_j = pose_model(inps_j)
                hm.append(hm_j)
            hm = torch.cat(hm)
            ckpt_time, pose_time = getTime(ckpt_time)
            runtime_profile['pt'].append(pose_time)

            hm = hm.cpu()
            writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])

            ckpt_time, post_time = getTime(ckpt_time)
            runtime_profile['pn'].append(post_time)

        if args.profile:
            # TQDM progress line with per-stage timings.
            im_names_desc.set_description(
                'det time: {dt:.3f} | pose time: {pt:.2f} | post processing: {pn:.4f}'.format(
                    dt=np.mean(runtime_profile['dt']), pt=np.mean(runtime_profile['pt']), pn=np.mean(runtime_profile['pn']))
            )

    print('===========================> Finish Model Running.')
    if (args.save_img or args.save_video) and not args.vis_fast:
        print('===========================> Rendering remaining images in the queue...')
        print('===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).')
    # Busy-wait until the writer thread has drained its queue.
    while writer.running():
        pass
    writer.stop()
    final_result = writer.results()
    write_json(final_result, args.outputpath)
if __name__ == "__main__":
    # Configure a sample run on the 'kobe' clip from in_the_wild_data.
    args = opt
    args.dataset = 'coco'
    args.sp = True
    if not args.sp:
        # Multi-process mode setup (dead path here since args.sp is True).
        torch.multiprocessing.set_start_method('forkserver', force=True)
        torch.multiprocessing.set_sharing_strategy('file_system')

    video_name = 'kobe'
    args.inputpath = f'../in_the_wild_data/split_{video_name}'
    if not os.listdir(args.inputpath):
        # Frame folder is empty: split the source video into frames first.
        split_frame.split(f'../in_the_wild_data/{video_name}.mp4')
    args.outputpath = f'../in_the_wild_data/alphapose_{video_name}'
    args.save_img = True

    args.detbatch = 4
    main(args)
| 4,420 | 32.492424 | 144 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/gene_npz.py | import ntpath
import os
import shutil
import numpy as np
import torch.utils.data
from tqdm import tqdm
from SPPE.src.main_fast_inference import *
from common.utils import calculate_area
from dataloader import DetectionLoader, DetectionProcessor, DataWriter, Mscoco, VideoLoader
from fn import getTime
from opt import opt
from pPose_nms import write_json
# ---------------------------------------------------------------------------
# Module-level configuration: override the CLI defaults parsed into `opt`.
# These assignments mutate the shared `opt` namespace.
# ---------------------------------------------------------------------------
args = opt
args.vis_fast = False # True # add for speed
args.dataset = 'coco'
args.fast_inference = False
args.save_img = False # save time and space.
args.sp = True

if not args.sp:
    # Multi-process mode: use forkserver start method and file-system tensor
    # sharing for torch.multiprocessing (dead path here since args.sp is True).
    torch.multiprocessing.set_start_method('forkserver', force=True)
    torch.multiprocessing.set_sharing_strategy('file_system')
def model_load():
    """Placeholder kept for interface parity with other joint detectors.

    Returns:
        None -- this detector constructs its models inside handle_video().
    """
    return None
def image_interface(model, image):
    """Placeholder single-image API (not implemented for this detector)."""
    return None
def generate_kpts(video_file):
    """Run the 2D pose pipeline on *video_file* and return per-frame keypoints.

    Args:
        video_file: path to the input video.

    Returns:
        list with one numpy keypoint array per frame (the highest-scoring
        person in that frame). Frames without a detection are filled with the
        nearest preceding valid detection (leading gaps use the first valid
        one); entries stay None only if no frame at all had a detection.
    """
    final_result, _ = handle_video(video_file)

    # For every frame keep the person maximizing proposal_score * bbox area.
    kpts = []
    for frame in final_result:
        if not frame['result']:  # No people detected in this frame
            kpts.append(None)
            continue
        best = max(frame['result'],
                   key=lambda x: x['proposal_score'].data[0] * calculate_area(x['keypoints']))
        kpts.append(best['keypoints'].data.numpy())

    # Fill detection gaps with the nearest valid neighbour.
    # The previous code blindly assigned kpts[-1] (which can itself be None),
    # and its second fill loop was dead code (it ran after no_person.clear());
    # that loop would also have raised "truth value of an array is ambiguous"
    # on `if kpts[-1]` since the entries are numpy arrays.
    last_valid = next((k for k in kpts if k is not None), None)
    for idx, k in enumerate(kpts):
        if k is None:
            kpts[idx] = last_valid
        else:
            last_valid = k

    return kpts
def handle_video(video_file):
    """Run the full 2D pose pipeline (YOLO person detection + SPPE pose
    estimation) over every frame of *video_file*.

    Results and optional renderings are written under
    ``outputs/alpha_pose_<video_name>``.

    Returns:
        (final_result, video_name): per-frame pose results collected by
        DataWriter, and the video basename without extension.
    """
    # =========== common ===============
    args.video = video_file
    base_name = os.path.basename(args.video)
    video_name = base_name[:base_name.rfind('.')]
    # =========== end common ===============

    # =========== image ===============
    # img_path = f'outputs/alpha_pose_{video_name}/split_image/'
    # args.inputpath = img_path
    # args.outputpath = f'outputs/alpha_pose_{video_name}'
    # if os.path.exists(args.outputpath):
    #     shutil.rmtree(f'{args.outputpath}/vis', ignore_errors=True)
    # else:
    #     os.mkdir(args.outputpath)
    #
    # # if not len(video_file):
    # #     raise IOError('Error: must contain --video')
    #
    # if len(img_path) and img_path != '/':
    #     for root, dirs, files in os.walk(img_path):
    #         im_names = sorted([f for f in files if 'png' in f or 'jpg' in f])
    # else:
    #     raise IOError('Error: must contain either --indir/--list')
    #
    # # Load input images
    # data_loader = ImageLoader(im_names, batchSize=args.detbatch, format='yolo').start()
    # print(f'Totally {data_loader.datalen} images')
    # =========== end image ===============

    # =========== video ===============
    args.outputpath = f'outputs/alpha_pose_{video_name}'
    if os.path.exists(args.outputpath):
        # Existing run: only clear the rendered frames, keep other outputs.
        shutil.rmtree(f'{args.outputpath}/vis', ignore_errors=True)
    else:
        os.mkdir(args.outputpath)
    videofile = args.video
    mode = args.mode
    if not len(videofile):
        raise IOError('Error: must contain --video')
    # Load input video (threaded loader; start() spawns the reader thread)
    data_loader = VideoLoader(videofile, batchSize=args.detbatch).start()
    (fourcc, fps, frameSize) = data_loader.videoinfo()
    print('the video is {} f/s'.format(fps))
    print('the video frameSize: {}'.format(frameSize))
    # =========== end video ===============
    # Load detection loader
    print('Loading YOLO model..')
    sys.stdout.flush()
    det_loader = DetectionLoader(data_loader, batchSize=args.detbatch).start()
    # start a thread to read frames from the file video stream
    det_processor = DetectionProcessor(det_loader).start()
    # Load pose model
    pose_dataset = Mscoco()
    if args.fast_inference:
        pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
    else:
        pose_model = InferenNet(4 * 1 + 1, pose_dataset)
    pose_model.cuda()
    pose_model.eval()
    # Per-stage timing accumulators: detection / pose / post-processing.
    runtime_profile = {
        'dt': [],
        'pt': [],
        'pn': []
    }
    # Data writer
    save_path = os.path.join(args.outputpath, 'AlphaPose_' + ntpath.basename(video_file).split('.')[0] + '.avi')
    # writer = DataWriter(args.save_video, save_path, cv2.VideoWriter_fourcc(*'XVID'), fps, frameSize).start()
    writer = DataWriter(args.save_video).start()

    print('Start pose estimation...')
    im_names_desc = tqdm(range(data_loader.length()))
    batchSize = args.posebatch
    for i in im_names_desc:
        start_time = getTime()
        with torch.no_grad():
            (inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()
            if orig_img is None:
                # Stream exhausted earlier than expected: stop the loop.
                print(f'{i}-th image read None: handle_video')
                break
            if boxes is None or boxes.nelement() == 0:
                # No person detected: record an empty result for this frame.
                writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
                continue

            ckpt_time, det_time = getTime(start_time)
            runtime_profile['dt'].append(det_time)
            # Pose Estimation: run SPPE over the person crops in mini-batches.
            datalen = inps.size(0)
            leftover = 0
            if datalen % batchSize:
                leftover = 1
            num_batches = datalen // batchSize + leftover
            hm = []
            for j in range(num_batches):
                inps_j = inps[j * batchSize:min((j + 1) * batchSize, datalen)].cuda()
                hm_j = pose_model(inps_j)
                hm.append(hm_j)
            hm = torch.cat(hm)
            ckpt_time, pose_time = getTime(ckpt_time)
            runtime_profile['pt'].append(pose_time)

            hm = hm.cpu().data
            writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])

            ckpt_time, post_time = getTime(ckpt_time)
            runtime_profile['pn'].append(post_time)

        if args.profile:
            # TQDM
            im_names_desc.set_description(
                'det time: {dt:.4f} | pose time: {pt:.4f} | post processing: {pn:.4f}'.format(
                    dt=np.mean(runtime_profile['dt']), pt=np.mean(runtime_profile['pt']), pn=np.mean(runtime_profile['pn']))
            )
    if (args.save_img or args.save_video) and not args.vis_fast:
        print('===========================> Rendering remaining images in the queue...')
        print('===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).')
    # Busy-wait until the writer thread has drained its queue.
    while writer.running():
        pass
    writer.stop()
    final_result = writer.results()
    write_json(final_result, args.outputpath)

    return final_result, video_name
def mkd(target_dir, get_parent=True):
    """Ensure a directory exists for *target_dir*.

    Args:
        target_dir: a file or directory path.
        get_parent: when True (default) create target_dir's parent directory
            (useful when target_dir is a file path); otherwise create
            target_dir itself.
    """
    # get parent path and create
    if get_parent:
        savedir = os.path.abspath(os.path.join(target_dir, os.pardir))
    else:
        savedir = target_dir
    # os.makedirs(..., exist_ok=True) is idempotent; the previous
    # os.path.exists() pre-check was redundant and racy (TOCTOU).
    os.makedirs(savedir, exist_ok=True)
if __name__ == "__main__":
    # Smoke test: run from the repo root so the relative model/output paths
    # used by the pipeline resolve correctly.
    os.chdir('../..')
    print(os.getcwd())
    # handle_video(img_path='outputs/image/kobe')
    generate_kpts('outputs/dance.mp4')
| 7,432 | 33.896714 | 144 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/dataloader_webcam.py | import os
import torch
from torch.autograd import Variable
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image, ImageDraw
from SPPE.src.utils.img import load_image, cropBox, im_to_torch
from opt import opt
from yolo.preprocess import prep_image, prep_frame, inp_to_image
from pPose_nms import pose_nms, write_json
from SPPE.src.utils.eval import getPrediction
from yolo.util import write_results, dynamic_write_results
from yolo.darknet import Darknet
from tqdm import tqdm
import cv2
import json
import numpy as np
import sys
import time
import torch.multiprocessing as mp
from multiprocessing import Process
from multiprocessing import Queue as pQueue
from threading import Thread
# import the Queue class from Python 3
if sys.version_info >= (3, 0):
from queue import Queue, LifoQueue
# otherwise, import the Queue class for Python 2.7
else:
from Queue import Queue, LifoQueue
if opt.vis_fast:
from fn import vis_frame_fast as vis_frame
else:
from fn import vis_frame
class WebcamLoader:
    """Threaded webcam frame grabber.

    Reads batches of frames from an OpenCV capture device on a daemon thread
    and pushes them onto a LIFO queue, so consumers always get the most
    recent frames (low latency). When the consumer falls behind and the
    queue fills up, all queued (stale) frames are dropped.
    """

    def __init__(self, webcam, batchSize=1, queueSize=256):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.stream = cv2.VideoCapture(int(webcam))
        assert self.stream.isOpened(), 'Cannot capture source'
        self.stopped = False
        # initialize the queue used to store frames read from
        # the video file
        self.batchSize = batchSize
        # LIFO: getitem() returns the most recently captured batch first.
        self.Q = LifoQueue(maxsize=queueSize)

    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        """Producer loop run on the daemon thread.

        NOTE(review): stop() sets self.stopped but this loop never checks it;
        the loop only exits when the stream stops yielding frames.
        """
        # keep looping infinitely
        i = 0
        while True:
            # otherwise, ensure the queue has room in it
            if not self.Q.full():
                img = []
                orig_img = []
                im_name = []
                im_dim_list = []
                for k in range(self.batchSize):
                    (grabbed, frame) = self.stream.read()
                    # if the `grabbed` boolean is `False`, then we have
                    # reached the end of the video file
                    if not grabbed:
                        self.stop()
                        return
                    # Resize/pad the frame to the detector's input size.
                    inp_dim = int(opt.inp_dim)
                    img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
                    img.append(img_k)
                    orig_img.append(orig_img_k)
                    im_name.append(str(i)+'.jpg')
                    im_dim_list.append(im_dim_list_k)
                with torch.no_grad():
                    # Human Detection
                    img = torch.cat(img)
                    im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
                self.Q.put((img, orig_img, im_name, im_dim_list))
                i = i+1
            else:
                # Consumer fell behind: drop every queued (stale) batch.
                with self.Q.mutex:
                    self.Q.queue.clear()

    def videoinfo(self):
        # indicate the video info: (fourcc codec, fps, (width, height))
        fourcc=int(self.stream.get(cv2.CAP_PROP_FOURCC))
        fps=self.stream.get(cv2.CAP_PROP_FPS)
        frameSize=(int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        return (fourcc,fps,frameSize)

    def getitem(self):
        # return next frame in the queue (most recent, since Q is a LifoQueue)
        return self.Q.get()

    def len(self):
        # return queue size
        return self.Q.qsize()

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
class DetectionLoader:
    """Threaded YOLOv3-SPP person detector stage.

    Pulls frame batches from an upstream loader (and clears its backlog),
    runs detection + NMS on the GPU, maps boxes from the letterboxed
    detector space back to original image coordinates, and queues one
    (orig_img, name, boxes, scores, inps, pt1, pt2) tuple per frame.
    Frames with no detections get (img, name, None, ...) placeholders.
    """
    def __init__(self, dataloder, batchSize=1, queueSize=1024):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
        self.det_model.load_weights('models/yolo/yolov3-spp.weights')
        self.det_model.net_info['height'] = opt.inp_dim
        self.det_inp_dim = int(self.det_model.net_info['height'])
        # YOLO input side must be a positive multiple of the 32-px stride
        assert self.det_inp_dim % 32 == 0
        assert self.det_inp_dim > 32
        self.det_model.cuda()
        self.det_model.eval()
        self.stopped = False
        self.dataloder = dataloder
        self.batchSize = batchSize
        # initialize the queue used to store frames read from
        # the video file
        self.Q = LifoQueue(maxsize=queueSize)
    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self
    def update(self):
        # keep looping the whole dataset
        while True:
            img, orig_img, im_name, im_dim_list = self.dataloder.getitem()
            # drop upstream backlog: we only ever process the latest batch
            with self.dataloder.Q.mutex:
                self.dataloder.Q.queue.clear()
            with torch.no_grad():
                # Human Detection
                img = img.cuda()
                prediction = self.det_model(img, CUDA=True)
                # NMS process
                dets = dynamic_write_results(prediction, opt.confidence,
                                    opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
                if isinstance(dets, int) or dets.shape[0] == 0:
                    # no detections in the whole batch: emit placeholders
                    for k in range(len(orig_img)):
                        if self.Q.full():
                            time.sleep(2)
                        self.Q.put((orig_img[k], im_name[k], None, None, None, None, None))
                    continue
                dets = dets.cpu()
                # dets[:, 0] is the in-batch image index of each detection
                im_dim_list = torch.index_select(im_dim_list,0, dets[:, 0].long())
                scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)
                # coordinate transfer
                # undo the letterbox padding/scale so boxes are in original
                # image pixel coordinates, then clamp to image bounds
                dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
                dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
                dets[:, 1:5] /= scaling_factor
                for j in range(dets.shape[0]):
                    dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
                    dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
                boxes = dets[:, 1:5]
                scores = dets[:, 5:6]
                for k in range(len(orig_img)):
                    # select the detections belonging to frame k
                    boxes_k = boxes[dets[:,0]==k]
                    if isinstance(boxes_k, int) or boxes_k.shape[0] == 0:
                        if self.Q.full():
                            time.sleep(2)
                        self.Q.put((orig_img[k], im_name[k], None, None, None, None, None))
                        continue
                    # preallocate crop buffers filled later by crop_from_dets
                    inps = torch.zeros(boxes_k.size(0), 3, opt.inputResH, opt.inputResW)
                    pt1 = torch.zeros(boxes_k.size(0), 2)
                    pt2 = torch.zeros(boxes_k.size(0), 2)
                    if self.Q.full():
                        time.sleep(2)
                    self.Q.put((orig_img[k], im_name[k], boxes_k, scores[dets[:,0]==k], inps, pt1, pt2))
    def read(self):
        # return next frame in the queue
        return self.Q.get()
    def len(self):
        # return queue len
        return self.Q.qsize()
class DetectionProcessor:
    """Threaded stage that crops detected people out of each frame.

    Reads (frame, boxes, ...) tuples from a DetectionLoader, converts the
    frame BGR->RGB, crops/normalizes one patch per box via crop_from_dets,
    and queues the patches for the pose estimator. Frames without boxes are
    forwarded with None placeholders.
    """
    def __init__(self, detectionLoader, queueSize=1024):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.detectionLoader = detectionLoader
        self.stopped = False
        # initialize the queue used to store data
        self.Q = LifoQueue(maxsize=queueSize)
    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self
    def update(self):
        # keep looping the whole dataset
        while True:
            with torch.no_grad():
                (orig_img, im_name, boxes, scores, inps, pt1, pt2) = self.detectionLoader.read()
                # drop upstream backlog so we stay on the freshest frame
                with self.detectionLoader.Q.mutex:
                    self.detectionLoader.Q.queue.clear()
                if boxes is None or boxes.nelement() == 0:
                    while self.Q.full():
                        time.sleep(0.2)
                    self.Q.put((None, orig_img, im_name, boxes, scores, None, None))
                    continue
                inp = im_to_torch(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))
                inps, pt1, pt2 = crop_from_dets(inp, boxes, inps, pt1, pt2)
                while self.Q.full():
                    time.sleep(0.2)
                self.Q.put((inps, orig_img, im_name, boxes, scores, pt1, pt2))
    def read(self):
        # return next frame in the queue
        return self.Q.get()
    def len(self):
        # return queue len
        return self.Q.qsize()
class WebcamDetectionLoader:
    """Combined webcam capture + YOLO person detection on one thread.

    Unlike WebcamLoader/DetectionLoader, this class both grabs frames and
    runs detection, queuing (inp, orig_img, boxes, scores) per frame. When
    the queue is full the whole backlog is cleared before putting, keeping
    only fresh results.
    """
    def __init__(self, webcam = 0, batchSize=1, queueSize=256):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
        self.det_model.load_weights('models/yolo/yolov3-spp.weights')
        self.det_model.net_info['height'] = opt.inp_dim
        self.det_inp_dim = int(self.det_model.net_info['height'])
        # YOLO input side must be a positive multiple of the 32-px stride
        assert self.det_inp_dim % 32 == 0
        assert self.det_inp_dim > 32
        self.det_model.cuda()
        self.det_model.eval()
        self.stream = cv2.VideoCapture(int(webcam))
        assert self.stream.isOpened(), 'Cannot open webcam'
        self.stopped = False
        self.batchSize = batchSize
        # initialize the queue used to store frames read from
        # the video file
        self.Q = LifoQueue(maxsize=queueSize)
    def len(self):
        return self.Q.qsize()
    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self
    def update(self):
        # keep looping
        while True:
            img = []
            inp = []
            orig_img = []
            im_name = []
            im_dim_list = []
            for k in range(self.batchSize):
                (grabbed, frame) = self.stream.read()
                # dropped frame: skip it (batch may come up short)
                if not grabbed:
                    continue
                # process and add the frame to the queue
                inp_dim = int(opt.inp_dim)
                img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
                inp_k = im_to_torch(orig_img_k)
                img.append(img_k)
                inp.append(inp_k)
                orig_img.append(orig_img_k)
                im_dim_list.append(im_dim_list_k)
            with torch.no_grad():
                ht = inp[0].size(1)
                wd = inp[0].size(2)
                # Human Detection
                img = Variable(torch.cat(img)).cuda()
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
                im_dim_list = im_dim_list.cuda()
                prediction = self.det_model(img, CUDA=True)
                # NMS process
                dets = dynamic_write_results(prediction, opt.confidence,
                                    opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
                if isinstance(dets, int) or dets.shape[0] == 0:
                    # no detections: forward frames with None boxes/scores
                    for k in range(len(inp)):
                        if self.Q.full():
                            with self.Q.mutex:
                                self.Q.queue.clear()
                        self.Q.put((inp[k], orig_img[k], None, None))
                    continue
                # dets[:, 0] is the in-batch image index of each detection
                im_dim_list = torch.index_select(im_dim_list,0, dets[:, 0].long())
                scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)
                # coordinate transfer
                # undo letterbox padding/scale back to original pixels
                dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
                dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
                dets[:, 1:5] /= scaling_factor
                for j in range(dets.shape[0]):
                    dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
                    dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
                boxes = dets[:, 1:5].cpu()
                scores = dets[:, 5:6].cpu()
                for k in range(len(inp)):
                    if self.Q.full():
                        # stale backlog is worthless for a live feed: drop it
                        with self.Q.mutex:
                            self.Q.queue.clear()
                    self.Q.put((inp[k], orig_img[k], boxes[dets[:,0]==k], scores[dets[:,0]==k]))
    def videoinfo(self):
        # indicate the video info
        fourcc=int(self.stream.get(cv2.CAP_PROP_FOURCC))
        fps=self.stream.get(cv2.CAP_PROP_FPS)
        frameSize=(int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        return (fourcc,fps,frameSize)
    def read(self):
        # return next frame in the queue
        return self.Q.get()
    def more(self):
        # return True if there are still frames in the queue
        return self.Q.qsize() > 0
    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
class DataWriter:
    """Threaded sink for pose results.

    Consumes (boxes, scores, heatmaps, pt1, pt2, frame, name) tuples, runs
    pose NMS, accumulates JSON-able results in final_result, and optionally
    renders/saves/visualizes each frame.
    """
    def __init__(self, save_video=False,
                 savepath='examples/res/1.avi', fourcc=cv2.VideoWriter_fourcc(*'XVID'), fps=25, frameSize=(640,480),
                 queueSize=1024):
        if save_video:
            # initialize the file video stream along with the boolean
            # used to indicate if the thread should be stopped or not
            self.stream = cv2.VideoWriter(savepath, fourcc, fps, frameSize)
            assert self.stream.isOpened(), 'Cannot open video for writing'
        self.save_video = save_video
        self.stopped = False
        self.final_result = []
        # initialize the queue used to store frames read from
        # the video file
        self.Q = Queue(maxsize=queueSize)
        if opt.save_img:
            if not os.path.exists(opt.outputpath + '/vis'):
                os.mkdir(opt.outputpath + '/vis')
    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self
    def update(self):
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty
            if not self.Q.empty():
                (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
                orig_img = np.array(orig_img, dtype=np.uint8)
                if boxes is None:
                    # no person detected: pass the raw frame through
                    if opt.save_img or opt.save_video or opt.vis:
                        img = orig_img
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        if opt.save_video:
                            self.stream.write(img)
                else:
                    # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                    preds_hm, preds_img, preds_scores = getPrediction(
                        hm_data, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
                    # merge duplicate person proposals into final poses
                    result = pose_nms(boxes, scores, preds_img, preds_scores)
                    result = {
                        'imgname': im_name,
                        'result': result
                    }
                    self.final_result.append(result)
                    if opt.save_img or opt.save_video or opt.vis:
                        img = vis_frame(orig_img, result)
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        if opt.save_video:
                            self.stream.write(img)
            else:
                time.sleep(0.1)
    def running(self):
        # indicate that the thread is still running
        time.sleep(0.2)
        return not self.Q.empty()
    def save(self, boxes, scores, hm_data, pt1, pt2, orig_img, im_name):
        # save next frame in the queue
        self.Q.put((boxes, scores, hm_data, pt1, pt2, orig_img, im_name))
    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
        time.sleep(0.2)
    def results(self):
        # return final result
        return self.final_result
    def len(self):
        # return queue len
        return self.Q.qsize()
class Mscoco(data.Dataset):
    """Skeleton of the MS-COCO keypoint dataset used only for its metadata.

    This pipeline consumes just the joint bookkeeping attributes (joint
    counts, accuracy indices and left/right flip pairs); item access is
    intentionally unimplemented.
    """
    def __init__(self, train=True, sigma=1,
                 scale_factor=(0.2, 0.3), rot_factor=40, label_type='Gaussian'):
        self.img_folder = '../data/coco/images'    # root image folders
        self.is_train = train    # training set or test set
        # input/output resolutions come from the global option namespace
        self.inputResH = opt.inputResH
        self.inputResW = opt.inputResW
        self.outputResH = opt.outputResH
        self.outputResW = opt.outputResW
        self.sigma = sigma
        self.scale_factor = scale_factor
        self.rot_factor = rot_factor
        self.label_type = label_type
        self.nJoints_coco = 17
        self.nJoints_mpii = 16
        self.nJoints = 33
        # 1-indexed joints that count toward accuracy metrics (all 17 COCO joints)
        self.accIdxs = tuple(range(1, 18))
        # 1-indexed left/right joint pairs used for horizontal flipping
        self.flipRef = ((2, 3), (4, 5), (6, 7),
                        (8, 9), (10, 11), (12, 13),
                        (14, 15), (16, 17))
    def __getitem__(self, index):
        pass
    def __len__(self):
        pass
def crop_from_dets(img, boxes, inps, pt1, pt2):
    '''
    Crop human from origin image according to Dectecion Results

    Args:
        img: (3, H, W) float tensor of the full frame in [0, 1], RGB order.
        boxes: (n, 4) detection boxes as (x1, y1, x2, y2).
        inps: preallocated (n, 3, inputResH, inputResW) output buffer.
        pt1, pt2: preallocated (n, 2) buffers for the expanded box corners.

    Returns:
        (inps, pt1, pt2) filled in place with the normalized crops and the
        expanded upper-left / bottom-right corners of each box.
    '''
    imght = img.size(1)
    imgwidth = img.size(2)
    # BUGFIX: the original aliased `tmp_img = img`, so the in-place mean
    # subtraction below mutated the caller's tensor. Clone first so the
    # normalization stays local to this function.
    tmp_img = img.clone()
    # subtract per-channel means expected by the pose network
    tmp_img[0].add_(-0.406)
    tmp_img[1].add_(-0.457)
    tmp_img[2].add_(-0.480)
    for i, box in enumerate(boxes):
        upLeft = torch.Tensor(
            (float(box[0]), float(box[1])))
        bottomRight = torch.Tensor(
            (float(box[2]), float(box[3])))
        ht = bottomRight[1] - upLeft[1]
        width = bottomRight[0] - upLeft[0]
        # expand small boxes by a larger margin so the person is not clipped
        if width > 100:
            scaleRate = 0.2
        else:
            scaleRate = 0.3
        upLeft[0] = max(0, upLeft[0] - width * scaleRate / 2)
        upLeft[1] = max(0, upLeft[1] - ht * scaleRate / 2)
        # clamp to the image while keeping at least a 5-px-wide box
        bottomRight[0] = max(
            min(imgwidth - 1, bottomRight[0] + width * scaleRate / 2), upLeft[0] + 5)
        bottomRight[1] = max(
            min(imght - 1, bottomRight[1] + ht * scaleRate / 2), upLeft[1] + 5)
        inps[i] = cropBox(tmp_img.clone(), upLeft, bottomRight, opt.inputResH, opt.inputResW)
        pt1[i] = upLeft
        pt2[i] = bottomRight
    return inps, pt1, pt2
| 19,581 | 36.585413 | 115 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/train_sppe/src/evaluation.py | # -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com)
# -----------------------------------------------------
import torch
import torch.nn as nn
import torch.utils.data
from .predict.annot.coco_minival import Mscoco_minival
from .predict.p_poseNMS import pose_nms, write_json
import numpy as np
from .opt import opt
from tqdm import tqdm
from .utils.img import flip, shuffleLR, vis_frame
from .util.eval import getPrediction
from .util.eval import getmap
import os
import cv2
def gaussian(size):
    '''
    Generate a 2D gaussian array of shape (1, size, size).

    The kernel is centered at (size // 2, size // 2) with sigma = size / 4
    and is NOT normalized: the center value equals 1, which is what the
    heatmap-smoothing convolution below expects.
    '''
    x = np.arange(0, size, 1, float)
    y = x[:, np.newaxis]
    x0 = y0 = size // 2
    # sigma scales with the kernel size (the original also assigned a dead
    # `sigma = 1` that was immediately overwritten; removed).
    sigma = size / 4.0
    # The gaussian is not normalized, we want the center value to equal 1
    g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
    g = g[np.newaxis, :]
    return g
# Depth-wise 5x5 Gaussian smoothing convolution over the 17 COCO joint
# heatmaps (groups=17 -> one independent filter per joint channel).
gaussian_kernel = nn.Conv2d(17, 17, kernel_size=4 * 1 + 1,
                            stride=1, padding=2, groups=17, bias=False)
# Build the fixed (non-learned) kernel: one (1, 5, 5) Gaussian replicated
# across all 17 channels, then installed as the conv weights on the GPU.
g = torch.from_numpy(gaussian(4 * 1 + 1)).clone()
g = torch.unsqueeze(g, 1)
g = g.repeat(17, 1, 1, 1)
gaussian_kernel.weight.data = g.float()
gaussian_kernel.cuda()
def prediction(model):
    """Evaluate *model* on the COCO minival crops and return its mAP.

    Two passes: first group all person crops by source image, then run the
    pose network per image (splitting the batch in half on CUDA OOM), apply
    pose NMS, dump results to './val' as COCO-format JSON and score them
    with getmap().
    """
    model.eval()
    dataset = Mscoco_minival()
    minival_loader = torch.utils.data.DataLoader(
        dataset, batch_size=1, shuffle=False, num_workers=20, pin_memory=True)
    minival_loader_desc = tqdm(minival_loader)
    final_result = []
    tmp_inp = {}
    # Pass 1: regroup per-crop loader output by image name so each image is
    # predicted with all of its person crops in one batch.
    for i, (inp, box, im_name, metaData) in enumerate(minival_loader_desc):
        #inp = torch.autograd.Variable(inp.cuda(), volatile=True)
        pt1, pt2, ori_inp = metaData
        #with torch.autograd.profiler.profile(use_cuda=True) as prof:
        if im_name[0] in tmp_inp.keys():
            inps = tmp_inp[im_name[0]]['inps']
            ori_inps = tmp_inp[im_name[0]]['ori_inps']
            boxes = tmp_inp[im_name[0]]['boxes']
            pt1s = tmp_inp[im_name[0]]['pt1s']
            pt2s = tmp_inp[im_name[0]]['pt2s']
            tmp_inp[im_name[0]]['inps'] = torch.cat((inps, inp), dim=0)
            tmp_inp[im_name[0]]['pt1s'] = torch.cat((pt1s, pt1), dim=0)
            tmp_inp[im_name[0]]['pt2s'] = torch.cat((pt2s, pt2), dim=0)
            tmp_inp[im_name[0]]['ori_inps'] = torch.cat(
                (ori_inps, ori_inp), dim=0)
            tmp_inp[im_name[0]]['boxes'] = torch.cat((boxes, box), dim=0)
        else:
            tmp_inp[im_name[0]] = {
                'inps': inp,
                'ori_inps': ori_inp,
                'boxes': box,
                'pt1s': pt1,
                'pt2s': pt2
            }
    # Pass 2: run the pose network image by image.
    for im_name, item in tqdm(tmp_inp.items()):
        inp = item['inps']
        pt1 = item['pt1s']
        pt2 = item['pt2s']
        box = item['boxes']
        ori_inp = item['ori_inps']
        with torch.no_grad():
            try:
                kp_preds = model(inp)
                kp_preds = kp_preds.data[:, :17, :]
            except RuntimeError as e:
                '''
                Divide inputs into two batches
                '''
                # NOTE(review): matching the OOM by exact message text is
                # fragile across torch versions; any other RuntimeError will
                # trip this assert.
                assert str(e) == 'CUDA error: out of memory'
                bn = inp.shape[0]
                inp1 = inp[: bn // 2]
                inp2 = inp[bn // 2:]
                kp_preds1 = model(inp1)
                kp_preds2 = model(inp2)
                kp_preds = torch.cat((kp_preds1, kp_preds2), dim=0)
                kp_preds = kp_preds.data[:, :17, :]
            # kp_preds = gaussian_kernel(F.relu(kp_preds))
            # Get predictions
            # location prediction (n, kp, 2) | score prediction (n, kp, 1)
            preds, preds_img, preds_scores = getPrediction(
                kp_preds.cpu().data, pt1, pt2,
                opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW
            )
            result = pose_nms(box, preds_img, preds_scores)
            result = {
                'imgname': im_name,
                'result': result
            }
        #img = display_frame(orig_img, result, opt.outputpath)
        #ori_inp = np.transpose(
        #    ori_inp[0][:3].clone().numpy(), (1, 2, 0)) * 255
        #img = vis_frame(ori_inp, result)
        #cv2.imwrite(os.path.join(
        #    './val', 'vis', im_name), img)
        final_result.append(result)
    write_json(final_result, './val', for_eval=True)
    return getmap()
if __name__ == '__main__':
    # NOTE(review): prediction() requires a `model` argument — calling it
    # with no arguments raises TypeError. Build/load a pose model first and
    # pass it in (e.g. prediction(model)).
    prediction()
| 4,497 | 32.567164 | 78 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/train_sppe/src/train.py | # -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com)
# -----------------------------------------------------
import torch
import torch.utils.data
from .utils.dataset import coco
from opt import opt
from tqdm import tqdm
from models.FastPose import createModel
from .utils.eval import DataLogger, accuracy
from .utils.img import flip, shuffleLR
from .evaluation import prediction
from tensorboardX import SummaryWriter
import os
def train(train_loader, m, criterion, optimizer, writer):
    """Run one training epoch over *train_loader*.

    Args:
        train_loader: yields (inputs, heatmap labels, joint mask, image-set id).
        m: pose network (already on GPU).
        criterion: heatmap regression loss (MSE in main()).
        optimizer: optimizer over m's parameters.
        writer: tensorboardX SummaryWriter for per-iteration scalars.

    Returns:
        (average loss, average accuracy) for the epoch. Also increments the
        global opt.trainIters counter as a side effect.
    """
    lossLogger = DataLogger()
    accLogger = DataLogger()
    m.train()
    train_loader_desc = tqdm(train_loader)
    for i, (inps, labels, setMask, imgset) in enumerate(train_loader_desc):
        inps = inps.cuda().requires_grad_()
        labels = labels.cuda()
        setMask = setMask.cuda()
        out = m(inps)
        # mask out joints that are not annotated for this sample's dataset
        loss = criterion(out.mul(setMask), labels)
        acc = accuracy(out.data.mul(setMask), labels.data, train_loader.dataset)
        accLogger.update(acc[0], inps.size(0))
        lossLogger.update(loss.item(), inps.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # global iteration counter shared through the opt namespace
        opt.trainIters += 1
        # Tensorboard
        writer.add_scalar(
            'Train/Loss', lossLogger.avg, opt.trainIters)
        writer.add_scalar(
            'Train/Acc', accLogger.avg, opt.trainIters)
        # TQDM
        train_loader_desc.set_description(
            'loss: {loss:.8f} | acc: {acc:.2f}'.format(
                loss=lossLogger.avg,
                acc=accLogger.avg * 100)
        )
    train_loader_desc.close()
    return lossLogger.avg, accLogger.avg
def valid(val_loader, m, criterion, optimizer, writer):
    """Run one validation epoch with horizontal-flip test-time augmentation.

    The loss is computed on the plain forward pass; accuracy is computed on
    the average of the plain and flipped (with left/right joints swapped
    back via shuffleLR) predictions. Increments opt.valIters as a side
    effect and returns (average loss, average accuracy).
    """
    lossLogger = DataLogger()
    accLogger = DataLogger()
    m.eval()
    val_loader_desc = tqdm(val_loader)
    for i, (inps, labels, setMask, imgset) in enumerate(val_loader_desc):
        inps = inps.cuda()
        labels = labels.cuda()
        setMask = setMask.cuda()
        with torch.no_grad():
            out = m(inps)
            loss = criterion(out.mul(setMask), labels)
            # flip-TTA: predict the mirrored input, un-mirror the heatmaps,
            # and average with the plain prediction
            flip_out = m(flip(inps))
            flip_out = flip(shuffleLR(flip_out, val_loader.dataset))
            out = (flip_out + out) / 2
        acc = accuracy(out.mul(setMask), labels, val_loader.dataset)
        lossLogger.update(loss.item(), inps.size(0))
        accLogger.update(acc[0], inps.size(0))
        # global iteration counter shared through the opt namespace
        opt.valIters += 1
        # Tensorboard
        writer.add_scalar(
            'Valid/Loss', lossLogger.avg, opt.valIters)
        writer.add_scalar(
            'Valid/Acc', accLogger.avg, opt.valIters)
        val_loader_desc.set_description(
            'loss: {loss:.8f} | acc: {acc:.2f}'.format(
                loss=lossLogger.avg,
                acc=accLogger.avg * 100)
        )
    val_loader_desc.close()
    return lossLogger.avg, accLogger.avg
def main():
    """Train FastPose on the configured dataset.

    Builds the model (optionally resuming from opt.loadModel), prepares the
    experiment directory, loss, optimizer, tensorboard writer and data
    loaders, then alternates train/valid epochs, snapshotting weights,
    options and optimizer state every opt.snapshot epochs.
    """
    # Model Initialize
    m = createModel().cuda()
    if opt.loadModel:
        print('Loading Model from {}'.format(opt.loadModel))
        m.load_state_dict(torch.load(opt.loadModel))
    else:
        print('Create new model')
    # Experiment directory (the original duplicated this creation block in
    # both branches above; makedirs also creates the missing parent).
    exp_dir = '../exp/{}/{}'.format(opt.dataset, opt.expID)
    if not os.path.exists(exp_dir):
        os.makedirs(exp_dir, exist_ok=True)

    criterion = torch.nn.MSELoss().cuda()

    if opt.optMethod == 'rmsprop':
        optimizer = torch.optim.RMSprop(m.parameters(),
                                        lr=opt.LR,
                                        momentum=opt.momentum,
                                        weight_decay=opt.weightDecay)
    elif opt.optMethod == 'adam':
        optimizer = torch.optim.Adam(
            m.parameters(),
            lr=opt.LR
        )
    else:
        raise Exception('Unsupported optMethod: {}'.format(opt.optMethod))

    writer = SummaryWriter(
        '.tensorboard/{}/{}'.format(opt.dataset, opt.expID))

    # Prepare Dataset. NOTE(review): only 'coco' is supported; any other
    # opt.dataset value leaves train_dataset/val_dataset undefined below.
    if opt.dataset == 'coco':
        train_dataset = coco.Mscoco(train=True)
        val_dataset = coco.Mscoco(train=False)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=opt.trainBatch, shuffle=True, num_workers=opt.nThreads, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=opt.validBatch, shuffle=False, num_workers=opt.nThreads, pin_memory=True)

    # Model Transfer
    m = torch.nn.DataParallel(m).cuda()

    # Start Training
    for i in range(opt.nEpochs):
        opt.epoch = i
        print('############# Starting Epoch {} #############'.format(opt.epoch))
        loss, acc = train(train_loader, m, criterion, optimizer, writer)
        print('Train-{idx:d} epoch | loss:{loss:.8f} | acc:{acc:.4f}'.format(
            idx=opt.epoch,
            loss=loss,
            acc=acc
        ))
        opt.acc = acc
        opt.loss = loss
        m_dev = m.module
        if i % opt.snapshot == 0:
            # periodic snapshot of weights, option namespace and optimizer
            torch.save(
                m_dev.state_dict(), '../exp/{}/{}/model_{}.pkl'.format(opt.dataset, opt.expID, opt.epoch))
            torch.save(
                opt, '../exp/{}/{}/option.pkl'.format(opt.dataset, opt.expID))
            torch.save(
                optimizer, '../exp/{}/{}/optimizer.pkl'.format(opt.dataset, opt.expID))
        loss, acc = valid(val_loader, m, criterion, optimizer, writer)
        print('Valid-{idx:d} epoch | loss:{loss:.8f} | acc:{acc:.4f}'.format(
            idx=i,
            loss=loss,
            acc=acc
        ))
    writer.close()
if __name__ == '__main__':
    # Script entry point: run the full training loop.
    main()
| 6,446 | 29.554502 | 106 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/train_sppe/src/models/FastPose.py | # -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com)
# -----------------------------------------------------
import torch.nn as nn
from .layers.DUC import DUC
from .layers.SE_Resnet import SEResnet
# Import training option
from opt import opt
def createModel():
    """Factory returning a freshly constructed FastPose_SE network."""
    model = FastPose_SE()
    return model
class FastPose_SE(nn.Module):
    """FastPose network with an SE-ResNet-101 backbone.

    Head: PixelShuffle(2) followed by two DUC upsampling stages and a final
    3x3 conv producing opt.nClasses joint heatmaps.
    """
    # channel count fed into the output conv (after the second DUC stage)
    conv_dim = 128
    def __init__(self):
        super(FastPose_SE, self).__init__()
        self.preact = SEResnet('resnet101')
        # NOTE(review): 'suffle1' is a typo for 'shuffle1'; kept as-is so
        # existing checkpoints/attribute references keep working.
        self.suffle1 = nn.PixelShuffle(2)
        self.duc1 = DUC(512, 1024, upscale_factor=2)
        self.duc2 = DUC(256, 512, upscale_factor=2)
        self.conv_out = nn.Conv2d(
            self.conv_dim, opt.nClasses, kernel_size=3, stride=1, padding=1)
    def forward(self, x):
        # backbone features -> progressive upsampling -> joint heatmaps
        out = self.preact(x)
        out = self.suffle1(out)
        out = self.duc1(out)
        out = self.duc2(out)
        out = self.conv_out(out)
        return out
| 1,038 | 23.738095 | 76 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/train_sppe/src/models/layers/SE_module.py | # -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com)
# -----------------------------------------------------
from torch import nn
class SELayer(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Global-average-pools each channel, passes the channel vector through a
    two-layer bottleneck MLP ending in a sigmoid, and rescales the input
    channels by the resulting (0, 1) gates. Note the default reduction is 1
    (no bottleneck), matching this project's configuration.
    """
    def __init__(self, channel, reduction=1):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        bottleneck = channel // reduction
        self.fc = nn.Sequential(
            nn.Linear(channel, bottleneck),
            nn.ReLU(inplace=True),
            nn.Linear(bottleneck, channel),
            nn.Sigmoid()
        )
    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        # squeeze: (B, C, H, W) -> (B, C)
        squeezed = self.avg_pool(x).view(batch, channels)
        # excite: per-channel gates in (0, 1), broadcast over H and W
        gates = self.fc(squeezed).view(batch, channels, 1, 1)
        return x * gates
| 783 | 30.36 | 67 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/train_sppe/src/models/layers/SE_Resnet.py | # -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com)
# -----------------------------------------------------
import torch.nn as nn
from models.layers.SE_module import SELayer
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1, x4 channel expansion)
    with an optional Squeeze-and-Excitation stage before the residual add.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=False):
        super(Bottleneck, self).__init__()
        # module creation order is kept identical to preserve parameter
        # initialization and state-dict key ordering
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=0.1)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=0.1)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4, momentum=0.1)
        if reduction:
            # channel attention on the expanded features
            self.se = SELayer(planes * 4)
        self.reduc = reduction
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        # shortcut path (projected when channel/stride change)
        shortcut = x if self.downsample is None else self.downsample(x)
        # main path: two ReLU-activated convs, then the expanding 1x1
        out = F.relu(self.bn1(self.conv1(x)), inplace=True)
        out = F.relu(self.bn2(self.conv2(out)), inplace=True)
        out = self.bn3(self.conv3(out))
        if self.reduc:
            out = self.se(out)
        out += shortcut
        return F.relu(out)
class SEResnet(nn.Module):
    """ SEResnet

    SE-ResNet-50/101 backbone built from Bottleneck blocks; the first block
    of every stage carries the Squeeze-and-Excitation reduction. forward()
    returns the final 2048-channel, stride-32 feature map.
    """
    def __init__(self, architecture):
        super(SEResnet, self).__init__()
        assert architecture in ["resnet50", "resnet101"]
        self.inplanes = 64
        # per-stage block counts; resnet50/101 differ only in stage 3
        self.layers = [3, 4, {"resnet50": 6, "resnet101": 23}[architecture], 3]
        self.block = Bottleneck
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7,
                               stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64, eps=1e-5, momentum=0.1, affine=True)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self.make_layer(self.block, 64, self.layers[0])
        self.layer2 = self.make_layer(
            self.block, 128, self.layers[1], stride=2)
        self.layer3 = self.make_layer(
            self.block, 256, self.layers[2], stride=2)
        self.layer4 = self.make_layer(
            self.block, 512, self.layers[3], stride=2)
    def forward(self, x):
        x = self.maxpool(self.relu(self.bn1(self.conv1(x)))) # 64 * h/4 * w/4
        x = self.layer1(x) # 256 * h/4 * w/4
        x = self.layer2(x) # 512 * h/8 * w/8
        x = self.layer3(x) # 1024 * h/16 * w/16
        x = self.layer4(x) # 2048 * h/32 * w/32
        return x
    def stages(self):
        # expose the four residual stages (used for staged fine-tuning)
        return [self.layer1, self.layer2, self.layer3, self.layer4]
    def make_layer(self, block, planes, blocks, stride=1):
        # 1x1 projection shortcut when the spatial size or channels change
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, momentum=0.1),
            )
        layers = []
        # only the first (projecting) block of a stage gets SE reduction
        if downsample is not None:
            layers.append(block(self.inplanes, planes,
                                stride, downsample, reduction=True))
        else:
            layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
| 3,822 | 35.066038 | 85 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/train_sppe/src/models/layers/DUC.py | # -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com)
# -----------------------------------------------------
import torch.nn as nn
class DUC(nn.Module):
    '''
    Dense Upsampling Convolution: 3x3 conv -> BN -> ReLU -> PixelShuffle.

    Initialize: inplanes, planes, upscale_factor
    OUTPUT: (planes // upscale_factor^2) * ht * wd, with spatial size
    multiplied by upscale_factor in each dimension.
    '''
    def __init__(self, inplanes, planes, upscale_factor=2):
        super(DUC, self).__init__()
        self.conv = nn.Conv2d(
            inplanes, planes, kernel_size=3, padding=1, bias=False)
        self.bn = nn.BatchNorm2d(planes, momentum=0.1)
        self.relu = nn.ReLU(inplace=True)
        self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
    def forward(self, x):
        features = self.relu(self.bn(self.conv(x)))
        # trade channels for spatial resolution
        return self.pixel_shuffle(features)
| 898 | 30 | 67 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/train_sppe/src/utils/img.py | # -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com)
# -----------------------------------------------------
import numpy as np
import torch
import scipy.misc
import torch.nn.functional as F
import cv2
from opt import opt
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
CYAN = (255, 255, 0)
YELLOW = (0, 255, 255)
ORANGE = (0, 165, 255)
PURPLE = (255, 0, 255)
def im_to_torch(img):
    """HWC image array -> CHW float tensor, rescaled to [0, 1] if needed."""
    chw = np.transpose(img, (2, 0, 1))  # C*H*W
    tensor = to_torch(chw).float()
    # uint8-range images come in as 0..255; bring them into [0, 1]
    if tensor.max() > 1:
        tensor /= 255
    return tensor
def torch_to_im(img):
    """CHW tensor (or array) -> HWC numpy array."""
    arr = to_numpy(img)
    return np.transpose(arr, (1, 2, 0))  # H*W*C
def load_image(img_path):
    """Load an image file as a CHW float tensor in [0, 1], RGB channel order.

    BUGFIX: scipy.misc.imread (used originally) was removed in SciPy 1.2.
    Read with OpenCV instead — cv2 is already imported by this module — and
    convert BGR -> RGB to keep the previous channel order.
    """
    img = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
    # H x W x C => C x H x W
    return im_to_torch(img)
def to_numpy(tensor):
    """Return *tensor* as a numpy array.

    Torch tensors are moved to CPU and converted; numpy arrays pass through
    unchanged; anything else raises ValueError.
    """
    if torch.is_tensor(tensor):
        return tensor.cpu().numpy()
    if type(tensor).__module__ != 'numpy':
        raise ValueError("Cannot convert {} to numpy array"
                         .format(type(tensor)))
    return tensor
def to_torch(ndarray):
    """Return *ndarray* as a torch tensor.

    Numpy arrays are wrapped with from_numpy (shares memory); tensors pass
    through unchanged; anything else raises ValueError.
    """
    if type(ndarray).__module__ == 'numpy':
        return torch.from_numpy(ndarray)
    if not torch.is_tensor(ndarray):
        raise ValueError("Cannot convert {} to torch tensor"
                         .format(type(ndarray)))
    return ndarray
def drawGaussian(img, pt, sigma):
    """Stamp an unnormalized 2D Gaussian (peak value 1) centered at *pt*
    onto *img* and return it as a tensor.

    The Gaussian patch spans 6*sigma+1 pixels per side; only its in-bounds
    portion is written. NOTE(review): the patch is assigned (not max/added)
    over the target region, and the effective sigma is size/4, not the
    *sigma* argument (which only sets the patch size).
    """
    img = to_numpy(img)
    tmpSize = 3 * sigma
    # Check that any part of the gaussian is in-bounds
    ul = [int(pt[0] - tmpSize), int(pt[1] - tmpSize)]
    br = [int(pt[0] + tmpSize + 1), int(pt[1] + tmpSize + 1)]
    if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or
            br[0] < 0 or br[1] < 0):
        # If not, just return the image as is
        return to_torch(img)
    # Generate gaussian
    size = 2 * tmpSize + 1
    x = np.arange(0, size, 1, float)
    y = x[:, np.newaxis]
    x0 = y0 = size // 2
    sigma = size / 4.0
    # The gaussian is not normalized, we want the center value to equal 1
    g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
    # Usable gaussian range
    # intersect the patch with the image so partially off-screen points work
    g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
    g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
    # Image range
    img_x = max(0, ul[0]), min(br[0], img.shape[1])
    img_y = max(0, ul[1]), min(br[1], img.shape[0])
    img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
    return to_torch(img)
def transformBox(pt, ul, br, inpH, inpW, resH, resW):
    """Map an image-space point into the resH x resW heatmap grid of the
    aspect-preserving crop defined by corners *ul* (upper-left) and *br*
    (bottom-right). Returns integer pixel coordinates as an int tensor.
    """
    # box center relative to the upper-left corner
    center = torch.zeros(2)
    center[0] = (br[0] - 1 - ul[0]) / 2
    center[1] = (br[1] - 1 - ul[1]) / 2
    # crop window side lengths, padded to match the inpH:inpW aspect ratio
    lenH = max(br[1] - ul[1], (br[0] - ul[0]) * inpH / inpW)
    lenW = lenH * inpW / inpH
    # translate into crop coordinates, accounting for centering padding
    shifted = torch.zeros(2)
    shifted[0] = pt[0] - ul[0] + max(0, (lenW - 1) / 2 - center[0])
    shifted[1] = pt[1] - ul[1] + max(0, (lenH - 1) / 2 - center[1])
    # scale into heatmap resolution and round to the nearest pixel
    mapped = (shifted * resH) / lenH
    mapped[0] = round(float(mapped[0]))
    mapped[1] = round(float(mapped[1]))
    return mapped.int()
def transformBoxInvert(pt, ul, br, inpH, inpW, resH, resW):
    """Inverse of transformBox: map a heatmap-space point back to original
    image coordinates for the crop defined by *ul* and *br*."""
    # box center relative to the upper-left corner
    center = torch.zeros(2)
    center[0] = (br[0] - 1 - ul[0]) / 2
    center[1] = (br[1] - 1 - ul[1]) / 2
    # crop window side lengths, padded to the inpH:inpW aspect ratio
    lenH = max(br[1] - ul[1], (br[0] - ul[0]) * inpH / inpW)
    lenW = lenH * inpW / inpH
    # scale heatmap coords up to crop coords, then undo centering padding
    scaled = (pt * lenH) / resH
    scaled[0] = scaled[0] - max(0, (lenW - 1) / 2 - center[0])
    scaled[1] = scaled[1] - max(0, (lenH - 1) / 2 - center[1])
    # shift back to absolute image coordinates
    new_point = torch.zeros(2)
    new_point[0] = scaled[0] + ul[0]
    new_point[1] = scaled[1] + ul[1]
    return new_point
def cropBox(img, ul, br, resH, resW):
    """Crop the box [ul, br] out of a CHW tensor and resample it to
    (resH, resW) with an affine warp, padding to preserve aspect ratio.

    NOTE(review): pixels outside the box are zeroed IN PLACE on *img*
    before warping — callers pass a clone (see crop_from_dets) to protect
    their tensor. Relies on get_3rd_point, defined elsewhere in this module.
    """
    ul = ul.int()
    br = (br - 1).int()
    # br = br.int()
    # padded side lengths matching the resH:resW aspect ratio
    lenH = max((br[1] - ul[1]).item(), (br[0] - ul[0]).item() * resH / resW)
    lenW = lenH * resW / resH
    if img.dim() == 2:
        img = img[np.newaxis, :]
    box_shape = [br[1] - ul[1], br[0] - ul[0]]
    pad_size = [(lenH - box_shape[0]) // 2, (lenW - box_shape[1]) // 2]
    # Padding Zeros
    # blank out everything outside the (expanded) box before warping
    img[:, :ul[1], :], img[:, :, :ul[0]] = 0, 0
    img[:, br[1] + 1:, :], img[:, :, br[0] + 1:] = 0, 0
    # two corner correspondences + a synthesized third point define the
    # affine map from padded box -> output crop
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = np.array([ul[0] - pad_size[1], ul[1] - pad_size[0]], np.float32)
    src[1, :] = np.array([br[0] + pad_size[1], br[1] + pad_size[0]], np.float32)
    dst[0, :] = 0
    dst[1, :] = np.array([resW - 1, resH - 1], np.float32)
    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
    trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
    dst_img = cv2.warpAffine(torch_to_im(img), trans,
                             (resW, resH), flags=cv2.INTER_LINEAR)
    return im_to_torch(torch.Tensor(dst_img))
def cv_rotate(img, rot, resW, resH):
    """Rotate ``img`` by ``rot`` degrees about the image center, keeping the
    output size (resH, resW)."""
    centre = np.array((resW - 1, resH - 1)) / 2
    theta = np.pi * rot / 180
    # One reference direction rotated by theta fixes the rotation; the third
    # (perpendicular) point makes the affine transform well-determined.
    rotated_dir = get_dir([0, (resH - 1) * -0.5], theta)
    fixed_dir = np.array([0, (resH - 1) * -0.5], np.float32)
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = centre
    src[1, :] = centre + rotated_dir
    dst[0, :] = [(resW - 1) * 0.5, (resH - 1) * 0.5]
    dst[1, :] = np.array([(resW - 1) * 0.5, (resH - 1) * 0.5]) + fixed_dir
    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
    mat = cv2.getAffineTransform(np.float32(src), np.float32(dst))
    warped = cv2.warpAffine(torch_to_im(img), mat,
                            (resW, resH), flags=cv2.INTER_LINEAR)
    return im_to_torch(torch.Tensor(warped))
def flip(x):
    """Flip a (C, H, W) or (N, C, H, W) tensor along its last (width) axis.

    Fix: the original special-cased torch 0.4.1 with a fragile substring
    check on ``torch.__version__`` and otherwise round-tripped through
    CPU/NumPy (``np.fliplr`` on a transposed copy), which is both slow and
    forces a device transfer.  ``Tensor.flip`` has been available since the
    0.4 series and performs the identical width flip on any device, so it is
    used unconditionally.
    """
    assert (x.dim() == 3 or x.dim() == 4)
    return x.flip(dims=(x.dim() - 1,))
def shuffleLR(x, dataset):
    """Swap left/right joint channels according to ``dataset.flipRef``.

    ``flipRef`` holds 1-indexed (left, right) channel pairs.  The swap is
    done in place on ``x`` (joints are dim 0 for 3-D input, dim 1 for 4-D
    input); ``x`` is also returned for convenience.
    """
    pairs = dataset.flipRef
    assert (x.dim() == 3 or x.dim() == 4)
    for left, right in pairs:
        a, b = left - 1, right - 1
        if x.dim() == 4:
            held = x[:, b].clone()
            x[:, b] = x[:, a].clone()
            x[:, a] = held.clone()
        else:
            held = x[b].clone()
            x[b] = x[a].clone()
            x[a] = held.clone()
    return x
def vis_frame(frame, im_res, format='coco'):
    '''
    frame: frame image (BGR ndarray)
    im_res: dict of predictions with 'imgname' and 'result'; each result has
            'keypoints' (K, 2) and 'kp_score' (K, 1)
    format: coco (17 joints) or mpii (16 joints)
    return rendered image (a copy; the input frame is not modified)

    Keypoints with score <= 0.15 are skipped; limbs are drawn only when both
    endpoints were kept.  Relies on module-level color constants
    (RED, YELLOW, GREEN, BLUE, PURPLE).

    Fix: cv2.line requires an integer thickness; the original passed the raw
    float/tensor score expression, which raises a TypeError on modern
    OpenCV.  The score-scaled thickness is now cast with int().
    '''
    if format == 'coco':
        l_pair = [
            (0, 1), (0, 2), (1, 3), (2, 4),  # Head
            (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
            (5, 11), (6, 12),  # Body
            (11, 13), (12, 14), (13, 15), (14, 16)
        ]
        p_color = [RED, RED, RED, RED, RED, YELLOW, YELLOW, YELLOW,
                   YELLOW, YELLOW, YELLOW, GREEN, GREEN, GREEN, GREEN, GREEN, GREEN]
        line_color = [YELLOW, YELLOW, YELLOW, YELLOW, BLUE, BLUE,
                      BLUE, BLUE, BLUE, PURPLE, PURPLE, RED, RED, RED, RED]
    elif format == 'mpii':
        l_pair = [
            (8, 9), (11, 12), (11, 10), (2, 1), (1, 0),
            (13, 14), (14, 15), (3, 4), (4, 5),
            (8, 7), (7, 6), (6, 2), (6, 3), (8, 12), (8, 13)
        ]
        p_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED,
                   RED, PURPLE, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
        line_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE,
                      RED, RED, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
    else:
        raise NotImplementedError
    im_name = im_res['imgname'].split('/')[-1]  # kept for parity; currently unused
    img = frame.copy()
    for human in im_res['result']:
        part_line = {}
        kp_preds = human['keypoints']
        kp_scores = human['kp_score']
        # Draw keypoints
        for n in range(kp_scores.shape[0]):
            if kp_scores[n] <= 0.15:
                continue
            cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])
            part_line[n] = (cor_x, cor_y)
            cv2.circle(img, (cor_x, cor_y), 4, p_color[n], -1)
        # Draw limbs (thickness grows with the mean endpoint confidence)
        for i, (start_p, end_p) in enumerate(l_pair):
            if start_p in part_line and end_p in part_line:
                start_xy = part_line[start_p]
                end_xy = part_line[end_p]
                # FIX: OpenCV requires an int thickness.
                thickness = int(0.5 * (kp_scores[start_p] + kp_scores[end_p]) + 1)
                cv2.line(img, start_xy, end_xy, line_color[i], thickness)
    return img
def get_3rd_point(a, b):
    """Return the point completing a right angle at ``b``: ``b`` plus the
    vector ``a - b`` rotated by 90 degrees."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return b + np.array([-dy, dx], dtype=np.float32)
def get_dir(src_point, rot_rad):
    """Rotate the 2-D vector ``src_point`` by ``rot_rad`` radians
    (counter-clockwise); returns a 2-element list."""
    sin_r, cos_r = np.sin(rot_rad), np.cos(rot_rad)
    return [src_point[0] * cos_r - src_point[1] * sin_r,
            src_point[0] * sin_r + src_point[1] * cos_r]
| 9,817 | 30.773463 | 92 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/train_sppe/src/utils/pose.py | # -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com)
# -----------------------------------------------------
from utils.img import (load_image, drawGaussian, cropBox, transformBox, flip, shuffleLR, cv_rotate)
import torch
import numpy as np
import random
from opt import opt
def rnd(x):
    """Draw Gaussian noise scaled by ``x`` and clamp it to [-2x, 2x]
    (used to jitter augmentation parameters such as rotation)."""
    sample = np.random.randn(1)[0] * x
    return max(-2 * x, min(2 * x, sample))
def generateSampleBox(img_path, bndbox, part, nJoints, imgset, scale_factor, dataset, train=True, nJoints_coco=17):
    """Build one SPPE training/eval sample from an image and a person box.

    img_path: path of the image to load
    bndbox: (1, 4) person box [xmin, ymin, xmax, ymax]
    part: (nJoints, 2) joint coordinates in image space (values <= 0 = missing)
    nJoints: number of output heatmap channels
    imgset: dataset tag — only 'coco' triggers joint counting / label drawing
    scale_factor: (lo, hi) range for the random box enlargement
    dataset: dataset object forwarded to shuffleLR for its flipRef pairs
    train: enables color jitter, DPG perturbation, flip and rotation
    nJoints_coco: number of COCO joints to draw labels for
    Returns (inp, out, setMask): input crop, Gaussian heatmaps, per-joint mask.
    """
    img = load_image(img_path)
    if train:
        # Random per-channel brightness jitter (train-time color augmentation).
        img[0].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
        img[1].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
        img[2].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
    # Subtract the per-channel dataset mean (input normalization).
    img[0].add_(-0.406)
    img[1].add_(-0.457)
    img[2].add_(-0.480)
    upLeft = torch.Tensor((int(bndbox[0][0]), int(bndbox[0][1])))
    bottomRight = torch.Tensor((int(bndbox[0][2]), int(bndbox[0][3])))
    ht = bottomRight[1] - upLeft[1]
    width = bottomRight[0] - upLeft[0]
    imght = img.shape[1]
    imgwidth = img.shape[2]
    # Randomly enlarge the box, clipped to the image bounds.
    scaleRate = random.uniform(*scale_factor)
    upLeft[0] = max(0, upLeft[0] - width * scaleRate / 2)
    upLeft[1] = max(0, upLeft[1] - ht * scaleRate / 2)
    bottomRight[0] = min(imgwidth - 1, bottomRight[0] + width * scaleRate / 2)
    bottomRight[1] = min(imght - 1, bottomRight[1] + ht * scaleRate / 2)
    # Doing Random Sample (DPG: data-driven proposal generation jitter)
    if opt.addDPG:
        PatchScale = random.uniform(0, 1)
        if PatchScale > 0.85:
            # Sample a smaller patch with the box's aspect ratio inside the box.
            ratio = ht / width
            if (width < ht):
                patchWidth = PatchScale * width
                patchHt = patchWidth * ratio
            else:
                patchHt = PatchScale * ht
                patchWidth = patchHt / ratio
            xmin = upLeft[0] + random.uniform(0, 1) * (width - patchWidth)
            ymin = upLeft[1] + random.uniform(0, 1) * (ht - patchHt)
            xmax = xmin + patchWidth + 1
            ymax = ymin + patchHt + 1
        else:
            # Perturb each box edge with empirically-fitted Gaussian offsets.
            xmin = max(
                1, min(upLeft[0] + np.random.normal(-0.0142, 0.1158) * width, imgwidth - 3))
            ymin = max(
                1, min(upLeft[1] + np.random.normal(0.0043, 0.068) * ht, imght - 3))
            xmax = min(max(
                xmin + 2, bottomRight[0] + np.random.normal(0.0154, 0.1337) * width), imgwidth - 3)
            ymax = min(
                max(ymin + 2, bottomRight[1] + np.random.normal(-0.0013, 0.0711) * ht), imght - 3)
        upLeft[0] = xmin
        upLeft[1] = ymin
        bottomRight[0] = xmax
        bottomRight[1] = ymax
    # Counting Joints number (joints that fall strictly inside the box)
    jointNum = 0
    if imgset == 'coco':
        for i in range(17):
            if part[i][0] > 0 and part[i][0] > upLeft[0] and part[i][1] > upLeft[1] \
                    and part[i][0] < bottomRight[0] and part[i][1] < bottomRight[1]:
                jointNum += 1
    # Doing Random Crop: occasionally shrink the box toward a corner/edge
    # when most joints are visible (crop-occlusion augmentation).
    if opt.addDPG:
        if jointNum > 13 and train:
            switch = random.uniform(0, 1)
            if switch > 0.96:
                bottomRight[0] = (upLeft[0] + bottomRight[0]) / 2
                bottomRight[1] = (upLeft[1] + bottomRight[1]) / 2
            elif switch > 0.92:
                upLeft[0] = (upLeft[0] + bottomRight[0]) / 2
                bottomRight[1] = (upLeft[1] + bottomRight[1]) / 2
            elif switch > 0.88:
                upLeft[1] = (upLeft[1] + bottomRight[1]) / 2
                bottomRight[0] = (upLeft[0] + bottomRight[0]) / 2
            elif switch > 0.84:
                upLeft[0] = (upLeft[0] + bottomRight[0]) / 2
                upLeft[1] = (upLeft[1] + bottomRight[1]) / 2
            elif switch > 0.80:
                bottomRight[0] = (upLeft[0] + bottomRight[0]) / 2
            elif switch > 0.76:
                upLeft[0] = (upLeft[0] + bottomRight[0]) / 2
            elif switch > 0.72:
                bottomRight[1] = (upLeft[1] + bottomRight[1]) / 2
            elif switch > 0.68:
                upLeft[1] = (upLeft[1] + bottomRight[1]) / 2
    inputResH, inputResW = opt.inputResH, opt.inputResW
    outputResH, outputResW = opt.outputResH, opt.outputResW
    inp = cropBox(img, upLeft, bottomRight, inputResH, inputResW)
    if jointNum == 0:
        # No visible joints: blank the input so the sample carries no signal.
        inp = torch.zeros(3, inputResH, inputResW)
    out = torch.zeros(nJoints, outputResH, outputResW)
    setMask = torch.zeros(nJoints, outputResH, outputResW)
    # Draw Label: a Gaussian per visible joint, plus its supervision mask.
    if imgset == 'coco':
        for i in range(nJoints_coco):
            if part[i][0] > 0 and part[i][0] > upLeft[0] and part[i][1] > upLeft[1] \
                    and part[i][0] < bottomRight[0] and part[i][1] < bottomRight[1]:
                hm_part = transformBox(
                    part[i], upLeft, bottomRight, inputResH, inputResW, outputResH, outputResW)
                out[i] = drawGaussian(out[i], hm_part, opt.hmGauss)
                setMask[i].add_(1)
    if train:
        # Flip (input and labels together; labels also get L/R channel swap)
        if random.uniform(0, 1) < 0.5:
            inp = flip(inp)
            out = shuffleLR(flip(out), dataset)
        # Rotate (applied only ~40% of the time)
        r = rnd(opt.rotate)
        if random.uniform(0, 1) < 0.6:
            r = 0
        if r != 0:
            inp = cv_rotate(inp, r, opt.inputResW, opt.inputResH)
            out = cv_rotate(out, r, opt.outputResW, opt.outputResH)
    return inp, out, setMask
| 5,388 | 36.685315 | 115 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/train_sppe/src/utils/eval.py | # -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com)
# -----------------------------------------------------
from opt import opt
import sys
import numpy as np
import torch
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from .img import transformBoxInvert
class DataLogger(object):
    """Running-average tracker for scalar metrics (e.g. loss, accuracy)."""

    def __init__(self):
        self.clear()

    def clear(self):
        """Reset all statistics to zero."""
        self.value = 0
        self.sum = 0
        self.cnt = 0
        self.avg = 0

    def update(self, value, n=1):
        """Record ``value`` observed ``n`` times and refresh the average."""
        self.value = value
        self.sum += value * n
        self.cnt += n
        self._cal_avg()

    def _cal_avg(self):
        # cnt is guaranteed > 0 here because update() increments it first.
        self.avg = self.sum / self.cnt
class NullWriter(object):
    """File-like sink that discards everything written to it
    (used to temporarily silence stdout/stderr)."""

    def write(self, arg):
        """Ignore ``arg``; satisfies the minimal writable-stream interface."""
        return None
def accuracy(output, label, dataset, out_offset=None):
    """Joint-detection accuracy; for stacked-hourglass style list outputs,
    score only the last stack (``opt.nStack - 1``)."""
    if type(output) == list:
        last = opt.nStack - 1
        return accuracy(output[last], label[last], dataset, out_offset)
    return heatmapAccuracy(output.cpu().data, label.cpu().data, dataset.accIdxs)
def heatmapAccuracy(output, label, idxs):
    """PCK-style accuracy for the 1-indexed joints in ``idxs``.

    Slot 0 of the returned tensor holds the mean over joints with valid
    ground truth; a joint with no ground truth scores -1 and is excluded
    from the mean.  The distance threshold is normalized by
    ``opt.outputResH / 10``.
    """
    preds = getPreds(output)
    gt = getPreds(label)
    norm = torch.ones(preds.size(0)) * opt.outputResH / 10
    dists = calc_dists(preds, gt, norm)
    acc = torch.zeros(len(idxs) + 1)
    total = 0
    valid = 0
    for slot, joint in enumerate(idxs, start=1):
        acc[slot] = dist_acc(dists[joint - 1])
        if acc[slot] >= 0:
            total = total + acc[slot]
            valid += 1
    if valid != 0:
        acc[0] = total / valid
    return acc
def getPreds(hm):
    """Argmax-decode heatmaps: (N, K, H, W) -> (N, K, 2) float (x, y) coords.

    The peak's flat index is converted to column (x = idx % W) and row
    (y = idx // W) coordinates on the heatmap grid.
    """
    assert hm.dim() == 4, 'Score maps should be 4-dim'
    batch, joints, w = hm.size(0), hm.size(1), hm.size(3)
    maxval, idx = torch.max(hm.view(batch, joints, -1), 2)
    idx = idx.view(batch, joints, 1) + 1
    preds = idx.repeat(1, 1, 2).float()
    # Flat index -> (column, row); the +1/-1 preserves the historical
    # 1-based intermediate layout of the original implementation.
    preds[:, :, 0] = (preds[:, :, 0] - 1) % w
    preds[:, :, 1] = torch.floor((preds[:, :, 1] - 1) / w)
    return preds
def calc_dists(preds, target, normalize):
    """Per-joint normalized L2 distance between predictions and targets.

    preds/target: (N, K, 2); normalize: (N,) per-sample scale.
    Returns a (K, N) tensor where -1 marks joints with no ground truth
    (target coordinate <= 0).
    """
    preds = preds.float().clone()
    target = target.float().clone()
    dists = torch.zeros(preds.size(1), preds.size(0))
    for sample in range(preds.size(0)):
        for joint in range(preds.size(1)):
            if target[sample, joint, 0] > 0 and target[sample, joint, 1] > 0:
                dists[joint, sample] = torch.dist(
                    preds[sample, joint, :], target[sample, joint, :]) / normalize[sample]
            else:
                dists[joint, sample] = -1
    return dists
def dist_acc(dists, thr=0.5):
    ''' Return percentage below threshold while ignoring values with a -1 '''
    valid = dists.ne(-1)
    if valid.sum() > 0:
        # le(thr).eq(valid) is True exactly for valid entries within thr:
        # invalid (-1) entries are <= thr but fail the eq against valid=False.
        within = dists.le(thr).eq(valid).float().sum()
        return within * 1.0 / valid.float().sum()
    return -1
def postprocess(output):
    """Argmax-decode heatmaps and nudge each peak a quarter pixel toward the
    higher of its horizontal/vertical neighbors, then shift by -0.5 to move
    from pixel-corner to pixel-center coordinates."""
    coords = getPreds(output)
    for b in range(coords.size(0)):
        for j in range(coords.size(1)):
            hm = output[b][j]
            px, py = int(round(coords[b][j][0])), int(round(coords[b][j][1]))
            # Skip peaks on the border where a central difference is undefined.
            if 0 < px < opt.outputResW - 1 and 0 < py < opt.outputResH - 1:
                grad = torch.Tensor(
                    (hm[py][px + 1] - hm[py][px - 1], hm[py + 1][px] - hm[py - 1][px]))
                coords[b][j] += grad.sign() * 0.25
    coords -= 0.5
    return coords
def getPrediction(hms, pt1, pt2, inpH, inpW, resH, resW):
    """Decode heatmaps into keypoint coordinates.

    hms: (N, K, H, W) heatmaps; pt1/pt2: per-sample crop-box corners used to
    map back to image space.  Returns (preds, preds_tf, maxval): heatmap-space
    coords, image-space coords (via transformBoxInvert), and peak scores.
    Predictions whose peak value is <= 0 are zeroed out by pred_mask.
    """
    assert hms.dim() == 4, 'Score maps should be 4-dim'
    maxval, idx = torch.max(hms.view(hms.size(0), hms.size(1), -1), 2)
    maxval = maxval.view(hms.size(0), hms.size(1), 1)
    idx = idx.view(hms.size(0), hms.size(1), 1) + 1
    preds = idx.repeat(1, 1, 2).float()
    # Flat argmax index -> (column, row) on the heatmap grid.
    preds[:, :, 0] = (preds[:, :, 0] - 1) % hms.size(3)
    preds[:, :, 1] = torch.floor((preds[:, :, 1] - 1) / hms.size(3))
    pred_mask = maxval.gt(0).repeat(1, 1, 2).float()
    preds *= pred_mask
    # Very simple post-processing step to improve performance at tight PCK thresholds
    for i in range(preds.size(0)):
        for j in range(preds.size(1)):
            hm = hms[i][j]
            pX, pY = int(round(float(preds[i][j][0]))), int(
                round(float(preds[i][j][1])))
            if 1 < pX < opt.outputResW - 2 and 1 < pY < opt.outputResH - 2:
                # Quarter-pixel shift toward the higher neighbor; the y shift
                # is rescaled by the input aspect ratio.
                diff = torch.Tensor(
                    (hm[pY][pX + 1] - hm[pY][pX - 1], hm[pY + 1][pX] - hm[pY - 1][pX]))
                diff = diff.sign() * 0.25
                diff[1] = diff[1] * inpH / inpW
                preds[i][j] += diff
    preds_tf = torch.zeros(preds.size())
    for i in range(hms.size(0)):         # Number of samples
        for j in range(hms.size(1)):     # Number of output heatmaps for one sample
            preds_tf[i][j] = transformBoxInvert(
                preds[i][j], pt1[i], pt2[i], inpH, inpW, resH, resW)
    return preds, preds_tf, maxval
def getmap(JsonDir='./val/alphapose-results.json'):
    """Run COCO keypoint evaluation on a results json over the minival list.

    JsonDir: path to the AlphaPose results json.
    Returns (mApAll, mAp5): mean AP over IoU 0.5:0.95 and AP at IoU 0.5
    (both default to 0.01 when no valid precision entries exist).

    Fixes: np.linspace's ``num`` argument must be an integer — the original
    passed the float result of np.round, which raises a TypeError on modern
    NumPy; it is now cast with int().  The image-id list file is also opened
    with a context manager so the handle is always closed.
    """
    ListDir = '../coco-minival500_images.txt'
    annType = ['segm', 'bbox', 'keypoints']
    annType = annType[2]  # specify type here
    prefix = 'person_keypoints' if annType == 'keypoints' else 'instances'
    print('Running evaluation for *%s* results.' % (annType))
    # load Ground_truth
    dataType = 'val2014'
    annFile = '../%s_%s.json' % (prefix, dataType)
    cocoGt = COCO(annFile)
    # load Answer(json)
    resFile = JsonDir
    cocoDt = cocoGt.loadRes(resFile)
    # load List of image ids (single comma-separated line)
    with open(ListDir, 'r') as fin:
        imgIds_str = fin.readline()
    if imgIds_str[-1] == '\n':
        imgIds_str = imgIds_str[:-1]
    imgIds = [int(x) for x in imgIds_str.split(',')]
    # running evaluation
    # FIX: num must be an int for np.linspace.
    iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
    t = np.where(0.5 == iouThrs)[0]
    cocoEval = COCOeval(cocoGt, cocoDt, annType)
    cocoEval.params.imgIds = imgIds
    cocoEval.evaluate()
    cocoEval.accumulate()
    # precision[T, R, K, A, M]: keep area index 0 (all areas).
    score = cocoEval.eval['precision'][:, :, :, 0, :]
    mApAll, mAp5 = 0.01, 0.01
    if len(score[score > -1]) != 0:
        score2 = score[t]
        mApAll = np.mean(score[score > -1])
        mAp5 = np.mean(score2[score2 > -1])
    cocoEval.summarize()
    return mApAll, mAp5
| 6,340 | 29.339713 | 94 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/train_sppe/src/utils/dataset/coco.py | # -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com)
# -----------------------------------------------------
import os
import h5py
from functools import reduce
import torch.utils.data as data
from ..pose import generateSampleBox
from opt import opt
class Mscoco(data.Dataset):
    """MS COCO 17-joint keypoint dataset backed by an HDF5 annotation file.

    Items come from ``generateSampleBox``: a cropped, augmented input image,
    per-joint Gaussian heatmaps and a per-joint supervision mask.  The last
    5887 annotation rows form the validation split.
    """
    def __init__(self, train=True, sigma=1,
                 scale_factor=(0.2, 0.3), rot_factor=40, label_type='Gaussian'):
        self.img_folder = '../data/coco/images'    # root image folders
        self.is_train = train           # training set or test set
        self.inputResH = opt.inputResH
        self.inputResW = opt.inputResW
        self.outputResH = opt.outputResH
        self.outputResW = opt.outputResW
        self.sigma = sigma
        self.scale_factor = scale_factor
        self.rot_factor = rot_factor
        self.label_type = label_type
        self.nJoints_coco = 17
        self.nJoints = 17
        # 1-indexed joints used for accuracy computation.
        self.accIdxs = (1, 2, 3, 4, 5, 6, 7, 8,
                        9, 10, 11, 12, 13, 14, 15, 16, 17)
        # 1-indexed left/right joint pairs for horizontal-flip augmentation.
        self.flipRef = ((2, 3), (4, 5), (6, 7),
                        (8, 9), (10, 11), (12, 13),
                        (14, 15), (16, 17))
        # create train/val split
        with h5py.File('../data/coco/annot_coco.h5', 'r') as annot:
            # train
            self.imgname_coco_train = annot['imgname'][:-5887]
            self.bndbox_coco_train = annot['bndbox'][:-5887]
            self.part_coco_train = annot['part'][:-5887]
            # val
            self.imgname_coco_val = annot['imgname'][-5887:]
            self.bndbox_coco_val = annot['bndbox'][-5887:]
            self.part_coco_val = annot['part'][-5887:]
        self.size_train = self.imgname_coco_train.shape[0]
        self.size_val = self.imgname_coco_val.shape[0]

    def __getitem__(self, index):
        """Return (inp, out, setMask, 'coco') for one (augmented) sample."""
        sf = self.scale_factor
        if self.is_train:
            part = self.part_coco_train[index]
            bndbox = self.bndbox_coco_train[index]
            imgname = self.imgname_coco_train[index]
        else:
            part = self.part_coco_val[index]
            bndbox = self.bndbox_coco_val[index]
            imgname = self.imgname_coco_val[index]
        # Image names are stored as arrays of character codes; decode to str.
        imgname = reduce(lambda x, y: x + y,
                         map(lambda x: chr(int(x)), imgname))
        img_path = os.path.join(self.img_folder, imgname)
        metaData = generateSampleBox(img_path, bndbox, part, self.nJoints,
                                     'coco', sf, self, train=self.is_train)
        inp, out, setMask = metaData
        return inp, out, setMask, 'coco'

    def __len__(self):
        """Number of samples in the active (train or val) split."""
        if self.is_train:
            return self.size_train
        else:
            return self.size_val
| 2,822 | 34.2875 | 80 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/train_sppe/src/predict/p_poseNMS.py | # -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com)
# -----------------------------------------------------
import torch
import json
import os
import numpy as np
''' Constant Configuration '''
delta1 = 1
mu = 1.7
delta2 = 1.3
gamma = 22.48
scoreThreds = 0.15
matchThreds = 5
alpha = 0.1
def pose_nms(bboxes, pose_preds, pose_scores):
    '''
    Parametric Pose NMS: greedily pick the highest-scoring pose, merge every
    pose similar to it, drop them, and repeat until none remain.

    bboxes:      bbox locations (n, 4) [xmin, ymin, xmax, ymax]
    pose_preds:  pose locations (n, 17, 2)
    pose_scores: pose scores    (n, 17, 1)
    Returns a list of dicts with 'keypoints', 'kp_score', 'proposal_score'.

    Thresholds (scoreThreds, matchThreds, gamma, alpha) are module constants.
    NOTE(review): np.delete is applied to torch tensors, which returns numpy
    arrays on recent numpy/torch versions and would break the subsequent
    torch ops — this appears to target an older torch/numpy combination;
    verify before upgrading dependencies.
    '''
    # Clamp scores into (0, 1]; sub-threshold scores keep their slot but
    # carry essentially no weight in the merge.
    pose_scores[pose_scores <= scoreThreds] = 1e-5
    pose_scores[pose_scores > 1] = 1
    final_result = []
    ori_pose_preds = pose_preds.clone()
    ori_pose_scores = pose_scores.clone()
    xmax = bboxes[:, 2]
    xmin = bboxes[:, 0]
    ymax = bboxes[:, 3]
    ymin = bboxes[:, 1]
    widths = xmax - xmin
    heights = ymax - ymin
    # Per-person reference scale used by both similarity criteria.
    ref_dists = alpha * np.maximum(widths, heights)
    nsamples = bboxes.shape[0]
    human_scores = pose_scores.mean(dim=1)
    human_ids = np.arange(nsamples)
    # Do pPose-NMS
    pick = []
    merge_ids = []
    while(human_scores.shape[0] != 0):
        # Pick the one with highest score
        pick_id = torch.argmax(human_scores)
        pick.append(human_ids[pick_id])
        # num_visPart = torch.sum(pose_scores[pick_id] > 0.2)
        # Get numbers of match keypoints by calling PCK_match
        ref_dist = ref_dists[human_ids[pick_id]]
        simi = get_parametric_distance(
            pick_id, pose_preds, pose_scores, ref_dist)
        num_match_keypoints = PCK_match(
            pose_preds[pick_id], pose_preds, ref_dist)
        # Delete humans who have more than matchThreds keypoints overlap and high similarity
        delete_ids = torch.from_numpy(np.arange(human_scores.shape[0]))[
            (simi > gamma) | (num_match_keypoints >= matchThreds)]
        if delete_ids.shape[0] == 0:
            # Always delete at least the picked pose to guarantee progress.
            delete_ids = pick_id
        #else:
        #    delete_ids = torch.from_numpy(delete_ids)
        merge_ids.append(human_ids[delete_ids])
        pose_preds = np.delete(pose_preds, delete_ids, axis=0)
        pose_scores = np.delete(pose_scores, delete_ids, axis=0)
        human_ids = np.delete(human_ids, delete_ids)
        human_scores = np.delete(human_scores, delete_ids, axis=0)
    assert len(merge_ids) == len(pick)
    preds_pick = ori_pose_preds[pick]
    scores_pick = ori_pose_scores[pick]
    for j in range(len(pick)):
        ids = np.arange(17)
        max_score = torch.max(scores_pick[j, ids, 0])
        if max_score < scoreThreds:
            continue
        # Merge poses
        merge_id = merge_ids[j]
        merge_pose, merge_score = p_merge_fast(
            preds_pick[j], ori_pose_preds[merge_id], ori_pose_scores[merge_id], ref_dists[pick[j]])
        max_score = torch.max(merge_score[ids])
        if max_score < scoreThreds:
            continue
        xmax = max(merge_pose[:, 0])
        xmin = min(merge_pose[:, 0])
        ymax = max(merge_pose[:, 1])
        ymin = min(merge_pose[:, 1])
        # Reject degenerate (too-small-area) merged poses.
        if (1.5 ** 2 * (xmax - xmin) * (ymax - ymin) < 40 * 40.5):
            continue
        final_result.append({
            'keypoints': merge_pose - 0.3,
            'kp_score': merge_score,
            'proposal_score': torch.mean(merge_score) + 1.25 * max(merge_score)
        })
    return final_result
def filter_result(args):
    """Worker-style variant of the per-pick merge/filter step of pose_nms.

    args: tuple (score_pick, merge_id, pred_pick, pick, bbox_score_pick).
    Returns a result dict ('keypoints', 'kp_score', 'proposal_score') or
    None when the merged pose is too weak or its area too small.

    NOTE(review): reads ``ori_pose_preds`` / ``ori_pose_scores`` /
    ``ref_dists`` as module globals — the caller must set them before use.
    """
    score_pick, merge_id, pred_pick, pick, bbox_score_pick = args
    global ori_pose_preds, ori_pose_scores, ref_dists
    ids = np.arange(17)
    max_score = torch.max(score_pick[ids, 0])
    if max_score < scoreThreds:
        return None
    # Merge poses
    merge_pose, merge_score = p_merge_fast(
        pred_pick, ori_pose_preds[merge_id], ori_pose_scores[merge_id], ref_dists[pick])
    max_score = torch.max(merge_score[ids])
    if max_score < scoreThreds:
        return None
    xmax = max(merge_pose[:, 0])
    xmin = min(merge_pose[:, 0])
    ymax = max(merge_pose[:, 1])
    ymin = min(merge_pose[:, 1])
    # Reject degenerate (too-small-area) merged poses.
    if (1.5 ** 2 * (xmax - xmin) * (ymax - ymin) < 40 * 40.5):
        return None
    return {
        'keypoints': merge_pose - 0.3,
        'kp_score': merge_score,
        'proposal_score': torch.mean(merge_score) + bbox_score_pick + 1.25 * max(merge_score)
    }
def p_merge(ref_pose, cluster_preds, cluster_scores, ref_dist):
    '''
    Score-weighted pose merging (per-joint loop variant).
    INPUT:
        ref_pose: reference pose              -- [17, 2]
        cluster_preds: redundant poses        -- [n, 17, 2]
        cluster_scores: redundant poses score -- [n, 17, 1]
        ref_dist: reference scale             -- Constant (capped at 15)
    OUTPUT:
        final_pose: merged pose               -- [17, 2]
        final_score: merged score             -- [17]

    NOTE(review): 2-D inputs are promoted with in-place unsqueeze_, mutating
    the caller's tensors — confirm callers do not reuse them afterwards.
    '''
    # Per-pose, per-joint distance to the reference pose.
    dist = torch.sqrt(torch.sum(
        torch.pow(ref_pose[np.newaxis, :] - cluster_preds, 2),
        dim=2
    ))  # [n, 17]
    kp_num = 17
    ref_dist = min(ref_dist, 15)
    # Only joints within ref_dist of the reference contribute to the merge.
    mask = (dist <= ref_dist)
    final_pose = torch.zeros(kp_num, 2)
    final_score = torch.zeros(kp_num)
    if cluster_preds.dim() == 2:
        cluster_preds.unsqueeze_(0)
        cluster_scores.unsqueeze_(0)
    if mask.dim() == 1:
        mask.unsqueeze_(0)
    for i in range(kp_num):
        cluster_joint_scores = cluster_scores[:, i][mask[:, i]]  # [k, 1]
        cluster_joint_location = cluster_preds[:, i, :][mask[:, i].unsqueeze(
            -1).repeat(1, 2)].view((torch.sum(mask[:, i]), -1))
        # Get an normalized score
        normed_scores = cluster_joint_scores / torch.sum(cluster_joint_scores)
        # Merge poses by a weighted sum
        final_pose[i, 0] = torch.dot(
            cluster_joint_location[:, 0], normed_scores.squeeze(-1))
        final_pose[i, 1] = torch.dot(
            cluster_joint_location[:, 1], normed_scores.squeeze(-1))
        final_score[i] = torch.dot(cluster_joint_scores.transpose(
            0, 1).squeeze(0), normed_scores.squeeze(-1))
    return final_pose, final_score
def p_merge_fast(ref_pose, cluster_preds, cluster_scores, ref_dist):
    '''
    Score-weighted pose merging (vectorised variant of ``p_merge``).
    INPUT:
        ref_pose: reference pose              -- [17, 2]
        cluster_preds: redundant poses        -- [n, 17, 2]
        cluster_scores: redundant poses score -- [n, 17, 1]
        ref_dist: reference scale             -- Constant (capped at 15)
    OUTPUT:
        final_pose: merged pose               -- [17, 2]
        final_score: merged score             -- [17]

    NOTE(review): 2-D inputs are promoted with in-place unsqueeze_ (mutates
    the caller's tensors); a joint whose masked score sum is 0 would divide
    by zero — presumably the reference pose is always within ref_dist of
    itself, so the sum stays positive; verify.
    '''
    # Per-pose, per-joint distance to the reference pose.
    dist = torch.sqrt(torch.sum(
        torch.pow(ref_pose[np.newaxis, :] - cluster_preds, 2),
        dim=2
    ))
    kp_num = 17
    ref_dist = min(ref_dist, 15)
    # Only joints within ref_dist of the reference contribute to the merge.
    mask = (dist <= ref_dist)
    final_pose = torch.zeros(kp_num, 2)
    final_score = torch.zeros(kp_num)
    if cluster_preds.dim() == 2:
        cluster_preds.unsqueeze_(0)
        cluster_scores.unsqueeze_(0)
    if mask.dim() == 1:
        mask.unsqueeze_(0)
    # Weighted Merge: zero out-of-range scores, normalize over poses, and
    # take the score-weighted average of locations.
    masked_scores = cluster_scores.mul(mask.float().unsqueeze(-1))
    normed_scores = masked_scores / torch.sum(masked_scores, dim=0)
    final_pose = torch.mul(
        cluster_preds, normed_scores.repeat(1, 1, 2)).sum(dim=0)
    final_score = torch.mul(masked_scores, normed_scores).sum(dim=0)
    return final_pose, final_score
def get_parametric_distance(i, all_preds, keypoint_scores, ref_dist):
    """Soft similarity between pose ``i`` and every pose in ``all_preds``.

    Combines a confidence term (tanh-product, only for joints within 1 px of
    the picked pose's joint) and a spatial term exp(-dist / delta2),
    weighted by ``mu`` (delta1/delta2/mu are module constants).  Returns a
    1-D tensor of length n (higher = more similar).

    NOTE(review): ``keypoint_scores.squeeze_()`` mutates the caller's tensor
    in place — callers must not rely on its original shape afterwards.
    """
    pick_preds = all_preds[i]
    pred_scores = keypoint_scores[i]
    dist = torch.sqrt(torch.sum(
        torch.pow(pick_preds[np.newaxis, :] - all_preds, 2),
        dim=2
    ))
    mask = (dist <= 1)
    # Define a keypoints distance
    score_dists = torch.zeros(all_preds.shape[0], 17)
    keypoint_scores.squeeze_()
    if keypoint_scores.dim() == 1:
        keypoint_scores.unsqueeze_(0)
    if pred_scores.dim() == 1:
        pred_scores.unsqueeze_(1)
    # The predicted scores are repeated up to do broadcast
    pred_scores = pred_scores.repeat(1, all_preds.shape[0]).transpose(0, 1)
    score_dists[mask] = torch.tanh(
        pred_scores[mask] / delta1) * torch.tanh(keypoint_scores[mask] / delta1)
    point_dist = torch.exp((-1) * dist / delta2)
    final_dist = torch.sum(score_dists, dim=1) + mu * \
        torch.sum(point_dist, dim=1)
    return final_dist
def PCK_match(pick_pred, all_preds, ref_dist):
    """Count, for each candidate pose, how many of its 17 joints fall within
    ``ref_dist`` (capped at 7) of the picked pose's corresponding joints.

    pick_pred: (17, 2); all_preds: (n, 17, 2).  Returns an (n,) tensor.
    """
    joint_dists = torch.sqrt(torch.sum(
        torch.pow(pick_pred[np.newaxis, :] - all_preds, 2),
        dim=2
    ))
    radius = min(ref_dist, 7)
    return torch.sum(joint_dists / radius <= 1, dim=1)
def write_json(all_results, outputpath, for_eval=False):
    '''
    Serialize pose results to <outputpath>/alphapose-results.json.

    all_results: list of per-image dicts with 'imgname' and 'result'
    outputpath: destination directory
    for_eval: when True, emit numeric COCO image ids parsed from the file
              name instead of the bare file name
    '''
    entries = []
    for im_res in all_results:
        im_name = im_res['imgname']
        for human in im_res['result']:
            kp_preds = human['keypoints']
            kp_scores = human['kp_score']
            pro_scores = human['proposal_score']
            # Flatten to the COCO [x1, y1, s1, x2, y2, s2, ...] layout.
            flat = []
            for n in range(kp_scores.shape[0]):
                flat.append(float(kp_preds[n, 0]))
                flat.append(float(kp_preds[n, 1]))
                flat.append(float(kp_scores[n]))
            if for_eval:
                image_id = int(im_name.split(
                    '/')[-1].split('.')[0].split('_')[-1])
            else:
                image_id = im_name.split('/')[-1]
            entries.append({
                'image_id': image_id,
                'category_id': 1,
                'keypoints': flat,
                'score': float(pro_scores),
            })
    with open(os.path.join(outputpath, 'alphapose-results.json'), 'w') as json_file:
        json_file.write(json.dumps(entries))
| 9,924 | 30.60828 | 99 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/train_sppe/src/predict/annot/coco_minival.py | # -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com)
# -----------------------------------------------------
import os
import h5py
import torch
import torch.utils.data as data
from train_sppe.src.utils.img import (load_image, cropBox)
from opt import opt
class Mscoco_minival(data.Dataset):
    """COCO minival person boxes for SPPE evaluation.

    Each item is a mean-subtracted crop of one detected person box (enlarged
    by 20-30%), plus the box, image name, and the metadata needed to map
    heatmap predictions back to image space.
    """
    def __init__(self, annoSet='coco-minival-images-newnms/test-dev'):
        self.img_folder = '../data/coco/images'    # root image folders
        self.annot = dict()
        # Read in annotation information from hdf5 file
        tags = ['xmin', 'ymin', 'xmax', 'ymax']
        with h5py.File('./predict/annot/' + annoSet + '.h5', 'r') as a:
            for tag in tags:
                self.annot[tag] = a[tag][:]
        # Load in image file names
        with open('./predict/annot/' + annoSet + '_images.txt', 'r') as f:
            self.images = f.readlines()
        self.images = list(map(lambda x: x.strip('\n'), self.images))
        assert len(self.images) == self.annot['xmin'].shape[0]
        self.size = len(self.images)
        # 1-indexed left/right joint pairs (used by flip-test callers).
        self.flipRef = ((2, 3), (4, 5), (6, 7),
                        (8, 9), (10, 11), (12, 13),
                        (14, 15), (16, 17))
        self.year = 2017

    def __getitem__(self, index):
        """Return (inp, box, imgname, metaData) for one detected person box."""
        # 2014-style names embed the split as a prefix; strip it otherwise.
        if self.year == 2014:
            imgname = self.images[index]
        else:
            imgname = self.images[index].split('_')[2]
        img_path = os.path.join(self.img_folder, imgname)
        img = load_image(img_path)
        ori_img = img.clone()
        # Subtract the per-channel dataset mean (network input normalization).
        img[0].add_(-0.406)
        img[1].add_(-0.457)
        img[2].add_(-0.480)
        imght = img.size(1)
        imgwidth = img.size(2)
        upLeft = torch.Tensor(
            (float(self.annot['xmin'][index]), float(self.annot['ymin'][index])))
        bottomRight = torch.Tensor(
            (float(self.annot['xmax'][index]), float(self.annot['ymax'][index])))
        ht = bottomRight[1] - upLeft[1]
        width = bottomRight[0] - upLeft[0]
        # Enlarge small boxes more aggressively before cropping.
        if width > 100:
            scaleRate = 0.2
        else:
            scaleRate = 0.3
        upLeft[0] = max(0, upLeft[0] - width * scaleRate / 2)
        upLeft[1] = max(0, upLeft[1] - ht * scaleRate / 2)
        bottomRight[0] = max(
            min(imgwidth - 1, bottomRight[0] + width * scaleRate / 2), upLeft[0] + 5)
        bottomRight[1] = max(
            min(imght - 1, bottomRight[1] + ht * scaleRate / 2), upLeft[1] + 5)
        inp = cropBox(img, upLeft, bottomRight, opt.inputResH, opt.inputResW)
        # Un-normalized crop kept alongside for visualization/debugging.
        ori_inp = cropBox(ori_img, upLeft, bottomRight,
                          opt.inputResH, opt.inputResW)
        metaData = (
            upLeft,
            bottomRight,
            ori_inp
        )
        box = torch.zeros(4)
        box[0] = upLeft[0]
        box[1] = upLeft[1]
        box[2] = bottomRight[0]
        box[3] = bottomRight[1]
        return inp, box, imgname, metaData

    def __len__(self):
        """Number of detected person boxes in the annotation set."""
        return self.size
| 3,055 | 32.955556 | 85 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/yolo/video_demo.py | from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
from .util import *
from .darknet import Darknet
from .preprocess import prep_image, inp_to_image, letterbox_image
import pandas as pd
import random
import pickle as pkl
import argparse
def get_test_input(input_dim, CUDA):
    """Load the bundled sample image, resize it to the network input size,
    and return a normalized (1, 3, H, W) float Variable (on GPU if CUDA)."""
    bgr = cv2.imread("dog-cycle-car.png")
    bgr = cv2.resize(bgr, (input_dim, input_dim))
    # BGR -> RGB, HWC -> CHW, add batch dim, scale to [0, 1].
    chw = bgr[:, :, ::-1].transpose((2, 0, 1))
    batch = chw[np.newaxis, :, :, :] / 255.0
    tensor = Variable(torch.from_numpy(batch).float())
    if CUDA:
        tensor = tensor.cuda()
    return tensor
def prep_image(img, inp_dim):
    """
    Prepare image for inputting to the neural network.
    Returns a Variable

    Letterboxes ``img`` to (inp_dim, inp_dim), converts BGR HWC uint8 to a
    normalized RGB (1, 3, H, W) float tensor, and also returns the original
    image and its (width, height).
    """
    orig_im = img
    dim = orig_im.shape[1], orig_im.shape[0]
    boxed = letterbox_image(orig_im, (inp_dim, inp_dim))
    chw = boxed[:, :, ::-1].transpose((2, 0, 1)).copy()
    tensor = torch.from_numpy(chw).float().div(255.0).unsqueeze(0)
    return tensor, orig_im, dim
def write(x, img):
    """Draw one YOLO detection ``x`` ([idx, x1, y1, x2, y2, ..., cls]) on
    ``img`` (in place) with a random color and a class-name label.

    Relies on module-level ``classes`` (names) and ``colors`` (palette).

    Fix: the corner tuples were built from 0-dim tensors
    (``tuple(x[1:3].int())``); modern OpenCV requires plain Python ints for
    point arguments, so the coordinates are now cast with int() (which
    truncates exactly like the original ``.int()``).
    """
    c1 = (int(x[1]), int(x[2]))
    c2 = (int(x[3]), int(x[4]))
    cls = int(x[-1])
    label = "{0}".format(classes[cls])
    color = random.choice(colors)
    cv2.rectangle(img, c1, c2, color, 1)
    t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
    # Filled background box behind the label text.
    c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
    cv2.rectangle(img, c1, c2, color, -1)
    cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4),
                cv2.FONT_HERSHEY_PLAIN, 1, [225, 255, 255], 1)
    return img
def arg_parse():
    """
    Parse arguements to the detect module

    Builds the CLI for the YOLO v3 video demo and parses sys.argv.
    """
    parser = argparse.ArgumentParser(description='YOLO v3 Video Detection Module')
    parser.add_argument("--video", dest='video', type=str, default="video.avi",
                        help="Video to run detection upon")
    parser.add_argument("--dataset", dest="dataset", default="pascal",
                        help="Dataset on which the network has been trained")
    parser.add_argument("--confidence", dest="confidence", default=0.5,
                        help="Object Confidence to filter predictions")
    parser.add_argument("--nms_thresh", dest="nms_thresh", default=0.4,
                        help="NMS Threshhold")
    parser.add_argument("--cfg", dest='cfgfile', type=str, default="cfg/yolov3-spp.cfg",
                        help="Config file")
    parser.add_argument("--weights", dest='weightsfile', type=str, default="yolov3-spp.weights",
                        help="weightsfile")
    parser.add_argument("--reso", dest='reso', type=str, default="416",
                        help="Input resolution of the network. Increase to increase accuracy. Decrease to increase speed")
    return parser.parse_args()
# Script entry point: run YOLO v3 detection on a video stream, rescale the
# detections back to original frame coordinates, and display annotated frames
# until the stream ends or 'q' is pressed.
if __name__ == '__main__':
    args = arg_parse()
    confidence = float(args.confidence)
    nms_thesh = float(args.nms_thresh)
    start = 0
    CUDA = torch.cuda.is_available()
    num_classes = 80
    CUDA = torch.cuda.is_available()
    bbox_attrs = 5 + num_classes
    print("Loading network.....")
    model = Darknet(args.cfgfile)
    model.load_weights(args.weightsfile)
    print("Network successfully loaded")
    model.net_info["height"] = args.reso
    inp_dim = int(model.net_info["height"])
    # YOLO's strides require the input resolution to be a multiple of 32.
    assert inp_dim % 32 == 0
    assert inp_dim > 32
    if CUDA:
        model.cuda()
    # Warm-up forward pass on a sample image.
    model(get_test_input(inp_dim, CUDA), CUDA)
    model.eval()
    videofile = args.video
    cap = cv2.VideoCapture(videofile)
    assert cap.isOpened(), 'Cannot capture source'
    frames = 0
    start = time.time()
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            img, orig_im, dim = prep_image(frame, inp_dim)
            # Original (w, h), duplicated for the two box corners.
            im_dim = torch.FloatTensor(dim).repeat(1,2)
            if CUDA:
                im_dim = im_dim.cuda()
                img = img.cuda()
            with torch.no_grad():
                output = model(Variable(img), CUDA)
            output = write_results(output, confidence, num_classes, nms = True, nms_conf = nms_thesh)
            # write_results returns an int (0) when nothing was detected.
            if type(output) == int:
                frames += 1
                print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
                cv2.imshow("frame", orig_im)
                key = cv2.waitKey(1)
                if key & 0xFF == ord('q'):
                    break
                continue
            # Undo the letterbox: remove padding, rescale, and clamp boxes to
            # the original frame bounds.
            im_dim = im_dim.repeat(output.size(0), 1)
            scaling_factor = torch.min(inp_dim/im_dim,1)[0].view(-1,1)
            output[:,[1,3]] -= (inp_dim - scaling_factor*im_dim[:,0].view(-1,1))/2
            output[:,[2,4]] -= (inp_dim - scaling_factor*im_dim[:,1].view(-1,1))/2
            output[:,1:5] /= scaling_factor
            for i in range(output.shape[0]):
                output[i, [1,3]] = torch.clamp(output[i, [1,3]], 0.0, im_dim[i,0])
                output[i, [2,4]] = torch.clamp(output[i, [2,4]], 0.0, im_dim[i,1])
            # NOTE(review): class names and palette are reloaded every frame;
            # hoisting these above the loop would avoid per-frame file I/O.
            classes = load_classes('data/coco.names')
            colors = pkl.load(open("pallete", "rb"))
            list(map(lambda x: write(x, orig_im), output))
            cv2.imshow("frame", orig_im)
            key = cv2.waitKey(1)
            if key & 0xFF == ord('q'):
                break
            frames += 1
            print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
        else:
            break
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/yolo/detect.py | from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
from .util import *
import argparse
import os
import os.path as osp
from .darknet import Darknet
from .preprocess import prep_image, inp_to_image
import pandas as pd
import random
import pickle as pkl
import itertools
if __name__ == '__main__':
    # --- Hard-coded demo configuration ---
    scales = "1,2,3"
    images = "imgs/messi.jpg"
    batch_size = 1
    confidence = 0.5
    nms_thesh = 0.4

    CUDA = torch.cuda.is_available()

    num_classes = 80
    classes = load_classes('data/coco.names')

    #Set up the neural network
    print("Loading network.....")
    model = Darknet("cfg/yolov3-spp.cfg")
    model.load_weights("yolov3-spp.weights")
    print("Network successfully loaded")

    # Input resolution must be a positive multiple of 32 (YOLO stride).
    model.net_info["height"] = "608"
    inp_dim = int(model.net_info["height"])
    assert inp_dim % 32 == 0
    assert inp_dim > 32

    #If there's a GPU availible, put the model on GPU
    if CUDA:
        model.cuda()

    #Set the model in evaluation mode
    model.eval()

    #Detection phase
    # NOTE(review): osp.join/realpath never raise FileNotFoundError (they do
    # not touch the filesystem), so this except branch is effectively dead;
    # a missing file would only surface later inside prep_image.
    try:
        imlist = []
        imlist.append(osp.join(osp.realpath('.'), images))
    except FileNotFoundError:
        print ("No file or directory with the name {}".format(images))
        exit()

    batches = list(map(prep_image, imlist, [inp_dim for x in range(len(imlist))]))
    im_batches = [x[0] for x in batches]
    orig_ims = [x[1] for x in batches]
    im_dim_list = [x[2] for x in batches]
    im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)

    if CUDA:
        im_dim_list = im_dim_list.cuda()

    for batch in im_batches:
        #load the image
        if CUDA:
            batch = batch.cuda()

        with torch.no_grad():
            prediction = model(Variable(batch), CUDA)

        prediction = write_results(prediction, confidence, num_classes, nms=True, nms_conf=nms_thesh)
        output = prediction

    if CUDA:
        torch.cuda.synchronize()

    # `output` is unbound only if im_batches was empty.
    try:
        output
    except NameError:
        print("No detections were made")
        exit()

    print(im_dim_list.shape)
    # Pick the original-image dimensions for each detection's batch index.
    im_dim_list = torch.index_select(im_dim_list, 0, output[:,0].long())

    scaling_factor = torch.min(inp_dim/im_dim_list,1)[0].view(-1,1)

    # Undo the letterbox transform: shift and rescale boxes back to image coords.
    output[:,[1,3]] -= (inp_dim - scaling_factor*im_dim_list[:,0].view(-1,1))/2
    output[:,[2,4]] -= (inp_dim - scaling_factor*im_dim_list[:,1].view(-1,1))/2

    output[:,1:5] /= scaling_factor

    # Clamp boxes to the image boundaries.
    for i in range(output.shape[0]):
        output[i, [1,3]] = torch.clamp(output[i, [1,3]], 0.0, im_dim_list[i,0])
        output[i, [2,4]] = torch.clamp(output[i, [2,4]], 0.0, im_dim_list[i,1])

    print(output)
    print(output.shape)
| 2,727 | 25.230769 | 101 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/yolo/cam_demo.py | from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
from .util import *
from .darknet import Darknet
from .preprocess import prep_image, inp_to_image
import pandas as pd
import random
import argparse
import pickle as pkl
def get_test_input(input_dim, CUDA):
    """Load the bundled test image as a (1, 3, input_dim, input_dim) Variable."""
    bgr = cv2.imread("imgs/messi.jpg")
    bgr = cv2.resize(bgr, (input_dim, input_dim))
    # BGR -> RGB, HWC -> CHW, add a batch axis, scale into [0, 1].
    chw = bgr[:, :, ::-1].transpose((2, 0, 1))
    batch = chw[np.newaxis, :, :, :] / 255.0
    tensor = Variable(torch.from_numpy(batch).float())
    return tensor.cuda() if CUDA else tensor
def prep_image(img, inp_dim):
    """
    Prepare image for inputting to the neural network.

    Returns a Variable
    """
    original = img
    # (width, height) of the source frame.
    size_wh = (original.shape[1], original.shape[0])
    resized = cv2.resize(original, (inp_dim, inp_dim))
    # BGR -> RGB, HWC -> CHW; .copy() so torch owns contiguous memory.
    tensor = torch.from_numpy(resized[:, :, ::-1].transpose((2, 0, 1)).copy())
    tensor = tensor.float().div(255.0).unsqueeze(0)
    return tensor, original, size_wh
def write(x, img):
    """Draw one detection row `x` (box + class label) onto `img` in place."""
    top_left = tuple(x[1:3].int())
    bottom_right = tuple(x[3:5].int())
    cls = int(x[-1])
    label = "{0}".format(classes[cls])
    color = random.choice(colors)
    # Bounding-box outline.
    cv2.rectangle(img, top_left, bottom_right, color, 1)
    # Filled label background sized to the rendered text.
    t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
    label_br = (top_left[0] + t_size[0] + 3, top_left[1] + t_size[1] + 4)
    cv2.rectangle(img, top_left, label_br, color, -1)
    cv2.putText(img, label, (top_left[0], top_left[1] + t_size[1] + 4),
                cv2.FONT_HERSHEY_PLAIN, 1, [225, 255, 255], 1)
    return img
def arg_parse(argv=None):
    """
    Parse arguements to the detect module

    Parameters
    ----------
    argv : list[str] | None
        Argument list to parse. Defaults to None, in which case argparse
        reads sys.argv[1:] -- preserving the original command-line behaviour
        while making the function testable/embeddable.
    """
    parser = argparse.ArgumentParser(description='YOLO v3 Cam Demo')
    parser.add_argument("--confidence", dest="confidence",
                        help="Object Confidence to filter predictions", default=0.25)
    parser.add_argument("--nms_thresh", dest="nms_thresh",
                        help="NMS Threshhold", default=0.4)
    parser.add_argument("--reso", dest='reso',
                        help="Input resolution of the network. Increase to increase accuracy. Decrease to increase speed",
                        default="160", type=str)
    return parser.parse_args(argv)
if __name__ == '__main__':
    cfgfile = "cfg/yolov3-spp.cfg"
    weightsfile = "yolov3-spp.weights"
    num_classes = 80

    args = arg_parse()
    confidence = float(args.confidence)
    nms_thesh = float(args.nms_thresh)
    start = 0
    CUDA = torch.cuda.is_available()

    num_classes = 80
    bbox_attrs = 5 + num_classes

    model = Darknet(cfgfile)
    model.load_weights(weightsfile)

    # Input resolution must be a positive multiple of 32 (YOLO stride).
    model.net_info["height"] = args.reso
    inp_dim = int(model.net_info["height"])

    assert inp_dim % 32 == 0
    assert inp_dim > 32

    if CUDA:
        model.cuda()

    model.eval()

    videofile = 'video.avi'

    cap = cv2.VideoCapture(0)  # device 0 = default webcam

    assert cap.isOpened(), 'Cannot capture source'

    frames = 0
    start = time.time()
    while cap.isOpened():

        ret, frame = cap.read()
        if ret:

            img, orig_im, dim = prep_image(frame, inp_dim)

            # BUGFIX: this assignment was commented out, but im_dim is used
            # below when CUDA is available -- which raised a NameError on any
            # GPU machine. Restore the definition.
            im_dim = torch.FloatTensor(dim).repeat(1, 2)

            if CUDA:
                im_dim = im_dim.cuda()
                img = img.cuda()

            output = model(Variable(img), CUDA)
            output = write_results(output, confidence, num_classes, nms = True, nms_conf = nms_thesh)

            # write_results signals "no detections" with an int (0).
            if type(output) == int:
                frames += 1
                print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
                cv2.imshow("frame", orig_im)
                key = cv2.waitKey(1)
                if key & 0xFF == ord('q'):
                    break
                continue

            # Clamp to the network input range, then rescale boxes to the
            # original frame resolution.
            output[:,1:5] = torch.clamp(output[:,1:5], 0.0, float(inp_dim))/inp_dim

            output[:,[1,3]] *= frame.shape[1]
            output[:,[2,4]] *= frame.shape[0]

            classes = load_classes('data/coco.names')
            colors = pkl.load(open("pallete", "rb"))

            list(map(lambda x: write(x, orig_im), output))

            cv2.imshow("frame", orig_im)
            key = cv2.waitKey(1)
            if key & 0xFF == ord('q'):
                break
            frames += 1
            print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))

        else:
            break
| 4,682 | 26.710059 | 126 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/yolo/video_demo_half.py | from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
from .util import *
from .darknet import Darknet
from .preprocess import prep_image, inp_to_image, letterbox_image
import pandas as pd
import random
import pickle as pkl
import argparse
def get_test_input(input_dim, CUDA):
    """Load dog-cycle-car.png resized to the network input as a (1, 3, H, W) Variable."""
    bgr = cv2.imread("dog-cycle-car.png")
    bgr = cv2.resize(bgr, (input_dim, input_dim))
    # BGR -> RGB, HWC -> CHW, add batch axis, scale into [0, 1].
    chw = bgr[:, :, ::-1].transpose((2, 0, 1))
    batch = chw[np.newaxis, :, :, :] / 255.0
    tensor = Variable(torch.from_numpy(batch).float())
    return tensor.cuda() if CUDA else tensor
def prep_image(img, inp_dim):
    """
    Prepare image for inputting to the neural network.

    Returns a Variable
    """
    source = img
    dims_wh = (source.shape[1], source.shape[0])
    # Aspect-preserving resize onto a padded square canvas.
    canvas = letterbox_image(source, (inp_dim, inp_dim))
    arr = canvas[:, :, ::-1].transpose((2, 0, 1)).copy()
    tensor = torch.from_numpy(arr).float().div(255.0).unsqueeze(0)
    return tensor, source, dims_wh
def write(x, img):
    """Render one detection row onto `img`: box outline plus class label."""
    p1 = tuple(x[1:3].int())
    p2 = tuple(x[3:5].int())
    cls = int(x[-1])
    label = "{0}".format(classes[cls])
    color = random.choice(colors)
    cv2.rectangle(img, p1, p2, color, 1)
    # Compute the rendered text extent so the label background fits it.
    text_w, text_h = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
    corner = (p1[0] + text_w + 3, p1[1] + text_h + 4)
    cv2.rectangle(img, p1, corner, color, -1)
    cv2.putText(img, label, (p1[0], p1[1] + text_h + 4),
                cv2.FONT_HERSHEY_PLAIN, 1, [225, 255, 255], 1)
    return img
def arg_parse(argv=None):
    """
    Parse arguements to the detect module

    Parameters
    ----------
    argv : list[str] | None
        Argument list to parse. Defaults to None, in which case argparse
        reads sys.argv[1:] -- preserving the original command-line behaviour
        while making the function testable/embeddable.
    """
    parser = argparse.ArgumentParser(description='YOLO v2 Video Detection Module')

    parser.add_argument("--video", dest='video',
                        help="Video to run detection upon",
                        default="video.avi", type=str)
    parser.add_argument("--dataset", dest="dataset",
                        help="Dataset on which the network has been trained",
                        default="pascal")
    parser.add_argument("--confidence", dest="confidence",
                        help="Object Confidence to filter predictions", default=0.5)
    parser.add_argument("--nms_thresh", dest="nms_thresh",
                        help="NMS Threshhold", default=0.4)
    parser.add_argument("--cfg", dest='cfgfile',
                        help="Config file",
                        default="cfg/yolov3-spp.cfg", type=str)
    parser.add_argument("--weights", dest='weightsfile',
                        help="weightsfile",
                        default="yolov3-spp.weights", type=str)
    parser.add_argument("--reso", dest='reso',
                        help="Input resolution of the network. Increase to increase accuracy. Decrease to increase speed",
                        default="416", type=str)
    return parser.parse_args(argv)
if __name__ == '__main__':
    args = arg_parse()
    confidence = float(args.confidence)
    nms_thesh = float(args.nms_thresh)
    start = 0

    # FIX: CUDA was assigned twice in a row; a single assignment suffices.
    CUDA = torch.cuda.is_available()

    num_classes = 80
    bbox_attrs = 5 + num_classes

    print("Loading network.....")
    model = Darknet(args.cfgfile)
    model.load_weights(args.weightsfile)
    print("Network successfully loaded")

    # Input resolution must be a positive multiple of 32 (YOLO stride).
    model.net_info["height"] = args.reso
    inp_dim = int(model.net_info["height"])
    assert inp_dim % 32 == 0
    assert inp_dim > 32

    if CUDA:
        # Half-precision (FP16) inference for speed on GPU; warm-up pass
        # initialises CUDA kernels before timing starts.
        model.cuda().half()
        model(get_test_input(inp_dim, CUDA), CUDA)

    model.eval()

    videofile = 'video.avi'

    cap = cv2.VideoCapture(videofile)

    assert cap.isOpened(), 'Cannot capture source'

    frames = 0
    start = time.time()
    while cap.isOpened():

        ret, frame = cap.read()
        if ret:

            img, orig_im, dim = prep_image(frame, inp_dim)
            im_dim = torch.FloatTensor(dim).repeat(1,2)

            if CUDA:
                img = img.cuda().half()
                im_dim = im_dim.half().cuda()
                # Swap in the half-precision implementations for the rest of the loop.
                write_results = write_results_half
                predict_transform = predict_transform_half

            # NOTE(review): `volatile` is a no-op on modern PyTorch; prefer
            # torch.no_grad() if this script is ever modernised.
            output = model(Variable(img, volatile = True), CUDA)
            output = write_results(output, confidence, num_classes, nms = True, nms_conf = nms_thesh)

            # An int return (0) means no surviving detections this frame.
            if type(output) == int:
                frames += 1
                print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
                cv2.imshow("frame", orig_im)
                key = cv2.waitKey(1)
                if key & 0xFF == ord('q'):
                    break
                continue

            # Undo the letterbox transform: map boxes back to frame coordinates.
            im_dim = im_dim.repeat(output.size(0), 1)
            scaling_factor = torch.min(inp_dim/im_dim,1)[0].view(-1,1)

            output[:,[1,3]] -= (inp_dim - scaling_factor*im_dim[:,0].view(-1,1))/2
            output[:,[2,4]] -= (inp_dim - scaling_factor*im_dim[:,1].view(-1,1))/2

            output[:,1:5] /= scaling_factor

            # Clamp boxes to the frame boundaries.
            for i in range(output.shape[0]):
                output[i, [1,3]] = torch.clamp(output[i, [1,3]], 0.0, im_dim[i,0])
                output[i, [2,4]] = torch.clamp(output[i, [2,4]], 0.0, im_dim[i,1])

            classes = load_classes('data/coco.names')
            colors = pkl.load(open("pallete", "rb"))

            list(map(lambda x: write(x, orig_im), output))

            cv2.imshow("frame", orig_im)
            key = cv2.waitKey(1)
            if key & 0xFF == ord('q'):
                break
            frames += 1
            print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))

        else:
            break
| 5,937 | 30.252632 | 130 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/yolo/bbox.py | from __future__ import division
import torch
import random
import numpy as np
import cv2
def confidence_filter(result, confidence):
    """Zero out every prediction whose objectness score (index 4) is <= confidence."""
    keep = (result[:, :, 4] > confidence).float().unsqueeze(2)
    return result * keep
def confidence_filter_cls(result, confidence):
    """
    Zero out predictions unless the objectness score exceeds `confidence`
    OR the best class score is extremely high (> 0.995).

    Assumes `result` has 25 attributes per box: 4 box coordinates, the
    objectness score at index 4, and 20 class scores at indices 5..24
    (so index 25 of the concatenated tensor is the appended max score).
    """
    # BUGFIX: keepdim=True is required so max_scores has shape (B, N, 1) and
    # can be concatenated along dim 2; without it torch >= 0.2 raises a
    # dimension-mismatch error here.
    max_scores = torch.max(result[:, :, 5:25], 2, keepdim=True)[0]
    res = torch.cat((result, max_scores), 2)
    print(res.shape)

    cond_1 = (res[:, :, 4] > confidence).float()
    cond_2 = (res[:, :, 25] > 0.995).float()  # appended max class score

    # Logical OR of the two conditions via a clamped sum.
    conf = torch.clamp(cond_1 + cond_2, 0.0, 1.0).unsqueeze(2)
    return result * conf
def get_abs_coord(box):
    """Convert a (cx, cy, w, h, ...) box to corner coordinates (x1, y1, x2, y2).

    Note: mutates box[2] / box[3] in place, forcing them non-negative.
    """
    box[2], box[3] = abs(box[2]), abs(box[3])
    half_w = box[2] / 2
    half_h = box[3] / 2
    return (box[0] - half_w - 1, box[1] - half_h - 1,
            box[0] + half_w - 1, box[1] + half_h - 1)
def sanity_fix(box):
    """Ensure (x1, y1) <= (x2, y2) by swapping coordinates in place."""
    if box[0] > box[2]:
        box[0], box[2] = box[2], box[0]
    if box[1] > box[3]:
        box[1], box[3] = box[3], box[1]
    return box
def bbox_iou(box1, box2):
    """
    Returns the IoU of two bounding boxes

    box1, box2: tensors whose first four columns are corner coordinates
    (x1, y1, x2, y2); box1 broadcasts against box2 row-wise.
    """
    #Get the coordinates of bounding boxes
    b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
    b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]

    #get the corrdinates of the intersection rectangle
    inter_rect_x1 = torch.max(b1_x1, b2_x1)
    inter_rect_y1 = torch.max(b1_y1, b2_y1)
    inter_rect_x2 = torch.min(b1_x2, b2_x2)
    inter_rect_y2 = torch.min(b1_y2, b2_y2)

    # Intersection area. BUGFIX: use clamp(min=0) instead of comparing against
    # torch.zeros(...).cuda(), which crashed on CPU-only machines and
    # allocated a fresh tensor on every call.
    inter_area = (torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) *
                  torch.clamp(inter_rect_y2 - inter_rect_y1 + 1, min=0))

    # Union area (the +1 keeps the inclusive-pixel convention used above).
    b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)

    return inter_area / (b1_area + b2_area - inter_area)
def pred_corner_coord(prediction):
    """Convert (cx, cy, w, h) to corner form, in place, for every box whose
    objectness score (index 4) is non-zero; other rows are left untouched."""
    # (batch, box) indices of the entries with non-zero objectness.
    nz = torch.nonzero(prediction[:, :, 4]).transpose(0, 1).contiguous()
    selected = prediction[nz[0], nz[1]]

    corners = selected.new(selected.shape)
    half_w = selected[:, 2] / 2
    half_h = selected[:, 3] / 2
    corners[:, 0] = selected[:, 0] - half_w
    corners[:, 1] = selected[:, 1] - half_h
    corners[:, 2] = selected[:, 0] + half_w
    corners[:, 3] = selected[:, 1] + half_h

    selected[:, :4] = corners[:, :4]
    prediction[nz[0], nz[1]] = selected
    return prediction
def write(x, batches, results, colors, classes):
    """Draw detection `x` = (batch_idx, x1, y1, x2, y2, ..., cls) onto the
    corresponding image in `results`; returns the annotated image."""
    top_left = tuple(x[1:3].int())
    bottom_right = tuple(x[3:5].int())
    img = results[int(x[0])]
    cls = int(x[-1])
    label = "{0}".format(classes[cls])
    color = random.choice(colors)
    # Bounding-box outline.
    cv2.rectangle(img, top_left, bottom_right, color, 1)
    # Filled label background sized to the rendered text.
    t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
    label_br = (top_left[0] + t_size[0] + 3, top_left[1] + t_size[1] + 4)
    cv2.rectangle(img, top_left, label_br, color, -1)
    cv2.putText(img, label, (top_left[0], top_left[1] + t_size[1] + 4),
                cv2.FONT_HERSHEY_PLAIN, 1, [225, 255, 255], 1)
    return img
| 3,130 | 26.464912 | 187 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/yolo/util.py |
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2
import matplotlib.pyplot as plt
try:
from bbox import bbox_iou
except ImportError:
from yolo.bbox import bbox_iou
def count_parameters(model):
    """Total number of elements across all of the model's parameters."""
    total = 0
    for p in model.parameters():
        total += p.numel()
    return total
def count_learnable_parameters(model):
    """Number of elements in parameters that require gradients."""
    trainable = (p for p in model.parameters() if p.requires_grad)
    return sum(p.numel() for p in trainable)
def convert2cpu(matrix):
    """Return a CPU FloatTensor copy of `matrix` when it lives on the GPU;
    CPU tensors are returned unchanged (no copy)."""
    if not matrix.is_cuda:
        return matrix
    return torch.FloatTensor(matrix.size()).copy_(matrix)
def predict_transform(prediction, inp_dim, anchors, num_classes, CUDA = True):
    """Decode a raw YOLO detection feature map into absolute box predictions.

    prediction: (batch, bbox_attrs*num_anchors, grid, grid) raw network output.
    inp_dim: network input resolution; the stride is inferred from the grid.
    anchors: list of (w, h) anchor sizes, in input-image pixels.
    Returns (batch, grid*grid*num_anchors, 5 + num_classes) with box centres,
    sizes, and scores rescaled to input-image coordinates. Mutates
    `prediction` in place (views of the same storage).
    """
    batch_size = prediction.size(0)
    stride = inp_dim // prediction.size(2)
    grid_size = inp_dim // stride
    bbox_attrs = 5 + num_classes
    num_anchors = len(anchors)

    # Anchors are defined at input resolution; rescale them to grid units.
    anchors = [(a[0]/stride, a[1]/stride) for a in anchors]

    # Flatten (anchors x grid x grid) into one boxes axis.
    prediction = prediction.view(batch_size, bbox_attrs*num_anchors, grid_size*grid_size)
    prediction = prediction.transpose(1,2).contiguous()
    prediction = prediction.view(batch_size, grid_size*grid_size*num_anchors, bbox_attrs)

    #Sigmoid the centre_X, centre_Y. and object confidencce
    prediction[:,:,0] = torch.sigmoid(prediction[:,:,0])
    prediction[:,:,1] = torch.sigmoid(prediction[:,:,1])
    prediction[:,:,4] = torch.sigmoid(prediction[:,:,4])

    #Add the center offsets
    grid_len = np.arange(grid_size)
    a,b = np.meshgrid(grid_len, grid_len)

    x_offset = torch.FloatTensor(a).view(-1,1)
    y_offset = torch.FloatTensor(b).view(-1,1)

    if CUDA:
        x_offset = x_offset.cuda()
        y_offset = y_offset.cuda()

    x_y_offset = torch.cat((x_offset, y_offset), 1).repeat(1,num_anchors).view(-1,2).unsqueeze(0)

    prediction[:,:,:2] += x_y_offset

    #log space transform height and the width
    anchors = torch.FloatTensor(anchors)

    if CUDA:
        anchors = anchors.cuda()

    anchors = anchors.repeat(grid_size*grid_size, 1).unsqueeze(0)
    prediction[:,:,2:4] = torch.exp(prediction[:,:,2:4])*anchors

    # NOTE(review): despite the comment below, class scores use sigmoid here
    # (multi-label, as in YOLOv3); the half-precision variant uses Softmax.
    #Softmax the class scores
    prediction[:,:,5: 5 + num_classes] = torch.sigmoid((prediction[:,:, 5 : 5 + num_classes]))

    # Scale box centres/sizes from grid units back to input-image pixels.
    prediction[:,:,:4] *= stride

    return prediction
def load_classes(namesfile):
    """Read class names, one per line, from `namesfile`.

    BUGFIX: the file handle is now closed deterministically via `with`
    (previously it was opened and never closed).

    Note: the trailing `[:-1]` drops the final (empty) entry produced by a
    terminating newline, matching the original behaviour; a file without a
    trailing newline would lose its last name.
    """
    with open(namesfile, "r") as fp:
        names = fp.read().split("\n")[:-1]
    return names
def get_im_dim(im):
    """Return (width, height) of the image stored at path `im`."""
    loaded = cv2.imread(im)
    return loaded.shape[1], loaded.shape[0]
def unique(tensor):
tensor_np = tensor.cpu().numpy()
unique_np = np.unique(tensor_np)
unique_tensor = torch.from_numpy(unique_np)
tensor_res = tensor.new(unique_tensor.shape)
tensor_res.copy_(unique_tensor)
return tensor_res
def dynamic_write_results(prediction, confidence, num_classes, nms=True, nms_conf=0.4):
    """Run write_results once; if it keeps more than 100 detections, retry a
    single time with a slightly stricter NMS threshold (nms_conf - 0.05)."""
    backup = prediction.clone()
    dets = write_results(prediction.clone(), confidence, num_classes, nms, nms_conf)
    if isinstance(dets, int):
        # write_results signals "no detections" with an int (0).
        return dets
    if dets.shape[0] <= 100:
        return dets
    return write_results(backup.clone(), confidence, num_classes, nms, nms_conf - 0.05)
def write_results(prediction, confidence, num_classes, nms=True, nms_conf=0.4):
    """Threshold raw YOLO detections and apply per-class NMS.

    prediction: (batch, boxes, 5 + num_classes) tensor with boxes in
    (cx, cy, w, h) format. Returns rows of
    (batch_idx, x1, y1, x2, y2, objectness, class_score, class_idx),
    or the int 0 when no detection survives. Note: only class 0 is kept
    (person-only pipeline) -- see the `if cls != 0` skip below.
    """
    # Zero out every box below the objectness threshold.
    conf_mask = (prediction[:, :, 4] > confidence).float().float().unsqueeze(2)
    prediction = prediction * conf_mask

    try:
        ind_nz = torch.nonzero(prediction[:,:,4]).transpose(0,1).contiguous()
    except:
        return 0

    # Convert boxes from centre format to corner format in place.
    box_a = prediction.new(prediction.shape)
    box_a[:,:,0] = (prediction[:,:,0] - prediction[:,:,2]/2)
    box_a[:,:,1] = (prediction[:,:,1] - prediction[:,:,3]/2)
    box_a[:,:,2] = (prediction[:,:,0] + prediction[:,:,2]/2)
    box_a[:,:,3] = (prediction[:,:,1] + prediction[:,:,3]/2)
    prediction[:,:,:4] = box_a[:,:,:4]

    batch_size = prediction.size(0)

    output = prediction.new(1, prediction.size(2) + 1)
    write = False
    num = 0
    for ind in range(batch_size):
        #select the image from the batch
        image_pred = prediction[ind]

        #Get the class having maximum score, and the index of that class
        #Get rid of num_classes softmax scores
        #Add the class index and the class score of class having maximum score
        max_conf, max_conf_score = torch.max(image_pred[:,5:5+ num_classes], 1)
        max_conf = max_conf.float().unsqueeze(1)
        max_conf_score = max_conf_score.float().unsqueeze(1)
        seq = (image_pred[:,:5], max_conf, max_conf_score)
        image_pred = torch.cat(seq, 1)

        #Get rid of the zero entries
        non_zero_ind = (torch.nonzero(image_pred[:,4]))

        image_pred_ = image_pred[non_zero_ind.squeeze(),:].view(-1,7)

        #Get the various classes detected in the image
        try:
            img_classes = unique(image_pred_[:,-1])
        except:
            continue

        #WE will do NMS classwise
        #print(img_classes)
        for cls in img_classes:
            # Person-only pipeline: skip every class except 0.
            if cls != 0:
                continue
            #get the detections with one particular class
            cls_mask = image_pred_*(image_pred_[:,-1] == cls).float().unsqueeze(1)
            class_mask_ind = torch.nonzero(cls_mask[:,-2]).squeeze()

            image_pred_class = image_pred_[class_mask_ind].view(-1,7)

            #sort the detections such that the entry with the maximum objectness
            #confidence is at the top
            conf_sort_index = torch.sort(image_pred_class[:,4], descending = True )[1]
            image_pred_class = image_pred_class[conf_sort_index]
            idx = image_pred_class.size(0)

            #if nms has to be done
            if nms:
                # Perform non-maximum suppression
                max_detections = []
                while image_pred_class.size(0):
                    # Get detection with highest confidence and save as max detection
                    max_detections.append(image_pred_class[0].unsqueeze(0))
                    # Stop if we're at the last detection
                    if len(image_pred_class) == 1:
                        break
                    # Get the IOUs for all boxes with lower confidence
                    ious = bbox_iou(max_detections[-1], image_pred_class[1:])
                    # Remove detections with IoU >= NMS threshold
                    image_pred_class = image_pred_class[1:][ious < nms_conf]

                image_pred_class = torch.cat(max_detections).data

            #Concatenate the batch_id of the image to the detection
            #this helps us identify which image does the detection correspond to
            #We use a linear straucture to hold ALL the detections from the batch
            #the batch_dim is flattened
            #batch is identified by extra batch column
            batch_ind = image_pred_class.new(image_pred_class.size(0), 1).fill_(ind)
            seq = batch_ind, image_pred_class
            if not write:
                output = torch.cat(seq,1)
                write = True
            else:
                out = torch.cat(seq,1)
                output = torch.cat((output,out))
            num += 1

    # Nothing was kept for any batch element.
    if not num:
        return 0

    return output
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 24 00:12:16 2018
@author: ayooshmac
"""
def predict_transform_half(prediction, inp_dim, anchors, num_classes, CUDA = True):
    """Half-precision (FP16) variant of predict_transform: decode a raw YOLO
    feature map into box predictions.

    Differences from the FP32 version: offsets/anchors are cast to half and
    class scores use Softmax instead of sigmoid.

    NOTE(review): unlike predict_transform, the anchors are NOT divided by
    the stride here before being applied -- possibly a bug; confirm against
    the FP32 implementation before relying on this path.
    """
    batch_size = prediction.size(0)
    stride = inp_dim // prediction.size(2)

    bbox_attrs = 5 + num_classes
    num_anchors = len(anchors)
    grid_size = inp_dim // stride

    # Flatten (anchors x grid x grid) into one boxes axis.
    prediction = prediction.view(batch_size, bbox_attrs*num_anchors, grid_size*grid_size)
    prediction = prediction.transpose(1,2).contiguous()
    prediction = prediction.view(batch_size, grid_size*grid_size*num_anchors, bbox_attrs)

    #Sigmoid the centre_X, centre_Y. and object confidencce
    prediction[:,:,0] = torch.sigmoid(prediction[:,:,0])
    prediction[:,:,1] = torch.sigmoid(prediction[:,:,1])
    prediction[:,:,4] = torch.sigmoid(prediction[:,:,4])

    #Add the center offsets
    grid_len = np.arange(grid_size)
    a,b = np.meshgrid(grid_len, grid_len)

    x_offset = torch.FloatTensor(a).view(-1,1)
    y_offset = torch.FloatTensor(b).view(-1,1)

    if CUDA:
        x_offset = x_offset.cuda().half()
        y_offset = y_offset.cuda().half()

    x_y_offset = torch.cat((x_offset, y_offset), 1).repeat(1,num_anchors).view(-1,2).unsqueeze(0)

    prediction[:,:,:2] += x_y_offset

    #log space transform height and the width
    anchors = torch.HalfTensor(anchors)

    if CUDA:
        anchors = anchors.cuda()

    anchors = anchors.repeat(grid_size*grid_size, 1).unsqueeze(0)
    prediction[:,:,2:4] = torch.exp(prediction[:,:,2:4])*anchors

    #Softmax the class scores
    prediction[:,:,5: 5 + num_classes] = nn.Softmax(-1)(Variable(prediction[:,:, 5 : 5 + num_classes])).data

    # Scale box centres/sizes from grid units back to input-image pixels.
    prediction[:,:,:4] *= stride

    return prediction
def write_results_half(prediction, confidence, num_classes, nms = True, nms_conf = 0.4):
    """Half-precision (FP16) variant of write_results: threshold detections
    and apply per-class NMS.

    NOTE(review): unlike write_results, this keeps every class (no
    person-only filter), and when nothing is detected it returns the
    uninitialised `output` buffer rather than 0 -- callers checking
    `type(output) == int` will not catch the empty case here; confirm
    before relying on that path.
    """
    # Zero out every box below the objectness threshold.
    conf_mask = (prediction[:,:,4] > confidence).half().unsqueeze(2)
    prediction = prediction*conf_mask

    try:
        ind_nz = torch.nonzero(prediction[:,:,4]).transpose(0,1).contiguous()
    except:
        return 0

    # Convert boxes from centre format to corner format in place.
    box_a = prediction.new(prediction.shape)
    box_a[:,:,0] = (prediction[:,:,0] - prediction[:,:,2]/2)
    box_a[:,:,1] = (prediction[:,:,1] - prediction[:,:,3]/2)
    box_a[:,:,2] = (prediction[:,:,0] + prediction[:,:,2]/2)
    box_a[:,:,3] = (prediction[:,:,1] + prediction[:,:,3]/2)
    prediction[:,:,:4] = box_a[:,:,:4]

    batch_size = prediction.size(0)

    output = prediction.new(1, prediction.size(2) + 1)
    write = False

    for ind in range(batch_size):
        #select the image from the batch
        image_pred = prediction[ind]

        #Get the class having maximum score, and the index of that class
        #Get rid of num_classes softmax scores
        #Add the class index and the class score of class having maximum score
        max_conf, max_conf_score = torch.max(image_pred[:,5:5+ num_classes], 1)
        max_conf = max_conf.half().unsqueeze(1)
        max_conf_score = max_conf_score.half().unsqueeze(1)
        seq = (image_pred[:,:5], max_conf, max_conf_score)
        image_pred = torch.cat(seq, 1)

        #Get rid of the zero entries
        non_zero_ind = (torch.nonzero(image_pred[:,4]))
        try:
            image_pred_ = image_pred[non_zero_ind.squeeze(),:]
        except:
            continue

        #Get the various classes detected in the image
        img_classes = unique(image_pred_[:,-1].long()).half()

        #WE will do NMS classwise
        for cls in img_classes:
            #get the detections with one particular class
            cls_mask = image_pred_*(image_pred_[:,-1] == cls).half().unsqueeze(1)
            class_mask_ind = torch.nonzero(cls_mask[:,-2]).squeeze()

            image_pred_class = image_pred_[class_mask_ind]

            #sort the detections such that the entry with the maximum objectness
            #confidence is at the top
            conf_sort_index = torch.sort(image_pred_class[:,4], descending = True )[1]
            image_pred_class = image_pred_class[conf_sort_index]
            idx = image_pred_class.size(0)

            #if nms has to be done
            if nms:
                #For each detection
                for i in range(idx):
                    #Get the IOUs of all boxes that come after the one we are looking at
                    #in the loop
                    try:
                        ious = bbox_iou(image_pred_class[i].unsqueeze(0), image_pred_class[i+1:])
                    except ValueError:
                        break

                    except IndexError:
                        break

                    #Zero out all the detections that have IoU > treshhold
                    iou_mask = (ious < nms_conf).half().unsqueeze(1)
                    image_pred_class[i+1:] *= iou_mask

                    #Remove the non-zero entries
                    non_zero_ind = torch.nonzero(image_pred_class[:,4]).squeeze()
                    image_pred_class = image_pred_class[non_zero_ind]

            #Concatenate the batch_id of the image to the detection
            #this helps us identify which image does the detection correspond to
            #We use a linear straucture to hold ALL the detections from the batch
            #the batch_dim is flattened
            #batch is identified by extra batch column
            batch_ind = image_pred_class.new(image_pred_class.size(0), 1).fill_(ind)
            seq = batch_ind, image_pred_class

            if not write:
                output = torch.cat(seq,1)
                write = True
            else:
                out = torch.cat(seq,1)
                output = torch.cat((output,out))

    return output
| 13,284 | 33.239691 | 108 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/yolo/preprocess.py | from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2
import matplotlib.pyplot as plt
try:
from util import count_parameters as count
from util import convert2cpu as cpu
except ImportError:
from yolo.util import count_parameters as count
from yolo.util import convert2cpu as cpu
from PIL import Image, ImageDraw
def letterbox_image(img, inp_dim):
    '''resize image with unchanged aspect ratio using padding'''
    src_w, src_h = img.shape[1], img.shape[0]
    dst_w, dst_h = inp_dim
    scale = min(dst_w / src_w, dst_h / src_h)
    new_w = int(src_w * scale)
    new_h = int(src_h * scale)
    resized = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_CUBIC)
    # Grey (128) canvas with the resized image pasted centred.
    canvas = np.full((dst_h, dst_w, 3), 128)
    top = (dst_h - new_h) // 2
    left = (dst_w - new_w) // 2
    canvas[top:top + new_h, left:left + new_w, :] = resized
    return canvas
def prep_image(img, inp_dim):
    """
    Prepare image for inputting to the neural network.

    Returns a Variable
    """
    orig_im = cv2.imread(img)
    # (width, height) of the source image.
    dim = (orig_im.shape[1], orig_im.shape[0])
    boxed = letterbox_image(orig_im, (inp_dim, inp_dim))
    # BGR -> RGB, HWC -> CHW; .copy() so torch owns contiguous memory.
    chw = boxed[:, :, ::-1].transpose((2, 0, 1)).copy()
    tensor = torch.from_numpy(chw).float().div(255.0).unsqueeze(0)
    return tensor, orig_im, dim
def prep_frame(img, inp_dim):
    """
    Prepare an already-decoded frame for inputting to the neural network.

    Returns a Variable
    """
    orig_im = img
    dim = (orig_im.shape[1], orig_im.shape[0])
    boxed = letterbox_image(orig_im, (inp_dim, inp_dim))
    # BGR -> RGB, HWC -> CHW; .copy() so torch owns contiguous memory.
    chw = boxed[:, :, ::-1].transpose((2, 0, 1)).copy()
    tensor = torch.from_numpy(chw).float().div(255.0).unsqueeze(0)
    return tensor, orig_im, dim
def prep_image_pil(img, network_dim):
    """Load `img` with PIL and convert it to a (1, 3, H, W) float tensor in [0, 1]."""
    orig_im = Image.open(img)
    rgb = orig_im.convert('RGB')
    dim = rgb.size
    rgb = rgb.resize(network_dim)
    # Raw bytes -> (W, H, 3) -> CHW -> batched float tensor.
    tensor = torch.ByteTensor(torch.ByteStorage.from_buffer(rgb.tobytes()))
    tensor = tensor.view(*network_dim, 3).transpose(0, 1).transpose(0, 2).contiguous()
    tensor = tensor.view(1, 3, *network_dim)
    tensor = tensor.float().div(255.0)
    return (tensor, orig_im, dim)
def inp_to_image(inp):
    """Convert a (1, 3, H, W) network-input tensor back to an HWC array with
    values scaled to [0, 255] and the channel order flipped (RGB -> BGR)."""
    scaled = inp.cpu().squeeze() * 255
    try:
        arr = scaled.data.numpy()
    except RuntimeError:
        arr = scaled.numpy()
    # CHW -> HWC, then flip the channel axis.
    return arr.transpose(1, 2, 0)[:, :, ::-1]
| 2,474 | 27.125 | 115 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/yolo/darknet.py | from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2
import matplotlib.pyplot as plt
try:
from util import count_parameters as count
from util import convert2cpu as cpu
from util import predict_transform
except ImportError:
from yolo.util import count_parameters as count
from yolo.util import convert2cpu as cpu
from yolo.util import predict_transform
class test_net(nn.Module):
    """Tiny throwaway MLP used for sanity-checking the framework:
    Linear(input_size, 5) -> num_layers x Linear(5, 5) -> Linear(5, 2)."""

    def __init__(self, num_layers, input_size):
        super(test_net, self).__init__()
        self.num_layers = num_layers
        self.linear_1 = nn.Linear(input_size, 5)
        self.middle = nn.ModuleList(nn.Linear(5, 5) for _ in range(num_layers))
        self.output = nn.Linear(5, 2)

    def forward(self, x):
        # Flatten the input, then run all layers as one sequential pipeline.
        flat = x.view(-1)
        pipeline = nn.Sequential(self.linear_1, *self.middle, self.output)
        return pipeline(flat)
def get_test_input():
    """Load dog-cycle-car.png as a (1, 3, 416, 416) Variable with values in [0, 1]."""
    bgr = cv2.imread("dog-cycle-car.png")
    bgr = cv2.resize(bgr, (416, 416))
    # BGR -> RGB, HWC -> CHW, add batch axis, scale into [0, 1].
    chw = bgr[:, :, ::-1].transpose((2, 0, 1))
    batch = chw[np.newaxis, :, :, :] / 255.0
    return Variable(torch.from_numpy(batch).float())
def parse_cfg(cfgfile):
    """
    Takes a configuration file

    Returns a list of blocks. Each blocks describes a block in the neural
    network to be built. Block is represented as a dictionary in the list
    """
    # BUGFIX: the file is now closed via `with` (it used to leak the handle),
    # and lines are stripped *before* the empty-line filter so that
    # whitespace-only lines no longer slip through and crash on `line[0]`.
    with open(cfgfile, 'r') as file:
        lines = file.read().split('\n')        # store the lines in a list
    lines = [x.rstrip().lstrip() for x in lines]   # trim surrounding whitespace
    lines = [x for x in lines if len(x) > 0]       # get rid of the empty lines
    lines = [x for x in lines if x[0] != '#']      # drop comment lines

    block = {}
    blocks = []

    for line in lines:
        if line[0] == "[":               # This marks the start of a new block
            if len(block) != 0:
                blocks.append(block)
                block = {}
            block["type"] = line[1:-1].rstrip()
        else:
            key, value = line.split("=")
            block[key.rstrip()] = value.lstrip()
    blocks.append(block)

    return blocks
# print('\n\n'.join([repr(x) for x in blocks]))
import pickle as pkl
class MaxPoolStride1(nn.Module):
    """Max-pooling with stride 1 that symmetric-pads the input with zeros by
    (kernel_size - 1) // 2 on each edge before pooling."""

    def __init__(self, kernel_size):
        super(MaxPoolStride1, self).__init__()
        self.kernel_size = kernel_size
        self.pad = kernel_size - 1

    def forward(self, x):
        half = self.pad // 2
        padded = F.pad(x, (half, half, half, half), mode="constant", value=0)
        return nn.MaxPool2d(self.kernel_size, 1)(padded)
class EmptyLayer(nn.Module):
    """Parameter-free placeholder used for route/shortcut cfg blocks; the
    actual concatenation/addition happens in the network's forward pass."""

    def __init__(self):
        super().__init__()
class DetectionLayer(nn.Module):
    """Final YOLO layer: decodes the raw feature map for its anchor set into
    box predictions via predict_transform."""

    def __init__(self, anchors):
        super(DetectionLayer, self).__init__()
        self.anchors = anchors

    def forward(self, x, inp_dim, num_classes, confidence):
        x = x.data
        # NOTE(review): relies on a module-level CUDA flag being set elsewhere.
        global CUDA
        prediction = x
        # BUGFIX: predict_transform takes (prediction, inp_dim, anchors,
        # num_classes, CUDA); the extra `confidence` argument previously
        # passed here made the call a TypeError whenever this layer ran.
        prediction = predict_transform(prediction, inp_dim, self.anchors, num_classes, CUDA)
        return prediction
class Upsample(nn.Module):
    """Nearest-neighbour upsampling by an integer `stride` factor, implemented
    with view/expand (each pixel is replicated into a stride x stride tile)."""

    def __init__(self, stride=2):
        super(Upsample, self).__init__()
        self.stride = stride

    def forward(self, x):
        assert (x.data.dim() == 4)
        s = self.stride
        B = x.data.size(0)
        C = x.data.size(1)
        H = x.data.size(2)
        W = x.data.size(3)
        tiled = x.view(B, C, H, 1, W, 1).expand(B, C, H, s, W, s)
        return tiled.contiguous().view(B, C, H * s, W * s)
#
class ReOrgLayer(nn.Module):
    """Reorganization ("reorg") layer: moves each stride x stride spatial
    block into the channel dimension, mapping
    (B, C, H, W) -> (B, C*stride*stride, H//stride, W//stride)."""
    def __init__(self, stride = 2):
        super(ReOrgLayer, self).__init__()
        self.stride= stride

    def forward(self,x):
        assert(x.data.dim() == 4)
        B,C,H,W = x.data.shape
        hs = self.stride
        ws = self.stride
        assert(H % hs == 0),  "The stride " + str(self.stride) + " is not a proper divisor of height " + str(H)
        assert(W % ws == 0),  "The stride " + str(self.stride) + " is not a proper divisor of height " + str(W)
        # Order-critical chain of views/transposes that gathers each
        # stride x stride spatial block into the channel axis.
        x = x.view(B,C, H // hs, hs, W // ws, ws).transpose(-2,-3).contiguous()
        x = x.view(B,C, H // hs * W // ws, hs, ws)
        x = x.view(B,C, H // hs * W // ws, hs*ws).transpose(-1,-2).contiguous()
        x = x.view(B, C, ws*hs, H // ws, W // ws).transpose(1,2).contiguous()
        x = x.view(B, C*ws*hs, H // ws, W // ws)
        return x
def create_modules(blocks):
    """Translate parsed darknet cfg blocks into an ``nn.ModuleList``.

    blocks: list of dicts produced by ``parse_cfg``; ``blocks[0]`` is the
    [net] block.  Returns ``(net_info, module_list)`` where ``module_list[i]``
    is an ``nn.Sequential`` built from ``blocks[i + 1]`` (the [net] block
    produces no module).
    """
    net_info = blocks[0]  # Captures the information about the input and pre-processing
    module_list = nn.ModuleList()
    index = 0  # indexing blocks helps with implementing route layers (skip connections)
    prev_filters = 3  # network input is a 3-channel image
    output_filters = []  # channel count produced by every block, for route lookups
    for x in blocks:
        module = nn.Sequential()
        if (x["type"] == "net"):
            continue
        # Convolutional layer: conv [+ batch-norm] [+ leaky ReLU]
        if (x["type"] == "convolutional"):
            activation = x["activation"]
            try:
                batch_normalize = int(x["batch_normalize"])
                bias = False  # BN supplies its own shift, so the conv bias is redundant
            except KeyError:
                batch_normalize = 0
                bias = True
            filters = int(x["filters"])
            padding = int(x["pad"])
            kernel_size = int(x["size"])
            stride = int(x["stride"])
            # darknet's "pad=1" means "same" padding for odd kernel sizes
            pad = (kernel_size - 1) // 2 if padding else 0
            conv = nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias=bias)
            module.add_module("conv_{0}".format(index), conv)
            if batch_normalize:
                module.add_module("batch_norm_{0}".format(index), nn.BatchNorm2d(filters))
            # The activation is either linear (nothing added) or a leaky ReLU
            if activation == "leaky":
                module.add_module("leaky_{0}".format(index), nn.LeakyReLU(0.1, inplace=True))
        # Upsampling layer (nearest-neighbour)
        elif (x["type"] == "upsample"):
            stride = int(x["stride"])
            # FIX: honour the cfg's stride instead of the hard-coded factor 2
            # (identical behaviour for the standard cfgs, which all use stride=2).
            upsample = nn.Upsample(scale_factor=stride, mode="nearest")
            module.add_module("upsample_{}".format(index), upsample)
        # Route layer: concatenation of earlier feature maps.  EmptyLayer is a
        # placeholder; the actual concat happens in Darknet.forward.
        elif (x["type"] == "route"):
            x["layers"] = x["layers"].split(',')
            start = int(x["layers"][0])
            if len(x["layers"]) <= 2:
                try:
                    end = int(x["layers"][1])
                except (IndexError, ValueError):
                    end = 0  # single-layer route
                # Positive annotations are absolute block indices; make them relative.
                if start > 0:
                    start = start - index
                if end > 0:
                    end = end - index
                route = EmptyLayer()
                module.add_module("route_{0}".format(index), route)
                if end < 0:
                    filters = output_filters[index + start] + output_filters[index + end]
                else:
                    filters = output_filters[index + start]
            else:  # SPP-route: concatenation of exactly four earlier layers
                assert len(x["layers"]) == 4
                # FIX: was ``round = EmptyLayer()`` — a typo that left ``route``
                # either unbound (NameError) or stale from a previous route block.
                route = EmptyLayer()
                module.add_module("route_{0}".format(index), route)
                filters = output_filters[index + start] + output_filters[index + int(x["layers"][1])] \
                    + output_filters[index + int(x["layers"][2])] + output_filters[index + int(x["layers"][3])]
        # shortcut corresponds to a skip connection (added in Darknet.forward)
        elif x["type"] == "shortcut":
            from_ = int(x["from"])
            shortcut = EmptyLayer()
            module.add_module("shortcut_{}".format(index), shortcut)
        elif x["type"] == "maxpool":
            stride = int(x["stride"])
            size = int(x["size"])
            if stride != 1:
                maxpool = nn.MaxPool2d(size, stride)
            else:
                # Stride-1 pooling must preserve the spatial size; the custom
                # MaxPoolStride1 pads asymmetrically to achieve that.
                maxpool = MaxPoolStride1(size)
            module.add_module("maxpool_{}".format(index), maxpool)
        # Yolo is the detection layer
        elif x["type"] == "yolo":
            mask = [int(m) for m in x["mask"].split(",")]
            anchors = [int(a) for a in x["anchors"].split(",")]
            anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
            anchors = [anchors[i] for i in mask]  # keep only this head's anchors
            detection = DetectionLayer(anchors)
            module.add_module("Detection_{}".format(index), detection)
        else:
            # FIX: was print + ``assert False`` (asserts are stripped under -O);
            # fail loudly with a descriptive error instead.
            raise ValueError("Unsupported block type: {}".format(x["type"]))
        module_list.append(module)
        prev_filters = filters
        output_filters.append(filters)
        index += 1
    return (net_info, module_list)
class Darknet(nn.Module):
    """YOLO network assembled from a darknet ``.cfg`` file.

    The cfg is parsed into ``self.blocks``; ``create_modules`` turns it into
    ``self.module_list`` (one nn.Sequential per block after [net]).  forward()
    replays the blocks in order, caching every intermediate feature map so
    route/shortcut blocks can reference earlier outputs.  Weight (de)serialization
    follows darknet's flat binary layout, so the read/write order below must
    exactly mirror the module construction order.
    """
    def __init__(self, cfgfile):
        super(Darknet, self).__init__()
        self.blocks = parse_cfg(cfgfile)
        self.net_info, self.module_list = create_modules(self.blocks)
        # Darknet weight-file header (version numbers + images-seen counter).
        self.header = torch.IntTensor([0,0,0,0])
        self.seen = 0
    def get_blocks(self):
        # Accessor for the parsed cfg blocks.
        return self.blocks
    def get_module_list(self):
        # Accessor for the instantiated module list.
        return self.module_list
    def forward(self, x, CUDA):
        """Run detection on ``x``; returns the concatenated predictions of all
        yolo heads (or 0 if no head produced output)."""
        detections = []
        modules = self.blocks[1:]
        outputs = {}   #We cache the outputs for the route layer
        write = 0  # becomes 1 once the first yolo head has produced detections
        for i in range(len(modules)):
            module_type = (modules[i]["type"])
            if module_type == "convolutional" or module_type == "upsample" or module_type == "maxpool":
                # Plain feed-forward layers: just apply the module.
                x = self.module_list[i](x)
                outputs[i] = x
            elif module_type == "route":
                layers = modules[i]["layers"]
                layers = [int(a) for a in layers]
                # Positive indices are absolute; convert to relative offsets.
                if (layers[0]) > 0:
                    layers[0] = layers[0] - i
                if len(layers) == 1:
                    # Single-source route: pass an earlier map through unchanged.
                    x = outputs[i + (layers[0])]
                elif len(layers) == 2:
                    if (layers[1]) > 0:
                        layers[1] = layers[1] - i
                    map1 = outputs[i + layers[0]]
                    map2 = outputs[i + layers[1]]
                    # Concatenate along the channel dimension.
                    x = torch.cat((map1, map2), 1)
                elif len(layers) == 4: # SPP
                    map1 = outputs[i + layers[0]]
                    map2 = outputs[i + layers[1]]
                    map3 = outputs[i + layers[2]]
                    map4 = outputs[i + layers[3]]
                    x = torch.cat((map1, map2, map3, map4), 1)
                outputs[i] = x
            elif  module_type == "shortcut":
                # Residual connection: add an earlier feature map.
                from_ = int(modules[i]["from"])
                x = outputs[i-1] + outputs[i+from_]
                outputs[i] = x
            elif module_type == 'yolo':
                anchors = self.module_list[i][0].anchors
                #Get the input dimensions
                inp_dim = int (self.net_info["height"])
                #Get the number of classes
                num_classes = int (modules[i]["classes"])
                #Output the result
                x = x.data
                x = predict_transform(x, inp_dim, anchors, num_classes, CUDA)
                # predict_transform returns an int sentinel when the head has
                # nothing to contribute; skip it in that case.
                if type(x) == int:
                    continue
                if not write:
                    detections = x
                    write = 1
                else:
                    detections = torch.cat((detections, x), 1)
                # Keep the pre-head feature map cached for later routes.
                outputs[i] = outputs[i-1]
        try:
            return detections
        except:
            return 0
    def load_weights(self, weightfile):
        """Load darknet-format binary weights into the conv/BN layers in order."""
        #Open the weights file
        fp = open(weightfile, "rb")
        # The first 5 int32 values are header information:
        # 1. Major version number
        # 2. Minor Version Number
        # 3. Subversion number
        # 4.-5. Images seen during training (stored here as two int32 slots;
        #       only header[3] is kept as ``seen``)
        header = np.fromfile(fp, dtype = np.int32, count = 5)
        self.header = torch.from_numpy(header)
        self.seen = self.header[3]
        #The rest of the values are the weights
        # Let's load them up
        weights = np.fromfile(fp, dtype = np.float32)
        ptr = 0  # read cursor into the flat weight array
        for i in range(len(self.module_list)):
            module_type = self.blocks[i + 1]["type"]
            if module_type == "convolutional":
                model = self.module_list[i]
                try:
                    batch_normalize = int(self.blocks[i+1]["batch_normalize"])
                except:
                    batch_normalize = 0
                conv = model[0]
                if (batch_normalize):
                    bn = model[1]
                    #Get the number of weights of Batch Norm Layer
                    num_bn_biases = bn.bias.numel()
                    # Darknet stores BN params in the order:
                    # biases, weights, running mean, running variance.
                    bn_biases = torch.from_numpy(weights[ptr:ptr + num_bn_biases])
                    ptr += num_bn_biases
                    bn_weights = torch.from_numpy(weights[ptr: ptr + num_bn_biases])
                    ptr += num_bn_biases
                    bn_running_mean = torch.from_numpy(weights[ptr: ptr + num_bn_biases])
                    ptr += num_bn_biases
                    bn_running_var = torch.from_numpy(weights[ptr: ptr + num_bn_biases])
                    ptr += num_bn_biases
                    #Cast the loaded weights into dims of model weights.
                    bn_biases = bn_biases.view_as(bn.bias.data)
                    bn_weights = bn_weights.view_as(bn.weight.data)
                    bn_running_mean = bn_running_mean.view_as(bn.running_mean)
                    bn_running_var = bn_running_var.view_as(bn.running_var)
                    #Copy the data to model
                    bn.bias.data.copy_(bn_biases)
                    bn.weight.data.copy_(bn_weights)
                    bn.running_mean.copy_(bn_running_mean)
                    bn.running_var.copy_(bn_running_var)
                else:
                    # Without BN the conv layer has its own biases on disk.
                    num_biases = conv.bias.numel()
                    #Load the weights
                    conv_biases = torch.from_numpy(weights[ptr: ptr + num_biases])
                    ptr = ptr + num_biases
                    #reshape the loaded weights according to the dims of the model weights
                    conv_biases = conv_biases.view_as(conv.bias.data)
                    #Finally copy the data
                    conv.bias.data.copy_(conv_biases)
                #Let us load the weights for the Convolutional layers
                num_weights = conv.weight.numel()
                #Do the same as above for weights
                conv_weights = torch.from_numpy(weights[ptr:ptr+num_weights])
                ptr = ptr + num_weights
                conv_weights = conv_weights.view_as(conv.weight.data)
                conv.weight.data.copy_(conv_weights)
    def save_weights(self, savedfile, cutoff = 0):
        """Serialize weights back to darknet's binary format.

        cutoff: number of blocks to save; <= 0 means save everything.
        NOTE: the write order must mirror load_weights exactly.
        """
        if cutoff <= 0:
            cutoff = len(self.blocks) - 1
        fp = open(savedfile, 'wb')
        # Attach the header at the top of the file
        self.header[3] = self.seen
        header = self.header
        header = header.numpy()
        header.tofile(fp)
        # Now, let us save the weights
        for i in range(len(self.module_list)):
            module_type = self.blocks[i+1]["type"]
            if (module_type) == "convolutional":
                model = self.module_list[i]
                try:
                    batch_normalize = int(self.blocks[i+1]["batch_normalize"])
                except:
                    batch_normalize = 0
                conv = model[0]
                if (batch_normalize):
                    bn = model[1]
                    #If the parameters are on GPU, convert them back to CPU
                    #We don't convert the parameter to GPU
                    #Instead. we copy the parameter and then convert it to CPU
                    #This is done as weight are need to be saved during training
                    cpu(bn.bias.data).numpy().tofile(fp)
                    cpu(bn.weight.data).numpy().tofile(fp)
                    cpu(bn.running_mean).numpy().tofile(fp)
                    cpu(bn.running_var).numpy().tofile(fp)
                else:
                    cpu(conv.bias.data).numpy().tofile(fp)
                #Let us save the weights for the Convolutional layers
                cpu(conv.weight.data).numpy().tofile(fp)
#
#dn = Darknet('cfg/yolov3.cfg')
#dn.load_weights("yolov3.weights")
#inp = get_test_input()
#a, interms = dn(inp)
#dn.eval()
#a_i, interms_i = dn(inp)
| 18,505 | 32.708561 | 117 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/SPPE/src/opt.py | import argparse
import torch
# Command-line configuration for AlphaPose SPPE training.  The parsed options
# are exposed as the module-level ``opt`` namespace, imported throughout the
# code base (model construction reads nFeats/nClasses/nStack/etc. from it).
# The bare section strings below are no-op banners kept for readability.
parser = argparse.ArgumentParser(description='PyTorch AlphaPose Training')
"----------------------------- General options -----------------------------"
parser.add_argument('--expID', default='default', type=str,
                    help='Experiment ID')
parser.add_argument('--dataset', default='coco', type=str,
                    help='Dataset choice: mpii | coco')
parser.add_argument('--nThreads', default=30, type=int,
                    help='Number of data loading threads')
# NOTE(review): type=bool on argparse flags is misleading — any non-empty
# string (including "False") parses as True; only the defaults are reliable.
parser.add_argument('--debug', default=False, type=bool,
                    help='Print the debug information')
parser.add_argument('--snapshot', default=1, type=int,
                    help='How often to take a snapshot of the model (0 = never)')
"----------------------------- AlphaPose options -----------------------------"
parser.add_argument('--addDPG', default=False, type=bool,
                    help='Train with data augmentation')
"----------------------------- Model options -----------------------------"
parser.add_argument('--netType', default='hgPRM', type=str,
                    help='Options: hgPRM | resnext')
parser.add_argument('--loadModel', default=None, type=str,
                    help='Provide full path to a previously trained model')
parser.add_argument('--Continue', default=False, type=bool,
                    help='Pick up where an experiment left off')
parser.add_argument('--nFeats', default=256, type=int,
                    help='Number of features in the hourglass')
parser.add_argument('--nClasses', default=17, type=int,
                    help='Number of output channel')
parser.add_argument('--nStack', default=8, type=int,
                    help='Number of hourglasses to stack')
"----------------------------- Hyperparameter options -----------------------------"
parser.add_argument('--LR', default=2.5e-4, type=float,
                    help='Learning rate')
parser.add_argument('--momentum', default=0, type=float,
                    help='Momentum')
parser.add_argument('--weightDecay', default=0, type=float,
                    help='Weight decay')
parser.add_argument('--crit', default='MSE', type=str,
                    help='Criterion type')
parser.add_argument('--optMethod', default='rmsprop', type=str,
                    help='Optimization method: rmsprop | sgd | nag | adadelta')
"----------------------------- Training options -----------------------------"
parser.add_argument('--nEpochs', default=50, type=int,
                    help='Number of hourglasses to stack')
parser.add_argument('--epoch', default=0, type=int,
                    help='Current epoch')
parser.add_argument('--trainBatch', default=40, type=int,
                    help='Train-batch size')
parser.add_argument('--validBatch', default=20, type=int,
                    help='Valid-batch size')
parser.add_argument('--trainIters', default=0, type=int,
                    help='Total train iters')
parser.add_argument('--valIters', default=0, type=int,
                    help='Total valid iters')
parser.add_argument('--init', default=None, type=str,
                    help='Initialization')
"----------------------------- Data options -----------------------------"
parser.add_argument('--inputResH', default=384, type=int,
                    help='Input image height')
parser.add_argument('--inputResW', default=320, type=int,
                    help='Input image width')
parser.add_argument('--outputResH', default=96, type=int,
                    help='Output heatmap height')
parser.add_argument('--outputResW', default=80, type=int,
                    help='Output heatmap width')
parser.add_argument('--scale', default=0.25, type=float,
                    help='Degree of scale augmentation')
parser.add_argument('--rotate', default=30, type=float,
                    help='Degree of rotation augmentation')
parser.add_argument('--hmGauss', default=1, type=int,
                    help='Heatmap gaussian size')
"----------------------------- PyraNet options -----------------------------"
# NOTE(review): the help strings for --baseWidth and --cardinality look like
# copy-paste leftovers from --hmGauss; the values are PRM pyramid parameters.
parser.add_argument('--baseWidth', default=9, type=int,
                    help='Heatmap gaussian size')
parser.add_argument('--cardinality', default=5, type=int,
                    help='Heatmap gaussian size')
parser.add_argument('--nResidual', default=1, type=int,
                    help='Number of residual modules at each location in the pyranet')
"----------------------------- Distribution options -----------------------------"
parser.add_argument('--dist', dest='dist', type=int, default=1,
                    help='distributed training or not')
parser.add_argument('--backend', dest='backend', type=str, default='gloo',
                    help='backend for distributed training')
parser.add_argument('--port', dest='port',
                    help='port of server')
# Parse immediately at import time so ``from opt import opt`` works everywhere.
opt = parser.parse_args()
if opt.Continue:
    # Resume: replace the freshly parsed options with the pickled namespace of
    # the previous run, but force Continue/nEpochs so training keeps going.
    opt = torch.load("../exp/{}/{}/option.pkl".format(opt.dataset, opt.expID))
    opt.Continue = True
    opt.nEpochs = 50
    print("--- Continue ---")
| 5,063 | 48.165049 | 86 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/SPPE/src/main_fast_inference.py | import sys
import torch
import torch._utils
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
from SPPE.src.models.FastPose import createModel
from SPPE.src.utils.img import flip, shuffleLR
# Backwards-compatibility shim: checkpoints saved with newer torch reference
# torch._utils._rebuild_tensor_v2, which is absent from some older torch
# versions.  If it is missing, install an equivalent built on the legacy
# _rebuild_tensor helper so torch.load() of such checkpoints still works.
try:
    torch._utils._rebuild_tensor_v2
except AttributeError:
    def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
        # Rebuild with the legacy helper, then restore the v2-only fields.
        tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
        tensor.requires_grad = requires_grad
        tensor._backward_hooks = backward_hooks
        return tensor
    torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
class InferenNet(nn.Module):
    """Pose network wrapper that averages predictions over a horizontal flip
    (test-time augmentation).  ``kernel_size`` is accepted for interface
    compatibility but unused."""

    def __init__(self, kernel_size, dataset):
        super(InferenNet, self).__init__()
        weight_file = 'joints_detectors/Alphapose/models/sppe/duc_se.pth'
        net = createModel().cuda()
        print('Loading pose model from {}'.format(weight_file))
        sys.stdout.flush()
        net.load_state_dict(torch.load(weight_file))
        net.eval()
        self.pyranet = net
        self.dataset = dataset

    def forward(self, x):
        # Plain pass: keep only the 17 keypoint heatmap channels.
        direct = self.pyranet(x)
        direct = direct.narrow(1, 0, 17)
        # Flipped pass: run on the mirrored image, swap left/right joints,
        # then mirror the heatmaps back.
        mirrored = self.pyranet(flip(x))
        mirrored = mirrored.narrow(1, 0, 17)
        mirrored = flip(shuffleLR(mirrored, self.dataset))
        # Average the two predictions.
        return (mirrored + direct) / 2
class InferenNet_fast(nn.Module):
    """Single-pass variant of InferenNet — no flip augmentation, so roughly
    half the inference cost.  ``kernel_size`` is accepted but unused."""

    def __init__(self, kernel_size, dataset):
        super(InferenNet_fast, self).__init__()
        weight_file = 'joints_detectors/Alphapose/models/sppe/duc_se.pth'
        net = createModel().cuda()
        print('Loading pose model from {}'.format(weight_file))
        net.load_state_dict(torch.load(weight_file))
        net.eval()
        self.pyranet = net
        self.dataset = dataset

    def forward(self, x):
        # Keep only the 17 keypoint heatmap channels.
        heatmaps = self.pyranet(x)
        return heatmaps.narrow(1, 0, 17)
| 1,979 | 28.117647 | 103 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/SPPE/src/models/hg-prm.py | import torch.nn as nn
from .layers.PRM import Residual as ResidualPyramid
from .layers.Residual import Residual as Residual
from torch.autograd import Variable
from opt import opt
from collections import defaultdict
class Hourglass(nn.Module):
    """Recursive hourglass module of depth ``n``.

    Each level sums an upper (full-resolution) residual branch with a lower
    branch that max-pools, recurses one level (or bottoms out in residuals),
    applies more residuals, and upsamples back.  Pyramid residuals are used on
    the upper branch for n >= 2 and on the lower branch for n >= 3; plain
    residuals otherwise.
    """
    def __init__(self, n, nFeats, nModules, inputResH, inputResW, net_type, B, C):
        super(Hourglass, self).__init__()
        # Choose residual type per branch depending on remaining depth.
        self.ResidualUp = ResidualPyramid if n >= 2 else Residual
        self.ResidualDown = ResidualPyramid if n >= 3 else Residual
        self.depth = n
        self.nModules = nModules
        self.nFeats = nFeats
        self.net_type = net_type
        self.B = B  # baseWidth for pyramid residuals
        self.C = C  # cardinality for pyramid residuals
        self.inputResH = inputResH
        self.inputResW = inputResW
        # NOTE: up1/low1/low2/low3/up2 are also reachable through
        # upperBranch/lowerBranch below; both attribute sets are kept so the
        # state_dict layout stays as-is.
        self.up1 = self._make_residual(self.ResidualUp, False, inputResH, inputResW)
        self.low1 = nn.Sequential(
            nn.MaxPool2d(2),
            self._make_residual(self.ResidualDown, False, inputResH / 2, inputResW / 2)
        )
        if n > 1:
            # Recurse one level down at half resolution.
            self.low2 = Hourglass(n - 1, nFeats, nModules, inputResH / 2, inputResW / 2, net_type, B, C)
        else:
            self.low2 = self._make_residual(self.ResidualDown, False, inputResH / 2, inputResW / 2)
        self.low3 = self._make_residual(self.ResidualDown, True, inputResH / 2, inputResW / 2)
        self.up2 = nn.UpsamplingNearest2d(scale_factor=2)
        self.upperBranch = self.up1
        self.lowerBranch = nn.Sequential(
            self.low1,
            self.low2,
            self.low3,
            self.up2
        )
    def _make_residual(self, resBlock, useConv, inputResH, inputResW):
        # Stack ``nModules`` residual blocks at constant width/resolution.
        layer_list = []
        for i in range(self.nModules):
            layer_list.append(resBlock(self.nFeats, self.nFeats, inputResH, inputResW,
                                       stride=1, net_type=self.net_type, useConv=useConv,
                                       baseWidth=self.B, cardinality=self.C))
        return nn.Sequential(*layer_list)
    def forward(self, x: Variable):
        # Elementwise sum of the two branches.
        up1 = self.upperBranch(x)
        up2 = self.lowerBranch(x)
        out = up1 + up2
        return out
class PyraNet(nn.Module):
    """Stacked-hourglass PyraNet with intermediate supervision.

    The stem (``preact``) downsamples the input 4x; each of the ``nStack``
    hourglass stages emits an ``nClasses``-channel heatmap, and (except for the
    last stage) feeds a remapped version of its output back into the next
    stage.  All dimensions come from the global ``opt`` namespace.
    """

    def __init__(self):
        super(PyraNet, self).__init__()
        B, C = opt.baseWidth, opt.cardinality
        self.inputResH = opt.inputResH / 4
        self.inputResW = opt.inputResW / 4
        self.nStack = opt.nStack
        self.cnv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2d(64),
            nn.ReLU(True)
        )
        self.r1 = nn.Sequential(
            ResidualPyramid(64, 128, opt.inputResH / 2, opt.inputResW / 2,
                            stride=1, net_type='no_preact', useConv=False, baseWidth=B, cardinality=C),
            nn.MaxPool2d(2)
        )
        self.r4 = ResidualPyramid(128, 128, self.inputResH, self.inputResW,
                                  stride=1, net_type='preact', useConv=False, baseWidth=B, cardinality=C)
        self.r5 = ResidualPyramid(128, opt.nFeats, self.inputResH, self.inputResW,
                                  stride=1, net_type='preact', useConv=False, baseWidth=B, cardinality=C)
        self.preact = nn.Sequential(
            self.cnv1,
            self.r1,
            self.r4,
            self.r5
        )
        # BUG FIX: the original kept the per-stack layers in a
        # ``defaultdict(list)`` of plain Python lists.  nn.Module only
        # registers submodules held in nn.ModuleList / nn.ModuleDict
        # attributes, so those layers were invisible to .parameters(),
        # .cuda() and state_dict().  Use registered containers (mirroring the
        # layout in hgPRM.py) while keeping the dict-style access
        # ``self.stack_layers['lin'][i]`` that forward() relies on.
        self.stack_layers = nn.ModuleDict({
            'lin': nn.ModuleList(),
            'out': nn.ModuleList(),
            'lin_': nn.ModuleList(),
            'out_': nn.ModuleList(),
        })
        for i in range(self.nStack):
            hg = Hourglass(4, opt.nFeats, opt.nResidual, self.inputResH, self.inputResW, 'preact', B, C)
            lin = nn.Sequential(
                hg,
                nn.BatchNorm2d(opt.nFeats),
                nn.ReLU(True),
                nn.Conv2d(opt.nFeats, opt.nFeats, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(opt.nFeats),
                nn.ReLU(True)
            )
            # Per-stack heatmap head.
            tmpOut = nn.Conv2d(opt.nFeats, opt.nClasses, kernel_size=1, stride=1, padding=0)
            self.stack_layers['lin'].append(lin)
            self.stack_layers['out'].append(tmpOut)
            if i < self.nStack - 1:
                # Remapping convs that inject this stack's features/heatmaps
                # back into the next stack's input.
                lin_ = nn.Conv2d(opt.nFeats, opt.nFeats, kernel_size=1, stride=1, padding=0)
                tmpOut_ = nn.Conv2d(opt.nClasses, opt.nFeats, kernel_size=1, stride=1, padding=0)
                self.stack_layers['lin_'].append(lin_)
                self.stack_layers['out_'].append(tmpOut_)

    def forward(self, x: Variable):
        """Return a list of ``nStack`` heatmap tensors (one per stage)."""
        out = []
        inter = self.preact(x)
        for i in range(self.nStack):
            lin = self.stack_layers['lin'][i](inter)
            tmpOut = self.stack_layers['out'][i](lin)
            out.append(tmpOut)
            if i < self.nStack - 1:
                lin_ = self.stack_layers['lin_'][i](lin)
                tmpOut_ = self.stack_layers['out_'][i](tmpOut)
                inter = inter + lin_ + tmpOut_
        return out
def createModel(**kw):
    """Factory: build a PyraNet; extra keyword arguments are accepted and ignored."""
    return PyraNet()
| 4,899 | 37.582677 | 105 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/SPPE/src/models/FastPose.py | import torch.nn as nn
from torch.autograd import Variable
from .layers.SE_Resnet import SEResnet
from .layers.DUC import DUC
from opt import opt
def createModel():
    """Factory for the FastPose single-person pose estimator."""
    return FastPose()
class FastPose(nn.Module):
    """SE-ResNet101 backbone followed by PixelShuffle and two DUC upsampling
    stages, ending in a 3x3 conv that emits ``opt.nClasses`` heatmaps."""

    # Channel count entering the output conv (after the two DUC stages).
    DIM = 128

    def __init__(self):
        super(FastPose, self).__init__()
        self.preact = SEResnet('resnet101')
        # NOTE: attribute name 'suffle1' (sic) is kept — it is baked into the
        # released checkpoints' state_dict keys.
        self.suffle1 = nn.PixelShuffle(2)
        self.duc1 = DUC(512, 1024, upscale_factor=2)
        self.duc2 = DUC(256, 512, upscale_factor=2)
        self.conv_out = nn.Conv2d(
            self.DIM, opt.nClasses, kernel_size=3, stride=1, padding=1)

    def forward(self, x: Variable):
        features = self.preact(x)
        features = self.duc1(self.suffle1(features))
        features = self.duc2(features)
        return self.conv_out(features)
| 808 | 21.472222 | 71 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/SPPE/src/models/hgPRM.py | import torch.nn as nn
from .layers.PRM import Residual as ResidualPyramid
from .layers.Residual import Residual as Residual
from torch.autograd import Variable
import torch
from opt import opt
import math
class Hourglass(nn.Module):
    """Recursive hourglass module of depth ``n`` (hgPRM variant).

    Sums an upper full-resolution residual branch with a lower branch that
    max-pools, recurses one level (or bottoms out in residuals), applies more
    residuals, and nearest-upsamples back.  Pyramid residuals are used on the
    upper branch for n >= 2 and the lower branch for n >= 3, plain residuals
    otherwise.  Unlike the hg-prm.py variant, sub-branches are held only via
    upperBranch/lowerBranch.
    """
    def __init__(self, n, nFeats, nModules, inputResH, inputResW, net_type, B, C):
        super(Hourglass, self).__init__()
        # Choose residual type per branch depending on remaining depth.
        self.ResidualUp = ResidualPyramid if n >= 2 else Residual
        self.ResidualDown = ResidualPyramid if n >= 3 else Residual
        self.depth = n
        self.nModules = nModules
        self.nFeats = nFeats
        self.net_type = net_type
        self.B = B  # baseWidth for pyramid residuals
        self.C = C  # cardinality for pyramid residuals
        self.inputResH = inputResH
        self.inputResW = inputResW
        up1 = self._make_residual(self.ResidualUp, False, inputResH, inputResW)
        low1 = nn.Sequential(
            nn.MaxPool2d(2),
            self._make_residual(self.ResidualDown, False, inputResH / 2, inputResW / 2)
        )
        if n > 1:
            # Recurse one level down at half resolution.
            low2 = Hourglass(n - 1, nFeats, nModules, inputResH / 2, inputResW / 2, net_type, B, C)
        else:
            low2 = self._make_residual(self.ResidualDown, False, inputResH / 2, inputResW / 2)
        low3 = self._make_residual(self.ResidualDown, True, inputResH / 2, inputResW / 2)
        up2 = nn.UpsamplingNearest2d(scale_factor=2)
        self.upperBranch = up1
        self.lowerBranch = nn.Sequential(
            low1,
            low2,
            low3,
            up2
        )
    def _make_residual(self, resBlock, useConv, inputResH, inputResW):
        # Stack ``nModules`` residual blocks at constant width/resolution.
        layer_list = []
        for i in range(self.nModules):
            layer_list.append(resBlock(self.nFeats, self.nFeats, inputResH, inputResW,
                                       stride=1, net_type=self.net_type, useConv=useConv,
                                       baseWidth=self.B, cardinality=self.C))
        return nn.Sequential(*layer_list)
    def forward(self, x: Variable):
        # Elementwise sum of the two branches.
        up1 = self.upperBranch(x)
        up2 = self.lowerBranch(x)
        # out = up1 + up2
        out = torch.add(up1, up2)
        return out
class PyraNet(nn.Module):
    """Stacked-hourglass PyraNet with intermediate supervision (training
    variant: forward() returns the heatmaps of every stack).

    The stem downsamples the input 4x; each of the ``nStack`` hourglass
    stages emits an ``nClasses``-channel heatmap and (except the last) feeds
    remapped features/heatmaps back into the next stage.  Xavier init is
    applied only when ``opt.init`` is set; the construction/init order below
    is deliberate so the RNG stream stays reproducible.
    """
    def __init__(self):
        super(PyraNet, self).__init__()
        B, C = opt.baseWidth, opt.cardinality
        # Heatmap resolution = input resolution / 4 (stride-2 conv + maxpool).
        self.inputResH = opt.inputResH / 4
        self.inputResW = opt.inputResW / 4
        self.nStack = opt.nStack
        conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        if opt.init:
            nn.init.xavier_normal(conv1.weight, gain=math.sqrt(1 / 3))
        cnv1 = nn.Sequential(
            conv1,
            nn.BatchNorm2d(64),
            nn.ReLU(True)
        )
        r1 = nn.Sequential(
            ResidualPyramid(64, 128, opt.inputResH / 2, opt.inputResW / 2,
                            stride=1, net_type='no_preact', useConv=False, baseWidth=B, cardinality=C),
            nn.MaxPool2d(2)
        )
        r4 = ResidualPyramid(128, 128, self.inputResH, self.inputResW,
                             stride=1, net_type='preact', useConv=False, baseWidth=B, cardinality=C)
        r5 = ResidualPyramid(128, opt.nFeats, self.inputResH, self.inputResW,
                             stride=1, net_type='preact', useConv=False, baseWidth=B, cardinality=C)
        self.preact = nn.Sequential(
            cnv1,
            r1,
            r4,
            r5
        )
        # Per-stack layers live in registered ModuleLists so their parameters
        # are part of .parameters() / state_dict().
        self.stack_lin = nn.ModuleList()
        self.stack_out = nn.ModuleList()
        self.stack_lin_ = nn.ModuleList()
        self.stack_out_ = nn.ModuleList()
        for i in range(self.nStack):
            hg = Hourglass(4, opt.nFeats, opt.nResidual, self.inputResH, self.inputResW, 'preact', B, C)
            conv1 = nn.Conv2d(opt.nFeats, opt.nFeats, kernel_size=1, stride=1, padding=0)
            if opt.init:
                nn.init.xavier_normal(conv1.weight, gain=math.sqrt(1 / 2))
            lin = nn.Sequential(
                hg,
                nn.BatchNorm2d(opt.nFeats),
                nn.ReLU(True),
                conv1,
                nn.BatchNorm2d(opt.nFeats),
                nn.ReLU(True)
            )
            # Per-stack heatmap head.
            tmpOut = nn.Conv2d(opt.nFeats, opt.nClasses, kernel_size=1, stride=1, padding=0)
            if opt.init:
                nn.init.xavier_normal(tmpOut.weight)
            self.stack_lin.append(lin)
            self.stack_out.append(tmpOut)
            if i < self.nStack - 1:
                # Remapping convs that feed this stack's features/heatmaps
                # into the next stack's input.
                lin_ = nn.Conv2d(opt.nFeats, opt.nFeats, kernel_size=1, stride=1, padding=0)
                tmpOut_ = nn.Conv2d(opt.nClasses, opt.nFeats, kernel_size=1, stride=1, padding=0)
                if opt.init:
                    nn.init.xavier_normal(lin_.weight)
                    nn.init.xavier_normal(tmpOut_.weight)
                self.stack_lin_.append(lin_)
                self.stack_out_.append(tmpOut_)
    def forward(self, x: Variable):
        """Return a list of ``nStack`` heatmap tensors (one per stage)."""
        out = []
        inter = self.preact(x)
        for i in range(self.nStack):
            lin = self.stack_lin[i](inter)
            tmpOut = self.stack_out[i](lin)
            out.append(tmpOut)
            if i < self.nStack - 1:
                lin_ = self.stack_lin_[i](lin)
                tmpOut_ = self.stack_out_[i](tmpOut)
                inter = inter + lin_ + tmpOut_
        return out
class PyraNet_Inference(nn.Module):
    """Inference-only PyraNet: identical architecture to :class:`PyraNet`, but
    forward() returns only the final stack's heatmap tensor instead of the
    list of all intermediate outputs (no intermediate supervision needed at
    test time)."""
    def __init__(self):
        super(PyraNet_Inference, self).__init__()
        B, C = opt.baseWidth, opt.cardinality
        # Heatmap resolution = input resolution / 4 (stride-2 conv + maxpool).
        self.inputResH = opt.inputResH / 4
        self.inputResW = opt.inputResW / 4
        self.nStack = opt.nStack
        conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        if opt.init:
            nn.init.xavier_normal(conv1.weight, gain=math.sqrt(1 / 3))
        cnv1 = nn.Sequential(
            conv1,
            nn.BatchNorm2d(64),
            nn.ReLU(True)
        )
        r1 = nn.Sequential(
            ResidualPyramid(64, 128, opt.inputResH / 2, opt.inputResW / 2,
                            stride=1, net_type='no_preact', useConv=False, baseWidth=B, cardinality=C),
            nn.MaxPool2d(2)
        )
        r4 = ResidualPyramid(128, 128, self.inputResH, self.inputResW,
                             stride=1, net_type='preact', useConv=False, baseWidth=B, cardinality=C)
        r5 = ResidualPyramid(128, opt.nFeats, self.inputResH, self.inputResW,
                             stride=1, net_type='preact', useConv=False, baseWidth=B, cardinality=C)
        self.preact = nn.Sequential(
            cnv1,
            r1,
            r4,
            r5
        )
        # Per-stack layers in registered ModuleLists (same layout as PyraNet,
        # so trained checkpoints load directly).
        self.stack_lin = nn.ModuleList()
        self.stack_out = nn.ModuleList()
        self.stack_lin_ = nn.ModuleList()
        self.stack_out_ = nn.ModuleList()
        for i in range(self.nStack):
            hg = Hourglass(4, opt.nFeats, opt.nResidual,
                           self.inputResH, self.inputResW, 'preact', B, C)
            conv1 = nn.Conv2d(opt.nFeats, opt.nFeats,
                              kernel_size=1, stride=1, padding=0)
            if opt.init:
                nn.init.xavier_normal(conv1.weight, gain=math.sqrt(1 / 2))
            lin = nn.Sequential(
                hg,
                nn.BatchNorm2d(opt.nFeats),
                nn.ReLU(True),
                conv1,
                nn.BatchNorm2d(opt.nFeats),
                nn.ReLU(True)
            )
            # Per-stack heatmap head.
            tmpOut = nn.Conv2d(opt.nFeats, opt.nClasses,
                               kernel_size=1, stride=1, padding=0)
            if opt.init:
                nn.init.xavier_normal(tmpOut.weight)
            self.stack_lin.append(lin)
            self.stack_out.append(tmpOut)
            if i < self.nStack - 1:
                # Remapping convs that feed this stack's outputs into the next.
                lin_ = nn.Conv2d(opt.nFeats, opt.nFeats,
                                 kernel_size=1, stride=1, padding=0)
                tmpOut_ = nn.Conv2d(opt.nClasses, opt.nFeats,
                                    kernel_size=1, stride=1, padding=0)
                if opt.init:
                    nn.init.xavier_normal(lin_.weight)
                    nn.init.xavier_normal(tmpOut_.weight)
                self.stack_lin_.append(lin_)
                self.stack_out_.append(tmpOut_)
    def forward(self, x: Variable):
        """Return only the final stack's heatmap tensor."""
        inter = self.preact(x)
        for i in range(self.nStack):
            lin = self.stack_lin[i](inter)
            tmpOut = self.stack_out[i](lin)
            # Keep overwriting: after the loop, ``out`` is the last stack's output.
            out = tmpOut
            if i < self.nStack - 1:
                lin_ = self.stack_lin_[i](lin)
                tmpOut_ = self.stack_out_[i](tmpOut)
                inter = inter + lin_ + tmpOut_
        return out
def createModel(**kw):
    """Factory: build a training-mode PyraNet; extra kwargs are accepted and ignored."""
    return PyraNet()
def createModel_Inference(**kw):
    """Factory: build an inference-only PyraNet; extra kwargs are accepted and ignored."""
    return PyraNet_Inference()
| 8,687 | 35.658228 | 104 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/SPPE/src/models/layers/Resnet.py | import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """ResNet bottleneck unit: 1x1 reduce -> 3x3 (strided) -> 1x1 expand (4x),
    with an optional downsample module on the skip path."""

    # Output channels = planes * expansion.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, stride=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Skip path: identity, or the downsample projection when shapes differ.
        identity = x if self.downsample is None else self.downsample(x)
        y = F.relu(self.bn1(self.conv1(x)), inplace=True)
        y = F.relu(self.bn2(self.conv2(y)), inplace=True)
        y = self.bn3(self.conv3(y))
        y += identity
        return F.relu(y, inplace=True)
class ResNet(nn.Module):
    """Bottleneck-based ResNet backbone (resnet50 or resnet101) returning the
    final 2048-channel feature map (no pooling/classifier head)."""

    def __init__(self, architecture):
        super(ResNet, self).__init__()
        assert architecture in ["resnet50", "resnet101"]
        # The two variants differ only in the depth of the third stage.
        layer3_depth = {"resnet50": 6, "resnet101": 23}[architecture]
        self.inplanes = 64
        self.layers = [3, 4, layer3_depth, 3]
        self.block = Bottleneck
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64, eps=1e-5, momentum=0.01, affine=True)
        self.relu = nn.ReLU(inplace=True)
        # NOTE(review): no padding on the stem max-pool (torchvision uses
        # padding=1); kept as-is for compatibility with existing weights.
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)
        self.layer1 = self.make_layer(self.block, 64, self.layers[0])
        self.layer2 = self.make_layer(self.block, 128, self.layers[1], stride=2)
        self.layer3 = self.make_layer(self.block, 256, self.layers[2], stride=2)
        self.layer4 = self.make_layer(self.block, 512, self.layers[3], stride=2)

    def forward(self, x):
        out = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        return out

    def stages(self):
        # Expose the four residual stages (used e.g. for feature extraction).
        return [self.layer1, self.layer2, self.layer3, self.layer4]

    def make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage of ``blocks`` bottleneck units; the first
        unit carries the stride and (if needed) a projection shortcut."""
        shortcut = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            shortcut = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        units = [block(self.inplanes, planes, stride, shortcut)]
        self.inplanes = planes * block.expansion
        units.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*units)
| 2,957 | 34.638554 | 99 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/SPPE/src/models/layers/util_models.py | import torch
import torch.nn as nn
from torch.autograd import Variable
class ConcatTable(nn.Module):
    """Apply every branch module to the same input and return the list of
    branch outputs (Torch7 ConcatTable equivalent)."""

    def __init__(self, module_list=None):
        super(ConcatTable, self).__init__()
        self.modules_list = nn.ModuleList(module_list)

    def forward(self, x: Variable):
        return [branch(x) for branch in self.modules_list]

    def add(self, module):
        # Append one more parallel branch.
        self.modules_list.append(module)
class CaddTable(nn.Module):
    """Elementwise sum of a list of same-shaped tensors (Torch7 CAddTable).
    ``inplace`` is stored for interface compatibility but not used."""

    def __init__(self, inplace=False):
        super(CaddTable, self).__init__()
        self.inplace = inplace

    def forward(self, x: Variable or list):
        # Stack along a new leading dim, then reduce it away.
        return torch.sum(torch.stack(x, dim=0), dim=0)
class Identity(nn.Module):
    """Pass-through module; optionally carries a ParameterList so arbitrary
    parameters can be attached without affecting the forward pass."""

    def __init__(self, params=None):
        super(Identity, self).__init__()
        self.params = nn.ParameterList(params)

    def forward(self, x: Variable or list):
        # Return the input untouched.
        return x
| 920 | 23.236842 | 54 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/SPPE/src/models/layers/SE_module.py | from torch import nn
class SELayer(nn.Module):
    """Squeeze-and-Excitation gate: global average pool -> two-layer MLP with
    sigmoid -> per-channel rescaling of the input feature map."""

    def __init__(self, channel, reduction=1):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel),
            nn.Sigmoid()
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        # Squeeze: one scalar per channel.
        squeezed = self.avg_pool(x).view(batch, channels)
        # Excite: per-channel gate in (0, 1), broadcast over H x W.
        gate = self.fc(squeezed).view(batch, channels, 1, 1)
        return x * gate
| 552 | 26.65 | 53 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/SPPE/src/models/layers/Residual.py | import torch.nn as nn
import math
from .util_models import ConcatTable, CaddTable, Identity
from opt import opt
def Residual(numIn, numOut, *arg, stride=1, net_type='preact', useConv=False, **kw):
    """Residual unit: convBlock and skipLayer run in parallel on the same
    input, and their outputs are summed.  Extra positional/keyword arguments
    are accepted for signature compatibility and ignored."""
    branches = ConcatTable([convBlock(numIn, numOut, stride, net_type),
                            skipLayer(numIn, numOut, stride, useConv)])
    return nn.Sequential(branches, CaddTable(True))
def convBlock(numIn, numOut, stride, net_type):
    """Main branch of a residual unit: optional BN/ReLU pre-activation, then a
    1x1 reduce -> 3x3 (strided) -> 1x1 expand bottleneck.  Xavier init is
    applied per conv when ``opt.init`` is set; construction order is kept so
    the RNG stream matches the original."""
    mid = numOut // 2
    layers = []
    if net_type != 'no_preact':
        layers.append(nn.BatchNorm2d(numIn))
        layers.append(nn.ReLU(True))
    reduce_conv = nn.Conv2d(numIn, mid, kernel_size=1)
    if opt.init:
        nn.init.xavier_normal(reduce_conv.weight, gain=math.sqrt(1 / 2))
    layers.extend([reduce_conv, nn.BatchNorm2d(mid), nn.ReLU(True)])
    spatial_conv = nn.Conv2d(mid, mid, kernel_size=3, stride=stride, padding=1)
    if opt.init:
        nn.init.xavier_normal(spatial_conv.weight)
    layers.extend([spatial_conv, nn.BatchNorm2d(mid), nn.ReLU(True)])
    expand_conv = nn.Conv2d(mid, numOut, kernel_size=1)
    if opt.init:
        nn.init.xavier_normal(expand_conv.weight)
    layers.append(expand_conv)
    return nn.Sequential(*layers)
def skipLayer(numIn, numOut, stride, useConv):
    """Skip path of a residual unit: identity when shapes already match (and a
    projection is not forced via ``useConv``), otherwise BN/ReLU + 1x1 conv."""
    can_pass_through = (numIn == numOut) and (stride == 1) and (not useConv)
    if can_pass_through:
        return Identity()
    projection = nn.Conv2d(numIn, numOut, kernel_size=1, stride=stride)
    if opt.init:
        nn.init.xavier_normal(projection.weight, gain=math.sqrt(1 / 2))
    return nn.Sequential(
        nn.BatchNorm2d(numIn),
        nn.ReLU(True),
        projection
    )
| 1,684 | 29.636364 | 88 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/SPPE/src/models/layers/PRM.py | import torch.nn as nn
from .util_models import ConcatTable, CaddTable, Identity
import math
from opt import opt
class Residual(nn.Module):
    """Pyramid residual unit: a two-branch conv block (bottleneck + pyramid)
    summed with a skip path."""

    def __init__(self, numIn, numOut, inputResH, inputResW, stride=1,
                 net_type='preact', useConv=False, baseWidth=9, cardinality=4):
        super(Residual, self).__init__()
        main_branch = convBlock(numIn, numOut, inputResH, inputResW,
                                net_type, baseWidth, cardinality, stride)
        skip_branch = skipLayer(numIn, numOut, stride, useConv)
        self.con = ConcatTable([main_branch, skip_branch])
        self.cadd = CaddTable(True)

    def forward(self, x):
        # Run both branches on x, then sum their outputs elementwise.
        return self.cadd(self.con(x))
def convBlock(numIn, numOut, inputResH, inputResW, net_type, baseWidth, cardinality, stride):
    """PRM conv branch: two parallel sub-branches (plain bottleneck + pyramid),
    element-wise added, then BN-ReLU-1x1 expanded to numOut channels."""
    numIn = int(numIn)
    numOut = int(numOut)
    addTable = ConcatTable()
    # --- Sub-branch 1: standard (BN-ReLU-)1x1 -> 3x3(stride) bottleneck ---
    s_list = []
    if net_type != 'no_preact':
        s_list.append(nn.BatchNorm2d(numIn))
        s_list.append(nn.ReLU(True))
    conv1 = nn.Conv2d(numIn, numOut // 2, kernel_size=1)
    if opt.init:
        nn.init.xavier_normal(conv1.weight, gain=math.sqrt(1 / 2))
    s_list.append(conv1)
    s_list.append(nn.BatchNorm2d(numOut // 2))
    s_list.append(nn.ReLU(True))
    conv2 = nn.Conv2d(numOut // 2, numOut // 2,
                      kernel_size=3, stride=stride, padding=1)
    if opt.init:
        nn.init.xavier_normal(conv2.weight)
    s_list.append(conv2)
    s = nn.Sequential(*s_list)
    addTable.add(s)
    # --- Sub-branch 2: reduce to D channels, multi-scale pyramid, expand back ---
    D = math.floor(numOut // baseWidth)  # channel width of the pyramid branch
    C = cardinality                       # number of pyramid scales
    s_list = []
    if net_type != 'no_preact':
        s_list.append(nn.BatchNorm2d(numIn))
        s_list.append(nn.ReLU(True))
    conv1 = nn.Conv2d(numIn, D, kernel_size=1, stride=stride)
    if opt.init:
        nn.init.xavier_normal(conv1.weight, gain=math.sqrt(1 / C))
    s_list.append(conv1)
    s_list.append(nn.BatchNorm2d(D))
    s_list.append(nn.ReLU(True))
    s_list.append(pyramid(D, C, inputResH, inputResW))
    s_list.append(nn.BatchNorm2d(D))
    s_list.append(nn.ReLU(True))
    a = nn.Conv2d(D, numOut // 2, kernel_size=1)
    a.nBranchIn = C  # bookkeeping tag read elsewhere (e.g. custom init); not used here
    if opt.init:
        nn.init.xavier_normal(a.weight, gain=math.sqrt(1 / C))
    s_list.append(a)
    s = nn.Sequential(*s_list)
    addTable.add(s)
    # Sum the two sub-branches, then expand to the full output width.
    elewiswAdd = nn.Sequential(
        addTable,
        CaddTable(False)
    )
    conv2 = nn.Conv2d(numOut // 2, numOut, kernel_size=1)
    if opt.init:
        nn.init.xavier_normal(conv2.weight, gain=math.sqrt(1 / 2))
    model = nn.Sequential(
        elewiswAdd,
        nn.BatchNorm2d(numOut // 2),
        nn.ReLU(True),
        conv2
    )
    return model
def pyramid(D, C, inputResH, inputResW):
    """PRM pyramid: C parallel scales (fractional pool -> 3x3 conv -> bilinear upsample), summed."""
    branches = ConcatTable()
    base = math.pow(2, 1 / C)
    for level in range(C):
        ratio = 1 / math.pow(base, level + 1)
        conv = nn.Conv2d(D, D, kernel_size=3, stride=1, padding=1)
        if opt.init:
            nn.init.xavier_normal(conv.weight)
        branches.add(nn.Sequential(
            nn.FractionalMaxPool2d(2, output_ratio=(ratio, ratio)),
            conv,
            nn.UpsamplingBilinear2d(size=(int(inputResH), int(inputResW)))))
    return nn.Sequential(branches, CaddTable(False))
class skipLayer(nn.Module):
    """Shortcut branch: pass-through when shapes match, else BN-ReLU-1x1 projection."""

    def __init__(self, numIn, numOut, stride, useConv):
        super(skipLayer, self).__init__()
        self.identity = (numIn == numOut and stride == 1 and not useConv)
        if not self.identity:
            conv1 = nn.Conv2d(numIn, numOut, kernel_size=1, stride=stride)
            if opt.init:
                nn.init.xavier_normal(conv1.weight, gain=math.sqrt(1 / 2))
            # Name `m` kept for state-dict compatibility.
            self.m = nn.Sequential(
                nn.BatchNorm2d(numIn),
                nn.ReLU(True),
                conv1
            )

    def forward(self, x):
        return x if self.identity else self.m(x)
| 3,974 | 28.227941 | 95 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/SPPE/src/models/layers/SE_Resnet.py | import torch.nn as nn
from .SE_module import SELayer
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """SE-ResNet bottleneck: 1x1 reduce -> 3x3(stride) -> 1x1 expand (x4),
    optional squeeze-excitation, plus (optionally projected) shortcut."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=False):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        if reduction:
            self.se = SELayer(planes * 4)
        self.reduc = reduction
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = F.relu(self.bn1(self.conv1(x)), inplace=True)
        y = F.relu(self.bn2(self.conv2(y)), inplace=True)
        y = self.bn3(self.conv3(y))
        if self.reduc:
            y = self.se(y)
        return F.relu(y + shortcut)
class SEResnet(nn.Module):
    """ SEResnet """
    # Backbone producing 2048-channel, stride-32 feature maps (resnet50 / resnet101).

    def __init__(self, architecture):
        super(SEResnet, self).__init__()
        assert architecture in ["resnet50", "resnet101"]
        self.inplanes = 64
        self.layers = [3, 4, {"resnet50": 6, "resnet101": 23}[architecture], 3]
        self.block = Bottleneck
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7,
                               stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64, eps=1e-5, momentum=0.01, affine=True)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self.make_layer(self.block, 64, self.layers[0])
        self.layer2 = self.make_layer(self.block, 128, self.layers[1], stride=2)
        self.layer3 = self.make_layer(self.block, 256, self.layers[2], stride=2)
        self.layer4 = self.make_layer(self.block, 512, self.layers[3], stride=2)

    def forward(self, x):
        out = self.maxpool(self.relu(self.bn1(self.conv1(x))))  # 64 * h/4 * w/4
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        return out  # 2048 * h/32 * w/32

    def stages(self):
        """Return the four residual stages, shallowest first."""
        return [self.layer1, self.layer2, self.layer3, self.layer4]

    def make_layer(self, block, planes, blocks, stride=1):
        """Stack *blocks* bottlenecks; the first one carries the stride/projection
        and, when projecting, the SE reduction."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        if downsample is not None:
            layers.append(block(self.inplanes, planes, stride, downsample, reduction=True))
        else:
            layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        layers.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*layers)
| 3,491 | 33.92 | 91 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/SPPE/src/models/layers/DUC.py | import torch.nn as nn
import torch.nn.functional as F
class DUC(nn.Module):
    '''
    Dense Upsampling Convolution: 3x3 conv -> BN -> ReLU -> PixelShuffle.
    INPUT: inplanes, planes, upscale_factor
    OUTPUT: (planes // upscale_factor**2) * (h * upscale_factor) * (w * upscale_factor)
    '''
    def __init__(self, inplanes, planes, upscale_factor=2):
        super(DUC, self).__init__()
        self.conv = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1, bias=False)
        self.bn = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU()
        self.pixel_shuffle = nn.PixelShuffle(upscale_factor)

    def forward(self, x):
        return self.pixel_shuffle(self.relu(self.bn(self.conv(x))))
| 639 | 25.666667 | 85 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/SPPE/src/utils/img.py | import numpy as np
import cv2
import torch
import scipy.misc
from torchvision import transforms
import torch.nn.functional as F
from scipy.ndimage import maximum_filter
from PIL import Image
from copy import deepcopy
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
def im_to_torch(img):
    """HWC numpy image -> CHW float tensor, rescaled to [0, 1] when given 0-255 data."""
    chw = np.transpose(img, (2, 0, 1))  # C*H*W
    tensor = to_torch(chw).float()
    if tensor.max() > 1:
        tensor /= 255
    return tensor
def torch_to_im(img):
    """CHW tensor (or array) -> HWC numpy image."""
    arr = to_numpy(img)
    return np.transpose(arr, (1, 2, 0))  # C*H*W -> H*W*C
def load_image(img_path):
    """Load an RGB image file as a CHW float tensor in [0, 1]."""
    # H x W x C => C x H x W
    # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2; this requires
    # an old SciPy with Pillow installed. Migrate to imageio/cv2 when upgrading.
    return im_to_torch(scipy.misc.imread(img_path, mode='RGB'))
def to_numpy(tensor):
    """Return *tensor* as a numpy array; torch tensors are moved to CPU first."""
    if torch.is_tensor(tensor):
        return tensor.cpu().numpy()
    if type(tensor).__module__ != 'numpy':
        raise ValueError("Cannot convert {} to numpy array"
                         .format(type(tensor)))
    return tensor
def to_torch(ndarray):
    """Return *ndarray* as a torch tensor; numpy arrays are wrapped without copy."""
    if type(ndarray).__module__ == 'numpy':
        return torch.from_numpy(ndarray)
    if not torch.is_tensor(ndarray):
        raise ValueError("Cannot convert {} to torch tensor"
                         .format(type(ndarray)))
    return ndarray
def drawCircle(img, pt, sigma):
    """Stamp a solid square patch of ones (binarised gaussian support) at *pt* onto *img*."""
    img = to_numpy(img)
    half = 3 * sigma
    # Patch corners in image coordinates.
    ul = [int(pt[0] - half), int(pt[1] - half)]
    br = [int(pt[0] + half + 1), int(pt[1] + half + 1)]
    # Patch entirely outside the image: nothing to draw.
    if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or
            br[0] < 0 or br[1] < 0):
        return to_torch(img)
    size = 2 * half + 1
    xs = np.arange(0, size, 1, float)
    ys = xs[:, np.newaxis]
    c = size // 2
    sigma = size / 4.0
    # Unnormalised gaussian (center value 1), then binarised over its whole support.
    g = np.exp(- ((xs - c) ** 2 + (ys - c) ** 2) / (2 * sigma ** 2))
    g[g > 0] = 1
    # Intersect the patch with the image bounds.
    g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
    g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
    img_x = max(0, ul[0]), min(br[0], img.shape[1])
    img_y = max(0, ul[1]), min(br[1], img.shape[0])
    img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
    return to_torch(img)
def drawGaussian(img, pt, sigma):
    """Stamp an unnormalised gaussian (peak value 1) centred at *pt* onto *img*."""
    img = to_numpy(img)
    half = 3 * sigma
    # Patch corners in image coordinates.
    ul = [int(pt[0] - half), int(pt[1] - half)]
    br = [int(pt[0] + half + 1), int(pt[1] + half + 1)]
    # Patch entirely outside the image: nothing to draw.
    if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or
            br[0] < 0 or br[1] < 0):
        return to_torch(img)
    size = 2 * half + 1
    xs = np.arange(0, size, 1, float)
    ys = xs[:, np.newaxis]
    c = size // 2
    sigma = size / 4.0
    # The gaussian is not normalised; the centre value equals 1.
    g = np.exp(- ((xs - c) ** 2 + (ys - c) ** 2) / (2 * sigma ** 2))
    # Intersect the patch with the image bounds.
    g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
    g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
    img_x = max(0, ul[0]), min(br[0], img.shape[1])
    img_y = max(0, ul[1]), min(br[1], img.shape[0])
    img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
    return to_torch(img)
def drawBigCircle(img, pt, sigma):
    """Stamp a gaussian whose responses above 0.4 are clamped to 1 (a wide plateau)."""
    img = to_numpy(img)
    half = 3 * sigma
    # Patch corners in image coordinates.
    ul = [int(pt[0] - half), int(pt[1] - half)]
    br = [int(pt[0] + half + 1), int(pt[1] + half + 1)]
    # Patch entirely outside the image: nothing to draw.
    if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or
            br[0] < 0 or br[1] < 0):
        return to_torch(img)
    size = 2 * half + 1
    xs = np.arange(0, size, 1, float)
    ys = xs[:, np.newaxis]
    c = size // 2
    sigma = size / 4.0
    # Unnormalised gaussian; values above 0.4 become a flat plateau of ones.
    g = np.exp(- ((xs - c) ** 2 + (ys - c) ** 2) / (2 * sigma ** 2))
    g[g > 0.4] = 1
    # Intersect the patch with the image bounds.
    g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
    g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
    img_x = max(0, ul[0]), min(br[0], img.shape[1])
    img_y = max(0, ul[1]), min(br[1], img.shape[0])
    img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
    return to_torch(img)
def drawSmallCircle(img, pt, sigma):
    """Stamp a gaussian whose responses above 0.5 are clamped to 1 (a narrow plateau)."""
    img = to_numpy(img)
    half = 3 * sigma
    # Patch corners in image coordinates.
    ul = [int(pt[0] - half), int(pt[1] - half)]
    br = [int(pt[0] + half + 1), int(pt[1] + half + 1)]
    # Patch entirely outside the image: nothing to draw.
    if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or
            br[0] < 0 or br[1] < 0):
        return to_torch(img)
    size = 2 * half + 1
    xs = np.arange(0, size, 1, float)
    ys = xs[:, np.newaxis]
    c = size // 2
    sigma = size / 4.0
    # Unnormalised gaussian; values above 0.5 become a flat plateau of ones.
    g = np.exp(- ((xs - c) ** 2 + (ys - c) ** 2) / (2 * sigma ** 2))
    g[g > 0.5] = 1
    # Intersect the patch with the image bounds.
    g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
    g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
    img_x = max(0, ul[0]), min(br[0], img.shape[1])
    img_y = max(0, ul[1]), min(br[1], img.shape[0])
    img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
    return to_torch(img)
def transformBox(pt, ul, br, inpH, inpW, resH, resW):
    """Map image-space point *pt* inside box (ul, br) to integer resH x resW grid coords."""
    # Box centre relative to its own upper-left corner.
    center = torch.zeros(2)
    center[0] = (br[0] - 1 - ul[0]) / 2
    center[1] = (br[1] - 1 - ul[1]) / 2
    # Side lengths of the aspect-preserving (inpH:inpW) padded crop.
    lenH = max(br[1] - ul[1], (br[0] - ul[0]) * inpH / inpW)
    lenW = lenH * inpW / inpH
    shifted = torch.zeros(2)
    shifted[0] = pt[0] - ul[0]
    shifted[1] = pt[1] - ul[1]
    # Move to center: account for the symmetric padding added around the box.
    shifted[0] = shifted[0] + max(0, (lenW - 1) / 2 - center[0])
    shifted[1] = shifted[1] + max(0, (lenH - 1) / 2 - center[1])
    out = (shifted * resH) / lenH
    out[0] = round(float(out[0]))
    out[1] = round(float(out[1]))
    return out.int()
def transformBoxInvert(pt, ul, br, inpH, inpW, resH, resW):
    """Inverse of transformBox: map resH x resW grid coords back into image space."""
    # Box centre relative to its own upper-left corner.
    center = np.zeros(2)
    center[0] = (br[0] - 1 - ul[0]) / 2
    center[1] = (br[1] - 1 - ul[1]) / 2
    # Side lengths of the aspect-preserving padded crop.
    lenH = max(br[1] - ul[1], (br[0] - ul[0]) * inpH / inpW)
    lenW = lenH * inpW / inpH
    scaled = (pt * lenH) / resH
    # Undo the centering padding, then shift back by the box corner.
    scaled[0] = scaled[0] - max(0, (lenW - 1) / 2 - center[0])
    scaled[1] = scaled[1] - max(0, (lenH - 1) / 2 - center[1])
    restored = np.zeros(2)
    restored[0] = scaled[0] + ul[0]
    restored[1] = scaled[1] + ul[1]
    return restored
def transformBoxInvert_batch(pt, ul, br, inpH, inpW, resH, resW):
    '''
    Batched inverse crop transform (heatmap coords -> image coords).
    pt: [n, 17, 2]
    ul: [n, 2]
    br: [n, 2]
    Returns an [n, 17, 2] float tensor.
    '''
    center = (br - 1 - ul) / 2
    size = br - ul
    size[:, 0] *= (inpH / inpW)
    lenH, _ = torch.max(size, dim=1)  # [n,] longer side of each padded crop
    lenW = lenH * (inpW / inpH)
    mapped = (pt * lenH[:, np.newaxis, np.newaxis]) / resH
    # Per-joint padding offsets (zero when the box already matches the aspect ratio).
    pad_w = ((lenW[:, np.newaxis].repeat(1, 17) - 1) / 2
             - center[:, 0].unsqueeze(-1).repeat(1, 17)).clamp(min=0)
    pad_h = ((lenH[:, np.newaxis].repeat(1, 17) - 1) / 2
             - center[:, 1].unsqueeze(-1).repeat(1, 17)).clamp(min=0)
    restored = torch.zeros(pt.size())
    restored[:, :, 0] = mapped[:, :, 0] - pad_w + ul[:, 0].unsqueeze(-1).repeat(1, 17)
    restored[:, :, 1] = mapped[:, :, 1] - pad_h + ul[:, 1].unsqueeze(-1).repeat(1, 17)
    return restored
def cropBox(img, ul, br, resH, resW):
    """Crop box (ul, br) out of CHW image *img* and warp it to resH x resW.

    The box is symmetrically zero-padded to the resH:resW aspect ratio and
    everything outside the box is blanked before the affine warp.
    Returns a CHW float tensor.
    """
    ul = ul.int()
    br = (br - 1).int()
    # br = br.int()
    # Side lengths of the aspect-preserving padded crop.
    lenH = max((br[1] - ul[1]).item(), (br[0] - ul[0]).item() * resH / resW)
    lenW = lenH * resW / resH
    if img.dim() == 2:
        img = img[np.newaxis, :]  # promote grayscale H*W to 1*H*W
    box_shape = [(br[1] - ul[1]).item(), (br[0] - ul[0]).item()]
    pad_size = [(lenH - box_shape[0]) // 2, (lenW - box_shape[1]) // 2]
    # Padding Zeros
    # NOTE: these slice assignments blank *img* in place outside the box —
    # callers that still need the full image must pass a clone.
    if ul[1] > 0:
        img[:, :ul[1], :] = 0
    if ul[0] > 0:
        img[:, :, :ul[0]] = 0
    if br[1] < img.shape[1] - 1:
        img[:, br[1] + 1:, :] = 0
    if br[0] < img.shape[2] - 1:
        img[:, :, br[0] + 1:] = 0
    # Two corners plus a third perpendicular point define the affine transform.
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = np.array(
        [ul[0] - pad_size[1], ul[1] - pad_size[0]], np.float32)
    src[1, :] = np.array(
        [br[0] + pad_size[1], br[1] + pad_size[0]], np.float32)
    dst[0, :] = 0
    dst[1, :] = np.array([resW - 1, resH - 1], np.float32)
    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
    trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
    dst_img = cv2.warpAffine(torch_to_im(img), trans,
                             (resW, resH), flags=cv2.INTER_LINEAR)
    return im_to_torch(torch.Tensor(dst_img))
def cv_rotate(img, rot, resW, resH):
    """Rotate CHW image *img* by *rot* degrees about its centre via an affine warp."""
    center = np.array((resW - 1, resH - 1)) / 2
    rot_rad = np.pi * rot / 180
    # The rotated "up" vector plus the centre define the source triangle.
    src_dir = get_dir([0, (resH - 1) * -0.5], rot_rad)
    dst_dir = np.array([0, (resH - 1) * -0.5], np.float32)
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center
    src[1, :] = center + src_dir
    dst[0, :] = [(resW - 1) * 0.5, (resH - 1) * 0.5]
    dst[1, :] = np.array([(resW - 1) * 0.5, (resH - 1) * 0.5]) + dst_dir
    # Third point completes the right triangle for getAffineTransform.
    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
    trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
    dst_img = cv2.warpAffine(torch_to_im(img), trans,
                             (resW, resH), flags=cv2.INTER_LINEAR)
    return im_to_torch(torch.Tensor(dst_img))
def flip(x):
    """Flip a CHW / NCHW tensor along its last (width) axis."""
    assert (x.dim() == 3 or x.dim() == 4)
    dim = x.dim() - 1
    # tensor.flip exists from torch 0.4.1 on; fall back to numpy otherwise.
    if '0.4.1' in torch.__version__ or '1.0' in torch.__version__:
        return x.flip(dims=(dim,))
    was_cuda = x.is_cuda
    if was_cuda:
        x = x.cpu()
    arr = x.numpy().copy()
    if arr.ndim == 3:
        arr = np.transpose(np.fliplr(np.transpose(arr, (0, 2, 1))), (0, 2, 1))
    else:  # ndim == 4, guaranteed by the assert above
        for i in range(arr.shape[0]):
            arr[i] = np.transpose(
                np.fliplr(np.transpose(arr[i], (0, 2, 1))), (0, 2, 1))
    out = torch.from_numpy(arr.copy())
    return out.cuda() if was_cuda else out
def shuffleLR(x, dataset):
    """Swap left/right joint channels in place per dataset.flipRef (1-based pairs)."""
    assert (x.dim() == 3 or x.dim() == 4)
    for left, right in dataset.flipRef:
        a = left - 1
        b = right - 1
        if x.dim() == 4:
            tmp = x[:, b].clone()
            x[:, b] = x[:, a].clone()
            x[:, a] = tmp.clone()
        else:
            tmp = x[b].clone()
            x[b] = x[a].clone()
            x[a] = tmp.clone()
    return x
def drawMPII(inps, preds):
    """Plot the first image of a batch with its 16 predicted MPII joints.

    inps:  (N, C, H, W) image tensor batch.
    preds: (N, 16, 2) joint coordinates in input-pixel space.
    Returns the list of HWC numpy images for the whole batch.
    """
    assert inps.dim() == 4
    # Per-joint marker colors, index-aligned with the 16 MPII joints.
    # (A second, 17-entry color list that was immediately overwritten has been removed.)
    p_color = ['r', 'r', 'r', 'b', 'b', 'b',
               'black', 'black', 'black', 'black',
               'y', 'y', 'white', 'white', 'g', 'g']
    nImg = inps.size(0)
    imgs = []
    for n in range(nImg):
        img = to_numpy(inps[n])
        img = np.transpose(img, (1, 2, 0))  # CHW -> HWC for matplotlib
        imgs.append(img)
    fig = plt.figure()
    plt.imshow(imgs[0])
    ax = fig.add_subplot(1, 1, 1)
    for p in range(16):
        x, y = preds[0][p]
        cor = (round(x), round(y)), 10
        ax.add_patch(plt.Circle(*cor, color=p_color[p]))
    plt.axis('off')
    plt.show()
    return imgs
def drawCOCO(inps, preds, scores):
    """Plot the first image of a batch with its 17 COCO joints (score >= 0.2 only).

    inps:   (N, C, H, W) image batch.
    preds:  (N, 17, 2) joint coordinates in input-pixel space.
    scores: (N, 17, 1) per-joint confidences.
    Returns the list of HWC numpy images for the whole batch.
    """
    assert inps.dim() == 4
    # Per-joint marker colors, index-aligned with the 17 COCO keypoints.
    p_color = ['g', 'b', 'purple', 'b', 'purple',
               'y', 'orange', 'y', 'orange', 'y', 'orange',
               'pink', 'r', 'pink', 'r', 'pink', 'r']
    nImg = inps.size(0)
    imgs = []
    for n in range(nImg):
        img = to_numpy(inps[n])
        img = np.transpose(img, (1, 2, 0))  # CHW -> HWC for matplotlib
        imgs.append(img)
    fig = plt.figure()
    plt.imshow(imgs[0])
    ax = fig.add_subplot(1, 1, 1)
    #print(preds.shape)
    for p in range(17):
        if scores[0][p][0] < 0.2:
            continue  # skip low-confidence joints
        x, y = preds[0][p]
        cor = (round(x), round(y)), 3
        ax.add_patch(plt.Circle(*cor, color=p_color[p]))
    plt.axis('off')
    plt.show()
    return imgs
def get_3rd_point(a, b):
    """Third corner of a right triangle: b plus (a - b) rotated 90 degrees."""
    d = a - b
    return b + np.array([-d[1], d[0]], dtype=np.float32)
def get_dir(src_point, rot_rad):
    """Rotate 2-D *src_point* by *rot_rad* radians; returns a [x, y] list."""
    sn, cs = np.sin(rot_rad), np.cos(rot_rad)
    return [src_point[0] * cs - src_point[1] * sn,
            src_point[0] * sn + src_point[1] * cs]
def findPeak(hm):
    """Local maxima of heatmap *hm* with response > 0.1.

    Each row of the returned tensor is [x, y, score]; rows are ordered by
    descending score.  Returns an empty tensor when no peak qualifies.
    """
    local_max = maximum_filter(hm, size=5)
    peaks = [[x, y, hm[y][x]]
             for (y, x) in zip(*np.where((local_max == hm) * (hm > 0.1)))]
    if not peaks:
        return torch.zeros(0)
    peaks = np.array(peaks)
    peaks = peaks[np.lexsort(-peaks.T)]
    return torch.Tensor(peaks)
def processPeaks(candidate_points, hm, pt1, pt2, inpH, inpW, resH, resW):
    # type: (Tensor, Tensor, Tensor, Tensor, float, float, float, float) -> List[Tensor]
    """Refine candidate peaks of one heatmap and map them back to image space.

    candidate_points: [x, y, score] rows from findPeak (may be empty);
    falls back to the global argmax when there is no candidate.
    pt1 / pt2 are the crop-box corners used for the inverse transform.
    Returns a list of np.array([x, y, score]) in original-image coordinates.
    """
    if candidate_points.shape[0] == 0: # Low Response
        # No local maximum above threshold: use the single global argmax instead.
        maxval = np.max(hm.reshape(1, -1), 1)
        idx = np.argmax(hm.reshape(1, -1), 1)
        x = idx % resW
        y = int(idx / resW)
        candidate_points = np.zeros((1, 3))
        candidate_points[0, 0:1] = x
        candidate_points[0, 1:2] = y
        candidate_points[0, 2:3] = maxval
    res_pts = []
    for i in range(candidate_points.shape[0]):
        x, y, maxval = candidate_points[i][0], candidate_points[i][1], candidate_points[i][2]
        # Skip weak peaks once at least one point has been kept.
        if bool(maxval < 0.05) and len(res_pts) > 0:
            pass
        else:
            # Quarter-pixel refinement toward the higher-scoring neighbour.
            if bool(x > 0) and bool(x < resW - 2):
                if bool(hm[int(y)][int(x) + 1] - hm[int(y)][int(x) - 1] > 0):
                    x += 0.25
                elif bool(hm[int(y)][int(x) + 1] - hm[int(y)][int(x) - 1] < 0):
                    x -= 0.25
            if bool(y > 0) and bool(y < resH - 2):
                if bool(hm[int(y) + 1][int(x)] - hm[int(y) - 1][int(x)] > 0):
                    y += (0.25 * inpH / inpW)
                elif bool(hm[int(y) + 1][int(x)] - hm[int(y) - 1][int(x)] < 0):
                    y -= (0.25 * inpH / inpW)
            #pt = torch.zeros(2)
            pt = np.zeros(2)
            pt[0] = x + 0.2
            pt[1] = y + 0.2
            # Map from heatmap coordinates back to original-image coordinates.
            pt = transformBoxInvert(pt, pt1, pt2, inpH, inpW, resH, resW)
            res_pt = np.zeros(3)
            res_pt[:2] = pt
            res_pt[2] = maxval
            res_pts.append(res_pt)
            # A weak peak may only be kept as the very first result.
            if maxval < 0.05:
                break
    return res_pts
| 15,424 | 29.973896 | 93 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/SPPE/src/utils/pose.py | from .img import (load_image, drawGaussian, drawBigCircle, drawSmallCircle, cv_rotate,
cropBox, transformBox, transformBoxInvert, flip, shuffleLR, drawCOCO)
from .eval import getPrediction
import torch
import numpy as np
import random
from opt import opt
def rnd(x):
    """One sample from N(0, x**2), clipped to [-2x, 2x] (random augmentation magnitude)."""
    draw = np.random.randn(1)[0] * x
    return max(-2 * x, min(2 * x, draw))
def generateSampleBox(img_path, bndbox, part, nJoints, imgset, scale_factor, dataset, train=True):
    """Load one sample, augment its person box, and rasterise joint heatmaps.

    img_path:     path to the image file.
    bndbox:       [[x1, y1, x2, y2]] person bounding box.
    part:         per-joint (x, y) coordinates in image space.
    nJoints:      total number of heatmap channels to allocate.
    imgset:       'coco', 'mpii', or a combined set (else-branch).
    scale_factor: (lo, hi) range for random box enlargement.
    dataset:      provides flipRef for left/right channel swapping.
    train:        enables color jitter, DPG sampling, flip, and rotation.
    Returns (inp, out_bigcircle, out_smallcircle, out, setMask).
    """
    nJoints_coco = 17
    nJoints_mpii = 16
    img = load_image(img_path)
    if train:
        # Random per-channel color jitter.
        img[0].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
        img[1].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
        img[2].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
    ori_img = img.clone()
    # Subtract the per-channel mean offsets.
    img[0].add_(-0.406)
    img[1].add_(-0.457)
    img[2].add_(-0.480)
    upLeft = torch.Tensor((int(bndbox[0][0]), int(bndbox[0][1])))
    bottomRight = torch.Tensor((int(bndbox[0][2]), int(bndbox[0][3])))
    ht = bottomRight[1] - upLeft[1]
    width = bottomRight[0] - upLeft[0]
    imght = img.shape[1]
    imgwidth = img.shape[2]
    # Randomly enlarge the box, clamped to the image bounds.
    scaleRate = random.uniform(*scale_factor)
    upLeft[0] = max(0, upLeft[0] - width * scaleRate / 2)
    upLeft[1] = max(0, upLeft[1] - ht * scaleRate / 2)
    bottomRight[0] = min(imgwidth - 1, bottomRight[0] + width * scaleRate / 2)
    bottomRight[1] = min(imght - 1, bottomRight[1] + ht * scaleRate / 2)
    # Doing Random Sample
    if opt.addDPG:
        PatchScale = random.uniform(0, 1)
        if PatchScale > 0.85:
            # Sample a smaller patch with the same aspect ratio inside the box.
            ratio = ht / width
            if (width < ht):
                patchWidth = PatchScale * width
                patchHt = patchWidth * ratio
            else:
                patchHt = PatchScale * ht
                patchWidth = patchHt / ratio
            xmin = upLeft[0] + random.uniform(0, 1) * (width - patchWidth)
            ymin = upLeft[1] + random.uniform(0, 1) * (ht - patchHt)
            xmax = xmin + patchWidth + 1
            ymax = ymin + patchHt + 1
        else:
            # Jitter the box corners with empirically-fitted gaussian offsets.
            xmin = max(1, min(upLeft[0] + np.random.normal(-0.0142, 0.1158) * width, imgwidth - 3))
            ymin = max(1, min(upLeft[1] + np.random.normal(0.0043, 0.068) * ht, imght - 3))
            xmax = min(max(xmin + 2, bottomRight[0] + np.random.normal(0.0154, 0.1337) * width), imgwidth - 3)
            ymax = min(max(ymin + 2, bottomRight[1] + np.random.normal(-0.0013, 0.0711) * ht), imght - 3)
        upLeft[0] = xmin
        upLeft[1] = ymin
        bottomRight[0] = xmax
        bottomRight[1] = ymax
    # Counting Joints number
    jointNum = 0
    if imgset == 'coco':
        for i in range(17):
            if part[i][0] > 0 and part[i][0] > upLeft[0] and part[i][1] > upLeft[1] \
               and part[i][0] < bottomRight[0] and part[i][1] < bottomRight[1]:
                jointNum += 1
    else:
        for i in range(16):
            if part[i][0] > 0 and part[i][0] > upLeft[0] and part[i][1] > upLeft[1] \
               and part[i][0] < bottomRight[0] and part[i][1] < bottomRight[1]:
                jointNum += 1
    # Doing Random Crop
    if opt.addDPG:
        # When many joints are visible, randomly shrink the box toward one of
        # its corners/edges (half-crops) with small probabilities.
        if jointNum > 13 and train:
            switch = random.uniform(0, 1)
            if switch > 0.96:
                bottomRight[0] = (upLeft[0] + bottomRight[0]) / 2
                bottomRight[1] = (upLeft[1] + bottomRight[1]) / 2
            elif switch > 0.92:
                upLeft[0] = (upLeft[0] + bottomRight[0]) / 2
                bottomRight[1] = (upLeft[1] + bottomRight[1]) / 2
            elif switch > 0.88:
                upLeft[1] = (upLeft[1] + bottomRight[1]) / 2
                bottomRight[0] = (upLeft[0] + bottomRight[0]) / 2
            elif switch > 0.84:
                upLeft[0] = (upLeft[0] + bottomRight[0]) / 2
                upLeft[1] = (upLeft[1] + bottomRight[1]) / 2
            elif switch > 0.80:
                bottomRight[0] = (upLeft[0] + bottomRight[0]) / 2
            elif switch > 0.76:
                upLeft[0] = (upLeft[0] + bottomRight[0]) / 2
            elif switch > 0.72:
                bottomRight[1] = (upLeft[1] + bottomRight[1]) / 2
            elif switch > 0.68:
                upLeft[1] = (upLeft[1] + bottomRight[1]) / 2
    ori_inp = cropBox(ori_img, upLeft, bottomRight, opt.inputResH, opt.inputResW)
    inp = cropBox(img, upLeft, bottomRight, opt.inputResH, opt.inputResW)
    if jointNum == 0:
        # No visible joint in the box: feed a blank input.
        inp = torch.zeros(3, opt.inputResH, opt.inputResW)
    out_bigcircle = torch.zeros(nJoints, opt.outputResH, opt.outputResW)
    out_smallcircle = torch.zeros(nJoints, opt.outputResH, opt.outputResW)
    out = torch.zeros(nJoints, opt.outputResH, opt.outputResW)
    setMask = torch.zeros(nJoints, opt.outputResH, opt.outputResW)
    # Draw Label
    if imgset == 'coco':
        for i in range(nJoints_coco):
            if part[i][0] > 0 and part[i][0] > upLeft[0] and part[i][1] > upLeft[1] \
               and part[i][0] < bottomRight[0] and part[i][1] < bottomRight[1]:
                out_bigcircle[i] = drawBigCircle(out_bigcircle[i], transformBox(part[i], upLeft, bottomRight, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW), opt.hmGauss * 2)
                out_smallcircle[i] = drawSmallCircle(out_smallcircle[i], transformBox(part[i], upLeft, bottomRight, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW), opt.hmGauss)
                out[i] = drawGaussian(out[i], transformBox(part[i], upLeft, bottomRight, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW), opt.hmGauss)
                setMask[i].add_(1)
    elif imgset == 'mpii':
        # MPII joints occupy channels [nJoints_coco, nJoints_coco + nJoints_mpii).
        for i in range(nJoints_coco, nJoints_coco + nJoints_mpii):
            if part[i - nJoints_coco][0] > 0 and part[i - nJoints_coco][0] > upLeft[0] and part[i - nJoints_coco][1] > upLeft[1] \
               and part[i - nJoints_coco][0] < bottomRight[0] and part[i - nJoints_coco][1] < bottomRight[1]:
                out_bigcircle[i] = drawBigCircle(out_bigcircle[i], transformBox(part[i - nJoints_coco], upLeft, bottomRight, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW), opt.hmGauss * 2)
                out_smallcircle[i] = drawSmallCircle(out_smallcircle[i], transformBox(part[i - nJoints_coco], upLeft, bottomRight, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW), opt.hmGauss)
                out[i] = drawGaussian(out[i], transformBox(part[i - nJoints_coco], upLeft, bottomRight, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW), opt.hmGauss)
                setMask[i].add_(1)
    else:
        for i in range(nJoints_coco, nJoints_coco + nJoints_mpii):
            if part[i - nJoints_coco][0] > 0 and part[i - nJoints_coco][0] > upLeft[0] and part[i - nJoints_coco][1] > upLeft[1] \
               and part[i - nJoints_coco][0] < bottomRight[0] and part[i - nJoints_coco][1] < bottomRight[1]:
                out_bigcircle[i] = drawBigCircle(out_bigcircle[i], transformBox(part[i - nJoints_coco], upLeft, bottomRight, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW), opt.hmGauss * 2)
                out_smallcircle[i] = drawSmallCircle(out_smallcircle[i], transformBox(part[i - nJoints_coco], upLeft, bottomRight, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW), opt.hmGauss)
                out[i] = drawGaussian(out[i], transformBox(part[i - nJoints_coco], upLeft, bottomRight, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW), opt.hmGauss)
                # Channels 6 and 7 (MPII-offset) are excluded from the loss mask here
                # — presumably unreliable joints in this set; TODO confirm rationale.
                if i != 6 + nJoints_coco and i != 7 + nJoints_coco:
                    setMask[i].add_(1)
    if opt.debug:
        preds_hm, preds_img, preds_scores = getPrediction(out.unsqueeze(0), upLeft.unsqueeze(0), bottomRight.unsqueeze(0), opt.inputResH,
                                                          opt.inputResW, opt.outputResH, opt.outputResW)
        tmp_preds = preds_hm.mul(opt.inputResH / opt.outputResH)
        drawCOCO(ori_inp.unsqueeze(0), tmp_preds, preds_scores)
    if train:
        # Flip
        if random.uniform(0, 1) < 0.5:
            inp = flip(inp)
            ori_inp = flip(ori_inp)
            out_bigcircle = shuffleLR(flip(out_bigcircle), dataset)
            out_smallcircle = shuffleLR(flip(out_smallcircle), dataset)
            out = shuffleLR(flip(out), dataset)
        # Rotate
        r = rnd(opt.rotate)
        if random.uniform(0, 1) < 0.6:
            r = 0
        if r != 0:
            inp = cv_rotate(inp, r, opt.inputResW, opt.inputResH)
            out_bigcircle = cv_rotate(out_bigcircle, r, opt.outputResW, opt.outputResH)
            out_smallcircle = cv_rotate(out_smallcircle, r, opt.outputResW, opt.outputResH)
            out = cv_rotate(out, r, opt.outputResW, opt.outputResH)
    return inp, out_bigcircle, out_smallcircle, out, setMask
| 8,728 | 50.347059 | 206 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/SPPE/src/utils/eval.py | from opt import opt
try:
from utils.img import transformBoxInvert, transformBoxInvert_batch, findPeak, processPeaks
except ImportError:
from SPPE.src.utils.img import transformBoxInvert, transformBoxInvert_batch, findPeak, processPeaks
import torch
class DataLogger(object):
    """Running-average tracker exposing value / sum / cnt / avg."""

    def __init__(self):
        self.clear()

    def clear(self):
        """Reset every statistic to zero."""
        self.value = 0
        self.sum = 0
        self.cnt = 0
        self.avg = 0

    def update(self, value, n=1):
        """Record *value* with weight *n* and refresh the running mean."""
        self.value = value
        self.sum = self.sum + value * n
        self.cnt = self.cnt + n
        self._cal_avg()

    def _cal_avg(self):
        self.avg = self.sum / self.cnt
def accuracy(output, label, dataset):
    """PCK accuracy; for stacked (intermediate-supervision) outputs, score the last stack only."""
    if type(output) == list:
        last = opt.nStack - 1
        return accuracy(output[last], label[last], dataset)
    return heatmapAccuracy(output.cpu().data, label.cpu().data, dataset.accIdxs)
def heatmapAccuracy(output, label, idxs):
    """Per-joint PCK over heatmaps; acc[0] is the mean over the joints that scored."""
    preds = getPreds(output)
    gt = getPreds(label)
    # Threshold distances are normalised by a tenth of the heatmap height.
    norm = torch.ones(preds.size(0)) * opt.outputResH / 10
    dists = calc_dists(preds, gt, norm)
    acc = torch.zeros(len(idxs) + 1)
    scored = 0
    total = 0
    for slot, joint in enumerate(idxs):
        acc[slot + 1] = dist_acc(dists[joint - 1])
        if acc[slot + 1] >= 0:  # -1 means no visible target for this joint
            total = total + acc[slot + 1]
            scored += 1
    if scored != 0:
        acc[0] = total / scored
    return acc
def getPreds(hm):
    """Per-channel argmax locations of (N, K, H, W) score maps.

    Returns an (N, K, 2) float tensor of (x, y) grid coordinates.
    """
    assert hm.dim() == 4, 'Score maps should be 4-dim'
    flat = hm.view(hm.size(0), hm.size(1), -1)
    _, idx = torch.max(flat, 2)
    idx = idx.view(hm.size(0), hm.size(1), 1) + 1
    preds = idx.repeat(1, 1, 2).float()
    # Unpack the 1-based flat index into column (x) and row (y).
    preds[:, :, 0] = (preds[:, :, 0] - 1) % hm.size(3)
    preds[:, :, 1] = torch.floor((preds[:, :, 1] - 1) / hm.size(3))
    return preds
def calc_dists(preds, target, normalize):
    """Normalised prediction-target distances, shape (K, N); -1 marks missing targets."""
    preds = preds.float().clone()
    target = target.float().clone()
    n_samples, n_joints = preds.size(0), preds.size(1)
    dists = torch.zeros(n_joints, n_samples)
    for s in range(n_samples):
        scale = normalize[s]
        for j in range(n_joints):
            if target[s, j, 0] > 0 and target[s, j, 1] > 0:
                dists[j, s] = torch.dist(preds[s, j, :], target[s, j, :]) / scale
            else:
                dists[j, s] = -1  # joint not annotated / not visible
    return dists
def dist_acc(dists, thr=0.5):
    ''' Return percentage below threshold while ignoring values with a -1 '''
    valid = dists.ne(-1)
    if valid.sum() > 0:
        # True only for valid entries that are <= thr (-1 sentinels drop out).
        hits = dists.le(thr).eq(valid)
        return hits.float().sum() * 1.0 / valid.float().sum()
    return -1
def postprocess(output):
    """Argmax keypoints with a quarter-pixel shift toward the higher neighbour, minus 0.5."""
    coords = getPreds(output)
    for b in range(coords.size(0)):
        for k in range(coords.size(1)):
            hm = output[b][k]
            px, py = int(round(coords[b][k][0])), int(round(coords[b][k][1]))
            # Only refine strictly-interior maxima (all four neighbours exist).
            if 0 < px < opt.outputResW - 1 and 0 < py < opt.outputResH - 1:
                grad = torch.Tensor((hm[py][px + 1] - hm[py][px - 1],
                                     hm[py + 1][px] - hm[py - 1][px]))
                coords[b][k] += grad.sign() * 0.25
    coords -= 0.5
    return coords
def getPrediction(hms, pt1, pt2, inpH, inpW, resH, resW):
    '''
    Get keypoint location from heatmaps

    hms: (N, K, resH, resW) heatmaps; pt1 / pt2: per-sample crop-box corners.
    Returns (heatmap-space coords, image-space coords, per-joint max scores).
    '''
    assert hms.dim() == 4, 'Score maps should be 4-dim'
    maxval, idx = torch.max(hms.view(hms.size(0), hms.size(1), -1), 2)
    maxval = maxval.view(hms.size(0), hms.size(1), 1)
    idx = idx.view(hms.size(0), hms.size(1), 1) + 1
    preds = idx.repeat(1, 1, 2).float()
    # Unpack the 1-based flat argmax into (x, y) grid coordinates.
    preds[:, :, 0] = (preds[:, :, 0] - 1) % hms.size(3)
    preds[:, :, 1] = torch.floor((preds[:, :, 1] - 1) / hms.size(3))
    # Zero out joints whose best response is non-positive.
    pred_mask = maxval.gt(0).repeat(1, 1, 2).float()
    preds *= pred_mask
    # Very simple post-processing step to improve performance at tight PCK thresholds
    for i in range(preds.size(0)):
        for j in range(preds.size(1)):
            hm = hms[i][j]
            pX, pY = int(round(float(preds[i][j][0]))), int(round(float(preds[i][j][1])))
            if 0 < pX < opt.outputResW - 1 and 0 < pY < opt.outputResH - 1:
                # Quarter-pixel shift toward the higher-scoring neighbour.
                diff = torch.Tensor(
                    (hm[pY][pX + 1] - hm[pY][pX - 1], hm[pY + 1][pX] - hm[pY - 1][pX]))
                preds[i][j] += diff.sign() * 0.25
    preds += 0.2
    preds_tf = torch.zeros(preds.size())
    preds_tf = transformBoxInvert_batch(preds, pt1, pt2, inpH, inpW, resH, resW)
    return preds, preds_tf, maxval
def getMultiPeakPrediction(hms, pt1, pt2, inpH, inpW, resH, resW):
    """All-peak decoding: every qualifying local maximum per joint, mapped to image space.

    Returns {sample_idx: {joint_idx: [np.array([x, y, score]), ...]}}.
    """
    assert hms.dim() == 4, 'Score maps should be 4-dim'
    hms = hms.numpy()
    preds_img = {}
    for n in range(hms.shape[0]):          # Number of samples
        joint_results = {}
        for k in range(hms.shape[1]):      # Number of keypoints
            hm = hms[n][k]
            peaks = findPeak(hm)
            joint_results[k] = processPeaks(peaks, hm, pt1[n], pt2[n],
                                            inpH, inpW, resH, resW)
        preds_img[n] = joint_results
    return preds_img
def getPrediction_batch(hms, pt1, pt2, inpH, inpW, resH, resW):
    '''
    Get keypoint location from heatmaps
    pt1, pt2: [n, 2]
    OUTPUT:
        preds: [n, 17, 2]
    '''
    assert hms.dim() == 4, 'Score maps should be 4-dim'
    flat_hms = hms.view(hms.size(0), hms.size(1), -1)
    maxval, idx = torch.max(flat_hms, 2)
    maxval = maxval.view(hms.size(0), hms.size(1), 1)
    idx = idx.view(hms.size(0), hms.size(1), 1) + 1
    preds = idx.repeat(1, 1, 2).float()
    # Unpack the 1-based flat argmax into (x, y) grid coordinates.
    preds[:, :, 0] = (preds[:, :, 0] - 1) % hms.size(3)
    preds[:, :, 1] = torch.floor((preds[:, :, 1] - 1) / hms.size(3))
    # Zero out joints whose best response is non-positive.
    pred_mask = maxval.gt(0).repeat(1, 1, 2).float()
    preds *= pred_mask
    # Very simple post-processing step to improve performance at tight PCK thresholds
    # NOTE(review): `idx` was incremented by 1 above, so these neighbour indices
    # are shifted by one relative to the true argmax (e.g. idx_left gathers the
    # argmax cell itself). Also diff1 (horizontal gradient) is masked with the
    # vertical (idx_up/idx_down) boundary tests and diff2 with the horizontal
    # ones — the masks look swapped. Compare with the per-pixel refinement in
    # getPrediction before relying on this batched variant.
    idx_up = (idx - hms.size(3)).clamp(0, flat_hms.size(2) - 1)
    idx_down = (idx + hms.size(3)).clamp(0, flat_hms.size(2) - 1)
    idx_left = (idx - 1).clamp(0, flat_hms.size(2) - 1)
    idx_right = (idx + 1).clamp(0, flat_hms.size(2) - 1)
    maxval_up = flat_hms.gather(2, idx_up)
    maxval_down = flat_hms.gather(2, idx_down)
    maxval_left = flat_hms.gather(2, idx_left)
    maxval_right = flat_hms.gather(2, idx_right)
    diff1 = (maxval_right - maxval_left).sign() * 0.25
    diff2 = (maxval_down - maxval_up).sign() * 0.25
    diff1[idx_up <= hms.size(3)] = 0
    diff1[idx_down / hms.size(3) >= (hms.size(3) - 1)] = 0
    diff2[(idx_left % hms.size(3)) == 0] = 0
    diff2[(idx_left % hms.size(3)) == (hms.size(3) - 1)] = 0
    preds[:, :, 0] += diff1.squeeze(-1)
    preds[:, :, 1] += diff2.squeeze(-1)
    preds_tf = torch.zeros(preds.size())
    preds_tf = transformBoxInvert_batch(preds, pt1, pt2, inpH, inpW, resH, resW)
    return preds, preds_tf, maxval
| 6,955 | 30.618182 | 103 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/SPPE/src/utils/dataset/mpii.py | import os
import h5py
from functools import reduce
import torch.utils.data as data
from ..pose import generateSampleBox
from opt import opt
class Mpii(data.Dataset):
    """
    MPII human-pose dataset (16 joints). The last 1358 annotations are held
    out as the validation split.
    """

    def __init__(self, train=True, sigma=1,
                 scale_factor=0.25, rot_factor=30, label_type='Gaussian'):
        self.img_folder = '../data/mpii/images'  # root image folder
        self.is_train = train                    # training or validation split
        self.inputResH = 320
        self.inputResW = 256
        self.outputResH = 80
        self.outputResW = 64
        self.sigma = sigma
        # NOTE(review): the scale_factor argument is ignored; a fixed range is used.
        self.scale_factor = (0.2, 0.3)
        self.rot_factor = rot_factor
        self.label_type = label_type
        self.nJoints_mpii = 16
        self.nJoints = 16
        # Joints used for accuracy computation, and left/right pairs for flips.
        self.accIdxs = (1, 2, 3, 4, 5, 6,
                        11, 12, 15, 16)
        self.flipRef = ((1, 6), (2, 5), (3, 4),
                        (11, 16), (12, 15), (13, 14))
        # Train/val split: the final 1358 samples form the validation set.
        with h5py.File('../data/mpii/annot_mpii.h5', 'r') as annot:
            self.imgname_mpii_train = annot['imgname'][:-1358]
            self.bndbox_mpii_train = annot['bndbox'][:-1358]
            self.part_mpii_train = annot['part'][:-1358]
            self.imgname_mpii_val = annot['imgname'][-1358:]
            self.bndbox_mpii_val = annot['bndbox'][-1358:]
            self.part_mpii_val = annot['part'][-1358:]
        self.size_train = self.imgname_mpii_train.shape[0]
        self.size_val = self.imgname_mpii_val.shape[0]
        self.train, self.valid = [], []

    def __getitem__(self, index):
        sf = self.scale_factor
        split = 'train' if self.is_train else 'val'
        part = getattr(self, 'part_mpii_' + split)[index]
        bndbox = getattr(self, 'bndbox_mpii_' + split)[index]
        imgname = getattr(self, 'imgname_mpii_' + split)[index]
        # Filenames are stored as arrays of char codes; MPII names are 13 chars.
        imgname = ''.join(chr(int(c)) for c in imgname)[:13]
        img_path = os.path.join(self.img_folder, imgname)
        inp, out_bigcircle, out_smallcircle, out, setMask = generateSampleBox(
            img_path, bndbox, part, self.nJoints, 'mpii', sf, self, train=self.is_train)
        # Every stack regresses the same target heatmaps (the big/small-circle
        # variants for the first stacks were disabled upstream).
        label = [out.clone() for _ in range(opt.nStack)]
        return inp, label, setMask

    def __len__(self):
        return self.size_train if self.is_train else self.size_val
| 2,870 | 32.776471 | 86 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/SPPE/src/utils/dataset/fuse.py | import os
import h5py
from functools import reduce
import torch.utils.data as data
from ..pose import generateSampleBox
from opt import opt
class Mscoco(data.Dataset):
    """
    Fused COCO + MPII pose dataset. COCO samples occupy indices
    [0, size_coco_*) of each split and MPII samples follow them; labels use a
    shared 33-joint layout (17 COCO joints first, then 16 MPII joints).
    """
    def __init__(self, train=True, sigma=1,
                 scale_factor=0.25, rot_factor=30, label_type='Gaussian'):
        self.img_folder = '../data/'    # root image folders
        self.is_train = train           # training set or test set
        self.inputResH = 320
        self.inputResW = 256
        self.outputResH = 80
        self.outputResW = 64
        self.sigma = sigma
        # NOTE(review): the scale_factor argument is ignored; a fixed range is used.
        self.scale_factor = (0.2, 0.3)
        self.rot_factor = rot_factor
        self.label_type = label_type
        self.nJoints_coco = 17
        self.nJoints_mpii = 16
        self.nJoints = 33
        # Joint indices used for accuracy evaluation (1-based, fused layout).
        self.accIdxs = (1, 2, 3, 4, 5, 6, 7, 8,  # COCO
                        9, 10, 11, 12, 13, 14, 15, 16, 17,
                        18, 19, 20, 21, 22, 23,  # MPII
                        28, 29, 32, 33)
        # Left/right joint pairs swapped under horizontal flip augmentation.
        self.flipRef = ((2, 3), (4, 5), (6, 7),  # COCO
                        (8, 9), (10, 11), (12, 13),
                        (14, 15), (16, 17),
                        (18, 23), (19, 22), (20, 21),  # MPII
                        (28, 33), (29, 32), (30, 31))
        '''
        Create train/val split
        '''
        # COCO: the final 5887 samples form the validation set.
        with h5py.File('../data/coco/annot_clean.h5', 'r') as annot:
            # train
            self.imgname_coco_train = annot['imgname'][:-5887]
            self.bndbox_coco_train = annot['bndbox'][:-5887]
            self.part_coco_train = annot['part'][:-5887]
            # val
            self.imgname_coco_val = annot['imgname'][-5887:]
            self.bndbox_coco_val = annot['bndbox'][-5887:]
            self.part_coco_val = annot['part'][-5887:]
        # MPII: the final 1358 samples form the validation set.
        with h5py.File('../data/mpii/annot_mpii.h5', 'r') as annot:
            # train
            self.imgname_mpii_train = annot['imgname'][:-1358]
            self.bndbox_mpii_train = annot['bndbox'][:-1358]
            self.part_mpii_train = annot['part'][:-1358]
            # val
            self.imgname_mpii_val = annot['imgname'][-1358:]
            self.bndbox_mpii_val = annot['bndbox'][-1358:]
            self.part_mpii_val = annot['part'][-1358:]
        self.size_coco_train = self.imgname_coco_train.shape[0]
        self.size_coco_val = self.imgname_coco_val.shape[0]
        # Fused split sizes: COCO first, then MPII.
        self.size_train = self.imgname_coco_train.shape[0] + self.imgname_mpii_train.shape[0]
        self.size_val = self.imgname_coco_val.shape[0] + self.imgname_mpii_val.shape[0]
        self.train, self.valid = [], []

    def __getitem__(self, index):
        """Route index to the COCO or MPII portion of the active split."""
        sf = self.scale_factor
        if self.is_train and index < self.size_coco_train:  # COCO
            part = self.part_coco_train[index]
            bndbox = self.bndbox_coco_train[index]
            imgname = self.imgname_coco_train[index]
            imgset = 'coco'
        elif self.is_train:  # MPII (offset past the COCO portion)
            part = self.part_mpii_train[index - self.size_coco_train]
            bndbox = self.bndbox_mpii_train[index - self.size_coco_train]
            imgname = self.imgname_mpii_train[index - self.size_coco_train]
            imgset = 'mpii'
        elif index < self.size_coco_val:
            part = self.part_coco_val[index]
            bndbox = self.bndbox_coco_val[index]
            imgname = self.imgname_coco_val[index]
            imgset = 'coco'
        else:
            part = self.part_mpii_val[index - self.size_coco_val]
            bndbox = self.bndbox_mpii_val[index - self.size_coco_val]
            imgname = self.imgname_mpii_val[index - self.size_coco_val]
            imgset = 'mpii'
        # Filenames are stored as arrays of char codes; MPII names are padded
        # and only the first 13 characters are valid.
        if imgset == 'coco':
            imgname = reduce(lambda x, y: x + y, map(lambda x: chr(int(x)), imgname))
        else:
            imgname = reduce(lambda x, y: x + y, map(lambda x: chr(int(x)), imgname))[:13]
        img_path = os.path.join(self.img_folder, imgset, 'images', imgname)
        metaData = generateSampleBox(img_path, bndbox, part, self.nJoints,
                                     imgset, sf, self, train=self.is_train)
        inp, out_bigcircle, out_smallcircle, out, setMask = metaData
        label = []
        # Every stack currently regresses the same target heatmaps (the
        # big/small-circle variants for early stacks are disabled).
        for i in range(opt.nStack):
            if i < 2:
                # label.append(out_bigcircle.clone())
                label.append(out.clone())
            elif i < 4:
                # label.append(out_smallcircle.clone())
                label.append(out.clone())
            else:
                label.append(out.clone())
        return inp, label, setMask, imgset

    def __len__(self):
        if self.is_train:
            return self.size_train
        else:
            return self.size_val
| 4,753 | 37.650407 | 93 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/joints_detectors/Alphapose/SPPE/src/utils/dataset/coco.py | import os
import h5py
from functools import reduce
import torch.utils.data as data
from ..pose import generateSampleBox
from opt import opt
class Mscoco(data.Dataset):
    """
    MSCOCO keypoint dataset (17 COCO joints; label tensors use the fused
    33-joint layout shared with MPII). The last 5887 annotations are held out
    as the validation split.
    """

    def __init__(self, train=True, sigma=1,
                 scale_factor=(0.2, 0.3), rot_factor=40, label_type='Gaussian'):
        self.img_folder = '../data/coco/images'  # root image folder
        self.is_train = train                    # training or validation split
        self.inputResH = opt.inputResH
        self.inputResW = opt.inputResW
        self.outputResH = opt.outputResH
        self.outputResW = opt.outputResW
        self.sigma = sigma
        self.scale_factor = scale_factor
        self.rot_factor = rot_factor
        self.label_type = label_type
        self.nJoints_coco = 17
        self.nJoints_mpii = 16
        self.nJoints = 33
        # Joints used for accuracy computation, and left/right pairs for flips.
        self.accIdxs = (1, 2, 3, 4, 5, 6, 7, 8,
                        9, 10, 11, 12, 13, 14, 15, 16, 17)
        self.flipRef = ((2, 3), (4, 5), (6, 7),
                        (8, 9), (10, 11), (12, 13),
                        (14, 15), (16, 17))
        # Train/val split: the final 5887 samples form the validation set.
        with h5py.File('../data/coco/annot_clean.h5', 'r') as annot:
            self.imgname_coco_train = annot['imgname'][:-5887]
            self.bndbox_coco_train = annot['bndbox'][:-5887]
            self.part_coco_train = annot['part'][:-5887]
            self.imgname_coco_val = annot['imgname'][-5887:]
            self.bndbox_coco_val = annot['bndbox'][-5887:]
            self.part_coco_val = annot['part'][-5887:]
        self.size_train = self.imgname_coco_train.shape[0]
        self.size_val = self.imgname_coco_val.shape[0]

    def __getitem__(self, index):
        sf = self.scale_factor
        split = 'train' if self.is_train else 'val'
        part = getattr(self, 'part_coco_' + split)[index]
        bndbox = getattr(self, 'bndbox_coco_' + split)[index]
        imgname = getattr(self, 'imgname_coco_' + split)[index]
        # Filenames are stored as arrays of char codes.
        imgname = ''.join(chr(int(c)) for c in imgname)
        img_path = os.path.join(self.img_folder, imgname)
        inp, out_bigcircle, out_smallcircle, out, setMask = generateSampleBox(
            img_path, bndbox, part, self.nJoints, 'coco', sf, self, train=self.is_train)
        # Every stack regresses the same target heatmaps (the big/small-circle
        # variants for the first stacks were disabled upstream).
        label = [out.clone() for _ in range(opt.nStack)]
        return inp, label, setMask, 'coco'

    def __len__(self):
        return self.size_train if self.is_train else self.size_val
| 2,988 | 33.755814 | 81 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/common/camera.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import torch
from common.quaternion import qrot, qinverse
from common.utils import wrap
def normalize_screen_coordinates(X, w, h):
    """Map pixel coordinates so that [0, w] -> [-1, 1], preserving aspect ratio."""
    assert X.shape[-1] == 2
    scaled = X / w * 2
    return scaled - [1, h / w]
def normalize_screen_coordinates_new(X, w, h):
    """Map pixel coordinates to [-1, 1] per axis (no aspect-ratio preservation)."""
    assert X.shape[-1] == 2
    half = (w / 2, h / 2)
    return (X - half) / half
def image_coordinates_new(X, w, h):
    """Inverse of normalize_screen_coordinates_new: [-1, 1] -> pixel coordinates."""
    assert X.shape[-1] == 2
    half = (w / 2, h / 2)
    return X * half + half
def image_coordinates(X, w, h):
    """Inverse of normalize_screen_coordinates: [-1, 1] range -> pixel coordinates."""
    assert X.shape[-1] == 2
    shifted = X + [1, h / w]
    return shifted * w / 2
def world_to_camera(X, R, t):
    """Transform world-space points X into camera space (R: camera quaternion, t: translation)."""
    R_inv = wrap(qinverse, R)  # invert the camera rotation
    R_rep = np.tile(R_inv, (*X.shape[:-1], 1))
    return wrap(qrot, R_rep, X - t)  # translate, then rotate into the camera frame
def camera_to_world(X, R, t):
    """Inverse of world_to_camera: map camera-space points back to world space."""
    R_rep = np.tile(R, (*X.shape[:-1], 1))
    return wrap(qrot, R_rep, X) + t
def project_to_2d(X, camera_params):
    """
    Project 3D camera-space points to 2D pixels using the Human3.6M camera
    model (radial + tangential distortion). Differentiable and batched.

    Arguments:
    X -- 3D points in *camera space* to transform (N, *, 3)
    camera_params -- intrinsics (N, 9):
        focal length (2) | principal point (2) | radial k (3) | tangential p (2)
    """
    assert X.shape[-1] == 3
    assert len(camera_params.shape) == 2
    assert camera_params.shape[-1] == 9
    assert X.shape[0] == camera_params.shape[0]
    # Broadcast per-camera parameters over the intermediate dims of X.
    while len(camera_params.shape) < len(X.shape):
        camera_params = camera_params.unsqueeze(1)
    f = camera_params[..., :2]     # focal length
    c = camera_params[..., 2:4]    # principal point
    k = camera_params[..., 4:7]    # radial distortion coefficients
    p = camera_params[..., 7:]     # tangential distortion coefficients
    # Perspective division, clamped to keep extreme points well-behaved.
    XX = torch.clamp(X[..., :2] / X[..., 2:], min=-1, max=1)
    last = len(XX.shape) - 1
    r2 = torch.sum(XX[..., :2] ** 2, dim=last, keepdim=True)
    radial = 1 + torch.sum(k * torch.cat((r2, r2 ** 2, r2 ** 3), dim=last),
                           dim=last, keepdim=True)
    tan = torch.sum(p * XX, dim=last, keepdim=True)
    distorted = XX * (radial + tan) + p * r2
    return f * distorted + c
def project_to_2d_linear(X, camera_params):
    """
    Project 3D camera-space points to 2D using only the linear intrinsics
    (focal length and principal point). Results differ slightly from
    project_to_2d because the distortion terms are ignored.

    Arguments:
    X -- 3D points in *camera space* to transform (N, *, 3)
    camera_params -- intrinsics (N, 9); only the first four entries are used
    """
    assert X.shape[-1] == 3
    assert len(camera_params.shape) == 2
    assert camera_params.shape[-1] == 9
    assert X.shape[0] == camera_params.shape[0]
    # Broadcast per-camera parameters over the intermediate dims of X.
    while len(camera_params.shape) < len(X.shape):
        camera_params = camera_params.unsqueeze(1)
    f = camera_params[..., :2]     # focal length
    c = camera_params[..., 2:4]    # principal point
    # Perspective division, clamped to keep extreme points well-behaved.
    XX = torch.clamp(X[..., :2] / X[..., 2:], min=-1, max=1)
    return XX * f + c
| 3,217 | 28.796296 | 125 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/common/loss.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import torch
def mpjpe(predicted, target):
    """
    Mean per-joint position error ("Protocol #1"): the mean Euclidean
    distance between predicted and target joints.
    """
    assert predicted.shape == target.shape
    err = torch.norm(predicted - target, dim=len(target.shape) - 1)
    return err.mean()
def weighted_mpjpe(predicted, target, w):
    """MPJPE with one weight per leading-axis element (w broadcasts over joints)."""
    assert predicted.shape == target.shape
    assert w.shape[0] == predicted.shape[0]
    dists = torch.norm(predicted - target, dim=len(target.shape) - 1)
    return (w * dists).mean()
def p_mpjpe(predicted, target):
    """
    MPJPE after rigid alignment of predicted to target — a similarity
    transform over scale, rotation and translation ("Protocol #2").
    Shapes: (batch, joints, 3).
    """
    assert predicted.shape == target.shape
    # Centre both point sets.
    mu_t = np.mean(target, axis=1, keepdims=True)
    mu_p = np.mean(predicted, axis=1, keepdims=True)
    t0 = target - mu_t
    p0 = predicted - mu_p
    # Normalise by Frobenius norm so the SVD yields the optimal scale.
    norm_t = np.sqrt(np.sum(t0 ** 2, axis=(1, 2), keepdims=True))
    norm_p = np.sqrt(np.sum(p0 ** 2, axis=(1, 2), keepdims=True))
    t0 = t0 / norm_t
    p0 = p0 / norm_p
    # Orthogonal Procrustes via SVD of the cross-covariance.
    H = np.matmul(t0.transpose(0, 2, 1), p0)
    U, s, Vt = np.linalg.svd(H)
    V = Vt.transpose(0, 2, 1)
    R = np.matmul(V, U.transpose(0, 2, 1))
    # Avoid improper rotations (reflections): flip the weakest singular direction
    # whenever det(R) = -1.
    sign_detR = np.sign(np.expand_dims(np.linalg.det(R), axis=1))
    V[:, :, -1] *= sign_detR
    s[:, -1] *= sign_detR.flatten()
    R = np.matmul(V, U.transpose(0, 2, 1))  # proper rotation
    tr = np.expand_dims(np.sum(s, axis=1, keepdims=True), axis=2)
    a = tr * norm_t / norm_p                # optimal scale
    t = mu_t - a * np.matmul(mu_p, R)       # optimal translation
    # Apply the similarity transform and score against the target.
    aligned = a * np.matmul(predicted, R) + t
    return np.mean(np.linalg.norm(aligned - target, axis=len(target.shape) - 1))
def n_mpjpe(predicted, target):
    """
    Scale-normalised MPJPE: rescales predicted by the least-squares optimal
    scale factor before computing MPJPE. Adapted from:
    https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning/blob/master/losses/poses.py
    Expects 4D tensors (batch, frames, joints, 3).
    """
    assert predicted.shape == target.shape
    norm_predicted = torch.mean(torch.sum(predicted ** 2, dim=3, keepdim=True), dim=2, keepdim=True)
    norm_target = torch.mean(torch.sum(target * predicted, dim=3, keepdim=True), dim=2, keepdim=True)
    scale = norm_target / norm_predicted
    # MPJPE of the rescaled prediction (mean Euclidean distance per joint).
    return torch.mean(torch.norm(scale * predicted - target, dim=len(target.shape) - 1))
def mean_velocity_error(predicted, target):
    """
    Mean per-joint velocity error: the mean Euclidean distance between the
    first temporal derivatives (frame-to-frame differences along axis 0).
    """
    assert predicted.shape == target.shape
    vel_p = np.diff(predicted, axis=0)
    vel_t = np.diff(target, axis=0)
    return np.mean(np.linalg.norm(vel_p - vel_t, axis=len(target.shape) - 1))
def pose_align(predicted, target):
    """
    Rigidly align predicted to target (similarity transform over scale,
    rotation, translation — the "Protocol #2" alignment) and return the
    aligned poses instead of the error. Shapes: (batch, joints, 3).
    """
    assert predicted.shape == target.shape
    # Centre both point sets.
    mu_t = np.mean(target, axis=1, keepdims=True)
    mu_p = np.mean(predicted, axis=1, keepdims=True)
    t0 = target - mu_t
    p0 = predicted - mu_p
    # Normalise by Frobenius norm so the SVD yields the optimal scale.
    norm_t = np.sqrt(np.sum(t0 ** 2, axis=(1, 2), keepdims=True))
    norm_p = np.sqrt(np.sum(p0 ** 2, axis=(1, 2), keepdims=True))
    t0 = t0 / norm_t
    p0 = p0 / norm_p
    # Orthogonal Procrustes via SVD of the cross-covariance.
    H = np.matmul(t0.transpose(0, 2, 1), p0)
    U, s, Vt = np.linalg.svd(H)
    V = Vt.transpose(0, 2, 1)
    R = np.matmul(V, U.transpose(0, 2, 1))
    # Avoid improper rotations (reflections): flip the weakest singular direction
    # whenever det(R) = -1.
    sign_detR = np.sign(np.expand_dims(np.linalg.det(R), axis=1))
    V[:, :, -1] *= sign_detR
    s[:, -1] *= sign_detR.flatten()
    R = np.matmul(V, U.transpose(0, 2, 1))  # proper rotation
    tr = np.expand_dims(np.sum(s, axis=1, keepdims=True), axis=2)
    a = tr * norm_t / norm_p                # optimal scale
    t = mu_t - a * np.matmul(mu_p, R)       # optimal translation
    # Apply the similarity transform and return the aligned prediction.
    return a * np.matmul(predicted, R) + t
| 4,372 | 30.460432 | 106 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/common/utils.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import hashlib
import os
import pathlib
import shutil
import sys
import time
import cv2
import numpy as np
import torch
def add_path():
    """Append the detector and tracker package roots to sys.path (no duplicates)."""
    candidates = [
        os.path.abspath('joints_detectors/Alphapose'),
        os.path.abspath('joints_detectors/hrnet'),
        os.path.abspath('pose_trackers'),
    ]
    # Lazy generator: each path is re-checked against sys.path as it grows.
    sys.path.extend(p for p in candidates if p not in sys.path)
def wrap(func, *args, unsqueeze=False):
    """
    Call a torch function with NumPy array arguments.

    Array arguments are converted to tensors (optionally unsqueezed along dim
    0) on the way in; tensor results are converted back to arrays (squeezed
    along dim 0 when unsqueeze=True). Non-array/non-tensor values pass through
    untouched.
    """
    converted = []
    for arg in args:
        if type(arg) == np.ndarray:
            arg = torch.from_numpy(arg)
            if unsqueeze:
                arg = arg.unsqueeze(0)
        converted.append(arg)
    result = func(*converted)

    def _back(res):
        # Mirror the input conversion for tensor outputs only.
        if type(res) == torch.Tensor:
            if unsqueeze:
                res = res.squeeze(0)
            return res.numpy()
        return res

    if isinstance(result, tuple):
        return tuple(_back(r) for r in result)
    return _back(result)
def deterministic_random(min_value, max_value, data):
    """
    Deterministically map the string `data` to an int between min_value and
    max_value by hashing it with SHA-256 (same input always gives the same
    output, unlike random.randint).
    """
    digest = hashlib.sha256(data.encode()).digest()
    seed = int.from_bytes(digest[:4], byteorder='little', signed=False)
    span = max_value - min_value
    return min_value + int(seed / (2 ** 32 - 1) * span)
def alpha_map(prediction):
    """Affinely rescale prediction so its minimum maps to -0.8 and its maximum to 0.8."""
    lo, hi = prediction.min(), prediction.max()
    slope = 1.6 / (hi - lo)
    intercept = 0.8 - slope * hi
    return slope * prediction + intercept
def change_score(prediction, detectron_detection_path):
    """
    Overwrite the confidence channel (index 2) of `prediction` in place with
    the scores stored in a Detectron keypoint archive
    (npz key 'positions_2d' -> 'S1' -> 'Directions 1'), and return it.
    """
    archive = np.load(detectron_detection_path, allow_pickle=True)['positions_2d'].item()
    detectron_pose = archive['S1']['Directions 1']
    prediction[..., 2] = detectron_pose[..., 2]
    return prediction
class Timer:
    """Context manager that optionally prints the wall-clock time of its body."""

    def __init__(self, message, show=True):
        self.message = message  # label printed alongside the timing
        self.elapsed = 0        # NOTE(review): never updated by __exit__; kept for compatibility
        self.show = show

    def __enter__(self):
        self.start = time.perf_counter()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.show:
            return
        print(f'{self.message} --- elapsed time: {time.perf_counter() - self.start} s')
def calculate_area(data):
    """
    Bounding-rectangle area of a set of keypoints.

    Accepts either the flat AlphaPose JSON layout [x, y, score, ..., x, y, score]
    or an array of [x, y] rows, and returns the (non-negative) width * height
    of the axis-aligned rectangle covering all points.
    """
    pts = np.array(data)
    if len(pts.shape) == 1:
        pts = np.reshape(pts, (-1, 3))  # flat triples -> (n, 3)
    width = max(pts[:, 0]) - min(pts[:, 0])
    height = max(pts[:, 1]) - min(pts[:, 1])
    return np.abs(width * height)
def read_video(filename, fps=None, skip=0, limit=-1):
    """
    Generator over the frames of a video, decoded as RGB arrays.

    filename -- path of the video file
    fps -- unused (kept for API compatibility)
    skip -- number of leading frames to drop before yielding
    limit -- stop once this many frames have been read in total
             (including skipped ones); -1 means no limit
    """
    stream = cv2.VideoCapture(filename)
    i = 0
    while True:
        grabbed, frame = stream.read()
        # if the `grabbed` boolean is `False`, then we have
        # reached the end of the video file
        if not grabbed:
            print('===========================> This video get ' + str(i) + ' frames in total.')
            sys.stdout.flush()
            break
        i += 1
        if i > skip:
            # OpenCV decodes BGR; convert so consumers receive RGB.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            yield np.array(frame)
        if i == limit:
            break
def split_video(video_path):
    """
    Dump every frame of video_path as zero-padded PNGs under
    <video dir>/alpha_pose_<video name>/split_image/ (recreated from scratch)
    and return that directory path.
    """
    stream = cv2.VideoCapture(video_path)
    output_dir = os.path.dirname(video_path)
    video_name = os.path.basename(video_path)
    video_name = video_name[:video_name.rfind('.')]  # strip the extension
    save_folder = pathlib.Path(f'./{output_dir}/alpha_pose_{video_name}/split_image/')
    # Start from an empty folder so stale frames from a previous run disappear.
    shutil.rmtree(str(save_folder), ignore_errors=True)
    save_folder.mkdir(parents=True, exist_ok=True)
    total_frames = int(stream.get(cv2.CAP_PROP_FRAME_COUNT))
    length = len(str(total_frames)) + 1  # zero-padding width keeps names sortable
    i = 1
    while True:
        grabbed, frame = stream.read()
        if not grabbed:
            # NOTE(review): i is 1-based and already incremented past the last
            # written frame, so "{i + 1}" over-reports the count by 2 — confirm.
            print(f'Split totally {i + 1} images from video.')
            break
        save_path = f'{save_folder}/output{str(i).zfill(length)}.png'
        cv2.imwrite(save_path, frame)
        i += 1
    # NOTE(review): save_path is unbound (NameError) if the video has no frames.
    saved_path = os.path.dirname(save_path)
    print(f'Split images saved in {saved_path}')
    return saved_path
def evaluate(test_generator, model_pos, action=None, return_predictions=False, joints_leftright=None):
    """
    Inference the 3d positions from 2d position.

    :type test_generator: UnchunkedGenerator
    :param test_generator: yields (_, batch, batch_2d) chunks
    :param model_pos: 3d pose model
    :param action: unused (kept for API compatibility)
    :param return_predictions: return predictions if true
    :param joints_leftright: optional ([left indices], [right indices]) pair used
        to undo flip augmentation; defaults to the H3.6M 17-joint layout
    :return: 3D predictions for the FIRST batch when return_predictions is
        True; otherwise None (the loop computes nothing else)
    """
    if not joints_leftright:
        joints_left, joints_right = list([4, 5, 6, 11, 12, 13]), list([1, 2, 3, 14, 15, 16])
    else:
        joints_left, joints_right = joints_leftright[0], joints_leftright[1]
    with torch.no_grad():
        model_pos.eval()
        N = 0  # NOTE(review): never updated or read below
        for _, batch, batch_2d in test_generator.next_epoch():
            inputs_2d = torch.from_numpy(batch_2d.astype('float32'))
            if torch.cuda.is_available():
                inputs_2d = inputs_2d.cuda()
            # Positional model
            predicted_3d_pos = model_pos(inputs_2d)
            if test_generator.augment_enabled():
                # Undo flipping and take average with non-flipped version:
                # element 0 is the original, element 1 the mirrored copy.
                predicted_3d_pos[1, :, :, 0] *= -1
                predicted_3d_pos[1, :, joints_left + joints_right] = predicted_3d_pos[1, :, joints_right + joints_left]
                predicted_3d_pos = torch.mean(predicted_3d_pos, dim=0, keepdim=True)
            if return_predictions:
                return predicted_3d_pos.squeeze(0).cpu().numpy()
if __name__ == '__main__':
    # Ad-hoc manual test: split a sample video into frames (expects to be run
    # from inside common/, one level below the repo root).
    os.chdir('..')
    split_video('outputs/kobe.mp4')
| 6,189 | 29.048544 | 140 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/common/model.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
class TemporalModelBase(nn.Module):
    """
    Shared machinery for the temporal convolutional pose models.
    Subclasses must populate self.causal_shift / self.layers_bn and implement
    _forward_blocks; do not instantiate this class directly for inference.
    """

    def __init__(self, num_joints_in, in_features, num_joints_out,
                 filter_widths, causal, dropout, channels):
        super().__init__()
        # Only odd widths keep the receptive field symmetric around the centre frame.
        for fw in filter_widths:
            assert fw % 2 != 0, 'Only odd filter widths are supported'
        self.num_joints_in = num_joints_in
        self.in_features = in_features
        self.num_joints_out = num_joints_out
        self.filter_widths = filter_widths
        self.drop = nn.Dropout(dropout)
        self.relu = nn.ReLU(inplace=True)
        # Accumulated per-block padding; subclasses append one entry per block.
        self.pad = [filter_widths[0] // 2]
        self.expand_bn = nn.BatchNorm1d(channels, momentum=0.1)
        # Final 1x1 conv projecting channels -> per-joint 3D coordinates.
        self.shrink = nn.Conv1d(channels, num_joints_out * 3, 1)

    def set_bn_momentum(self, momentum):
        """Set BatchNorm momentum on every BN layer (used for momentum decay)."""
        self.expand_bn.momentum = momentum
        for bn in self.layers_bn:
            bn.momentum = momentum

    def receptive_field(self):
        """Total receptive field of this model, in frames."""
        return 1 + 2 * sum(self.pad)

    def total_causal_shift(self):
        """
        Asymmetric offset for sequence padding: 0 when causal convolutions are
        disabled, otherwise half the receptive field.
        """
        shift = self.causal_shift[0]
        dilation = self.filter_widths[0]
        for width, cshift in zip(self.filter_widths[1:], self.causal_shift[1:]):
            shift += cshift * dilation
            dilation *= width
        return shift

    def forward(self, x):
        """x: (batch, frames, joints, features) -> (batch, out_frames, num_joints_out, 3)."""
        assert len(x.shape) == 4
        assert x.shape[-2] == self.num_joints_in
        assert x.shape[-1] == self.in_features
        batch = x.shape[0]
        # Fold joints*features into channels: (B, F, J*C) -> (B, J*C, F).
        x = x.view(batch, x.shape[1], -1).permute(0, 2, 1)
        x = self._forward_blocks(x)
        # Back to (B, F_out, J_out, 3).
        x = x.permute(0, 2, 1)
        return x.view(batch, -1, self.num_joints_out, 3)
class TemporalModel(TemporalModelBase):
    """
    Reference 3D pose estimation model with temporal convolutions.
    This implementation can be used for all use-cases.
    """

    def __init__(self, num_joints_in, in_features, num_joints_out,
                 filter_widths, causal=False, dropout=0.25, channels=1024, dense=False):
        """
        Initialize this model.

        Arguments:
        num_joints_in -- number of input joints (e.g. 17 for Human3.6M)
        in_features -- number of input features for each joint (typically 2 for 2D input)
        num_joints_out -- number of output joints (can be different than input)
        filter_widths -- list of convolution widths, which also determines the # of blocks and receptive field
        causal -- use causal convolutions instead of symmetric convolutions (for real-time applications)
        dropout -- dropout probability
        channels -- number of convolution channels
        dense -- use regular dense convolutions instead of dilated convolutions (ablation experiment)
        """
        super().__init__(num_joints_in, in_features, num_joints_out, filter_widths, causal, dropout, channels)
        self.expand_conv = nn.Conv1d(num_joints_in * in_features, channels, filter_widths[0], bias=False)
        layers_conv = []
        layers_bn = []
        # Causal models only look at past frames, so the residual tap is
        # shifted by half the (dilated) filter width per block.
        self.causal_shift = [(filter_widths[0]) // 2 if causal else 0]
        next_dilation = filter_widths[0]
        for i in range(1, len(filter_widths)):
            self.pad.append((filter_widths[i] - 1) * next_dilation // 2)
            self.causal_shift.append((filter_widths[i] // 2 * next_dilation) if causal else 0)
            # Each block: dilated temporal conv (or a dense equivalent covering
            # the same receptive field) followed by a 1x1 conv; dilation grows
            # multiplicatively with each block.
            layers_conv.append(nn.Conv1d(channels, channels,
                                         filter_widths[i] if not dense else (2 * self.pad[-1] + 1),
                                         dilation=next_dilation if not dense else 1,
                                         bias=False))
            layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
            layers_conv.append(nn.Conv1d(channels, channels, 1, dilation=1, bias=False))
            layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
            next_dilation *= filter_widths[i]
        self.layers_conv = nn.ModuleList(layers_conv)
        self.layers_bn = nn.ModuleList(layers_bn)

    def _forward_blocks(self, x):
        x = self.drop(self.relu(self.expand_bn(self.expand_conv(x))))
        for i in range(len(self.pad) - 1):
            pad = self.pad[i + 1]
            shift = self.causal_shift[i + 1]
            # Crop the skip connection to the frames that survive the
            # unpadded dilated convolution below.
            res = x[:, :, pad + shift: x.shape[2] - pad + shift]
            x = self.drop(self.relu(self.layers_bn[2 * i](self.layers_conv[2 * i](x))))
            x = res + self.drop(self.relu(self.layers_bn[2 * i + 1](self.layers_conv[2 * i + 1](x))))
        x = self.shrink(x)
        return x
class TemporalModelOptimized1f(TemporalModelBase):
    """
    3D pose estimation model optimized for single-frame batching, i.e.
    where batches have input length = receptive field, and output length = 1.
    This scenario is only used for training when stride == 1.

    This implementation replaces dilated convolutions with strided convolutions
    to avoid generating unused intermediate results. The weights are interchangeable
    with the reference implementation.
    """

    def __init__(self, num_joints_in, in_features, num_joints_out,
                 filter_widths, causal=False, dropout=0.25, channels=1024):
        """
        Initialize this model.

        Arguments:
        num_joints_in -- number of input joints (e.g. 17 for Human3.6M)
        in_features -- number of input features for each joint (typically 2 for 2D input)
        num_joints_out -- number of output joints (can be different than input)
        filter_widths -- list of convolution widths, which also determines the # of blocks and receptive field
        causal -- use causal convolutions instead of symmetric convolutions (for real-time applications)
        dropout -- dropout probability
        channels -- number of convolution channels
        """
        super().__init__(num_joints_in, in_features, num_joints_out, filter_widths, causal, dropout, channels)
        # Stride replaces dilation: weights stay interchangeable with
        # TemporalModel because only the frame sampling pattern differs.
        self.expand_conv = nn.Conv1d(num_joints_in * in_features, channels, filter_widths[0], stride=filter_widths[0], bias=False)
        layers_conv = []
        layers_bn = []
        self.causal_shift = [(filter_widths[0] // 2) if causal else 0]
        next_dilation = filter_widths[0]
        for i in range(1, len(filter_widths)):
            self.pad.append((filter_widths[i] - 1) * next_dilation // 2)
            self.causal_shift.append((filter_widths[i] // 2) if causal else 0)
            # Each block downsamples frames by a factor of filter_widths[i].
            layers_conv.append(nn.Conv1d(channels, channels, filter_widths[i], stride=filter_widths[i], bias=False))
            layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
            layers_conv.append(nn.Conv1d(channels, channels, 1, dilation=1, bias=False))
            layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
            next_dilation *= filter_widths[i]
        self.layers_conv = nn.ModuleList(layers_conv)
        self.layers_bn = nn.ModuleList(layers_bn)

    def _forward_blocks(self, x):
        x = self.drop(self.relu(self.expand_bn(self.expand_conv(x))))
        for i in range(len(self.pad) - 1):
            # Subsample the skip connection to line up with the strided conv output.
            res = x[:, :, self.causal_shift[i + 1] + self.filter_widths[i + 1] // 2:: self.filter_widths[i + 1]]
            x = self.drop(self.relu(self.layers_bn[2 * i](self.layers_conv[2 * i](x))))
            x = res + self.drop(self.relu(self.layers_bn[2 * i + 1](self.layers_conv[2 * i + 1](x))))
        x = self.shrink(x)
        return x
| 8,048 | 39.044776 | 130 | py |
PoseTriplet | PoseTriplet-main/estimator_inference/common/quaternion.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
def qrot(q, v):
    """
    Rotate vector(s) v by quaternion(s) q = (w, x, y, z), assumed normalized.

    q: (*, 4) and v: (*, 3) with matching leading dims; returns (*, 3).
    Uses the identity v' = v + 2w (u x v) + 2 (u x (u x v)) with u = q.xyz.
    """
    assert q.shape[-1] == 4
    assert v.shape[-1] == 3
    assert q.shape[:-1] == v.shape[:-1]
    w = q[..., :1]
    u = q[..., 1:]
    last = len(q.shape) - 1
    uv = torch.cross(u, v, dim=last)
    uuv = torch.cross(u, uv, dim=last)
    return v + 2 * (w * uv + uuv)
def qinverse(q, inplace=False):
    """
    Conjugate of a quaternion (= inverse for normalized q).
    With inplace=True the vector part of q is negated directly and q itself
    is returned; otherwise a new tensor is built.
    """
    if inplace:
        q[..., 1:] *= -1
        return q
    w = q[..., :1]
    xyz = q[..., 1:]
    return torch.cat((w, -xyz), dim=len(q.shape) - 1)
| 1,004 | 26.162162 | 78 | py |
PoseTriplet | PoseTriplet-main/imitator/core/policy_disc.py | import torch.nn as nn
from utils.math import *
from core.distributions import Categorical
from core.policy import Policy
class PolicyDiscrete(Policy):
    """Categorical policy head: maps network features to a distribution over discrete actions."""

    def __init__(self, net, action_num, net_out_dim=None):
        """
        net -- feature-extractor module whose output feeds the action head
        action_num -- number of discrete actions
        net_out_dim -- feature dimension; defaults to net.out_dim
        """
        super().__init__()
        self.type = 'discrete'
        if net_out_dim is None:
            net_out_dim = net.out_dim
        self.net = net
        self.action_head = nn.Linear(net_out_dim, action_num)
        # Small-scale init keeps the initial policy close to uniform.
        self.action_head.weight.data.mul_(0.1)
        self.action_head.bias.data.mul_(0.0)

    def forward(self, x):
        """Return a Categorical action distribution for states x."""
        x = self.net(x)
        action_prob = torch.softmax(self.action_head(x), dim=1)
        return Categorical(probs=action_prob)

    def get_fim(self, x):
        """
        Fisher-information pieces for a categorical policy: diag(1 / p).
        BUG FIX: forward() returns a Categorical distribution, not a probability
        tensor, so the previous `action_prob.pow(-1)` raised AttributeError;
        read the probabilities via .probs explicitly.
        """
        dist = self.forward(x)
        action_prob = dist.probs
        M = action_prob.pow(-1).view(-1).detach()
        return M, action_prob, {}
| 828 | 28.607143 | 63 | py |
PoseTriplet | PoseTriplet-main/imitator/core/distributions.py | import torch
from torch.distributions import Normal
from torch.distributions import Categorical as TorchCategorical
class DiagGaussian(Normal):
    """Diagonal Gaussian with TRPO/PPO helpers (joint log-prob, KL vs. a detached copy)."""

    def __init__(self, loc, scale):
        super().__init__(loc, scale)

    def kl(self):
        """KL( detached copy || current ) per sample, summed over action dims -> (B, 1)."""
        mu_new = self.loc
        std_new = self.scale
        log_std_new = std_new.log()
        mu_old = mu_new.detach()
        std_old = std_new.detach()
        log_std_old = log_std_new.detach()
        kl = (log_std_new - log_std_old
              + (std_old.pow(2) + (mu_old - mu_new).pow(2)) / (2.0 * std_new.pow(2)) - 0.5)
        return kl.sum(1, keepdim=True)

    def log_prob(self, value):
        """Joint log-density over the whole action vector -> (B, 1)."""
        return super().log_prob(value).sum(1, keepdim=True)

    def mean_sample(self):
        """Deterministic action: the distribution mean."""
        return self.loc
class Categorical(TorchCategorical):
    """Categorical distribution with TRPO/PPO helpers matching DiagGaussian's interface."""

    def __init__(self, probs=None, logits=None):
        super().__init__(probs, logits)

    def kl(self):
        """
        KL( detached copy || current ) per sample -> (B, 1).
        BUG FIX: the previous body was copy-pasted from DiagGaussian and read
        self.loc / self.scale, which a categorical distribution does not have
        (it raised AttributeError). Use the categorical KL:
        sum_a p0(a) * (log p0(a) - log p1(a)).
        """
        probs1 = self.probs
        probs0 = probs1.detach()
        kl = (probs0 * (probs0.log() - probs1.log())).sum(1, keepdim=True)
        return kl

    def log_prob(self, value):
        """Log-probability of the chosen action, shaped (B, 1)."""
        return super().log_prob(value).unsqueeze(1)

    def mean_sample(self):
        """Deterministic action: the arg-max category."""
        return self.probs.argmax(dim=1)
| 1,379 | 27.75 | 107 | py |
PoseTriplet | PoseTriplet-main/imitator/core/policy_gaussian.py | import torch.nn as nn
from core.distributions import DiagGaussian
from core.policy import Policy
from utils.math import *
class PolicyGaussian(Policy):
    """Gaussian policy head: state-dependent mean, state-independent learned log-std."""

    def __init__(self, net, action_dim, net_out_dim=None, log_std=0, fix_std=False):
        super().__init__()
        self.type = 'gaussian'
        self.net = net
        if net_out_dim is None:
            net_out_dim = net.out_dim
        self.action_mean = nn.Linear(net_out_dim, action_dim)
        # Small-scale init keeps initial action means near zero.
        self.action_mean.weight.data.mul_(0.1)
        self.action_mean.bias.data.mul_(0.0)
        # One shared log-std per action dim; frozen when fix_std is True.
        self.action_log_std = nn.Parameter(torch.ones(1, action_dim) * log_std,
                                           requires_grad=not fix_std)

    def forward(self, x):
        """Return a DiagGaussian action distribution for states x."""
        features = self.net(x)
        mean = self.action_mean(features)
        log_std = self.action_log_std.expand_as(mean)
        return DiagGaussian(mean, torch.exp(log_std))

    def get_fim(self, x):
        """Fisher-information pieces for natural-gradient methods (diagonal covariance)."""
        dist = self.forward(x)
        cov_inv = self.action_log_std.exp().pow(-2).squeeze(0).repeat(x.size(0))
        # Locate the log-std parameter inside the flattened parameter vector.
        param_count = 0
        std_index = 0
        std_id = 0
        for pid, (name, param) in enumerate(self.named_parameters()):
            if name == "action_log_std":
                std_id = pid
                std_index = param_count
            param_count += param.view(-1).shape[0]
        return cov_inv.detach(), dist.loc, {'std_id': std_id, 'std_index': std_index}
| 1,432 | 33.95122 | 106 | py |
PoseTriplet | PoseTriplet-main/imitator/core/policy.py | import torch.nn as nn
class Policy(nn.Module):
    """Abstract policy base: subclasses' forward() must return an action distribution."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        """This function should return a distribution to sample action from."""
        raise NotImplementedError

    def select_action(self, x, mean_action=False):
        """Sample an action, or take the distribution mean when mean_action is True."""
        dist = self.forward(x)
        if mean_action:
            return dist.mean_sample()
        return dist.sample()

    def get_kl(self, x):
        """KL of the action distribution against its detached copy."""
        return self.forward(x).kl()

    def get_log_prob(self, x, action):
        """Log-probability of `action` under the current policy."""
        return self.forward(x).log_prob(action)
| 605 | 24.25 | 78 | py |
PoseTriplet | PoseTriplet-main/imitator/core/common.py | import torch
from utils import batch_to
def estimate_advantages(rewards, masks, values, gamma, tau):
    """Generalized Advantage Estimation (GAE).

    Sweeps the trajectory backwards; masks[t] == 0 marks an episode boundary
    and stops bootstrapping across it. Returns (normalized advantages, returns),
    both moved back to the tensors' original device.
    """
    device = rewards.device
    # GAE is a sequential scan — cheaper on CPU.
    rewards, masks, values = batch_to(torch.device('cpu'), rewards, masks, values)
    tensor_type = type(rewards)
    steps = rewards.size(0)
    deltas = tensor_type(steps, 1)
    advantages = tensor_type(steps, 1)

    next_value = 0
    next_advantage = 0
    for t in range(steps - 1, -1, -1):
        deltas[t] = rewards[t] + gamma * next_value * masks[t] - values[t]
        advantages[t] = deltas[t] + gamma * tau * next_advantage * masks[t]
        next_value = values[t, 0]
        next_advantage = advantages[t, 0]

    returns = values + advantages
    advantages = (advantages - advantages.mean()) / advantages.std()
    advantages, returns = batch_to(device, advantages, returns)
    return advantages, returns
| 858 | 32.038462 | 82 | py |
PoseTriplet | PoseTriplet-main/imitator/core/critic.py | import torch.nn as nn
import torch
class Value(nn.Module):
    """State-value critic: feature network `net` followed by a scalar head."""

    def __init__(self, net, net_out_dim=None):
        super().__init__()
        self.net = net
        if net_out_dim is None:
            net_out_dim = net.out_dim
        self.value_head = nn.Linear(net_out_dim, 1)
        # Small initial weights / zero bias keep early value estimates near zero.
        self.value_head.weight.data.mul_(0.1)
        self.value_head.bias.data.mul_(0.0)

    def forward(self, x):
        features = self.net(x)
        return self.value_head(features)
| 477 | 24.157895 | 51 | py |
PoseTriplet | PoseTriplet-main/imitator/pose_imitation/pose_mimic_eval.py | import argparse
import os
import sys
import pickle
import time
sys.path.append(os.getcwd())
import multiprocessing
from utils import *
from core.policy_gaussian import PolicyGaussian
from core.critic import Value
from models.mlp import MLP
from models.video_state_net import VideoStateNet
from models.video_reg_net import VideoRegNet
from envs.visual.humanoid_vis import HumanoidVisEnv
from pose_imitation.envs.humanoid_v4 import HumanoidEnv
from pose_imitation.utils.posemimic_config import Config
from pose_imitation.core.reward_function import reward_func
# config
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', default='subject_03')
parser.add_argument('--render', action='store_true', default=False)
parser.add_argument('--save-gif', action='store_true', default=False) # plot sktwpos in gif
parser.add_argument('--gif-ds', type=int, default='5')
parser.add_argument('--iter', type=int, default='3000')
parser.add_argument('--expert-ind', type=int, default=-1) # take_ind
parser.add_argument('--sync', action='store_true', default=False)
parser.add_argument('--causal', action='store_true', default=False)
parser.add_argument('--data', default='test')
parser.add_argument('--show-noise', action='store_true', default=False)
parser.add_argument('--fail-safe', default='naivefs') # 'valuefs'
parser.add_argument('--valuefs-factor', type=float, default='0.6')
parser.add_argument('--mocap-folder', type=str, default='debug')
parser.add_argument('--num-threads', type=int, default=32)
args = parser.parse_args()
cfg = Config(args.cfg, create_dirs=False, mocap_folder=args.mocap_folder)
# 准备log文件.
dtype = torch.float64
torch.set_default_dtype(dtype)
np.random.seed(cfg.seed)
torch.manual_seed(cfg.seed)
torch.set_grad_enabled(False)
logger = create_logger(os.path.join(cfg.log_dir, 'log_eval.txt'))
"""environment"""
env = HumanoidEnv(cfg)
env.load_experts(cfg.takes[args.data], cfg.expert_feat_file, cfg.cnn_feat_key)
env_vis = HumanoidVisEnv(cfg.vis_model_file, 10)
env.seed(cfg.seed)
epos = None
cnn_feat_dim = env.cnn_feat[0].shape[-1]
actuators = env.model.actuator_names
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
body_qposaddr = get_body_qposaddr(env.model)
# if args.fail_safe == 'naivefs':
# env.set_fix_head_lb(cfg.fix_head_lb)
if cfg.set_fix_start_state:
start_state = np.zeros(117)
start_state[:7] = np.array([0,0,1,0,0,0,1])
env.set_fix_sampling(start_state=start_state)
"""load policy net"""
policy_vs_net = VideoStateNet(cnn_feat_dim, cfg.policy_v_hdim, cfg.fr_margin, cfg.policy_v_net, cfg.policy_v_net_param, cfg.causal)
value_vs_net = VideoStateNet(cnn_feat_dim, cfg.value_v_hdim, cfg.fr_margin, cfg.value_v_net, cfg.value_v_net_param, cfg.causal)
policy_net = PolicyGaussian(MLP(state_dim + cfg.policy_v_hdim, cfg.policy_hsize, cfg.policy_htype), action_dim,
log_std=cfg.log_std, fix_std=cfg.fix_std)
value_net = Value(MLP(state_dim + cfg.value_v_hdim, cfg.value_hsize, cfg.value_htype))
cp_path = '%s/iter_%04d.p' % (cfg.model_dir, args.iter)
logger.info('loading policy net from checkpoint: %s' % cp_path)
model_cp = pickle.load(open(cp_path, "rb"))
policy_net.load_state_dict(model_cp['policy_dict'])
policy_vs_net.load_state_dict(model_cp['policy_vs_dict'])
value_net.load_state_dict(model_cp['value_dict'])
value_vs_net.load_state_dict(model_cp['value_vs_dict'])
running_state = model_cp['running_state']
value_stat = RunningStat(1)
# count_param
count_param(logger, policy_vs_net, 'policy_vs_net')
count_param(logger, value_vs_net, 'value_vs_net')
count_param(logger, policy_net, 'policy_net')
count_param(logger, value_net, 'value_net')
"""load state net"""
# to_test(policy_vs_net, policy_net, value_vs_net, value_net, state_net)
to_test(policy_vs_net, policy_net, value_vs_net, value_net)
# reward functions
expert_reward = reward_func[cfg.reward_id]
def render():
    """Mirror sim + expert poses into the visualization env and draw one frame."""
    env_vis.data.qpos[:env.model.nq] = env.data.qpos.copy()  # learner pose
    env_vis.data.qpos[env.model.nq:] = epos                  # expert pose (module-level global)
    # Shift the expert copy along its first root coordinate so both bodies are
    # visible side by side — presumably the x axis; TODO confirm qpos layout.
    env_vis.data.qpos[env.model.nq] += 1.0
    env_vis.sim_forward()
    env_vis.render()
def reset_env_state(state, ref_qpos):
    """Hard-reset the sim to a flat state vector and return the new observation.

    `state` is the concatenation [qpos[2:], qvel] (the first two qpos entries —
    presumably root x/y — are taken from `ref_qpos` so the character stays put);
    align_human_state then reconciles the pose with the reference.
    """
    qpos = ref_qpos.copy()
    qpos[2:] = state[:qpos.size - 2]  # pose part (all but the first two root coords)
    qvel = state[qpos.size - 2:]      # remainder of the flat vector is velocity
    align_human_state(qpos, qvel, ref_qpos)
    env.set_state(qpos, qvel)
    return env.get_obs()
def eval_expert(expert_ind):
    """Roll out the trained policy along expert clip `expert_ind`.

    Per frame, records predicted/reference qpos, predicted qvel and
    predicted/reference skeleton world positions. Applies the selected
    fail-safe ('valuefs': value drops below a fraction of its running mean;
    'naivefs': env reports failure) by hard-resetting the sim.

    Returns (traj_pred, traj_orig, vel_pred, skt_wpos_pred, skt_wpos_orig,
    num_reset, log_reset) — the first five stacked as (T, dim) arrays.
    """
    global epos  # shared with render()
    expert_name = env.expert_list[expert_ind]
    logger.info('Testing on expert trajectory %s' % expert_name)
    traj_pred = []
    traj_orig = []
    vel_pred = []
    skt_wpos_orig = []
    skt_wpos_pred = []
    num_reset = 0    # fail-safe reset count for this clip
    log_reset = []   # frame indices at which resets happened
    reward_episode = 0
    data_len = env.cnn_feat[expert_ind].shape[0]
    test_len = data_len - 2 * cfg.fr_margin  # temporal context margin cut on both ends
    env.set_fix_sampling(expert_ind, cfg.fr_margin, test_len)
    state = env.reset()
    cnn_feat = tensor(env.get_episode_cnn_feat())
    policy_vs_net.initialize(cnn_feat)
    value_vs_net.initialize(cnn_feat)
    # Per-frame (qpos, qvel) track used as the reset target — presumably the
    # kinematic/regressed prediction; confirm in HumanoidEnv.
    state_pred = env.get_episode_export_qpos_qvel()
    if cfg.set_fix_start_state:
        state = reset_env_state(start_state[2:], env.data.qpos)
    else:
        state = reset_env_state(state_pred[0, :], env.data.qpos)
    if running_state is not None:
        state = running_state(state, update=False)  # frozen normalization at eval time
    for t in range(test_len):
        ind = env.get_expert_index(t)
        epos = env.get_expert_attr('qpos', ind).copy()
        skt_wpos_e = env.get_expert_attr('skt_wpos', ind).copy()
        skt_wpos = env.get_skeleton_pos().copy()
        traj_pred.append(env.data.qpos.copy())
        traj_orig.append(epos.copy())
        vel_pred.append(env.data.qvel.copy())
        skt_wpos_orig.append(skt_wpos_e)
        skt_wpos_pred.append(skt_wpos)
        if args.sync:
            # Re-anchor the expert pose to the simulated character's frame for display.
            epos[:3] = quat_mul_vec(env.expert['rel_heading'], epos[:3] - env.expert['start_pos']) + env.expert['sim_pos']
            epos[3:7] = quaternion_multiply(env.expert['rel_heading'], epos[3:7])
        if args.render:
            render()
        if args.causal:
            # Causal mode: re-feed only the video features observed so far.
            policy_vs_net.initialize(cnn_feat[:t + 2*cfg.fr_margin + 1])
            policy_vs_net.t = t
        """learner policy"""
        state_var = tensor(state, dtype=dtype).unsqueeze(0)
        policy_vs_out = policy_vs_net(state_var)
        value_vs_out = value_vs_net(state_var)
        value = value_net(value_vs_out).item()
        value_stat.push(np.array([value]))  # feeds the running mean used by 'valuefs'
        action = policy_net.select_action(policy_vs_out, mean_action=not args.show_noise)[0].numpy()
        next_state, reward, done, info = env.step(action)
        if running_state is not None:
            next_state = running_state(next_state, update=False)
        reward, cinfo = reward_func[cfg.reward_id](env, state, action, info)
        logger.debug("{} {:.2f} {} {:.2f}".format(t, reward, np.array2string(cinfo, formatter={'all': lambda x: '%.4f' % x}), value))
        reward_episode += reward
        if info['end']:
            break
        if args.fail_safe == 'valuefs' and value < args.valuefs_factor * value_stat.mean[0] or \
           args.fail_safe == 'naivefs' and info['fail']:
            # Fail-safe triggered: snap the sim back to the predicted next-frame state.
            logger.info('reset state!')
            num_reset += 1
            log_reset.append(t)
            if cfg.set_fix_start_state:
                state = reset_env_state(start_state[2:], env.data.qpos)
            else:
                state = reset_env_state(state_pred[t+1, :], env.data.qpos)
            if running_state is not None:
                state = running_state(state, update=False)
        else:
            state = next_state
    return np.vstack(traj_pred), np.vstack(traj_orig), np.vstack(vel_pred), np.vstack(skt_wpos_pred), np.vstack(skt_wpos_orig), num_reset, log_reset
# if expert_ind is defined, then keep visualizing this trajectory
if args.expert_ind >= 0:
    # Debug mode: loop the same clip repeatedly (typically with --render).
    for i in range(100):
        eval_expert(args.expert_ind)
elif args.render or args.save_gif:
    # Sequential evaluation — rendering / GIF export cannot be multiprocessed.
    # traj_pred = {}
    # traj_orig = {}
    # vel_pred = {}
    # skt_wpos_pred = {}
    result_dict = {}
    num_reset = 0
    for i, take in enumerate(env.expert_list):
        # traj_pred[take], traj_orig[take], vel_pred[take], skt_wpos_pred[take], t_num_reset = eval_expert(i)
        traj_pred, traj_orig, vel_pred, skt_wpos_pred, skt_wpos_orig, t_num_reset, log_reset = eval_expert(i)
        # result_dict[take] = {'skt_wpos': skt_wpos_pred, 'skt_wpos_orig': skt_wpos_orig,}
        result_dict[take] = {'traj_pred': traj_pred, 'traj_orig': traj_orig,
                             'skt_wpos': skt_wpos_pred, 'skt_wpos_orig': skt_wpos_orig,
                             't_num_reset': t_num_reset, 'log_reset': log_reset,
                             }
        num_reset += t_num_reset
    # results = {'traj_pred': traj_pred, 'traj_orig': traj_orig, 'vel_pred': vel_pred, 'skt_wpos': skt_wpos_pred}
    meta = {'algo': 'ego_mimic', 'num_reset': num_reset}
    fs_tag = '' if args.fail_safe == 'valuefs' else '_' + args.fail_safe
    c_tag = '_causal' if args.causal else ''
    res_path = '%s/iter_%04d_%s%s%s.p' % (cfg.result_dir, args.iter, args.data, fs_tag, c_tag)
    # pickle.dump((results, meta), open(res_path, 'wb'))
    pickle.dump(result_dict, open(res_path, 'wb'))
    logger.info('num reset: %d' % num_reset)
    logger.info('saved results to %s' % res_path)
    if args.save_gif:
        # One GIF per take: predicted vs. reference 3D skeleton positions.
        from tmp.viz import save_3dpose_gif
        for i, take in enumerate(env.expert_list):
            tmp_skt_wpos_pred = result_dict[take]['skt_wpos'].reshape(-1, 16, 3)
            tmp_skt_wpos_orig = result_dict[take]['skt_wpos_orig'].reshape(-1, 16, 3)
            save_name = '%s/iter_%04d_%s%s%s/%s.gif' % (cfg.result_dir, args.iter, args.data, fs_tag, c_tag, take)
            mkd(save_name)
            save_3dpose_gif(tmp_skt_wpos_pred, tmp_skt_wpos_orig, save_name, args.gif_ds)
            logger.info('saved results gif to %s' % save_name)
else:
    # Headless evaluation: fan the clips out over worker processes.
    def get_eval_expert(q, expert_ind):
        """Worker: evaluate one clip and put (take_name, results) on queue `q`."""
        time0_get_expert = time.time()  # time start load.
        take = env.expert_list[expert_ind]
        traj_pred, traj_orig, vel_pred, skt_wpos_pred, skt_wpos_orig, t_num_reset, log_reset = eval_expert(expert_ind)
        # tmp_results = {'traj_pred': traj_pred, 'traj_orig': traj_orig, 'vel_pred': vel_pred,
        #                'skt_wpos': skt_wpos_pred, 't_num_reset':t_num_reset}
        tmp_results = {'traj_pred': traj_pred, 'traj_orig': traj_orig,
                       'skt_wpos': skt_wpos_pred, 'skt_wpos_orig': skt_wpos_orig,
                       't_num_reset':t_num_reset, 'log_reset': log_reset,}
        # tmp_results = {'skt_wpos': skt_wpos_pred, 't_num_reset':t_num_reset}
        q.put((take, tmp_results))  # queue
        time_cost_get_expert = time.time() - time0_get_expert  # time spent.
        print('-> get_eval_expert spends {:.2f}s on ID{}:{} with {:0>6d} frames'.format(time_cost_get_expert,
              expert_ind, take, skt_wpos_pred.shape[0]))

    # start
    # task_lst = np.arange(0, len(env.expert_list), 1)
    task_lst = env.expert_list
    num_threads = args.num_threads
    q = multiprocessing.Queue()
    timer = Timer()
    # num_sample = 0
    result_dict = {}
    # Process the takes in waves of num_threads workers. The queue is drained
    # BEFORE join() — a child cannot exit while its queued data is unread.
    for ep in range(math.ceil(len(task_lst) / num_threads)):
        p_lst = []
        for i in range(num_threads):
            idx = ep * num_threads + i
            if idx >= len(task_lst):
                break
            p = multiprocessing.Process(target=get_eval_expert, args=(q, idx,))
            p_lst.append(p)
        for p in p_lst:
            p.start()
        for p in p_lst:
            take, tmp_results = q.get()
            result_dict[take] = tmp_results
        for p in p_lst:
            p.join()
        print('complete ep:', ep)
    # end.
    timer.update_time('complete multiprocessing')
    # statistics:
    num_reset = 0
    for take in result_dict:
        tmp_results = result_dict[take]
        num_reset += tmp_results['t_num_reset']
    fs_tag = '' if args.fail_safe == 'valuefs' else '_' + args.fail_safe
    c_tag = '_causal' if args.causal else ''
    res_path = '%s/iter_%04d_%s%s%s.p' % (cfg.result_dir, args.iter, args.data, fs_tag, c_tag)
    # pickle.dump((results, meta), open(res_path, 'wb'))
    pickle.dump(result_dict, open(res_path, 'wb'))
    logger.info('num reset: %d' % num_reset)
    logger.info('saved results to %s' % res_path)
| 12,405 | 38.509554 | 148 | py |
PoseTriplet | PoseTriplet-main/imitator/pose_imitation/pose_mimic.py | import argparse
import os
import sys
import pickle
import time
sys.path.append(os.getcwd())
from utils import *
from core.policy_gaussian import PolicyGaussian
from core.critic import Value
from models.mlp import MLP
from models.video_state_net import VideoStateNet
from pose_imitation.envs.humanoid_v4 import HumanoidEnv
from pose_imitation.core.agent_mimic import AgentEgo
from pose_imitation.utils.posemimic_config import Config
from pose_imitation.core.reward_function import reward_func, reward_name_list
# ---- command-line config ----
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', default='subject_init')
parser.add_argument('--render', action='store_true', default=False)
parser.add_argument('--num-threads', type=int, default=1)
parser.add_argument('--gpu-index', type=int, default=1)
parser.add_argument('--iter', type=int, default=0)  # for pretrain case: resume from this iter
parser.add_argument('--show-noise', action='store_true', default=False)
parser.add_argument('--mocap-folder', type=str, default='checkpoint/exp_init/helix_0')
args = parser.parse_args()
if args.render:
    args.num_threads = 1  # rendering is single-process
cfg = Config(args.cfg, create_dirs=not (args.render or args.iter > 0), mocap_folder=args.mocap_folder)

# logging / determinism setup.
dtype = torch.float64
torch.set_default_dtype(dtype)
device = torch.device('cuda', index=args.gpu_index) if torch.cuda.is_available() else torch.device('cpu')
if torch.cuda.is_available():
    torch.cuda.set_device(args.gpu_index)
np.random.seed(cfg.seed)
torch.manual_seed(cfg.seed)
tb_logger = Logger(cfg.tb_dir) if not args.render else None
logger = create_logger(os.path.join(cfg.log_dir, 'log.txt'), file_handle=not args.render)

"""environment"""
env = HumanoidEnv(cfg)
env.seed(cfg.seed)
# if cfg.fix_head_lb:
#     env.set_fix_head_lb(cfg.fix_head_lb)
if cfg.set_fix_start_state:
    # Fixed canonical start: root lifted to height 1, remaining entries zero.
    # NOTE(review): layout presumably mirrors [qpos[2:], qvel] — confirm in HumanoidEnv.
    start_state = np.zeros(117)
    start_state[:7] = np.array([0,0,1,0,0,0,1])
    env.set_fix_sampling(start_state=start_state)
env.load_experts(cfg.takes['train'], cfg.expert_feat_file, cfg.cnn_feat_key)
cnn_feat_dim = env.cnn_feat[0].shape[-1]
actuators = env.model.actuator_names
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
running_state = ZFilter((state_dim,), clip=5)  # online observation normalization

"""define actor and critic: vs: video state for better information fusion"""
policy_vs_net = VideoStateNet(cnn_feat_dim, cfg.policy_v_hdim, cfg.fr_margin, cfg.policy_v_net, cfg.policy_v_net_param, cfg.causal)
value_vs_net = VideoStateNet(cnn_feat_dim, cfg.value_v_hdim, cfg.fr_margin, cfg.value_v_net, cfg.value_v_net_param, cfg.causal)
policy_net = PolicyGaussian(MLP(state_dim + cfg.policy_v_hdim, cfg.policy_hsize, cfg.policy_htype), action_dim,
                            log_std=cfg.log_std, fix_std=cfg.fix_std)
value_net = Value(MLP(state_dim + cfg.value_v_hdim, cfg.value_hsize, cfg.value_htype))
if args.iter > 0:
    # Resume: restore all four networks and the observation filter.
    cp_path = '%s/iter_%04d.p' % (cfg.model_dir, args.iter)
    logger.info('loading model from checkpoint: %s' % cp_path)
    model_cp = pickle.load(open(cp_path, "rb"))
    policy_net.load_state_dict(model_cp['policy_dict'])
    policy_vs_net.load_state_dict(model_cp['policy_vs_dict'])
    value_net.load_state_dict(model_cp['value_dict'])
    value_vs_net.load_state_dict(model_cp['value_vs_dict'])
    running_state = model_cp['running_state']
to_device(device, policy_net, value_net, policy_vs_net, value_vs_net)

# Policy and value each optimize their own net together with its video-state net.
policy_params = list(policy_net.parameters()) + list(policy_vs_net.parameters())
value_params = list(value_net.parameters()) + list(value_vs_net.parameters())
if cfg.policy_optimizer == 'Adam':
    optimizer_policy = torch.optim.Adam(policy_params, lr=cfg.policy_lr, weight_decay=cfg.policy_weightdecay)
else:
    optimizer_policy = torch.optim.SGD(policy_params, lr=cfg.policy_lr, momentum=cfg.policy_momentum, weight_decay=cfg.policy_weightdecay)
if cfg.value_optimizer == 'Adam':
    optimizer_value = torch.optim.Adam(value_params, lr=cfg.value_lr, weight_decay=cfg.value_weightdecay)
else:
    optimizer_value = torch.optim.SGD(value_params, lr=cfg.value_lr, momentum=cfg.value_momentum, weight_decay=cfg.value_weightdecay)

# reward functions
expert_reward = reward_func[cfg.reward_id]
reward_name_list = reward_name_list[cfg.reward_id]

"""create agent"""
agent = AgentEgo(env=env, dtype=dtype, device=device, running_state=running_state,
                 custom_reward=expert_reward, mean_action=args.render and not args.show_noise,
                 render=args.render, num_threads=args.num_threads,
                 policy_net=policy_net, policy_vs_net=policy_vs_net,
                 value_net=value_net, value_vs_net=value_vs_net,
                 optimizer_policy=optimizer_policy, optimizer_value=optimizer_value, opt_num_epochs=cfg.num_optim_epoch,
                 gamma=cfg.gamma, tau=cfg.tau, clip_epsilon=cfg.clip_epsilon,
                 policy_grad_clip=[(policy_params, 40)])
def pre_iter_update(i_iter):
    """Refresh iteration-dependent hyper-parameters before iteration `i_iter`:
    exploration noise rate, policy learning rate, and (if std is fixed) the
    scheduled log-std value."""
    cfg.update_adaptive_params(i_iter)
    agent.set_noise_rate(cfg.adp_noise_rate)
    set_optimizer_lr(optimizer_policy, cfg.adp_policy_lr)
    if cfg.fix_std:
        policy_net.action_log_std.fill_(cfg.adp_log_std)
    return
def main_loop():
    """Either visualize rollouts (--render) or run the PPO training loop:
    sample -> update -> log to console/TensorBoard -> periodic checkpoint."""
    if args.render:
        pre_iter_update(args.iter)
        agent.sample(1e8)  # effectively endless rollout for visualization
    else:
        for i_iter in range(args.iter, cfg.max_iter_num):
            """generate multiple trajectories that reach the minimum batch_size"""
            pre_iter_update(i_iter)
            batch, log = agent.sample(cfg.min_batch_size)
            agent.env.end_reward = log.avg_c_reward * cfg.gamma / (1 - cfg.gamma)  # set end reward
            """update networks"""
            t0 = time.time()
            agent.update_params(batch)
            t1 = time.time()
            """logging"""
            c_info = log.avg_c_info  # per-component reward breakdown
            logger.info(
                '{}\tT_sample {:.2f}\tT_update {:.2f}\tR_avg {:.4f} {}'
                '\tR_range ({:.4f}, {:.4f})\teps_len_avg {:.2f}'
                .format(i_iter, log.sample_time, t1 - t0, log.avg_c_reward,
                        np.array2string(c_info, formatter={'all': lambda x: '%.4f' % x}, separator=','),
                        log.min_c_reward, log.max_c_reward, log.avg_episode_reward))
            tb_logger.scalar_summary('total_reward', log.avg_c_reward, i_iter)
            tb_logger.scalar_summary('episode_len', log.avg_episode_reward, i_iter)
            for i in range(c_info.shape[0]):
                # tb_logger.scalar_summary('reward_%s' % reward_name_list[i], c_info[i], i_iter)
                tb_logger.scalar_summary('reward_detail/%s' % reward_name_list[i], c_info[i], i_iter)
            if cfg.save_model_interval > 0 and (i_iter+1) % cfg.save_model_interval == 0:
                # Checkpoint on CPU so the pickle can be loaded anywhere.
                with to_cpu(policy_net, value_net, policy_vs_net, value_vs_net):
                    cp_path = '%s/iter_%04d.p' % (cfg.model_dir, i_iter + 1)
                    model_cp = {'policy_dict': policy_net.state_dict(), 'policy_vs_dict': policy_vs_net.state_dict(),
                                'value_dict': value_net.state_dict(), 'value_vs_dict': value_vs_net.state_dict(),
                                'running_state': running_state}
                    pickle.dump(model_cp, open(cp_path, 'wb'))
            """clean up gpu memory"""
            torch.cuda.empty_cache()
    logger.info('training done!')


main_loop()
| 7,324 | 44.78125 | 138 | py |
PoseTriplet | PoseTriplet-main/imitator/pose_imitation/core/agent_mimic.py | import time
from utils.torch import *
from agents import AgentPPO
from core.common import *
from pose_imitation.core.trajbatch_mimic import TrajBatchEgo
class AgentEgo(AgentPPO):
    """PPO agent whose policy/value inputs are fused with video-state features:
    raw env states are passed through a VideoStateNet before either head."""

    def __init__(self, policy_vs_net=None, value_vs_net=None, **kwargs):
        super().__init__(use_mini_batch=False, **kwargs)
        self.traj_cls = TrajBatchEgo  # trajectory batch type carrying v_meta
        self.policy_vs_net = policy_vs_net
        self.value_vs_net = value_vs_net
        # policy_vs_net runs during sampling; both vs-nets are trained in updates.
        self.sample_modules.append(policy_vs_net)
        self.update_modules += [policy_vs_net, value_vs_net]

    def pre_sample(self):
        # Streaming/test mode for the video-state net while collecting rollouts.
        self.policy_vs_net.set_mode('test')

    def pre_episode(self):
        # Feed this episode's CNN features once so the vs-net can condition on video.
        self.policy_vs_net.initialize(tensor(self.env.get_episode_cnn_feat()))

    def push_memory(self, memory, state, action, mask, next_state, reward, exp):
        # v_meta ties each transition back to (expert clip index, start frame).
        v_meta = np.array([self.env.expert_ind, self.env.start_ind])
        memory.push(state, action, mask, next_state, reward, exp, v_meta)

    def trans_policy(self, states):
        # State transform applied before the policy head.
        return self.policy_vs_net(states)

    def trans_value(self, states):
        # State transform applied before the value head.
        return self.value_vs_net(states)

    def update_params(self, batch):
        """One PPO update over a sampled batch; returns wall-clock seconds spent."""
        t0 = time.time()
        to_train(*self.update_modules)
        states = torch.from_numpy(batch.states).to(self.dtype).to(self.device)  # e.g. 50632x115
        actions = torch.from_numpy(batch.actions).to(self.dtype).to(self.device)
        rewards = torch.from_numpy(batch.rewards).to(self.dtype).to(self.device)
        masks = torch.from_numpy(batch.masks).to(self.dtype).to(self.device)
        exps = torch.from_numpy(batch.exps).to(self.dtype).to(self.device)
        v_metas = batch.v_metas  # e.g. 50630x2: (expert_ind, start_ind) per transition
        # Train mode: vs-nets rebuild their features from the full batch + metadata.
        self.policy_vs_net.set_mode('train')
        self.value_vs_net.set_mode('train')
        self.policy_vs_net.initialize((masks, self.env.cnn_feat, v_metas))
        self.value_vs_net.initialize((masks, self.env.cnn_feat, v_metas))
        with to_test(*self.update_modules):
            with torch.no_grad():
                values = self.value_net(self.trans_value(states))

        """get advantage estimation from the trajectories"""
        advantages, returns = estimate_advantages(rewards, masks, values, self.gamma, self.tau)
        self.update_policy(states, actions, returns, advantages, exps)
        return time.time() - t0
| 2,297 | 38.62069 | 95 | py |
PoseTriplet | PoseTriplet-main/imitator/pose_imitation/utils/poseaug_viz.py |
"""
Functions to visualize human poses
"""
import matplotlib.pyplot as plt
import numpy as np
import os
from mpl_toolkits.mplot3d import Axes3D
def show3DposePair(realt3d, faket3d, ax, lcolor="#3498db", rcolor="#e74c3c", add_labels=True,
                   gt=True, pred=False):  # blue, orange
    """
    Visualize a 3d skeleton pair: `realt3d` drawn in black, `faket3d` in red.

    Args
      realt3d: 48-entry pose (reshaped to 16 joints x 3), reference pose.
      faket3d: 48-entry pose (reshaped to 16 joints x 3), generated pose.
      ax: matplotlib 3d axis to draw on
      lcolor: color for left part of the body (only read in the final else
              branch, which is unreachable here — idx is always 0 or 1)
      rcolor: color for right part of the body (same caveat)
      add_labels: whether to add coordinate labels
      gt, pred: unused in this function; kept for signature parity with show3Dpose.
    Returns
      Nothing. Draws on ax.
    """
    # assert channels.size == len(data_utils.H36M_NAMES)*3, "channels should have 96 entries, it has %d instead" % channels.size
    realt3d = np.reshape(realt3d, (16, -1))
    faket3d = np.reshape(faket3d, (16, -1))

    I = np.array([0, 1, 2, 0, 4, 5, 0, 7, 8, 8, 10, 11, 8, 13, 14])  # start points
    J = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])  # end points
    # NOTE(review): 16 flags for 15 bones (show3Dpose uses 15) — harmless since
    # LR is only read in the unreachable else branch below.
    LR = np.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1], dtype=bool)

    for idx, vals in enumerate([realt3d, faket3d]):
        # Make connection matrix
        for i in np.arange(len(I)):
            x, y, z = [np.array([vals[I[i], j], vals[J[i], j]]) for j in range(3)]
            if idx == 0:
                ax.plot(x, z, -y, lw=2, c='k')  # real pose: y/z swapped for display
                # ax.plot(x,y, z, lw=2, c='k')
            elif idx == 1:
                ax.plot(x, z, -y, lw=2, c='r')  # fake pose
                # ax.plot(x,y, z, lw=2, c='r')
            else:
                # ax.plot(x,z, -y, lw=2, c=lcolor if LR[i] else rcolor)
                ax.plot(x, y, z, lw=2, c=lcolor if LR[i] else rcolor)

    RADIUS = 1  # space around the subject
    xroot, yroot, zroot = vals[0,0], vals[0,1], vals[0,2]  # centered on the last-drawn pose's root
    ax.set_xlim3d([-RADIUS+xroot, RADIUS+xroot])
    ax.set_ylim3d([-RADIUS+zroot, RADIUS+zroot])
    ax.set_zlim3d([-RADIUS-yroot, RADIUS-yroot])

    if add_labels:
        ax.set_xlabel("x")
        ax.set_ylabel("z")
        ax.set_zlabel("-y")

    # Get rid of the ticks and tick labels
    # ax.set_xticks([])
    # ax.set_yticks([])
    # ax.set_zticks([])
    #
    # ax.get_xaxis().set_ticklabels([])
    # ax.get_yaxis().set_ticklabels([])
    # ax.set_zticklabels([])
    # ax.set_aspect('equal')

    # Get rid of the panes (actually, make them white)
    white = (1.0, 1.0, 1.0, 0.0)
    ax.w_xaxis.set_pane_color(white)
    ax.w_yaxis.set_pane_color(white)
    # Keep z pane

    # Get rid of the lines in 3d
    ax.w_xaxis.line.set_color(white)
    ax.w_yaxis.line.set_color(white)
    ax.w_zaxis.line.set_color(white)
def show3Dpose(channels, ax, lcolor="#3498db", rcolor="#e74c3c", add_labels=True,
               gt=False,pred=False):  # blue, orange
    """
    Visualize a 3d skeleton

    Args
      channels: 48-entry pose vector (reshaped to 16 joints x 3).
      ax: matplotlib 3d axis to draw on
      lcolor: color for left part of the body
      rcolor: color for right part of the body
      add_labels: whether to add coordinate labels
      gt: draw all bones in black (ground-truth style)
      pred: draw all bones in red (prediction style)
    Returns
      Nothing. Draws on ax.
    """
    # assert channels.size == len(data_utils.H36M_NAMES)*3, "channels should have 96 entries, it has %d instead" % channels.size
    vals = np.reshape( channels, (16, -1) )

    I = np.array([0,1,2,0,4,5,0,7,8,8,10,11,8,13,14])  # start points
    J = np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])  # end points
    LR = np.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1], dtype=bool)  # left/right flag per bone

    # Make connection matrix
    for i in np.arange( len(I) ):
        x, y, z = [np.array( [vals[I[i], j], vals[J[i], j]] ) for j in range(3)]
        if gt:
            ax.plot(x,z, -y, lw=2, c='k')  # y/z swapped so the figure stands upright
            # ax.plot(x,y, z, lw=2, c='k')
        elif pred:
            ax.plot(x,z, -y, lw=2, c='r')
            # ax.plot(x,y, z, lw=2, c='r')
        else:
            # ax.plot(x,z, -y, lw=2, c=lcolor if LR[i] else rcolor)
            ax.plot(x, z, -y, lw=2, c=lcolor if LR[i] else rcolor)

    RADIUS = 1  # space around the subject
    xroot, yroot, zroot = vals[0,0], vals[0,1], vals[0,2]  # root joint centers the view
    ax.set_xlim3d([-RADIUS+xroot, RADIUS+xroot])
    ax.set_ylim3d([-RADIUS+zroot, RADIUS+zroot])
    ax.set_zlim3d([-RADIUS-yroot, RADIUS-yroot])

    if add_labels:
        ax.set_xlabel("x")
        ax.set_ylabel("z")
        ax.set_zlabel("-y")

    # Get rid of the ticks and tick labels
    # ax.set_xticks([])
    # ax.set_yticks([])
    # ax.set_zticks([])
    #
    # ax.get_xaxis().set_ticklabels([])
    # ax.get_yaxis().set_ticklabels([])
    # ax.set_zticklabels([])
    # ax.set_aspect('equal')

    # Get rid of the panes (actually, make them white)
    white = (1.0, 1.0, 1.0, 0.0)
    ax.w_xaxis.set_pane_color(white)
    ax.w_yaxis.set_pane_color(white)
    # Keep z pane

    # Get rid of the lines in 3d
    ax.w_xaxis.line.set_color(white)
    ax.w_yaxis.line.set_color(white)
    ax.w_zaxis.line.set_color(white)
def show2Dpose(channels, ax, lcolor="#3498db", rcolor="#e74c3c", add_labels=True):
    """
    Visualize a 2d skeleton

    Args
      channels: 32-entry pose vector (reshaped to 16 joints x 2).
      ax: matplotlib axis to draw on
      lcolor: color for left part of the body
      rcolor: color for right part of the body
      add_labels: whether to add coordinate labels
    Returns
      Nothing. Draws on ax.
    """
    vals = np.reshape(channels, (-1, 2))
    # plt.plot(vals[:,0], vals[:,1], 'ro')
    I = np.array([0, 1, 2, 0, 4, 5, 0, 7, 8, 8, 10, 11, 8, 13, 14])  # start points
    J = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])  # end points
    LR = np.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1], dtype=bool)  # left/right flag per bone

    # Make connection matrix
    for i in np.arange(len(I)):
        x, y = [np.array([vals[I[i], j], vals[J[i], j]]) for j in range(2)]
        # print('x',x)
        # print(y)
        ax.plot(x, -y, lw=2, c=lcolor if LR[i] else rcolor)  # y flipped: image coords -> plot coords

    # Get rid of the ticks
    # ax.set_xticks([])
    # ax.set_yticks([])
    #
    # # Get rid of tick labels
    # ax.get_xaxis().set_ticklabels([])
    # ax.get_yaxis().set_ticklabels([])

    RADIUS = 1  # space around the subject
    xroot, yroot = vals[0, 0], vals[0, 1]
    # ax.set_xlim([-RADIUS+xroot, RADIUS+xroot])
    # ax.set_ylim([-RADIUS+yroot, RADIUS+yroot])
    ax.set_xlim([-1, 1])  # fixed normalized view instead of root-centered
    ax.set_ylim([-1, 1])
    if add_labels:
        ax.set_xlabel("x")
        ax.set_ylabel("-y")

    ax.set_aspect('equal')
##############################
# wrap for simple usage
##############################
def wrap_show3d_pose(vals3d):
    """Convenience wrapper: draw one 3D pose in a fresh figure and block until closed."""
    figure = plt.figure()
    axis3d = Axes3D(figure)
    show3Dpose(vals3d, axis3d)
    plt.show()
def wrap_show2d_pose(vals2d):
    """Convenience wrapper: draw one 2D pose on fresh axes and block until closed."""
    axis2d = plt.axes()
    show2Dpose(vals2d, axis2d)
    plt.show()
import os
import matplotlib.pyplot as plt
# from common.camera import project_to_2d
# from common.viz import show3Dpose, show3DposePair, show2Dpose
import torch
# Reference H36M camera-frame samples, loaded once at import time and used as
# visual anchors in plot_poseaug. Paths are relative to the working directory,
# so importing this module fails if the files are absent.
h36m_cam3d_sample = torch.from_numpy(np.load('tmp/h36m_sample/inputs_3d_cam_f32.npy'))
h36m_cam2d_sample = torch.from_numpy(np.load('tmp/h36m_sample/inputs_2d_cam_f32.npy'))
def plot_poseaug(Gcam_rlt, e_2dpose, iter, log_dir):
    """Save a diagnostic figure comparing augmented poses against H36M samples.

    Gcam_rlt: dict with batched 'pose3D_camed' / 'pose2D_camed' tensors.
    e_2dpose: batch of estimated 2d poses (only the first entry is drawn).
    iter, log_dir: used to name '{log_dir}/tmp_viz/iter_XXXX.png'.
    """
    num = Gcam_rlt['pose3D_camed'].shape[0]
    idx1 = np.random.randint(num)  # second sample drawn at random from the batch
    inputs_3d = Gcam_rlt['pose3D_camed'][0]
    outputs_3d_ba = Gcam_rlt['pose3D_camed'][idx1]
    h36m_cam3d_sample_0 = h36m_cam3d_sample[0,0]
    # NOTE(review): hard-coded index bounds (5000, 32) presumably match the
    # preloaded sample arrays' shape — confirm against the .npy files.
    h36m_cam3d_sample_random = h36m_cam3d_sample[np.random.randint(5000),np.random.randint(32)]
    inputs_2d = Gcam_rlt['pose2D_camed'][0]
    outputs_2d_ba = Gcam_rlt['pose2D_camed'][idx1]
    h36m_cam2d_sample_0 = h36m_cam2d_sample[0,0]
    e_2dpose = e_2dpose[0]
    # plot the augmented pose from origin -> ba -> bl -> rt
    _plot_poseaug(
        inputs_3d.cpu().detach().numpy(), inputs_2d.cpu().detach().numpy(),
        outputs_3d_ba.cpu().detach().numpy(), outputs_2d_ba.cpu().detach().numpy(),
        h36m_cam3d_sample_0.cpu().detach().numpy(), h36m_cam2d_sample_0.cpu().detach().numpy(),
        h36m_cam3d_sample_random.cpu().detach().numpy(), e_2dpose.cpu().detach().numpy(),
        iter, log_dir
    )
def _plot_poseaug(
        tmp_inputs_3d, tmp_inputs_2d,
        tmp_outputs_3d_ba, tmp_outputs_2d_ba,
        tmp_h36m_cam3d_sample_0, tmp_h36m_cam2d_sample_0,
        tmp_h36m_cam3d_sample_random, tmp_e_2dpose,
        iter, log_dir
):
    """Lay out a 2x4 grid (3d poses on top, matching 2d poses below) and save it
    to '{log_dir}/tmp_viz/iter_XXXX.png'. All inputs are numpy arrays."""
    # plot all the rlt
    fig3d = plt.figure(figsize=(16, 8))

    # column 1: input 3d
    ax3din = fig3d.add_subplot(2, 4, 1, projection='3d')
    ax3din.set_title('input 3D')
    show3Dpose(tmp_inputs_3d, ax3din, gt=False)
    # show source 2d
    ax2din = fig3d.add_subplot(2, 4, 5)
    ax2din.set_title('input 2d')
    show2Dpose(tmp_inputs_2d, ax2din)

    # column 2: augmented (bone-angle) 3d
    ax3dba = fig3d.add_subplot(2, 4, 2, projection='3d')
    ax3dba.set_title('input/ba 3d')
    show3Dpose(tmp_outputs_3d_ba, ax3dba)
    # show source 2d
    ax2dba = fig3d.add_subplot(2, 4, 6)
    ax2dba.set_title('ba 2d')
    show2Dpose(tmp_outputs_2d_ba, ax2dba)

    # column 3: fixed H36M reference sample
    ax3dbl = fig3d.add_subplot(2, 4, 3, projection='3d')
    ax3dbl.set_title('tmp_h36m_cam3d_sample_0')
    show3Dpose(tmp_h36m_cam3d_sample_0, ax3dbl)
    # show source 2d
    ax2dbl = fig3d.add_subplot(2, 4, 7)
    ax2dbl.set_title('tmp_h36m_cam2d_sample_0')
    show2Dpose(tmp_h36m_cam2d_sample_0, ax2dbl)

    # column 4: random H36M 3d sample over the estimated 2d pose
    ax3drt = fig3d.add_subplot(2, 4, 4, projection='3d')
    ax3drt.set_title('tmp_h36m_cam3d_sample_random')
    show3Dpose(tmp_h36m_cam3d_sample_random, ax3drt, gt=False)
    # rt 3d to 2d
    ax2d = fig3d.add_subplot(2, 4, 8)
    ax2d.set_title('tmp_e_2dpose')
    show2Dpose(tmp_e_2dpose, ax2d)

    # Write the figure and release all open figures.
    os.makedirs('{}/tmp_viz'.format(log_dir), exist_ok=True)
    image_name = '{}/tmp_viz/iter_{:0>4d}.png'.format(log_dir, iter)
    plt.savefig(image_name)
    plt.close('all')
| 9,657 | 30.459283 | 130 | py |
PoseTriplet | PoseTriplet-main/imitator/models/video_reg_net.py | from utils.torch import *
from torch import nn
from models.resnet import ResNet
from models.tcn import TemporalConvNet
from models.rnn import RNN
from models.mlp import MLP
from models.mobile_net import MobileNet
from models.linear_model import LinearModel
class VideoRegNet(nn.Module):
    """Video regression net: optional per-frame encoder -> temporal net
    (LSTM or TCN) -> MLP -> linear head.

    Input is (batch, seq, *frame_shape); output is (batch*seq, out_dim) — the
    batch and time dimensions are merged before the MLP head (see forward).
    """

    def __init__(self, out_dim, v_hdim, cnn_fdim, no_cnn=False, frame_shape=(32,), mlp_dim=(300, 200),
                 cnn_type='mlp', v_net_type='lstm', v_net_param=None, causal=False):
        super().__init__()
        self.out_dim = out_dim
        self.cnn_fdim = cnn_fdim      # per-frame feature dim produced by the encoder
        self.v_hdim = v_hdim          # temporal net hidden/output dim
        self.no_cnn = no_cnn
        self.frame_shape = frame_shape
        self.input_dim = frame_shape[0]
        if no_cnn:
            self.cnn = None
        # elif cnn_type == 'resnet':
        #     self.cnn = ResNet(cnn_fdim)
        # elif cnn_type == 'mobile':
        #     self.cnn = MobileNet(cnn_fdim)
        elif cnn_type == 'mlp':
            self.cnn = LinearModel(input_size=self.input_dim, output_size=cnn_fdim,
                                   linear_size=512, num_stage=2, p_dropout=0.25)
        self.v_net_type = v_net_type
        if v_net_type == 'lstm':
            self.v_net = RNN(cnn_fdim, v_hdim, v_net_type, bi_dir=not causal)  # causal -> uni-directional
        elif v_net_type == 'tcn':
            if v_net_param is None:
                v_net_param = {}
            tcn_size = v_net_param.get('size', [64, 128])
            dropout = v_net_param.get('dropout', 0.2)
            kernel_size = v_net_param.get('kernel_size', 3)
            assert tcn_size[-1] == v_hdim  # last TCN channel count must match v_hdim
            self.v_net = TemporalConvNet(cnn_fdim, tcn_size, kernel_size=kernel_size, dropout=dropout, causal=causal)
        self.mlp = MLP(v_hdim, mlp_dim, 'relu')
        self.linear = nn.Linear(self.mlp.out_dim, out_dim)

    def forward_v_net(self, x):
        # TemporalConvNet consumes (N, C, L); permute in and restore afterwards.
        # NOTE(review): assumes the incoming layout follows the RNN convention
        # (dim 0 <-> time) — confirm against RNN/TemporalConvNet.
        if self.v_net_type == 'tcn':
            x = x.permute(1, 2, 0).contiguous()
        x = self.v_net(x)
        if self.v_net_type == 'tcn':
            x = x.permute(2, 0, 1).contiguous()
        return x

    def forward(self, x):
        # CNN: flatten (batch, seq, *frame_shape) to frames, encode, then
        # restore (batch, seq, cnn_fdim).
        if self.cnn is not None:
            x = self.cnn(x.view((-1,) + self.frame_shape)).view(-1, x.size(1), self.cnn_fdim)
        x = self.forward_v_net(x).view(-1, self.v_hdim)  # merge batch & time for the heads
        x = self.mlp(x)
        x = self.linear(x)
        return x

    def get_cnn_feature(self, x):
        # Per-frame encoder features only (no temporal net / heads).
        return self.cnn(x.view((-1,) + self.frame_shape))
if __name__ == '__main__':
    import time
    # Smoke test: regress 64-dim outputs from a (T=16, B=10, D=32) batch.
    print('start')
    net = VideoRegNet(64, 128, 128, v_net_type='lstm', cnn_type='mlp')
    batch = ones(16, 10, 32)
    start = time.time()
    pred = net(batch)
    print(pred.shape)
    print(time.time() - start)
| 2,690 | 33.5 | 117 | py |
PoseTriplet | PoseTriplet-main/imitator/models/video_forecast_net.py | import torch.nn as nn
from models.tcn import TemporalConvNet
from models.rnn import RNN
from utils.torch import *
class VideoForecastNet(nn.Module):
    """Fuses a forecasted visual context feature with a per-step state feature.

    The visual net only sees the ``v_margin`` frames *before* the episode
    start (forecast setting); ``forward`` outputs the concatenation
    [visual feature, state feature]. Train mode batches all rollout
    episodes using the scatter/gather index maps built in ``initialize``.
    """
    def __init__(self, cnn_feat_dim, state_dim, v_hdim=128, v_margin=10, v_net_type='lstm', v_net_param=None,
                 s_hdim=None, s_net_type='id', dynamic_v=False):
        # cnn_feat_dim: per-frame CNN feature size fed to the visual net.
        # state_dim:    dimension of the per-step state input.
        # v_margin:     number of context frames before the episode start.
        # s_net_type:   'lstm' encodes states recurrently, otherwise identity.
        # dynamic_v:    if True, the visual feature keeps evolving past the
        #               margin instead of being frozen at the last context frame.
        super().__init__()
        s_hdim = state_dim if s_hdim is None else s_hdim
        self.mode = 'test'
        self.cnn_feat_dim = cnn_feat_dim
        self.state_dim = state_dim
        self.v_net_type = v_net_type
        self.v_hdim = v_hdim
        self.v_margin = v_margin
        self.s_net_type = s_net_type
        self.s_hdim = s_hdim
        self.dynamic_v = dynamic_v
        self.out_dim = v_hdim + s_hdim
        if v_net_type == 'lstm':
            self.v_net = RNN(cnn_feat_dim, v_hdim, v_net_type, bi_dir=False)
        elif v_net_type == 'tcn':
            if v_net_param is None:
                v_net_param = {}
            tcn_size = v_net_param.get('size', [64, 128])
            dropout = v_net_param.get('dropout', 0.2)
            kernel_size = v_net_param.get('kernel_size', 3)
            assert tcn_size[-1] == v_hdim
            self.v_net = TemporalConvNet(cnn_feat_dim, tcn_size, kernel_size=kernel_size, dropout=dropout, causal=True)
        if s_net_type == 'lstm':
            self.s_net = RNN(state_dim, s_hdim, s_net_type, bi_dir=False)
        self.v_out = None  # cached visual feature (test mode)
        self.t = 0         # current step index (test mode)
        # training only
        self.indices = None
        self.s_scatter_indices = None
        self.s_gather_indices = None
        self.v_gather_indices = None
        self.cnn_feat_ctx = None
        self.num_episode = None
        self.max_episode_len = None
        self.set_mode('test')
    def set_mode(self, mode):
        # 'test' (step-wise) or 'train' (batched); the state LSTM follows suit.
        self.mode = mode
        if self.s_net_type == 'lstm':
            if mode == 'train':
                self.s_net.set_mode('batch')
            else:
                self.s_net.set_mode('step')
    def initialize(self, x):
        """Test mode: x holds the clip's CNN features; cache the visual
        feature of the v_margin context frames and reset the state LSTM.
        Train mode: x = (masks, cnn_feat, v_metas); build the batched
        context windows and flat-step <-> (episode, step) index maps."""
        if self.mode == 'test':
            self.v_out = self.forward_v_net(x.unsqueeze(1)[:self.v_margin])[-1]
            if self.s_net_type == 'lstm':
                self.s_net.initialize()
            self.t = 0
        elif self.mode == 'train':
            masks, cnn_feat, v_metas = x
            device, dtype = masks.device, masks.dtype
            # mask == 0 marks the last step of an episode.
            end_indice = np.where(masks.cpu().numpy() == 0)[0]
            v_metas = v_metas[end_indice, :]
            num_episode = len(end_indice)
            end_indice = np.insert(end_indice, 0, -1)
            max_episode_len = int(np.diff(end_indice).max())
            self.num_episode = num_episode
            self.max_episode_len = max_episode_len
            # Map each flat rollout row into an (episode, max_len) grid slot.
            self.indices = np.arange(masks.shape[0])
            for i in range(1, num_episode):
                start_index = end_indice[i] + 1
                end_index = end_indice[i + 1] + 1
                self.indices[start_index:end_index] += i * max_episode_len - start_index
            # Context buffer: only the margin frames unless dynamic_v.
            self.cnn_feat_ctx = np.zeros((self.v_margin + max_episode_len if
                                          self.dynamic_v else self.v_margin, num_episode, self.cnn_feat_dim))
            for i in range(num_episode):
                exp_ind, start_ind = v_metas[i, :]
                self.cnn_feat_ctx[:self.v_margin, i, :] = cnn_feat[exp_ind][start_ind - self.v_margin: start_ind]
            self.cnn_feat_ctx = tensor(self.cnn_feat_ctx, dtype=dtype, device=device)
            self.s_scatter_indices = LongTensor(np.tile(self.indices[:, None], (1, self.state_dim))).to(device)
            self.s_gather_indices = LongTensor(np.tile(self.indices[:, None], (1, self.s_hdim))).to(device)
            self.v_gather_indices = LongTensor(np.tile(self.indices[:, None], (1, self.v_hdim))).to(device)
    def forward(self, x):
        """Return [visual feature, state feature] for the current step (test)
        or for every flat rollout row (train)."""
        if self.mode == 'test':
            if self.s_net_type == 'lstm':
                x = self.s_net(x)
            x = torch.cat((self.v_out, x), dim=1)
            self.t += 1
        elif self.mode == 'train':
            if self.dynamic_v:
                v_ctx = self.forward_v_net(self.cnn_feat_ctx)[self.v_margin:]
            else:
                # Frozen visual feature: last context step, repeated per step.
                v_ctx = self.forward_v_net(self.cnn_feat_ctx)[[-1]]
                v_ctx = v_ctx.repeat(self.max_episode_len, 1, 1)
            v_ctx = v_ctx.transpose(0, 1).contiguous().view(-1, self.v_hdim)
            v_out = torch.gather(v_ctx, 0, self.v_gather_indices)
            if self.s_net_type == 'lstm':
                # Scatter flat steps into the padded grid, run the LSTM per
                # episode, then gather back to flat rows.
                s_ctx = zeros((self.num_episode * self.max_episode_len, self.state_dim), device=x.device)
                s_ctx.scatter_(0, self.s_scatter_indices, x)
                s_ctx = s_ctx.view(-1, self.max_episode_len, self.state_dim).transpose(0, 1).contiguous()
                s_ctx = self.s_net(s_ctx).transpose(0, 1).contiguous().view(-1, self.s_hdim)
                s_out = torch.gather(s_ctx, 0, self.s_gather_indices)
            else:
                s_out = x
            x = torch.cat((v_out, s_out), dim=1)
        return x
    def forward_v_net(self, x):
        # TCN expects (batch, channels, time); RNN expects (time, batch, channels).
        if self.v_net_type == 'tcn':
            x = x.permute(1, 2, 0).contiguous()
        x = self.v_net(x)
        if self.v_net_type == 'tcn':
            x = x.permute(2, 0, 1).contiguous()
        return x
| 5,292 | 42.385246 | 119 | py |
PoseTriplet | PoseTriplet-main/imitator/models/resnet.py | from torch import nn
from torchvision import models
from utils.torch import *
class ResNet(nn.Module):
    """ImageNet-pretrained ResNet-18 with the classifier head replaced by a
    linear projection to ``out_dim`` features."""

    def __init__(self, out_dim, fix_params=False):
        super().__init__()
        self.out_dim = out_dim
        self.resnet = models.resnet18(pretrained=True)
        # Optionally freeze the pretrained backbone; the replacement head is
        # created afterwards, so it always stays trainable.
        if fix_params:
            for p in self.resnet.parameters():
                p.requires_grad = False
        in_features = self.resnet.fc.in_features
        self.resnet.fc = nn.Linear(in_features, out_dim)

    def forward(self, x):
        return self.resnet(x)
if __name__ == '__main__':
    import time
    # Smoke test: time one forward pass over a batch of 64 images.
    net = ResNet(128)
    start = time.time()
    batch = ones(64, 3, 224, 224)
    feat = net(batch)
    print(time.time() - start)
    print(feat.shape)
| 709 | 23.482759 | 71 | py |
PoseTriplet | PoseTriplet-main/imitator/models/video_state_net.py | import torch.nn as nn
from models.tcn import TemporalConvNet
from models.rnn import RNN
from models.empty import Empty
from utils.torch import *
class VideoStateNet(nn.Module):
    """Concatenates a per-step visual feature (temporal net over CNN
    features) with the state vector.

    Test mode precomputes the whole clip's visual features in
    ``initialize`` and indexes them per step; train mode batches all
    rollout episodes via scatter/gather index bookkeeping.
    """
    def __init__(self, cnn_feat_dim, v_hdim=128, v_margin=10, v_net_type='lstm', v_net_param=None, causal=False):
        # v_margin: number of context frames kept on each side of the window.
        super().__init__()
        self.mode = 'test'
        self.cnn_feat_dim = cnn_feat_dim
        self.v_net_type = v_net_type
        self.v_hdim = v_hdim
        self.v_margin = v_margin
        if v_net_type == 'lstm':
            self.v_net = RNN(cnn_feat_dim, v_hdim, v_net_type, bi_dir=not causal)
        elif v_net_type == 'tcn':
            if v_net_param is None:
                v_net_param = {}
            tcn_size = v_net_param.get('size', [64, 128])
            dropout = v_net_param.get('dropout', 0.2)
            kernel_size = v_net_param.get('kernel_size', 3)
            assert tcn_size[-1] == v_hdim
            self.v_net = TemporalConvNet(cnn_feat_dim, tcn_size, kernel_size=kernel_size, dropout=dropout, causal=causal)
        elif v_net_type == 'empty':
            self.v_net = Empty(cnn_feat_dim, v_hdim, v_net_type, bi_dir=not causal)
        self.v_out = None  # cached per-step visual features (test mode)
        self.t = 0         # current step index (test mode)
        # training only
        self.indices = None
        self.scatter_indices = None
        self.gather_indices = None
        self.cnn_feat_ctx = None
    def set_mode(self, mode):
        # 'test' (step-wise) or 'train' (batched episodes).
        self.mode = mode
    def initialize(self, x):
        """Test mode: x is the clip's CNN features -> cache per-step outputs
        (margins trimmed). Train mode: x = (masks, cnn_feat, v_metas); build
        per-episode context windows and the index maps between flat rollout
        rows and (episode, step) grid slots."""
        if self.mode == 'test':
            self.v_out = self.forward_v_net(x.unsqueeze(1)).squeeze(1)[self.v_margin:-self.v_margin]
            self.t = 0
        elif self.mode == 'train':
            masks, cnn_feat, v_metas = x
            device, dtype = masks.device, masks.dtype
            # mask == 0 marks the last step of an episode.
            end_indice = np.where(masks.cpu().numpy() == 0)[0]
            v_metas = v_metas[end_indice, :] # 1562x2
            num_episode = len(end_indice) # 1562
            end_indice = np.insert(end_indice, 0, -1) # 1563
            max_episode_len = int(np.diff(end_indice).max()) #97
            # Map each flat rollout row into an (episode, max_len) grid slot.
            self.indices = np.arange(masks.shape[0])
            for i in range(1, num_episode):
                start_index = end_indice[i] + 1
                end_index = end_indice[i + 1] + 1
                self.indices[start_index:end_index] += i * max_episode_len - start_index
            # Episode context: max_len frames plus v_margin on both sides.
            self.cnn_feat_ctx = np.zeros((max_episode_len + 2*self.v_margin, num_episode, self.cnn_feat_dim))
            for i in range(num_episode):
                exp_ind, start_ind = v_metas[i, :]
                self.cnn_feat_ctx[:, i, :] = cnn_feat[exp_ind][start_ind - self.v_margin: start_ind + max_episode_len + self.v_margin]
            self.cnn_feat_ctx = tensor(self.cnn_feat_ctx, dtype=dtype, device=device)
            self.scatter_indices = LongTensor(np.tile(self.indices[:, None], (1, self.cnn_feat_dim))).to(device)
            self.gather_indices = LongTensor(np.tile(self.indices[:, None], (1, self.v_hdim))).to(device)
    def forward(self, x):
        """Return [visual feature, x] for the current step (test) or for
        every flat rollout row (train)."""
        if self.mode == 'test':
            x = torch.cat((self.v_out[[self.t], :], x), dim=1)
            self.t += 1
        elif self.mode == 'train':
            v_ctx = self.forward_v_net(self.cnn_feat_ctx)[self.v_margin:-self.v_margin]
            v_ctx = v_ctx.transpose(0, 1).contiguous().view(-1, self.v_hdim)
            v_out = torch.gather(v_ctx, 0, self.gather_indices)
            x = torch.cat((v_out, x), dim=1)
        return x
    def forward_v_net(self, x): # x is t b c
        # TCN expects (batch, channels, time); RNN expects (time, batch, channels).
        if self.v_net_type == 'tcn':
            x = x.permute(1, 2, 0).contiguous()
        x = self.v_net(x)
        if self.v_net_type == 'tcn':
            x = x.permute(2, 0, 1).contiguous()
        return x
| 3,688 | 43.445783 | 134 | py |
PoseTriplet | PoseTriplet-main/imitator/models/mlp.py | import torch.nn as nn
import torch
class MLP(nn.Module):
    """Plain fully connected net: a stack of Linear layers (each optionally
    followed by BatchNorm1d), all passed through one shared activation."""

    def __init__(self, input_dim, hidden_dims=(128, 128), activation='tanh', bn_flag=False):
        super().__init__()
        activations = {'tanh': torch.tanh, 'relu': torch.relu, 'sigmoid': torch.sigmoid}
        # Attribute is left unset for unknown names, matching the original.
        if activation in activations:
            self.activation = activations[activation]
        self.out_dim = hidden_dims[-1]
        self.affine_layers = nn.ModuleList()
        prev_dim = input_dim
        for hidden_dim in hidden_dims:
            self.affine_layers.append(nn.Linear(prev_dim, hidden_dim))
            if bn_flag:
                # NOTE: forward() applies the activation to the BatchNorm
                # output as well (original behaviour).
                self.affine_layers.append(nn.BatchNorm1d(hidden_dim))
            prev_dim = hidden_dim

    def forward(self, x):
        for layer in self.affine_layers:
            x = self.activation(layer(x))
        return x
| 860 | 29.75 | 92 | py |
PoseTriplet | PoseTriplet-main/imitator/models/empty.py | import torch.nn as nn
from utils.torch import *
class Empty(nn.Module):
    """RNN-compatible stand-in that performs no recurrent computation.

    In batch mode it strips the leading root dims from the input and shifts
    the sequence forward one time step; step mode is unsupported. The
    constructor keeps the RNN-compatible signature and attributes.
    """
    def __init__(self, input_dim, out_dim, cell_type='lstm', bi_dir=False):
        super().__init__()
        self.input_dim = input_dim
        self.out_dim = out_dim
        self.cell_type = cell_type
        self.bi_dir = bi_dir
        self.mode = 'batch'
        # The cells below are created only for interface parity with RNN;
        # forward() never uses them.
        rnn_cls = nn.LSTMCell if cell_type == 'lstm' else nn.GRUCell
        hidden_dim = out_dim // 2 if bi_dir else out_dim
        self.rnn_f = rnn_cls(self.input_dim, hidden_dim)
        if bi_dir:
            self.rnn_b = rnn_cls(self.input_dim, hidden_dim)
        self.hx, self.cx = None, None
    def set_mode(self, mode):
        # Only 'batch' is actually supported; 'step' asserts in forward().
        self.mode = mode
    def initialize(self, batch_size=1):
        if self.mode == 'step':
            assert False, 'not sure the usage here'
            # self.hx = zeros((batch_size, self.rnn_f.hidden_size))
            # if self.cell_type == 'lstm':
            #     self.cx = zeros((batch_size, self.rnn_f.hidden_size))
    def forward(self, x):
        if self.mode == 'step':
            assert False, 'not sure the usage here'
            # self.hx, self.cx = batch_to(x.device, self.hx, self.cx)
            # if self.cell_type == 'lstm':
            #     self.hx, self.cx = self.rnn_f(x, (self.hx, self.cx))
            # else:
            #     self.hx = self.rnn_f(x, self.hx)
            # rnn_out = self.hx
        else:
            # rnn_out_f = self.batch_forward(x)
            # if not self.bi_dir:
            #     return rnn_out_f
            # rnn_out_b = self.batch_forward(x, reverse=True)
            # rnn_out = torch.cat((rnn_out_f, rnn_out_b), 2)
            # according to simpoe, here we directly output the sequence. With time shift 1
            tmp_x = x[..., 3:] * 1.  # drop leading root dims of qpos (original note: root xyz, root quat — slice removes 3 dims; confirm)
            rnn_out = torch.cat([tmp_x[1:], tmp_x[-1:]], dim=0)  # shift one step forward in time (last frame repeated)
        return rnn_out  # 220x1x128
    # def batch_forward(self, x, reverse=False):  # 220x1x59
    #     rnn = self.rnn_b if reverse else self.rnn_f
    #     rnn_out = []
    #     hx = zeros((x.size(1), rnn.hidden_size), device=x.device)
    #     if self.cell_type == 'lstm':
    #         cx = zeros((x.size(1), rnn.hidden_size), device=x.device)
    #     ind = reversed(range(x.size(0))) if reverse else range(x.size(0))
    #     for t in ind:
    #         if self.cell_type == 'lstm':
    #             hx, cx = rnn(x[t, ...], (hx, cx))
    #         else:
    #             hx = rnn(x[t, ...], hx)
    #         rnn_out.append(hx.unsqueeze(0))
    #     if reverse:
    #         rnn_out.reverse()
    #     rnn_out = torch.cat(rnn_out, 0)
    #     return rnn_out
if __name__ == '__main__':
    # Smoke test: Empty ignores its recurrent config, only shapes matter.
    print('start')
    net = Empty(12, 24, 'gru', bi_dir=True)
    seq = zeros(5, 3, 12)
    result = net(seq)
    print(result.shape)
| 2,833 | 36.289474 | 90 | py |
PoseTriplet | PoseTriplet-main/imitator/models/rnn.py | import torch.nn as nn
from utils.torch import *
class RNN(nn.Module):
    """LSTM/GRU wrapper supporting two modes:

    - 'batch': process a whole (T, B, D) sequence, optionally bidirectional;
    - 'step' : process one time step at a time, keeping hidden state in
      ``self.hx``/``self.cx`` between calls (call ``initialize`` first).
    """
    def __init__(self, input_dim, out_dim, cell_type='lstm', bi_dir=False):
        # out_dim is split evenly across directions when bi_dir is True.
        super().__init__()
        self.input_dim = input_dim
        self.out_dim = out_dim
        self.cell_type = cell_type
        self.bi_dir = bi_dir
        self.mode = 'batch'
        rnn_cls = nn.LSTMCell if cell_type == 'lstm' else nn.GRUCell
        hidden_dim = out_dim // 2 if bi_dir else out_dim
        self.rnn_f = rnn_cls(self.input_dim, hidden_dim)
        if bi_dir:
            self.rnn_b = rnn_cls(self.input_dim, hidden_dim)
        self.hx, self.cx = None, None
    def set_mode(self, mode):
        # 'batch' or 'step'.
        self.mode = mode
    def initialize(self, batch_size=1):
        # Reset the step-mode hidden (and cell) state to zeros.
        if self.mode == 'step':
            self.hx = zeros((batch_size, self.rnn_f.hidden_size))
            if self.cell_type == 'lstm':
                self.cx = zeros((batch_size, self.rnn_f.hidden_size))
    def forward(self, x):
        """Step mode: x is (B, D) -> (B, out_dim).
        Batch mode: x is (T, B, D) -> (T, B, out_dim)."""
        if self.mode == 'step':
            self.hx, self.cx = batch_to(x.device, self.hx, self.cx)
            if self.cell_type == 'lstm':
                self.hx, self.cx = self.rnn_f(x, (self.hx, self.cx))
            else:
                self.hx = self.rnn_f(x, self.hx)
            rnn_out = self.hx
        else:
            rnn_out_f = self.batch_forward(x)
            if not self.bi_dir:
                return rnn_out_f
            # Concatenate forward and backward features along the last dim.
            rnn_out_b = self.batch_forward(x, reverse=True)
            rnn_out = torch.cat((rnn_out_f, rnn_out_b), 2)
        return rnn_out  # 220x1x128
    def batch_forward(self, x, reverse=False):  # 220x1x59
        # Unroll the cell over time with a fresh zero state per call.
        rnn = self.rnn_b if reverse else self.rnn_f
        rnn_out = []
        hx = zeros((x.size(1), rnn.hidden_size), device=x.device)
        if self.cell_type == 'lstm':
            cx = zeros((x.size(1), rnn.hidden_size), device=x.device)
        ind = reversed(range(x.size(0))) if reverse else range(x.size(0))
        for t in ind:
            if self.cell_type == 'lstm':
                hx, cx = rnn(x[t, ...], (hx, cx))
            else:
                hx = rnn(x[t, ...], hx)
            rnn_out.append(hx.unsqueeze(0))
        if reverse:
            # Re-align backward outputs with forward time order.
            rnn_out.reverse()
        rnn_out = torch.cat(rnn_out, 0)
        return rnn_out
if __name__ == '__main__':
    # Smoke test: bidirectional GRU over a (T=5, B=3, D=12) sequence.
    print('start')
    net = RNN(12, 24, 'gru', bi_dir=True)
    seq = zeros(5, 3, 12)
    result = net(seq)
    print(result.shape)
| 2,422 | 33.614286 | 75 | py |
PoseTriplet | PoseTriplet-main/imitator/models/mobile_net.py | from torch import nn
from utils.torch import *
class MobileNet(nn.Module):
    """MobileNet-v1 style feature extractor ending in a linear projection to
    ``out_dim`` features; expects 224x224 RGB input (AvgPool2d(7) at the end)."""

    def __init__(self, out_dim):
        super().__init__()
        self.out_dim = out_dim

        def conv_bn(inp, oup, stride):
            # Standard conv + BN + ReLU stem block.
            return nn.Sequential(
                nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU(inplace=True),
            )

        def conv_dw(inp, oup, stride):
            # Depthwise-separable convolution: depthwise 3x3 + pointwise 1x1.
            return nn.Sequential(
                nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
                nn.BatchNorm2d(inp),
                nn.ReLU(inplace=True),
                nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU(inplace=True),
            )

        # (in_channels, out_channels, stride) for each depthwise block.
        dw_cfg = [
            (32, 64, 1), (64, 128, 2), (128, 128, 1), (128, 256, 2),
            (256, 256, 1), (256, 512, 2), (512, 512, 1), (512, 512, 1),
            (512, 512, 1), (512, 512, 1), (512, 512, 1), (512, 1024, 2),
            (1024, 1024, 1),
        ]
        layers = [conv_bn(3, 32, 2)]
        layers.extend(conv_dw(i, o, s) for i, o, s in dw_cfg)
        layers.append(nn.AvgPool2d(7))
        self.model = nn.Sequential(*layers)
        self.fc = nn.Linear(1024, out_dim)

    def forward(self, x):
        features = self.model(x)
        features = features.view(-1, 1024)
        return self.fc(features)
if __name__ == '__main__':
    import time
    # Benchmark: 10 timed forward passes without autograd overhead.
    torch.set_grad_enabled(False)
    net = MobileNet(128)
    batch = ones(1, 3, 224, 224)
    for _ in range(10):
        start = time.time()
        out = net(batch)
        print(time.time() - start)
    print(out.shape)
| 1,746 | 26.296875 | 74 | py |
PoseTriplet | PoseTriplet-main/imitator/models/tcn.py | import torch.nn as nn
from torch.nn.utils import weight_norm
from utils.torch import *
class Chomp1d(nn.Module):
    """Trim the trailing ``chomp_size`` time steps from a (N, C, T) tensor.

    Used after a causally padded Conv1d to drop the padding-induced
    "future" outputs so the layer output keeps the original length.
    """

    def __init__(self, chomp_size):
        super().__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        # Guard chomp_size == 0: the original slice ``x[:, :, :-0]`` would
        # return an EMPTY tensor instead of the unmodified input.
        if self.chomp_size == 0:
            return x.contiguous()
        return x[:, :, :-self.chomp_size].contiguous()
class TemporalBlock(nn.Module):
    """Two dilated 1-D convolutions (weight-normalized) with ReLU and
    dropout, wrapped in a residual connection. A 1x1 conv projects the
    residual when channel counts differ; causal mode chops the trailing
    padding after each conv (Chomp1d)."""
    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, dropout, causal):
        super().__init__()
        # Causal: pad only on the amount trimmed from the right by Chomp1d.
        # Non-causal: symmetric padding (requires an odd kernel_size).
        padding = (kernel_size - 1) * dilation // (1 if causal else 2)
        modules = []
        self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size, stride=stride, padding=padding, dilation=dilation))
        modules.append(self.conv1)
        if causal:
            modules.append(Chomp1d(padding))
        modules.append(nn.ReLU())
        if dropout > 0:
            modules.append(nn.Dropout(dropout))
        self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size, stride=stride, padding=padding, dilation=dilation))
        modules.append(self.conv2)
        if causal:
            modules.append(Chomp1d(padding))
        modules.append(nn.ReLU())
        if dropout > 0:
            modules.append(nn.Dropout(dropout))
        self.net = nn.Sequential(*modules)
        # Match channel counts on the residual path when needed.
        self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        self.relu = nn.ReLU()
        self.init_weights()
    def init_weights(self):
        # Small-variance normal init for both convs and the residual projection.
        self.conv1.weight.data.normal_(0, 0.01)
        self.conv2.weight.data.normal_(0, 0.01)
        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.01)
    def forward(self, x):
        # x: (batch, channels, time); residual add followed by a final ReLU.
        out = self.net(x)
        res = x if self.downsample is None else self.downsample(x)
        return self.relu(out + res)
class TemporalConvNet(nn.Module):
    """Stack of dilated TemporalBlocks; the dilation doubles at every level.

    Input/output layout is (batch, channels, time); ``num_channels`` lists
    the output channels of each level.
    """

    def __init__(self, num_inputs, num_channels, kernel_size=3, dropout=0.2, causal=False):
        super().__init__()
        # Odd kernels are required for symmetric (non-causal) padding.
        assert kernel_size % 2 == 1
        blocks = []
        in_ch = num_inputs
        for level, out_ch in enumerate(num_channels):
            blocks.append(TemporalBlock(in_ch, out_ch, kernel_size, stride=1,
                                        dilation=2 ** level, dropout=dropout, causal=causal))
            in_ch = out_ch
        self.network = nn.Sequential(*blocks)

    def forward(self, x):
        return self.network(x)
if __name__ == '__main__':
    # Smoke test: 117 -> [64, 128] channel TCN on a (B=3, C=117, T=80) input.
    tcn = TemporalConvNet(117, [64, 128], kernel_size=3, causal=False)
    batch = zeros(3, 117, 80)  # b c t
    out = tcn(batch)
    print(tcn)
    print(out.shape)
| 2,770 | 33.6375 | 129 | py |
PoseTriplet | PoseTriplet-main/imitator/models/linear_model.py | from __future__ import absolute_import
import torch
import torch.nn as nn
def init_weights(m):
    """Kaiming-initialize the weights of Linear modules; a no-op for any
    other module type (intended for use with ``nn.Module.apply``)."""
    if not isinstance(m, nn.Linear):
        return
    nn.init.kaiming_normal_(m.weight)
class Linear(nn.Module):
    """Residual block of two (Linear -> BatchNorm -> ReLU -> Dropout) stages.

    Input and output both have ``linear_size`` features; the input is added
    back onto the transformed features (skip connection).
    """

    def __init__(self, linear_size, p_dropout=0.5):
        super(Linear, self).__init__()
        self.l_size = linear_size
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(p_dropout)
        self.w1 = nn.Linear(linear_size, linear_size)
        self.batch_norm1 = nn.BatchNorm1d(linear_size)
        self.w2 = nn.Linear(linear_size, linear_size)
        self.batch_norm2 = nn.BatchNorm1d(linear_size)

    def _stage(self, x, fc, bn):
        # One Linear -> BatchNorm -> ReLU -> Dropout pass.
        return self.dropout(self.relu(bn(fc(x))))

    def forward(self, x):
        y = self._stage(x, self.w1, self.batch_norm1)
        y = self._stage(y, self.w2, self.batch_norm2)
        return x + y
class LinearModel(nn.Module):
    """Martinez-style MLP: input projection, ``num_stage`` residual Linear
    blocks, and an output projection (e.g. 2D joints -> 3D joints)."""

    def __init__(self, input_size, output_size, linear_size=1024, num_stage=2, p_dropout=0.5):
        super(LinearModel, self).__init__()
        self.linear_size = linear_size
        self.p_dropout = p_dropout
        self.num_stage = num_stage
        # 2d joints
        self.input_size = input_size  # 16 * 2
        # 3d joints
        self.output_size = output_size  # 16 * 3
        # Project the input up to the hidden width.
        self.w1 = nn.Linear(input_size, linear_size)
        self.batch_norm1 = nn.BatchNorm1d(linear_size)
        # Residual trunk.
        self.linear_stages = nn.ModuleList(
            [Linear(linear_size, p_dropout) for _ in range(num_stage)])
        # Project down to the output size.
        self.w2 = nn.Linear(linear_size, output_size)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(p_dropout)

    def forward(self, x):
        # Pre-processing: Linear -> BN -> ReLU -> Dropout.
        y = self.dropout(self.relu(self.batch_norm1(self.w1(x))))
        # Residual stages.
        for stage in self.linear_stages:
            y = stage(y)
        return self.w2(y)
if __name__ == '__main__':
    import time
    # Smoke test: 34-dim 2D pose -> 51-dim 3D pose on a batch of 2.
    print('start')
    model = LinearModel(input_size=34, output_size=51, linear_size=1024, num_stage=2, p_dropout=0.25)
    dummy = torch.zeros(2, 34)
    start = time.time()
    pred = model(dummy)
    print(time.time() - start)
    print(pred.shape)
PoseTriplet | PoseTriplet-main/imitator/agents/agent.py | import multiprocessing
from core import LoggerRL, TrajBatch
from utils.memory import Memory
from utils.torch import *
import math
import time
class Agent:
    """Base RL agent: samples trajectories from an environment (optionally
    across parallel worker processes) and owns the policy/value networks
    that subclasses (AgentPG / AgentPPO / AgentTRPO) update."""

    def __init__(self, env, policy_net, value_net, dtype, device, custom_reward=None,
                 mean_action=False, render=False, running_state=None, num_threads=1):
        """
        Args:
            env: gym-style environment (reset/step/render).
            policy_net, value_net: actor and critic networks.
            dtype, device: torch dtype/device used for parameter updates.
            custom_reward: optional callable(env, state, action, info) ->
                (reward, info) replacing the environment reward while sampling.
            mean_action: if True, always act with the policy mean.
            render: if True, the pid-0 worker renders the environment.
            running_state: optional observation normalizer.
            num_threads: number of parallel sampling processes.
        """
        self.env = env
        self.policy_net = policy_net
        self.value_net = value_net
        self.dtype = dtype
        self.device = device
        self.custom_reward = custom_reward
        self.mean_action = mean_action
        self.running_state = running_state
        self.render = render
        self.num_threads = num_threads
        self.noise_rate = 1.0
        # Subclass hooks: override to customize batching/logging classes.
        self.traj_cls = TrajBatch
        self.logger_cls = LoggerRL
        self.sample_modules = [policy_net]
        self.update_modules = [policy_net, value_net]

    def sample_worker(self, pid, queue, min_batch_size):
        """Collect at least min_batch_size environment steps in one process.

        Puts [pid, memory, logger] on `queue` when run as a worker, or
        returns (memory, logger) directly when queue is None (main process).
        """
        # Desynchronize RNG streams across workers by drawing pid samples.
        # NOTE(review): ad-hoc — the drawn values are discarded; confirm intent.
        torch.randn(pid)
        if hasattr(self.env, 'np_random'):
            self.env.np_random.rand(pid)
        memory = Memory()
        # Use the (possibly subclass-overridden) logger class so worker logs
        # match what self.logger_cls.merge() expects in sample(); the
        # original hard-coded LoggerRL() here, bypassing the hook.
        logger = self.logger_cls()
        while logger.num_steps < min_batch_size:
            state = self.env.reset()
            if self.running_state is not None:
                state = self.running_state(state)
            logger.start_episode(self.env)
            self.pre_episode()
            for t in range(10000):
                state_var = tensor(state).unsqueeze(0)
                vs_out = self.trans_policy(state_var)
                # Act deterministically with probability 1 - noise_rate.
                mean_action = self.mean_action or np.random.binomial(1, 1 - self.noise_rate)
                action = self.policy_net.select_action(vs_out, mean_action)[0].numpy()
                action = int(action) if self.policy_net.type == 'discrete' else action.astype(np.float64)
                next_state, env_reward, done, info = self.env.step(action)
                if self.running_state is not None:
                    next_state = self.running_state(next_state)
                if self.custom_reward is not None:
                    c_reward, c_info = self.custom_reward(self.env, state, action, info)
                    reward = c_reward
                else:
                    c_reward, c_info = 0.0, np.array([0.0])
                    reward = env_reward
                logger.step(self.env, env_reward, c_reward, c_info)
                mask = 0 if done else 1
                # Only exploratory (stochastic) steps train the policy.
                exp = 1 - mean_action
                self.push_memory(memory, state, action, mask, next_state, reward, exp)
                if pid == 0 and self.render:
                    self.env.render()
                if done:
                    break
                state = next_state
            logger.end_episode(self.env)
        logger.end_sampling()
        if queue is not None:
            queue.put([pid, memory, logger])
        else:
            return memory, logger

    def pre_episode(self):
        """Hook called before each episode; subclasses may override."""
        return

    def push_memory(self, memory, state, action, mask, next_state, reward, exp):
        """Store one transition; subclasses may override to add fields."""
        memory.push(state, action, mask, next_state, reward, exp)

    def pre_sample(self):
        """Hook called once before sampling; subclasses may override."""
        return

    def sample(self, min_batch_size):
        """Sample >= min_batch_size steps in total across num_threads processes.

        Returns:
            (traj_batch, logger): merged trajectory batch and merged RL
            logger, with logger.sample_time set to wall-clock duration.
        """
        t_start = time.time()
        self.pre_sample()
        to_test(*self.sample_modules)
        with to_cpu(*self.sample_modules):
            with torch.no_grad():
                thread_batch_size = int(math.floor(min_batch_size / self.num_threads))
                queue = multiprocessing.Queue()
                memories = [None] * self.num_threads
                loggers = [None] * self.num_threads
                for i in range(self.num_threads-1):
                    worker_args = (i+1, queue, thread_batch_size)
                    worker = multiprocessing.Process(target=self.sample_worker, args=worker_args)
                    worker.start()
                # The main process samples too (pid 0), then collects workers.
                memories[0], loggers[0] = self.sample_worker(0, None, thread_batch_size)
                for i in range(self.num_threads - 1):
                    pid, worker_memory, worker_logger = queue.get()
                    memories[pid] = worker_memory
                    loggers[pid] = worker_logger
                traj_batch = self.traj_cls(memories)
                logger = self.logger_cls.merge(loggers)
        logger.sample_time = time.time() - t_start
        return traj_batch, logger

    def trans_policy(self, states):
        """transform states before going into policy net"""
        return states

    def trans_value(self, states):
        """transform states before going into value net"""
        return states

    def set_noise_rate(self, noise_rate):
        """Set the probability of taking exploratory (stochastic) actions."""
        self.noise_rate = noise_rate
| 4,626 | 36.617886 | 105 | py |
PoseTriplet | PoseTriplet-main/imitator/agents/agent_trpo.py | import scipy.optimize
from agents.agent_pg import AgentPG
from utils import *
def conjugate_gradients(Avp_f, b, nsteps, rdotr_tol=1e-10):
    """Solve A x = b with the conjugate-gradient method.

    Args:
        Avp_f: function computing the matrix-vector product A @ v for a
            symmetric positive-definite A (typically a Fisher/Hessian-vector
            product).
        b: right-hand-side vector (1-D tensor).
        nsteps: maximum number of CG iterations.
        rdotr_tol: stop once the squared residual norm drops below this.

    Returns:
        Approximate solution x, on the same device/dtype as b.
    """
    # zeros_like keeps x on b's device/dtype. The original allocated a CPU
    # tensor and discarded the result of the (non-in-place) ``x.to(...)``
    # call, so CUDA inputs crashed on the first ``x += alpha * p``.
    x = torch.zeros_like(b)
    r = b.clone()
    p = b.clone()
    rdotr = torch.dot(r, r)
    for _ in range(nsteps):
        Avp = Avp_f(p)
        alpha = rdotr / torch.dot(p, Avp)
        x += alpha * p
        r -= alpha * Avp
        new_rdotr = torch.dot(r, r)
        betta = new_rdotr / rdotr
        p = r + betta * p
        rdotr = new_rdotr
        if rdotr < rdotr_tol:
            break
    return x
def line_search(model, f, x, fullstep, expected_improve_full, max_backtracks=10, accept_ratio=0.1):
    """Backtracking line search along ``fullstep`` from flat parameters ``x``.

    Halves the step fraction (starting at 1.0) until the actual loss
    improvement is at least ``accept_ratio`` of the linearly expected one.

    Returns:
        (success, params): whether a step was accepted, and the accepted
        parameters (or the original ``x`` on failure).
    """
    fval = f(True).item()
    stepfrac = 1.0
    for _ in range(max_backtracks):
        candidate = x + stepfrac * fullstep
        set_flat_params_to(model, candidate)
        fval_new = f(True).item()
        actual_improve = fval - fval_new
        expected_improve = expected_improve_full * stepfrac
        if actual_improve / expected_improve > accept_ratio:
            return True, candidate
        stepfrac *= 0.5
    return False, x
class AgentTRPO(AgentPG):
    """Trust-region policy optimization agent.

    Natural-gradient policy step solved with conjugate gradients plus a
    backtracking line search; the value function is fit with L-BFGS.
    """
    def __init__(self, max_kl=1e-2, damping=1e-2, use_fim=True, **kwargs):
        # max_kl:  trust-region size (mean KL between old and new policies).
        # damping: Tikhonov damping added to the FIM/Hessian-vector product.
        # use_fim: use the Fisher-information form rather than
        #          differentiating the KL twice.
        super().__init__(**kwargs)
        self.max_kl = max_kl
        self.damping = damping
        self.use_fim = use_fim
    def update_value(self, states, returns):
        """Fit the value net to empirical returns with scipy L-BFGS."""
        def get_value_loss(flat_params):
            # scipy optimizes a flat numpy vector: load it into the net,
            # compute loss + weight decay, return (loss, flat gradient).
            set_flat_params_to(self.value_net, tensor(flat_params))
            for param in self.value_net.parameters():
                if param.grad is not None:
                    param.grad.data.fill_(0)
            values_pred = self.value_net(self.trans_value(states))
            value_loss = (values_pred - returns).pow(2).mean()
            # weight decay
            for param in self.value_net.parameters():
                value_loss += param.pow(2).sum() * 1e-3
            value_loss.backward()
            return value_loss.item(), get_flat_grad_from(self.value_net.parameters()).cpu().numpy()
        flat_params, _, opt_info = scipy.optimize.fmin_l_bfgs_b(get_value_loss,
                                                                get_flat_params_from(self.value_net).detach().cpu().numpy(),
                                                                maxiter=25)
        set_flat_params_to(self.value_net, tensor(flat_params))
    def update_policy(self, states, actions, returns, advantages, exps):
        """update policy"""
        # Only exploratory steps (exp == 1) contribute to the policy update.
        ind = exps.nonzero().squeeze(1)
        self.update_value(states, returns)
        with torch.no_grad():
            fixed_log_probs = self.policy_net.get_log_prob(self.trans_policy(states)[ind], actions[ind])
        """define the loss function for TRPO"""
        def get_loss(volatile=False):
            with torch.set_grad_enabled(not volatile):
                log_probs = self.policy_net.get_log_prob(self.trans_policy(states[ind]), actions[ind])
                action_loss = -advantages[ind] * torch.exp(log_probs - fixed_log_probs)
                return action_loss.mean()
        """use fisher information matrix for Hessian*vector"""
        def Fvp_fim(v):
            # FIM-vector product via double backprop through the policy mean,
            # plus an analytic term for the Gaussian log-std parameters.
            M, mu, info = self.policy_net.get_fim(self.trans_policy(states)[ind])
            mu = mu.view(-1)
            filter_input_ids = set([info['std_id']]) if self.policy_net.type == 'gaussian' else set()
            t = ones(mu.size(), requires_grad=True)
            mu_t = (mu * t).sum()
            Jt = compute_flat_grad(mu_t, self.policy_net.parameters(), filter_input_ids=filter_input_ids, create_graph=True)
            Jtv = (Jt * v).sum()
            Jv = torch.autograd.grad(Jtv, t)[0]
            MJv = M * Jv.detach()
            mu_MJv = (MJv * mu).sum()
            JTMJv = compute_flat_grad(mu_MJv, self.policy_net.parameters(), filter_input_ids=filter_input_ids).detach()
            JTMJv /= states.shape[0]
            if self.policy_net.type == 'gaussian':
                std_index = info['std_index']
                JTMJv[std_index: std_index + M.shape[0]] += 2 * v[std_index: std_index + M.shape[0]]
            return JTMJv + v * self.damping
        """directly compute Hessian*vector from KL"""
        def Fvp_direct(v):
            # Hessian-vector product of the mean KL via double backprop.
            kl = self.policy_net.get_kl(self.trans_policy(states)[ind])
            kl = kl.mean()
            grads = torch.autograd.grad(kl, self.policy_net.parameters(), create_graph=True)
            flat_grad_kl = torch.cat([grad.view(-1) for grad in grads])
            kl_v = (flat_grad_kl * v).sum()
            grads = torch.autograd.grad(kl_v, self.policy_net.parameters())
            flat_grad_grad_kl = torch.cat([grad.contiguous().view(-1) for grad in grads]).detach()
            return flat_grad_grad_kl + v * self.damping
        Fvp = Fvp_fim if self.use_fim else Fvp_direct
        loss = get_loss()
        grads = torch.autograd.grad(loss, self.policy_net.parameters())
        loss_grad = torch.cat([grad.view(-1) for grad in grads]).detach()
        # Natural gradient direction: solve F x = -g with conjugate gradients.
        stepdir = conjugate_gradients(Fvp, -loss_grad, 10)
        # Scale the step so the quadratic KL estimate equals max_kl.
        shs = 0.5 * (stepdir.dot(Fvp(stepdir)))
        lm = math.sqrt(self.max_kl / shs)
        fullstep = stepdir * lm
        expected_improve = -loss_grad.dot(fullstep)
        prev_params = get_flat_params_from(self.policy_net)
        success, new_params = line_search(self.policy_net, get_loss, prev_params, fullstep, expected_improve)
        set_flat_params_to(self.policy_net, new_params)
| 5,491 | 38.797101 | 124 | py |
PoseTriplet | PoseTriplet-main/imitator/agents/agent_ppo.py | import math
from utils.torch import *
from agents.agent_pg import AgentPG
class AgentPPO(AgentPG):
    """Proximal policy optimization agent (clipped surrogate objective)."""
    def __init__(self, clip_epsilon=0.2, opt_batch_size=64, use_mini_batch=False,
                 policy_grad_clip=None, **kwargs):
        # clip_epsilon:     PPO ratio clipping range.
        # opt_batch_size:   mini-batch size when use_mini_batch is True.
        # policy_grad_clip: optional iterable of (params, max_norm) pairs.
        super().__init__(**kwargs)
        self.clip_epsilon = clip_epsilon
        self.opt_batch_size = opt_batch_size
        self.use_mini_batch = use_mini_batch
        self.policy_grad_clip = policy_grad_clip
    def update_policy(self, states, actions, returns, advantages, exps):
        """update policy"""
        with to_test(*self.update_modules):
            with torch.no_grad():
                # Log-probs under the behavior policy; denominator of the ratio.
                fixed_log_probs = self.policy_net.get_log_prob(self.trans_policy(states), actions)
        optim_iter_num = int(math.ceil(states.shape[0] / self.opt_batch_size))
        for _ in range(self.opt_num_epochs):
            if self.use_mini_batch:
                # Reshuffle all tensors consistently each epoch.
                perm = np.arange(states.shape[0])
                np.random.shuffle(perm)
                perm = LongTensor(perm).to(self.device)
                states, actions, returns, advantages, fixed_log_probs, exps = \
                    states[perm].clone(), actions[perm].clone(), returns[perm].clone(), advantages[perm].clone(), \
                    fixed_log_probs[perm].clone(), exps[perm].clone()
                for i in range(optim_iter_num):
                    ind = slice(i * self.opt_batch_size, min((i + 1) * self.opt_batch_size, states.shape[0]))
                    states_b, actions_b, advantages_b, returns_b, fixed_log_probs_b, exps_b = \
                        states[ind], actions[ind], advantages[ind], returns[ind], fixed_log_probs[ind], exps[ind]
                    # Restrict the policy loss to exploratory steps only.
                    ind = exps_b.nonzero().squeeze(1)
                    self.update_value(states_b, returns_b)
                    surr_loss = self.ppo_loss(states_b, actions_b, advantages_b, fixed_log_probs_b, ind)
                    self.optimizer_policy.zero_grad()
                    surr_loss.backward()
                    self.clip_policy_grad()
                    self.optimizer_policy.step()
            else:
                # Full-batch update; still only exploratory steps.
                ind = exps.nonzero().squeeze(1)
                self.update_value(states, returns)
                surr_loss = self.ppo_loss(states, actions, advantages, fixed_log_probs, ind)
                self.optimizer_policy.zero_grad()
                surr_loss.backward()
                self.clip_policy_grad()
                self.optimizer_policy.step()
    def clip_policy_grad(self):
        # Optional per-group gradient norm clipping before the optimizer step.
        if self.policy_grad_clip is not None:
            for params, max_norm in self.policy_grad_clip:
                torch.nn.utils.clip_grad_norm_(params, max_norm)
    def ppo_loss(self, states, actions, advantages, fixed_log_probs, ind):
        """Clipped PPO surrogate loss restricted to rows ``ind``."""
        log_probs = self.policy_net.get_log_prob(self.trans_policy(states)[ind], actions[ind])
        ratio = torch.exp(log_probs - fixed_log_probs[ind])
        advantages = advantages[ind]
        surr1 = ratio * advantages
        surr2 = torch.clamp(ratio, 1.0 - self.clip_epsilon, 1.0 + self.clip_epsilon) * advantages
        surr_loss = -torch.min(surr1, surr2).mean()
        return surr_loss
| 3,141 | 46.606061 | 115 | py |
PoseTriplet | PoseTriplet-main/imitator/agents/agent_pg.py | from core import estimate_advantages
from agents.agent import Agent
from utils.torch import *
import time
class AgentPG(Agent):
    """Policy-gradient agent (A2C-style update); base class for PPO/TRPO."""
    def __init__(self, gamma=0.99, tau=0.95, optimizer_policy=None, optimizer_value=None,
                 opt_num_epochs=1, value_opt_niter=1, **kwargs):
        # gamma: discount factor; tau: GAE lambda.
        super().__init__(**kwargs)
        self.gamma = gamma
        self.tau = tau
        self.optimizer_policy = optimizer_policy
        self.optimizer_value = optimizer_value
        self.opt_num_epochs = opt_num_epochs
        self.value_opt_niter = value_opt_niter
    def update_value(self, states, returns):
        """update critic"""
        # MSE regression of predicted values onto empirical returns.
        for _ in range(self.value_opt_niter):
            values_pred = self.value_net(self.trans_value(states))
            value_loss = (values_pred - returns).pow(2).mean()
            self.optimizer_value.zero_grad()
            value_loss.backward()
            self.optimizer_value.step()
    def update_policy(self, states, actions, returns, advantages, exps):
        """update policy"""
        # use a2c by default
        # Only exploratory steps (exp == 1) contribute to the policy loss.
        ind = exps.nonzero().squeeze(1)
        for _ in range(self.opt_num_epochs):
            self.update_value(states, returns)
            log_probs = self.policy_net.get_log_prob(self.trans_policy(states)[ind], actions[ind])
            policy_loss = -(log_probs * advantages[ind]).mean()
            self.optimizer_policy.zero_grad()
            policy_loss.backward()
            self.optimizer_policy.step()
    def update_params(self, batch):
        """Run one full update from a sampled batch; returns elapsed seconds."""
        t0 = time.time()
        to_train(*self.update_modules)
        states = torch.from_numpy(batch.states).to(self.dtype).to(self.device)
        actions = torch.from_numpy(batch.actions).to(self.dtype).to(self.device)
        rewards = torch.from_numpy(batch.rewards).to(self.dtype).to(self.device)
        masks = torch.from_numpy(batch.masks).to(self.dtype).to(self.device)
        exps = torch.from_numpy(batch.exps).to(self.dtype).to(self.device)
        with to_test(*self.update_modules):
            with torch.no_grad():
                values = self.value_net(self.trans_value(states))
        """get advantage estimation from the trajectories"""
        advantages, returns = estimate_advantages(rewards, masks, values, self.gamma, self.tau)
        self.update_policy(states, actions, returns, advantages, exps)
        return time.time() - t0
| 2,377 | 40 | 98 | py |
PoseTriplet | PoseTriplet-main/imitator/utils/transformation.py | # -*- coding: utf-8 -*-
# transformations.py
# Copyright (c) 2006-2017, Christoph Gohlke
# Copyright (c) 2006-2017, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Homogeneous Transformation Matrices and Quaternions.
A library for calculating 4x4 matrices for translating, rotating, reflecting,
scaling, shearing, projecting, orthogonalizing, and superimposing arrays of
3D homogeneous coordinates as well as for converting between rotation matrices,
Euler angles, and quaternions. Also includes an Arcball control object and
functions to decompose transformation matrices.
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2017.02.17
Requirements
------------
* `CPython 2.7 or 3.5 <http://www.python.org>`_
* `Numpy 1.11 <http://www.numpy.org>`_
* `Transformations.c 2017.02.17 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for speedup of some functions)
Notes
-----
The API is not stable yet and is expected to change between revisions.
This Python code is not optimized for speed. Refer to the transformations.c
module for a faster implementation of some functions.
Documentation in HTML format can be generated with epydoc.
Matrices (M) can be inverted using numpy.linalg.inv(M), be concatenated using
numpy.dot(M0, M1), or transform homogeneous coordinate arrays (v) using
numpy.dot(M, v) for shape (4, \*) column vectors, respectively
numpy.dot(v, M.T) for shape (\*, 4) row vectors ("array of points").
This module follows the "column vectors on the right" and "row major storage"
(C contiguous) conventions. The translation components are in the right column
of the transformation matrix, i.e. M[:3, 3].
The transpose of the transformation matrices may have to be used to interface
with other graphics systems, e.g. with OpenGL's glMultMatrixd(). See also [16].
Calculations are carried out with numpy.float64 precision.
Vector, point, quaternion, and matrix function arguments are expected to be
"array like", i.e. tuple, list, or numpy arrays.
Return types are numpy arrays unless specified otherwise.
Angles are in radians unless specified otherwise.
Quaternions w+ix+jy+kz are represented as [w, x, y, z].
A triple of Euler angles can be applied/interpreted in 24 ways, which can
be specified using a 4 character string or encoded 4-tuple:
*Axes 4-string*: e.g. 'sxyz' or 'ryxy'
- first character : rotations are applied to 's'tatic or 'r'otating frame
- remaining characters : successive rotation axis 'x', 'y', or 'z'
*Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1)
- inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.
- parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed
by 'z', or 'z' is followed by 'x'. Otherwise odd (1).
- repetition : first and last axis are same (1) or different (0).
- frame : rotations are applied to static (0) or rotating (1) frame.
Other Python packages and modules for 3D transformations and quaternions:
* `Transforms3d <https://pypi.python.org/pypi/transforms3d>`_
includes most code of this module.
* `Blender.mathutils <http://www.blender.org/api/blender_python_api>`_
* `numpy-dtypes <https://github.com/numpy/numpy-dtypes>`_
References
----------
(1) Matrices and transformations. Ronald Goldman.
In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990.
(2) More matrices and transformations: shear and pseudo-perspective.
Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(3) Decomposing a matrix into simple transformations. Spencer Thomas.
In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(4) Recovering the data from the transformation matrix. Ronald Goldman.
In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991.
(5) Euler angle conversion. Ken Shoemake.
In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994.
(6) Arcball rotation control. Ken Shoemake.
In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994.
(7) Representing attitude: Euler angles, unit quaternions, and rotation
vectors. James Diebel. 2006.
(8) A discussion of the solution for the best rotation to relate two sets
of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828.
(9) Closed-form solution of absolute orientation using unit quaternions.
BKP Horn. J Opt Soc Am A. 1987. 4(4):629-642.
(10) Quaternions. Ken Shoemake.
http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf
(11) From quaternion to matrix and back. JMP van Waveren. 2005.
http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm
(12) Uniform random rotations. Ken Shoemake.
In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992.
(13) Quaternion in molecular modeling. CFF Karney.
J Mol Graph Mod, 25(5):595-604
(14) New method for extracting the quaternion from a rotation matrix.
Itzhack Y Bar-Itzhack, J Guid Contr Dynam. 2000. 23(6): 1085-1087.
(15) Multiple View Geometry in Computer Vision. Hartley and Zisserman.
     Cambridge University Press; 2nd Ed. 2004. Chapter 4, Algorithm 4.7, p 130.
(16) Column Vectors vs. Row Vectors.
http://steve.hollasch.net/cgindex/math/matrix/column-vec.html
Examples
--------
>>> alpha, beta, gamma = 0.123, -1.234, 2.345
>>> origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]
>>> I = identity_matrix()
>>> Rx = rotation_matrix(alpha, xaxis)
>>> Ry = rotation_matrix(beta, yaxis)
>>> Rz = rotation_matrix(gamma, zaxis)
>>> R = concatenate_matrices(Rx, Ry, Rz)
>>> euler = euler_from_matrix(R, 'rxyz')
>>> numpy.allclose([alpha, beta, gamma], euler)
True
>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')
>>> is_same_transform(R, Re)
True
>>> al, be, ga = euler_from_matrix(Re, 'rxyz')
>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
True
>>> qx = quaternion_about_axis(alpha, xaxis)
>>> qy = quaternion_about_axis(beta, yaxis)
>>> qz = quaternion_about_axis(gamma, zaxis)
>>> q = quaternion_multiply(qx, qy)
>>> q = quaternion_multiply(q, qz)
>>> Rq = quaternion_matrix(q)
>>> is_same_transform(R, Rq)
True
>>> S = scale_matrix(1.23, origin)
>>> T = translation_matrix([1, 2, 3])
>>> Z = shear_matrix(beta, xaxis, origin, zaxis)
>>> R = random_rotation_matrix(numpy.random.rand(3))
>>> M = concatenate_matrices(T, R, Z, S)
>>> scale, shear, angles, trans, persp = decompose_matrix(M)
>>> numpy.allclose(scale, 1.23)
True
>>> numpy.allclose(trans, [1, 2, 3])
True
>>> numpy.allclose(shear, [0, math.tan(beta), 0])
True
>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles))
True
>>> M1 = compose_matrix(scale, shear, angles, trans, persp)
>>> is_same_transform(M, M1)
True
>>> v0, v1 = random_vector(3), random_vector(3)
>>> M = rotation_matrix(angle_between_vectors(v0, v1), vector_product(v0, v1))
>>> v2 = numpy.dot(v0, M[:3,:3].T)
>>> numpy.allclose(unit_vector(v1), unit_vector(v2))
True
"""
from __future__ import division, print_function
import math
import numpy
__version__ = '2017.02.17'
__docformat__ = 'restructuredtext en'
__all__ = ()
def identity_matrix():
    """Return the 4x4 homogeneous identity matrix.

    >>> numpy.allclose(identity_matrix(), numpy.identity(4))
    True
    """
    return numpy.eye(4)
def translation_matrix(direction):
    """Return a 4x4 matrix translating points by *direction*.

    >>> v = numpy.random.random(3) - 0.5
    >>> numpy.allclose(v, translation_matrix(v)[:3, 3])
    True
    """
    matrix = numpy.identity(4)
    matrix[:3, 3] = direction[:3]
    return matrix
def translation_from_matrix(matrix):
    """Return a copy of the translation vector of a homogeneous matrix.

    >>> v0 = numpy.random.random(3) - 0.5
    >>> v1 = translation_from_matrix(translation_matrix(v0))
    >>> numpy.allclose(v0, v1)
    True
    """
    return numpy.asarray(matrix)[:3, 3].copy()
def reflection_matrix(point, normal):
    """Return a matrix mirroring points across the plane through *point*
    with normal vector *normal*.

    The linear part is the Householder form ``I - 2 n n^T``; the
    translation restores the plane offset so points on the plane are fixed.
    """
    n = unit_vector(normal[:3])
    matrix = numpy.identity(4)
    matrix[:3, :3] -= 2.0 * numpy.outer(n, n)
    matrix[:3, 3] = (2.0 * numpy.dot(point[:3], n)) * n
    return matrix
def reflection_from_matrix(matrix):
    """Recover the mirror plane (point, normal) from a reflection matrix.

    The plane normal is the unit eigenvector of the upper-left 3x3 block
    for eigenvalue -1; the plane point is a dehomogenized eigenvector of
    the full matrix for eigenvalue +1.
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    eigvals, eigvecs = numpy.linalg.eig(M[:3, :3])
    hits = numpy.where(abs(numpy.real(eigvals) + 1.0) < 1e-8)[0]
    if len(hits) == 0:
        raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
    normal = numpy.real(eigvecs[:, hits[0]]).squeeze()
    eigvals, eigvecs = numpy.linalg.eig(M)
    hits = numpy.where(abs(numpy.real(eigvals) - 1.0) < 1e-8)[0]
    if len(hits) == 0:
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    point = numpy.real(eigvecs[:, hits[-1]]).squeeze()
    point /= point[3]
    return point, normal
def rotation_matrix(angle, direction, point=None):
    """Return a 4x4 matrix rotating by *angle* (radians) about the axis
    through *point* (origin if omitted) along *direction*.

    Built via the Rodrigues form:
    ``cos(a) I + (1 - cos(a)) d d^T + sin(a) [d]_x``.
    """
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    axis = unit_vector(direction[:3])
    R = cos_a * numpy.identity(3)
    R += (1.0 - cos_a) * numpy.outer(axis, axis)
    sa = axis * sin_a
    # cross-product (skew-symmetric) part
    R += numpy.array([[0.0, -sa[2], sa[1]],
                      [sa[2], 0.0, -sa[0]],
                      [-sa[1], sa[0], 0.0]])
    M = numpy.identity(4)
    M[:3, :3] = R
    if point is not None:
        # shift so the rotation axis passes through *point*
        p = numpy.array(point[:3], dtype=numpy.float64, copy=False)
        M[:3, 3] = p - numpy.dot(R, p)
    return M
import numpy as np
def rotation_from_quaternion(quaternion, separate=False):
    """Convert a unit quaternion ``[w, x, y, z]`` to an axis-angle rotation.

    Returns the rotation vector ``axis * angle`` by default, or the tuple
    ``(axis, angle)`` when *separate* is True.

    Fixes over the naive implementation: the scalar part is clamped to
    ``[-1, 1]`` so floating-point noise cannot push ``sqrt``/``acos`` out of
    domain; the degenerate case now also covers ``w ~ -1`` (previously a
    division by zero); and plain sequences (lists/tuples) are accepted, not
    only ndarrays.
    """
    w = min(1.0, max(-1.0, float(quaternion[0])))
    if 1.0 - abs(w) < 1e-8:
        # Angle ~ 0 (w ~ 1) or ~ 2*pi (w ~ -1): both are (numerically) the
        # identity rotation; the axis is ill-defined, so pick x by convention.
        axis = np.array([1.0, 0.0, 0.0])
        angle = 0.0 if w > 0.0 else 2.0 * math.pi
    else:
        s = math.sqrt(1.0 - w * w)
        axis = np.asarray(quaternion[1:4], dtype=np.float64) / s
        angle = 2.0 * math.acos(w)
    return (axis, angle) if separate else axis * angle
def rotation_from_matrix(matrix):
    """Recover ``(angle, direction, point)`` of the rotation in *matrix*.

    *direction* is the unit rotation axis (eigenvector of the 3x3 block for
    eigenvalue 1), *point* a point on the axis (fixed point of the full
    transform), and *angle* the rotation in radians, signed consistently
    with *direction*.
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    R33 = M[:3, :3]
    # axis: unit eigenvector of R33.T for eigenvalue 1
    eigvals, eigvecs = numpy.linalg.eig(R33.T)
    hits = numpy.where(abs(numpy.real(eigvals) - 1.0) < 1e-8)[0]
    if len(hits) == 0:
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    direction = numpy.real(eigvecs[:, hits[-1]]).squeeze()
    # point on axis: eigenvector of the full matrix for eigenvalue 1
    eigvals, eigvecs = numpy.linalg.eig(M)
    hits = numpy.where(abs(numpy.real(eigvals) - 1.0) < 1e-8)[0]
    if len(hits) == 0:
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    point = numpy.real(eigvecs[:, hits[-1]]).squeeze()
    point /= point[3]
    # signed angle: cosine from the trace, sine from an off-diagonal entry
    # divided by the first nonzero axis component (checked in z, y, x order)
    cos_a = (numpy.trace(R33) - 1.0) / 2.0
    if abs(direction[2]) > 1e-8:
        sin_a = (M[1, 0] + (cos_a - 1.0) * direction[0] * direction[1]) / direction[2]
    elif abs(direction[1]) > 1e-8:
        sin_a = (M[0, 2] + (cos_a - 1.0) * direction[0] * direction[2]) / direction[1]
    else:
        sin_a = (M[2, 1] + (cos_a - 1.0) * direction[1] * direction[2]) / direction[0]
    return math.atan2(sin_a, cos_a), direction, point
def scale_matrix(factor, origin=None, direction=None):
    """Return a matrix scaling by *factor* about *origin*.

    When *direction* is given, scaling is applied along that axis only
    (nonuniform); otherwise it is uniform. ``factor=-1`` gives point
    symmetry.
    """
    if direction is None:
        # uniform scaling about origin
        M = numpy.diag([factor, factor, factor, 1.0])
        if origin is not None:
            M[:3, 3] = origin[:3]
            M[:3, 3] *= 1.0 - factor
    else:
        # scale along *direction* only: rank-one update of the identity
        axis = unit_vector(direction[:3])
        shrink = 1.0 - factor
        M = numpy.identity(4)
        M[:3, :3] -= shrink * numpy.outer(axis, axis)
        if origin is not None:
            M[:3, 3] = (shrink * numpy.dot(origin[:3], axis)) * axis
    return M
def scale_from_matrix(matrix):
    """Recover ``(factor, origin, direction)`` from a scaling matrix.

    *direction* is None for uniform scaling. Inverse of scale_matrix().
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    upper = M[:3, :3]
    factor = numpy.trace(upper) - 2.0
    try:
        # nonuniform case: eigenvector of the 3x3 block whose eigenvalue
        # equals the candidate factor
        eigvals, eigvecs = numpy.linalg.eig(upper)
        idx = numpy.where(abs(numpy.real(eigvals) - factor) < 1e-8)[0][0]
        direction = numpy.real(eigvecs[:, idx]).squeeze()
        direction /= vector_norm(direction)
    except IndexError:
        # no matching eigenvalue -> uniform scaling
        factor = (factor + 2.0) / 3.0
        direction = None
    # origin: fixed point of the full transform (eigenvalue 1)
    eigvals, eigvecs = numpy.linalg.eig(M)
    idx = numpy.where(abs(numpy.real(eigvals) - 1.0) < 1e-8)[0]
    if len(idx) == 0:
        raise ValueError("no eigenvector corresponding to eigenvalue 1")
    origin = numpy.real(eigvecs[:, idx[-1]]).squeeze()
    origin /= origin[3]
    return factor, origin, direction
def projection_matrix(point, normal, direction=None,
                      perspective=None, pseudo=False):
    """Return a matrix projecting points onto the plane through *point*
    with *normal*.

    Exactly one mode applies: perspective (from the eye point
    *perspective*, optionally depth-preserving when *pseudo* is True),
    parallel (along *direction*), or orthogonal (the default).
    """
    M = numpy.identity(4)
    point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
    normal = unit_vector(normal[:3])
    if perspective is not None:
        # perspective projection from an eye point; the result must be
        # dehomogenized (divided by w)
        eye = numpy.array(perspective[:3], dtype=numpy.float64,
                          copy=False)
        M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(eye - point, normal)
        M[:3, :3] -= numpy.outer(eye, normal)
        if pseudo:
            # preserve relative depth: Perspective = Orthogonal @ Pseudo
            M[:3, :3] -= numpy.outer(normal, normal)
            M[:3, 3] = numpy.dot(point, normal) * (eye + normal)
        else:
            M[:3, 3] = numpy.dot(point, normal) * eye
        M[3, :3] = -normal
        M[3, 3] = numpy.dot(eye, normal)
    elif direction is not None:
        # parallel projection along *direction*
        direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
        scale = numpy.dot(direction, normal)
        M[:3, :3] -= numpy.outer(direction, normal) / scale
        M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
    else:
        # orthogonal projection along the plane normal
        M[:3, :3] -= numpy.outer(normal, normal)
        M[:3, 3] = numpy.dot(point, normal) * normal
    return M
def projection_from_matrix(matrix, pseudo=False):
    """Return projection plane and perspective point from projection matrix.
    Return values are same as arguments for projection_matrix function:
    point, normal, direction, perspective, and pseudo.
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(3) - 0.5
    >>> P0 = projection_matrix(point, normal)
    >>> result = projection_from_matrix(P0)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, direct)
    >>> result = projection_from_matrix(P0)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)
    >>> result = projection_from_matrix(P0, pseudo=False)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)
    >>> result = projection_from_matrix(P0, pseudo=True)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    # An eigenvalue-1 eigenvector of the full matrix is a fixed point of the
    # transform, i.e. a point on the projection plane. Orthogonal/parallel
    # projections have one; (pseudo-)perspective matrices are handled in the
    # else branch.
    w, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not pseudo and len(i):
        # point: any eigenvector corresponding to eigenvalue 1
        # (i[-1] picks the last such column; any fixed point works)
        point = numpy.real(V[:, i[-1]]).squeeze()
        point /= point[3]
        # direction: unit eigenvector corresponding to eigenvalue 0
        # (the projection collapses space along this direction)
        w, V = numpy.linalg.eig(M33)
        i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
        if not len(i):
            raise ValueError("no eigenvector corresponding to eigenvalue 0")
        direction = numpy.real(V[:, i[0]]).squeeze()
        direction /= vector_norm(direction)
        # normal: unit eigenvector of M33.T corresponding to eigenvalue 0
        w, V = numpy.linalg.eig(M33.T)
        i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
        if len(i):
            # parallel projection
            normal = numpy.real(V[:, i[0]]).squeeze()
            normal /= vector_norm(normal)
            return point, normal, direction, None, False
        else:
            # orthogonal projection, where normal equals direction vector
            return point, direction, None, None, False
    else:
        # perspective projection
        # point on plane: any eigenvector NOT belonging to eigenvalue 0
        i = numpy.where(abs(numpy.real(w)) > 1e-8)[0]
        if not len(i):
            raise ValueError(
                "no eigenvector not corresponding to eigenvalue 0")
        point = numpy.real(V[:, i[-1]]).squeeze()
        point /= point[3]
        # bottom row of the matrix stores the negated plane normal
        # (see projection_matrix: M[3, :3] = -normal)
        normal = - M[3, :3]
        perspective = M[:3, 3] / numpy.dot(point[:3], normal)
        if pseudo:
            perspective -= normal
        return point, normal, None, perspective, pseudo
def clip_matrix(left, right, bottom, top, near, far, perspective=False):
    """Return a matrix mapping the axis-aligned view frustum to normalized
    device coordinates in [-1, 1].

    With ``perspective=True`` the frustum is a truncated pyramid with its
    apex at the origin, and transformed homogeneous coordinates must be
    divided by their w component; otherwise the frustum is a box
    (orthographic canonical view volume).

    Raises ValueError for a degenerate frustum, or for a perspective
    frustum whose near plane is not strictly positive.
    """
    if left >= right or bottom >= top or near >= far:
        raise ValueError("invalid frustum")
    if perspective:
        if near <= _EPS:
            raise ValueError("invalid frustum: near <= 0")
        t = 2.0 * near
        rows = [[t / (left - right), 0.0, (right + left) / (right - left), 0.0],
                [0.0, t / (bottom - top), (top + bottom) / (top - bottom), 0.0],
                [0.0, 0.0, (far + near) / (near - far), t * far / (far - near)],
                [0.0, 0.0, -1.0, 0.0]]
    else:
        rows = [[2.0 / (right - left), 0.0, 0.0, (right + left) / (left - right)],
                [0.0, 2.0 / (top - bottom), 0.0, (top + bottom) / (bottom - top)],
                [0.0, 0.0, 2.0 / (far - near), (far + near) / (near - far)],
                [0.0, 0.0, 0.0, 1.0]]
    return numpy.array(rows)
def shear_matrix(angle, direction, point, normal):
    """Return a matrix shearing by *angle* along *direction* within the
    plane through *point* with *normal*.

    *direction* must lie in the shear plane (i.e. be orthogonal to
    *normal*); otherwise ValueError is raised.
    """
    normal = unit_vector(normal[:3])
    direction = unit_vector(direction[:3])
    if abs(numpy.dot(normal, direction)) > 1e-6:
        raise ValueError("direction and normal vectors are not orthogonal")
    t = math.tan(angle)
    M = numpy.identity(4)
    M[:3, :3] += t * numpy.outer(direction, normal)
    M[:3, 3] = -t * numpy.dot(point[:3], normal) * direction
    return M
def shear_from_matrix(matrix):
    """Return shear angle, direction and plane from shear matrix.
    >>> angle = (random.random() - 0.5) * 4*math.pi
    >>> direct = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.cross(direct, numpy.random.random(3))
    >>> S0 = shear_matrix(angle, direct, point, normal)
    >>> angle, direct, point, normal = shear_from_matrix(S0)
    >>> S1 = shear_matrix(angle, direct, point, normal)
    >>> is_same_transform(S0, S1)
    True
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    # normal: cross independent eigenvectors corresponding to the eigenvalue 1
    # (the shear plane is spanned by eigenvalue-1 eigenvectors; note the
    # loose 1e-4 tolerance, since these eigenvalues are ill-conditioned)
    w, V = numpy.linalg.eig(M33)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-4)[0]
    if len(i) < 2:
        raise ValueError("no two linear independent eigenvectors found %s" % w)
    V = numpy.real(V[:, i]).squeeze().T
    # Among all eigenvector pairs, keep the cross product with the largest
    # norm -- the most numerically independent pair -- as the plane normal.
    lenorm = -1.0
    for i0, i1 in ((0, 1), (0, 2), (1, 2)):
        n = numpy.cross(V[i0], V[i1])
        w = vector_norm(n)
        if w > lenorm:
            lenorm = w
            normal = n
    normal /= lenorm
    # direction and angle
    # (M33 - I) maps any vector onto a multiple of the shear direction;
    # its magnitude along the unit normal is tan(angle)
    direction = numpy.dot(M33 - numpy.identity(3), normal)
    angle = vector_norm(direction)
    direction /= angle
    angle = math.atan(angle)
    # point: eigenvector corresponding to eigenvalue 1
    # (a fixed point of the full transform lies on the shear plane)
    w, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no eigenvector corresponding to eigenvalue 1")
    point = numpy.real(V[:, i[-1]]).squeeze()
    point /= point[3]
    return angle, direction, point, normal
def decompose_matrix(matrix):
    """Return sequence of transformations from transformation matrix.
    matrix : array_like
        Non-degenerative homogeneous transformation matrix
    Return tuple of:
        scale : vector of 3 scaling factors
        shear : list of shear factors for x-y, x-z, y-z axes
        angles : list of Euler angles about static x, y, z axes
        translate : translation vector along x, y, z axes
        perspective : perspective partition of matrix
    Raise ValueError if matrix is of wrong type or degenerative.
    >>> T0 = translation_matrix([1, 2, 3])
    >>> scale, shear, angles, trans, persp = decompose_matrix(T0)
    >>> T1 = translation_matrix(trans)
    >>> numpy.allclose(T0, T1)
    True
    >>> S = scale_matrix(0.123)
    >>> scale, shear, angles, trans, persp = decompose_matrix(S)
    >>> scale[0]
    0.123
    >>> R0 = euler_matrix(1, 2, 3)
    >>> scale, shear, angles, trans, persp = decompose_matrix(R0)
    >>> R1 = euler_matrix(*angles)
    >>> numpy.allclose(R0, R1)
    True
    """
    # Work on a transposed copy, so the basis vectors of the transform are
    # the rows of M for the row-wise peeling below.
    M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
    if abs(M[3, 3]) < _EPS:
        raise ValueError("M[3, 3] is zero")
    # Normalize so the homogeneous scale is exactly 1.
    M /= M[3, 3]
    P = M.copy()
    P[:, 3] = 0.0, 0.0, 0.0, 1.0
    if not numpy.linalg.det(P):
        raise ValueError("matrix is singular")
    scale = numpy.zeros((3, ))
    shear = [0.0, 0.0, 0.0]
    angles = [0.0, 0.0, 0.0]
    # Split off the perspective partition (last column of the original,
    # untransposed matrix), if present.
    if any(abs(M[:3, 3]) > _EPS):
        perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
        M[:, 3] = 0.0, 0.0, 0.0, 1.0
    else:
        perspective = numpy.array([0.0, 0.0, 0.0, 1.0])
    # Translation is the last row of the transposed matrix; zero it out so
    # only the 3x3 scale/shear/rotation block remains.
    translate = M[3, :3].copy()
    M[3, :3] = 0.0
    # Gram-Schmidt-like pass over the 3x3 block: peel off scale and shear
    # row by row. The statement order below matters; do not reorder.
    row = M[:3, :3].copy()
    scale[0] = vector_norm(row[0])
    row[0] /= scale[0]
    shear[0] = numpy.dot(row[0], row[1])
    row[1] -= row[0] * shear[0]
    scale[1] = vector_norm(row[1])
    row[1] /= scale[1]
    shear[0] /= scale[1]
    shear[1] = numpy.dot(row[0], row[2])
    row[2] -= row[0] * shear[1]
    shear[2] = numpy.dot(row[1], row[2])
    row[2] -= row[1] * shear[2]
    scale[2] = vector_norm(row[2])
    row[2] /= scale[2]
    # NOTE: shear is a plain list, but scale[2] is a numpy scalar, so this
    # slice division broadcasts through numpy and assigns back elementwise.
    shear[1:] /= scale[2]
    # If the remaining basis is left-handed, flip all scales and axes.
    if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
        numpy.negative(scale, scale)
        numpy.negative(row, row)
    # Extract static-frame xyz Euler angles from the now-orthonormal rows.
    angles[1] = math.asin(-row[0, 2])
    if math.cos(angles[1]):
        angles[0] = math.atan2(row[1, 2], row[2, 2])
        angles[2] = math.atan2(row[0, 1], row[0, 0])
    else:
        # Gimbal lock: only a combination of the two remaining angles is
        # determined; fix angles[2] at zero by convention.
        # angles[0] = math.atan2(row[1, 0], row[1, 1])
        angles[0] = math.atan2(-row[2, 1], row[1, 1])
        angles[2] = 0.0
    return scale, shear, angles, translate, perspective
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
                   perspective=None):
    """Build a transformation matrix from its decomposed parts.

    Inverse of decompose_matrix(). Components are multiplied so they apply
    right-to-left in the order scale, shear, rotation (static-xyz Euler
    *angles*), translation, perspective; any component may be None.
    """
    M = numpy.identity(4)
    if perspective is not None:
        P = numpy.identity(4)
        P[3, :] = perspective[:4]
        M = numpy.dot(M, P)
    if translate is not None:
        T = numpy.identity(4)
        T[:3, 3] = translate[:3]
        M = numpy.dot(M, T)
    if angles is not None:
        M = numpy.dot(M, euler_matrix(angles[0], angles[1], angles[2], 'sxyz'))
    if shear is not None:
        Z = numpy.identity(4)
        # shear factors for the x-y, x-z and y-z axis pairs
        Z[0, 1], Z[0, 2], Z[1, 2] = shear[0], shear[1], shear[2]
        M = numpy.dot(M, Z)
    if scale is not None:
        M = numpy.dot(M, numpy.diag([scale[0], scale[1], scale[2], 1.0]))
    # renormalize the homogeneous part
    M /= M[3, 3]
    return M
def orthogonalization_matrix(lengths, angles):
    """Return the crystallographic orthogonalization matrix for cell
    *lengths* (a, b, c) and *angles* (alpha, beta, gamma) in degrees.

    The inverse of the returned matrix de-orthogonalizes.

    >>> O = orthogonalization_matrix([10, 10, 10], [90, 90, 90])
    >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)
    True
    """
    a, b, c = lengths
    alpha, beta, gamma = numpy.radians(angles)
    sin_a, sin_b = math.sin(alpha), math.sin(beta)
    cos_a, cos_b, cos_g = math.cos(alpha), math.cos(beta), math.cos(gamma)
    co = (cos_a * cos_b - cos_g) / (sin_a * sin_b)
    return numpy.array([
        [a * sin_b * math.sqrt(1.0 - co * co), 0.0, 0.0, 0.0],
        [-a * sin_b * co, b * sin_a, 0.0, 0.0],
        [a * cos_b, b * cos_a, c, 0.0],
        [0.0, 0.0, 0.0, 1.0]])
def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True):
    """Return affine transform matrix to register two point sets.

    v0 and v1 are shape (ndims, N) arrays of at least ndims non-homogeneous
    coordinates, where ndims is the dimensionality of the coordinate space.

    If shear is False, a similarity transformation matrix is returned.
    If also scale is False, a rigid/Euclidean transformation matrix
    is returned.

    By default the algorithm by Hartley and Zissermann [15] is used.
    If usesvd is True, similarity and Euclidean transformation matrices
    are calculated by minimizing the weighted sum of squared deviations
    (RMSD) according to the algorithm by Kabsch [8].
    Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9]
    is used, which is slower when using this Python implementation.

    The returned matrix performs rotation, translation and uniform scaling
    (if specified).

    >>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]]
    >>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]]
    >>> affine_matrix_from_points(v0, v1)
    array([[   0.14549,    0.00062,  675.50008],
           [   0.00048,    0.14094,   53.24971],
           [   0.     ,    0.     ,    1.     ]])

    More examples in superimposition_matrix()
    """
    v0 = numpy.array(v0, dtype=numpy.float64, copy=True)
    v1 = numpy.array(v1, dtype=numpy.float64, copy=True)
    ndims = v0.shape[0]
    if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape:
        raise ValueError("input arrays are of wrong shape or type")
    # move centroids to origin (M0/M1 record the inverse translations)
    t0 = -numpy.mean(v0, axis=1)
    M0 = numpy.identity(ndims+1)
    M0[:ndims, ndims] = t0
    v0 += t0.reshape(ndims, 1)
    t1 = -numpy.mean(v1, axis=1)
    M1 = numpy.identity(ndims+1)
    M1[:ndims, ndims] = t1
    v1 += t1.reshape(ndims, 1)
    if shear:
        # Affine transformation: Hartley & Zissermann least-squares via SVD
        A = numpy.concatenate((v0, v1), axis=0)
        u, s, vh = numpy.linalg.svd(A.T)
        vh = vh[:ndims].T
        B = vh[:ndims]
        C = vh[ndims:2*ndims]
        t = numpy.dot(C, numpy.linalg.pinv(B))
        t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1)
        M = numpy.vstack((t, ((0.0,)*ndims) + (1.0,)))
    elif usesvd or ndims != 3:
        # Rigid transformation via SVD of covariance matrix (Kabsch)
        u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
        # rotation matrix from SVD orthonormal bases
        R = numpy.dot(u, vh)
        if numpy.linalg.det(R) < 0.0:
            # R does not constitute right handed system; flip last singular axis
            R -= numpy.outer(u[:, ndims-1], vh[ndims-1, :]*2.0)
            s[-1] *= -1.0
        # homogeneous transformation matrix
        M = numpy.identity(ndims+1)
        M[:ndims, :ndims] = R
    else:
        # Rigid transformation matrix via quaternion (Horn, 3-D only)
        # compute symmetric matrix N from products of coordinate sums
        xx, yy, zz = numpy.sum(v0 * v1, axis=1)
        xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
        xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
        N = [[xx+yy+zz, 0.0, 0.0, 0.0],
             [yz-zy, xx-yy-zz, 0.0, 0.0],
             [zx-xz, xy+yx, yy-xx-zz, 0.0],
             [xy-yx, zx+xz, yz+zy, zz-xx-yy]]
        # quaternion: eigenvector corresponding to most positive eigenvalue
        w, V = numpy.linalg.eigh(N)
        q = V[:, numpy.argmax(w)]
        q /= vector_norm(q)  # unit quaternion
        # homogeneous transformation matrix
        M = quaternion_matrix(q)
    if scale and not shear:
        # Affine transformation; scale is ratio of RMS deviations from centroid
        # (v0/v1 are squared in place; they are local copies)
        v0 *= v0
        v1 *= v1
        M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))
    # move centroids back: M1^-1 * M * M0
    M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0))
    M /= M[ndims, ndims]
    return M
def superimposition_matrix(v0, v1, scale=False, usesvd=True):
    """Return matrix to transform given 3D point set into second point set.

    v0 and v1 are shape (3, N) or (4, N) arrays of at least 3 points; only
    the first three coordinate rows are used. The scale and usesvd
    parameters are explained in the more general affine_matrix_from_points
    function. The returned matrix is a similarity or Euclidean
    transformation matrix.
    """
    points0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]
    points1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]
    return affine_matrix_from_points(points0, points1, shear=False,
                                     scale=scale, usesvd=usesvd)
def euler_matrix(ai, aj, ak, axes='sxyz'):
    """Return homogeneous rotation matrix from Euler angles and axis sequence.

    ai, aj, ak : Euler's roll, pitch and yaw angles
    axes : one of the 24 axis sequences, as string or encoded tuple
    """
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
    except (AttributeError, KeyError):
        _TUPLE2AXES[axes]  # validation
        firstaxis, parity, repetition, frame = axes

    i = firstaxis
    j = _NEXT_AXIS[i + parity]
    k = _NEXT_AXIS[i - parity + 1]

    if frame:
        ai, ak = ak, ai
    if parity:
        ai, aj, ak = -ai, -aj, -ak

    si, ci = math.sin(ai), math.cos(ai)
    sj, cj = math.sin(aj), math.cos(aj)
    sk, ck = math.sin(ak), math.cos(ak)
    cc, cs = ci * ck, ci * sk
    sc, ss = si * ck, si * sk

    M = numpy.identity(4)
    if repetition:
        M[i, i] = cj
        M[i, j] = sj * si
        M[i, k] = sj * ci
        M[j, i] = sj * sk
        M[j, j] = -cj * ss + cc
        M[j, k] = -cj * cs - sc
        M[k, i] = -sj * ck
        M[k, j] = cj * sc + cs
        M[k, k] = cj * cc - ss
    else:
        M[i, i] = cj * ck
        M[i, j] = sj * sc - cs
        M[i, k] = sj * cc + ss
        M[j, i] = cj * sk
        M[j, j] = sj * ss + cc
        M[j, k] = sj * cs - sc
        M[k, i] = -sj
        M[k, j] = cj * si
        M[k, k] = cj * ci
    return M
def euler_from_matrix(matrix, axes='sxyz'):
    """Return Euler angles from rotation matrix for specified axis sequence.

    axes : one of the 24 axis sequences, as string or encoded tuple.
    Note that many Euler angle triplets can describe one matrix.
    """
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        _TUPLE2AXES[axes]  # validation
        firstaxis, parity, repetition, frame = axes

    i = firstaxis
    j = _NEXT_AXIS[i + parity]
    k = _NEXT_AXIS[i - parity + 1]

    M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
    if repetition:
        sy = math.sqrt(M[i, j] * M[i, j] + M[i, k] * M[i, k])
        if sy > _EPS:
            ax = math.atan2(M[i, j], M[i, k])
            ay = math.atan2(sy, M[i, i])
            az = math.atan2(M[j, i], -M[k, i])
        else:
            # gimbal lock: third angle is not uniquely defined, set it to 0
            ax = math.atan2(-M[j, k], M[j, j])
            ay = math.atan2(sy, M[i, i])
            az = 0.0
    else:
        cy = math.sqrt(M[i, i] * M[i, i] + M[j, i] * M[j, i])
        if cy > _EPS:
            ax = math.atan2(M[k, j], M[k, k])
            ay = math.atan2(-M[k, i], cy)
            az = math.atan2(M[j, i], M[i, i])
        else:
            # gimbal lock: third angle is not uniquely defined, set it to 0
            ax = math.atan2(-M[j, k], M[j, j])
            ay = math.atan2(-M[k, i], cy)
            az = 0.0

    if parity:
        ax, ay, az = -ax, -ay, -az
    if frame:
        ax, az = az, ax
    return ax, ay, az
def euler_from_quaternion(quaternion, axes='sxyz'):
    """Return Euler angles from quaternion for specified axis sequence."""
    rotation = quaternion_matrix(quaternion)
    return euler_from_matrix(rotation, axes)
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
    """Return quaternion (w, x, y, z) from Euler angles and axis sequence.

    ai, aj, ak : Euler's roll, pitch and yaw angles
    axes : one of the 24 axis sequences, as string or encoded tuple
    The result is sign-normalized so the scalar part is non-negative.
    """
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        _TUPLE2AXES[axes]  # validation
        firstaxis, parity, repetition, frame = axes

    i = firstaxis + 1
    j = _NEXT_AXIS[i + parity - 1] + 1
    k = _NEXT_AXIS[i - parity] + 1

    if frame:
        ai, ak = ak, ai
    if parity:
        aj = -aj

    # half angles
    hi, hj, hk = ai / 2.0, aj / 2.0, ak / 2.0
    ci, si = math.cos(hi), math.sin(hi)
    cj, sj = math.cos(hj), math.sin(hj)
    ck, sk = math.cos(hk), math.sin(hk)
    cc, cs = ci * ck, ci * sk
    sc, ss = si * ck, si * sk

    q = numpy.empty((4, ))
    if repetition:
        q[0] = cj * (cc - ss)
        q[i] = cj * (cs + sc)
        q[j] = sj * (cc + ss)
        q[k] = sj * (cs - sc)
    else:
        q[0] = cj * cc + sj * ss
        q[i] = cj * sc - sj * cs
        q[j] = cj * ss + sj * cc
        q[k] = cj * cs - sj * sc
    if parity:
        q[j] *= -1.0

    # added by kh, 0919: canonicalize the overall sign of the quaternion
    q = q * (1.0 if q[0] > 0 else -1.0)
    return q
def quaternion_about_axis(angle, axis):
    """Return quaternion (w, x, y, z) for rotation of angle about axis."""
    q = numpy.array([0.0, axis[0], axis[1], axis[2]])
    axis_len = vector_norm(q)
    if axis_len > _EPS:
        # scale the vector part by sin(angle/2) / |axis|
        q *= math.sin(angle / 2.0) / axis_len
    q[0] = math.cos(angle / 2.0)
    return q
def quaternion_matrix(quaternion):
    """Return 4x4 homogeneous rotation matrix from (w, x, y, z) quaternion.

    A near-zero quaternion yields the identity matrix.
    """
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    norm_sq = numpy.dot(q, q)
    if norm_sq < _EPS:
        return numpy.identity(4)
    q *= math.sqrt(2.0 / norm_sq)
    p = numpy.outer(q, q)
    return numpy.array([
        [1.0 - p[2, 2] - p[3, 3], p[1, 2] - p[3, 0], p[1, 3] + p[2, 0], 0.0],
        [p[1, 2] + p[3, 0], 1.0 - p[1, 1] - p[3, 3], p[2, 3] - p[1, 0], 0.0],
        [p[1, 3] - p[2, 0], p[2, 3] + p[1, 0], 1.0 - p[1, 1] - p[2, 2], 0.0],
        [0.0, 0.0, 0.0, 1.0]])
def quaternion_from_matrix(matrix, isprecise=False):
    """Return quaternion (w, x, y, z) from rotation matrix.

    If isprecise is True, the input matrix is assumed to be a precise rotation
    matrix and a faster algorithm is used.

    >>> q = quaternion_from_matrix(numpy.identity(4), True)
    >>> numpy.allclose(q, [1, 0, 0, 0])
    True
    >>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))
    >>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])
    True
    >>> R = rotation_matrix(0.123, (1, 2, 3))
    >>> q = quaternion_from_matrix(R, True)
    >>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
    True
    >>> R = random_rotation_matrix()
    >>> q = quaternion_from_matrix(R)
    >>> is_same_transform(R, quaternion_matrix(q))
    True
    >>> is_same_quaternion(quaternion_from_matrix(R, isprecise=False),
    ...                    quaternion_from_matrix(R, isprecise=True))
    True
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
    if isprecise:
        # Shepperd-style extraction: choose the branch with the largest
        # diagonal contribution to avoid catastrophic cancellation.
        q = numpy.empty((4, ))
        t = numpy.trace(M)
        if t > M[3, 3]:
            q[0] = t
            q[3] = M[1, 0] - M[0, 1]
            q[2] = M[0, 2] - M[2, 0]
            q[1] = M[2, 1] - M[1, 2]
        else:
            # pick i as the index of the largest diagonal element
            i, j, k = 0, 1, 2
            if M[1, 1] > M[0, 0]:
                i, j, k = 1, 2, 0
            if M[2, 2] > M[i, i]:
                i, j, k = 2, 0, 1
            t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
            q[i] = t
            q[j] = M[i, j] + M[j, i]
            q[k] = M[k, i] + M[i, k]
            q[3] = M[k, j] - M[j, k]
            # reorder from (x, y, z, w) to (w, x, y, z)
            q = q[[3, 0, 1, 2]]
        q *= 0.5 / math.sqrt(t * M[3, 3])
    else:
        m00 = M[0, 0]
        m01 = M[0, 1]
        m02 = M[0, 2]
        m10 = M[1, 0]
        m11 = M[1, 1]
        m12 = M[1, 2]
        m20 = M[2, 0]
        m21 = M[2, 1]
        m22 = M[2, 2]
        # symmetric matrix K (Bar-Itzhack method: robust for imprecise input)
        K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0],
                         [m01+m10, m11-m00-m22, 0.0, 0.0],
                         [m02+m20, m12+m21, m22-m00-m11, 0.0],
                         [m21-m12, m02-m20, m10-m01, m00+m11+m22]])
        K /= 3.0
        # quaternion is eigenvector of K that corresponds to largest eigenvalue
        w, V = numpy.linalg.eigh(K)
        q = V[[3, 0, 1, 2], numpy.argmax(w)]
    # canonicalize sign so the scalar part is non-negative
    if q[0] < 0.0:
        numpy.negative(q, q)
    return q
def quaternion_multiply(quaternion1, quaternion0):
    """Return the Hamilton product quaternion1 * quaternion0 (w, x, y, z)."""
    w0, x0, y0, z0 = quaternion0
    w1, x1, y1, z1 = quaternion1
    return numpy.array([
        w1 * w0 - x1 * x0 - y1 * y0 - z1 * z0,
        w1 * x0 + x1 * w0 + y1 * z0 - z1 * y0,
        w1 * y0 - x1 * z0 + y1 * w0 + z1 * x0,
        w1 * z0 + x1 * y0 - y1 * x0 + z1 * w0], dtype=numpy.float64)
def quaternion_conjugate(quaternion):
    """Return conjugate of quaternion (vector part negated)."""
    conj = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    conj[1:] = -conj[1:]
    return conj
def quaternion_inverse(quaternion):
    """Return multiplicative inverse of quaternion (conjugate / squared norm)."""
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    q[1:] = -q[1:]
    return q / numpy.dot(q, q)
def quaternion_real(quaternion):
    """Return the scalar (real) part of a (w, x, y, z) quaternion as a float."""
    w = quaternion[0]
    return float(w)
def quaternion_imag(quaternion):
    """Return the vector (imaginary) part of a (w, x, y, z) quaternion."""
    vec = numpy.asarray(quaternion, dtype=numpy.float64)[1:4]
    return vec.copy()
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
    """Return spherical linear interpolation between two quaternions.

    fraction : interpolation parameter in [0, 1]
    spin : number of extra full rotations to add to the arc
    shortestpath : if True, flip one quaternion so the short arc is taken
    """
    q0 = unit_vector(quat0[:4])
    q1 = unit_vector(quat1[:4])
    if fraction == 0.0:
        return q0
    if fraction == 1.0:
        return q1
    d = numpy.dot(q0, q1)
    if abs(abs(d) - 1.0) < _EPS:
        # quaternions are (anti)parallel: nothing to interpolate
        return q0
    if shortestpath and d < 0.0:
        # invert rotation so interpolation follows the shorter arc
        d = -d
        q1 = -q1
    angle = math.acos(d) + spin * math.pi
    if abs(angle) < _EPS:
        return q0
    inv_sin = 1.0 / math.sin(angle)
    w0 = math.sin((1.0 - fraction) * angle) * inv_sin
    w1 = math.sin(fraction * angle) * inv_sin
    return w0 * q0 + w1 * q1
def random_quaternion(rand=None):
    """Return uniform random unit quaternion (w, x, y, z).

    rand : three independent uniform [0, 1) variables, or None to draw
    them from numpy.random.
    """
    if rand is None:
        rand = numpy.random.rand(3)
    else:
        assert len(rand) == 3
    r1 = numpy.sqrt(1.0 - rand[0])
    r2 = numpy.sqrt(rand[0])
    theta1 = 2.0 * math.pi * rand[1]
    theta2 = 2.0 * math.pi * rand[2]
    return numpy.array([numpy.cos(theta2) * r2,
                        numpy.sin(theta1) * r1,
                        numpy.cos(theta1) * r1,
                        numpy.sin(theta2) * r2])
def random_rotation_matrix(rand=None):
    """Return uniform random 4x4 rotation matrix.

    rand : optional three uniform [0, 1) variables used to draw the
    underlying quaternion.
    """
    quat = random_quaternion(rand)
    return quaternion_matrix(quat)
class Arcball(object):
    """Virtual Trackball Control.

    Maps 2D window drags onto 3D rotations represented as quaternions.

    >>> ball = Arcball()
    >>> ball = Arcball(initial=numpy.identity(4))
    >>> ball.place([320, 320], 320)
    >>> ball.down([500, 250])
    >>> ball.drag([475, 275])
    >>> R = ball.matrix()
    >>> numpy.allclose(numpy.sum(R), 3.90583455)
    True
    >>> ball = Arcball(initial=[1, 0, 0, 0])
    >>> ball.place([320, 320], 320)
    >>> ball.setaxes([1, 1, 0], [-1, 1, 0])
    >>> ball.constrain = True
    >>> ball.down([400, 200])
    >>> ball.drag([200, 400])
    >>> R = ball.matrix()
    >>> numpy.allclose(numpy.sum(R), 0.2055924)
    True
    >>> ball.next()
    """
    def __init__(self, initial=None):
        """Initialize virtual trackball control.

        initial : quaternion or rotation matrix
        """
        self._axis = None  # constraint axis active during the current drag
        self._axes = None  # optional axes available for constraining
        self._radius = 1.0  # trackball radius in window coordinates
        self._center = [0.0, 0.0]  # trackball center in window coordinates
        self._vdown = numpy.array([0.0, 0.0, 1.0])  # sphere point at mouse-down
        self._constrain = False
        if initial is None:
            self._qdown = numpy.array([1.0, 0.0, 0.0, 0.0])
        else:
            initial = numpy.array(initial, dtype=numpy.float64)
            if initial.shape == (4, 4):
                self._qdown = quaternion_from_matrix(initial)
            elif initial.shape == (4, ):
                initial /= vector_norm(initial)
                self._qdown = initial
            else:
                raise ValueError("initial not a quaternion or matrix")
        self._qnow = self._qpre = self._qdown
    def place(self, center, radius):
        """Place Arcball, e.g. when window size changes.

        center : sequence[2]
            Window coordinates of trackball center.
        radius : float
            Radius of trackball in window coordinates.
        """
        self._radius = float(radius)
        self._center[0] = center[0]
        self._center[1] = center[1]
    def setaxes(self, *axes):
        """Set axes to constrain rotations."""
        if axes is None:
            self._axes = None
        else:
            self._axes = [unit_vector(axis) for axis in axes]
    @property
    def constrain(self):
        """Return state of constrain to axis mode."""
        return self._constrain
    @constrain.setter
    def constrain(self, value):
        """Set state of constrain to axis mode."""
        self._constrain = bool(value)
    def down(self, point):
        """Set initial cursor window coordinates and pick constrain-axis."""
        self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
        self._qdown = self._qpre = self._qnow
        if self._constrain and self._axes is not None:
            # lock this drag to the constraint axis nearest the click point
            self._axis = arcball_nearest_axis(self._vdown, self._axes)
            self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
        else:
            self._axis = None
    def drag(self, point):
        """Update current cursor window coordinates."""
        vnow = arcball_map_to_sphere(point, self._center, self._radius)
        if self._axis is not None:
            vnow = arcball_constrain_to_axis(vnow, self._axis)
        self._qpre = self._qnow
        # rotation axis is the cross product of down- and current vectors
        t = numpy.cross(self._vdown, vnow)
        if numpy.dot(t, t) < _EPS:
            # no movement: keep the mouse-down orientation
            self._qnow = self._qdown
        else:
            q = [numpy.dot(self._vdown, vnow), t[0], t[1], t[2]]
            self._qnow = quaternion_multiply(q, self._qdown)
    def next(self, acceleration=0.0):
        """Continue rotation in direction of last drag."""
        # extrapolate past qnow by slerping with fraction > 1
        q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)
        self._qpre, self._qnow = self._qnow, q
    def matrix(self):
        """Return homogeneous rotation matrix."""
        return quaternion_matrix(self._qnow)
def arcball_map_to_sphere(point, center, radius):
    """Return unit sphere coordinates from window coordinates."""
    x = (point[0] - center[0]) / radius
    y = (center[1] - point[1]) / radius
    r2 = x * x + y * y
    if r2 > 1.0:
        # position outside of sphere: project onto the equator
        scale = 1.0 / math.sqrt(r2)
        return numpy.array([x * scale, y * scale, 0.0])
    # inside the ball: lift onto the sphere surface
    return numpy.array([x, y, math.sqrt(1.0 - r2)])
def arcball_constrain_to_axis(point, axis):
    """Return sphere point perpendicular to axis."""
    v = numpy.array(point, dtype=numpy.float64, copy=True)
    a = numpy.array(axis, dtype=numpy.float64, copy=True)
    v -= a * numpy.dot(a, v)  # project onto the plane orthogonal to axis
    n = vector_norm(v)
    if n > _EPS:
        if v[2] < 0.0:
            numpy.negative(v, v)
        return v / n
    # point is (nearly) parallel to axis: pick any perpendicular direction
    if a[2] == 1.0:
        return numpy.array([1.0, 0.0, 0.0])
    return unit_vector([-a[1], a[0], 0.0])
def arcball_nearest_axis(point, axes):
    """Return axis, which arc is nearest to point."""
    point = numpy.array(point, dtype=numpy.float64, copy=False)
    best_axis = None
    best_score = -1.0
    for candidate in axes:
        score = numpy.dot(arcball_constrain_to_axis(point, candidate), point)
        if score > best_score:
            best_axis = candidate
            best_score = score
    return best_axis
# epsilon for testing whether a number is close to zero
_EPS = numpy.finfo(float).eps * 4.0
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# map axes strings to/from tuples of inner axis, parity, repetition, frame
# encoding: (firstaxis, parity, repetition, frame); 's' prefix = static
# (extrinsic) frame, 'r' prefix = rotating (intrinsic) frame
_AXES2TUPLE = {
    'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
    'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
    'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
    'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
    'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
    'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
    'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
    'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
# inverse mapping, also used to validate tuple-encoded axis sequences
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
def vector_norm(data, axis=None, out=None):
    """Return length, i.e. Euclidean norm, of ndarray along axis.

    If out is given, the norms are written into it in place and None is
    returned; otherwise the norm (scalar for 1-D input) is returned.
    """
    arr = numpy.array(data, dtype=numpy.float64, copy=True)
    if out is None:
        if arr.ndim == 1:
            # fast scalar path for plain vectors
            return math.sqrt(numpy.dot(arr, arr))
        arr *= arr
        result = numpy.atleast_1d(numpy.sum(arr, axis=axis))
        numpy.sqrt(result, result)
        return result
    arr *= arr
    numpy.sum(arr, axis=axis, out=out)
    numpy.sqrt(out, out)
def unit_vector(data, axis=None, out=None):
    """Return ndarray normalized by length, i.e. Euclidean norm, along axis.

    If out is given the normalized data is written into it in place and
    None is returned; otherwise a new normalized array is returned.
    """
    if out is None:
        vec = numpy.array(data, dtype=numpy.float64, copy=True)
        if vec.ndim == 1:
            # fast path for plain vectors
            vec /= math.sqrt(numpy.dot(vec, vec))
            return vec
    else:
        if out is not data:
            out[:] = numpy.array(data, copy=False)
        vec = out
    norm = numpy.atleast_1d(numpy.sum(vec * vec, axis))
    numpy.sqrt(norm, norm)
    if axis is not None:
        norm = numpy.expand_dims(norm, axis)
    vec /= norm
    if out is None:
        return vec
def random_vector(size):
    """Return array of random doubles in the half-open interval [0.0, 1.0).

    size may be an int or a shape tuple, as accepted by numpy.random.random.
    """
    samples = numpy.random.random(size)
    return samples
def vector_product(v0, v1, axis=0):
    """Return vector(s) perpendicular to v0 and v1 (cross product along axis)."""
    cross = numpy.cross(v0, v1, axis=axis)
    return cross
def angle_between_vectors(v0, v1, directed=True, axis=0):
    """Return angle between vectors, in radians.

    v0, v1 : array-like vectors, or stacks of vectors along *axis*.
    If directed is False, the input vectors are interpreted as undirected
    axes, i.e. the maximum angle is pi/2.
    """
    v0 = numpy.array(v0, dtype=numpy.float64, copy=False)
    v1 = numpy.array(v1, dtype=numpy.float64, copy=False)
    dot = numpy.sum(v0 * v1, axis=axis)
    dot /= vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis)
    # Floating-point rounding can push |dot| infinitesimally above 1 for
    # (anti)parallel vectors, which would make arccos return NaN; clip the
    # cosine to its valid domain before taking the inverse.
    dot = numpy.clip(dot, -1.0, 1.0)
    return numpy.arccos(dot if directed else numpy.fabs(dot))
def inverse_matrix(matrix):
    """Return inverse of square transformation matrix."""
    inv = numpy.linalg.inv(matrix)
    return inv
def concatenate_matrices(*matrices):
    """Return the matrix product of a series of 4x4 transformation matrices.

    With no arguments, the 4x4 identity matrix is returned.
    """
    result = numpy.identity(4)
    for mat in matrices:
        result = numpy.dot(result, mat)
    return result
def is_same_transform(matrix0, matrix1):
    """Return True if two homogeneous matrices perform the same transformation.

    Both matrices are normalized by their [3, 3] element before comparison,
    so matrices differing only by a global scale compare equal.
    """
    m0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
    m1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
    m0 /= m0[3, 3]
    m1 /= m1[3, 3]
    return numpy.allclose(m0, m1)
def is_same_quaternion(q0, q1):
    """Return True if two quaternions are equal up to a global sign flip."""
    a = numpy.array(q0)
    b = numpy.array(q1)
    return numpy.allclose(a, b) or numpy.allclose(a, -b)
def _import_module(name, package=None, warn=True, prefix='_py_', ignore='_'):
    """Try import all public attributes from module into global namespace.

    Existing attributes with name clashes are renamed with prefix.
    Attributes starting with underscore are ignored by default.
    Return True on successful import (None if the import failed).
    """
    import warnings
    from importlib import import_module
    try:
        if not package:
            module = import_module(name)
        else:
            module = import_module('.' + name, package=package)
    except ImportError:
        if warn:
            warnings.warn("failed to import module %s" % name)
    else:
        for attr in dir(module):
            if ignore and attr.startswith(ignore):
                continue
            if prefix:
                if attr in globals():
                    # keep the pure-Python version under a prefixed name
                    globals()[prefix + attr] = globals()[attr]
                elif warn:
                    warnings.warn("no Python implementation of " + attr)
            # the imported (e.g. C-accelerated) attribute wins the plain name
            globals()[attr] = getattr(module, attr)
        return True
# Substitute the C implementations from the optional _transformations
# extension module for the pure-Python ones defined above, when available.
_import_module('_transformations', warn=False)
if __name__ == "__main__":
    # Self-test: run this module's doctests; fixed print options keep
    # doctest output stable across numpy versions.
    import doctest
    import random  # noqa: used in doctests
    numpy.set_printoptions(suppress=True, precision=5)
    doctest.testmod()
import torch
import numpy as np
# Short module-level aliases for common torch constructors, so callers can
# write e.g. `FloatTensor(...)` / `zeros(...)` after a star-import.
tensor = torch.tensor
DoubleTensor = torch.DoubleTensor
FloatTensor = torch.FloatTensor
LongTensor = torch.LongTensor
ByteTensor = torch.ByteTensor
ones = torch.ones
zeros = torch.zeros
class to_cpu:
    """Context manager that temporarily moves models/tensors to the CPU.

    Objects are moved to the CPU immediately on construction; on exit each
    one is restored to the device it was on beforehand. None entries are
    ignored.
    """

    def __init__(self, *models):
        self.models = [m for m in models if m is not None]
        self.prev_devices = [
            m.device if hasattr(m, 'device') else next(m.parameters()).device
            for m in self.models]
        cpu = torch.device('cpu')
        for m in self.models:
            m.to(cpu)

    def __enter__(self):
        pass

    def __exit__(self, *args):
        # restore each object to its original device; do not swallow exceptions
        for m, device in zip(self.models, self.prev_devices):
            m.to(device)
        return False
class to_device:
    """Context manager that temporarily moves models/tensors to a device.

    Objects are moved to `device` immediately on construction; on exit each
    one is restored to the device it was on beforehand. None entries are
    ignored.
    """

    def __init__(self, device, *models):
        self.models = [m for m in models if m is not None]
        self.prev_devices = [
            m.device if hasattr(m, 'device') else next(m.parameters()).device
            for m in self.models]
        for m in self.models:
            m.to(device)

    def __enter__(self):
        pass

    def __exit__(self, *args):
        # restore each object to its original device; do not swallow exceptions
        for m, device in zip(self.models, self.prev_devices):
            m.to(device)
        return False
class to_test:
    """Context manager that puts models in eval mode.

    Models are switched to eval mode on construction; their previous
    training flags are restored on exit. None entries are ignored.
    """

    def __init__(self, *models):
        self.models = [m for m in models if m is not None]
        self.prev_modes = [m.training for m in self.models]
        for m in self.models:
            m.train(False)

    def __enter__(self):
        pass

    def __exit__(self, *args):
        for m, mode in zip(self.models, self.prev_modes):
            m.train(mode)
        return False
class to_train:
    """Context manager that puts models in train mode.

    Models are switched to train mode on construction; their previous
    training flags are restored on exit. None entries are ignored.
    """

    def __init__(self, *models):
        self.models = [m for m in models if m is not None]
        self.prev_modes = [m.training for m in self.models]
        for m in self.models:
            m.train(True)

    def __enter__(self):
        pass

    def __exit__(self, *args):
        for m, mode in zip(self.models, self.prev_modes):
            m.train(mode)
        return False
def batch_to(dst, *args):
    """Move every non-None argument to device/dtype dst; return them as a list."""
    return [item.to(dst) for item in args if item is not None]
def get_flat_params_from(models):
    """Concatenate all parameters of one or more models into a flat 1-D tensor.

    models : a single module or an iterable of modules.
    """
    if not hasattr(models, '__iter__'):
        models = (models, )
    chunks = []
    for model in models:
        chunks.extend(p.data.view(-1) for p in model.parameters())
    return torch.cat(chunks)
def set_flat_params_to(model, flat_params):
    """Copy values from a flat 1-D tensor back into model parameters in order."""
    offset = 0
    for param in model.parameters():
        count = param.numel()
        param.data.copy_(flat_params[offset:offset + count].view(param.size()))
        offset += count
def get_flat_grad_from(inputs, grad_grad=False):
    """Concatenate gradients of the given parameters into one flat tensor.

    inputs : iterable of parameters/tensors.
    grad_grad : if True, read second-order gradients (param.grad.grad).
    Parameters with no gradient contribute zeros of the matching size.
    """
    chunks = []
    for param in inputs:
        if grad_grad:
            chunks.append(param.grad.grad.view(-1))
        elif param.grad is None:
            chunks.append(torch.zeros(param.view(-1).shape))
        else:
            chunks.append(param.grad.view(-1))
    return torch.cat(chunks)
def compute_flat_grad(output, inputs, filter_input_ids=set(), retain_graph=False, create_graph=False):
    """Return flat gradient of output w.r.t. inputs.

    Gradients are computed only for inputs whose index is not in
    filter_input_ids; filtered slots are filled with zeros of the matching
    size. The .grad fields of the differentiated parameters are cleared
    before returning.
    """
    if create_graph:
        retain_graph = True

    inputs = list(inputs)
    active = [p for idx, p in enumerate(inputs) if idx not in filter_input_ids]

    grads = torch.autograd.grad(output, active, retain_graph=retain_graph,
                                create_graph=create_graph)

    grad_iter = iter(grads)
    pieces = []
    for idx, param in enumerate(inputs):
        if idx in filter_input_ids:
            pieces.append(torch.zeros(param.view(-1).shape))
        else:
            pieces.append(next(grad_iter).view(-1))
    flat = torch.cat(pieces)

    # clear any .grad state on the differentiated parameters
    for param in active:
        param.grad = None
    return flat
def set_optimizer_lr(optimizer, lr):
    """Set the learning rate of every parameter group in the optimizer."""
    for group in optimizer.param_groups:
        group['lr'] = lr
def filter_state_dict(state_dict, filter_keys):
    """Remove, in place, every entry whose key contains any of filter_keys."""
    for key in list(state_dict.keys()):
        if any(fragment in key for fragment in filter_keys):
            del state_dict[key]
import torch
import math
import numpy as np
from utils.transformation import quaternion_matrix, quaternion_about_axis,\
quaternion_inverse, quaternion_multiply, rotation_from_quaternion, rotation_from_matrix
def normal_entropy(std):
    """Return entropy of diagonal Gaussians with the given std.

    std : tensor of shape (batch, dims); the per-dimension entropies are
    summed, returning shape (batch, 1).
    """
    variance = std.pow(2)
    per_dim = 0.5 + 0.5 * torch.log(2 * variance * math.pi)
    return per_dim.sum(1, keepdim=True)
def normal_log_density(x, mean, log_std, std):
    """Log-density of ``x`` under a diagonal Gaussian N(mean, std**2).

    ``log_std`` must equal ``log(std)``; it is passed separately so the caller
    keeps it differentiable.  Result is summed over dim 1 (shape (batch, 1)).
    """
    variance = std.pow(2)
    density = -(x - mean).pow(2) / (2 * variance) - 0.5 * math.log(2 * math.pi) - log_std
    return density.sum(1, keepdim=True)
def get_qvel_fd(cur_qpos, next_qpos, dt, transform=None):
    """Finite-difference generalized velocity between two qpos frames.

    Layout (MuJoCo-style): qpos[:3] root translation, qpos[3:7] root
    quaternion (scalar-first, consistent with quat_from_bullet in this
    module), qpos[7:] joint angles.  The returned qvel stacks linear root
    velocity, root-frame angular velocity, and joint velocities.  If
    ``transform`` is given ('root' or 'heading'), the linear part is
    additionally rotated into that frame.
    """
    v = (next_qpos[:3] - cur_qpos[:3]) / dt
    qrel = quaternion_multiply(next_qpos[3:7], quaternion_inverse(cur_qpos[3:7])) # A*inverse(B)=A/B
    axis, angle = rotation_from_quaternion(qrel, True)
    # wrap the rotation angle into (-pi, pi] so the finite difference stays small
    if angle > np.pi:
        angle -= 2 * np.pi
    elif angle < -np.pi:
        angle += 2 * np.pi
    rv = (axis * angle) / dt
    rv = transform_vec(rv, cur_qpos[3:7], 'root') # angular velocity is in root coord
    qvel = (next_qpos[7:] - cur_qpos[7:]) / dt
    qvel = np.concatenate((v, rv, qvel))
    if transform is not None:
        v = transform_vec(v, cur_qpos[3:7], transform)
        qvel[:3] = v
    return qvel
def get_sktvel_fd(prev_skt_pos, cur_skt_pos, dt):
    """Finite-difference velocity of skeleton positions over timestep ``dt``."""
    return (cur_skt_pos - prev_skt_pos) / dt
def get_angvel_fd(prev_bquat, cur_bquat, dt):
    """Per-joint angular velocity from consecutive flat body-quaternion vectors.

    Both inputs have length 4*N; the result has length 3*N (axis-angle rate
    per joint, divided by ``dt``).
    """
    q_diff = multi_quat_diff(cur_bquat, prev_bquat)
    n_joint = q_diff.shape[0] // 4
    angvel = np.zeros(n_joint * 3)
    for j in range(n_joint):
        angvel[3 * j: 3 * j + 3] = rotation_from_quaternion(q_diff[4 * j: 4 * j + 4]) / dt
    return angvel
def transform_vec(v, q, trans='root'):
    """Rotate vector ``v`` into the frame described by quaternion ``q``.

    ``trans='root'`` uses the full rotation of ``q``; ``trans='heading'``
    first zeroes components 1 and 2 of ``q`` (same scheme as get_heading_q)
    so only the heading part of the rotation is applied.
    """
    if trans == 'root':
        rot = quaternion_matrix(q)[:3, :3]
    elif trans == 'heading':
        hq = q.copy()
        hq[1] = 0
        hq[2] = 0
        hq /= np.linalg.norm(hq)
        rot = quaternion_matrix(hq)[:3, :3]
    else:
        assert False
    return rot.T.dot(v[:, None]).ravel()
def get_heading_q(q):
    """Heading component of quaternion ``q``: zero elements 1 and 2, renormalize."""
    hq = q.copy()
    hq[1] = hq[2] = 0
    hq /= np.linalg.norm(hq)
    return hq
def get_heading(q):
    """Heading angle (radians) encoded by quaternion ``q``.

    Component 3's sign is canonicalized before extracting the angle so that
    equivalent quaternions (q and -q) yield the same heading.
    """
    hq = q.copy()
    hq[1] = hq[2] = 0
    if hq[3] < 0:
        hq *= -1
    hq /= np.linalg.norm(hq)
    return 2 * math.acos(hq[0])
def de_heading(q):
    """Remove the heading rotation from ``q``, leaving only the non-heading part."""
    heading_inv = quaternion_inverse(get_heading_q(q))
    return quaternion_multiply(heading_inv, q)
def multi_quat_diff(nq1, nq0):
    """Return the relative quaternions q1 * inverse(q0) for each of N joints.

    ``nq1`` and ``nq0`` are flat arrays of length 4*N.  Each quaternion is
    first canonicalized to a non-negative scalar component so that q and -q
    (which encode the same rotation) produce the same difference.
    """
    nq_diff = np.zeros_like(nq0)
    for i in range(nq1.shape[0] // 4):
        ind = slice(4*i, 4*i + 4)
        q1 = nq1[ind]
        q0 = nq0[ind]
        ################################
        # kh add, 0919
        # sign canonicalization: flip to the hemisphere with q[0] >= 0
        if q1[0] < 0:
            q1 = q1 * -1.
        if q0[0] < 0:
            q0 = q0 * -1.
        ################################
        nq_diff[ind] = quaternion_multiply(q1, quaternion_inverse(q0))
    return nq_diff
def multi_quat_norm(nq):
    """Scalar rotation measure (arccos of the scalar part) for each of N joints."""
    scalars = np.clip(nq[::4], -1.0, 1.0)
    return np.arccos(scalars)
def quat_mul_vec(q, v):
    """Rotate vector ``v`` by quaternion ``q``."""
    rot = quaternion_matrix(q)[:3, :3]
    return rot.dot(v[:, None]).ravel()
def quat_to_bullet(q):
    """Convert a (w, x, y, z) quaternion to PyBullet's (x, y, z, w) order."""
    w, x, y, z = q
    return np.array([x, y, z, w])
def quat_from_bullet(q):
    """Convert a PyBullet (x, y, z, w) quaternion to (w, x, y, z) order."""
    x, y, z, w = q
    return np.array([w, x, y, z])
def quat_from_expmap(e):
    """Quaternion from an exponential-map (axis * angle) rotation vector."""
    angle = np.linalg.norm(e)
    # near-zero rotations get an arbitrary (but fixed) axis to avoid 0/0
    axis = np.array([1.0, 0.0, 0.0]) if angle < 1e-12 else e / angle
    return quaternion_about_axis(angle, axis)
| 3,516 | 24.860294 | 101 | py |
PoseTriplet | PoseTriplet-main/imitator/utils/functions.py | import torch
import numpy as np
import torch.nn as nn
def mask_features(features, masked_freq):
    """Zero out all rows of ``features`` except every ``masked_freq``-th one.

    :param features: t x feature_size array
    :param masked_freq: keep one row every ``masked_freq`` steps
    :return: new array of the same shape; the input is not modified
    """
    keep = np.zeros_like(features)
    keep[::masked_freq] = 1.
    return features * keep
def mask_features_withshuffle(features, masked_freq):
    """Mask rows like ``mask_features``, then shuffle the kept rows' tail features.

    Every ``masked_freq``-th row is kept and the rest zeroed; the kept rows'
    columns from index 2 onward are then randomly permuted among each other
    (columns 0-1 stay with their original row).  Uses the global NumPy RNG.

    :param features: t x feature_size array
    :return: new masked/shuffled array; the input is not modified
    """
    keep = np.zeros_like(features)
    keep[::masked_freq] = 1.
    feature_out = features * keep
    # copy the surviving rows, permute them, write back only columns 2:
    kept_rows = feature_out[::masked_freq] * 1.
    np.random.shuffle(kept_rows)
    feature_out[::masked_freq, 2:] = kept_rows[:, 2:] * 1
    return feature_out
PoseTriplet | PoseTriplet-main/imitator/utils/__init__.py | from utils.memory import *
from utils.zfilter import *
from utils.torch import *
from utils.math import *
from utils.tools import *
from utils.logger import *
from utils.tb_logger import *
from utils.functions import *
| 219 | 23.444444 | 29 | py |
PoseTriplet | PoseTriplet-main/imitator/utils/tb_logger.py | """
File: logger.py
Modified by: Senthil Purushwalkam
Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514
Email: spurushw<at>andrew<dot>cmu<dot>edu
Github: https://github.com/senthilps8
Description:
"""
import tensorflow as tf
from torch.autograd import Variable
import numpy as np
import scipy.misc
import os
import torch
from os import path
try:
from StringIO import StringIO # Python 2.7
except ImportError:
from io import BytesIO # Python 3.x
class Logger(object):
    """Thin wrapper around a TensorFlow (1.x) FileWriter for scalar, image
    and histogram summaries."""

    def __init__(self, log_dir, name=None):
        """Create a summary writer logging to log_dir (optionally into a named sub-dir)."""
        self.name = name
        if name is not None:
            # exist_ok covers the "already created" case without the old bare
            # `except: pass`, which also hid real errors such as EACCES.
            os.makedirs(os.path.join(log_dir, name), exist_ok=True)
            self.writer = tf.summary.FileWriter(os.path.join(log_dir, name),
                                                filename_suffix=name)
        else:
            self.writer = tf.summary.FileWriter(log_dir)

    def scalar_summary(self, tag, value, step):
        """Log a scalar variable."""
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
        self.writer.add_summary(summary, step)

    def image_summary(self, tag, images, step):
        """Log a list of images under ``tag/<index>``."""
        img_summaries = []
        for i, img in enumerate(images):
            # Write the image to an in-memory buffer; StringIO is the
            # Python 2 path, BytesIO the Python 3 fallback.
            try:
                s = StringIO()
            except Exception:
                s = BytesIO()
            # NOTE(review): scipy.misc.toimage was removed in SciPy >= 1.2 —
            # consider PIL.Image.fromarray if the SciPy pin ever moves.
            scipy.misc.toimage(img).save(s, format="png")
            # Create an Image object
            img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
                                       height=img.shape[0],
                                       width=img.shape[1])
            # Create a Summary value
            img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))
        # Create and write Summary
        summary = tf.Summary(value=img_summaries)
        self.writer.add_summary(summary, step)

    def histo_summary(self, tag, values, step, bins=1000):
        """Log a histogram of the tensor of values."""
        # Create a histogram using numpy
        counts, bin_edges = np.histogram(values, bins=bins)
        # Fill the fields of the histogram proto
        hist = tf.HistogramProto()
        hist.min = float(np.min(values))
        hist.max = float(np.max(values))
        hist.num = int(np.prod(values.shape))
        hist.sum = float(np.sum(values))
        hist.sum_squares = float(np.sum(values ** 2))
        # TF's proto convention drops the left edge of the first bin
        bin_edges = bin_edges[1:]
        # Add bin edges and counts
        for edge in bin_edges:
            hist.bucket_limit.append(edge)
        for c in counts:
            hist.bucket.append(c)
        # Create and write Summary
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
        self.writer.add_summary(summary, step)
        self.writer.flush()

    def to_np(self, x):
        """Return the CPU numpy view of a tensor/Variable."""
        return x.data.cpu().numpy()

    def to_var(self, x):
        """Wrap a tensor in a Variable, moving it to GPU when available."""
        if torch.cuda.is_available():
            x = x.cuda()
        return Variable(x)

    def model_param_histo_summary(self, model, step):
        """Log histogram summaries of a model's parameters and their gradients.

        Parameters with no gradient yet are skipped.
        """
        for tag, value in model.named_parameters():
            if value.grad is None:
                continue
            tag = tag.replace('.', '/')
            tag = self.name + '/' + tag
            self.histo_summary(tag, self.to_np(value), step)
            self.histo_summary(tag + '/grad', self.to_np(value.grad), step)
| 3,687 | 30.521368 | 89 | py |
SFOM-DRO | SFOM-DRO-main/FairnessML/FML_FMD_Solver.py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import time
import gurobipy as gp
from gurobipy import GRB
from statistics import mean
from tqdm import tqdm
from FML_UBRegret import R_dim_FMD
from FML_utils import *
from copy import deepcopy
import torch
def FMD_x(x, p, step_alpha, X_train, y_train, s_norm, RHS):
    """One full-gradient descent step on the primal variable x.

    Evaluates the three weighted constraint functions, picks the worst
    (largest) one, and takes a gradient step on that constraint only.

    :param x: current primal iterate, shape (d,)
    :param p: per-constraint probability weights, shape (m, n) with m = 3
    :param step_alpha: step size for this iteration
    :param X_train: data matrix (n, d); y_train: binary labels (n,)
    :param s_norm: centered sensitive attribute, X_train[:, 0] - mean
    :param RHS: right-hand sides of the m constraints
    :return: (updated x, np.array of the m constraint values F_hat)

    Fix vs. original: ``np.log(1 + np.exp(z))`` and ``exp(z)/(1+exp(z))``
    overflow to inf/NaN for large logits; replaced by ``np.logaddexp(0, z)``
    and an exp(-|z|)-based sigmoid, which are exact and overflow-free.
    """
    n, d = X_train.shape
    m = 3
    F_hat = []
    """
    i_hat calculation
    """
    p_sum = np.sum(p, axis = 1)
    X_theta = X_train @ x
    # log(1 + exp(z)) computed as logaddexp(0, z): overflow-free for large z
    softplus = np.logaddexp(0, X_theta)
    for i in range(m):
        if i == 0:
            # weighted logistic-loss constraint
            f_val = p[i,:] @ (softplus - np.multiply(1-y_train, X_theta))
            f_val -= RHS[i] * p_sum[i]
            F_hat.append(f_val)
        elif i == 1:
            # fairness constraint, positive direction
            f_val = p[i,:] @ np.multiply(s_norm, X_theta)
            f_val -= RHS[i] * p_sum[i]
            F_hat.append(f_val)
        else:
            # fairness constraint, negative direction
            f_val = p[i, :] @ np.multiply(-s_norm, X_theta)
            f_val -= RHS[i] * p_sum[i]
            F_hat.append(f_val)
    i_hat = F_hat.index(max(F_hat))
    """
    g_t calculation
    """
    if i_hat == 0:
        # sigmoid(X_theta) via exp(-|z|): both branches stay in [0, 1]
        z = np.exp(-np.abs(X_theta))
        sigmoid = np.where(X_theta >= 0, 1.0 / (1.0 + z), z / (1.0 + z))
        g_t = X_train.T @ (np.multiply(p[i_hat,:], sigmoid - 1 + y_train))
    elif i_hat == 1:
        g_t = X_train.T @ (np.multiply(p[i_hat,:],s_norm))
    else:
        g_t = X_train.T @ (np.multiply(p[i_hat, :],-s_norm))
    """
    x_{t+1} calculation
    """
    x_update = x - step_alpha * g_t
    return x_update, np.array(F_hat)
def find_alpha_full(w, n, delta, rho, tol):
    """Bisection search for the mixing weight alpha used in the chi-square projection.

    Finds a root of the (decreasing) function g(alpha) so that the mixture
    (1 - alpha) * w + alpha / n, floored at delta / n, lands on the boundary
    of the chi-square divergence ball of radius rho (see FMD_p).

    :param w: current (unprojected) weight vector of length n
    :param delta: lower-bound parameter; weights are floored at delta / n
    :param rho: radius of the chi-square ball
    :param tol: terminate when |g(alpha)| <= tol
    """
    alpha_up = 1
    alpha_low = 0
    g_alpha = 1
    esp = 1e-10
    # First sort w vector
    while abs(g_alpha) > tol:
        alpha = (alpha_up + alpha_low) / 2
        # degenerate endpoints: return immediately (1 - alpha appears in
        # denominators below)
        if alpha <= esp or 1 - alpha <= esp:
            return alpha
        w_threshold = (delta - alpha) / (n * (1 - alpha))
        w_alpha = np.where(w >= w_threshold, w, 0)
        I_alpha = np.count_nonzero(w_alpha) # Size of set I(alpha)
        w_sum = np.sum(w_alpha) # sum of w in I(alpha)
        w_sum_square = np.sum(w_alpha ** 2) # sum of squares of w in I(alpha)
        g_alpha = w_sum_square / 2 - w_sum / n + I_alpha * ((1 - alpha) ** 2 -\
                (1 - delta) ** 2) / (2 * (n ** 2) * ( 1 - alpha) ** 2) +\
                  (n * (1 - delta) ** 2 - 2 * rho) / (2 * (n ** 2) * (1 - alpha) ** 2)
        # g is decreasing in alpha: negative value means alpha is too large
        if g_alpha < 0:
            alpha_up = alpha
        else:
            alpha_low = alpha
    alpha = (alpha_up + alpha_low) / 2
    return alpha
def FMD_p(x, p, i, step_alpha, delta, rho, alpha_tol, X_train,y_train,s_norm, RHS):
    """One full-gradient ascent step on the dual weights p of constraint ``i``,
    followed by projection onto the (relaxed) chi-square uncertainty set.

    :param x: current primal iterate (d,)
    :param p: weight vector of constraint i, length n (updated in place and returned)
    :param i: constraint index (0 = logistic loss, 1/2 = +/- fairness)
    :param step_alpha: dual step size
    :param delta, rho: uncertainty-set parameters (floor delta/n, radius rho)
    :param alpha_tol: bisection tolerance passed to ``find_alpha_full``

    Fix vs. original: ``np.log(1 + np.exp(z))`` overflows for large z;
    replaced by the exact, overflow-free ``np.logaddexp(0, z)``.
    """
    n, d = X_train.shape
    X_theta = X_train @ x
    if i == 0:
        # log(1 + exp(z)) computed as logaddexp(0, z) for numerical stability
        grad_val = np.logaddexp(0, X_theta) - np.multiply(1-y_train,X_theta) - RHS[i]
    elif i == 1:
        grad_val = np.multiply(s_norm,X_theta) - RHS[i]
    else:
        grad_val = np.multiply(-s_norm, X_theta) - RHS[i]
    p += step_alpha * grad_val
    # Projection to our chi-square uncertainty set.
    # Here we don't use the efficient (tree-structured) projection scheme;
    # g'(alpha) is decreasing in alpha, so a plain bisection suffices.
    alpha = find_alpha_full(p, n, delta, rho, alpha_tol)
    # Mixture step: for i != I_t, p_{t+1} = (1 - alpha) * w_i + alpha / n
    p *= (1-alpha)
    p += alpha/n
    # Floor every weight at delta / n
    p = np.where(p>delta/n,p, delta/n)
    return p
#FMD Solver
def DRO_FMD(x_0, p_0, X_train, y_train, delta, rho, alpha_tol,\
            opt_low, opt_up, obj_tol, RHS, ss_type,dual_gap_option, dual_gap_freq,dual_gap_freq2, print_option=1,\
            K_test_flag=0, min_flag=1, feas_opt = 0, warm_start = 0):
    """Bisection-based DRO solver using full mirror descent (FMD) oracles.

    Outer loop: bisection on the objective value between ``opt_low`` and
    ``opt_up``; each bisection step solves a feasibility saddle-point problem
    with FMD_x / FMD_p until the duality gap falls below obj_tol / 2.

    :param x_0, p_0: initial primal iterate (d,) and dual weights (m, n)
    :param delta, rho, alpha_tol: chi-square uncertainty-set parameters
    :param RHS: constraint right-hand sides; RHS[0] is overwritten each
        bisection step with the current trial objective value
    :param ss_type: step-size schedule; only 'diminish' is handled here
    :param warm_start: reuse the previous bisection's averaged iterates as
        the next starting point (and switch to dual_gap_freq2)

    dual_gap_option: There are 4 different options for SP gap calcuation.
    dual_gap_freq: Frequency of computing the SP gap.
    min_flag: indicates whether our problem is minimization (=1) or maximization (=0).
    If dual_gap_option == 0: then don't calculate dual_gap and use absolute dual_gap_freq.
    If dual_gap_option == 1: then don't calculate dual_gap and use relative dual_gap_freq.
    # For this example only, if dual_gap_option == 0 or 1, then we also calculate the duality gap and see if it is less than equal to eps/2.
    We exclude duality gap calculation time from total_time. Also, turn off the hat_f calculation as we are not using for the algorithm of ICML.
    Turn on again for Journal Version later.
    If dual_gap_option == 2: then calculate dual_gap and use absolute dual_gap_freq.
    If dual_gap_option == 3: then calculate dual_gap and use relative dual_gap_freq.
    """
    n, d = X_train.shape
    s_norm = X_train[:,0] - np.mean(X_train[:,0])
    m = 3
    print('\n')
    print('************************************************************************************')
    print('*******************************Problem Description**********************************')
    print(' ')
    print('Number of constraints: %s, x dimension: %s ,Uncertainty Set Dimension: %s' % (m, d, n))
    print(' ')
    print('*************************************************************************************')
    print('*************************************************************************************')
    i_flag_count = 0
    if K_test_flag:
        print('We are doing sample size test here!')
    # Lipschitz-type constants used by the step-size / iteration-bound rule
    G = 0.25
    M = np.ones(3) * 0.25
    print('G:', G)
    print('M:', M)
    if ss_type == 'diminish':
        T, R_x, R_p, c_x, c_p = R_dim_FMD(d, n, G, M, rho, obj_tol)
        print("Max Iteration:", T)
        print('alg_type: FGD with i^*')
        # diminishing step sizes c / sqrt(t + 1), precomputed for all T steps
        ss_x_list = c_x * (np.sqrt(1 / (np.arange(T + 1) + 1)))
        ss_p_list = []
        for i in range(m):
            ss_p_list.append(c_p[i] * (np.sqrt(1 / (np.arange(T + 1) + 1))))
        print('Rx: %s' % R_x)
        print('Rp: %s' % R_p)
    bisection = []
    bisection_count = 0
    vartheta_list = []
    dual_gap_list = [] # each element is a list of dual gap at each bisection iteration
    iter_timer_list = [] # each element elapsed time per bisection
    real_T_list = [] # each element is a list of terminated iteration by dual gap condition at each
    # bisection iteration
    early_term_count = 0
    dual_gap_time = 0 #Measures the time used for calculating duality gap
    solved_flag = 0 #Only used when dual_gap_cal = 0
    total_tic = time.time()
    sanity_check = 0
    sanity_freq = 1000
    if dual_gap_option == 2 or dual_gap_option == 0:
        pass
    elif dual_gap_option ==3 or dual_gap_option == 1:
        # relative frequency: interpret dual_gap_freq as a fraction of T
        dual_gap_freq = int(T * dual_gap_freq)
        dual_gap_freq2 = int(T * dual_gap_freq2)
    #Set calculation option
    if dual_gap_option == 0 or dual_gap_option == 1:
        dual_gap_cal = 0
    else:
        dual_gap_cal = 1
    feas_flag = 0
    change_flag = 1
    # Implementing normal test
    if K_test_flag == 0:
        # outer bisection on the trial objective value
        while opt_up - opt_low > obj_tol and not feas_flag:
            iter_tic = time.time()
            feas_flag = feas_opt
            break_flag = 0
            bisection_count += 1
            # Change our objective function
            obj_val = (opt_up + opt_low) / 2
            RHS[0] = (-1 + 2 * min_flag) * obj_val
            if print_option:
                print('---------------------------')
                print('%s-th bisection iteration' % bisection_count)
                print('alg_type: FGD with i^*')
                print('---------------------------')
                print("current step objective value:", obj_val)
            #Change Dual_Freq_Gap if warm_startup == 1 and bisection_count > 1
            # if warm_start and bisection_count > 1:
            #     dual_gap_freq = 10
            if warm_start and bisection_count > 1 and change_flag:
                dual_gap_freq = dual_gap_freq2
                change_flag = 0
            # circular buffers of length dual_gap_freq holding recent iterates
            x = np.empty([dual_gap_freq, d])
            p = np.empty([dual_gap_freq, m, n])
            f_val = np.zeros([dual_gap_freq, m])
            f_val_ws = np.zeros(m)
            # Set initial point according to warm_start
            if warm_start:
                if bisection_count == 1:
                    pass
                else:
                    # restart from the previous bisection's weighted averages
                    x_0 = deepcopy(x_ws / ss_sum_x)
                    p_0 = deepcopy(p_ws / ss_sum_p)
            x[0, :] = x_0
            p[0, :, :] = p_0
            # Variables that is needed to update periodically.
            iter_count = 0
            ss_sum_x = 0
            ss_sum_p = 0 # This does not have to be np.zeros([m])
            x_ws = np.zeros(d)
            p_ws = np.zeros([m, n])
            tic = time.time()
            dual_gap = [] # List that contains duality gap in this bisection
            for t in range(T):
                # Now our step-size is not uniform. Make sure to change x_bar and p_bar according to our step size
                if (t+2) % dual_gap_freq == 0 and print_option:
                    toc = time.time()
                    print('=========================================')
                    print('%s-st iteration start time:' % (t + 1), toc - tic)
                x[(t+1)%dual_gap_freq,:], f_val[t%dual_gap_freq,:] = FMD_x(x[t%dual_gap_freq,:], p[t%dual_gap_freq,:,:],\
                                                            ss_x_list[t], X_train, y_train, s_norm, RHS)
                for i in range(m):
                    p[(t+1)%dual_gap_freq,i,:] = FMD_p(x[t%dual_gap_freq,:], p[t%dual_gap_freq,i,:], i, ss_p_list[i][t],\
                                            delta, rho, alpha_tol, X_train,y_train,s_norm, RHS)
                """
                Duality Gap Termination Condition(Implement when dual_flag = 1)
                """
                # Update bar_x and bar_p
                if dual_gap_cal == 0 and (t + 2) % dual_gap_freq == 0:
                    dual_gap_tic = time.time()
                    x_ws, ss_sum_x = bar_calculator_x(x_ws, x, dual_gap_freq, \
                                                      ss_x_list[
                                                      iter_count * dual_gap_freq:(iter_count + 1) * dual_gap_freq],
                                                      ss_sum_x)
                    p_ws, ss_sum_p = bar_calculator_p(p_ws, p, dual_gap_freq, \
                                                      ss_p_list[0][
                                                      iter_count * dual_gap_freq:(iter_count + 1) * dual_gap_freq],
                                                      ss_sum_p)
                    dual_gap_toc = time.time()
                    dual_gap_time += dual_gap_toc - dual_gap_tic
                    print('dual_gap_time:', dual_gap_time)
                # Calculate Dual gap
                if dual_gap_cal and (t+2) % dual_gap_freq == 0:
                    dual_gap_tic = time.time()
                    x_ws, ss_sum_x = bar_calculator_x(x_ws, x,dual_gap_freq,
                                            ss_x_list[iter_count * dual_gap_freq:(iter_count+1) * dual_gap_freq],ss_sum_x)
                    p_ws, ss_sum_p = bar_calculator_p(p_ws, p, dual_gap_freq,
                                            ss_p_list[0][iter_count * dual_gap_freq:(iter_count + 1) * dual_gap_freq],ss_sum_p)
                    sup_val = sup_pi(x_ws / ss_sum_x, X_train, y_train, s_norm, rho, delta,
                                     alpha_tol, RHS)
                    inf_val = inf_pi(p_ws / ss_sum_p, X_train, y_train, s_norm, RHS)
                    diff = sup_val - inf_val
                    dual_gap.append(diff)
                    dual_gap_toc = time.time()
                if dual_gap_cal and (t+2) % dual_gap_freq == 0 and print_option:
                    print("%s-st iteration duality gap:" % (t + 1), diff)
                    print("Dual Gap Calculation Time %s" % (dual_gap_toc - dual_gap_tic))
                    print("sup_val:", sup_val)
                    print("inf_val:", inf_val)
                    print('w_sum:',np.sum(p[(t+1)%dual_gap_freq,:,:],axis = 1)) #We need to turn this print option later.
                    p_temp = p[(t+1)%dual_gap_freq,:,:]
                    print('divergence:', np.sum((p_temp - 1/n)**2,axis=1) * n / 2)
                if (t + 1) % dual_gap_freq == 0:
                    iter_count += 1
                # If K_test_flag == 1, we don't use duality gap termination condition
                if dual_gap_cal and (t+2) % dual_gap_freq == 0 and diff <= obj_tol / 2:
                    real_t = t + 1
                    break_flag = 1
                    iter_toc = time.time()
                    dual_gap_list.append(dual_gap)
                    iter_timer_list.append(iter_toc - iter_tic)
                    real_T_list.append(t)
                    early_term_count += 1
                    if print_option:
                        print("=============================================")
                        if bisection_count == 11:
                            print("%s-th bisection iteration terminated early!!" % bisection_count)
                        elif bisection_count == 12:
                            print("%s-th bisection iteration terminated early!!" % bisection_count)
                        elif bisection_count == 13:
                            print("%s-th bisection iteration terminated early!!" % bisection_count)
                        elif bisection_count % 10 == 1:
                            print("%s-st bisection iteration terminated early!!" % bisection_count)
                        elif bisection_count % 10 == 2:
                            print("%s-nd bisection iteration terminated early!!" % bisection_count)
                        elif bisection_count % 10 == 3:
                            print("%s-rd bisection iteration terminated early!!" % bisection_count)
                        else:
                            print("%s-th bisection iteration terminated early!!" % bisection_count)
                        print("Terminated in %s iterations" % (t + 1))
                        print("Max iteration %s" % T)
                        print('x_bar:', x_ws/ss_sum_x)
                        # print('p_bar:', p_bar)
                        print('Duality Gap:', diff)
                        print("=============================================")
                    # feasibility decides which bisection bound moves
                    if pi_val(x_ws/ss_sum_x, p_ws/ss_sum_p, X_train, y_train, s_norm, RHS) > obj_tol / 2:
                        if min_flag:
                            opt_low = obj_val
                            bisection.append('low')
                        else:
                            opt_up = obj_val
                            bisection.append('up')
                    else:
                        if min_flag:
                            opt_up = obj_val
                            bisection.append('up')
                        else:
                            opt_low = obj_val
                            bisection.append('low')
                    break
                if dual_gap_cal and t == T - 1 and print_option:
                    real_t = T
                    x_ws, ss_sum_x = bar_calculator_x(x_ws, x, T + 1 - iter_count * dual_gap_freq, \
                                                      ss_x_list[iter_count * dual_gap_freq:], ss_sum_x)
                    p_ws, ss_sum_p = bar_calculator_p(p_ws, p, T + 1 - iter_count * dual_gap_freq, \
                                                      ss_p_list[0][iter_count * dual_gap_freq:], ss_sum_p)
                    sup_val = sup_pi(x_ws / ss_sum_x, X_train, y_train, s_norm, rho, delta,
                                     alpha_tol, RHS)
                    inf_val = inf_pi(p_ws / ss_sum_p, X_train, y_train, s_norm, RHS)
                    diff = sup_val - inf_val
                    dual_gap.append(diff)
                    print("%s-st iteration duality gap:" % (t + 1), diff)
                    print("sup_val:", sup_val)
                    print("inf_val:", inf_val)
            if dual_gap_cal == 0:
                real_t = T
                dual_gap_tic = time.time()
                # Calculate the duality gap at the last iteration if dual_gap_cal == 0
                x_ws, ss_sum_x = bar_calculator_x(x_ws, x, T + 1 - iter_count * dual_gap_freq, \
                                                  ss_x_list[iter_count * dual_gap_freq:], ss_sum_x)
                p_ws, ss_sum_p = bar_calculator_p(p_ws, p, T + 1 - iter_count * dual_gap_freq, \
                                                  ss_p_list[0][iter_count * dual_gap_freq:], ss_sum_p)
                sup_val = sup_pi(x_ws / ss_sum_x, X_train, y_train, s_norm, rho, delta, alpha_tol, RHS)
                inf_val = inf_pi(p_ws / ss_sum_p, X_train, y_train, s_norm, RHS)
                diff = sup_val - inf_val
                dual_gap.append(diff)
                print("%s-st iteration duality gap:" % T, diff)
                dual_gap_toc = time.time()
                dual_gap_time += dual_gap_toc - dual_gap_tic
                print('duality gap computation time:', dual_gap_time)
                if diff < obj_tol / 2:
                    solved_flag = 1
                dual_gap_list.append(dual_gap)
                real_T_list.append(t)
            if break_flag:
                continue
    total_toc = time.time()
    total_solved_time = total_toc - total_tic
    obj_val = (opt_up + opt_low) / 2
    print('Out of %s bisection iteration %s terminated early' % (bisection_count, early_term_count))
    print('Average iteration:', mean(real_T_list))
    print('==========================================')
    stat = Statistics(n, m, n, n, ss_type, obj_val, dual_gap_list,
                      iter_timer_list, \
                      total_solved_time, real_T_list, T, R_x, R_p, i_flag_count, 0,0,
                      solved_flag=solved_flag, dual_gap_time = dual_gap_time)
    # Update the last objective value
    # obj_val = (-1 + 2*min_flag) * obj_val
    return stat
def DRO_FMD_K_test_time(x_0, p_0,X_train, y_train, delta, rho, alpha_tol, opt_low, opt_up, obj_tol,RHS,
                        ss_type, dual_gap_option, dual_gap_freq, time_cap, time_freq, print_option=1, min_flag=1):
    """
    Function for comparing the convergence speed of SOFO and OFO based approach.
    We implement K_test_time with this function. At every time_freq, we calculate the dual gap and record
    its value.

    Runs a single (non-bisecting) FMD pass for ``time_cap`` seconds,
    snapshotting the weighted-average iterates roughly every ``time_freq``
    seconds; duality gaps for all snapshots are computed afterwards so the
    gap evaluation does not distort the timing.
    """
    n, d = X_train.shape
    s_norm = X_train[:,0] - np.mean(X_train[:,0])
    m = 3
    # Calculate coefficients
    print('\n')
    print('************************************************************************************')
    print('*******************************Problem Description**********************************')
    print(' ')
    print('Number of constraints: %s, x dimension: %s ,Uncertainty Set Dimension: %s' % (m, d, n))
    print(' ')
    print('*************************************************************************************')
    print('*************************************************************************************')
    i_flag_count = 0
    # We calculate G and M here
    G = 0.25
    M = np.ones(3) * 0.25
    print('G:', G)
    print('M:', M)
    if ss_type == 'diminish':
        T, R_x, R_p, c_x, c_p = R_dim_FMD(d, n, G, M, rho, obj_tol)
        print('alg_type: FGD with i^*')
        print("Max Iteration:", T)
        # time-capped run: pre-compute step sizes far beyond T
        T_max = 1e7
        ss_x_list = c_x * (np.sqrt(1 / (np.arange(T_max + 1) + 1)))
        obj_tol = 1e-7
        ss_p_list = []
        for i in range(m):
            ss_p_list.append(c_p[i] * (np.sqrt(1 / (np.arange(T_max + 1) + 1))))
        print('Rx: %s' % R_x)
        print('Rp: %s' % R_p)
    # This String List would be used on inf_pi function
    constr_list = []
    for i in range(m):
        constr_list.append('obj_' + str(i))
    bisection = []
    bisection_count = 0
    vartheta_list = []
    dual_gap_list = [] # each element is a list of dual gap at each bisection iteration
    iter_timer_list = [] # each element elapsed time per bisection
    real_T_list = [] # each element is a list of terminated iteration by dual gap condition at each
    # bisection iteration
    early_term_count = 0
    total_tic = time.time()
    # sample_freq = 10**5
    sanity_check = 0
    sanity_freq = 1000
    if dual_gap_option == 2 or dual_gap_option == 0:
        pass
    elif dual_gap_option == 3 or dual_gap_option == 1:
        dual_gap_freq = int(T * dual_gap_freq)
    # Set calculation option
    if dual_gap_option == 0 or dual_gap_option == 1:
        dual_gap_cal = 0
    else:
        dual_gap_cal = 1
    iter_tic = time.time()
    break_flag = 0
    bisection_count += 1
    # Change our objective function
    obj_val = (opt_up + opt_low) / 2
    RHS[0] = (-1 + 2 * min_flag) * obj_val
    if print_option:
        print('---------------------------')
        print('%s-th bisection iteration' % bisection_count)
        print('alg_type: FGD with i^*')
        print('---------------------------')
        print("current step objective value:", obj_val)
    # circular buffers of length dual_gap_freq holding recent iterates
    x = np.empty([dual_gap_freq, d])
    p = np.empty([dual_gap_freq, m, n])
    f_val = np.zeros([dual_gap_freq, m])
    f_val_ws = np.zeros(m)
    # Variables that is needed to update periodically.
    x[0, :] = x_0
    p[0, :, :] = p_0
    iter_count = 0
    ss_sum_x = 0
    ss_sum_p = 0 # This does not have to be np.zeros([m])
    x_ws = np.zeros(d)
    p_ws = np.zeros([m, n])
    # snapshots of the averaged iterates, one per elapsed time_freq window
    x_list = [x_0]
    p_list = [p_0]
    dual_gap = [] # List that contains duality gap in this bisection
    tic = time.time()
    toc = time.time()
    t = 0
    x_ws_list = []
    p_ws_list = []
    ss_sum_x_list = []
    ss_sum_p_list = []
    time_iter_count = 1
    while toc - tic < time_cap:
        toc = time.time()
        if print_option and toc - tic > time_iter_count * time_freq:
            print('=========================================')
            print('%s-st iteration start time:' % (t + 1), toc - tic)
            print('Time Iter Count: %s' % time_iter_count)
            x_list.append(x_ws/ss_sum_x)
            p_list.append(p_ws/ss_sum_p)
            time_iter_count += 1
        x[(t + 1) % dual_gap_freq, :], f_val[t % dual_gap_freq, :] = FMD_x(x[t % dual_gap_freq, :],
                                                                           p[t % dual_gap_freq, :, :], \
                                                                           ss_x_list[t], X_train, y_train, s_norm, RHS)
        for i in range(m):
            p[(t + 1) % dual_gap_freq, i, :] = FMD_p(x[t % dual_gap_freq, :], p[t % dual_gap_freq, i, :], i,
                                                     ss_p_list[i][t], \
                                                     delta, rho, alpha_tol, X_train, y_train, s_norm, RHS)
        """
        Duality Gap Termination Condition(Implement when dual_flag = 1)
        """
        # Calculate Dual gap
        if dual_gap_cal and (t + 2) % dual_gap_freq == 0:
            x_ws, ss_sum_x = bar_calculator_x(x_ws, x, dual_gap_freq, \
                                              ss_x_list[
                                              iter_count * dual_gap_freq:(iter_count + 1) * dual_gap_freq],
                                              ss_sum_x)
            p_ws, ss_sum_p = bar_calculator_p(p_ws, p, dual_gap_freq, \
                                              ss_p_list[0][
                                              iter_count * dual_gap_freq:(iter_count + 1) * dual_gap_freq],
                                              ss_sum_p)
        if (t + 1) % dual_gap_freq == 0:
            # f_val_ws += np.tensordot(f_val, ss_x_list[iter_count * dual_gap_freq: \
            #                                           (iter_count + 1) * dual_gap_freq], axes=(0, 0))
            iter_count += 1
        t+=1
    # evaluate duality gaps of all snapshots after the timed run finishes
    dual_gap_tic = time.time()
    for idx in range(len(x_list)):
        sup_val = sup_pi(x_list[idx], X_train, y_train, s_norm, rho, delta, alpha_tol, RHS)
        inf_val = inf_pi(p_list[idx], X_train, y_train, s_norm, RHS)
        diff = sup_val - inf_val
        if print_option:
            print('sup val:', sup_val)
            print('inf val:', inf_val)
            print('{}-th Dual Gap: {}'.format(idx, diff))
        dual_gap.append(diff)
    dual_gap_toc = time.time()
    dual_gap_time = dual_gap_toc - dual_gap_tic
    dual_gap_list.append(dual_gap)
    total_toc = time.time()
    total_solved_time = total_toc - total_tic
    FMD_stat = Statistics(n, m, n, n, ss_type, obj_val, dual_gap_list,
                          iter_timer_list, total_solved_time, real_T_list, T, R_x, R_p, i_flag_count, 0,0,solved_flag = 0, dual_gap_time=dual_gap_time)
    return FMD_stat
| 25,115 | 38.24375 | 151 | py |
PRSummarizer | PRSummarizer-master/prsum/decode.py | # encoding=utf-8
import os
import sys
import time
import copy
import torch
from . import utils
from .pointer_model import PointerEncoderDecoder
from prsum.dataset import data
from prsum.dataset.data import Vocab
from prsum.dataset.batcher import Batcher
from prsum.dataset.train_util import get_input_from_batch
from myrouge.rouge import Rouge
def get_rouge_ref_dir(decode_dir):
    """Path of the reference-summaries directory under ``decode_dir``."""
    ref_dir = os.path.join(decode_dir, 'rouge_ref')
    return ref_dir
def get_rouge_dec_dir(decode_dir):
    """Path of the decoded-summaries directory under ``decode_dir``."""
    dec_dir = os.path.join(decode_dir, 'rouge_dec_dir')
    return dec_dir
class Beam(object):
    """One beam-search hypothesis: token prefix, per-token log-probs and decoder state.

    ``ngram_set`` tracks trigrams emitted on this beam for duplicate
    filtering; it is ``None`` when trigram filtering is disabled.
    (Fixes: ``== None`` replaced with the ``is None`` identity check;
    ``is_dup_3gram`` simplified to a direct membership test.)
    """

    def __init__(self, tokens, log_probs, state, context, coverage, ngram_set):
        self.tokens = tokens        # decoded token ids, including START
        self.log_probs = log_probs  # log-prob of each token in `tokens`
        self.state = state          # decoder (h, c) state after the last token
        self.context = context      # last attention context vector
        self.coverage = coverage    # coverage vector (None when disabled)
        self.ngram_set = ngram_set  # emitted trigrams (None = filtering off)

    def extend(self, token, log_prob, state, context, coverage, new_3gram):
        """Return a new Beam with ``token`` appended; this beam is unchanged."""
        if self.ngram_set is None:
            ngram_set = None
        else:
            # shallow-copy so sibling beams do not share the updated set
            ngram_set = copy.copy(self.ngram_set)
            ngram_set.add(new_3gram)
        return Beam(tokens=self.tokens + [token],
                    log_probs=self.log_probs + [log_prob],
                    state=state,
                    context=context,
                    coverage=coverage,
                    ngram_set=ngram_set)

    def get_new_3gram(self, token):
        """Trigram formed by the last two decoded tokens plus ``token``."""
        return tuple(self.tokens[-2:] + [token])

    def is_dup_3gram(self, new_3gram):
        """True if ``new_3gram`` was already produced on this beam."""
        return new_3gram in self.ngram_set

    @property
    def latest_token(self):
        return self.tokens[-1]

    @property
    def avg_log_prob(self):
        return sum(self.log_probs) / len(self.tokens)
class BeamSearch(object):
    def __init__(self, params, model_file_path, data_file_prefix="test.", ngram_filter=False):
        """Set up decode directories, vocabulary, batcher and the model for beam search.

        :param params: config object (vocab_path, data_dir, beam_size, ...)
        :param model_file_path: checkpoint file; decode output goes to a
            sibling directory named '<prefix>decode_<model_name>'
        :param data_file_prefix: 'test.' for testing; anything else is
            treated as validation (where ngram_filter triggers a warning)
        :param ngram_filter: enable trigram-duplicate filtering; widens the
            candidate beam from 2x to 5x beam_size
        """
        if data_file_prefix != "test." and ngram_filter:
            print("Warning: Using ngram_filter when validating!")
        model_name = os.path.basename(model_file_path)
        dirname = os.path.dirname(model_file_path)
        self._decode_dir = os.path.join(dirname, data_file_prefix + 'decode_%s' % (model_name))
        self._rouge_ref_dir = get_rouge_ref_dir(self._decode_dir)
        self._rouge_dec_dir = get_rouge_dec_dir(self._decode_dir)
        for p in [self._decode_dir, self._rouge_ref_dir, self._rouge_dec_dir]:
            if not os.path.exists(p):
                os.mkdir(p)
        self.vocab = Vocab(params.vocab_path, params.vocab_size)
        decode_data_path = os.path.join(params.data_dir, data_file_prefix + params.data_file_suffix)
        # batch_size == beam_size: each "batch" is one example replicated per beam
        self.batcher = Batcher(params, decode_data_path, self.vocab, mode='decode',
                               batch_size=params.beam_size, single_pass=True)
        self.pad_id = self.vocab.word2id(data.PAD_TOKEN)
        assert (self.pad_id == 1)
        # NOTE(review): presumably gives the batcher's background threads
        # time to fill the queue before decoding starts — confirm
        time.sleep(10)
        self.model = PointerEncoderDecoder(params, model_file_path, pad_id=self.pad_id, is_eval=True)
        self.params = params
        self.ngram_filter = ngram_filter
        if not self.ngram_filter:
            self.cand_beam_size = self.params.beam_size * 2
        else:
            self.cand_beam_size = self.params.beam_size * 5
def sort_beams(self, beams):
return sorted(beams, key=lambda h: h.avg_log_prob, reverse=True)
    def decode(self):
        """Decode the whole test set, write ROUGE files, and return the ROUGE result dict.

        For each batch: run beam search, convert the best hypothesis' ids
        back to words (handling pointer-generator OOVs), and write the
        reference/decoded pair to the ROUGE directories.  Afterwards both a
        python-rouge score and the external ROUGE evaluation are computed.
        """
        start = time.time()
        counter = 0
        refs = []
        hyps = []
        batch = self.batcher.next_batch()
        while batch is not None:
            # Run beam search to get best Hypothesis
            best_summary = self.beam_search(batch)
            # Extract the output ids from the hypothesis and convert back to words
            # (tokens[0] is the START symbol, so it is skipped)
            output_ids = [int(t) for t in best_summary.tokens[1:]]
            article_oovs = batch.art_oovs[0] if self.params.pointer_gen else None
            decoded_words = data.outputids2decwords(output_ids, self.vocab, article_oovs, self.params.pointer_gen)
            # the batch holds beam_size duplicates of one example, so we just need one of them
            original_abstract = batch.original_abstracts[0]
            utils.write_for_rouge(original_abstract, decoded_words, counter,
                                  self._rouge_ref_dir, self._rouge_dec_dir)
            hyps.append(utils.prepare_rouge_text(" ".join(decoded_words)))
            refs.append(utils.prepare_rouge_text(original_abstract))
            counter += 1
            if counter % self.params.eval_print_interval == 0:
                print('%d example in %d sec' % (counter, time.time() - start))
                sys.stdout.flush()
                start = time.time()
            batch = self.batcher.next_batch()
        print("Decoder has finished reading dataset for single_pass.")
        rouge = Rouge()
        scores = rouge.get_scores(hyps, refs, avg=True)
        print("Scores of python rouge:")
        print(scores)
        print("Now starting ROUGE eval...")
        results = utils.rouge_eval(self._rouge_ref_dir, self._rouge_dec_dir)
        utils.rouge_log(results, self._decode_dir)
        result_dict = utils.rouge_result_to_dict(results)
        utils.dump_json_file(os.path.join(self._decode_dir, 'ROUGE_results.json'), result_dict)
        return result_dict
    def beam_search(self, batch):
        """Beam-search decode one example.

        The batch contains `beam_size` copies of the same article (so the
        encoder runs once per beam slot). Returns the single best finished
        (or best unfinished, if none finished) Hypothesis.
        """
        device = torch.device(self.params.eval_device)
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extended, extend_vocab_zeros, c_t_1, coverage_t_0 = \
            get_input_from_batch(self.params, batch, self.params.eval_device)
        c_t_1 = c_t_1.unsqueeze(1)
        enc_outputs, enc_features, s_0 = self.model.encoder(enc_batch, enc_lens)
        dec_h, dec_c = s_0  # 1 x batch_size x 2*hidden_size
        # batch_size x 2*hidden_size
        dec_h = dec_h.squeeze()
        dec_c = dec_c.squeeze()
        # initialize beams; all start from the START token and beam slot 0's state
        # TODO: maybe we only need one beam since only beams[0] will be used later at step 0
        beams = [Beam(tokens=[self.vocab.word2id(data.START_DECODING)],
                      log_probs=[0.0],
                      state=(dec_h[0], dec_c[0]),
                      context=c_t_1[0],
                      coverage=(coverage_t_0[0] if self.params.is_coverage else None),
                      ngram_set=set() if self.ngram_filter else None)
                 for _ in range(self.params.beam_size)]
        results = []
        steps = 0
        # stop when enough hypotheses finished, the step cap is hit, or the
        # decoded length reaches the encoder input length
        while steps < self.params.max_dec_steps and len(results) < self.params.beam_size and steps < enc_lens.max():
            latest_tokens = [h.latest_token for h in beams]
            # map in-article OOV ids (>= vocab size) back to UNK for the embedding lookup
            latest_tokens = [t if t < self.vocab.size() else self.vocab.word2id(data.UNKNOWN_TOKEN) \
                             for t in latest_tokens]
            y_t_1 = torch.LongTensor(latest_tokens).to(device)
            all_state_h = []
            all_state_c = []
            all_context = []
            for h in beams:
                state_h, state_c = h.state
                all_state_h.append(state_h)
                all_state_c.append(state_c)
                all_context.append(h.context)
            # re-batch the per-beam decoder states for a single decoder call
            s_t_1 = (torch.stack(all_state_h, 0).unsqueeze(0), torch.stack(all_state_c, 0).unsqueeze(0))
            c_t_1 = torch.stack(all_context, 0)
            coverage_t = None
            if self.params.is_coverage:
                all_coverage = []
                for h in beams:
                    all_coverage.append(h.coverage)
                coverage_t = torch.stack(all_coverage, 0)
            final_dist, s_t, c_t, attn_dist, coverage_t_plus = self.model.decoder(y_t_1, s_t_1, c_t_1, enc_outputs,
                enc_features, enc_padding_mask, extend_vocab_zeros, enc_batch_extended, coverage_t)
            log_probs = torch.log(final_dist)
            # for debug
            if torch.isnan(log_probs).any():
                print("Error: log probs contains NAN!")
            # take extra candidates so n-gram-filtered beams can still fill up
            topk_log_probs, topk_ids = torch.topk(log_probs, self.cand_beam_size)
            dec_h, dec_c = s_t
            dec_h = dec_h.squeeze()
            dec_c = dec_c.squeeze()
            all_beams = []
            # at step 0 all beams are identical, so only expand the first one
            num_orig_beams = 1 if steps == 0 else len(beams)
            for i in range(num_orig_beams):
                h = beams[i]
                state_i = (dec_h[i], dec_c[i])
                context_i = c_t[i]
                coverage_i = (coverage_t_plus[i] if self.params.is_coverage else None)
                cur_count = 0
                # we assume that all beam can get no_dup 3-grams in self.cand_beam_size
                for j in range(self.cand_beam_size):  # for each of the top can_beam_size hyps:
                    cur_token = topk_ids[i,j].item()
                    new_3gram = None
                    if self.ngram_filter:
                        # skip extensions that would repeat a trigram
                        new_3gram = h.get_new_3gram(cur_token)
                        if h.is_dup_3gram(new_3gram):
                            continue
                    new_beam = h.extend(token=topk_ids[i, j].item(),
                                        log_prob=topk_log_probs[i, j].item(),
                                        state=state_i,
                                        context=context_i,
                                        coverage=coverage_i,
                                        new_3gram=new_3gram)
                    all_beams.append(new_beam)
                    cur_count += 1
                    if cur_count == self.params.beam_size:
                        break
            if len(all_beams) < 4:
                # NOTE(review): formats the beam list itself, not its length —
                # likely meant len(all_beams); confirm before relying on this log
                print("Error: Only find {} candidate beams.".format(all_beams))
            beams = []
            for h in self.sort_beams(all_beams):
                if h.latest_token == self.vocab.word2id(data.STOP_DECODING):
                    # finished hypotheses shorter than min_dec_steps are discarded
                    if steps >= self.params.min_dec_steps:
                        results.append(h)
                else:
                    beams.append(h)
                if len(beams) == self.params.beam_size or len(results) == self.params.beam_size:
                    break
            steps += 1
        if len(results) == 0:
            results = beams
        beams_sorted = self.sort_beams(results)
        return beams_sorted[0]
| 10,179 | 38.305019 | 116 | py |
PRSummarizer | PRSummarizer-master/prsum/pointer_model.py | # encoding=utf-8
"""Attentional Encoder Decoder Model"""
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from .utils import try_load_state
class Encoder(nn.Module):
    """Bidirectional single-layer LSTM encoder of the pointer-generator.

    Produces per-token encoder outputs, their pre-computed attention
    features (W_h * h_i, Equation 1 of the pointer-generator paper), and
    the reduced initial decoder state.
    """
    def __init__(self, hps, pad_id=1, batch_first=True):
        super().__init__()
        self._hps = hps
        self._batch_first = batch_first
        self.embedding = nn.Embedding(self._hps.vocab_size, self._hps.embed_dim, padding_idx=pad_id)
        # bidirectional 1-layer LSTM
        self.lstm = nn.LSTM(self._hps.embed_dim, self._hps.hidden_dim, num_layers=1, batch_first=self._batch_first,
                            bidirectional=True)
        # W_h in Equation 1
        self.W_h = nn.Linear(2 * self._hps.hidden_dim, 2 * self._hps.hidden_dim, bias=False)
        # Reduce the dim of the last hidden state
        self.reduce_h = nn.Linear(2 * self._hps.hidden_dim, self._hps.hidden_dim)
        self.reduce_c = nn.Linear(2 * self._hps.hidden_dim, self._hps.hidden_dim)
    def save_gradients(self, module, grad_input, grad_output):
        # Backward-hook callback for gradient debugging.
        # NOTE(review): `self.gradients` is never initialised in __init__ in
        # this file — registering this hook would raise AttributeError; verify.
        self.gradients[module.name] = {
            'grad_input': grad_input,
            'grad_output': grad_output
        }
    def forward(self, enc_inps, enc_seq_lens):
        """
        :param enc_inps: batch_size x max_seq_len
        :param seq_lens: batch_size
        :return:
            enc_outputs: batch_size x max_seq_len x 2*hidden_dim
            enc_features: batch_size x max_seq_len x 2*hidden_dim
            s_0: tuple of two batch_size x 2*hidden_dim
        """
        # batch_size x max_seq_len -> batch_size x max_seq_len x embed_dim
        enc_embeddings = self.embedding(enc_inps)
        # batch_size x max_seq_len x embed_dim -> packed sequences
        # (inputs are assumed sorted by decreasing length for packing)
        packed_inps = pack_padded_sequence(enc_embeddings, enc_seq_lens, batch_first=self._batch_first)
        # enc_h_t & enc_c_t: 2 x batch_size x hidden_dim
        packed_outputs, (enc_h_t, enc_c_t) = self.lstm(packed_inps)
        # packed sequences -> batch_size x max_seq_len x 2*hidden_dim
        enc_outputs, _ = pad_packed_sequence(packed_outputs, batch_first=self._batch_first)
        # batch_size x max_seq_len x 2*hidden_dim
        enc_features = self.W_h(enc_outputs)
        # 2 x batch_size x hidden_dim -> batch_size x 2*hidden_dim
        # (concatenate the two directions' final states per example)
        enc_h_t = enc_h_t.transpose(0, 1).reshape(-1, 2 * self._hps.hidden_dim)
        enc_c_t = enc_c_t.transpose(0, 1).reshape(-1, 2 * self._hps.hidden_dim)
        # 1 x batch_size x 2*hidden_dim
        reduced_h_t = F.relu(self.reduce_h(enc_h_t)).unsqueeze(0)
        reduced_c_t = F.relu(self.reduce_c(enc_c_t)).unsqueeze(0)
        s_0 = (reduced_h_t, reduced_c_t)
        return enc_outputs, enc_features, s_0
class AttentionDecoder(nn.Module):
    """Single-step attentional LSTM decoder with optional pointer/coverage.

    Procedure
    dec_embeddings = embedding(y_t_1)
    lstm_input = [c_t_1, dec_embedding]
    lstm_output, s_t = lstm(lstm_input, s_t_1)
    # enc_seq_len
    e_t = v^T tanh(enc_features + W_s*s_t + b_{attn})
    a_t = softmax(e_t)
    Mask pads
    # element-wise
    c_t = sum(a_t * enc_outputs, -1)
    vocab_dist = softmax(V'(V[lstm_output,c_t] + b) + b')
    """
    def __init__(self, hps, pad_id=1, batch_first=True):
        super().__init__()
        self._hps = hps
        self._batch_first = batch_first
        # W_s and v of the additive attention (Equation 1)
        self.W_s = nn.Linear(2 * self._hps.hidden_dim, 2 * self._hps.hidden_dim)
        self.v = nn.Linear(2 * self._hps.hidden_dim, 1, bias=False)
        self.embedding = nn.Embedding(self._hps.vocab_size, self._hps.embed_dim, padding_idx=pad_id)
        # concatenate x with c_t_1
        self.x_context = nn.Linear(self._hps.embed_dim + 2 * self._hps.hidden_dim, self._hps.embed_dim)
        # uni-directional
        self.lstm = nn.LSTM(self._hps.embed_dim, self._hps.hidden_dim, num_layers=1, batch_first=self._batch_first)
        # NOTE: different from atulkum's implemenation, I concatenate s_t instead of lstm_output with h_t_star
        # which conforms to Equation 4 of the original paper
        self.V1 = nn.Linear(3 * self._hps.hidden_dim, self._hps.hidden_dim)
        # self.V1 = nn.Linear(4 * self._hps.hidden_dim, self._hps.hidden_dim)
        self.V2 = nn.Linear(self._hps.hidden_dim, self._hps.vocab_size)
        if self._hps.pointer_gen:
            # project c_t + s_t + x_t
            self.p_gen_linear = nn.Linear(2 * 2 * self._hps.hidden_dim + self._hps.embed_dim, 1)
        if self._hps.is_coverage:
            self.W_cover = nn.Linear(1, 2*self._hps.hidden_dim, bias=False)
    def save_gradients(self, module, grad_input, grad_output):
        # Backward-hook callback for gradient debugging.
        # NOTE(review): `self.gradients` is never initialised in __init__ in
        # this file — registering this hook would raise AttributeError; verify.
        self.gradients[module.name] = {
            'grad_input': grad_input,
            'grad_output': grad_output
        }
    def tensor_hook(self, name):
        # Returns a tensor hook that stores the gradient under `name`.
        # NOTE(review): also depends on the uninitialised `self.gradients`.
        def hook(grad):
            self.gradients[name] = grad
        return hook
    def forward(self, y_t_1, s_t_1, c_t_1, enc_outputs, enc_features, enc_pad_mask, extend_vocab_zeros,
                enc_inps_extended, coverage_t):
        """
        :param y_t_1: batch_size x 1
        :param s_t_1: (1 x batch_size x hidden_dim, 1 x batch_size x hidden_dim)
        :param c_t_1: batch_size x 1 x 2*hidden_dim
        :param enc_outputs: batch_size x max_seq_len x 2*hidden_dim
        :param enc_features: batch_size x max_seq_len x 2*hidden_dim
        :param enc_pad_mask: batch_size x max_seq_len
        :param extend_vocab_zeros: batch_size x extend_vocab_size or None
        :param enc_inps_extended: batch_size x enc_max_seq_len
        :param coverage_t: batch_size x enc_max_seq, the coverage vector of the current step, which is the sum
            of the attention_dist from step 0 to step t-1
        :return:
            vocab_dist: batch_size x vocab_size
            s_t: (1 x batch_size x hidden_size, 1 x batch_size x hidden_size)
            c_t: batch_size x 1 x 2*hidden_dim
            attn_dist
            coverage_t
        """
        # STEP 1: calculate s_t
        enc_max_seq_len = enc_features.size()[1]
        # batch_size -> batch_size x 1 x embed_dim
        dec_embeddings = self.embedding(y_t_1.view(-1, 1))
        # batch_size x 1 x (embed_dim+2*hidden_state) -> batch_size x 1 x embed_dim
        lstm_input = self.x_context( torch.cat([dec_embeddings, c_t_1], dim=-1) )
        # lstm_output: batch_size x 1 x hidden_dim
        # s_t: (1 x batch_size x hidden_size, 1 x batch_size x hidden_size)
        lstm_output, s_t = self.lstm(lstm_input, s_t_1)
        # STEP2: calculate c_t, i.e., context vector
        # 1 x batch_size x 2*hidden_size
        s_t_cat = torch.cat(s_t, -1)
        # batch_size x 1 x 2*hidden_size
        s_t_cat_T = s_t_cat.transpose(0, 1)
        # 1 x batch_size x 2*hidden_state -> batch_size x enc_max_seq_len x 2*hidden_state
        s_t_hat = s_t_cat_T.expand(-1, enc_max_seq_len, -1).contiguous()
        if self._hps.is_coverage:
            # batch x enc_max_seq x 1
            coverage_t_hat = coverage_t.unsqueeze(2)
            # batch x enc_max_seq x different_dim -> batch_size x enc_max_seq_len
            e_t = self.v(torch.tanh( enc_features + self.W_s(s_t_hat) + self.W_cover(coverage_t_hat))).squeeze(-1)
        else:
            # batch x enc_max_seq x different_dim -> batch_size x enc_max_seq_len
            e_t = self.v(torch.tanh( enc_features + self.W_s(s_t_hat) )).squeeze(-1)
        # batch_size x enc_max_seq_len
        a_t_1 = F.softmax(e_t, dim=-1)
        # mask pads in enc_inps
        a_t = a_t_1 * enc_pad_mask
        # each item is the sum of that batch
        # batch_size x 1
        normalizer = a_t.sum(dim=-1, keepdim=True)
        # sum of a_i * hi can be calculated using bmm
        # batch_size x enc_max_seq_len (renormalised after masking)
        attn_dist = a_t / (normalizer + self._hps.eps)
        if self._hps.is_coverage:
            # batch_size x enc_max_seq_len — coverage accumulates attention
            coverage_t = coverage_t + attn_dist
        # batch_size x 1 x enc_max_seq_len bmm batch_size x enc_max_seq_len x 2*hidden_dim
        # -> batch x 1 x 2*hidden_dim
        # c_t is the context vector
        c_t = torch.bmm(attn_dist.unsqueeze(1), enc_outputs)
        # STEP3: calculate the vocab_dist using lstm_output and c_t
        # NOTE: in abisee's implementation, they use lstm_output instead of s_t
        # this is different from the equations in the original paper
        # batch x 3*hidden_dim
        dec_output = torch.cat((lstm_output, c_t), dim=-1).squeeze(1)
        # dec_output = torch.cat( (s_t_cat.squeeze(0), h_t_star), -1 )
        # batch_size x vocab_size
        vocab_dist = F.softmax( self.V2( self.V1(dec_output) ), dim=-1)
        # Add pointer mechanism
        if self._hps.pointer_gen:
            p_gen_input = torch.cat((c_t, s_t_cat_T, dec_embeddings), dim=-1)
            # batch x 1 x 1 -> batch x 1
            p_gen = torch.sigmoid(self.p_gen_linear(p_gen_input)).view(-1, 1)
            # batch x vocab_size — generation part, weighted by p_gen
            vocab_dist_ = p_gen * vocab_dist
            # batch x extend_vocab_size
            if extend_vocab_zeros is not None:
                vocab_dist_ = torch.cat( (vocab_dist_, extend_vocab_zeros), dim=-1)
            # batch x enc_max_seq_len — copy part, weighted by (1 - p_gen)
            attn_dist_ = (1 - p_gen) * attn_dist
            # enc_inps_extended: batch x enc_max_seq_len
            # scatter copy probabilities onto their (extended-vocab) token ids
            final_dist = vocab_dist_.scatter_add(1, enc_inps_extended, attn_dist_)
        else:
            final_dist = vocab_dist
        return final_dist, s_t, c_t, attn_dist, coverage_t
class PointerEncoderDecoder(object):
    """Bundle of Encoder + AttentionDecoder with tied embeddings.

    Moves both modules to the configured device, optionally switches them to
    eval mode, and optionally restores weights from a checkpoint file.
    """
    def __init__(self, hps, model_file_path, pad_id=1, is_eval=False):
        if is_eval:
            device = hps.eval_device
        else:
            device = hps.device
        print(device)
        encoder = Encoder(hps, pad_id)
        decoder = AttentionDecoder(hps, pad_id)
        # share one embedding matrix between encoder and decoder
        decoder.embedding.weight = encoder.embedding.weight
        if is_eval:
            encoder = encoder.eval()
            decoder = decoder.eval()
        device = torch.device(device)
        encoder = encoder.to(device)
        decoder = decoder.to(device)
        self.encoder = encoder
        self.decoder = decoder
        if model_file_path is not None:
            state = try_load_state(model_file_path)
            self.encoder.load_state_dict(state['encoder_state_dict'])
            # strict=False: a coverage run may add W_cover, which a
            # non-coverage checkpoint does not contain
            self.decoder.load_state_dict(state['decoder_state_dict'], strict=False)
    @property
    def parameters(self):
        # Combined parameter list of both modules, for a single optimizer.
        return list(self.encoder.parameters()) + list(self.decoder.parameters())
| 10,543 | 42.570248 | 115 | py |
PRSummarizer | PRSummarizer-master/prsum/utils.py | # encoding=utf-8
import re
import os
import sys
import csv
import json
import math
import time
import torch
import tempfile
from myrouge.rouge import Rouge
from nltk import sent_tokenize
import subprocess as sp
from typing import List
import logging
from pyrouge import Rouge155
from pyrouge.utils import log
import tensorflow as tf
# Path to the ROUGE-1.5.5 installation; perl-based evaluation needs it.
_ROUGE_PATH = os.environ.get('ROUGE')
if _ROUGE_PATH is None:
    print('Warning: ROUGE is not configured')
# Dataset CSV fields can be very large; lift the default field-size cap.
csv.field_size_limit(sys.maxsize)
class Params():
    """Hyper-parameter container backed by a JSON file.

    Every top-level key of the JSON document becomes an instance attribute.
    """
    def __init__(self, json_path):
        self.update(json_path)
    def save(self, json_path):
        """Persist the current parameters as pretty-printed JSON."""
        with open(json_path, 'w') as fh:
            json.dump(self.__dict__, fh, indent=4)
    def update(self, json_path):
        """Loads parameters from json file"""
        with open(json_path) as fh:
            self.__dict__.update(json.load(fh))
    @property
    def dict(self):
        """Gives dict-like access to Params instance by `params.dict['learning_rate']"""
        return self.__dict__
def use_cuda(device):
    """Whether `device` names a CUDA device that is actually available.

    Falsy inputs (None, empty string) are returned unchanged, mirroring the
    short-circuit semantics of the original boolean chain.
    """
    if not device:
        return device
    return device.startswith("cuda") and torch.cuda.is_available()
def load_json_file(path :str):
    """Deserialize and return the JSON document stored at `path`."""
    with open(path, 'r') as fp:
        return json.load(fp)
def dump_json_file(path :str, obj :object):
    """Serialize `obj` as JSON into the file at `path` (overwriting it)."""
    with open(path, 'w') as fp:
        json.dump(obj, fp)
def load_csv_file(path :str, fieldnames :List = None) -> List[dict]:
    """Read a CSV file into a list of dict rows.

    When explicit `fieldnames` are supplied, the file's first row is assumed
    to be a header and is skipped.
    """
    with open(path, 'r', encoding='utf-8') as csvfile:
        reader = csv.DictReader(csvfile, fieldnames=fieldnames)
        if fieldnames is not None:
            next(reader)  # drop the header row
        return [row for row in reader]
def dump_csv_file(path :str, obj : List[dict]) -> None:
obj = list(obj)
with open(path, 'w') as csvfile:
out_field_names = list(obj[0].keys())
writer = csv.DictWriter(csvfile, out_field_names, quoting=csv.QUOTE_NONNUMERIC)
writer.writeheader()
writer.writerows(obj)
def calc_running_avg_loss(loss, running_avg_loss, summary_writer, step, decay=0.99):
    """Exponentially-decayed running average of the loss, logged to TensorBoard.

    A running average of 0 is treated as "first iteration" and seeded with
    the raw loss; the average is clipped at 12 before logging.
    """
    if running_avg_loss == 0:  # on the first iteration just take the loss
        running_avg_loss = loss
    else:
        running_avg_loss = decay * running_avg_loss + (1 - decay) * loss
    running_avg_loss = min(running_avg_loss, 12)  # clip
    summary = tf.Summary()
    tag_name = 'running_avg_loss/decay=%f' % (decay)
    summary.value.add(tag=tag_name, simple_value=running_avg_loss)
    summary_writer.add_summary(summary, step)
    return running_avg_loss
def print_results(article, abstract, decoded_output):
    """Print an article with its reference and generated summaries to stdout."""
    print("")
    # Bug fix: the %-style placeholders were previously passed as a second
    # print() argument ("'ARTICLE: %s', article"), so the literal '%s' was
    # printed. Interpolate them instead.
    print('ARTICLE: %s' % article)
    print('REFERENCE SUMMARY: %s' % abstract)
    print('GENERATED SUMMARY: %s' % decoded_output)
    print("")
def make_html_safe(s):
    """Escape angle brackets so the text can be embedded in ROUGE's HTML files.

    Bug fix: the replacements were no-ops ('<' -> '<'); replace with the
    HTML entities the function name and callers (pyrouge writes the text
    into HTML files) clearly intend.
    """
    s = s.replace("<", "&lt;")
    s = s.replace(">", "&gt;")
    return s
def rouge_eval(ref_dir, dec_dir, dec_pattern='(\d+)_decoded.txt', ref_pattern='#ID#_reference.txt',
               cmd="-c 95 -r 1000 -n 2 -m", system_id=1):
    """Run the official perl ROUGE-1.5.5 over a pair of directories.

    :param ref_dir: directory of reference summaries (one file per example)
    :param dec_dir: directory of decoded summaries (one file per example)
    :param dec_pattern: regex matching decoded filenames; group 1 is the id
    :param ref_pattern: reference filename template, '#ID#' replaced by the id
    :param cmd: extra ROUGE-1.5.5.pl flags (defaults report ROUGE-1/2/L)
    :param system_id: system id written into the ROUGE settings.xml
    :return: raw textual output of ROUGE-1.5.5.pl
    :raises AssertionError: when the ROUGE env var was not configured
    """
    # only print rouge 1 2 L
    assert _ROUGE_PATH is not None
    log.get_global_console_logger().setLevel(logging.WARNING)
    with tempfile.TemporaryDirectory() as tmp_dir:
        tmp_dec_dir = os.path.join(tmp_dir, 'dec')
        tmp_ref_dir = os.path.join(tmp_dir, 'ref')
        # pyrouge wraps each summary file into the HTML format ROUGE expects
        Rouge155.convert_summaries_to_rouge_format(
            dec_dir, tmp_dec_dir)
        Rouge155.convert_summaries_to_rouge_format(
            ref_dir, tmp_ref_dir)
        Rouge155.write_config_static(
            tmp_dec_dir, dec_pattern,
            tmp_ref_dir, ref_pattern,
            os.path.join(tmp_dir, 'settings.xml'),
            system_id
        )
        cmd = (os.path.join(_ROUGE_PATH, 'ROUGE-1.5.5.pl')
               + ' -e {} '.format(os.path.join(_ROUGE_PATH, 'data'))
               + cmd
               + ' -a {}'.format(os.path.join(tmp_dir, 'settings.xml')))
        output = sp.check_output(cmd.split(' '), universal_newlines=True)
    return output
def rouge_result_to_dict(rouge_result):
    """Parse the raw ROUGE-1.5.5 textual output into a result dict."""
    parser = Rouge155()
    return parser.output_to_dict(rouge_result)
def rouge_log(results, dir_to_write):
    """Persist the raw ROUGE output under dir_to_write/ROUGE_results.txt."""
    results_file = os.path.join(dir_to_write, "ROUGE_results.txt")
    print("Writing final ROUGE results to %s..." % (results_file))
    with open(results_file, "w") as out:
        out.write(results)
def write_for_rouge(reference, decoded_words, ex_index, _rouge_ref_dir, _rouge_dec_dir):
    """Join decoded tokens into a single string and write the (reference,
    decoded) pair to the ROUGE eval directories.

    Both texts are raw (not sentence-tokenised); tokenisation happens in
    write_for_rouge_raw via nltk.
    """
    write_for_rouge_raw(reference, " ".join(decoded_words), ex_index,
                        _rouge_ref_dir, _rouge_dec_dir)
def replace_nl(text):
    """Turn every '<nl>' marker (and its surrounding whitespace) into a newline."""
    marker = re.compile(r'\s*<nl>\s*')
    return marker.sub('\n', text)
def get_ref_file(ref_dir, index):
    """Path of the zero-padded reference file for example `index`."""
    filename = "{:06d}_reference.txt".format(index)
    return os.path.join(ref_dir, filename)
def get_dec_file(dec_dir, index):
    """Path of the zero-padded decoded-summary file for example `index`."""
    filename = "{:06d}_decoded.txt".format(index)
    return os.path.join(dec_dir, filename)
def prepare_rouge_text(text):
    """Normalise text for ROUGE: restore newlines from '<nl>' markers,
    HTML-escape it, and put one sentence per line."""
    # pyrouge calls a perl script that puts the data into HTML files,
    # hence the HTML escaping.
    escaped = make_html_safe(replace_nl(text))
    return "\n".join(sent_tokenize(escaped))
def write_for_rouge_raw(reference, decoded_abstract, ex_index, _rouge_ref_dir, _rouge_dec_dir):
    """Write one (reference, decoded) pair to the ROUGE eval directories.

    Both inputs are raw strings; they are sentence-tokenised (via
    prepare_rouge_text) before being written, one file per example.
    """
    decoded_text = prepare_rouge_text(decoded_abstract)
    reference_text = prepare_rouge_text(reference)
    with open(get_ref_file(_rouge_ref_dir, ex_index), "w") as out:
        out.write(reference_text)
    with open(get_dec_file(_rouge_dec_dir, ex_index), "w") as out:
        out.write(decoded_text)
    # print("Wrote example %i to file" % ex_index)
def make_rouge_dir(decode_dir):
    """Create (if needed) the reference/decoded subdirs used by ROUGE and
    return their paths as (ref_dir, dec_dir)."""
    rouge_ref_dir = os.path.join(decode_dir, 'rouge_ref')
    rouge_dec_dir = os.path.join(decode_dir, 'rouge_dec_dir')
    for path in (decode_dir, rouge_ref_dir, rouge_dec_dir):
        os.makedirs(path, exist_ok=True)
    return rouge_ref_dir, rouge_dec_dir
def try_load_state(model_file_path):
    """Load a torch checkpoint onto CPU, retrying while the file may still
    be mid-write by the training process.

    Retries up to 10 times with a 30s sleep between attempts, then raises
    FileNotFoundError.

    Bug fix: the original used a bare `except:`, which also swallowed
    KeyboardInterrupt/SystemExit and kept the caller stuck in the retry
    loop; narrowed to `except Exception`.
    """
    for _attempt in range(10):
        try:
            # map_location keeps GPU-saved tensors loadable on CPU-only hosts
            return torch.load(model_file_path, map_location=lambda storage, location: storage)
        except Exception:
            time.sleep(30)
    raise FileNotFoundError(model_file_path)
def as_minutes(s):
    """Format a duration given in seconds as 'Xm Ys'."""
    minutes = s // 60
    seconds = math.ceil(s % 60)  # round partial seconds up
    return "{}m {}s".format(minutes, seconds)
def time_since(since, percent):
    """Given a start timestamp and the fraction of work done, return the
    (elapsed, estimated-remaining) durations as formatted strings."""
    elapsed = time.time() - since
    estimated_total = elapsed / percent
    return as_minutes(elapsed), as_minutes(estimated_total - elapsed)
def sentence_end(text):
    """True when `text` ends with terminal punctuation ('.', '!' or '?')."""
    # re.DOTALL lets '.*' span embedded newlines
    return bool(re.match(r'.*[.!?]$', text, re.DOTALL))
def ext_art_preprocess(text):
    """Flatten an article into one string of sentence-terminated pieces.

    The article is '<para-sep>'-separated; the first paragraph is itself
    '<cm-sep>'-separated commit messages. Empty pieces are dropped and a
    trailing ' .' is appended to any piece lacking end punctuation.
    """
    paras = text.split(' <para-sep> ')
    pieces = paras[0].split(' <cm-sep> ') + paras[1:]
    cleaned = []
    for piece in pieces:
        piece = piece.strip()
        # although we already add '.' when preprocessing, guard again here
        if not piece:
            continue
        if not sentence_end(piece):
            piece = piece + ' .'
        cleaned.append(piece)
    return " ".join(cleaned)
def ext_art_sent_tokenize(text):
    """Preprocess an article and split it into sentences via nltk."""
    return sent_tokenize(ext_art_preprocess(text))
def ext_abs_sent_tokenize(text):
    """Sentence-tokenize an abstract (plain nltk tokenisation, no preprocessing)."""
    sentences = sent_tokenize(text)
    return sentences
| 7,582 | 27.294776 | 99 | py |
PRSummarizer | PRSummarizer-master/prsum/prsum.py | # -*- coding: utf-8 -*-
import re
import os
import csv
import sys
import time
import random
import torch
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from torch.distributions.categorical import Categorical
from torch.optim import Adam
import torch.multiprocessing as mp
import fire
from . import utils
from .decode import BeamSearch
from .pointer_model import PointerEncoderDecoder
from prsum.dataset import data, batcher
from prsum.dataset.train_util import get_input_from_batch, get_output_from_batch
from .utils import dump_json_file
from myrouge.rouge import Rouge
# deterministic
# Fix RNG seeds and force deterministic cuDNN kernels so runs are reproducible.
random.seed(318)
torch.manual_seed(318)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Dataset CSV fields can be very large; lift the default field-size cap.
csv.field_size_limit(sys.maxsize)
def reward_function(decoded_seqs, ref_seqs, device):
    """Per-example ROUGE-L F1 rewards for RL training.

    Scores the whole batch at once; if that fails, falls back to scoring
    pair-by-pair and assigns 0.0 to any pair that still cannot be scored.

    :param decoded_seqs: list of decoded summary strings
    :param ref_seqs: list of reference summary strings (same length)
    :param device: torch device for the returned tensor
    :return: float tensor of shape (batch,) with ROUGE-L F1 per example
    """
    decoded_seqs = [utils.prepare_rouge_text(seq) for seq in decoded_seqs]
    ref_seqs = [utils.prepare_rouge_text(seq) for seq in ref_seqs]
    rouge = Rouge()
    try:
        scores = rouge.get_scores(decoded_seqs, ref_seqs)
    except Exception:
        print("Rouge failed for multi sentence evaluation.. Finding exact pair")
        scores = []
        for i in range(len(decoded_seqs)):
            try:
                score = rouge.get_scores(decoded_seqs[i], ref_seqs[i])
            except Exception:
                print("Error occured at:")
                print("decoded_sents:", decoded_seqs[i])
                print("original_sents:", ref_seqs[i])
                # unscorable pair: contribute zero reward
                score = [{"rouge-l": {"f": 0.0}}]
            scores.append(score[0])
        sys.stdout.flush()
    rouge_l_f1 = [score["rouge-l"]["f"] for score in scores]
    rouge_l_f1 = torch.tensor(rouge_l_f1, dtype=torch.float, device=device)
    return rouge_l_f1
class Procedure(object):
    """Base class of all process-related classes in order to share similar process"""
    def __init__(self, params, is_eval=False):
        """Build the vocab and a train/eval Batcher; cache special token ids."""
        self.vocab = data.Vocab(params.vocab_path, params.vocab_size)
        train_data_path = os.path.join(params.data_dir, "train." + params.data_file_suffix)
        eval_data_path = os.path.join(params.data_dir, "valid." + params.data_file_suffix)
        if not is_eval:
            self.batcher = batcher.Batcher(params, train_data_path, self.vocab, mode='train',
                                           batch_size=params.batch_size, single_pass=False)
        else:
            self.batcher = batcher.Batcher(params, eval_data_path, self.vocab, mode='eval',
                                           batch_size=params.batch_size, single_pass=True)
        self.pad_id = self.vocab.word2id(data.PAD_TOKEN)
        self.end_id = self.vocab.word2id(data.STOP_DECODING)
        self.unk_id = self.vocab.word2id(data.UNKNOWN_TOKEN)
        # downstream code (e.g. Embedding padding_idx) relies on PAD == 1
        assert(self.pad_id == 1)
        self.dump_dir = None
        self.params = params
        self.is_eval = is_eval
    def infer_one_batch(self, batch, iter=None, is_eval=False):
        """Run one batch through the model.

        Encodes once, then mixes the maximum-likelihood loss and (optionally)
        the RL loss via params.rl_weight. Performs an optimizer step unless
        is_eval. Returns (loss_value, reward_value) as python floats.
        """
        if is_eval:
            device = self.params.eval_device
        else:
            device = self.params.device
        device = torch.device(device)
        train_ml = getattr(self.params, "train_ml", True)
        train_rl = getattr(self.params, "train_rl", False)
        # c_t_1: batch_size x 2*hidden_dim
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extended, extend_vocab_zeros, c_t_1, coverage_0 = \
            get_input_from_batch(self.params, batch, device)
        # get encoder_output
        enc_outputs, enc_features, s_0 = self.model.encoder(enc_batch, enc_lens)
        # everything the decoder passes need, shared by the ML and RL paths
        enc_package = [s_0, c_t_1, coverage_0, enc_outputs, enc_features, enc_padding_mask, extend_vocab_zeros,
                       enc_batch_extended]
        if train_ml:
            ml_loss = self.infer_one_batch_ml(batch, *enc_package, iter, device=device)
        else:
            ml_loss = torch.tensor(0.0, dtype=torch.float, device=device)
        if train_rl:
            rl_loss, reward = self.infer_one_batch_rl(batch, *enc_package, iter, device=device)
        else:
            rl_loss = torch.tensor(0.0, dtype=torch.float, device=device)
            reward = torch.tensor(0.0, dtype=torch.float, device=device)
        rl_weight = getattr(self.params, "rl_weight", 0.0)
        loss = rl_weight * rl_loss + (1 - rl_weight) * ml_loss
        if not is_eval:
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
        return loss.item(), reward.item()
    def infer_one_batch_ml(self, batch, s_0, c_t_1, coverage_0, enc_outputs, enc_features, enc_padding_mask,
                           extend_vocab_zeros, enc_batch_extended, iter, device):
        """Teacher-forced (with probability teacher_forcing_ratio) negative
        log-likelihood loss for one batch; adds the coverage loss when
        params.is_coverage. Returns a scalar loss tensor.
        """
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(self.params, batch, device)
        s_t_1 = s_0
        c_t_1 = c_t_1.unsqueeze(1)
        coverage_t = coverage_0
        teacher_forcing_ratio = getattr(self.params, "teacher_forcing_ratio", 1.0)
        # one coin flip per batch: feed gold tokens or the model's own output
        teacher_forcing = True if random.random() < teacher_forcing_ratio else False
        step_losses = []
        for di in range(min(max_dec_len, self.params.max_dec_steps)):
            if di == 0 or teacher_forcing:
                y_t_1 = dec_batch[:, di]
            else:
                y_t_1 = y_t
            # first we have coverage_t_1, then we have a_t
            final_dist, s_t_1, c_t_1, attn_dist, coverage_t_plus = self.model.decoder(y_t_1, s_t_1, c_t_1, enc_outputs,
                enc_features, enc_padding_mask, extend_vocab_zeros, enc_batch_extended, coverage_t)
            # if pointer_gen is True, the target will use the extend_vocab
            target = target_batch[:, di]
            # batch
            y_t = final_dist.max(1)[1]
            # batch x extend_vocab_size -> batch x 1 -> batch
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + self.params.eps)
            if self.params.is_coverage:
                # batch — penalise attending where coverage is already high
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage_t), dim=1)
                step_loss = step_loss + self.params.cov_loss_wt * step_coverage_loss
                coverage_t = coverage_t_plus
            # zero out loss at padded decoder positions
            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)
        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        # normalise by each example's true decoded length
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)
        return loss
    def infer_one_batch_rl(self, batch, s_0, c_t_1, coverage_0, enc_outputs, enc_features, enc_padding_mask,
                           extend_vocab_zeros, enc_batch_extended, iter, device):
        """Self-critical policy-gradient loss: sampled sequence reward minus
        greedy-baseline reward, weighted by the sample's log-probabilities.
        Returns (rl_loss, mean_sample_reward).
        """
        if self.params.is_coverage == True:
            raise ValueError("do not support training rl loss with coverage now")
        s_t_1 = s_0
        c_t_1 = c_t_1.unsqueeze(1)
        coverage_t = coverage_0
        # decode one batch
        decode_input = [batch, s_t_1, c_t_1, enc_outputs, enc_features, enc_padding_mask, extend_vocab_zeros,
                        enc_batch_extended, coverage_t, device]
        sample_seqs, rl_log_probs = self.decode_one_batch_rl(False, *decode_input)
        # greedy baseline: no gradients needed
        with torch.autograd.no_grad():
            baseline_seqs, _ = self.decode_one_batch_rl(True, *decode_input)
        sample_reward = reward_function(sample_seqs, batch.original_abstracts, device=device)
        baseline_reward = reward_function(baseline_seqs, batch.original_abstracts, device=device)
        rl_loss = -(sample_reward - baseline_reward) * rl_log_probs
        rl_loss = torch.mean(rl_loss)
        batch_reward = torch.mean(sample_reward)
        return rl_loss, batch_reward
    def decode_one_batch_rl(self, greedy, batch, s_t_1, c_t_1, enc_outputs, enc_features, enc_padding_mask,
                            extend_vocab_zeros, enc_batch_extended, coverage_t, device):
        """Decode one batch either greedily (baseline) or by sampling.

        Returns (decoded_seqs, log_probs); log_probs is the length-normalised
        log-probability per example when sampling, an empty list when greedy.
        """
        # No teacher forcing for RL
        dec_batch, _, max_dec_len, dec_lens_var, target_batch = get_output_from_batch(self.params, batch, device)
        log_probs = []
        decode_ids = []
        # we create the dec_padding_mask at the runtime
        dec_padding_mask = []
        y_t = dec_batch[:, 0]
        mask_t = torch.ones(len(enc_outputs), dtype=torch.long, device=device)
        # there is at least one token in the decoded seqs, which is STOP_DECODING
        for di in range(min(max_dec_len, self.params.max_dec_steps)):
            y_t_1 = y_t
            # first we have coverage_t_1, then we have a_t
            final_dist, s_t_1, c_t_1, attn_dist, coverage_t_plus = self.model.decoder(y_t_1, s_t_1, c_t_1, enc_outputs,
                                                                                      enc_features, enc_padding_mask,
                                                                                      extend_vocab_zeros,
                                                                                      enc_batch_extended, coverage_t)
            if not greedy:
                # sampling
                multi_dist = Categorical(final_dist)
                y_t = multi_dist.sample()
                log_prob = multi_dist.log_prob(y_t)
                log_probs.append(log_prob)
                y_t = y_t.detach()
                dec_padding_mask.append(mask_t.detach().clone())
                # once END was emitted, zero the mask for all following steps
                mask_t[(mask_t == 1) + (y_t == self.end_id) == 2] = 0
            else:
                # baseline
                y_t = final_dist.max(1)[1]
                y_t = y_t.detach()
            decode_ids.append(y_t)
            # for next input: map article-OOV ids back to UNK
            is_oov = (y_t >= self.vocab.size()).long()
            y_t = (1 - is_oov) * y_t + is_oov * self.unk_id
        decode_ids = torch.stack(decode_ids, 1)
        if not greedy:
            dec_padding_mask = torch.stack(dec_padding_mask, 1).float()
            log_probs = torch.stack(log_probs, 1) * dec_padding_mask
            dec_lens = dec_padding_mask.sum(1)
            log_probs = log_probs.sum(1) / dec_lens
            if (dec_lens == 0).any():
                print("Decode lengths encounter zero!")
                print(dec_lens)
        decoded_seqs = []
        for i in range(len(enc_outputs)):
            dec_ids = decode_ids[i].cpu().numpy()
            article_oovs = batch.art_oovs[i]
            dec_words = data.outputids2decwords(dec_ids, self.vocab, article_oovs,
                                                self.params.pointer_gen)
            if len(dec_words) < 2:
                # degenerate decode: substitute a dummy string so ROUGE can run
                dec_seq = "xxx"
            else:
                dec_seq = " ".join(dec_words)
            decoded_seqs.append(dec_seq)
        return decoded_seqs, log_probs
class Train(Procedure):
    """Training procedure: sets up directories, TensorBoard logging, the
    optimizer, and runs the main training loop with periodic checkpointing."""
    def __init__(self, params, model_file_path=None):
        super().__init__(params, is_eval=False)
        # wait for creating threads
        time.sleep(10)
        cur_time = int(time.time())
        if model_file_path is None:
            train_dir = os.path.join(self.params.model_root, 'train_%d' % (cur_time))
        else:
            # model_file_path is expected to be train_dir/model/model_name
            train_dir = os.path.dirname( os.path.dirname(os.path.abspath(model_file_path)) )
        if not os.path.exists(train_dir):
            os.makedirs(train_dir)
        self.model_dir = os.path.join(train_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
        # dump the params
        param_path = os.path.join(train_dir, 'params_{}.json'.format(cur_time))
        print("Dump hyper-parameters to {}.".format(param_path))
        params.save(param_path)
        self.model_file_path = model_file_path
        self.summary_writer = tf.summary.FileWriter(train_dir)
        self.summary_flush_interval = self.params.summary_flush_interval
        self.print_interval = self.params.print_interval
        self.save_interval = self.params.save_interval
    def _get_save_path(self, iter):
        """Checkpoint/param filenames encoding mode (rl/coverage), iteration
        and timestamp. Returns (model_save_path, param_save_path)."""
        cur_time = time.time()
        if self.params.is_coverage:
            prefix = 'coverage_model_{}_{}'
            param_prefix = 'coverage_params_{}_{}'
        else:
            prefix = 'model_{}_{}'
            param_prefix = 'params_{}_{}'
        if self.params.train_rl:
            prefix = 'rl_' + prefix
            param_prefix = 'rl_' + param_prefix
        model_save_path = os.path.join(self.model_dir, prefix.format(iter, cur_time))
        param_save_path = os.path.join(self.model_dir, param_prefix.format(iter, cur_time))
        return model_save_path, param_save_path
    def save_model(self, iter, running_avg_loss, model_save_path):
        """Serialise encoder/decoder/optimizer state plus training progress."""
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        torch.save(state, model_save_path)
    def setup_train(self, model_file_path):
        """Build the model and optimizer, optionally restoring a checkpoint.

        Returns (start_iter, start_loss) to resume the training loop from.
        """
        # check params
        rl_weight = getattr(self.params, "rl_weight", 0.0)
        if self.params.train_rl and rl_weight == 0.0:
            raise ValueError("Train RL is True, while rl_weight is 0.0. Contradiction!")
        self.model = PointerEncoderDecoder(self.params, model_file_path, pad_id=self.pad_id)
        initial_lr = self.params.lr if not self.params.is_coverage else self.params.lr_coverage
        optim_name = self.params.optim.lower()
        if optim_name == "adam":
            self.optimizer = Adam(self.model.parameters, lr=initial_lr)
        else:
            raise ValueError("Unknow optim {}".format(optim_name))
        start_iter, start_loss = 0, 0
        if model_file_path is not None:
            state = torch.load(model_file_path, map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']
            train_rl = self.params.train_rl
            reoptim = self.params.reoptim
            if not train_rl and reoptim:
                raise ValueError("Not training rl but recreate the optimizer")
            # We need not to load the checkpoint if we use coverage to retrain
            if not self.params.is_coverage and not reoptim:
                print("Load the optimizer...")
                sys.stdout.flush()
                self.optimizer.load_state_dict(state['optimizer'])
                if utils.use_cuda(self.params.device):
                    device = torch.device(self.params.device)
                    # move restored optimizer tensors onto the training device
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.to(device)
        return start_iter, start_loss
    def train_one_batch(self, batch, iter):
        """One optimisation step; returns (loss, reward)."""
        return self.infer_one_batch(batch, iter, is_eval=False)
    def train(self, n_iters=None, eval=False):
        """
        :param n_iters: the iterations of training process
        :param eval: whether to decode/evaluate each saved checkpoint in a
            background process
        :return:
            do not return anything, but will print logs and store models
        """
        eval_processes = []
        if n_iters == None:
            n_iters = self.params.max_iterations
        iter, running_avg_loss = self.setup_train(self.model_file_path)
        start_iter = iter
        total_iter = n_iters - start_iter
        start = time.time()
        start_time = start
        print("start training.")
        sys.stdout.flush()
        loss_total = 0
        reward_total = 0
        while iter < n_iters:
            batch = self.batcher.next_batch()
            loss, reward = self.train_one_batch(batch, iter)
            running_avg_loss = utils.calc_running_avg_loss(loss, running_avg_loss, self.summary_writer, iter)
            loss_total += loss
            reward_total += reward
            iter += 1
            if iter % self.summary_flush_interval == 0:
                self.summary_writer.flush()
            if iter % self.print_interval == 0:
                elapse, remain = utils.time_since(start_time, (iter - start_iter) / total_iter)
                iter_num = iter - start_iter
                print('Train steps %d, seconds for %d batch: %.2f , loss: %f, reward: %f, elapse: %s, remain: %s' %
                      (iter, self.print_interval,time.time() - start, loss_total/iter_num, reward_total/iter_num,
                       elapse, remain))
                sys.stdout.flush()
                start = time.time()
            if np.isnan(loss) or np.isnan(running_avg_loss):
                raise ValueError("Loss becomes nan")
            if iter % self.save_interval == 0:
                model_save_path, param_save_path = self._get_save_path(iter)
                self.save_model(iter, running_avg_loss, model_save_path)
                self.params.save(param_save_path)
                if eval:
                    kwargs = {
                        "params": self.params,
                        "model_path": model_save_path,
                        "ngram_filter": False,
                        "data_file_prefix": "valid."
                    }
                    # p = mp.Process(target=PRSum.eval_raw, kwargs=kwargs)
                    # decode instead of evaluate
                    p = mp.Process(target=PRSum.decode_raw, kwargs=kwargs)
                    eval_processes.append(p)
                    p.start()
        # wait for all background decode processes before returning
        for cur_p in eval_processes:
            cur_p.join()
        print("end training.")
class PRSum(object):
    """Command-line entry points, dispatched via ``fire.Fire(PRSum)`` in
    ``__main__``: training, beam-search decoding, validation-based model
    selection, and utilities to repair/collect validation decode results."""

    @classmethod
    def train(cls, param_path, model_path=None, eval=False):
        """
        :param param_path: path of the params file
        :param model_path: path of the model to be loaded, None means train from scratch
        :param eval: whether to evaluate after saving
        """
        if model_path is not None:
            print("Try to resume from trained model {}".format(model_path))
        params = utils.Params(param_path)
        train_processor = Train(params, model_file_path=model_path)
        train_processor.train(eval=eval)

    @classmethod
    def decode(cls, param_path, model_path, ngram_filter, data_file_prefix="test."):
        """Load params from *param_path* and run beam-search decoding."""
        params = utils.Params(param_path)
        cls.decode_raw(params, model_path, ngram_filter, data_file_prefix)

    @classmethod
    def decode_raw(cls, params, model_path, ngram_filter, data_file_prefix):
        """Run beam-search decoding with an already-built Params object."""
        decode_processor = BeamSearch(params, model_path, data_file_prefix=data_file_prefix, ngram_filter=ngram_filter)
        decode_processor.decode()

    @staticmethod
    def find_model_path(model_dir, model_name_pattern, iter, files):
        """Return the path of the file in *files* whose name starts with
        ``model_name_pattern.format(iter)``; ties are broken by taking the
        lexicographically largest name. Raises IndexError if nothing matches."""
        model_prefix = model_name_pattern.format(iter)
        names = [f for f in files if f.startswith(model_prefix)]
        name = sorted(names)[-1]
        model_path = os.path.join(model_dir, name)
        return model_path

    @classmethod
    def select_model(cls, param_path, model_pattern, start_iter=3000, end_iter=36000):
        """Decode the validation set with every checkpoint in
        [start_iter, end_iter) and dump ROUGE-L F scores, best first.

        :param param_path: path of the params file
        :param model_pattern: e.g. ``models/train_xxx/model_{}_``
        :param start_iter: first iteration to evaluate
        :param end_iter: iteration bound (exclusive)
        :return: nothing; writes valid_decode_results.json next to the models
        """
        model_dir = os.path.dirname(model_pattern)
        model_name_pattern = os.path.basename(model_pattern)
        assert(os.path.isdir(model_dir))
        files = [f for f in os.listdir(model_dir)
                 if os.path.isfile(os.path.join(model_dir, f))]
        params = utils.Params(param_path)
        save_interval = params.save_interval
        l_f_scores = {}
        for iter in range(start_iter, end_iter, save_interval):
            model_path = cls.find_model_path(model_dir, model_name_pattern, iter, files)
            print("Param path {}".format(param_path))
            print("Model path {}".format(model_path))
            decode_processor = BeamSearch(params, model_path, data_file_prefix="valid.", ngram_filter=False)
            result_dict = decode_processor.decode()
            l_f_scores[iter] = result_dict['rouge_l_f_score']
        items = sorted(l_f_scores.items(), key=lambda x: x[1], reverse=True)
        output_file = os.path.join(model_dir, 'valid_decode_results.json')
        dump_json_file(output_file, items)
        print(items)

    @classmethod
    def repair_missing_valid(cls, param_path, model_prefix, start_iter=1000, end_iter=36000):
        """Re-decode validation for iterations whose decode results are missing.

        Fixed: this method used ``cls`` but was missing the ``@classmethod``
        decorator, unlike its sibling methods.

        :param param_path: path of the params file
        :param model_prefix: e.g. ``models/train_xxx/valid.decode_model_``
        :param start_iter: first iteration expected
        :param end_iter: iteration bound (exclusive)
        """
        model_dir = os.path.dirname(model_prefix)
        model_name_prefix = os.path.basename(model_prefix)
        # e.g. 'valid.decode_model_' -> 'model_{}_', for find_model_path.
        model_pattern = '_'.join(model_name_prefix.split('_')[1:]) + "{}_"
        assert (os.path.isdir(model_dir))
        files = [f for f in os.listdir(model_dir)
                 if os.path.isfile(os.path.join(model_dir, f))]
        iters, _, _ = find_valid_results(model_dir, model_name_prefix)
        params = utils.Params(param_path)
        save_iterval = params.save_interval
        all_iters = set(range(start_iter, end_iter, save_iterval))
        missing_iters = all_iters - set(iters)
        print("Missing Iters: {}".format(missing_iters))
        for iter in tqdm(missing_iters):
            model_path = cls.find_model_path(model_dir, model_pattern, iter, files)
            print("Param path {}".format(param_path))
            print("Model path {}".format(model_path))
            decode_processor = BeamSearch(params, model_path, data_file_prefix="valid.", ngram_filter=False)
            decode_processor.decode()
        print("Done!")

    @classmethod
    def collect_valid_results(cls, model_prefix, interval=1000, start_iter=1000, end_iter=26000, type="ml"):
        """Gather ROUGE-L F scores of all validation decode runs into one json.

        :param model_prefix: like models/train_xxx/valid.decode_model_, the prefix of the dir storing the ROUGE results
        :param interval: gap between saved iterations
        :param start_iter: first iteration expected
        :param end_iter: iteration bound (exclusive)
        :param type: tag prepended to the output json file name
        :raises FileNotFoundError: if any expected decode result is absent
        :raises ValueError: on duplicate or missing iterations
        """
        model_dir = os.path.dirname(model_prefix)
        model_name_prefix = os.path.basename(model_prefix)
        assert (os.path.isdir(model_dir))
        scores = {}
        iters, result_files, missing_iters = find_valid_results(model_dir, model_name_prefix)
        if missing_iters:
            raise FileNotFoundError("Missing iters {}".format(missing_iters))
        for iter, result_json_file in zip(iters, result_files):
            rouge_l_f = utils.load_json_file(result_json_file)['rouge_l_f_score']
            if iter in scores:
                raise ValueError("already found iteration {}".format(iter))
            scores[iter] = rouge_l_f
        sorted_iters = list(range(start_iter, end_iter, interval))
        sorted_keys = sorted(list(scores.keys()))
        if sorted_iters != sorted_keys:
            print("Miss some iterations!\n"
                  "Expect: {}\n"
                  "Get: {}".format(sorted_iters, sorted_keys))
            raise ValueError("Miss iterations")
        items = sorted(scores.items(), key=lambda x: x[1], reverse=True)
        output_file = os.path.join(model_dir, type + '.valid_decode_results.json')
        dump_json_file(output_file, items)
        print(items)
def find_valid_results(model_dir, model_name_prefix):
    """Scan *model_dir* for validation-decode directories named
    ``<prefix><iter>_<score>`` and report which iterations have a
    ``ROUGE_results.json`` and which are missing it.

    Returns (iters, result_files, missing_iters).
    """
    regex = re.compile(model_name_prefix + r'(\d+)_[\d.]+')
    iters, result_files, missing_iters = [], [], []
    for entry in os.listdir(model_dir):
        match = regex.match(entry)
        if not match:
            continue
        iteration = int(match.group(1))
        rouge_json = os.path.join(model_dir, entry, 'ROUGE_results.json')
        if os.path.isfile(rouge_json):
            iters.append(iteration)
            result_files.append(rouge_json)
        else:
            # decode dir exists but its ROUGE results were never produced
            missing_iters.append(iteration)
    return iters, result_files, missing_iters
if __name__ == '__main__':
    # 'spawn' start method for the background decode processes launched in
    # Train.train() — presumably to avoid fork-related CUDA issues in the
    # children (TODO confirm).
    mp.set_start_method('spawn', force=True)
    # Expose PRSum's classmethods as CLI subcommands.
    fire.Fire(PRSum)
| 24,173 | 41.634921 | 123 | py |
PRSummarizer | PRSummarizer-master/prsum/dataset/train_util.py | import numpy as np
import torch
def get_input_from_batch(params, batch, device):
    """Convert the encoder-side numpy arrays of *batch* into tensors on *device*.

    Returns (enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab,
    extra_zeros, c_t_1, coverage); the last four may be None depending on
    ``params.pointer_gen`` / ``params.is_coverage``.
    """
    dev = torch.device(device)
    batch_size = len(batch.enc_lens)
    enc_batch = torch.from_numpy(batch.enc_batch).long().to(dev)
    enc_padding_mask = torch.from_numpy(batch.enc_padding_mask).float().to(dev)
    enc_batch_extend_vocab, extra_zeros = None, None
    if params.pointer_gen:
        enc_batch_extend_vocab = torch.from_numpy(batch.enc_batch_extend_vocab).long().to(dev)
        # max_art_oovs is the max over all the article oov lists in the batch
        if batch.max_art_oovs > 0:
            extra_zeros = torch.zeros((batch_size, batch.max_art_oovs)).to(dev)
    # initial context vector for the decoder attention
    c_t_1 = torch.zeros((batch_size, 2 * params.hidden_dim)).to(dev)
    coverage = torch.zeros(enc_batch.size()).to(dev) if params.is_coverage else None
    return enc_batch, enc_padding_mask, batch.enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage
def get_output_from_batch(params, batch, device):
    """Move the decoder-side arrays of *batch* onto *device* as tensors.

    Returns (dec_batch, dec_padding_mask, max_dec_len, dec_lens_var,
    target_batch). *params* is unused but kept for interface symmetry with
    ``get_input_from_batch``.
    """
    dev = torch.device(device)
    dec_batch = torch.from_numpy(batch.dec_batch).long().to(dev)
    dec_padding_mask = torch.from_numpy(batch.dec_padding_mask).float().to(dev)
    dec_lens_var = torch.from_numpy(batch.dec_lens).float().to(dev)
    target_batch = torch.from_numpy(batch.target_batch).long().to(dev)
    return dec_batch, dec_padding_mask, np.max(batch.dec_lens), dec_lens_var, target_batch
| 1,522 | 36.146341 | 102 | py |
Smile-Pruning | Smile-Pruning-master/src/main.py | """This code is based on the official PyTorch ImageNet training example 'main.py'. Commit ID: 69d2798, 04/23/2020.
URL: https://github.com/pytorch/examples/tree/master/imagenet
"""
import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
# --- @mst
import copy, math
import numpy as np
from importlib import import_module
from dataset import Data
# import sys; sys.path.insert(0, '../UtilsHub/smilelogging')
from smilelogging import Logger
from utils import get_n_params, get_n_flops, get_n_params_, get_n_flops_
from utils import add_noise_to_model, compute_jacobian, _weights_init_orthogonal, get_jacobian_singular_values
from utils import Dataset_lmdb_batch
from utils import AverageMeter, ProgressMeter, adjust_learning_rate, accuracy
from model import model_dict, is_single_branch
from option import args, check_args
pjoin = os.path.join
original_print = print  # keep a handle to the builtin print before any override
logger = Logger(args)
accprint = logger.log_printer.accprint  # printer for accuracy lines
netprint = logger.netprint  # printer for network architecture dumps
logger.misc = {}  # extra scratch storage attached to the logger
logger.passer = {}  # values handed to pipeline modules (criterion, input_size, ... — see main_worker)
class MyDataParallel(torch.nn.DataParallel):
    """DataParallel that forwards unknown attribute lookups to the wrapped
    module, so wrapped models keep their original attribute interface."""

    def __getattr__(self, name):
        try:
            return super().__getattr__(name)
        except AttributeError:
            pass
        # Not found on the wrapper itself — delegate to the inner module.
        return getattr(self.module, name)
# ---
def main():
    """Entry point: seed/validate global args, then launch main_worker either
    directly or once per GPU via torch.multiprocessing when distributed."""
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])
    # Distributed if more than one node OR explicitly requested.
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
    """Per-process worker: builds data loaders, the model, and then runs the
    configured pruning/training pipeline modules one after another."""
    args.gpu = gpu
    # Set up pipeline: parse "module[:config],module[:config],..." spec.
    from method_modules import module_dict
    from utils import update_args_from_file
    pipeline, configs = get_pipeline(args.pipeline)
    # Data loading code
    train_sampler = None
    if args.dataset not in ['imagenet', 'imagenet_subset_200']:
        # NOTE(review): `loader` is only defined on this branch, but it is
        # passed to every pipeline module at the bottom — on the imagenet
        # branch this looks like a NameError; confirm against method_modules.
        loader = Data(args)
    else:
        traindir = os.path.join(args.data_path, args.dataset, 'train')
        val_folder = 'val'
        if args.debug:
            val_folder = 'val_tmp' # val_tmp is a tiny version of val to accelerate test in debugging
            val_folder_path = f'{args.data_path}/{args.dataset}/{val_folder}'
            if not os.path.exists(val_folder_path):
                os.makedirs(val_folder_path)
                dirs = os.listdir(f'{args.data_path}/{args.dataset}/val')[:3]
                [shutil.copytree(f'{args.data_path}/{args.dataset}/val/{d}', f'{val_folder_path}/{d}') for d in dirs]
        valdir = os.path.join(args.data_path, args.dataset, val_folder)
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        transforms_train = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize])
        transforms_val = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize])
        if args.use_lmdb:
            lmdb_path_train = traindir + '/lmdb'
            lmdb_path_val = valdir + '/lmdb'
            assert os.path.exists(lmdb_path_train) and os.path.exists(lmdb_path_val)
            print(f'Loading data in LMDB format: "{lmdb_path_train}" and "{lmdb_path_val}"')
            train_dataset = Dataset_lmdb_batch(lmdb_path_train, transforms_train)
            val_dataset = Dataset_lmdb_batch(lmdb_path_val, transforms_val)
        else:
            train_dataset = datasets.ImageFolder(traindir, transforms_train)
            val_dataset = datasets.ImageFolder(valdir, transforms_val)
        if args.distributed:
            train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
            num_workers=args.workers, pin_memory=True, sampler=train_sampler)
        val_loader = torch.utils.data.DataLoader(
            val_dataset, batch_size=args.batch_size, shuffle=False,
            num_workers=args.workers, pin_memory=True)
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    logger.passer['criterion'] = criterion
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    # create model; per-dataset metadata (num_classes, input_shape) comes from
    # the matching dataset module.
    dataset = import_module("dataset.%s" % args.dataset)
    num_classes = dataset.num_classes
    assert len(dataset.input_shape) == 3
    num_channels, input_height, input_width = dataset.input_shape
    logger.passer['input_size'] = [1, num_channels, input_height, input_width]
    logger.passer['is_single_branch'] = is_single_branch
    if args.dataset in ["imagenet", "imagenet_subset_200"]:
        if args.pretrained:
            print("=> using pre-trained model '{}'".format(args.arch))
            model = models.__dict__[args.arch](num_classes=num_classes, pretrained=True)
        else:
            print("=> creating model '{}'".format(args.arch))
            model = models.__dict__[args.arch](num_classes=num_classes)
    else: # @mst: added non-imagenet models
        model = model_dict[args.arch](num_classes=num_classes, num_channels=num_channels, use_bn=args.use_bn, conv_type=args.conv_type)
        if args.init in ['orth', 'exact_isometry_from_scratch']:
            model.apply(lambda m: _weights_init_orthogonal(m, act=args.activation))
            print("==> Use weight initialization: 'orthogonal_'. Activation: %s" % args.activation)
        print(f'==> Use conv_type: {args.conv_type}')
    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = MyDataParallel(model.features)
            model.cuda()
        else:
            model = MyDataParallel(model).cuda()
    # Load the unpruned model for pruning
    # This may be useful for the non-imagenet cases where we use our pretrained models.
    if args.base_model_path:
        ckpt = torch.load(args.base_model_path)
        logstr = f'==> Load pretrained ckpt successfully: "{args.base_model_path}".'
        if 'model' in ckpt:
            model = ckpt['model']
            logstr += ' Use the model stored in ckpt.'
        model.load_state_dict(ckpt['state_dict'])
        if args.test_pretrained:
            # NOTE(review): `validate` and `val_loader` are not defined in this
            # file/branch for non-imagenet datasets — confirm where they come from.
            acc1, acc5, loss_test = validate(val_loader, model, criterion, args)
            logstr += f'Its accuracy: {acc1:.4f}.'
        print(logstr)
    ################################## Core pipeline ##################################
    # Each pipeline module receives the (possibly already processed) model and
    # returns a new one; per-module configs override the global args.
    ix_module = 0
    for m_name, config in zip(pipeline, configs):
        ix_module += 1
        print(f'')
        print(f'***************** Model processor #{ix_module} ({m_name}) starts *****************')
        module = module_dict[m_name]
        args_copy = copy.deepcopy(args)
        if config:
            args_copy = update_args_from_file(args_copy, config)
            args_copy = check_args(args_copy)
            print(f'==> Args updated from file "{config}":')
            print_args(args_copy)
        logger.suffix = f'_{logger.ExpID}_methodmodule{ix_module}_{m_name}'
        model = module(model, loader, args_copy, logger)
###################################################################################
def get_pipeline(method: str):
    """Parse a comma-separated pipeline spec into two parallel lists.

    Each item is either ``name`` (config None) or ``name:config_path``.
    Returns (module_names, config_paths).
    """
    modules, config_paths = [], []
    for item in method.split(','):
        if ':' in item:
            name, cfg = item.split(':')
            modules.append(name)
            config_paths.append(cfg)
        else:
            modules.append(item)
            config_paths.append(None)
    return modules, config_paths
def print_args(args):
    """Log every argparse option as ('name': value), sorted
    case-insensitively by option name, via the smilelogging print."""
    # build a key map for later sorting
    key_map = {}
    for attr in args.__dict__:
        lower = attr.lower()
        if lower in key_map:
            # case-insensitive collision: park the duplicate under a doubled key
            key_map[lower + '_' + lower] = attr
        else:
            key_map[lower] = attr
    # print in the order of sorted lower keys
    pieces = ["('%s': %s) " % (key_map[k], args.__dict__[key_map[k]])
              for k in sorted(key_map.keys())]
    print(''.join(pieces) + '\n', unprefix=True)
if __name__ == '__main__':
main() | 11,592 | 41.156364 | 135 | py |
Smile-Pruning | Smile-Pruning-master/src/option.py | import torchvision.models as models
import configargparse
import sys
from utils import update_args
# All lowercase, callable, non-dunder names in torchvision.models, offered as
# --arch suggestions in the help text.
model_names = sorted(name for name in models.__dict__
    if name.islower() and not name.startswith("__")
    and callable(models.__dict__[name]))
parser = configargparse.ArgumentParser(description='Regularization-Pruning PyTorch')
# ---- options inherited from the official PyTorch ImageNet example ----
parser.add_argument('--data', metavar='DIR', # @mst: 'data' -> '--data'
                    help='path to dataset')
parser.add_argument('--dataset',
                    help='dataset name', choices=['mnist', 'fmnist', 'cifar10', 'cifar100', 'imagenet', 'imagenet_subset_200', 'tiny_imagenet'])
parser.add_argument('--use_lmdb', action='store_true',
                    help='use lmdb format data instead of images of .JPEG/.PNG etc.')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
                    # choices=model_names, # @mst: We will use more than the imagenet models, so remove this
                    help='model architecture: ' +
                        ' | '.join(model_names) +
                        ' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', '--batch_size', default=256, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')
# @mst
import os
from utils import strlist_to_list, strdict_to_dict, check_path, parse_prune_ratio_vgg
from model import num_layers, is_single_branch
pjoin = os.path.join
# ---- project-specific (@mst) options, grouped by concern ----
# routine params
parser.add_argument('--project_name', '--experiment_name', dest='project_name', type=str, default="")
parser.add_argument('--debug', action="store_true")
parser.add_argument('--screen_print', action="store_true")
parser.add_argument('--note', type=str, default='', help='experiment note')
parser.add_argument('--print_interval', type=int, default=100)
parser.add_argument('--test_interval', type=int, default=2000)
parser.add_argument('--plot_interval', type=int, default=100000000)
parser.add_argument('--save_interval', type=int, default=2000, help="the interval to save model")
parser.add_argument('--ExpID', type=str, default='',
                    help='Experiment id. In default it will be assigned automatically')
# base model related
parser.add_argument('--resume_path', type=str, default=None, help="supposed to replace the original 'resume' feature")
parser.add_argument('--directly_ft_weights', type=str, default=None, help="the path to a pretrained model")
parser.add_argument('--base_model_path', type=str, default=None, help="the path to the unpruned base model")
parser.add_argument('--test_pretrained', action="store_true", help='test the pretrained model')
parser.add_argument('--start_epoch', type=int, default=0)
parser.add_argument('--save_init_model', action="store_true", help='save the model after initialization')
# general pruning method related
parser.add_argument('--pipeline', type=str, default="", # choices=['', 'L1', 'L1_Iter', 'FixReg', 'GReg-1', 'GReg-2', 'Oracle', 'OPP', 'Merge'],
                    help='pruning method name; default is "", implying the original training without any pruning')
parser.add_argument('--stage_pr', type=str, default="", help='to appoint layer-wise pruning ratio')
parser.add_argument('--index_layer', type=str, default="numbers", choices=['numbers', 'name_matching'],
                    help='the rule to index layers in a network by its name; used in designating pruning ratio')
parser.add_argument('--previous_layers', type=str, default='')
parser.add_argument('--skip_layers', type=str, default="", help='layer id to skip when pruning')
parser.add_argument('--lr_ft', type=str, default="{0:0.01,30:0.001,60:0.0001,75:0.00001}")
parser.add_argument('--data_path', type=str, default="../data")
parser.add_argument('--wg', type=str, default="filter", choices=['filter', 'channel', 'weight'])
parser.add_argument('--pick_pruned', type=str, default='min', choices=['min', 'max', 'rand'], help='the criterion to select weights to prune')
# NOTE(review): 'kaiming_normal' appears twice in the choices below; one of
# the two was probably meant to be a different scheme (e.g. 'kaiming_uniform').
parser.add_argument('--reinit', type=str, default='', choices=['', 'default', 'pth_reset', 'kaiming_normal', 'kaiming_normal', 'orth',
                    'exact_isometry_from_scratch', 'exact_isometry_based_on_existing', 'exact_isometry_based_on_existing_delta', 'approximate_isometry'],
                    help='before finetuning, the pruned model will be reinited')
parser.add_argument('--reinit_scale', type=float, default=1.)
parser.add_argument('--rescale', type=str, default='')
parser.add_argument('--not_use_bn', dest='use_bn', default=True, action="store_false", help='if use BN in the network')
parser.add_argument('--block_loss_grad', action="store_true", help="block the grad from loss, only apply weight decay")
parser.add_argument('--save_mag_reg_log', action="store_true", help="save log of L1-norm of filters wrt reg")
parser.add_argument('--save_order_log', action="store_true")
parser.add_argument('--mag_ratio_limit', type=float, default=1000)
parser.add_argument('--base_pr_model', type=str, default=None, help='the model that provides layer-wise pr')
parser.add_argument('--inherit_pruned', type=str, default='index', choices=['index', 'pr'],
                    help='when --base_pr_model is provided, we can choose to inherit the pruned index or only the pruning ratio (pr)')
parser.add_argument('--model_noise_std', type=float, default=0, help='add Gaussian noise to model weights')
parser.add_argument('--model_noise_num', type=int, default=10)
parser.add_argument('--oracle_pruning', action="store_true")
parser.add_argument('--ft_in_oracle_pruning', action="store_true")
parser.add_argument('--last_n_epoch', type=int, default=5, help='in correlation analysis, collect the last_n_epoch loss and average them')
parser.add_argument('--jsv_loop', type=int, default=0, help="num of batch loops when checking Jacobian singuar values")
parser.add_argument('--jsv_interval', type=int, default=-1, help="the interval of printing jsv")
parser.add_argument('--jsv_rand_data', action="store_true", help='if use data in random order to check JSV')
parser.add_argument('--init', type=str, default='default', help="parameter initialization scheme")
parser.add_argument('--activation', type=str, default='relu', help="activation function", choices=['relu', 'leaky_relu', 'linear', 'tanh', 'sigmoid'])
parser.add_argument('--lr_AI', type=float, default=0.001, help="lr in approximate_isometry_optimize")
parser.add_argument('--solver', type=str, default='SGD')
parser.add_argument('--verbose', action="store_true", help='if true, print debug logs')
parser.add_argument('--test_trainset', action="store_true")
# GReg method related (default setting is for ImageNet):
parser.add_argument('--batch_size_prune', type=int, default=64)
parser.add_argument('--update_reg_interval', type=int, default=5)
parser.add_argument('--stabilize_reg_interval', type=int, default=40000)
parser.add_argument('--lr_prune', type=float, default=0.001)
parser.add_argument('--reg_upper_limit', type=float, default=1.0)
parser.add_argument('--reg_upper_limit_pick', type=float, default=1e-2)
parser.add_argument('--reg_granularity_pick', type=float, default=1e-5)
parser.add_argument('--reg_granularity_prune', type=float, default=1e-4)
parser.add_argument('--reg_granularity_recover', type=float, default=-1e-4)
parser.add_argument('--conv_type', type=str, default='default', choices=['default', 'wn'])
parser.add_argument('--lw_l1', type=float, default=0, help='loss weight for l1 regularization when using both l1 and l2')
parser.add_argument('--lw_l2', type=float, default=1, help='loss weight for l2 regularization when using both l1 and l2')
# OPP method related:
parser.add_argument('--opp_scheme', type=str, default="v1", help='scheme id, used to develop new methods')
parser.add_argument('--lw_opp', type=float, default=1000)
parser.add_argument('--reinit_interval', type=int, default=100000000)
parser.add_argument('--no_transpose', dest='transpose', action='store_false', default=True, help='not use transpose for orth_regularization')
parser.add_argument('--feat_analyze', action="store_true", help='analyze features of conv/fc layers')
parser.add_argument('--interval_apply_cluster_reg', type=int, default=2)
parser.add_argument('--consider_bn', action="store_true")
parser.add_argument('--clustering', type=str, help='clustering method: l1, kmeans, random')
# orthogonal regularization train
parser.add_argument('--orth_reg_iter', type=int, default=0)
parser.add_argument('--orth_reg_iter_ft', type=int, default=0)
parser.add_argument('--orth_reg_method', type=str, default='CVPR20', choices=['CVPR20', 'CVPR17'])
parser.add_argument('--lw_orth_reg', type=float, default=0.1,
                    help='loss weight of orth reg. refers to CVPR20 Orthogonal-Convolutional-Neural-Networks code (14de526)')
parser.add_argument('--not_apply_reg', action="store_true", help='not apply L2 reg to gradients')
parser.add_argument('--greg_via_loss', action="store_true", help='implement greg via loss instead of gradient')
parser.add_argument('--no_bn_reg', dest='bn_reg', action="store_false", default=True,
                    help='not apply bn reg')
# LTH related
parser.add_argument('--num_cycles', type=int, default=0,
                    help='num of cycles in iterative pruning')
parser.add_argument('--lr_ft_mini', type=str, default='',
                    help='finetuning lr in each iterative pruning cycle')
parser.add_argument('--epochs_mini', type=int, default=0,
                    help='num of epochs in each iterative pruning cycle')
# Advanced LR scheduling related
parser.add_argument('--advanced_lr.ON', action="store_true")
parser.add_argument('--advanced_lr.warmup_epoch', type=int, default=0)
parser.add_argument('--advanced_lr.lr_decay', type=str, choices=['step', 'cos', 'linear', 'schedule'])
# This code base also serve to quick-check properties of deep neural networks. These functionalities are summarized here.
parser.add_argument('--utils.ON', action="store_true")
parser.add_argument('--utils.check_kernel_spatial_dist', action="store_true")
parser.add_argument('--utils.check_grad_norm', action="store_true")
parser.add_argument('--utils.check_weight_stats', action="store_true")
# Customize smilelogging
parser.add_argument('--hacksmile.ON', action='store_true')
parser.add_argument('--hacksmile.config', type=str, default='configs/smilelogging_config.txt')
parser.add_argument('--pruner', type=str, default='l1')
parser.add_argument('--reiniter', type=str, default='pth_reset')
args = parser.parse_args()
def check_args(args):
    """Normalize string-valued CLI options into parsed structures (in place)
    and validate them; returns the same args object. Safe to call more than
    once: each conversion is guarded by an isinstance(str) check.
    """
    # parse for layer-wise prune ratio
    # stage_pr is a list of float, skip_layers is a list of strings
    if isinstance(args.stage_pr, str) and args.stage_pr:
        if args.index_layer == 'numbers': # deprecated, kept for now for back-compatability, will be removed
            if is_single_branch(args.arch): # e.g., alexnet, vgg
                args.stage_pr = parse_prune_ratio_vgg(args.stage_pr, num_layers=num_layers[args.arch]) # example: [0-4:0.5, 5:0.6, 8-10:0.2]
                args.skip_layers = strlist_to_list(args.skip_layers, str) # example: [0, 2, 6]
                assert args.stage_pr[num_layers[args.arch] - 1] == 0, 'The output layer should NOT be pruned. Please check your "--stage_pr" setting.'
            else: # e.g., resnet
                args.stage_pr = strlist_to_list(args.stage_pr, float) # example: [0, 0.4, 0.5, 0]
                args.skip_layers = strlist_to_list(args.skip_layers, str) # example: [2.3.1, 3.1]
        elif args.index_layer == 'name_matching':
            args.stage_pr = strdict_to_dict(args.stage_pr, float)
    # else:
    #     assert args.base_pr_model, 'If stage_pr is not provided, base_pr_model must be provided'
    # Set up finetuning lr
    assert args.lr_ft, 'lr_ft must be provided'
    if isinstance(args.lr_ft, str):
        args.lr_ft = strdict_to_dict(args.lr_ft, float)
    # Resolve/validate all path options (check_path handles None).
    args.resume_path = check_path(args.resume_path)
    args.directly_ft_weights = check_path(args.directly_ft_weights)
    args.base_model_path = check_path(args.base_model_path)
    args.base_pr_model = check_path(args.base_pr_model)
    if isinstance(args.previous_layers, str):
        args.previous_layers = strdict_to_dict(args.previous_layers, str)
    # TODO
    # Iterative pruning requires cycle count and a per-cycle finetune lr map.
    if args.pipeline in ['L1_Iter']:
        assert args.num_cycles > 0
        if isinstance(args.lr_ft_mini, str):
            args.lr_ft_mini = strdict_to_dict(args.lr_ft_mini, float)
    # some deprecated params to maintain back-compatibility
    args.copy_bn_w = True
    args.copy_bn_b = True
    args.reg_multiplier = 1
    return args
# Normalize/validate the parsed CLI options once at import time.
args = check_args(args)
args = update_args(args) | 14,975 | 61.661088 | 150 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.