id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
13,006 | from termcolor import colored
import os
from os.path import join
import shutil
import subprocess
import time
import datetime
def log_time(text):
    """Print *text* in green, prefixed with a yellow microsecond timestamp."""
    stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
    print(colored(stamp, 'yellow'), colored(text, 'green'))
13,007 | from termcolor import colored
import os
from os.path import join
import shutil
import subprocess
import time
import datetime
def myprint(cmd, level):
    """Print *cmd* colorized by severity: 'run'/'info'/'warn'/'error'."""
    level_colors = {'run': 'blue', 'info': 'green', 'warn': 'yellow', 'error': 'red'}
    print(colored(cmd, level_colors[level]))
warning_infos = set()  # warning texts already emitted by oncewarn
def oncewarn(text):
    """Emit *text* as a warning, but only the first time it is seen."""
    if text not in warning_infos:
        warning_infos.add(text)
        myprint(text, 'warn')
13,008 | from termcolor import colored
import os
from os.path import join
import shutil
import subprocess
import time
import datetime
def mkdir(path):
    """Create directory *path* (with parents); returns 0 if it already exists."""
    if not os.path.exists(path):
        log('mkdir {}'.format(path))
        os.makedirs(path, exist_ok=True)
    else:
        return 0
def cp(srcname, dstname):
    """Copy file *srcname* to *dstname*, creating the destination directory.

    Fixes: the original called ``join(os.path.dirname(dstname))`` — a no-op
    single-argument join — and crashed in ``mkdir('')`` when *dstname* had no
    directory component.
    """
    dstdir = os.path.dirname(dstname)
    if dstdir:  # skip mkdir('') for bare filenames
        mkdir(dstdir)
    shutil.copyfile(srcname, dstname)
13,009 | from termcolor import colored
import os
from os.path import join
import shutil
import subprocess
import time
import datetime
def print_table(header, contents):
    """Pretty-print *contents* with tabulate, appending a trailing 'Mean' row.

    Args:
        header: column headers passed straight to tabulate.
        contents: list of per-field sequences; contents[0] is assumed to hold
            labels and is excluded from the mean computation.
    """
    from tabulate import tabulate
    length = len(contents[0])
    # one output row per entry of contents[0]; filled column-by-column below
    tables = [[] for _ in range(length)]
    mean = ['Mean']
    for icnt, content in enumerate(contents):
        for i in range(length):
            if isinstance(content[i], float):
                tables[i].append('{:6.2f}'.format(content[i]))
            else:
                tables[i].append('{}'.format(content[i]))
        if icnt > 0:
            # NOTE(review): averages each numeric field over `length` entries;
            # assumes every content[i] for icnt > 0 is numeric — confirm.
            mean.append('{:6.2f}'.format(sum(content)/length))
    tables.append(mean)
    print(tabulate(tables, header, tablefmt='fancy_grid')) | null |
13,010 | import cv2
import numpy as np
import os
from os.path import join
def camera_from_img(img):
    """Build a default pinhole camera for *img*.

    Focal length is 1.2 * min(height, width) (colmap's default initialization),
    principal point at the image center, identity extrinsics, zero distortion.
    Returns a dict with 'K', 'R', 'T', 'dist', 'invK' and the 3x4 'P' matrix.
    """
    height, width = img.shape[:2]
    focal = 1.2 * min(height, width)  # as colmap
    K = np.array([[focal, 0., width / 2],
                  [0., focal, height / 2],
                  [0., 0., 1.]])
    camera = {
        'K': K,
        'R': np.eye(3),
        'T': np.zeros((3, 1)),
        'dist': np.zeros((1, 5)),
    }
    camera['invK'] = np.linalg.inv(K)
    camera['P'] = K @ np.hstack((camera['R'], camera['T']))
    return camera
13,011 | import cv2
import numpy as np
import os
from os.path import join
def unproj(kpts, invK):
    """Lift pixel keypoints (x, y, conf...) to normalized camera coordinates.

    The x, y columns are mapped through invK; trailing columns (confidence)
    are passed through unchanged.
    """
    ones = np.ones_like(kpts[:, :1])
    normalized = np.hstack([kpts[:, :2], ones]) @ invK.T
    return np.hstack([normalized[:, :2], kpts[:, 2:]])
13,012 | import cv2
import numpy as np
import os
from os.path import join
def get_Pall(cameras, camnames):
    """Stack the 3x4 projection matrices K @ [R|T] for the requested cameras."""
    mats = []
    for cam in camnames:
        c = cameras[cam]
        mats.append(c['K'] @ np.hstack((c['R'], c['T'])))
    return np.stack(mats)
13,013 | import cv2
import numpy as np
import os
from os.path import join
def get_fundamental_matrix(cameras, basenames):
    """Compute the fundamental matrix for every ordered camera pair.

    Args:
        cameras: dict name -> {'K': (3,3), 'RT': (3,4)}.
        basenames: camera names to pair up.
    Returns:
        dict (icam, jcam) -> (3,3) fundamental matrix; all-zero results
        (e.g. icam == jcam) are nudged by 1e-12 to avoid downstream NaNs.

    Fixes: removed the dead ``F = np.zeros((N, N, 3, 3))`` that was
    immediately overwritten by the dict comprehension.
    """
    skew_op = lambda x: np.array([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])
    fundamental_op = lambda K_0, R_0, T_0, K_1, R_1, T_1: np.linalg.inv(K_0).T @ (
        R_0 @ R_1.T) @ K_1.T @ skew_op(K_1 @ R_1 @ R_0.T @ (T_0 - R_0 @ R_1.T @ T_1))
    fundamental_RT_op = lambda K_0, RT_0, K_1, RT_1: fundamental_op(
        K_0, RT_0[:, :3], RT_0[:, 3], K_1, RT_1[:, :3], RT_1[:, 3])
    F = {(icam, jcam): np.zeros((3, 3)) for jcam in basenames for icam in basenames}
    for icam in basenames:
        for jcam in basenames:
            F[(icam, jcam)] += fundamental_RT_op(cameras[icam]['K'], cameras[icam]['RT'],
                                                 cameras[jcam]['K'], cameras[jcam]['RT'])
            if F[(icam, jcam)].sum() == 0:
                F[(icam, jcam)] += 1e-12  # to avoid nan
    return F
13,014 | import cv2
import numpy as np
import os
from os.path import join
def interp_cameras(cameras, keys, step=20, loop=True, allstep=-1, **kwargs):
    """Interpolate novel cameras along the path keys[0] -> keys[1] -> ...

    Camera centers are spherically interpolated (with linearly blended radius),
    rotations are slerped, intrinsics linearly blended.

    Args:
        cameras: dict name -> {'K', 'R', 'T'}.
        keys: ordered camera names defining the path.
        step: frames per segment, used when allstep == -1.
        loop: close the path back to keys[0].
        allstep: total frame count; overrides step when != -1.
    Returns:
        dict '{left}-{right}-{i}' -> interpolated camera dict.
    """
    from scipy.spatial.transform import Rotation as R
    from scipy.spatial.transform import Slerp
    # global interpolation parameter in [0, 1) for every output frame
    if allstep != -1:
        tall = np.linspace(0., 1., allstep+1)[:-1].reshape(-1, 1, 1)
    elif allstep == -1 and loop:
        tall = np.linspace(0., 1., 1+step*len(keys))[:-1].reshape(-1, 1, 1)
    elif allstep == -1 and not loop:
        tall = np.linspace(0., 1., 1+step*(len(keys)-1))[:-1].reshape(-1, 1, 1)
    cameras_new = {}
    for ik in range(len(keys)):
        if ik == len(keys) -1 and not loop:
            break
        if loop:
            start, end = (ik * tall.shape[0])//len(keys), int((ik+1)*tall.shape[0])//len(keys)
            print(ik, start, end, tall.shape)  # NOTE(review): debug print left in place
        else:
            start, end = (ik * tall.shape[0])//(len(keys)-1), int((ik+1)*tall.shape[0])//(len(keys)-1)
        t = tall[start:end].copy()
        t = (t-t.min())/(t.max()-t.min())  # renormalize to [0, 1] inside the segment
        left, right = keys[ik], keys[0 if ik == len(keys)-1 else ik + 1]
        camera_left = cameras[left]
        camera_right = cameras[right]
        # interpolate the camera centers: center = - R.T @ T
        center_l = - camera_left['R'].T @ camera_left['T']
        center_r = - camera_right['R'].T @ camera_right['T']
        center_l, center_r = center_l[None], center_r[None]
        if False:  # linear center interpolation (disabled)
            centers = center_l * (1-t) + center_r * t
        else:
            # spherical interpolation of the center direction
            norm_l, norm_r = np.linalg.norm(center_l), np.linalg.norm(center_r)
            center_l, center_r = center_l/norm_l, center_r/norm_r
            costheta = (center_l*center_r).sum()
            sintheta = np.sqrt(1. - costheta**2)
            theta = np.arctan2(sintheta, costheta)
            centers = (np.sin(theta*(1-t)) * center_l + np.sin(theta * t) * center_r)/sintheta
            # blend the distance to the origin linearly
            norm = norm_l * (1-t) + norm_r * t
            centers = centers * norm
        key_rots = R.from_matrix(np.stack([camera_left['R'], camera_right['R']]))
        key_times = [0, 1]
        slerp = Slerp(key_times, key_rots)
        interp_rots = slerp(t.squeeze()).as_matrix()
        # recover T from R X + T = 0  =>  T = - R @ X
        T = - np.einsum('bmn,bno->bmo', interp_rots, centers)
        K = camera_left['K'] * (1-t) + camera_right['K'] * t
        for i in range(T.shape[0]):
            cameras_new['{}-{}-{}'.format(left, right, i)] = \
                {
                    'K': K[i],
                    'dist': np.zeros((1, 5)),
                    'R': interp_rots[i],
                    'T': T[i]
                }
    return cameras_new | null |
13,015 | import numpy as np
def simple_reprojection_error(kpts1, kpts1_proj):
    """Mean squared per-coordinate difference between two (N, >=2) keypoint sets."""
    diff = kpts1[:, :2] - kpts1_proj[:, :2]
    return np.mean(diff ** 2)
13,016 | import numpy as np
def solveZ(A):
    """Least-squares homogeneous solution of A X = 0, dehomogenized to 3D.

    Takes the right-singular vector of the smallest singular value and
    divides by its homogeneous coordinate.
    """
    _, _, vT = np.linalg.svd(A)
    X = vT[-1]
    return (X / X[3])[:3]
def simple_triangulate(kpts, Pall):
    """Triangulate a single 3D point from multi-view observations via DLT.

    Args:
        kpts: (nViews, 3) array of (u, v, conf) observations.
        Pall: (nViews, 3, 4) projection matrices.
    Returns:
        (4,) array: xyz plus the mean confidence of the observing views.

    Fixes: ``dtype=np.float`` — the alias was removed in NumPy 1.24 and now
    raises AttributeError; replaced with the explicit ``np.float64``.
    """
    nViews = len(kpts)
    A = np.zeros((nViews*2, 4), dtype=np.float64)
    result = np.zeros(4)
    result[3] = kpts[:, 2].sum()/(kpts[:, 2]>0).sum()
    for i in range(nViews):
        P = Pall[i]
        # confidence-weighted DLT rows: u*P3 - P1 and v*P3 - P2
        A[i*2, :] = kpts[i, 2]*(kpts[i, 0]*P[2:3,:] - P[0:1,:])
        A[i*2 + 1, :] = kpts[i, 2]*(kpts[i, 1]*P[2:3,:] - P[1:2,:])
    result[:3] = solveZ(A)
    return result
13,017 | import numpy as np
def projectN3(kpts3d, Pall):
    """Project (N, 3|4) 3D keypoints into every view.

    Returns (nViews, N, 3): pixel x, y and the homogeneous depth; when the
    input carries a 4th confidence column, the depth column is zeroed for
    points with non-positive confidence.
    """
    homo = np.hstack((kpts3d[:, :3], np.ones((kpts3d.shape[0], 1))))
    views = []
    for P in Pall:
        proj = P @ homo.T
        proj[:2, :] /= proj[2:, :]
        views.append(proj.T[None, :, :])
    out = np.vstack(views)
    if kpts3d.shape[-1] == 4:
        out[..., -1] = out[..., -1] * (kpts3d[None, :, -1] > 0.)
    return out
def batch_triangulate(keypoints_, Pall, keypoints_pre=None, lamb=1e3):
    """DLT-triangulate all joints observed by at least two views, optionally
    regularized towards a previous estimate.

    Args:
        keypoints_: (nViews, nJoints, 3) of (u, v, conf).
        Pall: (nViews, 3, 4) projection matrices.
        keypoints_pre: optional (nJoints, 4) previous solution used as a prior.
        lamb: weight of the prior term.
    Returns:
        (nJoints, 4): xyz + mean confidence; joints seen by <2 views stay zero.
    """
    # keypoints: (nViews, nJoints, 3)
    # Pall: (nViews, 3, 4)
    # A: (nJoints, nViewsx2, 4), x: (nJoints, 4, 1); b: (nJoints, nViewsx2, 1)
    v = (keypoints_[:, :, -1]>0).sum(axis=0)
    valid_joint = np.where(v > 1)[0]
    keypoints = keypoints_[:, valid_joint]
    conf3d = keypoints[:, :, -1].sum(axis=0)/v[valid_joint]
    # P2: last row of each P matrix: (1, nViews, 1, 4)
    P0 = Pall[None, :, 0, :]
    P1 = Pall[None, :, 1, :]
    P2 = Pall[None, :, 2, :]
    # uP2: x coordinate times P2: (nJoints, nViews, 1, 4)
    uP2 = keypoints[:, :, 0].T[:, :, None] * P2
    vP2 = keypoints[:, :, 1].T[:, :, None] * P2
    conf = keypoints[:, :, 2].T[:, :, None]
    Au = conf * (uP2 - P0)
    Av = conf * (vP2 - P1)
    A = np.hstack([Au, Av])
    if keypoints_pre is not None:
        # keypoints_pre: (nJoints, 4); soft constraint X ~= previous estimate
        B = np.eye(4)[None, :, :].repeat(A.shape[0], axis=0)
        B[:, :3, 3] = -keypoints_pre[valid_joint, :3]
        confpre = lamb * keypoints_pre[valid_joint, 3]
        # 1, 0, 0, -x0
        # 0, 1, 0, -y0
        # 0, 0, 1, -z0
        # 0, 0, 0, 0
        B[:, 3, 3] = 0
        B = B * confpre[:, None, None]
        A = np.hstack((A, B))
    u, s, v = np.linalg.svd(A)
    X = v[:, -1, :]
    X = X / X[:, 3:]  # dehomogenize per joint
    # out: (nJoints, 4)
    result = np.zeros((keypoints_.shape[1], 4))
    result[valid_joint, :3] = X[:, :3]
    result[valid_joint, 3] = conf3d
    return result
def simple_recon_person(keypoints_use, Puse):
    """Triangulate a person's keypoints and compute per-view reprojections.

    Args:
        keypoints_use: (nViews, nJoints, 3) 2D detections (u, v, conf).
        Puse: (nViews, 3, 4) projection matrices.
    Returns:
        (keypoints3d, kpts_repro): the (nJoints, 4) triangulation and the
        reprojections with the triangulated confidence appended as an extra
        channel.

    Fixes: removed ``square_diff``, which was computed on every call but
    never used.
    """
    keypoints3d = batch_triangulate(keypoints_use, Puse)
    kpts_repro = projectN3(keypoints3d, Puse)
    conf = np.repeat(keypoints3d[None, :, -1:], len(Puse), 0)
    kpts_repro = np.concatenate((kpts_repro, conf), axis=2)
    return keypoints3d, kpts_repro
13,018 | import numpy as np
def check_limb(keypoints3d, limb_means, thres=0.5):
    """Validate triangulated joints against prior limb-length statistics.

    Args:
        keypoints3d: (nJoints, 4) xyz + confidence.
        limb_means: dict (src, dst) -> {'mean': ..., 'std': ...}.
        thres: maximum allowed normalized deviation from the expected length.
    Returns:
        True iff every measurable bone is within tolerance and at least
        three bones were measurable.
    """
    usable = 0
    for (src, dst), stat in limb_means.items():
        if keypoints3d[src, 3] <= 0 or keypoints3d[dst, 3] <= 0:
            continue  # one endpoint missing: bone not measurable
        usable += 1
        length = np.linalg.norm(keypoints3d[src, :3] - keypoints3d[dst, :3])
        # normalized deviation from the expected bone length
        if abs(length - stat['mean']) / stat['mean'] / stat['std'] > thres:
            return False
    # require at least three usable bones
    return usable > 2
13,019 | import shutil
import sys
import os
import sqlite3
import numpy as np
from os.path import join
import cv2
from .debug_utils import mkdir, run_cmd, log, mywarn
from .colmap_structure import Camera, Image, CAMERA_MODEL_NAMES
from .colmap_structure import rotmat2qvec
from .colmap_structure import read_points3d_binary
MAX_IMAGE_ID = 2**31 - 1  # colmap's maximum image id

def image_ids_to_pair_id(image_id1, image_id2):
    """Encode an unordered image pair as colmap's canonical pair id."""
    lo, hi = sorted((image_id1, image_id2))
    return lo * MAX_IMAGE_ID + hi
13,020 | import shutil
import sys
import os
import sqlite3
import numpy as np
from os.path import join
import cv2
from .debug_utils import mkdir, run_cmd, log, mywarn
from .colmap_structure import Camera, Image, CAMERA_MODEL_NAMES
from .colmap_structure import rotmat2qvec
from .colmap_structure import read_points3d_binary
MAX_IMAGE_ID = 2**31 - 1  # colmap's maximum image id

def pair_id_to_image_ids(pair_id):
    """Invert image_ids_to_pair_id: recover (smaller_id, larger_id)."""
    image_id1, image_id2 = divmod(pair_id, MAX_IMAGE_ID)
    return image_id1, image_id2
13,021 | import shutil
import sys
import os
import sqlite3
import numpy as np
from os.path import join
import cv2
from .debug_utils import mkdir, run_cmd, log, mywarn
from .colmap_structure import Camera, Image, CAMERA_MODEL_NAMES
from .colmap_structure import rotmat2qvec
from .colmap_structure import read_points3d_binary
IS_PYTHON3 = sys.version_info[0] >= 3

def array_to_blob(array):
    """Serialize a numpy array to raw bytes for sqlite storage."""
    if not IS_PYTHON3:
        return np.getbuffer(array)  # legacy python2 path
    return array.tobytes()
13,022 | import shutil
import sys
import os
import sqlite3
import numpy as np
from os.path import join
import cv2
from .debug_utils import mkdir, run_cmd, log, mywarn
from .colmap_structure import Camera, Image, CAMERA_MODEL_NAMES
from .colmap_structure import rotmat2qvec
from .colmap_structure import read_points3d_binary
IS_PYTHON3 = sys.version_info[0] >= 3  # kept: other helpers in this module test it

def blob_to_array(blob, dtype, shape=(-1,)):
    """Deserialize a sqlite blob back into a numpy array.

    Returns an empty (0, 2) array when the column is NULL so callers can index
    keypoint arrays without special-casing missing rows.

    Fixes: the python-2/3 branch was dead code — both branches were identical
    (``np.frombuffer`` handles both bytes and buffer objects).
    """
    if blob is None:
        return np.empty((0, 2), dtype=dtype)
    return np.frombuffer(blob, dtype=dtype).reshape(*shape)
13,023 | import shutil
import sys
import os
import sqlite3
import numpy as np
from os.path import join
import cv2
from .debug_utils import mkdir, run_cmd, log, mywarn
from .colmap_structure import Camera, Image, CAMERA_MODEL_NAMES
from .colmap_structure import rotmat2qvec
from .colmap_structure import read_points3d_binary
Camera = collections.namedtuple(
"Camera", ["id", "model", "width", "height", "params"])
CAMERA_MODEL_NAMES = dict([(camera_model.model_name, camera_model)
for camera_model in CAMERA_MODELS])
def create_cameras(db, cameras, subs, width, height, share_intri=True):
    """Register camera intrinsics in a colmap database.

    Args:
        db: colmap database wrapper providing add_camera().
        cameras: dict sub -> {'K': (3,3), 'dist': distortion coefficients}.
        subs: view names; with share_intri all views share one camera entry.
        width, height: image size in pixels.
        share_intri: only the shared-intrinsics path is implemented.
    Returns:
        (cameras_colmap, cameras_map): Camera records keyed by camera id, and
        the sub -> camera_id mapping.
    """
    model = 'OPENCV'
    if share_intri:
        cam_id = 1
        K = cameras[subs[0]]['K']
        # assumes a 5-element distortion vector (k1, k2, p1, p2, k3) — TODO confirm
        D = cameras[subs[0]]['dist'].reshape(1, 5)
        # k3 (= D[0, 4]) and the zero k4-k6 are parsed but unused: the
        # 8-parameter OPENCV model only keeps fx, fy, cx, cy, k1, k2, p1, p2
        fx, fy, cx, cy, k1, k2, p1, p2, k3, k4, k5, k6 = K[0, 0], K[1, 1], K[0, 2], K[1, 2], D[0, 0], D[0, 1], D[0, 2], D[0, 3], D[0, 4], 0, 0, 0
        params = [fx, fy, cx, cy, k1, k2, p1, p2]
        # params = [fx, fy, cx, cy, 0, 0, 0, 0]
        camera = Camera(
            id=cam_id,
            model=model,
            width=width,
            height=height,
            params=params
        )
        cameras_colmap = {cam_id: camera}
        cameras_map = {sub:cam_id for sub in subs}
        # persist to the colmap database without a focal-length prior
        db.add_camera(CAMERA_MODEL_NAMES[model].model_id, width, height, params,
                      prior_focal_length=False, camera_id=cam_id)
    else:
        raise NotImplementedError
    return cameras_colmap, cameras_map | null |
13,024 | import shutil
import sys
import os
import sqlite3
import numpy as np
from os.path import join
import cv2
from .debug_utils import mkdir, run_cmd, log, mywarn
from .colmap_structure import Camera, Image, CAMERA_MODEL_NAMES
from .colmap_structure import rotmat2qvec
from .colmap_structure import read_points3d_binary
class Image(BaseImage):
    # NOTE(review): BaseImage is the colmap Image namedtuple defined elsewhere
    # in this module — not visible in this snippet.
    def qvec2rotmat(self):
        # Convert this image's stored quaternion into a 3x3 rotation matrix.
        return qvec2rotmat(self.qvec)
def rotmat2qvec(R):
    """Convert a 3x3 rotation matrix to a quaternion (w, x, y, z).

    Uses the eigenvalue (Shepperd-style) method: the quaternion is the
    eigenvector, at the largest eigenvalue, of a symmetric 4x4 matrix built
    from R. The sign is canonicalized so the scalar part is non-negative.
    """
    Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat
    K = np.array([
        [Rxx - Ryy - Rzz, 0, 0, 0],
        [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],
        [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],
        [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0
    vals, vecs = np.linalg.eigh(K)
    # reorder (x, y, z, w) -> (w, x, y, z) while selecting the best eigenvector
    q = vecs[[3, 0, 1, 2], np.argmax(vals)]
    return -q if q[0] < 0 else q
def create_images(db, cameras, cameras_map, image_names):
    """Register images with pose priors in a colmap database.

    Args:
        db: colmap database wrapper providing add_image().
        cameras: dict sub -> {'R': (3,3), 'T': (3,1)} world-to-camera pose.
        cameras_map: sub -> colmap camera id (see create_cameras).
        image_names: dict sub -> image file path.
    Returns:
        dict image_id -> colmap Image record; ids follow sorted sub order.
    """
    subs = sorted(list(image_names.keys()))
    images = {}
    for sub, image_name in image_names.items():
        img_id = subs.index(sub) + 1  # colmap image ids are 1-based
        R = cameras[sub]['R']
        T = cameras[sub]['T']
        qvec = rotmat2qvec(R)
        tvec = T.T[0]  # (3,1) column vector -> flat (3,)
        image = Image(
            id=img_id,
            qvec=qvec,
            tvec=tvec,
            camera_id=cameras_map[sub],
            name=os.path.basename(image_name),
            xys=[],
            point3D_ids=[]
        )
        images[img_id] = image
        db.add_image(image.name, camera_id=image.camera_id,
                     prior_q=image.qvec, prior_t=image.tvec, image_id=img_id)
    return images | null |
13,025 | import os
import json
import numpy as np
from os.path import join
def save_numpy_dict(file, data):
    """Serialize a dict of numpy arrays to a JSON file (arrays become lists).

    Fixes: the original called ``os.makedirs`` without ``exist_ok`` and
    crashed with ``makedirs('')`` when *file* had no directory component.
    """
    dirname = os.path.dirname(file)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    serializable = {key: val.tolist() for key, val in data.items()}
    with open(file, 'w') as f:
        json.dump(serializable, f, indent=4)
13,026 | import os
import json
import numpy as np
from os.path import join
def read_numpy_dict(path):
    """Load a JSON dict (as written by save_numpy_dict), converting every
    value back into a float32 numpy array."""
    assert os.path.exists(path), path
    with open(path) as f:
        raw = json.load(f)
    return {key: np.array(val, dtype=np.float32) for key, val in raw.items()}
13,027 | import os
import json
import numpy as np
from os.path import join
def read_json(path):
    """Read a JSON file; returns [] (after printing a warning) when the file
    exists but is not valid JSON.

    Fixes: the bare ``except`` also swallowed KeyboardInterrupt and any
    unrelated error; narrowed to ``json.JSONDecodeError`` to keep only the
    intended best-effort behavior.
    """
    assert os.path.exists(path), path
    with open(path) as f:
        try:
            data = json.load(f)
        except json.JSONDecodeError:
            print('Reading error {}'.format(path))
            data = []
    return data
def append_json(file, data):
    """Append *data* to the JSON list stored at *file*.

    When the file does not exist yet, *data* is written as-is (matching the
    original behavior — the list wrapper only appears from the second call).

    Fixes: ``os.makedirs`` without ``exist_ok`` raced with itself and crashed
    as ``makedirs('')`` when *file* had no directory component.
    """
    dirname = os.path.dirname(file)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    if os.path.exists(file):
        res = read_json(file)
        assert isinstance(res, list)
        res.append(data)
        data = res
    with open(file, 'w') as f:
        json.dump(data, f, indent=4)
13,028 | import os
import json
import numpy as np
from os.path import join
def getFileList(root, ext='.jpg'):
    """Recursively collect files under *root* whose names end with *ext*.

    Returns paths relative to *root*, sorted lexicographically.
    """
    matches = []
    pending = os.listdir(root)
    while pending:
        rel = pending.pop()
        full = join(root, rel)
        if os.path.isfile(full) and full.endswith(ext):
            matches.append(rel)
        elif os.path.isdir(full):
            pending.extend(join(rel, name) for name in os.listdir(full))
    return sorted(matches)
13,029 | import os
import json
import numpy as np
from os.path import join
def array2raw(array, separator=' ', fmt='%.3f'):
    """Format a 2D array as raw text: one line per row, values joined by
    *separator* and formatted with *fmt*.

    Fixes: the original built the per-row strings but never returned
    anything; returning the newline-joined text is the evident intent.
    """
    assert len(array.shape) == 2, 'Only support MxN matrix, {}'.format(array.shape)
    rows = [separator.join(fmt % d for d in row) for row in array]
    return '\n'.join(rows)
13,030 | import os
import json
import numpy as np
from os.path import join
def write_common_results(dumpname=None, results=[], keys=[], fmt='%2.3f'):
    """Serialize a list of per-person result dicts to compact JSON text.

    Each entry must carry an 'id' and may carry 'type' plus the array-valued
    fields in *keys* (formatted via myarray2string, defined elsewhere).
    Returns the JSON text when *dumpname* is None; otherwise writes the file
    (creating its directory via mkout) and returns None.

    Fixes: the original compared every written key against
    ``keys_current[-1]`` to decide trailing commas, which raised IndexError
    whenever none of *keys* was present in an entry; comma placement is now
    positional over the keys actually written.
    """
    out_text = []
    out_text.append('[\n')
    for idata, data in enumerate(results):
        out_text.append(' {\n')
        output = {}
        output['id'] = data['id']
        if 'type' in data.keys():
            output['type'] = '\"{}\"'.format(data['type'])
        for key in [k for k in keys if k in data.keys()]:
            # BUG: np.array2string fails when data[key] has too many rows,
            # hence the custom formatter:
            # output[key] = np.array2string(data[key], max_line_width=1000, separator=', ', formatter={'float_kind': lambda x: fmt % x})
            output[key] = myarray2string(data[key], separator=', ', fmt=fmt)
        out_keys = list(output.keys())
        for ik, key in enumerate(out_keys):
            out_text.append(' \"{}\": {}'.format(key, output[key]))
            out_text.append(',\n' if ik != len(out_keys) - 1 else '\n')
        out_text.append(' }')
        out_text.append(',\n' if idata != len(results) - 1 else '\n')
    out_text.append(']\n')
    if dumpname is not None:
        mkout(dumpname)
        with open(dumpname, 'w') as f:
            f.writelines(out_text)
    else:
        return ''.join(out_text)
def write_keypoints3d(dumpname, results, keys=None):
    """Dump per-person 3D keypoints to *dumpname* with high float precision.

    Fixes: replaced the mutable default argument ``keys=['keypoints3d']``
    (shared across calls) with the None-sentinel idiom; callers passing
    *keys* explicitly are unaffected.
    """
    if keys is None:
        keys = ['keypoints3d']
    # TODO: rewrite it
    write_common_results(dumpname, results, keys, fmt='%6.7f')
13,031 | import os
import json
import numpy as np
from os.path import join
def write_common_results(dumpname=None, results=[], keys=[], fmt='%2.3f'):
    """Serialize a list of per-person result dicts to compact JSON text.

    Each entry must carry an 'id' and may carry 'type' plus the array-valued
    fields in *keys* (formatted via myarray2string, defined elsewhere).
    Returns the JSON text when *dumpname* is None; otherwise writes the file
    (creating its directory via mkout) and returns None.

    Fixes: the original compared every written key against
    ``keys_current[-1]`` to decide trailing commas, which raised IndexError
    whenever none of *keys* was present in an entry; comma placement is now
    positional over the keys actually written.
    """
    out_text = []
    out_text.append('[\n')
    for idata, data in enumerate(results):
        out_text.append(' {\n')
        output = {}
        output['id'] = data['id']
        if 'type' in data.keys():
            output['type'] = '\"{}\"'.format(data['type'])
        for key in [k for k in keys if k in data.keys()]:
            # BUG: np.array2string fails when data[key] has too many rows,
            # hence the custom formatter:
            # output[key] = np.array2string(data[key], max_line_width=1000, separator=', ', formatter={'float_kind': lambda x: fmt % x})
            output[key] = myarray2string(data[key], separator=', ', fmt=fmt)
        out_keys = list(output.keys())
        for ik, key in enumerate(out_keys):
            out_text.append(' \"{}\": {}'.format(key, output[key]))
            out_text.append(',\n' if ik != len(out_keys) - 1 else '\n')
        out_text.append(' }')
        out_text.append(',\n' if idata != len(results) - 1 else '\n')
    out_text.append(']\n')
    if dumpname is not None:
        mkout(dumpname)
        with open(dumpname, 'w') as f:
            f.writelines(out_text)
    else:
        return ''.join(out_text)
def write_vertices(dumpname, results):
    """Dump per-person mesh vertices to *dumpname* at fixed '%6.5f' precision."""
    write_common_results(dumpname, results, ['vertices'], fmt='%6.5f')
13,032 | import os
import json
import numpy as np
from os.path import join
def batch_bbox_from_pose(keypoints2d, height, width, rate=0.1):
    """Derive (x0, y0, x1, y1, conf) boxes from batched 2D keypoints.

    Boxes are expanded by *rate* of their extent on each side. People with no
    confident joints, or whose mean position lies more than 20 px outside the
    image, keep an all-zero row.
    """
    # TODO: write this in batch
    nper = keypoints2d.shape[0]
    bboxes = np.zeros((nper, 5), dtype=np.float32)
    border = 20
    for bn in range(nper):
        confident = keypoints2d[bn, :, -1] > 0
        if not confident.any():
            continue
        pts = keypoints2d[bn, confident, :2]
        x_min, y_min = pts.min(axis=0)
        x_max, y_max = pts.max(axis=0)
        x_mean, y_mean = pts.mean(axis=0)
        outside = (x_mean < -border or y_mean < -border
                   or x_mean > width + border or y_mean > height + border)
        if outside:
            continue
        dx = (x_max - x_min) * rate
        dy = (y_max - y_min) * rate
        bboxes[bn] = [x_min - dx, y_min - dy, x_max + dx, y_max + dy, 1]
    return bboxes
13,033 | import os
import json
import numpy as np
from os.path import join
def merge_params(param_list, share_shape=True):
    """Stack SMPL-style parameter dicts along the batch axis.

    Args:
        param_list: list of dicts, each possibly containing 'poses', 'shapes',
            'Rh', 'Th', 'expression' arrays with matching trailing dims.
        share_shape: average the stacked 'shapes' into one shared shape row.
    Returns:
        dict with the stacked (and optionally shape-averaged) arrays.

    Fixes: the original indexed ``output['shapes']`` unconditionally and
    raised KeyError when the inputs carried no 'shapes' entry.
    """
    output = {}
    for key in ['poses', 'shapes', 'Rh', 'Th', 'expression']:
        if key in param_list[0].keys():
            output[key] = np.vstack([v[key] for v in param_list])
    if share_shape and 'shapes' in output:
        output['shapes'] = output['shapes'].mean(axis=0, keepdims=True)
    return output
13,034 | import os
import sys
import collections
import numpy as np
import struct
import cv2
def read_cameras_text(path):
    """
    see: src/base/reconstruction.cc
    void Reconstruction::WriteCamerasText(const std::string& path)
    void Reconstruction::ReadCamerasText(const std::string& path)
    """
    # Parse cameras.txt: one camera per non-comment line, formatted as
    # CAMERA_ID MODEL WIDTH HEIGHT PARAMS[]
    cameras = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            if len(line) > 0 and line[0] != "#":  # skip header comments
                elems = line.split()
                camera_id = int(elems[0])
                model = elems[1]
                width = int(elems[2])
                height = int(elems[3])
                params = np.array(tuple(map(float, elems[4:])))
                # NOTE(review): `Camera` is the colmap namedtuple defined
                # elsewhere in this module.
                cameras[camera_id] = Camera(id=camera_id, model=model,
                                            width=width, height=height,
                                            params=params)
    return cameras
def read_cameras_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
    void Reconstruction::WriteCamerasBinary(const std::string& path)
    void Reconstruction::ReadCamerasBinary(const std::string& path)
    """
    # NOTE(review): read_next_bytes, CAMERA_MODEL_IDS and Camera are defined
    # elsewhere in this module.
    cameras = {}
    with open(path_to_model_file, "rb") as fid:
        num_cameras = read_next_bytes(fid, 8, "Q")[0]
        for camera_line_index in range(num_cameras):
            # fixed-size record: camera_id, model_id (int32), width, height (uint64)
            camera_properties = read_next_bytes(
                fid, num_bytes=24, format_char_sequence="iiQQ")
            camera_id = camera_properties[0]
            model_id = camera_properties[1]
            model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
            width = camera_properties[2]
            height = camera_properties[3]
            # the model id determines how many float64 params follow
            num_params = CAMERA_MODEL_IDS[model_id].num_params
            params = read_next_bytes(fid, num_bytes=8*num_params,
                                     format_char_sequence="d"*num_params)
            cameras[camera_id] = Camera(id=camera_id,
                                        model=model_name,
                                        width=width,
                                        height=height,
                                        params=np.array(params))
        assert len(cameras) == num_cameras
    return cameras
def read_images_text(path):
    """
    see: src/base/reconstruction.cc
    void Reconstruction::ReadImagesText(const std::string& path)
    void Reconstruction::WriteImagesText(const std::string& path)
    """
    # Each image occupies two lines: pose/header, then the 2D point track
    # as repeated (X, Y, POINT3D_ID) triples.
    images = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            if len(line) > 0 and line[0] != "#":  # skip header comments
                elems = line.split()
                image_id = int(elems[0])
                qvec = np.array(tuple(map(float, elems[1:5])))
                tvec = np.array(tuple(map(float, elems[5:8])))
                camera_id = int(elems[8])
                image_name = elems[9]
                # second line: 2D observations and their 3D point ids
                elems = fid.readline().split()
                xys = np.column_stack([tuple(map(float, elems[0::3])),
                                       tuple(map(float, elems[1::3]))])
                point3D_ids = np.array(tuple(map(int, elems[2::3])))
                # NOTE(review): `Image` is the colmap namedtuple defined
                # elsewhere in this module.
                images[image_id] = Image(
                    id=image_id, qvec=qvec, tvec=tvec,
                    camera_id=camera_id, name=image_name,
                    xys=xys, point3D_ids=point3D_ids)
    return images
def read_images_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
    void Reconstruction::ReadImagesBinary(const std::string& path)
    void Reconstruction::WriteImagesBinary(const std::string& path)
    """
    # NOTE(review): read_next_bytes and Image are defined elsewhere in this module.
    images = {}
    with open(path_to_model_file, "rb") as fid:
        num_reg_images = read_next_bytes(fid, 8, "Q")[0]
        for image_index in range(num_reg_images):
            # fixed header: id, qvec (4 doubles), tvec (3 doubles), camera_id
            binary_image_properties = read_next_bytes(
                fid, num_bytes=64, format_char_sequence="idddddddi")
            image_id = binary_image_properties[0]
            qvec = np.array(binary_image_properties[1:5])
            tvec = np.array(binary_image_properties[5:8])
            camera_id = binary_image_properties[8]
            image_name = ""
            current_char = read_next_bytes(fid, 1, "c")[0]
            while current_char != b"\x00":   # look for the ASCII 0 entry
                image_name += current_char.decode("utf-8")
                current_char = read_next_bytes(fid, 1, "c")[0]
            num_points2D = read_next_bytes(fid, num_bytes=8,
                                           format_char_sequence="Q")[0]
            # track: repeated (x, y, point3D_id) = (double, double, int64)
            x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
                                       format_char_sequence="ddq"*num_points2D)
            xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
                                   tuple(map(float, x_y_id_s[1::3]))])
            point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
            images[image_id] = Image(
                id=image_id, qvec=qvec, tvec=tvec,
                camera_id=camera_id, name=image_name,
                xys=xys, point3D_ids=point3D_ids)
    return images
def read_points3D_text(path):
    """
    see: src/base/reconstruction.cc
    void Reconstruction::ReadPoints3DText(const std::string& path)
    void Reconstruction::WritePoints3DText(const std::string& path)
    """
    # One point per line: POINT3D_ID X Y Z R G B ERROR then the track as
    # repeated (IMAGE_ID, POINT2D_IDX) pairs.
    points3D = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            if len(line) > 0 and line[0] != "#":  # skip header comments
                elems = line.split()
                point3D_id = int(elems[0])
                xyz = np.array(tuple(map(float, elems[1:4])))
                rgb = np.array(tuple(map(int, elems[4:7])))
                error = float(elems[7])
                image_ids = np.array(tuple(map(int, elems[8::2])))
                point2D_idxs = np.array(tuple(map(int, elems[9::2])))
                # NOTE(review): `Point3D` is the colmap namedtuple defined
                # elsewhere in this module.
                points3D[point3D_id] = Point3D(id=point3D_id, xyz=xyz, rgb=rgb,
                                               error=error, image_ids=image_ids,
                                               point2D_idxs=point2D_idxs)
    return points3D
def read_points3d_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
    void Reconstruction::ReadPoints3DBinary(const std::string& path)
    void Reconstruction::WritePoints3DBinary(const std::string& path)
    """
    # NOTE(review): read_next_bytes and Point3D are defined elsewhere in this module.
    points3D = {}
    with open(path_to_model_file, "rb") as fid:
        num_points = read_next_bytes(fid, 8, "Q")[0]
        for point_line_index in range(num_points):
            # fixed record: id (uint64), xyz (3 doubles), rgb (3 bytes), error (double)
            binary_point_line_properties = read_next_bytes(
                fid, num_bytes=43, format_char_sequence="QdddBBBd")
            point3D_id = binary_point_line_properties[0]
            xyz = np.array(binary_point_line_properties[1:4])
            rgb = np.array(binary_point_line_properties[4:7])
            error = np.array(binary_point_line_properties[7])
            track_length = read_next_bytes(
                fid, num_bytes=8, format_char_sequence="Q")[0]
            # track: repeated (image_id, point2D_idx) int32 pairs
            track_elems = read_next_bytes(
                fid, num_bytes=8*track_length,
                format_char_sequence="ii"*track_length)
            image_ids = np.array(tuple(map(int, track_elems[0::2])))
            point2D_idxs = np.array(tuple(map(int, track_elems[1::2])))
            points3D[point3D_id] = Point3D(
                id=point3D_id, xyz=xyz, rgb=rgb,
                error=error, image_ids=image_ids,
                point2D_idxs=point2D_idxs)
    return points3D
def read_model(path, ext):
    """Load a colmap reconstruction from *path*.

    Args:
        path: directory holding cameras/images/points3D files.
        ext: '.txt' for the text format, anything else for binary ('.bin').
    Returns:
        (cameras, images, points3D) dicts keyed by their colmap ids.
    """
    if ext == ".txt":
        loaders = (read_cameras_text, read_images_text, read_points3D_text)
    else:
        loaders = (read_cameras_binary, read_images_binary, read_points3d_binary)
    cameras = loaders[0](os.path.join(path, "cameras" + ext))
    images = loaders[1](os.path.join(path, "images" + ext))
    points3D = loaders[2](os.path.join(path, "points3D" + ext))
    return cameras, images, points3D
13,035 | import os
import sys
import collections
import numpy as np
import struct
import cv2
def qvec2rotmat(qvec):
    """Convert a quaternion (w, x, y, z) to a 3x3 rotation matrix."""
    w, x, y, z = qvec
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
        [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)]])
13,036 | import os
import sys
import collections
import numpy as np
import struct
import cv2
The provided code snippet includes necessary dependencies for implementing the `write_cameras_text` function. Write a Python function `def write_cameras_text(cameras, path)` to solve the following problem:
see: src/base/reconstruction.cc void Reconstruction::WriteCamerasText(const std::string& path) void Reconstruction::ReadCamerasText(const std::string& path)
Here is the function:
def write_cameras_text(cameras, path):
    """
    see: src/base/reconstruction.cc
    void Reconstruction::WriteCamerasText(const std::string& path)
    void Reconstruction::ReadCamerasText(const std::string& path)
    """
    # Fixes: the original left header lines 2-3 as standalone string
    # expression statements, so only the first comment line was ever written;
    # they are now explicitly concatenated.
    HEADER = ('# Camera list with one line of data per camera:\n'
              + '# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\n'
              + '# Number of cameras: {}\n'.format(len(cameras)))
    with open(path, "w") as fid:
        fid.write(HEADER)
        for _, cam in cameras.items():
            to_write = [cam.id, cam.model, cam.width, cam.height, *cam.params]
            line = " ".join([str(elem) for elem in to_write])
            fid.write(line + "\n")
13,037 | import os
import sys
import collections
import numpy as np
import struct
import cv2
CAMERA_MODEL_NAMES = dict([(camera_model.model_name, camera_model)
for camera_model in CAMERA_MODELS])
def write_next_bytes(fid, data, format_char_sequence, endian_character="<"):
    """pack and write to a binary file.
    :param fid:
    :param data: data to send, if multiple elements are sent at the same time,
    they should be encapsuled either in a list or a tuple
    :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
    should be the same length as the data list or tuple
    :param endian_character: Any of {@, =, <, >, !}
    """
    fmt = endian_character + format_char_sequence
    # scalars are packed directly; sequences are splatted into struct.pack
    packed = struct.pack(fmt, *data) if isinstance(data, (list, tuple)) else struct.pack(fmt, data)
    fid.write(packed)
The provided code snippet includes necessary dependencies for implementing the `write_cameras_binary` function. Write a Python function `def write_cameras_binary(cameras, path_to_model_file)` to solve the following problem:
see: src/base/reconstruction.cc void Reconstruction::WriteCamerasBinary(const std::string& path) void Reconstruction::ReadCamerasBinary(const std::string& path)
Here is the function:
def write_cameras_binary(cameras, path_to_model_file):
    """
    see: src/base/reconstruction.cc
    void Reconstruction::WriteCamerasBinary(const std::string& path)
    void Reconstruction::ReadCamerasBinary(const std::string& path)
    """
    # NOTE(review): CAMERA_MODEL_NAMES and write_next_bytes are defined
    # elsewhere in this module.
    with open(path_to_model_file, "wb") as fid:
        write_next_bytes(fid, len(cameras), "Q")
        for _, cam in cameras.items():
            model_id = CAMERA_MODEL_NAMES[cam.model].model_id
            # fixed record mirrored by read_cameras_binary: iiQQ then doubles
            camera_properties = [cam.id,
                                 model_id,
                                 cam.width,
                                 cam.height]
            write_next_bytes(fid, camera_properties, "iiQQ")
            for p in cam.params:
                write_next_bytes(fid, float(p), "d")
    return cameras | null |
13,038 | import os
import sys
import collections
import numpy as np
import struct
import cv2
def write_next_bytes(fid, data, format_char_sequence, endian_character="<"):
    """pack and write to a binary file.
    :param fid:
    :param data: data to send, if multiple elements are sent at the same time,
    they should be encapsuled either in a list or a tuple
    :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
    should be the same length as the data list or tuple
    :param endian_character: Any of {@, =, <, >, !}
    """
    if isinstance(data, (list, tuple)):
        # sequences are splatted so each element matches one format char
        bytes = struct.pack(endian_character + format_char_sequence, *data)
    else:
        bytes = struct.pack(endian_character + format_char_sequence, data)
    fid.write(bytes)
The provided code snippet includes necessary dependencies for implementing the `write_images_binary` function. Write a Python function `def write_images_binary(images, path_to_model_file)` to solve the following problem:
see: src/base/reconstruction.cc void Reconstruction::ReadImagesBinary(const std::string& path) void Reconstruction::WriteImagesBinary(const std::string& path)
Here is the function:
def write_images_binary(images, path_to_model_file):
    """
    see: src/base/reconstruction.cc
    void Reconstruction::ReadImagesBinary(const std::string& path)
    void Reconstruction::WriteImagesBinary(const std::string& path)
    """
    # NOTE(review): write_next_bytes is defined elsewhere in this module.
    with open(path_to_model_file, "wb") as fid:
        write_next_bytes(fid, len(images), "Q")
        for _, img in images.items():
            write_next_bytes(fid, img.id, "i")
            write_next_bytes(fid, img.qvec.tolist(), "dddd")
            write_next_bytes(fid, img.tvec.tolist(), "ddd")
            write_next_bytes(fid, img.camera_id, "i")
            # image name is stored as a null-terminated string; NOTE(review):
            # the per-character "c" packing assumes single-byte (ASCII) names
            # — a multi-byte utf-8 char would raise in struct.pack. Confirm.
            for char in img.name:
                write_next_bytes(fid, char.encode("utf-8"), "c")
            write_next_bytes(fid, b"\x00", "c")
            write_next_bytes(fid, len(img.point3D_ids), "Q")
            # track: repeated (x, y, point3D_id) triples
            for xy, p3d_id in zip(img.xys, img.point3D_ids):
                write_next_bytes(fid, [*xy, p3d_id], "ddq") | null |
13,039 | import os
import sys
import collections
import numpy as np
import struct
import cv2
The provided code snippet includes necessary dependencies for implementing the `write_images_text` function. Write a Python function `def write_images_text(images, path)` to solve the following problem:
see: src/base/reconstruction.cc void Reconstruction::ReadImagesText(const std::string& path) void Reconstruction::WriteImagesText(const std::string& path)
Here is the function:
def write_images_text(images, path):
    """
    see: src/base/reconstruction.cc
    void Reconstruction::ReadImagesText(const std::string& path)
    void Reconstruction::WriteImagesText(const std::string& path)
    """
    if len(images) == 0:
        mean_observations = 0
    else:
        mean_observations = sum((len(img.point3D_ids) for _, img in images.items()))/len(images)
    # Fixes: the original left header lines 2-4 as standalone string
    # expression statements, so only the first comment line was ever written;
    # they are now explicitly concatenated.
    HEADER = ('# Image list with two lines of data per image:\n'
              + '# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n'
              + '# POINTS2D[] as (X, Y, POINT3D_ID)\n'
              + '# Number of images: {}, mean observations per image: {}\n'.format(len(images), mean_observations))
    with open(path, "w") as fid:
        fid.write(HEADER)
        for _, img in images.items():
            image_header = [img.id, *img.qvec, *img.tvec, img.camera_id, img.name]
            first_line = " ".join(map(str, image_header))
            fid.write(first_line + "\n")
            points_strings = []
            for xy, point3D_id in zip(img.xys, img.point3D_ids):
                points_strings.append(" ".join(map(str, [*xy, point3D_id])))
            fid.write(" ".join(points_strings) + "\n")
13,040 | import os
import sys
import collections
import numpy as np
import struct
import cv2
The provided code snippet includes necessary dependencies for implementing the `write_points3D_text` function. Write a Python function `def write_points3D_text(points3D, path)` to solve the following problem:
see: src/base/reconstruction.cc void Reconstruction::ReadPoints3DText(const std::string& path) void Reconstruction::WritePoints3DText(const std::string& path)
Here is the function:
def write_points3D_text(points3D, path):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DText(const std::string& path)
void Reconstruction::WritePoints3DText(const std::string& path)
"""
if len(points3D) == 0:
mean_track_length = 0
else:
mean_track_length = sum((len(pt.image_ids) for _, pt in points3D.items()))/len(points3D)
HEADER = '# 3D point list with one line of data per point:\n'
'# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as (IMAGE_ID, POINT2D_IDX)\n'
'# Number of points: {}, mean track length: {}\n'.format(len(points3D), mean_track_length)
with open(path, "w") as fid:
fid.write(HEADER)
for _, pt in points3D.items():
point_header = [pt.id, *pt.xyz, *pt.rgb, pt.error]
fid.write(" ".join(map(str, point_header)) + " ")
track_strings = []
for image_id, point2D in zip(pt.image_ids, pt.point2D_idxs):
track_strings.append(" ".join(map(str, [image_id, point2D])))
fid.write(" ".join(track_strings) + "\n") | see: src/base/reconstruction.cc void Reconstruction::ReadPoints3DText(const std::string& path) void Reconstruction::WritePoints3DText(const std::string& path) |
13,041 | import os
import sys
import collections
import numpy as np
import struct
import cv2
def write_next_bytes(fid, data, format_char_sequence, endian_character="<"):
"""pack and write to a binary file.
:param fid:
:param data: data to send, if multiple elements are sent at the same time,
they should be encapsuled either in a list or a tuple
:param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
should be the same length as the data list or tuple
:param endian_character: Any of {@, =, <, >, !}
"""
if isinstance(data, (list, tuple)):
bytes = struct.pack(endian_character + format_char_sequence, *data)
else:
bytes = struct.pack(endian_character + format_char_sequence, data)
fid.write(bytes)
The provided code snippet includes necessary dependencies for implementing the `write_points3d_binary` function. Write a Python function `def write_points3d_binary(points3D, path_to_model_file)` to solve the following problem:
see: src/base/reconstruction.cc void Reconstruction::ReadPoints3DBinary(const std::string& path) void Reconstruction::WritePoints3DBinary(const std::string& path)
Here is the function:
def write_points3d_binary(points3D, path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DBinary(const std::string& path)
void Reconstruction::WritePoints3DBinary(const std::string& path)
"""
with open(path_to_model_file, "wb") as fid:
write_next_bytes(fid, len(points3D), "Q")
for _, pt in points3D.items():
write_next_bytes(fid, pt.id, "Q")
write_next_bytes(fid, pt.xyz.tolist(), "ddd")
write_next_bytes(fid, pt.rgb.tolist(), "BBB")
write_next_bytes(fid, pt.error, "d")
track_length = pt.image_ids.shape[0]
write_next_bytes(fid, track_length, "Q")
for image_id, point2D_id in zip(pt.image_ids, pt.point2D_idxs):
write_next_bytes(fid, [image_id, point2D_id], "ii") | see: src/base/reconstruction.cc void Reconstruction::ReadPoints3DBinary(const std::string& path) void Reconstruction::WritePoints3DBinary(const std::string& path) |
13,042 | import os
import argparse
from os.path import join
def load_parser():
parser = argparse.ArgumentParser('EasyMocap commond line tools')
parser.add_argument('path', type=str)
parser.add_argument('--out', type=str, default=None)
parser.add_argument('--cfg', type=str, default=None)
parser.add_argument('--camera', type=str, default=None)
parser.add_argument('--annot', type=str, default='annots', help="sub directory name to store the generated annotation files, default to be annots")
parser.add_argument('--sub', type=str, nargs='+', default=[],
help='the sub folder lists when in video mode')
parser.add_argument('--from_file', type=str, default=None)
parser.add_argument('--pid', type=int, nargs='+', default=[0],
help='the person IDs')
parser.add_argument('--max_person', type=int, default=-1,
help='maximum number of person')
parser.add_argument('--start', type=int, default=0,
help='frame start')
parser.add_argument('--end', type=int, default=100000,
help='frame end')
parser.add_argument('--step', type=int, default=1,
help='frame step')
#
# keypoints and body model
#
parser.add_argument('--cfg_model', type=str, default=None)
parser.add_argument('--body', type=str, default='body25', choices=['body15', 'body25', 'h36m', 'bodyhand', 'bodyhandface', 'handl', 'handr', 'total'])
parser.add_argument('--model', type=str, default='smpl', choices=['smpl', 'smplh', 'smplx', 'manol', 'manor'])
parser.add_argument('--gender', type=str, default='neutral',
choices=['neutral', 'male', 'female'])
# Input control
detec = parser.add_argument_group('Detection control')
detec.add_argument("--thres2d", type=float, default=0.3,
help="The threshold for suppress noisy kpts")
#
# Optimization control
#
recon = parser.add_argument_group('Reconstruction control')
recon.add_argument('--smooth3d', type=int,
help='the size of window to smooth keypoints3d', default=0)
recon.add_argument('--MAX_REPRO_ERROR', type=int,
help='The threshold of reprojection error', default=50)
recon.add_argument('--MAX_SPEED_ERROR', type=int,
help='The threshold of reprojection error', default=50)
recon.add_argument('--robust3d', action='store_true')
#
# visualization part
#
output = parser.add_argument_group('Output control')
output.add_argument('--vis_det', action='store_true')
output.add_argument('--vis_repro', action='store_true')
output.add_argument('--vis_smpl', action='store_true')
output.add_argument('--write_smpl_full', action='store_true')
parser.add_argument('--write_vertices', action='store_true')
output.add_argument('--vis_mask', action='store_true')
output.add_argument('--undis', action='store_true')
output.add_argument('--sub_vis', type=str, nargs='+', default=[],
help='the sub folder lists for visualization')
#
# debug
#
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--save_origin', action='store_true')
parser.add_argument('--restart', action='store_true')
parser.add_argument('--no_opt', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--opts',
help="Modify config options using the command-line",
default=[],
nargs='+')
parser.add_argument('--cfg_opts',
help="Modify config options using the command-line",
default=[],
nargs='+')
return parser | null |
13,043 | import os
import argparse
from os.path import join
def save_parser(args):
import yaml
res = vars(args)
os.makedirs(args.out, exist_ok=True)
with open(join(args.out, 'exp.yml'), 'w') as f:
yaml.dump(res, f)
def parse_parser(parser):
args = parser.parse_args()
if args.out is None:
print(' - [Warning] Please specify the output path `--out ${out}`')
print(' - [Warning] Default to {}/output'.format(args.path))
args.out = join(args.path, 'output')
if args.from_file is not None:
assert os.path.exists(args.from_file), args.from_file
with open(args.from_file) as f:
datas = f.readlines()
subs = [d for d in datas if not d.startswith('#')]
subs = [d.rstrip().replace('https://www.youtube.com/watch?v=', '') for d in subs]
newsubs = sorted(os.listdir(join(args.path, 'images')))
clips = []
for newsub in newsubs:
if newsub.split('+')[0] in subs:
clips.append(newsub)
for sub in subs:
if os.path.exists(join(args.path, 'images', sub)):
clips.append(sub)
args.sub = clips
if len(args.sub) == 0 and os.path.exists(join(args.path, 'images')):
args.sub = sorted(os.listdir(join(args.path, 'images')))
if args.sub[0].isdigit():
args.sub = sorted(args.sub, key=lambda x:int(x))
args.opts = {args.opts[2*i]:float(args.opts[2*i+1]) for i in range(len(args.opts)//2)}
save_parser(args)
return args | null |
13,044 | import cv2
import numpy as np
import json
def generate_colorbar(N = 20, cmap = 'jet', rand=True,
ret_float=False, ret_array=False, ret_rgb=False):
bar = ((np.arange(N)/(N-1))*255).astype(np.uint8).reshape(-1, 1)
colorbar = cv2.applyColorMap(bar, cv2.COLORMAP_JET).squeeze()
if False:
colorbar = np.clip(colorbar + 64, 0, 255)
if rand:
import random
random.seed(666)
index = [i for i in range(N)]
random.shuffle(index)
rgb = colorbar[index, :]
else:
rgb = colorbar
if ret_rgb:
rgb = rgb[:, ::-1]
if ret_float:
rgb = rgb/255.
if not ret_array:
rgb = rgb.tolist()
return rgb | null |
13,045 | import cv2
import numpy as np
import json
def get_rgb(index):
if isinstance(index, int):
if index == -1:
return (255, 255, 255)
if index < -1:
return (0, 0, 0)
# elif index == 0:
# return (245, 150, 150)
col = list(colors_bar_rgb[index%len(colors_bar_rgb)])[::-1]
elif isinstance(index, str):
col = colors_table.get(index, (1, 0, 0))
col = tuple([int(c*255) for c in col[::-1]])
else:
raise TypeError('index should be int or str')
return col
def get_rgb_01(index):
col = get_rgb(index)
return [i*1./255 for i in col[:3]] | null |
13,046 | import cv2
import numpy as np
import json
def plot_point(img, x, y, r, col, pid=-1, font_scale=-1, circle_type=-1):
cv2.circle(img, (int(x+0.5), int(y+0.5)), r, col, circle_type)
if font_scale == -1:
font_scale = img.shape[0]/4000
if pid != -1:
cv2.putText(img, '{}'.format(pid), (int(x+0.5), int(y+0.5)), cv2.FONT_HERSHEY_SIMPLEX, font_scale, col, 1) | null |
13,047 | import cv2
import numpy as np
import json
def get_rgb(index):
def plot_keypoints(img, points, pid, config, vis_conf=False, use_limb_color=True, lw=2, fliplr=False):
lw = max(lw, 2)
H, W = img.shape[:2]
for ii, (i, j) in enumerate(config['kintree']):
if i >= len(points) or j >= len(points):
continue
if (i >25 or j > 25) and config['nJoints'] != 42:
_lw = max(int(lw/4), 1)
else:
_lw = lw
pt1, pt2 = points[i], points[j]
if fliplr:
pt1 = (W-pt1[0], pt1[1])
pt2 = (W-pt2[0], pt2[1])
if use_limb_color:
col = get_rgb(config['colors'][ii])
else:
col = get_rgb(pid)
if pt1[-1] > 0.01 and pt2[-1] > 0.01:
image = cv2.line(
img, (int(pt1[0]+0.5), int(pt1[1]+0.5)), (int(pt2[0]+0.5), int(pt2[1]+0.5)),
col, _lw)
for i in range(min(len(points), config['nJoints'])):
x, y = points[i][0], points[i][1]
if fliplr:
x = W - x
c = points[i][-1]
if c > 0.01:
text_size = img.shape[0]/1000
col = get_rgb(pid)
radius = int(lw/1.5)
if i > 25 and config['nJoints'] != 42:
radius = max(int(radius/4), 1)
cv2.circle(img, (int(x+0.5), int(y+0.5)), radius, col, -1)
if vis_conf:
cv2.putText(img, '{:.1f}'.format(c), (int(x), int(y)),
cv2.FONT_HERSHEY_SIMPLEX, text_size, col, 2) | null |
13,048 | import cv2
import numpy as np
import json
def plot_bbox(img, bbox, pid, scale=1, vis_id=True):
# 画bbox: (l, t, r, b)
x1, y1, x2, y2, c = bbox
if c < 0.01:return img
x1 = int(round(x1*scale))
x2 = int(round(x2*scale))
y1 = int(round(y1*scale))
y2 = int(round(y2*scale))
color = get_rgb(pid)
lw = max(img.shape[0]//300, 2)
cv2.rectangle(img, (x1, y1), (x2, y2), color, lw)
if vis_id:
font_scale = img.shape[0]/1000
cv2.putText(img, '{}'.format(pid), (x1, y1+int(25*font_scale)), cv2.FONT_HERSHEY_SIMPLEX, font_scale, color, 2)
def plot_keypoints_auto(img, points, pid, vis_conf=False, use_limb_color=True, scale=1, lw=-1, config_name=None, lw_factor=1):
from ..dataset.config import CONFIG
if config_name is None:
config_name = {25: 'body25', 15: 'body15', 21: 'hand', 42:'handlr', 17: 'coco', 1:'points', 67:'bodyhand', 137: 'total', 79:'up',
19:'ochuman'}[len(points)]
config = CONFIG[config_name]
if lw == -1:
lw = img.shape[0]//200
if config_name == 'hand':
lw = img.shape[0]//100
lw = max(lw, 1)
for ii, (i, j) in enumerate(config['kintree']):
if i >= len(points) or j >= len(points):
continue
if i >= 25 and config_name in ['bodyhand', 'total']:
lw = max(img.shape[0]//400, 1)
pt1, pt2 = points[i], points[j]
if use_limb_color:
col = get_rgb(config['colors'][ii])
else:
col = get_rgb(pid)
if pt1[0] < -10000 or pt1[1] < -10000 or pt1[0] > 10000 or pt1[1] > 10000:
continue
if pt2[0] < -10000 or pt2[1] < -10000 or pt2[0] > 10000 or pt2[1] > 10000:
continue
if pt1[-1] > 0.01 and pt2[-1] > 0.01:
image = cv2.line(
img, (int(pt1[0]*scale+0.5), int(pt1[1]*scale+0.5)), (int(pt2[0]*scale+0.5), int(pt2[1]*scale+0.5)),
col, lw)
lw = img.shape[0]//200
if config_name == 'hand':
lw = img.shape[0]//500
lw = max(lw, 1)
for i in range(len(points)):
x, y = points[i][0]*scale, points[i][1]*scale
if x < 0 or y < 0 or x >10000 or y >10000:
continue
if i >= 25 and config_name in ['bodyhand', 'total']:
lw = max(img.shape[0]//400, 1)
c = points[i][-1]
if c > 0.01:
col = get_rgb(pid)
if len(points) == 1:
_lw = max(0, int(lw * lw_factor))
cv2.circle(img, (int(x+0.5), int(y+0.5)), _lw*2, col, lw*2)
plot_cross(img, int(x+0.5), int(y+0.5), width=_lw, col=col, lw=lw*2)
else:
cv2.circle(img, (int(x+0.5), int(y+0.5)), lw*2, col, -1)
if vis_conf:
cv2.putText(img, '{:.1f}'.format(c), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, col, 2)
def plot_keypoints_total(img, annots, scale, pid_offset=0):
_lw = img.shape[0] // 150
for annot in annots:
pid = annot['personID'] + pid_offset
for key in ['keypoints', 'handl2d', 'handr2d']:
if key not in annot.keys():continue
if key in ['handl2d', 'handr2d', 'face2d']:
lw = _lw // 2
else:
lw = _lw
lw = max(lw, 1)
plot_keypoints_auto(img, annot[key], pid, vis_conf=False, use_limb_color=False, scale=scale, lw=lw)
if 'bbox' not in annot.keys() or (annot['bbox'][0] < 0 or annot['bbox'][0] >10000):
continue
plot_bbox(img, annot['bbox'], pid, scale=scale, vis_id=True)
return img | null |
13,049 | import cv2
import numpy as np
import json
def plot_line(img, pt1, pt2, lw, col):
cv2.line(img, (int(pt1[0]+0.5), int(pt1[1]+0.5)), (int(pt2[0]+0.5), int(pt2[1]+0.5)),
col, lw)
def plot_cross(img, x, y, col, width=-1, lw=-1):
if lw == -1:
lw = max(1, int(round(img.shape[0]/1000)))
width = lw * 5
cv2.line(img, (int(x-width), int(y)), (int(x+width), int(y)), col, lw)
cv2.line(img, (int(x), int(y-width)), (int(x), int(y+width)), col, lw)
def plot_points2d(img, points2d, lines, lw=-1, col=(0, 255, 0), putText=True, style='+'):
# 将2d点画上去
if points2d.shape[1] == 2:
points2d = np.hstack([points2d, np.ones((points2d.shape[0], 1))])
if lw == -1:
lw = img.shape[0]//200
for i, (x, y, v) in enumerate(points2d):
if v < 0.01:
continue
c = col
if '+' in style:
plot_cross(img, x, y, width=10, col=c, lw=lw*2)
if 'o' in style:
cv2.circle(img, (int(x), int(y)), 10, c, lw*2)
cv2.circle(img, (int(x), int(y)), lw, c, lw)
if putText:
c = col[::-1]
font_scale = img.shape[0]/1000
cv2.putText(img, '{}'.format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, font_scale, c, 2)
for i, j in lines:
if points2d[i][2] < 0.01 or points2d[j][2] < 0.01:
continue
plot_line(img, points2d[i], points2d[j], max(1, lw//2), col) | null |
13,050 | import cv2
import numpy as np
import json
def get_row_col(l, square):
def merge(images, row=-1, col=-1, resize=False, ret_range=False, square=False, **kwargs):
if row == -1 and col == -1:
row, col = get_row_col(len(images), square)
height = images[0].shape[0]
width = images[0].shape[1]
# special case
if height > width:
if len(images) == 3:
row, col = 1, 3
if len(images[0].shape) > 2:
ret_img = np.zeros((height * row, width * col, images[0].shape[2]), dtype=np.uint8) + 255
else:
ret_img = np.zeros((height * row, width * col), dtype=np.uint8) + 255
ranges = []
for i in range(row):
for j in range(col):
if i*col + j >= len(images):
break
img = images[i * col + j]
# resize the image size
img = cv2.resize(img, (width, height))
ret_img[height * i: height * (i+1), width * j: width * (j+1)] = img
ranges.append((width*j, height*i, width*(j+1), height*(i+1)))
if resize:
min_height = 1000
if ret_img.shape[0] > min_height:
scale = min_height/ret_img.shape[0]
ret_img = cv2.resize(ret_img, None, fx=scale, fy=scale)
if ret_range:
return ret_img, ranges
return ret_img | null |
13,051 | import numpy as np
import os
from os.path import join
from glob import glob
from .file_utils import read_json, read_annot
def read_annot(annotname, mode='body25'):
data = read_json(annotname)
if not isinstance(data, list):
data = data['annots']
for i in range(len(data)):
if 'id' not in data[i].keys():
data[i]['id'] = data[i].pop('personID')
if 'keypoints2d' in data[i].keys() and 'keypoints' not in data[i].keys():
data[i]['keypoints'] = data[i].pop('keypoints2d')
for key in ['bbox', 'keypoints',
'bbox_handl2d', 'handl2d',
'bbox_handr2d', 'handr2d',
'bbox_face2d', 'face2d']:
if key not in data[i].keys():continue
data[i][key] = np.array(data[i][key])
if key == 'face2d':
# TODO: Make parameters, 17 is the offset for the eye brows,
# etc. 51 is the total number of FLAME compatible landmarks
data[i][key] = data[i][key][17:17+51, :]
if 'bbox' in data[i].keys():
data[i]['bbox'] = data[i]['bbox'][:5]
if data[i]['bbox'][-1] < 0.001:
print('{}/{} bbox conf = 0, may be error'.format(annotname, i))
data[i]['bbox'][-1] = 0
# combine the basic results
if mode == 'body25':
data[i]['keypoints'] = data[i].get('keypoints', np.zeros((25, 3)))
elif mode == 'body15':
data[i]['keypoints'] = data[i]['keypoints'][:15, :]
elif mode in ['handl', 'handr']:
data[i]['keypoints'] = np.array(data[i][mode+'2d']).astype(np.float32)
key = 'bbox_'+mode+'2d'
if key not in data[i].keys():
data[i]['bbox'] = np.array(get_bbox_from_pose(data[i]['keypoints'])).astype(np.float32)
else:
data[i]['bbox'] = data[i]['bbox_'+mode+'2d'][:5]
elif mode == 'total':
data[i]['keypoints'] = np.vstack([data[i][key] for key in ['keypoints', 'handl2d', 'handr2d', 'face2d']])
elif mode == 'bodyhand':
data[i]['keypoints'] = np.vstack([data[i][key] for key in ['keypoints', 'handl2d', 'handr2d']])
elif mode == 'bodyhandface':
data[i]['keypoints'] = np.vstack([data[i][key] for key in ['keypoints', 'handl2d', 'handr2d', 'face2d']])
conf = data[i]['keypoints'][..., -1]
conf[conf<0] = 0
data.sort(key=lambda x:x['id'])
return data
def read_keypoints2d(filename, mode):
return read_annot(filename, mode) | null |
13,052 | import numpy as np
import os
from os.path import join
from glob import glob
from .file_utils import read_json, read_annot
def read_json(path):
def read_keypoints3d_dict(filename):
data = read_json(filename)
res_ = {}
for d in data:
pid = d['id'] if 'id' in d.keys() else d['personID']
pose3d = np.array(d['keypoints3d'], dtype=np.float32)
if pose3d.shape[1] == 3:
pose3d = np.hstack([pose3d, np.ones((pose3d.shape[0], 1))])
res_[pid] = {
'id': pid,
'keypoints3d': pose3d
}
return res_ | null |
13,053 | import numpy as np
import os
from os.path import join
from glob import glob
from .file_utils import read_json, read_annot
def read_keypoints3d_a4d(outname):
res_ = []
with open(outname, "r") as file:
lines = file.readlines()
if len(lines) < 2:
return res_
nPerson, nJoints = int(lines[0]), int(lines[1])
# 只包含每个人的结果
lines = lines[1:]
# 每个人的都写了关键点数量
line_per_person = 1 + 1 + nJoints
for i in range(nPerson):
trackId = int(lines[i*line_per_person+1])
content = ''.join(lines[i*line_per_person+2:i*line_per_person+2+nJoints])
pose3d = np.fromstring(content, dtype=float, sep=' ').reshape((nJoints, 4))
# association4d 的关节顺序和正常的定义不一样
pose3d = pose3d[[4, 1, 5, 9, 13, 6, 10, 14, 0, 2, 7, 11, 3, 8, 12], :]
res_.append({'id':trackId, 'keypoints3d':np.array(pose3d, dtype=np.float32)})
return res_ | null |
13,054 | import time
import tabulate
def dummyfunc():
time.sleep(1) | null |
13,055 | import numpy as np
import cv2
from easymocap.datasets.base import crop_image
from easymocap.estimator.wrapper_base import bbox_from_keypoints
from easymocap.mytools.vis_base import merge, plot_keypoints_auto
from .debug_utils import log, mywarn, myerror
def make_Cnk(n, k):
import itertools
res = {}
for n_ in range(3, n+1):
n_0 = [i for i in range(n_)]
for k_ in range(2, k+1):
res[(n_, k_)] = list(map(list, itertools.combinations(n_0, k_)))
return res | null |
13,056 | import numpy as np
import cv2
from easymocap.datasets.base import crop_image
from easymocap.estimator.wrapper_base import bbox_from_keypoints
from easymocap.mytools.vis_base import merge, plot_keypoints_auto
from .debug_utils import log, mywarn, myerror
def batch_triangulate(keypoints_, Pall, min_view=2):
""" triangulate the keypoints of whole body
Args:
keypoints_ (nViews, nJoints, 3): 2D detections
Pall (nViews, 3, 4) | (nViews, nJoints, 3, 4): projection matrix of each view
min_view (int, optional): min view for visible points. Defaults to 2.
Returns:
keypoints3d: (nJoints, 4)
"""
# keypoints: (nViews, nJoints, 3)
# Pall: (nViews, 3, 4)
# A: (nJoints, nViewsx2, 4), x: (nJoints, 4, 1); b: (nJoints, nViewsx2, 1)
v = (keypoints_[:, :, -1]>0).sum(axis=0)
valid_joint = np.where(v >= min_view)[0]
keypoints = keypoints_[:, valid_joint]
conf3d = keypoints[:, :, -1].sum(axis=0)/v[valid_joint]
# P2: P矩阵的最后一行:(1, nViews, 1, 4)
if len(Pall.shape) == 3:
P0 = Pall[None, :, 0, :]
P1 = Pall[None, :, 1, :]
P2 = Pall[None, :, 2, :]
else:
P0 = Pall[:, :, 0, :].swapaxes(0, 1)
P1 = Pall[:, :, 1, :].swapaxes(0, 1)
P2 = Pall[:, :, 2, :].swapaxes(0, 1)
# uP2: x坐标乘上P2: (nJoints, nViews, 1, 4)
uP2 = keypoints[:, :, 0].T[:, :, None] * P2
vP2 = keypoints[:, :, 1].T[:, :, None] * P2
conf = keypoints[:, :, 2].T[:, :, None]
Au = conf * (uP2 - P0)
Av = conf * (vP2 - P1)
A = np.hstack([Au, Av])
u, s, v = np.linalg.svd(A)
X = v[:, -1, :]
X = X / X[:, 3:]
# out: (nJoints, 4)
result = np.zeros((keypoints_.shape[1], 4))
result[valid_joint, :3] = X[:, :3]
result[valid_joint, 3] = conf3d #* (conf[..., 0].sum(axis=-1)>min_view)
return result
def remove_outview(kpts2d, out_view, debug):
if len(out_view) == 0:
return False
outv = out_view[0]
if debug:
mywarn('[triangulate] remove outview: {} from {}'.format(outv, out_view))
kpts2d[outv] = 0.
return True
def remove_outjoint(kpts2d, Pall, out_joint, dist_max, min_view=3, debug=False):
if len(out_joint) == 0:
return False
if debug:
mywarn('[triangulate] remove outjoint: {}'.format(out_joint))
for nj in out_joint:
valid = np.where(kpts2d[:, nj, -1] > 0)[0]
if len(valid) < min_view:
# if less than 3 visible view, set these unvisible
kpts2d[:, nj, -1] = 0
continue
if len(valid) > MAX_VIEWS:
# only select max points
conf = -kpts2d[:, nj, -1]
valid = conf.argsort()[:MAX_VIEWS]
index_j, point = robust_triangulate_point(kpts2d[valid, nj:nj+1], Pall[valid], dist_max=dist_max, min_v=3)
index_j = valid[index_j]
# print('select {} for joint {}'.format(index_j, nj))
set0 = np.zeros(kpts2d.shape[0])
set0[index_j] = 1.
kpts2d[:, nj, -1] *= set0
return True
def project_and_distance(kpts3d, RT, kpts2d):
kpts_proj = project_points(kpts3d, RT)
# 1. distance between input and projection
conf = (kpts3d[None, :, -1] > 0) * (kpts2d[:, :, -1] > 0)
dist = np.linalg.norm(kpts_proj[..., :2] - kpts2d[..., :2], axis=-1) * conf
return dist, conf
def log(text):
myprint(text, 'info')
def mywarn(text):
myprint(text, 'warn')
def iterative_triangulate(kpts2d, RT, previous=None,
min_conf=0.1, min_view=3, min_joints=3, dist_max=0.05, dist_vel=0.05,
thres_outlier_view=0.4, thres_outlier_joint=0.4, debug=False):
kpts2d = kpts2d.copy()
conf = kpts2d[..., -1]
kpts2d[conf<min_conf] = 0.
if debug:
log('[triangulate] kpts2d: {}'.format(kpts2d.shape))
# TODO: consider large motion
if previous is not None:
dist, conf = project_and_distance(previous, RT, kpts2d)
nottrack = (dist > dist_vel) & conf
if nottrack.sum() > 0:
kpts2d[nottrack] = 0.
if debug:
log('[triangulate] Remove with track {}'.format(np.where(nottrack)))
while True:
# 0. triangulate and project
kpts3d = batch_triangulate(kpts2d, RT, min_view=min_view)
dist, conf = project_and_distance(kpts3d, RT, kpts2d)
# 2. find the outlier
vv, jj = np.where(dist > dist_max)
if vv.shape[0] < 1:
if debug:
log('[triangulate] Not found outlier, break')
break
ratio_outlier_view = (dist>dist_max).sum(axis=1)/(1e-5 + conf.sum(axis=1))
ratio_outlier_joint = (dist>dist_max).sum(axis=0)/(1e-5 + conf.sum(axis=0))
# 3. find the totally wrong detections
out_view = np.where(ratio_outlier_view > thres_outlier_view)[0]
out_joint = np.where(ratio_outlier_joint > thres_outlier_joint)[0]
if len(out_view) > 1:
dist_view = dist.sum(axis=1)/(1e-5 + conf.sum(axis=1))
out_view = out_view.tolist()
out_view.sort(key=lambda x:-dist_view[x])
if debug: mywarn('[triangulate] Remove outlier view: {}'.format(ratio_outlier_view))
if remove_outview(kpts2d, out_view, debug): continue
if remove_outjoint(kpts2d, RT, out_joint, dist_max, debug=debug): continue
if debug:
log('[triangulate] Directly remove {}, {}'.format(vv, jj))
kpts2d[vv, jj, -1] = 0.
if debug:
log('[triangulate] finally {} valid points'.format((kpts3d[..., -1]>0).sum()))
if (kpts3d[..., -1]>0).sum() < min_joints:
kpts3d[..., -1] = 0.
kpts2d[..., -1] = 0.
return kpts3d, kpts2d
return kpts3d, kpts2d | null |
13,057 | import numpy as np
import cv2
from easymocap.datasets.base import crop_image
from easymocap.estimator.wrapper_base import bbox_from_keypoints
from easymocap.mytools.vis_base import merge, plot_keypoints_auto
from .debug_utils import log, mywarn, myerror
def skew_op(x):
skew_op = lambda x: np.array([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])
res = np.zeros((3, 3), dtype=x.dtype)
# 0, -z, y
res[0, 1] = -x[2, 0]
res[0, 2] = x[1, 0]
# z, 0, -x
res[1, 0] = x[2, 0]
res[1, 2] = -x[0, 0]
# -y, x, 0
res[2, 0] = -x[1, 0]
res[2, 1] = x[0, 0]
return res
def fundamental_op(K0, K1, R_0, T_0, R_1, T_1):
invK0 = np.linalg.inv(K0)
return invK0.T @ (R_0 @ R_1.T) @ K1.T @ skew_op(K1 @ R_1 @ R_0.T @ (T_0 - R_0 @ R_1.T @ T_1)) | null |
13,058 | import numpy as np
import cv2
from easymocap.datasets.base import crop_image
from easymocap.estimator.wrapper_base import bbox_from_keypoints
from easymocap.mytools.vis_base import merge, plot_keypoints_auto
from .debug_utils import log, mywarn, myerror
The provided code snippet includes necessary dependencies for implementing the `drawlines` function. Write a Python function `def drawlines(img1,img2,lines,pts1,pts2)` to solve the following problem:
img1 - image on which we draw the epilines for the points in img2 lines - corresponding epilines
Here is the function:
def drawlines(img1,img2,lines,pts1,pts2):
''' img1 - image on which we draw the epilines for the points in img2
lines - corresponding epilines '''
r,c = img1.shape[:2]
for r,pt1,pt2 in zip(lines,pts1,pts2):
pt1 = list(map(lambda x:int(x+0.5), pt1[:2].tolist()))
pt2 = list(map(lambda x:int(x+0.5), pt2[:2].tolist()))
if pt1[0] < 0 or pt1[1] < 0:
continue
color = tuple(np.random.randint(0,255,3).tolist())
x0,y0 = map(int, [0, -r[2]/r[1] ])
x1,y1 = map(int, [c, -(r[2]+r[0]*c)/r[1] ])
img1 = cv2.line(img1, (x0,y0), (x1,y1), color,1)
img1 = cv2.circle(img1,tuple(pt1),5,color,-1)
img2 = cv2.circle(img2,tuple(pt2),5,color,-1)
return img1,img2 | img1 - image on which we draw the epilines for the points in img2 lines - corresponding epilines |
13,059 | import numpy as np
import cv2
from easymocap.datasets.base import crop_image
from easymocap.estimator.wrapper_base import bbox_from_keypoints
from easymocap.mytools.vis_base import merge, plot_keypoints_auto
from .debug_utils import log, mywarn, myerror
def check_cluster(affinity, row, views, dimGroups, indices, p2dAssigned, visited):
affinity_row = affinity[row].copy()
# given affinity and row, select the combine of all possible set
cluster = np.where((affinity[row]>0)&(p2dAssigned==-1)&(visited==0))[0].tolist()
cluster.sort(key=lambda x:-affinity[row, x])
views_ = views[cluster]
view_count = np.bincount(views[cluster])
indices_all = [indices]
for col in cluster:
v = views[col]
nOld = len(indices_all)
if indices[v] != -1: # already assigned, copy and make new
for i in range(nOld):
ind = indices_all[i].copy()
ind[v] = col
indices_all.append(ind)
else: # not assigned, assign
for i in range(nOld):
indices_all[i][v] = col
return indices_all | null |
13,060 | import numpy as np
import cv2
from easymocap.datasets.base import crop_image
from easymocap.estimator.wrapper_base import bbox_from_keypoints
from easymocap.mytools.vis_base import merge, plot_keypoints_auto
from .debug_utils import log, mywarn, myerror
def views_from_dimGroups(dimGroups):
views = np.zeros(dimGroups[-1], dtype=np.int)
for nv in range(len(dimGroups) - 1):
views[dimGroups[nv]:dimGroups[nv+1]] = nv
return views | null |
13,061 | import numpy as np
import cv2
from easymocap.datasets.base import crop_image
from easymocap.estimator.wrapper_base import bbox_from_keypoints
from easymocap.mytools.vis_base import merge, plot_keypoints_auto
from .debug_utils import log, mywarn, myerror
def SimpleConstrain(dimGroups):
class SimpleMatchAndTriangulator(SimpleTriangulator):
def __init__(self, num_joints, min_views, min_joints, cfg_svt, cfg_track, **cfg) -> None:
def log(self, text):
def warn(self, text):
def distance_by_epipolar(pts0, pts1, K0, K1, R0, T0, R1, T1):
def _simple_associate2d_triangulate(self, data, affinity, dimGroups, prev_id):
def calculate_affinity_MxM(dims, dimGroups, data, key, DIST_MAX):
def _calculate_affinity_MxM(self, dims, dimGroups, data, key):
def _calculate_affinity_MxN(self, dims, dimGroups, data, key, results):
def _svt_optimize_affinity(self, affinity, dimGroups):
def _track_add(self, res):
def _track_update(self, res, pid):
def _track_merge(self, res, pid):
def _track_and_update(self, data, results):
def check_dist(k3d_check):
def __call__(self, data):
def simple_match(data):
key = 'keypoints2d'
dims = [d.shape[0] for d in data[key]]
dimGroups = np.cumsum([0] + dims)
affinity = SimpleMatchAndTriangulator.calculate_affinity_MxM(dims, dimGroups, data, key, DIST_MAX=0.1)
import pymatchlr
observe = np.ones_like(affinity)
cfg_svt = {
'debug': 1,
'maxIter': 10,
'w_sparse': 0.1,
'w_rank': 50,
'tol': 0.0001,
'aff_min': 0.3,
}
affinity = pymatchlr.matchSVT(affinity, dimGroups, SimpleConstrain(dimGroups), observe, cfg_svt)
return affinity, dimGroups | null |
13,062 | import cv2
import numpy as np
from ..mytools.file_utils import write_common_results
def write_common_results(dumpname=None, results=[], keys=[], fmt='%2.3f'):
def encode_detect(data):
res = write_common_results(None, data, ['keypoints3d'])
res = res.replace('\r', '').replace('\n', '').replace(' ', '')
return res.encode('ascii') | null |
13,063 | import cv2
import numpy as np
from ..mytools.file_utils import write_common_results
def write_common_results(dumpname=None, results=[], keys=[], fmt='%2.3f'):
    """Serialize a list of result dicts as JSON-formatted text.

    Each entry keeps its 'id', an optional 'type', plus the arrays listed
    in `keys` (serialized via myarray2string). If `dumpname` is given the
    text is written to that file (its directory created via mkout),
    otherwise the text is returned as a string.
    (The `results`/`keys` defaults are never mutated here.)
    """
    format_out = {'float_kind': lambda x: fmt % x}
    out_text = []
    out_text.append('[\n')
    for idata, data in enumerate(results):
        out_text.append(' {\n')
        output = {}
        output['id'] = data['id']
        for k in ['type']:
            if k in data.keys():
                output[k] = '\"{}\"'.format(data[k])
        keys_current = [k for k in keys if k in data.keys()]
        for key in keys_current:
            # np.array2string fails on very large arrays, hence the custom
            # serializer (myarray2string) instead.
            output[key] = myarray2string(data[key], separator=', ', fmt=fmt)
        # BUGFIX: comma placement used `keys_current[-1]`, which raised
        # IndexError whenever `keys` was empty; compare against the last
        # key that is actually written instead.
        out_keys = list(output.keys())
        for key in out_keys:
            out_text.append(' \"{}\": {}'.format(key, output[key]))
            if key != out_keys[-1]:
                out_text.append(',\n')
            else:
                out_text.append('\n')
        out_text.append(' }')
        if idata != len(results) - 1:
            out_text.append(',\n')
        else:
            out_text.append('\n')
    out_text.append(']\n')
    if dumpname is not None:
        mkout(dumpname)
        with open(dumpname, 'w') as f:
            f.writelines(out_text)
    else:
        return ''.join(out_text)
def encode_smpl(data):
    """Serialize SMPL parameters to compact ASCII bytes."""
    fields = ['poses', 'shapes', 'expression', 'Rh', 'Th']
    text = write_common_results(None, data, fields)
    for ch in ('\r', '\n', ' '):
        text = text.replace(ch, '')
    return text.encode('ascii')
13,064 | import cv2
import numpy as np
from ..mytools.file_utils import write_common_results
def encode_image(image):
    """JPEG-encode a frame (ndarray) and return the raw bytes.

    Used to ship frames over a socket; JPEG quality is fixed at 90.
    """
    fourcc = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
    # Encode the frame to an in-memory JPEG buffer (a (N, 1) uint8 array).
    result, img_encode = cv2.imencode('.jpg', image, fourcc)
    # BUGFIX: ndarray.tostring() is deprecated and removed in NumPy 2.0;
    # tobytes() is the byte-identical replacement.
    return np.asarray(img_encode).tobytes()
13,065 | import socket
import time
from threading import Thread
from queue import Queue
def log(x):
    """Print *x* prefixed with a month-day-time timestamp."""
    from datetime import datetime
    prefix = datetime.now().strftime("%m-%d-%H:%M:%S.%f ")
    print(prefix + x)
13,066 | import open3d as o3d
from ..config import load_object
from ..visualize.o3dwrapper import Vector3dVector, create_mesh, load_mesh
from ..mytools import Timer
from ..mytools.vis_base import get_rgb_01
from .base import BaseSocket, log
import json
import numpy as np
from os.path import join
import os
from ..assignment.criterion import CritRange
import copy
rotate = False


def o3d_callback_rotate(vis=None):
    """Key callback: flip the module-level auto-rotate flag.

    Returns False (per the Open3D callback convention — presumably "no
    forced geometry update"; verify against the registration site).
    """
    global rotate
    rotate = not rotate
    return False
13,067 | import torch
import torch.nn as nn
from .lbs import batch_rodrigues
from .lbs import lbs, dqs
import os.path as osp
import pickle
import numpy as np
import os
def to_np(array, dtype=np.float32):
    """Return *array* as a dense numpy array of the given dtype.

    scipy sparse inputs (detected by type name, so scipy need not be
    imported here) are densified first.
    """
    dense = array.todense() if 'scipy.sparse' in str(type(array)) else array
    return np.array(dense, dtype=dtype)
13,068 | import torch
import torch.nn as nn
from .lbs import batch_rodrigues
from .lbs import lbs, dqs
import os.path as osp
import pickle
import numpy as np
import os
def to_tensor(array, dtype=torch.float32, device=torch.device('cpu')):
    """Convert *array* (ndarray/list/tensor) to a tensor of `dtype` on `device`.

    Existing tensors are moved/cast (a no-op returns the same tensor).
    """
    # BUGFIX: the old check `'torch.tensor' not in str(type(array))` never
    # matched (the class name is 'torch.Tensor', capital T), so tensor
    # inputs were always re-wrapped via torch.tensor(), which copies,
    # warns, and detaches from the autograd graph.
    if isinstance(array, torch.Tensor):
        return array.to(device=device, dtype=dtype)
    return torch.tensor(array, dtype=dtype).to(device)
def load_regressor(regressor_path):
    """Load a joint-regressor matrix from .npy or sparse-text .txt.

    The .txt layout (as read here): the first line carries the shape after
    a leading token, remaining lines are `<row> <col> <value>` triplets.

    Raises:
        ValueError: if the file extension is not supported.
    """
    if regressor_path.endswith('.npy'):
        X_regressor = to_tensor(np.load(regressor_path))
    elif regressor_path.endswith('.txt'):
        data = np.loadtxt(regressor_path)
        with open(regressor_path, 'r') as f:
            shape = f.readline().split()[1:]
        reg = np.zeros((int(shape[0]), int(shape[1])))
        for i, j, v in data:
            reg[int(i), int(j)] = v
        X_regressor = to_tensor(reg)
    else:
        # BUGFIX: replaced `import ipdb; ipdb.set_trace()` (an interactive
        # debugger trap) with a proper, catchable exception.
        raise ValueError(
            'Unsupported regressor format: {}'.format(regressor_path))
    return X_regressor
13,069 | import torch
import torch.nn as nn
from .lbs import batch_rodrigues
from .lbs import lbs, dqs
import os.path as osp
import pickle
import numpy as np
import os
def load_bodydata(model_type, model_path, gender):
    """Load the pickled body-model data dict.

    `model_path` may be a directory (the file name is then derived as e.g.
    SMPL_NEUTRAL.pkl from `model_type`/`gender`) or a direct .pkl path.
    latin1 decoding keeps Python-2 era pickles loadable.
    """
    if osp.isdir(model_path):
        fname = '{}_{}.{ext}'.format(model_type.upper(), gender.upper(), ext='pkl')
        smpl_path = osp.join(model_path, fname)
    else:
        smpl_path = model_path
    assert osp.exists(smpl_path), 'Path {} does not exist!'.format(smpl_path)
    with open(smpl_path, 'rb') as fp:
        return pickle.load(fp, encoding='latin1')
13,070 | import numpy as np
from os.path import join
def merge_params(param_list, share_shape=True):
    """Concatenate per-frame SMPL parameter dicts along the frame axis.

    With share_shape=True the 'shapes' entry is replaced by its mean so a
    single shape vector is shared across all frames.
    """
    known = ('poses', 'shapes', 'Rh', 'Th', 'expression')
    merged = {k: np.vstack([p[k] for p in param_list])
              for k in known if k in param_list[0]}
    if share_shape:
        merged['shapes'] = merged['shapes'].mean(axis=0, keepdims=True)
    return merged
13,071 | import numpy as np
from os.path import join
def select_nf(params_all, nf):
    """Slice frame *nf* (keeping a leading 1-frame axis) from stacked params.

    A single shared 'shapes' row (shape[0] == 1) is passed through as-is.
    """
    sel = slice(nf, nf + 1)
    output = {k: params_all[k][sel, :] for k in ('poses', 'Rh', 'Th')}
    if 'expression' in params_all:
        output['expression'] = params_all['expression'][sel, :]
    shapes = params_all['shapes']
    output['shapes'] = shapes if shapes.shape[0] == 1 else shapes[sel, :]
    return output
13,072 | import numpy as np
from os.path import join
class SMPLlayer(nn.Module):
    """Differentiable SMPL-family body model (smpl/smplh/smplx/mano).

    Loads the pickled model data and registers the template, blend shapes,
    skinning weights and kinematic tree as buffers. `forward` maps pose,
    shape, global rotation (Rh) and translation (Th) parameters to posed
    vertices, or to regressed joints when an extra joint regressor was
    supplied and `return_verts=False`.
    """
    def __init__(self, model_path, model_type='smpl', gender='neutral', device=None,
                 regressor_path=None,
                 use_pose_blending=True, use_shape_blending=True, use_joints=True,
                 with_color=False, use_lbs=True,
                 **kwargs) -> None:
        super(SMPLlayer, self).__init__()
        dtype = torch.float32
        self.dtype = dtype
        self.use_pose_blending = use_pose_blending
        self.use_shape_blending = use_shape_blending
        self.use_joints = use_joints
        if isinstance(device, str):
            device = torch.device(device)
        self.device = device
        self.model_type = model_type
        self.NUM_POSES = NUM_POSES[model_type]
        # create the SMPL model; `use_lbs` picks linear blend skinning vs
        # dual-quaternion skinning
        if use_lbs:
            self.lbs = lbs
        else:
            self.lbs = dqs
        data = load_bodydata(model_type, model_path, gender)
        if with_color:
            self.color = data['vertex_colors']
        else:
            self.color = None
        self.faces = data['f']
        self.register_buffer('faces_tensor',
                             to_tensor(to_np(self.faces, dtype=np.int64),
                                       dtype=torch.long))
        for key in ['J_regressor', 'v_template', 'weights']:
            val = to_tensor(to_np(data[key]), dtype=dtype)
            self.register_buffer(key, val)
        # add pose blending
        if use_pose_blending:
            # Pose blend shape basis: 6890 x 3 x 207, reshaped to 6890*3 x 207
            num_pose_basis = data['posedirs'].shape[-1]
            # 207 x 20670; keep the unreshaped copy for the joint regressor below
            posedirs = data['posedirs']
            data['posedirs'] = np.reshape(data['posedirs'], [-1, num_pose_basis]).T
            val = to_tensor(to_np(data['posedirs']), dtype=dtype)
            self.register_buffer('posedirs', val)
        else:
            self.posedirs = None
        # add shape blending
        if use_shape_blending:
            val = to_tensor(to_np(data['shapedirs']), dtype=dtype)
            self.register_buffer('shapedirs', val)
        else:
            self.shapedirs = None
        if use_shape_blending:
            self.J_shaped = None
        else:
            # without shape blending the rest-pose joints are fixed
            val = to_tensor(to_np(data['J']), dtype=dtype)
            self.register_buffer('J_shaped', val)
        self.nVertices = self.v_template.shape[0]
        # indices of parents for each joint (root marked with -1)
        parents = to_tensor(to_np(data['kintree_table'][0])).long()
        parents[0] = -1
        self.register_buffer('parents', parents)
        if self.use_shape_blending:
            if self.model_type == 'smplx':
                # shape: smplx stacks shape and expression dirs in one basis
                self.num_expression_coeffs = 10
                self.num_shapes = 10
                self.shapedirs = self.shapedirs[:, :, :self.num_shapes+self.num_expression_coeffs]
            elif self.model_type in ['smpl', 'smplh']:
                self.shapedirs = self.shapedirs[:, :, :NUM_SHAPES]
        # joints regressor: precompute templates/weights/blendshapes for the
        # extended vertex set (original joints + regressed keypoints)
        if regressor_path is not None and use_joints:
            X_regressor = load_regressor(regressor_path)
            X_regressor = torch.cat((self.J_regressor, X_regressor), dim=0)
            j_J_regressor = torch.zeros(self.J_regressor.shape[0], X_regressor.shape[0], device=device)
            for i in range(self.J_regressor.shape[0]):
                j_J_regressor[i, i] = 1
            j_v_template = X_regressor @ self.v_template
            # (25, 24)
            j_weights = X_regressor @ self.weights
            if self.use_pose_blending:
                j_posedirs = torch.einsum('ab, bde->ade', [X_regressor, torch.Tensor(posedirs)]).numpy()
                j_posedirs = np.reshape(j_posedirs, [-1, num_pose_basis]).T
                j_posedirs = to_tensor(j_posedirs)
                self.register_buffer('j_posedirs', j_posedirs)
            else:
                self.j_posedirs = None
            if self.use_shape_blending:
                j_shapedirs = torch.einsum('vij,kv->kij', [self.shapedirs, X_regressor])
                self.register_buffer('j_shapedirs', j_shapedirs)
            else:
                self.j_shapedirs = None
            self.register_buffer('j_weights', j_weights)
            self.register_buffer('j_v_template', j_v_template)
            self.register_buffer('j_J_regressor', j_J_regressor)
        if self.model_type == 'smplh':
            # load the MANO hand PCA bases for both hands
            self.num_pca_comps = kwargs['num_pca_comps']
            from os.path import join
            for key in ['LEFT', 'RIGHT']:
                left_file = join(kwargs['mano_path'], 'MANO_{}.pkl'.format(key))
                with open(left_file, 'rb') as f:
                    data = pickle.load(f, encoding='latin1')
                val = to_tensor(to_np(data['hands_mean'].reshape(1, -1)), dtype=dtype)
                self.register_buffer('mHandsMean'+key[0], val)
                val = to_tensor(to_np(data['hands_components'][:self.num_pca_comps, :]), dtype=dtype)
                self.register_buffer('mHandsComponents'+key[0], val)
            self.use_pca = kwargs['use_pca']
            self.use_flat_mean = kwargs['use_flat_mean']
            if self.use_pca:
                self.NUM_POSES = 66 + self.num_pca_comps * 2
            else:
                self.NUM_POSES = 66 + 15 * 3 * 2
        elif self.model_type == 'mano':
            self.num_pca_comps = kwargs['num_pca_comps']
            self.use_pca = kwargs['use_pca']
            self.use_flat_mean = kwargs['use_flat_mean']
            if self.use_pca:
                self.NUM_POSES = self.num_pca_comps + 3
            else:
                self.NUM_POSES = 45 + 3
            val = to_tensor(to_np(data['hands_mean'].reshape(1, -1)), dtype=dtype)
            self.register_buffer('mHandsMean', val)
            val = to_tensor(to_np(data['hands_components'][:self.num_pca_comps, :]), dtype=dtype)
            self.register_buffer('mHandsComponents', val)
        elif self.model_type == 'smplx':
            # hand pose: smplx ships per-hand means/components in the model file
            self.num_pca_comps = 6
            from os.path import join
            for key in ['Ll', 'Rr']:
                val = to_tensor(to_np(data['hands_mean'+key[1]].reshape(1, -1)), dtype=dtype)
                self.register_buffer('mHandsMean'+key[0], val)
                val = to_tensor(to_np(data['hands_components'+key[1]][:self.num_pca_comps, :]), dtype=dtype)
                self.register_buffer('mHandsComponents'+key[0], val)
            self.use_pca = True
            self.use_flat_mean = True
        self.to(self.device)

    @staticmethod
    def extend_hand(poses, use_pca, use_flat_mean, coeffs, mean):
        """Map compact hand pose coefficients to full axis-angle.

        BUGFIX: declared as @staticmethod. It is invoked as
        self.extend_hand(poses, use_pca, use_flat_mean, coeffs, mean) with
        five positional arguments; without the decorator `self` would be
        shifted into `poses` and the call would raise a TypeError.
        """
        if use_pca:
            poses = poses @ coeffs
        if not use_flat_mean:
            poses = poses + mean
        return poses

    def extend_pose(self, poses):
        """Expand compact/PCA pose vectors to the model's full axis-angle layout."""
        # skip SMPL, or poses that are already fully extended
        if self.model_type not in ['smplh', 'smplx', 'mano']:
            return poses
        elif self.model_type == 'smplh' and poses.shape[-1] == 156 and self.use_flat_mean:
            return poses
        elif self.model_type == 'smplx' and poses.shape[-1] == 165 and self.use_flat_mean:
            return poses
        elif self.model_type == 'mano' and poses.shape[-1] == 48 and self.use_flat_mean:
            return poses
        # mano: global orientation (3) + hand coefficients
        if self.model_type == 'mano':
            poses_hand = self.extend_hand(poses[..., 3:], self.use_pca, self.use_flat_mean,
                                          self.mHandsComponents, self.mHandsMean)
            poses = torch.cat([poses[..., :3], poses_hand], dim=-1)
            return poses
        NUM_BODYJOINTS = 22 * 3
        if self.use_pca:
            NUM_HANDJOINTS = self.num_pca_comps
        else:
            NUM_HANDJOINTS = 15 * 3
        NUM_FACEJOINTS = 3 * 3
        poses_lh = poses[:, NUM_BODYJOINTS:NUM_BODYJOINTS + NUM_HANDJOINTS]
        poses_rh = poses[:, NUM_BODYJOINTS + NUM_HANDJOINTS:NUM_BODYJOINTS+NUM_HANDJOINTS*2]
        if self.use_pca:
            poses_lh = poses_lh @ self.mHandsComponentsL
            poses_rh = poses_rh @ self.mHandsComponentsR
        if not self.use_flat_mean:
            poses_lh = poses_lh + self.mHandsMeanL
            poses_rh = poses_rh + self.mHandsMeanR
        if self.model_type == 'smplh':
            poses = torch.cat([poses[:, :NUM_BODYJOINTS], poses_lh, poses_rh], dim=1)
        elif self.model_type == 'smplx':
            # the head part have only three joints
            # poses_head: (N, 9), jaw_pose, leye_pose, reye_pose respectively
            poses_head = poses[:, NUM_BODYJOINTS+NUM_HANDJOINTS*2:]
            # body, head, left hand, right hand
            poses = torch.cat([poses[:, :NUM_BODYJOINTS], poses_head, poses_lh, poses_rh], dim=1)
        return poses

    def get_root(self, poses, shapes, return_tensor=False):
        """Return the shaped rest-pose root joint location (N, 3)."""
        if 'torch' not in str(type(poses)):
            dtype, device = self.dtype, self.device
            poses = to_tensor(poses, dtype, device)
            shapes = to_tensor(shapes, dtype, device)
        # only_shape=True: the joints come from the shaped template, the
        # pose itself is not applied
        vertices, joints = lbs(shapes, poses, self.v_template,
                               self.shapedirs, self.posedirs,
                               self.J_regressor, self.parents,
                               self.weights, pose2rot=True, dtype=self.dtype, only_shape=True)
        # N x 3
        j0 = joints[:, 0, :]
        if not return_tensor:
            j0 = j0.detach().cpu().numpy()
        return j0

    def convert_from_standard_smpl(self, poses, shapes, Rh=None, Th=None, expression=None):
        """Split a standard SMPL global orientation into separate (Rh, Th).

        Moves poses[:, :3] into Rh and compensates Th so the posed mesh
        stays in place; returns numpy parameter arrays.
        """
        if 'torch' not in str(type(poses)):
            dtype, device = self.dtype, self.device
            poses = to_tensor(poses, dtype, device)
            shapes = to_tensor(shapes, dtype, device)
            Rh = to_tensor(Rh, dtype, device)
            Th = to_tensor(Th, dtype, device)
            if expression is not None:
                expression = to_tensor(expression, dtype, device)
        bn = poses.shape[0]
        # process shapes
        if shapes.shape[0] < bn:
            shapes = shapes.expand(bn, -1)
        vertices, joints = lbs(shapes, poses, self.v_template,
                               self.shapedirs, self.posedirs,
                               self.J_regressor, self.parents,
                               self.weights, pose2rot=True, dtype=self.dtype, only_shape=True)
        # N x 3
        j0 = joints[:, 0, :]
        Rh = poses[:, :3].clone()
        # N x 3 x 3
        rot = batch_rodrigues(Rh)
        # rotation happens about the root joint, not the origin, so the
        # translation must absorb the displaced root
        Tnew = Th + j0 - torch.einsum('bij,bj->bi', rot, j0)
        poses[:, :3] = 0
        res = dict(poses=poses.detach().cpu().numpy(),
                   shapes=shapes.detach().cpu().numpy(),
                   Rh=Rh.detach().cpu().numpy(),
                   Th=Tnew.detach().cpu().numpy()
                   )
        return res

    def full_poses(self, poses):
        """Return the fully-extended axis-angle pose vector as numpy."""
        if 'torch' not in str(type(poses)):
            dtype, device = self.dtype, self.device
            poses = to_tensor(poses, dtype, device)
        poses = self.extend_pose(poses)
        return poses.detach().cpu().numpy()

    def forward(self, poses, shapes, Rh=None, Th=None, expression=None,
                v_template=None,
                return_verts=True, return_tensor=True, return_smpl_joints=False,
                only_shape=False, pose2rot=True, **kwargs):
        """ Forward pass for SMPL model
        Args:
            poses (n, 72)
            shapes (n, 10)
            Rh (n, 3): global orientation (axis-angle, or (n, 3, 3) matrices)
            Th (n, 3): global translation
            return_verts (bool, optional): if True return (6890, 3). Defaults to False.
        """
        if 'torch' not in str(type(poses)):
            dtype, device = self.dtype, self.device
            poses = to_tensor(poses, dtype, device)
            shapes = to_tensor(shapes, dtype, device)
            if Rh is not None:
                Rh = to_tensor(Rh, dtype, device)
            if Th is not None:
                Th = to_tensor(Th, dtype, device)
            if expression is not None:
                expression = to_tensor(expression, dtype, device)
        bn = poses.shape[0]
        # process Rh, Th: default to zero rotation/translation
        if Rh is None:
            Rh = torch.zeros(bn, 3, device=poses.device)
        if Th is None:
            Th = torch.zeros(bn, 3, device=poses.device)
        if len(Rh.shape) == 2:  # angle-axis
            rot = batch_rodrigues(Rh)
        else:
            rot = Rh
        transl = Th.unsqueeze(dim=1)
        # process shapes: broadcast a shared shape across the batch
        if shapes.shape[0] < bn:
            shapes = shapes.expand(bn, -1)
        if expression is not None and self.model_type == 'smplx':
            shapes = torch.cat([shapes, expression], dim=1)
        # process poses
        if pose2rot:  # if given rotation matrices, no need for this
            poses = self.extend_pose(poses)
        if return_verts or not self.use_joints:
            if v_template is None:
                v_template = self.v_template
            vertices, joints = self.lbs(shapes, poses, v_template,
                                        self.shapedirs, self.posedirs,
                                        self.J_regressor, self.parents,
                                        self.weights, pose2rot=pose2rot, dtype=self.dtype,
                                        use_pose_blending=self.use_pose_blending, use_shape_blending=self.use_shape_blending, J_shaped=self.J_shaped)
            if not self.use_joints and not return_verts:
                vertices = joints
        else:
            # skin only the precomputed regressed-keypoint "vertices"
            vertices, joints = self.lbs(shapes, poses, self.j_v_template,
                                        self.j_shapedirs, self.j_posedirs,
                                        self.j_J_regressor, self.parents,
                                        self.j_weights, pose2rot=pose2rot, dtype=self.dtype, only_shape=only_shape,
                                        use_pose_blending=self.use_pose_blending, use_shape_blending=self.use_shape_blending, J_shaped=self.J_shaped)
            if return_smpl_joints:
                vertices = vertices[:, :self.J_regressor.shape[0], :]
            else:
                vertices = vertices[:, self.J_regressor.shape[0]:, :]
        # apply the global rigid transform last
        vertices = torch.matmul(vertices, rot.transpose(1, 2)) + transl
        if not return_tensor:
            vertices = vertices.detach().cpu().numpy()
        return vertices

    def init_params(self, nFrames=1, nShapes=1, ret_tensor=False):
        """Return zero-initialized parameter arrays for this model type."""
        params = {
            'poses': np.zeros((nFrames, self.NUM_POSES)),
            'shapes': np.zeros((nShapes, NUM_SHAPES)),
            'Rh': np.zeros((nFrames, 3)),
            'Th': np.zeros((nFrames, 3)),
        }
        if self.model_type == 'smplx':
            params['expression'] = np.zeros((nFrames, NUM_EXPR))
        if ret_tensor:
            for key in params.keys():
                params[key] = to_tensor(params[key], self.dtype, self.device)
        return params

    def check_params(self, body_params):
        """Zero-pad poses (and add missing expression) to this model's sizes."""
        model_type = self.model_type
        nFrames = body_params['poses'].shape[0]
        if body_params['poses'].shape[1] != self.NUM_POSES:
            body_params['poses'] = np.hstack((body_params['poses'], np.zeros((nFrames, self.NUM_POSES - body_params['poses'].shape[1]))))
        if model_type == 'smplx' and 'expression' not in body_params.keys():
            body_params['expression'] = np.zeros((nFrames, NUM_EXPR))
        return body_params
def merge_params(param_list, share_shape=True):
    """Stack a list of parameter dicts along the frame axis.

    Known SMPL keys are vstack-ed; any remaining keys are np.stack-ed.
    With share_shape=True a single mean 'shapes' row is kept.
    """
    merged = {}
    for key in ('poses', 'shapes', 'Rh', 'Th', 'expression'):
        if key in param_list[0]:
            merged[key] = np.vstack([p[key] for p in param_list])
    if share_shape:
        merged['shapes'] = merged['shapes'].mean(axis=0, keepdims=True)
    # carry over any remaining keys by stacking along a new frame axis
    for key in param_list[0]:
        if key not in merged:
            merged[key] = np.stack([p[key] for p in param_list])
    return merged
def select_nf(params_all, nf):
    """Extract the parameters of frame *nf* as 1-frame arrays.

    A shared single-row 'shapes' array is returned unchanged.
    """
    out = {}
    for key in ['poses', 'Rh', 'Th']:
        out[key] = params_all[key][nf:nf+1, :]
    if 'expression' in params_all.keys():
        out['expression'] = params_all['expression'][nf:nf+1, :]
    if params_all['shapes'].shape[0] != 1:
        out['shapes'] = params_all['shapes'][nf:nf+1, :]
    else:
        out['shapes'] = params_all['shapes']
    return out
def load_model(gender='neutral', use_cuda=True, model_type='smpl', skel_type='body25', device=None, model_path='data/smplx'):
    """Construct a SMPLlayer for the requested model family on `device`.

    Args:
        gender: 'neutral'/'male'/'female' (used for smpl/smplx file names).
        use_cuda: pick CUDA when available and `device` is None.
        model_type: 'smpl', 'smplh', 'smplx', 'manol' or 'manor'.
        skel_type: joint regressor variant for the smpl model.
        model_path: root directory of the model data.

    Raises:
        NotImplementedError: unknown `model_type` or `skel_type`.
    """
    # prepare SMPL model
    import torch
    if device is None:
        if use_cuda and torch.cuda.is_available():
            device = torch.device('cuda')
        else:
            device = torch.device('cpu')
    from .body_model import SMPLlayer
    if model_type == 'smpl':
        if skel_type == 'body25':
            reg_path = join(model_path, 'J_regressor_body25.npy')
        elif skel_type == 'h36m':
            reg_path = join(model_path, 'J_regressor_h36m.npy')
        else:
            raise NotImplementedError
        body_model = SMPLlayer(join(model_path, 'smpl'), gender=gender, device=device,
                               regressor_path=reg_path)
    elif model_type == 'smplh':
        body_model = SMPLlayer(join(model_path, 'smplh/SMPLH_MALE.pkl'), model_type='smplh', gender=gender, device=device,
                               regressor_path=join(model_path, 'J_regressor_body25_smplh.txt'))
    elif model_type == 'smplx':
        body_model = SMPLlayer(join(model_path, 'smplx/SMPLX_{}.pkl'.format(gender.upper())), model_type='smplx', gender=gender, device=device,
                               regressor_path=join(model_path, 'J_regressor_body25_smplx.txt'))
    elif model_type == 'manol' or model_type == 'manor':
        lr = {'manol': 'LEFT', 'manor': 'RIGHT'}
        body_model = SMPLlayer(join(model_path, 'smplh/MANO_{}.pkl'.format(lr[model_type])), model_type='mano', gender=gender, device=device,
                               regressor_path=join(model_path, 'J_regressor_mano_{}.txt'.format(lr[model_type])))
    else:
        # BUGFIX: previously fell through with body_model = None and then
        # crashed on body_model.to(device); fail loudly instead.
        raise NotImplementedError('Unknown model type: {}'.format(model_type))
    body_model.to(device)
    return body_model
13,073 | import numpy as np
from os.path import join
def check_keypoints(keypoints2d, WEIGHT_DEBUFF=1, min_conf=0.3):
    """Clean up detected keypoints in place and return the same array.

    keypoints2d: (..., nJoints, 3) array of (x, y, conf).

    When extra (hand/face) joints are present (nJoints > 25):
      - joints 25 and 46 are overwritten with joints 7 and 4 (presumably
        re-anchoring the hand roots at the body wrists — BODY25 layout);
      - confidences of joints 25+ are scaled by WEIGHT_DEBUFF.
    Finally all confidences below `min_conf` are zeroed.
    """
    if keypoints2d.shape[-2] > 25:
        keypoints2d[..., 25, :] = keypoints2d[..., 7, :]
        keypoints2d[..., 46, :] = keypoints2d[..., 4, :]
        keypoints2d[..., 25:, -1] *= WEIGHT_DEBUFF
    # suppress low-confidence detections (in place, via a view)
    conf = keypoints2d[..., -1]
    conf[conf < min_conf] = 0
    return keypoints2d
13,074 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import torch
import torch.nn.functional as F
def rot_mat_to_euler(rot_mats):
    """Extract one Euler angle per rotation matrix.

    Computes atan2(-R[2, 0], sqrt(R[0, 0]^2 + R[1, 0]^2)) for a batch of
    (3, 3) matrices. Beware extreme Euler configurations such as
    [0.0, pi, 0.0] (gimbal lock is not handled).
    """
    r00 = rot_mats[:, 0, 0]
    r10 = rot_mats[:, 1, 0]
    sy = torch.sqrt(r00 * r00 + r10 * r10)
    return torch.atan2(-rot_mats[:, 2, 0], sy)
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
    """Convert a batch of axis-angle vectors to rotation matrices.

    Uses the Rodrigues formula R = I + sin(a) K + (1 - cos(a)) K^2, where
    K is the skew-symmetric matrix of the (normalized) rotation axis.

    Args:
        rot_vecs: (N, 3) axis-angle vectors.
    Returns:
        (N, 3, 3) rotation matrices.
    """
    N = rot_vecs.shape[0]
    device = rot_vecs.device
    # The small offset keeps the norm (and its gradient) finite at zero.
    angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)
    axis = rot_vecs / angle
    cos = torch.cos(angle).unsqueeze(1)
    sin = torch.sin(angle).unsqueeze(1)
    # Bx1 components of the axis
    ax, ay, az = torch.split(axis, 1, dim=1)
    zero = torch.zeros((N, 1), dtype=dtype, device=device)
    # Skew-symmetric cross-product matrix of the axis.
    K = torch.cat([zero, -az, ay,
                   az, zero, -ax,
                   -ay, ax, zero], dim=1).view((N, 3, 3))
    eye = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
    return eye + sin * K + (1 - cos) * torch.bmm(K, K)
The provided code snippet includes necessary dependencies for implementing the `find_dynamic_lmk_idx_and_bcoords` function. Write a Python function `def find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx, dynamic_lmk_b_coords, neck_kin_chain, dtype=torch.float32)` to solve the following problem:
Compute the faces, barycentric coordinates for the dynamic landmarks To do so, we first compute the rotation of the neck around the y-axis and then use a pre-computed look-up table to find the faces and the barycentric coordinates that will be used. Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de) for providing the original TensorFlow implementation and for the LUT. Parameters ---------- vertices: torch.tensor BxVx3, dtype = torch.float32 The tensor of input vertices pose: torch.tensor Bx(Jx3), dtype = torch.float32 The current pose of the body model dynamic_lmk_faces_idx: torch.tensor L, dtype = torch.long The look-up table from neck rotation to faces dynamic_lmk_b_coords: torch.tensor Lx3, dtype = torch.float32 The look-up table from neck rotation to barycentric coordinates neck_kin_chain: list A python list that contains the indices of the joints that form the kinematic chain of the neck. dtype: torch.dtype, optional Returns ------- dyn_lmk_faces_idx: torch.tensor, dtype = torch.long A tensor of size BxL that contains the indices of the faces that will be used to compute the current dynamic landmarks. dyn_lmk_b_coords: torch.tensor, dtype = torch.float32 A tensor of size BxL that contains the indices of the faces that will be used to compute the current dynamic landmarks.
Here is the function:
def find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx,
                                     dynamic_lmk_b_coords,
                                     neck_kin_chain, dtype=torch.float32):
    ''' Compute the faces, barycentric coordinates for the dynamic landmarks
    To do so, we first compute the rotation of the neck around the y-axis
    and then use a pre-computed look-up table to find the faces and the
    barycentric coordinates that will be used.
    Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de)
    for providing the original TensorFlow implementation and for the LUT.
    Parameters
    ----------
    vertices: torch.tensor BxVx3, dtype = torch.float32
        The tensor of input vertices
    pose: torch.tensor Bx(Jx3), dtype = torch.float32
        The current pose of the body model
    dynamic_lmk_faces_idx: torch.tensor L, dtype = torch.long
        The look-up table from neck rotation to faces
    dynamic_lmk_b_coords: torch.tensor Lx3, dtype = torch.float32
        The look-up table from neck rotation to barycentric coordinates
    neck_kin_chain: list
        A python list that contains the indices of the joints that form the
        kinematic chain of the neck.
    dtype: torch.dtype, optional
    Returns
    -------
    dyn_lmk_faces_idx: torch.tensor, dtype = torch.long
        A tensor of size BxL that contains the indices of the faces that
        will be used to compute the current dynamic landmarks.
    dyn_lmk_b_coords: torch.tensor, dtype = torch.float32
        A tensor of size BxL that contains the indices of the faces that
        will be used to compute the current dynamic landmarks.
    '''
    batch_size = vertices.shape[0]
    # Axis-angle of only the neck-chain joints: (B, n_chain, 3).
    aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1,
                                 neck_kin_chain)
    rot_mats = batch_rodrigues(
        aa_pose.view(-1, 3), dtype=dtype).view(batch_size, -1, 3, 3)
    # Compose the chain rotations into the overall head rotation.
    rel_rot_mat = torch.eye(3, device=vertices.device,
                            dtype=dtype).unsqueeze_(dim=0)
    for idx in range(len(neck_kin_chain)):
        rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)
    # Rotation angle in whole degrees, clamped from above at 39.
    y_rot_angle = torch.round(
        torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi,
                    max=39)).to(dtype=torch.long)
    neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)
    mask = y_rot_angle.lt(-39).to(dtype=torch.long)
    # Negative angles map to LUT rows 40..78 (39 - angle), saturating at
    # 78 for anything beyond -39 degrees.
    neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)
    y_rot_angle = (neg_mask * neg_vals +
                   (1 - neg_mask) * y_rot_angle)
    # Look up the face indices and barycentric weights for each batch item.
    dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx,
                                           0, y_rot_angle)
    dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords,
                                          0, y_rot_angle)
    return dyn_lmk_faces_idx, dyn_lmk_b_coords
13,075 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `vertices2landmarks` function. Write a Python function `def vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords)` to solve the following problem:
Calculates landmarks by barycentric interpolation Parameters ---------- vertices: torch.tensor BxVx3, dtype = torch.float32 The tensor of input vertices faces: torch.tensor Fx3, dtype = torch.long The faces of the mesh lmk_faces_idx: torch.tensor L, dtype = torch.long The tensor with the indices of the faces used to calculate the landmarks. lmk_bary_coords: torch.tensor Lx3, dtype = torch.float32 The tensor of barycentric coordinates that are used to interpolate the landmarks Returns ------- landmarks: torch.tensor BxLx3, dtype = torch.float32 The coordinates of the landmarks for each mesh in the batch
Here is the function:
def vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords):
    """Interpolate landmark positions from mesh vertices.

    Args:
        vertices: (B, V, 3) float mesh vertices.
        faces: (F, 3) long vertex indices per face.
        lmk_faces_idx: (L,) or (B, L) long indices of the landmark faces.
        lmk_bary_coords: (B, L, 3) barycentric weights per landmark.

    Returns:
        (B, L, 3) landmark coordinates for each mesh in the batch.
    """
    batch_size, num_verts = vertices.shape[:2]
    device = vertices.device
    # (B, L, 3): vertex indices of each landmark's face.
    lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).view(
        batch_size, -1, 3)
    # Offset the indices per batch item so one flattened (B*V, 3) lookup
    # can serve the whole batch.
    offsets = torch.arange(
        batch_size, dtype=torch.long, device=device).view(-1, 1, 1) * num_verts
    lmk_faces = lmk_faces + offsets
    lmk_vertices = vertices.view(-1, 3)[lmk_faces].view(
        batch_size, -1, 3, 3)
    return torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])
13,076 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import torch
import torch.nn.functional as F
def vertices2joints(J_regressor, vertices):
    """Regress 3D joint locations from mesh vertices.

    Args:
        J_regressor: (J, V) regression weights.
        vertices: (B, V, 3) mesh vertices.

    Returns:
        (B, J, 3) joints: per batch, the weighted sum of the vertices.
    """
    return torch.einsum('bik,ji->bjk', [vertices, J_regressor])
def blend_shapes(betas, shape_disps):
    """Per-vertex displacement induced by blend-shape coefficients.

    Args:
        betas: (B, num_betas) blend-shape coefficients.
        shape_disps: (V, 3, num_betas) blend-shape basis.

    Returns:
        (B, V, 3): each basis displacement scaled by its beta, summed.
    """
    return torch.einsum('bl,mkl->bmk', [betas, shape_disps])
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
    ''' Calculates the rotation matrices for a batch of rotation vectors
    Parameters
    ----------
    rot_vecs: torch.tensor Nx3
        array of N axis-angle vectors
    Returns
    -------
    R: torch.tensor Nx3x3
        The rotation matrices for the given axis-angle parameters
    '''
    # NOTE(review): `epsilon` is unused; the hard-coded 1e-8 below plays
    # its role.
    batch_size = rot_vecs.shape[0]
    device = rot_vecs.device
    # Small offset keeps the norm (and its gradient) finite for zero vectors.
    angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)
    rot_dir = rot_vecs / angle
    cos = torch.unsqueeze(torch.cos(angle), dim=1)
    sin = torch.unsqueeze(torch.sin(angle), dim=1)
    # Bx1 arrays
    rx, ry, rz = torch.split(rot_dir, 1, dim=1)
    # NOTE(review): this zeros tensor is dead — it is overwritten by the
    # torch.cat below.
    K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)
    zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
    # Skew-symmetric cross-product matrix of the rotation axis.
    K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
        .view((batch_size, 3, 3))
    ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
    # Rodrigues formula: R = I + sin(a) K + (1 - cos(a)) K^2.
    rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
    return rot_mat
def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):
    """
    Applies a batch of rigid transformations to the joints
    Parameters
    ----------
    rot_mats : torch.tensor BxNx3x3
        Tensor of rotation matrices
    joints : torch.tensor BxNx3
        Locations of joints
    parents : torch.tensor BxN
        The kinematic tree of each object
    dtype : torch.dtype, optional:
        The data type of the created tensors, the default is torch.float32
        (NOTE(review): currently unused in the body)
    Returns
    -------
    posed_joints : torch.tensor BxNx3
        The locations of the joints after applying the pose rotations
    rel_transforms : torch.tensor BxNx4x4
        The relative (with respect to the root joint) rigid transformations
        for all the joints
    """
    joints = torch.unsqueeze(joints, dim=-1)
    # Joint offsets relative to each joint's parent (root keeps its
    # absolute position).
    rel_joints = joints.clone()
    rel_joints[:, 1:] -= joints[:, parents[1:]]
    transforms_mat = transform_mat(
        rot_mats.view(-1, 3, 3),
        rel_joints.contiguous().view(-1, 3, 1)).view(-1, joints.shape[1], 4, 4)
    transform_chain = [transforms_mat[:, 0]]
    for i in range(1, parents.shape[0]):
        # Compose each joint's local transform onto its parent's global one
        # (no rest-pose rotation to subtract: it is the identity at rest).
        curr_res = torch.matmul(transform_chain[parents[i]],
                                transforms_mat[:, i])
        transform_chain.append(curr_res)
    transforms = torch.stack(transform_chain, dim=1)
    # The last column of the transformations contains the posed joints.
    # BUGFIX: a duplicated copy of this statement (and its comment) was
    # removed — it recomputed the identical value.
    posed_joints = transforms[:, :, :3, 3]
    joints_homogen = F.pad(joints, [0, 0, 0, 1])
    # Subtract the transported rest-pose joint so the transforms are
    # expressed relative to the rest pose (standard LBS form).
    rel_transforms = transforms - F.pad(
        torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])
    return posed_joints, rel_transforms
The provided code snippet includes necessary dependencies for implementing the `lbs` function. Write a Python function `def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents, lbs_weights, pose2rot=True, dtype=torch.float32, only_shape=False, use_shape_blending=True, use_pose_blending=True, J_shaped=None)` to solve the following problem:
Performs Linear Blend Skinning with the given shape and pose parameters Parameters ---------- betas : torch.tensor BxNB The tensor of shape parameters pose : torch.tensor Bx(J + 1) * 3 The pose parameters in axis-angle format v_template torch.tensor BxVx3 The template mesh that will be deformed shapedirs : torch.tensor 1xNB The tensor of PCA shape displacements posedirs : torch.tensor Px(V * 3) The pose PCA coefficients J_regressor : torch.tensor JxV The regressor array that is used to calculate the joints from the position of the vertices parents: torch.tensor J The array that describes the kinematic tree for the model lbs_weights: torch.tensor N x V x (J + 1) The linear blend skinning weights that represent how much the rotation matrix of each part affects each vertex pose2rot: bool, optional Flag on whether to convert the input pose tensor to rotation matrices. The default value is True. If False, then the pose tensor should already contain rotation matrices and have a size of Bx(J + 1)x9 dtype: torch.dtype, optional Returns ------- verts: torch.tensor BxVx3 The vertices of the mesh after applying the shape and pose displacements. joints: torch.tensor BxJx3 The joints of the model
Here is the function:
def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,
        lbs_weights, pose2rot=True, dtype=torch.float32, only_shape=False,
        use_shape_blending=True, use_pose_blending=True, J_shaped=None):
    ''' Performs Linear Blend Skinning with the given shape and pose parameters
    Parameters
    ----------
    betas : torch.tensor BxNB
        The tensor of shape parameters
    pose : torch.tensor Bx(J + 1) * 3
        The pose parameters in axis-angle format
    v_template torch.tensor BxVx3
        The template mesh that will be deformed
    shapedirs : torch.tensor 1xNB
        The tensor of PCA shape displacements
    posedirs : torch.tensor Px(V * 3)
        The pose PCA coefficients
    J_regressor : torch.tensor JxV
        The regressor array that is used to calculate the joints from
        the position of the vertices
    parents: torch.tensor J
        The array that describes the kinematic tree for the model
    lbs_weights: torch.tensor N x V x (J + 1)
        The linear blend skinning weights that represent how much the
        rotation matrix of each part affects each vertex
    pose2rot: bool, optional
        Flag on whether to convert the input pose tensor to rotation
        matrices. The default value is True. If False, then the pose tensor
        should already contain rotation matrices and have a size of
        Bx(J + 1)x9
    dtype: torch.dtype, optional
    only_shape: bool, optional
        If True, stop after shape blending and return (v_shaped, J).
    use_shape_blending / use_pose_blending: bool, optional
        Toggle the shape and pose corrective blendshape stages.
    J_shaped: torch.tensor Jx3, optional
        Precomputed rest-pose joints; required when use_shape_blending is
        False.
    Returns
    -------
    verts: torch.tensor BxVx3
        The vertices of the mesh after applying the shape and pose
        displacements.
    joints: torch.tensor BxJx3
        The joints of the model
    '''
    batch_size = max(betas.shape[0], pose.shape[0])
    device = betas.device
    # 1. Add shape contribution
    if use_shape_blending:
        v_shaped = v_template + blend_shapes(betas, shapedirs)
        # 2. Get the joints
        # NxJx3 array
        J = vertices2joints(J_regressor, v_shaped)
    else:
        # Shape blending disabled: broadcast the template and use the
        # caller-supplied rest-pose joints.
        v_shaped = v_template.unsqueeze(0).expand(batch_size, -1, -1)
        assert J_shaped is not None
        J = J_shaped[None].expand(batch_size, -1, -1)
    if only_shape:
        # Early exit: caller only wants the shaped rest mesh and joints.
        return v_shaped, J
    # 3. Add pose blend shapes
    # N x J x 3 x 3
    if pose2rot:
        rot_mats = batch_rodrigues(
            pose.view(-1, 3), dtype=dtype).view([batch_size, -1, 3, 3])
    else:
        rot_mats = pose.view(batch_size, -1, 3, 3)
    if use_pose_blending:
        # Pose correctives are linear in (R - I) of all non-root joints.
        ident = torch.eye(3, dtype=dtype, device=device)
        pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
        pose_offsets = torch.matmul(pose_feature, posedirs) \
            .view(batch_size, -1, 3)
        v_posed = pose_offsets + v_shaped
    else:
        v_posed = v_shaped
    # 4. Get the global joint location
    J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)
    # 5. Do skinning:
    # W is N x V x (J + 1)
    W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])
    # Per-vertex 4x4 transform: skinning-weighted sum of joint transforms,
    # (N x V x (J + 1)) x (N x (J + 1) x 16)
    num_joints = J_regressor.shape[0]
    T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \
        .view(batch_size, -1, 4, 4)
    # Apply the transforms in homogeneous coordinates.
    homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1],
                               dtype=dtype, device=device)
    v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2)
    v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1))
    verts = v_homo[:, :, :3, 0]
return verts, J_transformed | Performs Linear Blend Skinning with the given shape and pose parameters Parameters ---------- betas : torch.tensor BxNB The tensor of shape parameters pose : torch.tensor Bx(J + 1) * 3 The pose parameters in axis-angle format v_template torch.tensor BxVx3 The template mesh that will be deformed shapedirs : torch.tensor 1xNB The tensor of PCA shape displacements posedirs : torch.tensor Px(V * 3) The pose PCA coefficients J_regressor : torch.tensor JxV The regressor array that is used to calculate the joints from the position of the vertices parents: torch.tensor J The array that describes the kinematic tree for the model lbs_weights: torch.tensor N x V x (J + 1) The linear blend skinning weights that represent how much the rotation matrix of each part affects each vertex pose2rot: bool, optional Flag on whether to convert the input pose tensor to rotation matrices. The default value is True. If False, then the pose tensor should already contain rotation matrices and have a size of Bx(J + 1)x9 dtype: torch.dtype, optional Returns ------- verts: torch.tensor BxVx3 The vertices of the mesh after applying the shape and pose displacements. joints: torch.tensor BxJx3 The joints of the model |
13,077 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import torch
import torch.nn.functional as F
def vertices2joints(J_regressor, vertices):
    ''' Calculates the 3D joint locations from the vertices
    Parameters
    ----------
    J_regressor : torch.tensor JxV
        The regressor array that is used to calculate the joints from the
        position of the vertices
    vertices : torch.tensor BxVx3
        The tensor of mesh vertices
    Returns
    -------
    torch.tensor BxJx3
        The location of the joints
    '''
    # (J, V) @ (B, V, 3) broadcasts over the batch -> (B, J, 3);
    # identical to einsum('bik,ji->bjk', vertices, J_regressor).
    return torch.matmul(J_regressor, vertices)
def blend_shapes(betas, shape_disps):
    ''' Calculates the per vertex displacement due to the blend shapes
    Parameters
    ----------
    betas : torch.tensor Bx(num_betas)
        Blend shape coefficients
    shape_disps: torch.tensor Vx3x(num_betas)
        Blend shapes
    Returns
    -------
    torch.tensor BxVx3
        The per-vertex displacement due to shape deformation
    '''
    # Flatten the (V, 3, L) blend-shape basis to (V*3, L) and take a single
    # matrix product with the coefficients, then fold back to (B, V, 3).
    # Equivalent to einsum('bl,mkl->bmk', betas, shape_disps).
    num_betas = betas.shape[-1]
    basis = shape_disps.reshape(-1, num_betas)
    return torch.matmul(betas, basis.t()).reshape(betas.shape[0], -1, 3)
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
    ''' Calculates the rotation matrices for a batch of rotation vectors
    Parameters
    ----------
    rot_vecs: torch.tensor Nx3
        array of N axis-angle vectors
    epsilon: float, optional
        Small offset added before taking the norm so a zero vector does not
        divide by zero (it maps to ~identity).
    Returns
    -------
    R: torch.tensor Nx3x3
        The rotation matrices for the given axis-angle parameters
    '''
    batch_size = rot_vecs.shape[0]
    device = rot_vecs.device
    # Fix: honour the `epsilon` argument (it was previously ignored in favour
    # of a hard-coded 1e-8); the default keeps the old behaviour.
    angle = torch.norm(rot_vecs + epsilon, dim=1, keepdim=True)
    rot_dir = rot_vecs / angle
    cos = torch.unsqueeze(torch.cos(angle), dim=1)
    sin = torch.unsqueeze(torch.sin(angle), dim=1)
    # Bx1 arrays
    rx, ry, rz = torch.split(rot_dir, 1, dim=1)
    zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
    # K is the skew-symmetric cross-product matrix of the unit axis.
    # (A dead `K = torch.zeros(...)` store that was immediately overwritten
    # has been removed.)
    K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
        .view((batch_size, 3, 3))
    ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
    # Rodrigues' formula: R = I + sin(a) K + (1 - cos(a)) K^2
    rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
    return rot_mat
def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):
    """
    Applies a batch of rigid transformations to the joints
    Parameters
    ----------
    rot_mats : torch.tensor BxNx3x3
        Tensor of rotation matrices
    joints : torch.tensor BxNx3
        Locations of joints
    parents : torch.tensor BxN
        The kinematic tree of each object
    dtype : torch.dtype, optional:
        The data type of the created tensors, the default is torch.float32
    Returns
    -------
    posed_joints : torch.tensor BxNx3
        The locations of the joints after applying the pose rotations
    rel_transforms : torch.tensor BxNx4x4
        The relative (with respect to the root joint) rigid transformations
        for all the joints
    """
    joints = torch.unsqueeze(joints, dim=-1)
    # Express each joint relative to its parent; the root keeps its
    # absolute rest position.
    rel_joints = joints.clone()
    rel_joints[:, 1:] -= joints[:, parents[1:]]
    transforms_mat = transform_mat(
        rot_mats.view(-1, 3, 3),
        rel_joints.contiguous().view(-1, 3, 1)).view(-1, joints.shape[1], 4, 4)
    # Accumulate local transforms down the kinematic chain.
    transform_chain = [transforms_mat[:, 0]]
    for i in range(1, parents.shape[0]):
        # Subtract the joint location at the rest pose
        # No need for rotation, since it's identity when at rest
        curr_res = torch.matmul(transform_chain[parents[i]],
                                transforms_mat[:, i])
        transform_chain.append(curr_res)
    transforms = torch.stack(transform_chain, dim=1)
    # The last column of the transformations contains the posed joints
    # (fix: this statement was previously duplicated verbatim).
    posed_joints = transforms[:, :, :3, 3]
    joints_homogen = F.pad(joints, [0, 0, 0, 1])
    # Remove the rest-pose joint locations so the transforms act on
    # vertices expressed in the rest pose.
    rel_transforms = transforms - F.pad(
        torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])
    return posed_joints, rel_transforms
def batch_dqs_blending(A,W,Vs):
    # Dual-quaternion skinning (DQS): blends the per-joint rigid transforms
    # in dual-quaternion space instead of linearly blending matrices.
    #   A:  (B, J, 4, 4) per-joint rigid transforms
    #   W:  (B, V, J)    skinning weights -- assumed to sum to ~1 per vertex (TODO confirm)
    #   Vs: (B, V, 3)    vertex positions to be skinned
    # Returns the skinned vertices, (B, V, 3).
    Bnum,Jnum,_,_=A.shape
    _,Vnum,_=W.shape
    A = A.view(Bnum*Jnum,4,4)
    Rs=A[:,:3,:3]
    # Rotation matrix -> quaternion (w, x, y, z); the clamp guards the sqrt
    # when trace(R)+1 dips slightly negative from round-off.
    ws=torch.sqrt(torch.clamp(Rs[:,0,0]+Rs[:,1,1]+Rs[:,2,2]+1.,min=1.e-6))/2.
    xs=(Rs[:,2,1]-Rs[:,1,2])/(4.*ws)
    ys=(Rs[:,0,2]-Rs[:,2,0])/(4.*ws)
    zs=(Rs[:,1,0]-Rs[:,0,1])/(4.*ws)
    Ts=A[:,:3,3]
    # Dual part of the dual quaternion: 0.5 * (translation quaternion) * q.
    vDw=-0.5*( Ts[:,0]*xs + Ts[:,1]*ys + Ts[:,2]*zs)
    vDx=0.5*( Ts[:,0]*ws + Ts[:,1]*zs - Ts[:,2]*ys)
    vDy=0.5*(-Ts[:,0]*zs + Ts[:,1]*ws + Ts[:,2]*xs)
    vDz=0.5*( Ts[:,0]*ys - Ts[:,1]*xs + Ts[:,2]*ws)
    # Skinning-weighted blend of the real (b0) and dual (be) parts.
    b0=W.unsqueeze(-2)@torch.cat([ws[:,None],xs[:,None],ys[:,None],zs[:,None]],dim=-1).reshape(Bnum, 1, Jnum, 4) #B,V,1,4
    be=W.unsqueeze(-2)@torch.cat([vDw[:,None],vDx[:,None],vDy[:,None],vDz[:,None]],dim=-1).reshape(Bnum, 1, Jnum, 4) #B,V,1,4
    b0 = b0.reshape(-1, 4)
    be = be.reshape(-1, 4)
    # Normalise by the real part's magnitude so the blend stays a unit
    # dual quaternion.
    ns=torch.norm(b0,dim=-1,keepdim=True)
    be=be/ns
    b0=b0/ns
    Vs=Vs.view(Bnum*Vnum,3)
    # Apply the blended dual quaternion: rotation via the quaternion
    # sandwich, translation recovered from the dual part.
    Vs=Vs+2.*b0[:,1:].cross(b0[:,1:].cross(Vs)+b0[:,:1]*Vs)+2.*(b0[:,:1]*be[:,1:]-be[:,:1]*b0[:,1:]+b0[:,1:].cross(be[:,1:]))
    return Vs.reshape(Bnum,Vnum,3)
The provided code snippet includes necessary dependencies for implementing the `dqs` function. Write a Python function `def dqs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents, lbs_weights, pose2rot=True, dtype=torch.float32, only_shape=False, use_shape_blending=True, use_pose_blending=True, J_shaped=None)` to solve the following problem:
Performs Linear Blend Skinning with the given shape and pose parameters Parameters ---------- betas : torch.tensor BxNB The tensor of shape parameters pose : torch.tensor Bx(J + 1) * 3 The pose parameters in axis-angle format v_template torch.tensor BxVx3 The template mesh that will be deformed shapedirs : torch.tensor 1xNB The tensor of PCA shape displacements posedirs : torch.tensor Px(V * 3) The pose PCA coefficients J_regressor : torch.tensor JxV The regressor array that is used to calculate the joints from the position of the vertices parents: torch.tensor J The array that describes the kinematic tree for the model lbs_weights: torch.tensor N x V x (J + 1) The linear blend skinning weights that represent how much the rotation matrix of each part affects each vertex pose2rot: bool, optional Flag on whether to convert the input pose tensor to rotation matrices. The default value is True. If False, then the pose tensor should already contain rotation matrices and have a size of Bx(J + 1)x9 dtype: torch.dtype, optional Returns ------- verts: torch.tensor BxVx3 The vertices of the mesh after applying the shape and pose displacements. joints: torch.tensor BxJx3 The joints of the model
Here is the function:
def dqs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,
        lbs_weights, pose2rot=True, dtype=torch.float32, only_shape=False,
        use_shape_blending=True, use_pose_blending=True, J_shaped=None):
    ''' Performs Dual-Quaternion Skinning with the given shape and pose
    parameters. Same pipeline and interface as `lbs`, except the final
    skinning step blends the joint transforms in dual-quaternion space
    (via batch_dqs_blending) instead of linearly blending matrices.
    Parameters
    ----------
    betas : torch.tensor BxNB
        The tensor of shape parameters
    pose : torch.tensor Bx(J + 1) * 3
        The pose parameters in axis-angle format
    v_template torch.tensor BxVx3
        The template mesh that will be deformed
    shapedirs : torch.tensor 1xNB
        The tensor of PCA shape displacements
    posedirs : torch.tensor Px(V * 3)
        The pose PCA coefficients
    J_regressor : torch.tensor JxV
        The regressor array that is used to calculate the joints from
        the position of the vertices
    parents: torch.tensor J
        The array that describes the kinematic tree for the model
    lbs_weights: torch.tensor N x V x (J + 1)
        The linear blend skinning weights that represent how much the
        rotation matrix of each part affects each vertex
    pose2rot: bool, optional
        Flag on whether to convert the input pose tensor to rotation
        matrices. The default value is True. If False, then the pose tensor
        should already contain rotation matrices and have a size of
        Bx(J + 1)x9
    dtype: torch.dtype, optional
    Returns
    -------
    verts: torch.tensor BxVx3
        The vertices of the mesh after applying the shape and pose
        displacements.
    joints: torch.tensor BxJx3
        The joints of the model
    '''
    batch_size = max(betas.shape[0], pose.shape[0])
    device = betas.device
    # 1. Add shape contribution
    if use_shape_blending:
        v_shaped = v_template + blend_shapes(betas, shapedirs)
        # 2. Get the joints
        # NxJx3 array
        J = vertices2joints(J_regressor, v_shaped)
    else:
        # Shape blending disabled: broadcast the template and use the
        # caller-supplied rest-pose joints.
        v_shaped = v_template.unsqueeze(0).expand(batch_size, -1, -1)
        assert J_shaped is not None
        J = J_shaped[None].expand(batch_size, -1, -1)
    if only_shape:
        return v_shaped, J
    # 3. Add pose blend shapes
    # N x J x 3 x 3
    if pose2rot:
        rot_mats = batch_rodrigues(
            pose.view(-1, 3), dtype=dtype).view([batch_size, -1, 3, 3])
    else:
        rot_mats = pose.view(batch_size, -1, 3, 3)
    if use_pose_blending:
        # Pose correctives are linear in (R - I) of all non-root joints.
        ident = torch.eye(3, dtype=dtype, device=device)
        pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
        pose_offsets = torch.matmul(pose_feature, posedirs) \
            .view(batch_size, -1, 3)
        v_posed = pose_offsets + v_shaped
    else:
        v_posed = v_shaped
    # 4. Get the global joint location
    J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)
    # 5. Do skinning:
    # W is N x V x (J + 1)
    W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])
    # Blend the per-joint transforms A in dual-quaternion space.
    verts=batch_dqs_blending(A,W,v_posed)
return verts, J_transformed | Performs Linear Blend Skinning with the given shape and pose parameters Parameters ---------- betas : torch.tensor BxNB The tensor of shape parameters pose : torch.tensor Bx(J + 1) * 3 The pose parameters in axis-angle format v_template torch.tensor BxVx3 The template mesh that will be deformed shapedirs : torch.tensor 1xNB The tensor of PCA shape displacements posedirs : torch.tensor Px(V * 3) The pose PCA coefficients J_regressor : torch.tensor JxV The regressor array that is used to calculate the joints from the position of the vertices parents: torch.tensor J The array that describes the kinematic tree for the model lbs_weights: torch.tensor N x V x (J + 1) The linear blend skinning weights that represent how much the rotation matrix of each part affects each vertex pose2rot: bool, optional Flag on whether to convert the input pose tensor to rotation matrices. The default value is True. If False, then the pose tensor should already contain rotation matrices and have a size of Bx(J + 1)x9 dtype: torch.dtype, optional Returns ------- verts: torch.tensor BxVx3 The vertices of the mesh after applying the shape and pose displacements. joints: torch.tensor BxJx3 The joints of the model |
13,078 | import numpy as np
import cv2
from ..dataset.config import CONFIG
from ..config import load_object
from ..mytools.debug_utils import log, mywarn, myerror
import torch
from tqdm import tqdm, trange
def svd_rot(src, tgt, reflection=False, debug=False):
    """Batched Kabsch/Procrustes alignment.
    For each batch item, find the orthogonal T minimising ||tgt - src @ T.T||.
    Parameters
    ----------
    src, tgt : np.ndarray (B, N, 3)
        Corresponding point sets.
    reflection : bool
        If False (default) force a proper rotation (det = +1); if True force
        an improper one (det = -1).
    debug : bool
        Print per-point residuals.
    Returns
    -------
    T : np.ndarray (B, 3, 3)
    """
    # Per-item cross-covariance of the correspondences
    A = np.matmul(src.transpose(0, 2, 1), tgt)
    U, s, Vt = np.linalg.svd(A, full_matrices=False)
    V = Vt.transpose(0, 2, 1)
    T = np.matmul(V, U.transpose(0, 2, 1))
    # does the current solution use a reflection?
    have_reflection = np.linalg.det(T) < 0
    # Fix: flip only where the solution's handedness disagrees with the
    # requested one; the `reflection` flag was previously ignored, which was
    # only correct for reflection=False.
    flip = have_reflection != reflection
    V[flip, :, -1] *= -1
    s[flip, -1] *= -1
    T = np.matmul(V, U.transpose(0, 2, 1))
    if debug:
        # Fix: use a batched matmul here -- `src @ T.T` transposed the whole
        # (B, 3, 3) stack instead of each matrix.
        err = np.linalg.norm(tgt - np.matmul(src, T.transpose(0, 2, 1)), axis=-1)
        print('[svd] ', err)
    return T
13,079 | import numpy as np
import cv2
from ..dataset.config import CONFIG
from ..config import load_object
from ..mytools.debug_utils import log, mywarn, myerror
import torch
from tqdm import tqdm, trange
def batch_invRodrigues(rot):
    """Inverse Rodrigues for a batch: convert (N, 3, 3) rotation matrices to
    (N, 3) axis-angle vectors, one cv2.Rodrigues call per matrix."""
    vecs = [cv2.Rodrigues(R)[0][:, 0] for R in rot]
    return np.stack(vecs)
13,080 | import numpy as np
import torch.nn as nn
import torch
from ..bodymodel.lbs import batch_rodrigues
class GMoF(nn.Module):
def __init__(self, rho=1):
def extra_repr(self):
def forward(self, est, gt=None, conf=None):
def make_loss(norm, norm_info, reduce='sum'):
    """Factory for a loss callable.
    norm == 'l2' -> squared-difference loss, reduced over the last axis with
    `reduce` ('sum' or mean) and optionally weighted by a per-item `conf`;
    norm == 'gm' -> a GMoF robustifier constructed with `norm_info`."""
    reducer = torch.sum if reduce == 'sum' else torch.mean
    if norm == 'l2':
        def loss(est, gt=None, conf=None):
            residual = est if gt is None else est - gt
            sq = reducer(residual ** 2, dim=-1)
            if conf is None:
                return sq.sum() / sq.numel()
            # confidence-weighted mean; the 1e-5 guards an all-zero conf
            return torch.sum(sq * conf) / (1e-5 + conf.sum())
    elif norm == 'gm':
        loss = GMoF(norm_info)
    return loss
13,081 | import numpy as np
import torch.nn as nn
import torch
from ..bodymodel.lbs import batch_rodrigues
def select(value, ranges, index, dim):
    """Slice `value` along its trailing axes.
    If `ranges` is non-empty, return value[..., ranges[0]:ranges[1]]
    (ranges[1] == -1 means "to the end"). Otherwise, if `index` is
    non-empty, fancy-index axis `dim` (-1 or -2). Otherwise return `value`
    unchanged."""
    if len(ranges) > 0:
        start, stop = ranges[0], ranges[1]
        return value[..., start:] if stop == -1 else value[..., start:stop]
    if len(index) > 0:
        if dim == -1:
            return value[..., index]
        if dim == -2:
            return value[..., index, :]
        # unsupported dim: fall through unchanged (matches original behaviour)
        return value
    return value
13,082 | import numpy as np
import torch.nn as nn
import torch
from ..bodymodel.lbs import batch_rodrigues
def print_table(header, contents):
    """Pretty-print `contents` (one list per column row) with `header`.
    A final 'Mean' row holds the per-row averages, excluding the first
    content row (which is assumed to carry labels)."""
    from tabulate import tabulate
    n_cols = len(contents[0])
    rows = [[] for _ in range(n_cols)]
    mean_row = ['Mean']
    for row_idx, column in enumerate(contents):
        for c in range(n_cols):
            cell = column[c]
            rows[c].append('{:6.2f}'.format(cell) if isinstance(cell, float) else '{}'.format(cell))
        if row_idx > 0:
            mean_row.append('{:6.2f}'.format(sum(column) / n_cols))
    rows.append(mean_row)
    print(tabulate(rows, header, tablefmt='fancy_grid'))
13,083 | import numpy as np
import torch
from ..dataset.mirror import flipPoint2D, flipSMPLPoses, flipSMPLParams
from ..estimator.wrapper_base import bbox_from_keypoints
from .lossbase import Keypoints2D
The provided code snippet includes necessary dependencies for implementing the `calc_vanishpoint` function. Write a Python function `def calc_vanishpoint(keypoints2d)` to solve the following problem:
keypoints2d: (2, N, 3)
Here is the function:
def calc_vanishpoint(keypoints2d):
    '''
    Estimate a vanishing point as the confidence-weighted least-squares
    intersection of the lines through corresponding keypoint pairs.
    keypoints2d: (2, N, 3) -- two endpoints per line, each (x, y, conf)
    Returns a homogeneous point (x, y, 1) as a (3,) array.
    '''
    # weight: (N, 1) -- mean confidence of each line's two endpoints.
    # (A dead `conf = weight.mean()` store has been removed.)
    weight = keypoints2d[:, :, 2:].mean(axis=0)
    # Each line i yields one equation a_i*x + b_i*y = c_i, with (a, b) the
    # line normal and c fixed by one endpoint.
    A = np.hstack([
        keypoints2d[1, :, 1:2] - keypoints2d[0, :, 1:2],
        -(keypoints2d[1, :, 0:1] - keypoints2d[0, :, 0:1])
    ])
    b = -keypoints2d[0, :, 0:1]*(keypoints2d[1, :, 1:2] - keypoints2d[0, :, 1:2]) \
        + keypoints2d[0, :, 1:2] * (keypoints2d[1, :, 0:1] - keypoints2d[0, :, 0:1])
    b = -b
    # Confidence-weighted normal equations.
    A = A * weight
    b = b * weight
    avgInsec = np.linalg.inv(A.T @ A) @ (A.T @ b)
    result = np.zeros(3)
    result[0] = avgInsec[0, 0]
    result[1] = avgInsec[1, 0]
    result[2] = 1
    return result
13,084 | import pickle
import os
from os.path import join
import numpy as np
import torch
from .lossbase import LossBase
The provided code snippet includes necessary dependencies for implementing the `create_prior_from_cmu` function. Write a Python function `def create_prior_from_cmu(n_gaussians, epsilon=1e-15)` to solve the following problem:
Load the gmm from the CMU motion database.
Here is the function:
def create_prior_from_cmu(n_gaussians, epsilon=1e-15):
    """Load the gmm from the CMU motion database.
    Reads gmm_<n>.pkl next to this module and returns the mixture as a dict
    of numpy arrays: means, covariances, their inverses (precisions),
    log-determinants, and negative-log mixture weights.
    `epsilon` guards log() of a near-singular covariance determinant.
    """
    from os.path import dirname
    np_dtype = np.float32
    with open(join(dirname(__file__), 'gmm_%02d.pkl' % (n_gaussians)), 'rb') as f:
        gmm = pickle.load(f, encoding='latin1')
    # (fix: removed a vestigial `if True:` wrapper around the body below)
    means = gmm['means'].astype(np_dtype)
    covs = gmm['covars'].astype(np_dtype)
    weights = gmm['weights'].astype(np_dtype)
    # Precision matrices (inverse covariances), one per mixture component.
    precisions = [np.linalg.inv(cov) for cov in covs]
    precisions = np.stack(precisions).astype(np_dtype)
    sqrdets = np.array([(np.sqrt(np.linalg.det(c)))
                        for c in gmm['covars']])
    # Gaussian normalisation constant for the 69-dim pose space.
    const = (2 * np.pi)**(69 / 2.)
    nll_weights = np.asarray(gmm['weights'] / (const * (sqrdets / sqrdets.min())))
    cov_dets = [np.log(np.linalg.det(cov.astype(np_dtype)) + epsilon)
                for cov in covs]
    return {
        'means': means,
        'covs': covs,
        'precisions': precisions,
        'nll_weights': -np.log(nll_weights[None]),
        'weights': weights,
        'pi_term': np.log(2*np.pi),
        'cov_dets': cov_dets
    }
13,085 | from collections import namedtuple
from time import time, sleep
import numpy as np
import cv2
import torch
import copy
from ..config.baseconfig import load_object_from_cmd
from ..mytools.debug_utils import log, mywarn
from ..mytools import Timer
from ..config import Config
from ..mytools.triangulator import iterative_triangulate
from ..bodymodel.base import Params
from .torchgeometry import axis_angle_to_euler, euler_to_axis_angle
from scipy.spatial.transform import Rotation
The provided code snippet includes necessary dependencies for implementing the `batch_rodrigues` function. Write a Python function `def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32)` to solve the following problem:
Calculates the rotation matrices for a batch of rotation vectors Parameters ---------- rot_vecs: torch.tensor Nx3 array of N axis-angle vectors Returns ------- R: torch.tensor Nx3x3 The rotation matrices for the given axis-angle parameters
Here is the function:
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
    ''' Calculates the rotation matrices for a batch of rotation vectors
    Parameters
    ----------
    rot_vecs: torch.tensor Nx3
        array of N axis-angle vectors
    epsilon: float, optional
        Small offset added before taking the norm so a zero vector does not
        divide by zero (it maps to ~identity).
    Returns
    -------
    R: torch.tensor Nx3x3
        The rotation matrices for the given axis-angle parameters
    '''
    batch_size = rot_vecs.shape[0]
    device = rot_vecs.device
    # Fix: honour the `epsilon` argument (it was previously ignored in favour
    # of a hard-coded 1e-8); the default keeps the old behaviour.
    angle = torch.norm(rot_vecs + epsilon, dim=1, keepdim=True)
    rot_dir = rot_vecs / angle
    cos = torch.unsqueeze(torch.cos(angle), dim=1)
    sin = torch.unsqueeze(torch.sin(angle), dim=1)
    # Bx1 arrays
    rx, ry, rz = torch.split(rot_dir, 1, dim=1)
    zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
    # K is the skew-symmetric cross-product matrix of the unit axis.
    # (A dead `K = torch.zeros(...)` store that was immediately overwritten
    # has been removed.)
    K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
        .view((batch_size, 3, 3))
    ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
    # Rodrigues' formula: R = I + sin(a) K + (1 - cos(a)) K^2
    rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
    return rot_mat
13,086 | from collections import namedtuple
from time import time, sleep
import numpy as np
import cv2
import torch
import copy
from ..config.baseconfig import load_object_from_cmd
from ..mytools.debug_utils import log, mywarn
from ..mytools import Timer
from ..config import Config
from ..mytools.triangulator import iterative_triangulate
from ..bodymodel.base import Params
from .torchgeometry import axis_angle_to_euler, euler_to_axis_angle
from scipy.spatial.transform import Rotation
def aa2euler(aa):
    """Axis-angle vector -> intrinsic XYZ Euler angles in radians, rounded
    to two decimals and returned as a plain list."""
    R = cv2.Rodrigues(np.array(aa))[0]
    angles = Rotation.from_matrix(R).as_euler('XYZ', degrees=False)
    return np.round(angles, 2).tolist()
13,087 | from collections import namedtuple
from time import time, sleep
import numpy as np
import cv2
import torch
import copy
from ..config.baseconfig import load_object_from_cmd
from ..mytools.debug_utils import log, mywarn
from ..mytools import Timer
from ..config import Config
from ..mytools.triangulator import iterative_triangulate
from ..bodymodel.base import Params
from .torchgeometry import axis_angle_to_euler, euler_to_axis_angle
from scipy.spatial.transform import Rotation
def rotmat2euler(rot):
    """Rotation matrix -> intrinsic XYZ Euler angles in degrees."""
    return Rotation.from_matrix(rot).as_euler('XYZ', degrees=True)
13,088 | from collections import namedtuple
from time import time, sleep
import numpy as np
import cv2
import torch
import copy
from ..config.baseconfig import load_object_from_cmd
from ..mytools.debug_utils import log, mywarn
from ..mytools import Timer
from ..config import Config
from ..mytools.triangulator import iterative_triangulate
from ..bodymodel.base import Params
from .torchgeometry import axis_angle_to_euler, euler_to_axis_angle
from scipy.spatial.transform import Rotation
def euler2rotmat(euler):
    """Intrinsic XYZ Euler angles (degrees) -> rotation matrix."""
    return Rotation.from_euler('XYZ', euler, degrees=True).as_matrix()
13,089 | from collections import namedtuple
from time import time, sleep
import numpy as np
import cv2
import torch
import copy
from ..config.baseconfig import load_object_from_cmd
from ..mytools.debug_utils import log, mywarn
from ..mytools import Timer
from ..config import Config
from ..mytools.triangulator import iterative_triangulate
from ..bodymodel.base import Params
from .torchgeometry import axis_angle_to_euler, euler_to_axis_angle
from scipy.spatial.transform import Rotation
def batch_rodrigues_jacobi(rvec):
    """Batched Rodrigues with an analytic Jacobian.
    rvec: (..., 3) axis-angle vectors.
    Returns (rot_mat, jacobi): rot_mat is (..., 3, 3); jacobi is (..., 3, 9),
    row i holding d(vec(R))/d(rvec_i) -- presumably matching OpenCV's
    cv2.Rodrigues jacobian layout (see the check kept in getJacobianOfRT).
    """
    shape = rvec.shape
    rvec = rvec.view(-1, 3)
    device = rvec.device
    # dSkew[k] = d(vec(skew(r)))/d(r_k): constant, sparse derivative of the
    # skew-symmetric operator.
    dSkew = torch.zeros(3, 9, device=device)
    dSkew[0, 5] = -1
    dSkew[1, 6] = -1
    dSkew[2, 1] = -1
    dSkew[0, 7] = 1
    dSkew[1, 2] = 1
    dSkew[2, 3] = 1
    dSkew = dSkew[None]
    # Angle (with a small epsilon to avoid 0/0) and unit axis.
    theta = torch.norm(rvec, dim=-1, keepdim=True) + 1e-5
    c = torch.cos(theta)
    s = torch.sin(theta)
    c1 = 1 - c
    itheta = 1 / theta
    r = rvec / theta
    zeros = torch.zeros_like(r[:, :1])
    rx, ry, rz = torch.split(r, 1, dim=1)
    # Outer product r r^T and the skew matrix of r.
    rrt = torch.matmul(r[:, :, None], r[:, None, :])
    skew = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
        .view((r.shape[0], 3, 3))
    I = torch.eye(3, device=rvec.device, dtype=rvec.dtype)[None]
    # Rodrigues' formula: R = I + sin(t) skew + (1 - cos(t)) skew^2
    rot_mat = I + s[:, None] * skew + c1[:, None] * torch.bmm(skew, skew)
    # drrt[:, i] = d(vec(r r^T))/d(r_i), flattened row-major.
    drrt = torch.stack([
        rx + rx, ry, rz, ry, zeros, zeros, rz, zeros, zeros,
        zeros, rx, zeros, rx, ry + ry, rz, zeros, rz, zeros,
        zeros, zeros, rx, zeros, zeros, ry, rx, ry, rz + rz
    ], dim=-1).view((r.shape[0], 3, 9))
    jacobi = torch.zeros((r.shape[0], 3, 9), device=rvec.device, dtype=rvec.dtype)
    # Chain rule per axis-angle component; a0..a4 are the scalar
    # coefficients of the five terms of dR/d(rvec_i).
    for i in range(3):
        ri = r[:, i:i+1]
        a0 = -s * ri
        a1 = (s - 2*c1*itheta)*ri
        a2 = c1 * itheta
        a3 = (c-s*itheta)*ri
        a4 = s * itheta
        jaco = a0[:, None] * I + a1[:, None] * rrt + a2[:, None] * drrt[:, i].view(-1, 3, 3) + a3[:, None] * skew + a4[:, None] * dSkew[:, i].view(-1, 3, 3)
        jacobi[:, i] = jaco.view(-1, 9)
    # Restore the caller's leading batch shape.
    rot_mat = rot_mat.view(*shape[:-1], 3, 3)
    jacobi = jacobi.view(*shape[:-1], 3, 9)
    return rot_mat, jacobi
def getJacobianOfRT(rvec, tvec, joints):
    # joints: (bn, nJ, 3)
    # Jacobian of the rigid transform J' = R(rvec) @ J + tvec w.r.t. (rvec, tvec).
    # Returns:
    #   Rot:          (bn, 3, 3) rotation matrices of rvec
    #   jacobiToRvec: (bn, 3, 9) d(vec(R))/d(rvec)
    #   jacobi_J_rt:  (bn, nJ, 3, 6) derivative of each transformed joint
    #                 w.r.t. the 3 rvec + 3 tvec parameters
    dtype, device = rvec.dtype, rvec.device
    bn, nJoints = joints.shape[:2]
    # jacobiToRvec: (bn, 3, 9) // tested by OpenCV and PyTorch
    Rot, jacobiToRvec = batch_rodrigues_jacobi(rvec)
    I3 = torch.eye(3, dtype=dtype, device=device)[None]
    # jacobiJ_R: (bn, nJ, 3, 3+3+3) => (bn, nJ, 3, 9)
    # d(R @ joint)/d(vec(R)) -- each output row depends only on one row of R:
    # // flat by column:
    # // x, 0, 0 | y, 0, 0 | z, 0, 0
    # // 0, x, 0 | 0, y, 0 | 0, z, 0
    # // 0, 0, x | 0, 0, y | 0, 0, z
    jacobi_J_R = torch.zeros((bn, nJoints, 3, 9), dtype=dtype, device=device)
    jacobi_J_R[:, :, 0, :3] = joints
    jacobi_J_R[:, :, 1, 3:6] = joints
    jacobi_J_R[:, :, 2, 6:9] = joints
    # jacobi_J_rvec: (bn, nJ, 3, 3), chain rule through vec(R)
    jacobi_J_rvec = torch.matmul(jacobi_J_R, jacobiToRvec[:, None].transpose(-1, -2))
    # if True: # autograd sanity check kept from the original author
    #     def test_func(rvec):
    #         Rot = batch_rodrigues(rvec[None])[0]
    #         joints_new = joints[0] @ Rot.t()
    #         return joints_new
    #     jac_J_rvec = torch.autograd.functional.jacobian(test_func, rvec[0])
    #     my_j = jacobi_joints_RT[0, ..., :3]
    # jacobi_J_tvec: (bn, nJ, 3, 3) -- translation derivative is identity.
    jacobi_J_tvec = I3[None].expand(bn, nJoints, -1, -1)
    jacobi_J_rt = torch.cat([jacobi_J_rvec, jacobi_J_tvec], dim=-1)
return Rot, jacobiToRvec, jacobi_J_rt | null |
13,090 | from collections import namedtuple
from time import time, sleep
import numpy as np
import cv2
import torch
import copy
from ..config.baseconfig import load_object_from_cmd
from ..mytools.debug_utils import log, mywarn
from ..mytools import Timer
from ..config import Config
from ..mytools.triangulator import iterative_triangulate
from ..bodymodel.base import Params
from .torchgeometry import axis_angle_to_euler, euler_to_axis_angle
from scipy.spatial.transform import Rotation
def rotation_matrix_from_3x3(A):
U, s, Vt = np.linalg.svd(A, full_matrices=False)
V = Vt.T
T = np.dot(V, U.T)
# does the current solution use a reflection?
have_reflection = np.linalg.det(T) < 0
# if that's not what was specified, force another reflection
if have_reflection:
V[:,-1] *= -1
s[-1] *= -1
T = np.dot(V, U.T)
return T | null |
13,091 | from collections import namedtuple
from time import time, sleep
import numpy as np
import cv2
import torch
import copy
from ..config.baseconfig import load_object_from_cmd
from ..mytools.debug_utils import log, mywarn
from ..mytools import Timer
from ..config import Config
from ..mytools.triangulator import iterative_triangulate
from ..bodymodel.base import Params
from .torchgeometry import axis_angle_to_euler, euler_to_axis_angle
from scipy.spatial.transform import Rotation
def svd_rot(src, tgt, reflection=False, debug=True):
    """Kabsch/Procrustes alignment: find the orthogonal T minimising
    ||tgt - src @ T.T|| over the correspondences src, tgt (N, 3).
    `reflection` selects the desired handedness of the solution."""
    covariance = src.T @ tgt
    U, s, Vt = np.linalg.svd(covariance, full_matrices=False)
    V = Vt.T
    T = V @ U.T
    # force the requested handedness if the raw solution disagrees
    if (np.linalg.det(T) < 0) != reflection:
        V[:, -1] *= -1
        s[-1] *= -1
        T = V @ U.T
    if debug:
        err = np.linalg.norm(tgt - src @ T.T, axis=1)
        print('[svd] ', err)
    return T
13,092 | from collections import namedtuple
from time import time, sleep
import numpy as np
import cv2
import torch
import copy
from ..config.baseconfig import load_object_from_cmd
from ..mytools.debug_utils import log, mywarn
from ..mytools import Timer
from ..config import Config
from ..mytools.triangulator import iterative_triangulate
from ..bodymodel.base import Params
from .torchgeometry import axis_angle_to_euler, euler_to_axis_angle
from scipy.spatial.transform import Rotation
def normalize(vector):
    """Return `vector` scaled to unit L2 norm."""
    return vector/np.linalg.norm(vector)

def rad_from_2vec(vec1, vec2):
    """Angle in radians between two vectors.
    The cosine is clipped to [-1, 1] so floating-point round-off cannot push
    arccos out of its domain (previously NaN for (anti-)parallel vectors)."""
    cos_angle = (normalize(vec1)*normalize(vec2)).sum()
    return np.arccos(np.clip(cos_angle, -1.0, 1.0))
13,093 | from collections import namedtuple
from time import time, sleep
import numpy as np
import cv2
import torch
import copy
from ..config.baseconfig import load_object_from_cmd
from ..mytools.debug_utils import log, mywarn
from ..mytools import Timer
from ..config import Config
from ..mytools.triangulator import iterative_triangulate
from ..bodymodel.base import Params
from .torchgeometry import axis_angle_to_euler, euler_to_axis_angle
from scipy.spatial.transform import Rotation
def smoothing_factor(t_e, cutoff):
    """One-Euro-filter smoothing coefficient for time step t_e and the given
    cutoff frequency (pi approximated as 3.14, as in the original)."""
    tau = 2 * 3.14 * cutoff * t_e
    return tau / (tau + 1)
13,094 | from collections import namedtuple
from time import time, sleep
import numpy as np
import cv2
import torch
import copy
from ..config.baseconfig import load_object_from_cmd
from ..mytools.debug_utils import log, mywarn
from ..mytools import Timer
from ..config import Config
from ..mytools.triangulator import iterative_triangulate
from ..bodymodel.base import Params
from .torchgeometry import axis_angle_to_euler, euler_to_axis_angle
from scipy.spatial.transform import Rotation
def exponential_smoothing(a, x, x_prev):
    """Single exponential-smoothing step: blend the new sample `x` with the
    previous estimate `x_prev` using weight `a`."""
    new_term = a * x
    old_term = (1 - a) * x_prev
    return new_term + old_term
13,095 | import torch
from torch.nn import functional as F
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `rot6d_to_rotation_matrix` function. Write a Python function `def rot6d_to_rotation_matrix(rot6d)` to solve the following problem:
Convert 6D rotation representation to 3x3 rotation matrix. Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019 Args: rot6d: torch tensor of shape (batch_size, 6) of 6d rotation representations. Returns: rotation_matrix: torch tensor of shape (batch_size, 3, 3) of corresponding rotation matrices.
Here is the function:
def rot6d_to_rotation_matrix(rot6d):
    """
    Convert 6D rotation representation to 3x3 rotation matrix.
    Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019
    Args:
        rot6d: torch tensor of shape (batch_size, 6) of 6d rotation representations.
    Returns:
        rotation_matrix: torch tensor of shape (batch_size, 3, 3) of corresponding rotation matrices.
    """
    x = rot6d.view(-1, 3, 2)
    a1 = x[:, :, 0]
    a2 = x[:, :, 1]
    # Gram-Schmidt: first basis vector
    b1 = F.normalize(a1)
    # second basis vector: remove the b1 component from a2, then normalise
    b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1)
    # Fix: pass `dim` explicitly -- torch.cross's default picks the first
    # size-3 dimension (deprecated), which silently crossed along the batch
    # axis whenever batch_size == 3.
    b3 = torch.cross(b1, b2, dim=1)
    return torch.stack((b1, b2, b3), dim=-1)
13,096 | import torch
from torch.nn import functional as F
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `rotation_matrix_to_rot6d` function. Write a Python function `def rotation_matrix_to_rot6d(rotation_matrix)` to solve the following problem:
Convert 3x3 rotation matrix to 6D rotation representation. Args: rotation_matrix: torch tensor of shape (batch_size, 3, 3) of corresponding rotation matrices. Returns: rot6d: torch tensor of shape (batch_size, 6) of 6d rotation representations.
Here is the function:
def rotation_matrix_to_rot6d(rotation_matrix):
    """
    Convert 3x3 rotation matrix to 6D rotation representation.
    Args:
        rotation_matrix: torch tensor of shape (batch_size, 3, 3) of corresponding rotation matrices.
    Returns:
        rot6d: torch tensor of shape (batch_size, 6) of 6d rotation representations.
    """
    # Keep only the first two columns of each matrix; row-major flattening of
    # the (3, 2) slab interleaves them as (a1_x, a2_x, a1_y, a2_y, a1_z, a2_z).
    batch_size = rotation_matrix.shape[0]
    first_two_cols = rotation_matrix[:, :, :2]
    return first_two_cols.reshape(batch_size, 6)
13,097 | import torch
from torch.nn import functional as F
import numpy as np
def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):
    """
    Convert rotation matrix to corresponding quaternion.
    Args:
        rotation_matrix: torch tensor of shape (batch_size, 3, 3)
        eps: threshold used when classifying the diagonal entries into the
            four numerical branches below.
    Returns:
        quaternion: torch tensor of shape (batch_size, 4) in (w, x, y, z) representation.
    """
    # Transpose once so the element accesses below read like the standard
    # row-major conversion formulas.
    rmat_t = torch.transpose(rotation_matrix, 1, 2)
    # Per-row masks that select which of the four candidate formulas is
    # numerically stable (classic 4-branch conversion keyed on the sign
    # pattern of the diagonal entries).
    mask_d2 = rmat_t[:, 2, 2] < eps
    mask_d0_d1 = rmat_t[:, 0, 0] > rmat_t[:, 1, 1]
    mask_d0_nd1 = rmat_t[:, 0, 0] < -rmat_t[:, 1, 1]
    # Candidate 0: dominant x component.
    t0 = 1 + rmat_t[:, 0, 0] - rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
    q0 = torch.stack([rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
                      t0, rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
                      rmat_t[:, 2, 0] + rmat_t[:, 0, 2]], -1)
    t0_rep = t0.repeat(4, 1).t()
    # Candidate 1: dominant y component.
    t1 = 1 - rmat_t[:, 0, 0] + rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
    q1 = torch.stack([rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
                      rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
                      t1, rmat_t[:, 1, 2] + rmat_t[:, 2, 1]], -1)
    t1_rep = t1.repeat(4, 1).t()
    # Candidate 2: dominant z component.
    t2 = 1 - rmat_t[:, 0, 0] - rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
    q2 = torch.stack([rmat_t[:, 0, 1] - rmat_t[:, 1, 0],
                      rmat_t[:, 2, 0] + rmat_t[:, 0, 2],
                      rmat_t[:, 1, 2] + rmat_t[:, 2, 1], t2], -1)
    t2_rep = t2.repeat(4, 1).t()
    # Candidate 3: dominant w component (trace-based formula).
    t3 = 1 + rmat_t[:, 0, 0] + rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
    q3 = torch.stack([t3, rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
                      rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
                      rmat_t[:, 0, 1] - rmat_t[:, 1, 0]], -1)
    t3_rep = t3.repeat(4, 1).t()
    # Combine the branch masks so exactly one of c0..c3 is true per row.
    mask_c0 = mask_d2 * mask_d0_d1
    mask_c1 = mask_d2 * (~ mask_d0_d1)
    mask_c2 = (~ mask_d2) * mask_d0_nd1
    mask_c3 = (~ mask_d2) * (~ mask_d0_nd1)
    mask_c0 = mask_c0.view(-1, 1).type_as(q0)
    mask_c1 = mask_c1.view(-1, 1).type_as(q1)
    mask_c2 = mask_c2.view(-1, 1).type_as(q2)
    mask_c3 = mask_c3.view(-1, 1).type_as(q3)
    # Blend the candidates (the masks are one-hot) and scale to unit length.
    q = q0 * mask_c0 + q1 * mask_c1 + q2 * mask_c2 + q3 * mask_c3
    q /= torch.sqrt(t0_rep * mask_c0 + t1_rep * mask_c1 + # noqa
                    t2_rep * mask_c2 + t3_rep * mask_c3) # noqa
    q *= 0.5
    return q
def quaternion_to_axis_angle(quaternion):
    """
    Convert quaternion to axis angle.
    based on: https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138
    Args:
        quaternion: torch tensor of shape (batch_size, 4) in (w, x, y, z) representation.
    Returns:
        axis_angle: torch tensor of shape (batch_size, 3)
    """
    # Keeps sqrt() finite (and differentiable) when the vector part is zero.
    epsilon = 1.e-8
    if not torch.is_tensor(quaternion):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))
    if not quaternion.shape[-1] == 4:
        raise ValueError("Input must be a tensor of shape Nx4 or 4. Got {}"
                         .format(quaternion.shape))
    # unpack input and compute conversion
    # For q = (cos(theta/2), sin(theta/2) * axis): |vec| = sin(theta/2).
    q1: torch.Tensor = quaternion[..., 1]
    q2: torch.Tensor = quaternion[..., 2]
    q3: torch.Tensor = quaternion[..., 3]
    sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3
    sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta+epsilon)
    cos_theta: torch.Tensor = quaternion[..., 0]
    # Negate both atan2 arguments when w < 0 so the recovered angle takes the
    # short way around (theta in [0, pi]).
    two_theta: torch.Tensor = 2.0 * torch.where(
        cos_theta < 0.0,
        torch.atan2(-sin_theta, -cos_theta),
        torch.atan2(sin_theta, cos_theta))
    # k = theta / sin(theta/2) scales the vector part to length theta;
    # k_neg = 2 is its small-angle (Taylor) limit, used when |vec| == 0.
    k_pos: torch.Tensor = two_theta / sin_theta
    k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta)
    k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg)
    angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3]
    angle_axis[..., 0] += q1 * k
    angle_axis[..., 1] += q2 * k
    angle_axis[..., 2] += q3 * k
    return angle_axis
def rotation_matrix_to_axis_angle(rotation_matrix):
    """Convert (batch, 3, 3) rotation matrices to (batch, 3) axis-angle vectors."""
    # Route through the quaternion representation.
    return quaternion_to_axis_angle(rotation_matrix_to_quaternion(rotation_matrix))
13,098 | import torch
from torch.nn import functional as F
import numpy as np
def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):
def quaternion_to_euler(quaternion, order, epsilon=0):
def rotation_matrix_to_euler(rotation_matrix, order):
    """Convert rotation matrices to euler angles in the given axis ``order``."""
    # Quaternions are the shared intermediate format for these conversions.
    return quaternion_to_euler(rotation_matrix_to_quaternion(rotation_matrix), order)
13,099 | import torch
from torch.nn import functional as F
import numpy as np
def quaternion_to_rotation_matrix(quaternion):
    """
    Convert quaternion coefficients to rotation matrix.
    Args:
        quaternion: torch tensor of shape (batch_size, 4) in (w, x, y, z) representation.
    Returns:
        rotation matrix corresponding to the quaternion, torch tensor of shape (batch_size, 3, 3)
    """
    # Normalize first so the closed-form entries assume a unit quaternion.
    unit_q = quaternion / quaternion.norm(p=2, dim=1, keepdim=True)
    w, x, y, z = unit_q.unbind(dim=1)
    ww, xx, yy, zz = w * w, x * x, y * y, z * z
    wx, wy, wz = w * x, w * y, w * z
    xy, xz, yz = x * y, x * z, y * z
    # Standard quaternion-to-matrix formula, assembled one row at a time.
    row0 = torch.stack([ww + xx - yy - zz, 2 * xy - 2 * wz, 2 * wy + 2 * xz], dim=1)
    row1 = torch.stack([2 * wz + 2 * xy, ww - xx + yy - zz, 2 * yz - 2 * wx], dim=1)
    row2 = torch.stack([2 * xz - 2 * wy, 2 * wx + 2 * yz, ww - xx - yy + zz], dim=1)
    return torch.stack([row0, row1, row2], dim=1)
def euler_to_quaternion(euler, order):
    """
    Convert euler angles to quaternion.
    Args:
        euler: torch tensor of shape (batch_size, 3) in order.
        order: axis order string such as 'zyx'; axes are composed left to right.
    Returns:
        quaternion: torch tensor of shape (batch_size, 4) in (w, x, y, z) representation.
    """
    assert euler.shape[-1] == 3
    original_shape = list(euler.shape)
    original_shape[-1] = 4
    e = euler.reshape(-1, 3)
    x = e[:, 0]
    y = e[:, 1]
    z = e[:, 2]
    # Unit quaternion (w, x, y, z) for a rotation about each single axis.
    rx = torch.stack((torch.cos(x/2), torch.sin(x/2),
                      torch.zeros_like(x), torch.zeros_like(x)), dim=1)
    ry = torch.stack((torch.cos(y/2), torch.zeros_like(y),
                      torch.sin(y/2), torch.zeros_like(y)), dim=1)
    rz = torch.stack((torch.cos(z/2), torch.zeros_like(z),
                      torch.zeros_like(z), torch.sin(z/2)), dim=1)
    # Compose the per-axis rotations in the order given by the string.
    # NOTE(review): quaternion_mul is defined elsewhere in this module —
    # presumably the Hamilton product; confirm its operand convention.
    result = None
    for coord in order:
        if coord == 'x':
            r = rx
        elif coord == 'y':
            r = ry
        elif coord == 'z':
            r = rz
        else:
            raise Exception('unsupported euler order!')
        if result is None:
            result = r
        else:
            result = quaternion_mul(result, r)
    # Reverse antipodal representation to have a non-negative "w"
    if order in ['xyz', 'yzx', 'zxy']:
        result *= -1
    return result.reshape(original_shape)
def euler_to_rotation_matrix(euler, order):
    """Convert euler angles (in ``order``) to (batch, 3, 3) rotation matrices."""
    # Compose the two quaternion-based conversions.
    return quaternion_to_rotation_matrix(euler_to_quaternion(euler, order))
13,100 | import torch
from torch.nn import functional as F
import numpy as np
def quaternion_to_euler(quaternion, order, epsilon=0):
    """
    Convert quaternion to euler angles.
    Args:
        quaternion: torch tensor of shape (batch_size, 4) in (w, x, y, z) representation.
        order: euler angle representation order, 'zyx' etc.
        epsilon: margin used to clamp the asin argument away from +/-1,
            avoiding NaN gradients near gimbal lock.
    Returns:
        euler: torch tensor of shape (batch_size, 3) in order.
    """
    assert quaternion.shape[-1] == 4
    original_shape = list(quaternion.shape)
    original_shape[-1] = 3
    q = quaternion.contiguous().view(-1, 4)
    q0 = q[:, 0]
    q1 = q[:, 1]
    q2 = q[:, 2]
    q3 = q[:, 3]
    # Each branch is the closed-form angle extraction for that axis order;
    # the middle axis always uses asin (clamped), the outer two use atan2.
    if order == 'xyz':
        x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2*(q1 * q1 + q2 * q2))
        y = torch.asin(torch.clamp(
            2 * (q1 * q3 + q0 * q2), -1+epsilon, 1-epsilon))
        z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2*(q2 * q2 + q3 * q3))
    elif order == 'yzx':
        x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2*(q1 * q1 + q3 * q3))
        y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2*(q2 * q2 + q3 * q3))
        z = torch.asin(torch.clamp(
            2 * (q1 * q2 + q0 * q3), -1+epsilon, 1-epsilon))
    elif order == 'zxy':
        x = torch.asin(torch.clamp(
            2 * (q0 * q1 + q2 * q3), -1+epsilon, 1-epsilon))
        y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2*(q1 * q1 + q2 * q2))
        z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2*(q1 * q1 + q3 * q3))
    elif order == 'xzy':
        x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2*(q1 * q1 + q3 * q3))
        y = torch.atan2(2 * (q0 * q2 + q1 * q3), 1 - 2*(q2 * q2 + q3 * q3))
        z = torch.asin(torch.clamp(
            2 * (q0 * q3 - q1 * q2), -1+epsilon, 1-epsilon))
    elif order == 'yxz':
        x = torch.asin(torch.clamp(
            2 * (q0 * q1 - q2 * q3), -1+epsilon, 1-epsilon))
        y = torch.atan2(2 * (q1 * q3 + q0 * q2), 1 - 2*(q1 * q1 + q2 * q2))
        z = torch.atan2(2 * (q1 * q2 + q0 * q3), 1 - 2*(q1 * q1 + q3 * q3))
    elif order == 'zyx':
        x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2*(q1 * q1 + q2 * q2))
        y = torch.asin(torch.clamp(
            2 * (q0 * q2 - q1 * q3), -1+epsilon, 1-epsilon))
        z = torch.atan2(2 * (q0 * q3 + q1 * q2), 1 - 2*(q2 * q2 + q3 * q3))
    else:
        raise Exception('unsupported euler order!')
    return torch.stack((x, y, z), dim=1).view(original_shape)
def axis_angle_to_quaternion(axis_angle):
    """
    Convert axis angle to quaternion.
    Args:
        axis_angle: torch tensor of shape (batch_size, 3)
    Returns:
        quaternion: torch tensor of shape (batch_size, 4) in (w, x, y, z) representation.
    """
    # Hop through the rotation-matrix representation.
    return rotation_matrix_to_quaternion(axis_angle_to_rotation_matrix(axis_angle))
def axis_angle_to_euler(axis_angle, order):
    """Convert axis-angle vectors to euler angles in the given axis ``order``."""
    # Quaternions serve as the shared intermediate representation.
    return quaternion_to_euler(axis_angle_to_quaternion(axis_angle), order)
13,101 | import torch
from torch.nn import functional as F
import numpy as np
def euler_to_quaternion(euler, order):
    """
    Convert euler angles to quaternion.
    Args:
        euler: torch tensor of shape (batch_size, 3) in order.
        order: axis order string such as 'zyx'; axes are composed left to right.
    Returns:
        quaternion: torch tensor of shape (batch_size, 4) in (w, x, y, z) representation.
    """
    assert euler.shape[-1] == 3
    out_shape = list(euler.shape)
    out_shape[-1] = 4
    angles = euler.reshape(-1, 3)
    ax, ay, az = angles[:, 0], angles[:, 1], angles[:, 2]
    # Unit quaternion (w, x, y, z) for a rotation about each single axis.
    axis_quats = {
        'x': torch.stack((torch.cos(ax/2), torch.sin(ax/2),
                          torch.zeros_like(ax), torch.zeros_like(ax)), dim=1),
        'y': torch.stack((torch.cos(ay/2), torch.zeros_like(ay),
                          torch.sin(ay/2), torch.zeros_like(ay)), dim=1),
        'z': torch.stack((torch.cos(az/2), torch.zeros_like(az),
                          torch.zeros_like(az), torch.sin(az/2)), dim=1),
    }
    result = None
    for coord in order:
        if coord not in axis_quats:
            raise Exception('unsupported euler order!')
        r = axis_quats[coord]
        result = r if result is None else quaternion_mul(result, r)
    # Reverse antipodal representation to have a non-negative "w"
    if order in ['xyz', 'yzx', 'zxy']:
        result *= -1
    return result.reshape(out_shape)
def quaternion_to_axis_angle(quaternion):
    """
    Convert quaternion to axis angle.
    based on: https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138
    Args:
        quaternion: torch tensor of shape (batch_size, 4) in (w, x, y, z) representation.
    Returns:
        axis_angle: torch tensor of shape (batch_size, 3)
    """
    # Keeps sqrt() finite when the vector part is exactly zero.
    epsilon = 1.e-8
    if not torch.is_tensor(quaternion):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))
    if not quaternion.shape[-1] == 4:
        raise ValueError("Input must be a tensor of shape Nx4 or 4. Got {}"
                         .format(quaternion.shape))
    cos_theta = quaternion[..., 0]
    vec = quaternion[..., 1:]
    # For q = (cos(theta/2), sin(theta/2) * axis): |vec| = sin(theta/2).
    sin_sq = (vec * vec).sum(dim=-1)
    sin_theta = torch.sqrt(sin_sq + epsilon)
    # Negate both atan2 arguments when w < 0 so the angle takes the short way.
    two_theta = 2.0 * torch.where(
        cos_theta < 0.0,
        torch.atan2(-sin_theta, -cos_theta),
        torch.atan2(sin_theta, cos_theta))
    # Scale factor theta / sin(theta/2); falls back to its small-angle
    # Taylor limit (2) when the vector part vanishes.
    k_small = 2.0 * torch.ones_like(sin_theta)
    k = torch.where(sin_sq > 0.0, two_theta / sin_theta, k_small)
    return vec * k.unsqueeze(-1)
def euler_to_axis_angle(euler, order):
    """Convert euler angles (in ``order``) to axis-angle vectors."""
    # Chain the two quaternion-based conversions.
    return quaternion_to_axis_angle(euler_to_quaternion(euler, order))
13,102 | import torch
from torch.nn import functional as F
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `rotate_vec_by_quaternion` function. Write a Python function `def rotate_vec_by_quaternion(v, q)` to solve the following problem:
Rotate vector(s) v about the rotation described by quaternion(s) q. Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v, where * denotes any number of dimensions. Returns a tensor of shape (*, 3).
Here is the function:
def rotate_vec_by_quaternion(v, q):
    """
    Rotate vector(s) v about the rotation described by quaternion(s) q.
    Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v,
    where * denotes any number of dimensions.
    Returns a tensor of shape (*, 3).
    """
    assert q.shape[-1] == 4
    assert v.shape[-1] == 3
    assert q.shape[:-1] == v.shape[:-1]
    out_shape = list(v.shape)
    # Flatten the leading dimensions so the cross products are plain 2-D ops.
    q_flat = q.contiguous().view(-1, 4)
    v_flat = v.view(-1, 3)
    w = q_flat[:, :1]
    imag = q_flat[:, 1:]
    # Efficient sandwich product: v' = v + 2*(w*(u x v) + u x (u x v)).
    cross1 = torch.cross(imag, v_flat, dim=1)
    cross2 = torch.cross(imag, cross1, dim=1)
    rotated = v_flat + 2 * (w * cross1 + cross2)
    return rotated.view(out_shape)
13,103 | import torch
from torch.nn import functional as F
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `quaternion_fix` function. Write a Python function `def quaternion_fix(quaternion)` to solve the following problem:
Enforce quaternion continuity across the time dimension by selecting the representation (q or -q) with minimal distance (or, equivalently, maximal dot product) between two consecutive frames. Args: quaternion: torch tensor of shape (batch_size, 4) Returns: quaternion: torch tensor of shape (batch_size, 4)
Here is the function:
def quaternion_fix(quaternion):
    """
    Enforce quaternion continuity across the time dimension by selecting
    the representation (q or -q) with minimal distance (or, equivalently, maximal dot product)
    between two consecutive frames.
    Args:
        quaternion: torch tensor of shape (batch_size, 4)
    Returns:
        quaternion: torch tensor of shape (batch_size, 4)
    """
    fixed = quaternion.clone()
    # A negative dot product between consecutive frames marks a sign jump.
    consecutive_dots = (quaternion[:-1] * quaternion[1:]).sum(dim=-1)
    jump = consecutive_dots < 0
    # A frame needs flipping when an odd number of jumps precede it.
    flip = (torch.cumsum(jump, dim=0) % 2).bool()
    fixed[1:][flip] *= -1
    return fixed
13,104 | import torch
from torch.nn import functional as F
import numpy as np
def quaternion_inverse(quaternion):
    """
    Invert quaternion(s): q^{-1} = conjugate(q) / ||q||^2.
    Args:
        quaternion: torch tensor of shape (batch_size, 4) in (w, x, y, z) representation.
    Returns:
        inverse quaternion(s), torch tensor of shape (batch_size, 4).
    """
    q_conjugate = quaternion.clone()
    # Conjugate: negate the vector (x, y, z) part in place.
    # (The original `q_conjugate[::, 1:] * -1` discarded its result, so the
    # conjugate was never taken; it also divided by |vec| + w^2 instead of
    # the squared norm w^2 + x^2 + y^2 + z^2.)
    q_conjugate[:, 1:] *= -1
    q_norm_sq = (quaternion * quaternion).sum(dim=-1)
    return q_conjugate / q_norm_sq.unsqueeze(-1)
13,105 | import torch
from torch.nn import functional as F
import numpy as np
def quaternion_lerp(q1, q2, t):
    """Normalized linear interpolation (nlerp) between quaternions ``q1`` and ``q2``.

    Linearly blends the two quaternions with weight ``t`` (0 -> q1, 1 -> q2)
    and renormalizes so the result lies on the unit sphere again.
    """
    blended = (1 - t) * q1 + t * q2
    norm = blended.norm(dim=-1).unsqueeze(-1)
    return blended / norm
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.