id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
13,306 | import numpy as np
from ..config import load_object
from .matchSVT import matchSVT
def getDimGroups(lDetections):
dimGroups = [0]
for data in lDetections:
dimGroups.append(dimGroups[-1] + len(data))
views = np.zeros(dimGroups[-1], dtype=int)
for nv in range(len(dimGroups) - 1):
views[dimGroups[nv]:dimGroups[nv+1]] = nv
return dimGroups, views | null |
13,307 | import numpy as np
from ..config import load_object
from .matchSVT import matchSVT
def composeAff(out, vis=False):
    """Fuse several affinity matrices into one by their geometric mean.

    out maps a name to an affinity matrix; all matrices share one shape.
    The element-wise product of all matrices is taken to the power 1/N,
    where N is the number of matrices. `vis` is accepted for interface
    compatibility but unused here.
    """
    keys = list(out.keys())
    n_matrices = len(keys)
    fused = out[keys[0]].copy()
    for key in keys[1:]:
        fused = fused * out[key]
    return np.power(fused, 1 / n_matrices)
13,308 | import numpy as np
from ..config import load_object
from .matchSVT import matchSVT
def SimpleConstrain(dimGroups):
    """Build the matching-constraint matrix for grouped detections.

    Detections inside the same group (same view) must not match each other,
    so each within-group block is zeroed; every cross-group pair is allowed
    (1), and the diagonal is forced back to 1 (self-match).
    """
    size = dimGroups[-1]
    constrain = np.ones((size, size))
    for g in range(len(dimGroups) - 1):
        lo, hi = dimGroups[g], dimGroups[g + 1]
        constrain[lo:hi, lo:hi] = 0
    diag = np.arange(size)
    constrain[diag, diag] = 1
    return constrain
13,309 | import numpy as np
def matchSVT(M_aff, dimGroups, M_constr=None, M_obs=None, control={}):
    """Consistent multi-view matching via Singular Value Thresholding.

    Alternating ADMM-style optimization that turns a noisy pairwise affinity
    matrix into a cycle-consistent (low-rank) permutation-like matrix.

    Parameters
    ----------
    M_aff : np.ndarray (N, N)
        Pairwise affinity between all detections across all views.
    dimGroups : list of int
        Cumulative offsets per view; detections [dimGroups[i], dimGroups[i+1])
        belong to view i and must not match each other.
    M_constr : np.ndarray (N, N), optional
        Binary constraint mask; built from dimGroups when None.
    M_obs : unused
        Kept for interface compatibility.
    control : dict
        Required keys: 'maxIter', 'w_rank', 'tol', 'w_sparse', 'log'.

    Returns
    -------
    np.ndarray (N, N)
        Symmetric matching matrix with entries in [0, 1] and unit diagonal.
    """
    max_iter = control['maxIter']
    w_rank = control['w_rank']
    tol = control['tol']
    X = M_aff.copy()
    N = X.shape[0]
    index_diag = np.arange(N)
    X[index_diag, index_diag] = 0.
    if M_constr is None:
        # Default constraint: forbid matches within the same view,
        # keep the diagonal (self-match) allowed.
        M_constr = np.ones_like(M_aff)
        for i in range(len(dimGroups) - 1):
            M_constr[dimGroups[i]:dimGroups[i+1], dimGroups[i]:dimGroups[i+1]] = 0
        M_constr[index_diag, index_diag] = 1
    X = (X + X.T)/2
    Y = np.zeros((N, N))  # dual variable
    mu = 64               # ADMM penalty, adapted below
    W = control['w_sparse'] - X
    for iter_ in range(max_iter):
        X0 = X.copy()
        # Q update: singular value thresholding (soft-shrink the spectrum).
        Q = 1.0/mu * Y + X
        U, s, VT = np.linalg.svd(Q)
        diagS = s - w_rank/mu
        diagS[diagS < 0] = 0
        Q = U @ np.diag(diagS) @ VT
        # X update: gradient step on the sparsity/affinity term.
        X = Q - (W + Y)/mu
        # Project X onto the feasible set: zero within-view blocks,
        # unit diagonal, entries clipped to [0, 1], symmetric.
        for i in range(len(dimGroups)-1):
            ind1, ind2 = dimGroups[i], dimGroups[i + 1]
            X[ind1:ind2, ind1:ind2] = 0
        X[index_diag, index_diag] = 1.
        X[X < 0] = 0
        X[X > 1] = 1
        X = X * M_constr
        X = (X + X.T)/2
        # Dual update and residuals (primal = consensus gap, dual = progress).
        Y = Y + mu * (X - Q)
        pRes = np.linalg.norm(X - Q)/N
        dRes = mu * np.linalg.norm(X - X0)/N
        if control['log']:
            print('[Match] {}, Res = ({:.4f}, {:.4f}), mu = {}'.format(iter_, pRes, dRes, mu))
        if pRes < tol and dRes < tol:
            break
        # Standard ADMM penalty adaptation: rebalance the two residuals.
        if pRes > 10 * dRes:
            mu = 2 * mu
        elif dRes > 10 * pRes:
            mu = mu / 2
    return X
13,310 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import torch
import torch.nn.functional as F
def rot_mat_to_euler(rot_mats):
    # Extract the rotation angle about the y-axis from a batch of 3x3
    # rotation matrices (Bx3x3). Degenerate configurations near
    # euler angles like [0, pi, 0] need care — see note in caller.
    r00 = rot_mats[:, 0, 0]
    r10 = rot_mats[:, 1, 0]
    r20 = rot_mats[:, 2, 0]
    sy = torch.sqrt(r00 * r00 + r10 * r10)
    return torch.atan2(-r20, sy)
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
    ''' Calculates the rotation matrices for a batch of rotation vectors

        Parameters
        ----------
        rot_vecs: torch.tensor Nx3 (or ...x3)
            array of N axis-angle vectors; extra leading dims are preserved
        epsilon: float
            kept for interface compatibility; the numerical guard below uses
            a hard-coded 1e-8
        dtype: torch.dtype, optional

        Returns
        -------
        R: torch.tensor Nx3x3 (or ...x3x3)
            The rotation matrices for the given axis-angle parameters
    '''
    if len(rot_vecs.shape) > 2:
        # Remember original leading dims so the output can be reshaped back.
        rot_vec_ori = rot_vecs
        rot_vecs = rot_vecs.view(-1, 3)
    else:
        rot_vec_ori = None
    batch_size = rot_vecs.shape[0]
    device = rot_vecs.device
    # Small offset keeps the norm (and the division below) away from zero.
    angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)
    rot_dir = rot_vecs / angle
    cos = torch.unsqueeze(torch.cos(angle), dim=1)
    sin = torch.unsqueeze(torch.sin(angle), dim=1)
    # Bx1 arrays
    rx, ry, rz = torch.split(rot_dir, 1, dim=1)
    zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
    # Skew-symmetric cross-product matrix K of the (unit) rotation axis.
    # (The original also pre-allocated K with torch.zeros and immediately
    # overwrote it; that dead assignment is removed.)
    K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
        .view((batch_size, 3, 3))
    ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
    # Rodrigues' formula: R = I + sin(a) K + (1 - cos(a)) K^2
    rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
    if rot_vec_ori is not None:
        rot_mat = rot_mat.reshape(*rot_vec_ori.shape[:-1], 3, 3)
    return rot_mat
The provided code snippet includes necessary dependencies for implementing the `find_dynamic_lmk_idx_and_bcoords` function. Write a Python function `def find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx, dynamic_lmk_b_coords, neck_kin_chain, dtype=torch.float32)` to solve the following problem:
Compute the faces, barycentric coordinates for the dynamic landmarks To do so, we first compute the rotation of the neck around the y-axis and then use a pre-computed look-up table to find the faces and the barycentric coordinates that will be used. Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de) for providing the original TensorFlow implementation and for the LUT. Parameters ---------- vertices: torch.tensor BxVx3, dtype = torch.float32 The tensor of input vertices pose: torch.tensor Bx(Jx3), dtype = torch.float32 The current pose of the body model dynamic_lmk_faces_idx: torch.tensor L, dtype = torch.long The look-up table from neck rotation to faces dynamic_lmk_b_coords: torch.tensor Lx3, dtype = torch.float32 The look-up table from neck rotation to barycentric coordinates neck_kin_chain: list A python list that contains the indices of the joints that form the kinematic chain of the neck. dtype: torch.dtype, optional Returns ------- dyn_lmk_faces_idx: torch.tensor, dtype = torch.long A tensor of size BxL that contains the indices of the faces that will be used to compute the current dynamic landmarks. dyn_lmk_b_coords: torch.tensor, dtype = torch.float32 A tensor of size BxL that contains the indices of the faces that will be used to compute the current dynamic landmarks.
Here is the function:
def find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx,
                                     dynamic_lmk_b_coords,
                                     neck_kin_chain, dtype=torch.float32):
    ''' Compute the faces, barycentric coordinates for the dynamic landmarks
        To do so, we first compute the rotation of the neck around the y-axis
        and then use a pre-computed look-up table to find the faces and the
        barycentric coordinates that will be used.
        Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de)
        for providing the original TensorFlow implementation and for the LUT.
        Parameters
        ----------
        vertices: torch.tensor BxVx3, dtype = torch.float32
            The tensor of input vertices
        pose: torch.tensor Bx(Jx3), dtype = torch.float32
            The current pose of the body model
        dynamic_lmk_faces_idx: torch.tensor L, dtype = torch.long
            The look-up table from neck rotation to faces
        dynamic_lmk_b_coords: torch.tensor Lx3, dtype = torch.float32
            The look-up table from neck rotation to barycentric coordinates
        neck_kin_chain: list
            A python list that contains the indices of the joints that form the
            kinematic chain of the neck.
        dtype: torch.dtype, optional
        Returns
        -------
        dyn_lmk_faces_idx: torch.tensor, dtype = torch.long
            A tensor of size BxL that contains the indices of the faces that
            will be used to compute the current dynamic landmarks.
        dyn_lmk_b_coords: torch.tensor, dtype = torch.float32
            A tensor of size BxLx3 that contains the barycentric coordinates
            that will be used to compute the current dynamic landmarks.
    '''
    batch_size = vertices.shape[0]
    # Select the axis-angle rotations of the joints along the neck chain.
    aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1,
                                 neck_kin_chain)
    rot_mats = batch_rodrigues(
        aa_pose.view(-1, 3), dtype=dtype).view(batch_size, -1, 3, 3)
    # Compose rotations along the chain to get the total head rotation.
    rel_rot_mat = torch.eye(3, device=vertices.device,
                            dtype=dtype).unsqueeze_(dim=0)
    for idx in range(len(neck_kin_chain)):
        rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)
    # Rotation about y in integer degrees; positive angles saturate at the
    # LUT's maximum of 39 degrees (no lower clamp here — handled below).
    y_rot_angle = torch.round(
        torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi,
                    max=39)).to(dtype=torch.long)
    neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)
    mask = y_rot_angle.lt(-39).to(dtype=torch.long)
    # Negative angles index the second half of the LUT:
    # a in [-39, -1] maps to index 39 - a (40..78); a < -39 saturates at 78.
    neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)
    y_rot_angle = (neg_mask * neg_vals +
                   (1 - neg_mask) * y_rot_angle)
    # Gather the per-batch face indices and barycentric weights from the LUT.
    dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx,
                                           0, y_rot_angle)
    dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords,
                                          0, y_rot_angle)
    return dyn_lmk_faces_idx, dyn_lmk_b_coords
13,312 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import torch
import torch.nn.functional as F
def vertices2joints(J_regressor, vertices):
    ''' Calculates the 3D joint locations from the vertices

    Parameters
    ----------
    J_regressor : torch.tensor JxV
        The regressor array that is used to calculate the joints from the
        position of the vertices
    vertices : torch.tensor BxVx3
        The tensor of mesh vertices

    Returns
    -------
    torch.tensor BxJx3
        The location of the joints
    '''
    # Batched matrix product: (J, V) @ (B, V, 3) -> (B, J, 3),
    # i.e. each joint is a weighted sum of the mesh vertices.
    return torch.matmul(J_regressor, vertices)
def blend_shapes(betas, shape_disps):
    ''' Calculates the per vertex displacement due to the blend shapes

    Parameters
    ----------
    betas : torch.tensor Bx(num_betas)
        Blend shape coefficients
    shape_disps: torch.tensor Vx3x(num_betas)
        Blend shapes

    Returns
    -------
    torch.tensor BxVx3
        The per-vertex displacement due to shape deformation
    '''
    # Contract the beta axis: out[b, v, c] = sum_n betas[b, n] * disps[v, c, n]
    return torch.tensordot(betas, shape_disps, dims=([1], [2]))
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
    ''' Calculates the rotation matrices for a batch of rotation vectors

        Parameters
        ----------
        rot_vecs: torch.tensor Nx3 (or ...x3)
            array of N axis-angle vectors; extra leading dims are preserved
        epsilon: float
            kept for interface compatibility; the numerical guard below uses
            a hard-coded 1e-8
        dtype: torch.dtype, optional

        Returns
        -------
        R: torch.tensor Nx3x3 (or ...x3x3)
            The rotation matrices for the given axis-angle parameters
    '''
    if len(rot_vecs.shape) > 2:
        # Remember original leading dims so the output can be reshaped back.
        rot_vec_ori = rot_vecs
        rot_vecs = rot_vecs.view(-1, 3)
    else:
        rot_vec_ori = None
    batch_size = rot_vecs.shape[0]
    device = rot_vecs.device
    # Small offset keeps the norm (and the division below) away from zero.
    angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)
    rot_dir = rot_vecs / angle
    cos = torch.unsqueeze(torch.cos(angle), dim=1)
    sin = torch.unsqueeze(torch.sin(angle), dim=1)
    # Bx1 arrays
    rx, ry, rz = torch.split(rot_dir, 1, dim=1)
    zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
    # Skew-symmetric cross-product matrix K of the (unit) rotation axis.
    # (The original also pre-allocated K with torch.zeros and immediately
    # overwrote it; that dead assignment is removed.)
    K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
        .view((batch_size, 3, 3))
    ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
    # Rodrigues' formula: R = I + sin(a) K + (1 - cos(a)) K^2
    rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
    if rot_vec_ori is not None:
        rot_mat = rot_mat.reshape(*rot_vec_ori.shape[:-1], 3, 3)
    return rot_mat
def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):
    """
    Applies a batch of rigid transformations to the joints

    Parameters
    ----------
    rot_mats : torch.tensor BxNx3x3
        Tensor of rotation matrices
    joints : torch.tensor BxNx3
        Locations of joints
    parents : torch.tensor N
        The kinematic tree (parent index per joint; joint 0 is the root)
    dtype : torch.dtype, optional:
        The data type of the created tensors, the default is torch.float32

    Returns
    -------
    posed_joints : torch.tensor BxNx3
        The locations of the joints after applying the pose rotations
    rel_transforms : torch.tensor BxNx4x4
        The relative (with respect to the rest pose) rigid transformations
        for all the joints
    """
    joints = torch.unsqueeze(joints, dim=-1)
    # Joint offsets relative to their parents; the root keeps its absolute
    # position.
    rel_joints = joints.clone()
    rel_joints[:, 1:] -= joints[:, parents[1:]]
    # Per-joint local 4x4 transforms built from rotation + relative offset.
    # NOTE: transform_mat is defined elsewhere in this module.
    transforms_mat = transform_mat(
        rot_mats.view(-1, 3, 3),
        rel_joints.contiguous().view(-1, 3, 1)).view(-1, joints.shape[1], 4, 4)
    # Walk the kinematic tree, composing each joint's global transform from
    # its parent's (rest-pose offsets need no rotation — identity at rest).
    transform_chain = [transforms_mat[:, 0]]
    for i in range(1, parents.shape[0]):
        curr_res = torch.matmul(transform_chain[parents[i]],
                                transforms_mat[:, i])
        transform_chain.append(curr_res)
    transforms = torch.stack(transform_chain, dim=1)
    # The last column of the transformations contains the posed joints.
    # (The original computed this twice on consecutive lines; deduplicated.)
    posed_joints = transforms[:, :, :3, 3]
    # Make the transforms relative to the rest pose: subtract the transformed
    # rest-pose joint locations so they can be applied to rest-pose vertices.
    joints_homogen = F.pad(joints, [0, 0, 0, 1])
    rel_transforms = transforms - F.pad(
        torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])
    return posed_joints, rel_transforms
The provided code snippet includes necessary dependencies for implementing the `lbs` function. Write a Python function `def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents, lbs_weights, pose2rot=True, dtype=torch.float32, only_shape=False, use_shape_blending=True, use_pose_blending=True, J_shaped=None, return_vertices=True)` to solve the following problem:
Performs Linear Blend Skinning with the given shape and pose parameters Parameters ---------- betas : torch.tensor BxNB The tensor of shape parameters pose : torch.tensor Bx(J + 1) * 3 The pose parameters in axis-angle format v_template torch.tensor BxVx3 The template mesh that will be deformed shapedirs : torch.tensor 1xNB The tensor of PCA shape displacements posedirs : torch.tensor Px(V * 3) The pose PCA coefficients J_regressor : torch.tensor JxV The regressor array that is used to calculate the joints from the position of the vertices parents: torch.tensor J The array that describes the kinematic tree for the model lbs_weights: torch.tensor N x V x (J + 1) The linear blend skinning weights that represent how much the rotation matrix of each part affects each vertex pose2rot: bool, optional Flag on whether to convert the input pose tensor to rotation matrices. The default value is True. If False, then the pose tensor should already contain rotation matrices and have a size of Bx(J + 1)x9 dtype: torch.dtype, optional Returns ------- verts: torch.tensor BxVx3 The vertices of the mesh after applying the shape and pose displacements. joints: torch.tensor BxJx3 The joints of the model
Here is the function:
def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,
        lbs_weights, pose2rot=True, dtype=torch.float32, only_shape=False,
        use_shape_blending=True, use_pose_blending=True, J_shaped=None, return_vertices=True):
    ''' Performs Linear Blend Skinning with the given shape and pose parameters
        Parameters
        ----------
        betas : torch.tensor BxNB
            The tensor of shape parameters
        pose : torch.tensor Bx(J + 1) * 3
            The pose parameters in axis-angle format
        v_template torch.tensor BxVx3
            The template mesh that will be deformed
        shapedirs : torch.tensor 1xNB
            The tensor of PCA shape displacements
        posedirs : torch.tensor Px(V * 3)
            The pose PCA coefficients
        J_regressor : torch.tensor JxV
            The regressor array that is used to calculate the joints from
            the position of the vertices
        parents: torch.tensor J
            The array that describes the kinematic tree for the model
        lbs_weights: torch.tensor N x V x (J + 1)
            The linear blend skinning weights that represent how much the
            rotation matrix of each part affects each vertex
        pose2rot: bool, optional
            Flag on whether to convert the input pose tensor to rotation
            matrices. The default value is True. If False, then the pose tensor
            should already contain rotation matrices and have a size of
            Bx(J + 1)x9
        dtype: torch.dtype, optional
        only_shape: bool, optional
            If True, stop after shape blending and return (v_shaped, J, None, None).
        use_shape_blending / use_pose_blending: bool, optional
            Toggle the shape and pose corrective blendshapes; when shape
            blending is off, J_shaped must be provided.
        J_shaped: torch.tensor Jx3, optional
            Precomputed rest-pose joints, required if use_shape_blending is False.
        return_vertices: bool, optional
            If False, skip skinning and return (None, J_transformed, A, None).
        Returns
        -------
        A 4-tuple (verts, J_transformed, A, T):
        verts: torch.tensor BxVx3
            The vertices of the mesh after applying the shape and pose
            displacements (None when return_vertices is False).
        J_transformed: torch.tensor BxJx3
            The joints of the model after posing.
        A: torch.tensor BxJx4x4
            Relative rigid transforms per joint (from batch_rigid_transform).
        T: torch.tensor BxVx4x4
            Per-vertex blended skinning transforms (None when skinning is skipped).
    '''
    batch_size = max(betas.shape[0], pose.shape[0])
    device = betas.device
    # Add shape contribution
    if use_shape_blending:
        v_shaped = v_template + blend_shapes(betas, shapedirs)
        # Get the joints
        # NxJx3 array
        J = vertices2joints(J_regressor, v_shaped)
    else:
        # No shape blendshapes: broadcast the template and use the
        # precomputed rest-pose joints.
        v_shaped = v_template.unsqueeze(0).expand(batch_size, -1, -1)
        assert J_shaped is not None
        J = J_shaped[None].expand(batch_size, -1, -1)
    if only_shape:
        return v_shaped, J, None, None
    # 3. Add pose blend shapes
    # N x J x 3 x 3
    if pose2rot:
        rot_mats = batch_rodrigues(
            pose.view(-1, 3), dtype=dtype).view([batch_size, -1, 3, 3])
    else:
        rot_mats = pose.view(batch_size, -1, 3, 3)
    if use_pose_blending:
        # Pose feature = deviation of each non-root joint rotation from
        # identity, flattened; drives the pose corrective blendshapes.
        ident = torch.eye(3, dtype=dtype, device=device)
        pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
        pose_offsets = torch.matmul(pose_feature, posedirs) \
            .view(batch_size, -1, 3)
        v_posed = pose_offsets + v_shaped
    else:
        v_posed = v_shaped
    # 4. Get the global joint location
    J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)
    if not return_vertices:
        return None, J_transformed, A, None
    # 5. Do skinning:
    # W is N x V x (J + 1)
    W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])
    # (N x V x (J + 1)) x (N x (J + 1) x 16)
    num_joints = J_transformed.shape[1]
    T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \
        .view(batch_size, -1, 4, 4)
    # Apply the blended 4x4 transform to each vertex in homogeneous coords.
    homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1],
                               dtype=dtype, device=device)
    v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2)
    v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1))
    verts = v_homo[:, :, :3, 0]
    return verts, J_transformed, A, T
13,313 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import torch
import torch.nn.functional as F
def vertices2joints(J_regressor, vertices):
    ''' Calculates the 3D joint locations from the vertices

    Parameters
    ----------
    J_regressor : torch.tensor JxV
        The regressor array that is used to calculate the joints from the
        position of the vertices
    vertices : torch.tensor BxVx3
        The tensor of mesh vertices

    Returns
    -------
    torch.tensor BxJx3
        The location of the joints
    '''
    # Batched matrix product: (J, V) @ (B, V, 3) -> (B, J, 3),
    # i.e. each joint is a weighted sum of the mesh vertices.
    return torch.matmul(J_regressor, vertices)
def blend_shapes(betas, shape_disps):
    ''' Calculates the per vertex displacement due to the blend shapes

    Parameters
    ----------
    betas : torch.tensor Bx(num_betas)
        Blend shape coefficients
    shape_disps: torch.tensor Vx3x(num_betas)
        Blend shapes

    Returns
    -------
    torch.tensor BxVx3
        The per-vertex displacement due to shape deformation
    '''
    # Contract the beta axis: out[b, v, c] = sum_n betas[b, n] * disps[v, c, n]
    return torch.tensordot(betas, shape_disps, dims=([1], [2]))
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
    ''' Calculates the rotation matrices for a batch of rotation vectors

        Parameters
        ----------
        rot_vecs: torch.tensor Nx3 (or ...x3)
            array of N axis-angle vectors; extra leading dims are preserved
        epsilon: float
            kept for interface compatibility; the numerical guard below uses
            a hard-coded 1e-8
        dtype: torch.dtype, optional

        Returns
        -------
        R: torch.tensor Nx3x3 (or ...x3x3)
            The rotation matrices for the given axis-angle parameters
    '''
    if len(rot_vecs.shape) > 2:
        # Remember original leading dims so the output can be reshaped back.
        rot_vec_ori = rot_vecs
        rot_vecs = rot_vecs.view(-1, 3)
    else:
        rot_vec_ori = None
    batch_size = rot_vecs.shape[0]
    device = rot_vecs.device
    # Small offset keeps the norm (and the division below) away from zero.
    angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)
    rot_dir = rot_vecs / angle
    cos = torch.unsqueeze(torch.cos(angle), dim=1)
    sin = torch.unsqueeze(torch.sin(angle), dim=1)
    # Bx1 arrays
    rx, ry, rz = torch.split(rot_dir, 1, dim=1)
    zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
    # Skew-symmetric cross-product matrix K of the (unit) rotation axis.
    # (The original also pre-allocated K with torch.zeros and immediately
    # overwrote it; that dead assignment is removed.)
    K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
        .view((batch_size, 3, 3))
    ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
    # Rodrigues' formula: R = I + sin(a) K + (1 - cos(a)) K^2
    rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
    if rot_vec_ori is not None:
        rot_mat = rot_mat.reshape(*rot_vec_ori.shape[:-1], 3, 3)
    return rot_mat
def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):
    """
    Applies a batch of rigid transformations to the joints

    Parameters
    ----------
    rot_mats : torch.tensor BxNx3x3
        Tensor of rotation matrices
    joints : torch.tensor BxNx3
        Locations of joints
    parents : torch.tensor N
        The kinematic tree (parent index per joint; joint 0 is the root)
    dtype : torch.dtype, optional:
        The data type of the created tensors, the default is torch.float32

    Returns
    -------
    posed_joints : torch.tensor BxNx3
        The locations of the joints after applying the pose rotations
    rel_transforms : torch.tensor BxNx4x4
        The relative (with respect to the rest pose) rigid transformations
        for all the joints
    """
    joints = torch.unsqueeze(joints, dim=-1)
    # Joint offsets relative to their parents; the root keeps its absolute
    # position.
    rel_joints = joints.clone()
    rel_joints[:, 1:] -= joints[:, parents[1:]]
    # Per-joint local 4x4 transforms built from rotation + relative offset.
    # NOTE: transform_mat is defined elsewhere in this module.
    transforms_mat = transform_mat(
        rot_mats.view(-1, 3, 3),
        rel_joints.contiguous().view(-1, 3, 1)).view(-1, joints.shape[1], 4, 4)
    # Walk the kinematic tree, composing each joint's global transform from
    # its parent's (rest-pose offsets need no rotation — identity at rest).
    transform_chain = [transforms_mat[:, 0]]
    for i in range(1, parents.shape[0]):
        curr_res = torch.matmul(transform_chain[parents[i]],
                                transforms_mat[:, i])
        transform_chain.append(curr_res)
    transforms = torch.stack(transform_chain, dim=1)
    # The last column of the transformations contains the posed joints.
    # (The original computed this twice on consecutive lines; deduplicated.)
    posed_joints = transforms[:, :, :3, 3]
    # Make the transforms relative to the rest pose: subtract the transformed
    # rest-pose joint locations so they can be applied to rest-pose vertices.
    joints_homogen = F.pad(joints, [0, 0, 0, 1])
    rel_transforms = transforms - F.pad(
        torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])
    return posed_joints, rel_transforms
def batch_dqs_blending(A,W,Vs):
    # Dual-quaternion skinning: blend per-joint rigid transforms A (B,J,4,4)
    # with per-vertex weights W (B,V,J) and apply the blended transform to
    # the vertices Vs (B,V,3). Returns the deformed vertices (B,V,3).
    Bnum,Jnum,_,_=A.shape
    _,Vnum,_=W.shape
    A = A.view(Bnum*Jnum,4,4)
    # Convert each rotation block to a quaternion (w, x, y, z); the clamp
    # keeps the sqrt away from zero/negative values from numerical noise.
    Rs=A[:,:3,:3]
    ws=torch.sqrt(torch.clamp(Rs[:,0,0]+Rs[:,1,1]+Rs[:,2,2]+1.,min=1.e-6))/2.
    xs=(Rs[:,2,1]-Rs[:,1,2])/(4.*ws)
    ys=(Rs[:,0,2]-Rs[:,2,0])/(4.*ws)
    zs=(Rs[:,1,0]-Rs[:,0,1])/(4.*ws)
    # Translation part -> dual part of the dual quaternion (0.5 * t * q).
    Ts=A[:,:3,3]
    vDw=-0.5*( Ts[:,0]*xs + Ts[:,1]*ys + Ts[:,2]*zs)
    vDx=0.5*( Ts[:,0]*ws + Ts[:,1]*zs - Ts[:,2]*ys)
    vDy=0.5*(-Ts[:,0]*zs + Ts[:,1]*ws + Ts[:,2]*xs)
    vDz=0.5*( Ts[:,0]*ys - Ts[:,1]*xs + Ts[:,2]*ws)
    # Weighted blend over joints of the real (b0) and dual (be) parts.
    b0=W.unsqueeze(-2)@torch.cat([ws[:,None],xs[:,None],ys[:,None],zs[:,None]],dim=-1).reshape(Bnum, 1, Jnum, 4) #B,V,J,4
    be=W.unsqueeze(-2)@torch.cat([vDw[:,None],vDx[:,None],vDy[:,None],vDz[:,None]],dim=-1).reshape(Bnum, 1, Jnum, 4) #B,V,J,4
    b0 = b0.reshape(-1, 4)
    be = be.reshape(-1, 4)
    # Normalize the blended dual quaternion by the real part's norm.
    ns=torch.norm(b0,dim=-1,keepdim=True)
    be=be/ns
    b0=b0/ns
    # Apply the unit dual quaternion: rotate each vertex by b0 and add the
    # translation recovered from the dual part be.
    Vs=Vs.view(Bnum*Vnum,3)
    Vs=Vs+2.*b0[:,1:].cross(b0[:,1:].cross(Vs)+b0[:,:1]*Vs)+2.*(b0[:,:1]*be[:,1:]-be[:,:1]*b0[:,1:]+b0[:,1:].cross(be[:,1:]))
    return Vs.reshape(Bnum,Vnum,3)
The provided code snippet includes necessary dependencies for implementing the `dqs` function. Write a Python function `def dqs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents, lbs_weights, pose2rot=True, dtype=torch.float32, only_shape=False, use_shape_blending=True, use_pose_blending=True, J_shaped=None)` to solve the following problem:
Performs Linear Blend Skinning with the given shape and pose parameters Parameters ---------- betas : torch.tensor BxNB The tensor of shape parameters pose : torch.tensor Bx(J + 1) * 3 The pose parameters in axis-angle format v_template torch.tensor BxVx3 The template mesh that will be deformed shapedirs : torch.tensor 1xNB The tensor of PCA shape displacements posedirs : torch.tensor Px(V * 3) The pose PCA coefficients J_regressor : torch.tensor JxV The regressor array that is used to calculate the joints from the position of the vertices parents: torch.tensor J The array that describes the kinematic tree for the model lbs_weights: torch.tensor N x V x (J + 1) The linear blend skinning weights that represent how much the rotation matrix of each part affects each vertex pose2rot: bool, optional Flag on whether to convert the input pose tensor to rotation matrices. The default value is True. If False, then the pose tensor should already contain rotation matrices and have a size of Bx(J + 1)x9 dtype: torch.dtype, optional Returns ------- verts: torch.tensor BxVx3 The vertices of the mesh after applying the shape and pose displacements. joints: torch.tensor BxJx3 The joints of the model
Here is the function:
def dqs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,
        lbs_weights, pose2rot=True, dtype=torch.float32, only_shape=False,
        use_shape_blending=True, use_pose_blending=True, J_shaped=None):
    ''' Performs Dual Quaternion Skinning with the given shape and pose
        parameters (same pipeline as `lbs`, but the final skinning step uses
        batch_dqs_blending instead of linear blending).
        Parameters
        ----------
        betas : torch.tensor BxNB
            The tensor of shape parameters
        pose : torch.tensor Bx(J + 1) * 3
            The pose parameters in axis-angle format
        v_template torch.tensor BxVx3
            The template mesh that will be deformed
        shapedirs : torch.tensor 1xNB
            The tensor of PCA shape displacements
        posedirs : torch.tensor Px(V * 3)
            The pose PCA coefficients
        J_regressor : torch.tensor JxV
            The regressor array that is used to calculate the joints from
            the position of the vertices
        parents: torch.tensor J
            The array that describes the kinematic tree for the model
        lbs_weights: torch.tensor N x V x (J + 1)
            The skinning weights that represent how much the
            transform of each part affects each vertex
        pose2rot: bool, optional
            Flag on whether to convert the input pose tensor to rotation
            matrices. The default value is True. If False, then the pose tensor
            should already contain rotation matrices and have a size of
            Bx(J + 1)x9
        dtype: torch.dtype, optional
        only_shape: bool, optional
            If True, stop after shape blending and return (v_shaped, J).
        use_shape_blending / use_pose_blending: bool, optional
            Toggle the shape and pose corrective blendshapes; when shape
            blending is off, J_shaped must be provided.
        J_shaped: torch.tensor Jx3, optional
            Precomputed rest-pose joints, required if use_shape_blending is False.
        Returns
        -------
        verts: torch.tensor BxVx3
            The vertices of the mesh after applying the shape and pose
            displacements.
        J_transformed: torch.tensor BxJx3
            The joints of the model after posing.
    '''
    batch_size = max(betas.shape[0], pose.shape[0])
    device = betas.device
    # Add shape contribution
    if use_shape_blending:
        v_shaped = v_template + blend_shapes(betas, shapedirs)
        # Get the joints
        # NxJx3 array
        J = vertices2joints(J_regressor, v_shaped)
    else:
        # No shape blendshapes: broadcast the template and use the
        # precomputed rest-pose joints.
        v_shaped = v_template.unsqueeze(0).expand(batch_size, -1, -1)
        assert J_shaped is not None
        J = J_shaped[None].expand(batch_size, -1, -1)
    if only_shape:
        return v_shaped, J
    # 3. Add pose blend shapes
    # N x J x 3 x 3
    if pose2rot:
        rot_mats = batch_rodrigues(
            pose.view(-1, 3), dtype=dtype).view([batch_size, -1, 3, 3])
    else:
        rot_mats = pose.view(batch_size, -1, 3, 3)
    if use_pose_blending:
        # Pose feature = deviation of each non-root joint rotation from
        # identity, flattened; drives the pose corrective blendshapes.
        ident = torch.eye(3, dtype=dtype, device=device)
        pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
        pose_offsets = torch.matmul(pose_feature, posedirs) \
            .view(batch_size, -1, 3)
        v_posed = pose_offsets + v_shaped
    else:
        v_posed = v_shaped
    # 4. Get the global joint location
    J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)
    # 5. Do skinning:
    # W is N x V x (J + 1)
    W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])
    # Dual-quaternion blending of the per-joint transforms A.
    verts=batch_dqs_blending(A,W,v_posed)
    return verts, J_transformed
13,314 | from .base import Model, Params
from .lbs import lbs, batch_rodrigues
import os
import numpy as np
import torch
def to_np(array, dtype=np.float32):
    """Convert *array* into a dense numpy array of the requested dtype.

    scipy sparse inputs are detected via the type's module path (so scipy
    does not need to be imported here) and densified first.
    """
    is_sparse = 'scipy.sparse' in str(type(array))
    dense = array.todense() if is_sparse else array
    return np.array(dense, dtype=dtype)
13,315 | from .base import Model, Params
from .lbs import lbs, batch_rodrigues
import os
import numpy as np
import torch
def read_pickle(name):
    """Load and return the object stored in the pickle file *name*.

    NOTE(review): the original line was an empty stub (invalid Python as
    extracted); this is the evident intended implementation given its
    callers load .pkl model files — confirm against the upstream source.
    """
    import pickle
    with open(name, 'rb') as f:
        return pickle.load(f)
def load_model_data(model_path):
    """Load a body-model file (.npz or .pkl) and return its contents.

    Parameters
    ----------
    model_path : str
        Path to the model file; must exist and end in '.npz' or '.pkl'.

    Returns
    -------
    dict
        The model data (npz archives are converted to a plain dict).

    Raises
    ------
    AssertionError
        If the path does not exist.
    ValueError
        If the extension is unsupported. (Previously this case fell through
        and crashed later with a NameError on `data`.)
    """
    model_path = os.path.abspath(model_path)
    assert os.path.exists(model_path), 'Path {} does not exist!'.format(
        model_path)
    if model_path.endswith('.npz'):
        data = np.load(model_path)
        data = dict(data)
    elif model_path.endswith('.pkl'):
        data = read_pickle(model_path)
    else:
        raise ValueError('Unsupported model format: {}'.format(model_path))
    return data
13,316 | from .base import Model, Params
from .lbs import lbs, batch_rodrigues
import os
import numpy as np
import torch
def to_tensor(array, dtype=torch.float32, device=torch.device('cpu')):
    """Convert *array* to a torch tensor of the given dtype on *device*.

    Existing tensors are only moved to the device (dtype is kept), matching
    the original intent. The old check `'torch.tensor' in str(type(array))`
    never matched — `str(type(t))` is "<class 'torch.Tensor'>" — so tensors
    were always re-copied through torch.tensor(); fixed with isinstance.
    """
    if isinstance(array, torch.Tensor):
        return array.to(device)
    return torch.tensor(array, dtype=dtype).to(device)
def load_regressor(regressor_path):
    """Load a joint-regressor matrix as a tensor from .npy or sparse .txt.

    The .txt format is the one written by save_regressor: a header line
    '# rows cols' followed by 'i j value' entries for non-zero elements.

    Raises
    ------
    ValueError
        For any other extension. (The original dropped into `ipdb` here and
        then crashed with a NameError; fail fast with a clear message.)
    """
    if regressor_path.endswith('.npy'):
        X_regressor = to_tensor(np.load(regressor_path))
    elif regressor_path.endswith('.txt'):
        # np.loadtxt skips the '#' header; re-read it to recover the shape.
        data = np.loadtxt(regressor_path)
        with open(regressor_path, 'r') as f:
            shape = f.readline().split()[1:]
        reg = np.zeros((int(shape[0]), int(shape[1])))
        for i, j, v in data:
            reg[int(i), int(j)] = v
        X_regressor = to_tensor(reg)
    else:
        raise ValueError(
            'Unsupported regressor format: {}'.format(regressor_path))
    return X_regressor
13,317 | from .base import Model, Params
from .lbs import lbs, batch_rodrigues
import os
import numpy as np
import torch
def save_regressor(fname, data):
    """Write a dense regressor matrix to a sparse text file.

    Format: a '# rows cols' header line, then one 'i j value' line per
    strictly-positive entry; all lines end in CRLF.
    """
    rows, cols = data.shape
    with open(fname, 'w') as f:
        f.write('{} {} {}\r\n'.format('#', rows, cols))
        for r in range(rows):
            for c in range(cols):
                if data[r, c] > 0:
                    f.write('{} {} {}\r\n'.format(r, c, data[r, c]))
13,318 | import torch
import torch.nn as nn
from .base import Model
from .smpl import SMPLModel, SMPLLayerEmbedding, read_pickle, to_tensor
from os.path import join
import numpy as np
def read_pickle(name):
    """Load and return the object stored in the pickle file *name*.

    NOTE(review): the original line was an empty stub (invalid Python as
    extracted); this is the evident intended implementation given its
    callers load .pkl hand-model files — confirm against upstream source.
    """
    import pickle
    with open(name, 'rb') as f:
        return pickle.load(f)
def read_hand(path, use_pca, use_flat_mean, num_pca_comps):
    """Load hand-model PCA data (MANO-style pickle) from *path*.

    Returns (mean, components, weight, mean_full, components_full):
    the (possibly zeroed) mean pose row vector, the first `num_pca_comps`
    PCA components with their per-component weights, plus the untruncated
    mean and components. `use_pca` is accepted but not used in this body.
    """
    # NOTE(review): assumes the pickle contains 'hands_mean' and
    # 'hands_components' arrays — confirm against the MANO model file.
    data = read_pickle(path)
    mean = data['hands_mean'].reshape(1, -1).astype(np.float32)
    mean_full = mean
    components_full = data['hands_components'].astype(np.float32)
    # Per-component weight = squared norm of each PCA component row.
    weight = np.diag(components_full @ components_full.T)
    components = components_full[:num_pca_comps]
    weight = weight[:num_pca_comps]
    if use_flat_mean:
        # Flat mean: callers fold the mean into the pose themselves.
        mean = np.zeros_like(mean)
    return mean, components, weight, mean_full, components_full
13,319 | from easymocap.config.baseconfig import load_object, Config
from easymocap.mytools import Timer
from tqdm import tqdm
from easymocap.socket.base_client import BaseSocketClient
from easymocap.mytools.debug_utils import mywarn, run_cmd
import time
import numpy as np
def check_ip_port(address):
    """Split an ``ip:port`` string and report whether the port is usable.

    Returns (flag, ip, port); *flag* is False exactly when port == -1.
    """
    host, port_text = address.split(':')[:2]
    port_number = int(port_text)
    return port_number != -1, host, port_number
from easymocap.config.baseconfig import load_object, Config
from easymocap.mytools import Timer
from tqdm import tqdm
from easymocap.socket.base_client import BaseSocketClient
from easymocap.mytools.debug_utils import mywarn, run_cmd
import time
import numpy as np
# Joint ids kept by the half-body representation.
INDEX_HALF = [11,12,13,14,15,16,17,18,19, 20]
# Expand each joint id i into its three pose-parameter indices (3i, 3i+1, 3i+2).
INDEX_HALF = sum([[3*i+d for d in range(3)] for i in INDEX_HALF], [])
def mywarn(text):
    """Print *text* at 'warn' level via the project-wide myprint helper."""
    myprint(text, 'warn')
def triangulate(triangulator, dataset, vis_client):
    """Run multi-view triangulation over every frame of *dataset*.

    Each frame is triangulated; results are optionally streamed to
    *vis_client* and written back through ``dataset.write_all``.
    Depends on the module-level CLI namespace ``args`` (timer,
    half2total, no_write) and on ``INDEX_HALF``.
    """
    for nf in tqdm(range(len(dataset)), desc='recon'):
        with Timer('require data', not args.timer):
            data = dataset[nf]
        with Timer('triangulate', not args.timer):
            results = triangulator(data)  # -1 signals failure for this frame
        if vis_client is not None and results != -1 and args.half2total:
            # Expand the half-body pose vector into the full-body layout
            # (zero root + zero-padded body poses + both hands) before sending.
            results = [r.copy() for r in results]
            for res in results:
                root = np.zeros((1, 3))
                poses = np.zeros((1, 63))
                poses[:, INDEX_HALF] = res['poses']
                poses_full = np.hstack([root, poses, res['handl'], res['handr']])
                res['poses'] = poses_full
            results = {'annots': results}
            vis_client.to_euler(results)
            vis_client.send_str(results)
        elif vis_client is not None and results != -1:
            vis_client.send_any(results)
        if results != -1 and not args.no_write:
            dataset.write_all(results, data)
        elif results != -1:
            pass  # results valid but writing disabled via --no_write
        else:
            mywarn('No results in frame {}'.format(nf))
from os.path import join
import os
from easymocap.mytools.colmap_wrapper import COLMAPDatabase, colmap_ba, colmap_dense, colmap_feature_match, copy_images, create_empty_db
from easymocap.mytools.colmap_wrapper import colmap_feature_extract
from easymocap.mytools.debug_utils import log
class COLMAPDatabase(sqlite3.Connection):
    """sqlite3 connection subclass speaking the COLMAP database schema.

    Provides helpers to create the schema and to insert/read cameras,
    images, keypoints, descriptors, matches and two-view geometries.
    The ``CREATE_*`` SQL strings and the blob helpers (``array_to_blob``,
    ``blob_to_array``, ``image_ids_to_pair_id``, ``pair_id_to_image_ids``)
    are module-level names defined elsewhere in this file.
    """
    def connect(database_path):
        # Factory-style constructor: call as COLMAPDatabase.connect(path).
        # NOTE(review): written without @staticmethod; it must be invoked
        # through the class, not an instance.
        return sqlite3.connect(database_path, factory=COLMAPDatabase)
    def __init__(self, *args, **kwargs):
        super(COLMAPDatabase, self).__init__(*args, **kwargs)
        # Bind one-shot schema-creation helpers to this connection.
        self.create_tables = lambda: self.executescript(CREATE_ALL)
        self.create_cameras_table = \
            lambda: self.executescript(CREATE_CAMERAS_TABLE)
        self.create_descriptors_table = \
            lambda: self.executescript(CREATE_DESCRIPTORS_TABLE)
        self.create_images_table = \
            lambda: self.executescript(CREATE_IMAGES_TABLE)
        self.create_two_view_geometries_table = \
            lambda: self.executescript(CREATE_TWO_VIEW_GEOMETRIES_TABLE)
        self.create_keypoints_table = \
            lambda: self.executescript(CREATE_KEYPOINTS_TABLE)
        self.create_matches_table = \
            lambda: self.executescript(CREATE_MATCHES_TABLE)
        self.create_name_index = lambda: self.executescript(CREATE_NAME_INDEX)
    def add_camera(self, model, width, height, params,
                   prior_focal_length=False, camera_id=None):
        """Insert a camera row; returns the new camera id."""
        params = np.asarray(params, np.float64)
        cursor = self.execute(
            "INSERT INTO cameras VALUES (?, ?, ?, ?, ?, ?)",
            (camera_id, model, width, height, array_to_blob(params),
             prior_focal_length))
        return cursor.lastrowid
    def add_image(self, name, camera_id,
        prior_q=np.full(4, np.NaN), prior_t=np.full(3, np.NaN), image_id=None):
        """Insert an image row with optional pose priors; returns the image id.

        NOTE(review): ``np.NaN`` was removed in NumPy 2.0 (use ``np.nan``);
        this default will break there — confirm the pinned NumPy version.
        """
        cursor = self.execute(
            "INSERT INTO images VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
            (image_id, name, camera_id, prior_q[0], prior_q[1], prior_q[2],
             prior_q[3], prior_t[0], prior_t[1], prior_t[2]))
        return cursor.lastrowid
    def add_keypoints(self, image_id, keypoints):
        """Store an (N, 2|4|6) float32 keypoint array for *image_id*."""
        assert(len(keypoints.shape) == 2)
        assert(keypoints.shape[1] in [2, 4, 6])
        keypoints = np.asarray(keypoints, np.float32)
        self.execute(
            "INSERT INTO keypoints VALUES (?, ?, ?, ?)",
            (image_id,) + keypoints.shape + (array_to_blob(keypoints),))
    def add_descriptors(self, image_id, descriptors):
        """Store a uint8 descriptor matrix for *image_id*."""
        descriptors = np.ascontiguousarray(descriptors, np.uint8)
        self.execute(
            "INSERT INTO descriptors VALUES (?, ?, ?, ?)",
            (image_id,) + descriptors.shape + (array_to_blob(descriptors),))
    def add_matches(self, image_id1, image_id2, matches):
        """Store an (N, 2) match index array, normalised so id1 < id2."""
        assert(len(matches.shape) == 2)
        assert(matches.shape[1] == 2)
        if image_id1 > image_id2:
            # swap match columns so they follow the ordered pair id
            matches = matches[:,::-1]
        pair_id = image_ids_to_pair_id(image_id1, image_id2)
        matches = np.asarray(matches, np.uint32)
        self.execute(
            "INSERT INTO matches VALUES (?, ?, ?, ?)",
            (pair_id,) + matches.shape + (array_to_blob(matches),))
    def add_two_view_geometry(self, image_id1, image_id2, matches, extra, config=2):
        """Store verified matches plus F/E/H (and optionally qvec/tvec)."""
        assert(len(matches.shape) == 2)
        assert(matches.shape[1] == 2)
        if image_id1 > image_id2:
            matches = matches[:,::-1]
        pair_id = image_ids_to_pair_id(image_id1, image_id2)
        matches = np.asarray(matches, np.uint32)
        # Two schema variants: with or without relative-pose columns.
        if 'qvec' in extra.keys():
            self.execute(
                "INSERT INTO two_view_geometries VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                (pair_id,) + matches.shape + (array_to_blob(matches), config,
                 array_to_blob(extra['F']), array_to_blob(extra['E']), array_to_blob(extra['H']),
                 array_to_blob(extra['qvec']), array_to_blob(extra['tvec'])))
        else:
            self.execute(
                "INSERT INTO two_view_geometries VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
                (pair_id,) + matches.shape + (array_to_blob(matches), config,
                 array_to_blob(extra['F']), array_to_blob(extra['E']), array_to_blob(extra['H'])))
    def read_images(self):
        """Return (image_id -> name, name -> image_id) dictionaries."""
        image_id_to_name, name_to_image_id = {}, {}
        image_results = self.execute("SELECT * FROM images")
        for result in image_results:
            image_id, name, camera_id, q0, q1, q2, q3, t0, t1, t2 = result
            image_id_to_name[image_id] = name
            name_to_image_id[name] = image_id
        return image_id_to_name, name_to_image_id
    def read_keypoints(self, mapping=None):
        """Return {image_id (or mapping[image_id]): (rows, cols) float32 keypoints}."""
        image_id_to_keypoints = {}
        keypoints_results = self.execute("SELECT * FROM keypoints")
        for keypoints_result in keypoints_results:
            image_id, rows, cols, keypoints = keypoints_result
            keypoints = blob_to_array(keypoints, np.float32, (rows, cols))
            if mapping is None:
                image_id_to_keypoints[image_id] = keypoints
            else:
                image_id_to_keypoints[mapping[image_id]] = keypoints
        return image_id_to_keypoints
    def read_descriptors(self, mapping=None):
        """Return {image_id (or mapping[image_id]): uint8 descriptor matrix}."""
        image_id_to_descriptors = {}
        descriptors_results = self.execute("SELECT * FROM descriptors")
        for descriptors_result in descriptors_results:
            image_id, rows, cols, keypoints = descriptors_result
            keypoints = blob_to_array(keypoints, np.uint8, (rows, cols))
            if mapping is None:
                image_id_to_descriptors[image_id] = keypoints
            else:
                image_id_to_descriptors[mapping[image_id]] = keypoints
        return image_id_to_descriptors
    def read_matches(self, mapping=None):
        """Return {(image_id0, image_id1): (N, 2) uint32 matches}; empty rows skipped."""
        matches_results = self.execute("SELECT * FROM matches")
        matches = {}
        for matches_result in matches_results:
            pair_id, rows, cols, match = matches_result
            image_id0, image_id1 = pair_id_to_image_ids(pair_id)
            if rows == 0:
                continue
            match = blob_to_array(match, dtype=np.uint32, shape=(rows, cols))
            if mapping is not None:
                image_id0 = mapping[image_id0]
                image_id1 = mapping[image_id1]
            matches[(image_id0, image_id1)] = match
        return matches
    def read_two_view_geometry(self, mapping=None):
        """Return {(id0, id1): {'matches', 'extra', 'config'}} for verified pairs.

        NOTE(review): a row length other than 8 or 10 would leave ``extra``
        unbound and raise — assumes the table matches one of the two known
        schemas.
        """
        geometry = self.execute("SELECT * FROM two_view_geometries")
        geometries = {}
        for _data in geometry:
            if len(_data) == 10:
                # schema with relative pose (qvec/tvec)
                pair_id, rows, cols, data, config, F, E, H, qvec, tvec = _data
                extra = {
                    'F': F,
                    'E': E,
                    'H': H,
                    'qvec': qvec,
                    'tvec': tvec
                }
            elif len(_data) == 8:
                pair_id, rows, cols, data, config, F, E, H = _data
                extra = {
                    'F': F,
                    'E': E,
                    'H': H,
                }
            for key, val in extra.items():
                extra[key] = blob_to_array(val, dtype=np.float64)
            image_id0, image_id1 = pair_id_to_image_ids(pair_id)
            match = blob_to_array(data, dtype=np.uint32, shape=(rows, cols))
            if rows == 0:continue
            if mapping is not None:
                image_id0 = mapping[image_id0]
                image_id1 = mapping[image_id1]
            geometries[(image_id0, image_id1)] = {'matches': match, 'extra': extra, 'config': config}
        return geometries
def create_empty_db(database_path):
    """Delete any existing COLMAP database at *database_path* and create a
    fresh one with the full schema."""
    if os.path.exists(database_path):
        mywarn('Removing old database: {}'.format(database_path))
        os.remove(database_path)
    print('Creating an empty database...')
    db = COLMAPDatabase.connect(database_path)
    db.create_tables()
    db.commit()
    db.close()
def colmap_feature_extract(colmap, path, share_camera, add_mask, gpu=False,
                           share_camera_per_folder=False):
    '''
    struct SiftMatchingOptions {
    // Number of threads for feature matching and geometric verification.
    int num_threads = -1;
    // Whether to use the GPU for feature matching.
    bool use_gpu = true;
    // Index of the GPU used for feature matching. For multi-GPU matching,
    // you should separate multiple GPU indices by comma, e.g., "0,1,2,3".
    std::string gpu_index = "-1";
    // Maximum distance ratio between first and second best match.
    double max_ratio = 0.8;
    // Maximum distance to best match.
    double max_distance = 0.7;
    // Whether to enable cross checking in matching.
    bool cross_check = true;
    // Maximum number of matches.
    int max_num_matches = 32768;
    // Maximum epipolar error in pixels for geometric verification.
    double max_error = 4.0;
    // Confidence threshold for geometric verification.
    double confidence = 0.999;
    // Minimum/maximum number of RANSAC iterations. Note that this option
    // overrules the min_inlier_ratio option.
    int min_num_trials = 100;
    int max_num_trials = 10000;
    // A priori assumed minimum inlier ratio, which determines the maximum
    // number of iterations.
    double min_inlier_ratio = 0.25;
    // Minimum number of inliers for an image pair to be considered as
    // geometrically verified.
    int min_num_inliers = 15;
    // Whether to attempt to estimate multiple geometric models per image pair.
    bool multiple_models = false;
    // Whether to perform guided matching, if geometric verification succeeds.
    bool guided_matching = false;
    bool Check() const;
    };
    '''
    # Run `colmap feature_extractor` on {path}/images into {path}/database.db;
    # stdout/stderr are appended to {path}/log.txt.
    cmd = f'{colmap} feature_extractor --database_path {path}/database.db \
        --image_path {path}/images \
        --SiftExtraction.peak_threshold 0.006 \
        --ImageReader.camera_model OPENCV \
        '
    # Camera sharing: one camera for all images, or one per sub-folder.
    if share_camera and not share_camera_per_folder:
        cmd += ' --ImageReader.single_camera 1'
    elif share_camera_per_folder:
        cmd += ' --ImageReader.single_camera_per_folder 1'
    if gpu:
        cmd += ' --SiftExtraction.use_gpu 1'
        cmd += ' --SiftExtraction.gpu_index 0'
    if add_mask:
        # restrict extraction to pixels allowed by {path}/mask
        cmd += f' --ImageReader.mask_path {path}/mask'
    cmd += f' >> {path}/log.txt'
    run_cmd(cmd)
def colmap_feature_match(colmap, path, gpu=False):
    """Run COLMAP exhaustive feature matching on {path}/database.db.

    Output is appended to {path}/log.txt; *gpu* enables matching on GPU 0.
    """
    cmd = f'{colmap} exhaustive_matcher --database_path {path}/database.db \
        --SiftMatching.guided_matching 0 \
        --SiftMatching.max_ratio 0.8 \
        --SiftMatching.max_distance 0.5 \
        --SiftMatching.cross_check 1 \
        --SiftMatching.max_error 4 \
        --SiftMatching.max_num_matches 32768 \
        --SiftMatching.confidence 0.9999 \
        --SiftMatching.max_num_trials 10000 \
        --SiftMatching.min_inlier_ratio 0.25 \
        --SiftMatching.min_num_inliers 30'
    if gpu:
        cmd += ' --SiftMatching.use_gpu 1'
        cmd += ' --SiftMatching.gpu_index 0'
    cmd += f' >> {path}/log.txt'
    run_cmd(cmd)
def colmap_ba(colmap, path, with_init=False):
    """Run COLMAP sparse reconstruction / bundle adjustment.

    With *with_init* True, an initial model in {path}/sparse/0 is assumed:
    points are triangulated with fixed poses, bundle-adjusted, and the mean
    reprojection error is logged.  Otherwise the full incremental mapper is
    run into {path}/sparse.
    """
    if with_init:
        cmd = f'{colmap} point_triangulator --database_path {path}/database.db \
            --image_path {path}/images \
            --input_path {path}/sparse/0 \
            --output_path {path}/sparse/0 \
            --Mapper.tri_merge_max_reproj_error 3 \
            --Mapper.ignore_watermarks 1 \
            --Mapper.filter_max_reproj_error 2 \
            >> {path}/log.txt'
        run_cmd(cmd)
        cmd = f'{colmap} bundle_adjuster \
            --input_path {path}/sparse/0 \
            --output_path {path}/sparse/0 \
            --BundleAdjustment.max_num_iterations 1000 \
            >> {path}/log.txt'
        run_cmd(cmd)
        # Report triangulation quality from the refined model.
        points3d = read_points3d_binary(join(path, 'sparse', '0', 'points3D.bin'))
        pids = list(points3d.keys())
        mean_error = np.mean([points3d[p].error for p in pids])
        log('Triangulate {} points, mean error: {:.2f} pixel'.format(len(pids), mean_error))
    else:
        mkdir(join(path, 'sparse'))
        cmd = f'{colmap} mapper --database_path {path}/database.db --image_path {path}/images --output_path {path}/sparse \
            --Mapper.ba_refine_principal_point 1 \
            --Mapper.ba_global_max_num_iterations 1000 \
            >> {path}/log.txt'
        run_cmd(cmd)
def colmap_dense(colmap, path):
    """Run COLMAP dense reconstruction: undistort, patch-match stereo,
    then stereo fusion into {path}/dense/fused.ply."""
    mkdir(join(path, 'dense'))
    cmd = f'{colmap} image_undistorter --image_path {path}/images --input_path {path}/sparse/0 --output_path {path}/dense --output_type COLMAP --max_image_size 2000'
    run_cmd(cmd)
    cmd = f'{colmap} patch_match_stereo \
        --workspace_path {path}/dense \
        --workspace_format COLMAP \
        --PatchMatchStereo.geom_consistency true \
        >> {path}/log.txt'
    run_cmd(cmd)
    cmd = f'{colmap} stereo_fusion \
        --workspace_path {path}/dense \
        --workspace_format COLMAP \
        --input_type geometric \
        --output_path {path}/dense/fused.ply \
        >> {path}/log.txt'
    run_cmd(cmd)
def log(text):
    """Print *text* at 'info' level via the project-wide myprint helper."""
    myprint(text, 'info')
def run_dense(path, colmap, args):
    """Full COLMAP pipeline for one capture folder.

    Creates an empty database, extracts and matches SIFT features, logs the
    verified pair statistics, runs sparse reconstruction, and (with
    ``args.dense``) dense reconstruction.
    """
    # out = join(out_root, '{}_{:06d}'.format(seq, nf))
    sparse_dir = join(path, 'sparse', 'model')
    os.makedirs(sparse_dir, exist_ok=True)
    # create blank database
    database_name = join(path, 'database.db')
    create_empty_db(database_name)
    # if not args.no_camera:
    #     db = COLMAPDatabase.connect(database_name)
    #     cameras_colmap, cameras_map = create_cameras(db, cameras, list(image_names.keys()))
    #     write_cameras_binary(cameras_colmap, join(sparse_dir, 'cameras.bin'))
    #     images = create_images(db, cameras, cameras_map, image_names)
    #     write_images_binary(images, join(sparse_dir, 'images.bin'))
    #     write_points3d_binary({}, join(sparse_dir, 'points3D.bin'))
    #     db.commit()
    #     db.close()
    # perform COLMAP extracting and matching
    colmap_feature_extract(colmap, path, args.share_camera, args.add_mask)
    colmap_feature_match(colmap, path)
    # check the matches
    db = COLMAPDatabase.connect(join(path, 'database.db'))
    geometry = db.read_two_view_geometry()
    db.close()
    num_pairs = len(geometry)
    num_matches = []
    for key, val in geometry.items():
        log('cameras: {} has {:5d} matches'.format(key, len(val['matches'])))
        num_matches.append(len(val['matches']))
    # log('[match] {}_{:06d}: {} pairs: {}'.format(seq, nf, num_pairs, sum(num_matches)))
    # if not args.no_camera: continue
    colmap_ba(colmap, path)
    if args.dense:
        colmap_dense(colmap, path)
from easymocap.annotator.file_utils import getFileList, read_json, save_json
from os.path import join
import os
from tqdm import tqdm
def read_json(path):
    """Parse the JSON file at *path* and return the decoded object."""
    with open(path, 'r') as reader:
        return json.load(reader)
def save_json(file, data):
    """Serialize *data* as pretty-printed JSON (indent=4) to *file*.

    Missing parent directories are created.  ``file=None`` is a no-op and
    returns 0, keeping the original best-effort contract.
    """
    if file is None:
        return 0
    dirname = os.path.dirname(file)
    # Fix: os.makedirs('') raises when *file* has no directory part (bare
    # filename in the cwd); exist_ok also avoids a TOCTOU race with the
    # previous exists()-then-makedirs pattern.
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    with open(file, 'w') as f:
        json.dump(data, f, indent=4)
def create_markers(path, name, N, N_group):
    """Create (or keep) a marker template JSON with *N* zeroed 3-D keypoints.

    Points are chained into lines; every *N_group*-th point closes its group
    back to the group start.  If the file already exists with the same point
    count it is left untouched (returns 0); otherwise it is (re)written.
    """
    outname = join(path, name)
    if os.path.exists(outname):
        results = read_json(outname)
        N_ = len(results['keypoints3d'])
        if N == N_:
            return 0
    results = {
        'keypoints3d': [[0., 0., 0.] for _ in range(N)],
        'lines': [[i, i+1] if (i+1)%N_group!=0 else [i, i-N_group+1] for i in range(N-1) ]
    }
    if N < 5:
        # Fix: close the loop with the parameter N, not the CLI global
        # `args.N`, which made this function unusable outside the script.
        results['lines'].append([N-1, 0])
    save_json(outname, results)
from easymocap.annotator.file_utils import getFileList, read_json, save_json
from os.path import join
import os
from tqdm import tqdm
def read_json(path):
    """Load a JSON document from disk and return its content."""
    with open(path, 'r') as handle:
        content = json.load(handle)
    return content
def save_json(file, data):
    """Write *data* to *file* as JSON with indent=4, creating parent dirs.

    Returns 0 (no-op) when *file* is None.
    """
    if file is None:
        return 0
    parent = os.path.dirname(file)
    # Fix: the previous exists()/makedirs() pair raised on a bare filename
    # (os.makedirs('') is invalid) and raced with concurrent writers.
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(file, 'w') as f:
        json.dump(data, f, indent=4)
def getFileList(root, ext='.jpg', max=-1, ret_full=False):
    """Recursively collect files under *root* whose names end with *ext*.

    Args:
        root: directory to search.
        ext: required filename suffix.
        max: if not -1, keep at most *max* entries per leaf directory
            (applied only to directories whose first entry is a file).
        ret_full: return full paths instead of paths relative to *root*.

    Returns:
        Sorted list of matching file paths.  Top-level dot-entries are
        skipped.
    """
    files = []
    dirs = sorted(os.listdir(root))
    while len(dirs) > 0:
        path = dirs.pop()
        if path.startswith('.'):continue
        fullname = join(root, path)
        if os.path.isfile(fullname) and fullname.endswith(ext):
            if ret_full:
                files.append(fullname)
            else:
                files.append(path)
        elif os.path.isdir(fullname):
            names = sorted(os.listdir(fullname))
            # Fix: guard against empty directories — names[0] used to raise
            # IndexError whenever max != -1 and a sub-directory was empty.
            if max != -1 and len(names) > 0 and os.path.isfile(join(fullname, names[0])):
                names = names[:max]
            for s in names:
                newDir = join(path, s)
                dirs.append(newDir)
    files = sorted(files)
    return files
def create_corners(path, grid, image='images', ext='.jpg', overwrite=True):
    """Create or refresh per-image chessboard-corner annotation templates.

    Writes one JSON per image under ``{path}/{args.annot}`` containing the
    four board corners at their metric positions (*grid* = board width,
    height in world units) and zeroed 2-D keypoints.  Depends on the
    module-level CLI namespace ``args`` for the annotation folder name.
    """
    imgnames = getFileList(join(path, image), ext=ext)
    keypoints3d = [
        [0., 0., 0.],
        [grid[0], 0., 0.],
        [grid[0], grid[1], 0.],
        [0., grid[1], 0.],
    ]
    template = {
        'keypoints3d': keypoints3d,
        'keypoints2d': [[0.,0.,0.] for _ in range(4)],
        'pattern': (2, 2),
        'grid_size': grid,
        'visited': False
    }
    for imgname in tqdm(imgnames, desc='create template chessboard'):
        annname = imgname.replace(ext, '.json')
        annname = join(path, args.annot, annname)
        if os.path.exists(annname) and overwrite:
            # overwrite keypoints3d (keep the existing 2-D annotations)
            data = read_json(annname)
            data['keypoints3d'] = template['keypoints3d']
            data['grid_size'] = grid
            save_json(annname, data)
        elif os.path.exists(annname) and not overwrite:
            continue
        else:
            save_json(annname, template)
from easymocap.mytools.debug_utils import myerror, mywarn
from easymocap.mytools.file_utils import myarray2string
import cv2
import numpy as np
import os
from os.path import join
from easymocap.mytools import read_json, merge
from easymocap.mytools import read_camera, plot_points2d
from easymocap.mytools import batch_triangulate, projectN3, Undistort
from tqdm import tqdm
def load_cube(grid_size=1, **kwargs):
    """Build an axis-aligned cube spanning [0, grid_size]^3.

    Returns:
        points3d: (8, 4) homogeneous corner coordinates (bottom ring first,
            counter-clockwise, then the top ring in the same order).
        lines: (12, 2) int64 vertex-index pairs, one per cube edge.
    Extra keyword arguments are accepted and ignored.
    """
    g = grid_size
    # one square ring, traversed counter-clockwise
    ring = [(0, 0), (g, 0), (g, g), (0, g)]
    points3d = np.array([[x, y, z] for z in (0, g) for x, y in ring],
                        dtype=np.float64)
    edges = [[k, (k + 1) % 4] for k in range(4)]            # bottom ring
    edges += [[4 + k, 4 + (k + 1) % 4] for k in range(4)]   # top ring
    edges += [[k, k + 4] for k in range(4)]                 # vertical edges
    lines = np.array(edges, dtype=np.int64)
    # append the homogeneous coordinate
    points3d = np.hstack((points3d, np.ones((points3d.shape[0], 1))))
    return points3d, lines
from easymocap.mytools.debug_utils import myerror, mywarn
from easymocap.mytools.file_utils import myarray2string
import cv2
import numpy as np
import os
from os.path import join
from easymocap.mytools import read_json, merge
from easymocap.mytools import read_camera, plot_points2d
from easymocap.mytools import batch_triangulate, projectN3, Undistort
from tqdm import tqdm
# Corner coordinates of a unit square on the z=0 plane (counter-clockwise).
POINTS_SQUARE = np.array([
    [0., 0., 0.],
    [1., 0., 0.],
    [1., 1., 0.],
    [0., 1., 0.]
])
# Edges of that square as index pairs into POINTS_SQUARE.
LINES_SQUARE = np.array([
    [0, 1],
    [1, 2],
    [2, 3],
    [3, 0]
])
def merge_points_lines(points3d, lines):
    """Merge 3-D points closer than 1e-3 and remap *lines* accordingly.

    Returns (newpoints, newlines): the deduplicated points and the line
    index array rewritten to reference them.  O(N^2) in the point count
    (full pairwise distance matrix).
    """
    # pairwise Euclidean distances between all points
    dist = np.linalg.norm(points3d[:, None, :] - points3d[None, :, :], axis=-1)
    mapid = np.arange(points3d.shape[0])
    for i in range(dist.shape[0]):
        if mapid[i] != i:
            # already merged into an earlier representative
            continue
        equal = np.where(dist[i] < 1e-3)[0]
        for j in equal:
            if j == i:
                continue
            mapid[j] = i
    # compact the surviving representative ids to 0..K-1
    newid = sorted(list(set(mapid)))
    newpoints = points3d[newid]
    for i, newi in enumerate(newid):
        # in-place rewrite is safe because i <= newi always holds here
        mapid[mapid==newi] = i
    return newpoints, mapid[lines]
def load_grid(xrange=28, yrange=15, step=1, two=False, **kwargs):
    """Build a planar grid of unit squares on z=0 with shared corners merged.

    Covers xrange-by-yrange cells of size *step* starting at the origin, or
    the symmetric range [-xrange, xrange) x [-yrange, yrange) when *two* is
    True.  Returns (points, lines) via merge_points_lines.  Extra keyword
    arguments are accepted and ignored.
    """
    start = np.array([0., 0., 0.])
    xdir = np.array([1., 0., 0.])
    ydir = np.array([0., 1., 0.])
    stepx = step
    stepy = step
    points3d, lines = [], []
    if two:
        start_x = -xrange
        start_y = -yrange
    else:
        start_x = 0
        start_y = 0
    for i in range(start_x, xrange):
        for j in range(start_y, yrange):
            base = start + xdir*i*stepx + ydir*j*stepy
            points3d.append(POINTS_SQUARE+base)
            # each square contributes 4 vertices; offset edge indices by the
            # number of squares emitted before this one
            lines.append(LINES_SQUARE+4*((i-start_x)*(yrange-start_y)+(j-start_y)))
    points3d = np.vstack(points3d)
    lines = np.vstack(lines)
    return merge_points_lines(points3d, lines)
from easymocap.mytools.debug_utils import myerror, mywarn
from easymocap.mytools.file_utils import myarray2string
import cv2
import numpy as np
import os
from os.path import join
from easymocap.mytools import read_json, merge
from easymocap.mytools import read_camera, plot_points2d
from easymocap.mytools import batch_triangulate, projectN3, Undistort
from tqdm import tqdm
CONFIG = {
'points': {
'nJoints': 1,
'kintree': []
}
}
CONFIG['smpl'] = {'nJoints': 24, 'kintree':
[
[ 0, 1 ],
[ 0, 2 ],
[ 0, 3 ],
[ 1, 4 ],
[ 2, 5 ],
[ 3, 6 ],
[ 4, 7 ],
[ 5, 8 ],
[ 6, 9 ],
[ 7, 10],
[ 8, 11],
[ 9, 12],
[ 9, 13],
[ 9, 14],
[12, 15],
[13, 16],
[14, 17],
[16, 18],
[17, 19],
[18, 20],
[19, 21],
[20, 22],
[21, 23],
],
'joint_names': [
'MidHip', # 0
'LUpLeg', # 1
'RUpLeg', # 2
'spine', # 3
'LLeg', # 4
'RLeg', # 5
'spine1', # 6
'LFoot', # 7
'RFoot', # 8
'spine2', # 9
'LToeBase', # 10
'RToeBase', # 11
'neck', # 12
'LShoulder', # 13
'RShoulder', # 14
'head', # 15
'LArm', # 16
'RArm', # 17
'LForeArm', # 18
'RForeArm', # 19
'LHand', # 20
'RHand', # 21
'LHandIndex1', # 22
'RHandIndex1', # 23
]
}
CONFIG['smplh'] = {'nJoints': 52, 'kintree':
[
[ 1, 0],
[ 2, 0],
[ 3, 0],
[ 4, 1],
[ 5, 2],
[ 6, 3],
[ 7, 4],
[ 8, 5],
[ 9, 6],
[ 10, 7],
[ 11, 8],
[ 12, 9],
[ 13, 9],
[ 14, 9],
[ 15, 12],
[ 16, 13],
[ 17, 14],
[ 18, 16],
[ 19, 17],
[ 20, 18],
[ 21, 19],
[ 22, 20],
[ 23, 22],
[ 24, 23],
[ 25, 20],
[ 26, 25],
[ 27, 26],
[ 28, 20],
[ 29, 28],
[ 30, 29],
[ 31, 20],
[ 32, 31],
[ 33, 32],
[ 34, 20],
[ 35, 34],
[ 36, 35],
[ 37, 21],
[ 38, 37],
[ 39, 38],
[ 40, 21],
[ 41, 40],
[ 42, 41],
[ 43, 21],
[ 44, 43],
[ 45, 44],
[ 46, 21],
[ 47, 46],
[ 48, 47],
[ 49, 21],
[ 50, 49],
[ 51, 50]
],
'joint_names': [
'MidHip', # 0
'LUpLeg', # 1
'RUpLeg', # 2
'spine', # 3
'LLeg', # 4
'RLeg', # 5
'spine1', # 6
'LFoot', # 7
'RFoot', # 8
'spine2', # 9
'LToeBase', # 10
'RToeBase', # 11
'neck', # 12
'LShoulder', # 13
'RShoulder', # 14
'head', # 15
'LArm', # 16
'RArm', # 17
'LForeArm', # 18
'RForeArm', # 19
'LHand', # 20
'RHand', # 21
'LHandIndex1', # 22
'RHandIndex1', # 23
]
}
CONFIG['coco'] = {
'nJoints': 17,
'kintree': [
[0, 1], [0, 2], [1, 3], [2, 4], [0, 5], [0, 6], [5, 6], [5, 7], [6, 8], [7, 9], [8, 10], [5, 11], [5, 12], [11, 12], [11, 13], [12, 14], [13, 15], [14, 16]
],
}
CONFIG['coco_17'] = CONFIG['coco']
CONFIG['body25'] = {'nJoints': 25, 'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 1],
[ 6, 5],
[ 7, 6],
[ 8, 1],
[ 9, 8],
[10, 9],
[11, 10],
[12, 8],
[13, 12],
[14, 13],
[15, 0],
[16, 0],
[17, 15],
[18, 16],
[19, 14],
[20, 19],
[21, 14],
[22, 11],
[23, 22],
[24, 11]],
'joint_names': [
"Nose", "Neck", "RShoulder", "RElbow", "RWrist", "LShoulder", "LElbow", "LWrist", "MidHip", "RHip","RKnee","RAnkle","LHip","LKnee","LAnkle","REye","LEye","REar","LEar","LBigToe","LSmallToe","LHeel","RBigToe","RSmallToe","RHeel"]}
CONFIG['body25']['kintree_order'] = [
[1, 8], # 躯干放在最前面
[1, 2],
[2, 3],
[3, 4],
[1, 5],
[5, 6],
[6, 7],
[8, 9],
[8, 12],
[9, 10],
[10, 11],
[12, 13],
[13, 14],
[1, 0],
[0, 15],
[0, 16],
[15, 17],
[16, 18],
[11, 22],
[11, 24],
[22, 23],
[14, 19],
[19, 20],
[14, 21]
]
CONFIG['body25']['colors'] = ['k', 'r', 'r', 'r', 'b', 'b', 'b', 'k', 'r', 'r', 'r', 'b', 'b', 'b', 'r', 'b', 'r', 'b', 'b', 'b', 'b', 'r', 'r', 'r']
CONFIG['body25']['skeleton'] = \
{
( 0, 1): {'mean': 0.228, 'std': 0.046}, # Nose ->Neck
( 1, 2): {'mean': 0.144, 'std': 0.029}, # Neck ->RShoulder
( 2, 3): {'mean': 0.283, 'std': 0.057}, # RShoulder->RElbow
( 3, 4): {'mean': 0.258, 'std': 0.052}, # RElbow ->RWrist
( 1, 5): {'mean': 0.145, 'std': 0.029}, # Neck ->LShoulder
( 5, 6): {'mean': 0.281, 'std': 0.056}, # LShoulder->LElbow
( 6, 7): {'mean': 0.258, 'std': 0.052}, # LElbow ->LWrist
( 1, 8): {'mean': 0.483, 'std': 0.097}, # Neck ->MidHip
( 8, 9): {'mean': 0.106, 'std': 0.021}, # MidHip ->RHip
( 9, 10): {'mean': 0.438, 'std': 0.088}, # RHip ->RKnee
(10, 11): {'mean': 0.406, 'std': 0.081}, # RKnee ->RAnkle
( 8, 12): {'mean': 0.106, 'std': 0.021}, # MidHip ->LHip
(12, 13): {'mean': 0.438, 'std': 0.088}, # LHip ->LKnee
(13, 14): {'mean': 0.408, 'std': 0.082}, # LKnee ->LAnkle
( 0, 15): {'mean': 0.043, 'std': 0.009}, # Nose ->REye
( 0, 16): {'mean': 0.043, 'std': 0.009}, # Nose ->LEye
(15, 17): {'mean': 0.105, 'std': 0.021}, # REye ->REar
(16, 18): {'mean': 0.104, 'std': 0.021}, # LEye ->LEar
(14, 19): {'mean': 0.180, 'std': 0.036}, # LAnkle ->LBigToe
(19, 20): {'mean': 0.038, 'std': 0.008}, # LBigToe ->LSmallToe
(14, 21): {'mean': 0.044, 'std': 0.009}, # LAnkle ->LHeel
(11, 22): {'mean': 0.182, 'std': 0.036}, # RAnkle ->RBigToe
(22, 23): {'mean': 0.038, 'std': 0.008}, # RBigToe ->RSmallToe
(11, 24): {'mean': 0.044, 'std': 0.009}, # RAnkle ->RHeel
}
CONFIG['body25vis'] = {
'nJoints': 25,
'kintree': [
[8, 1], # 躯干放在最前面
[8, 9],
[8, 12],
[9, 10],
[12, 13],
[10, 11],
[13, 14],
[11, 22],
[14, 19],
[1, 2],
[1, 5],
[2, 3],
[3, 4],
[5, 6],
[6, 7],
[1, 0]]
}
CONFIG['handvis'] = {
'nJoints': 21,
'kintree': [
[0, 1],
[0, 5],
[0, 9],
[0, 13],
[0, 17],
[1, 2],
[2, 3],
[3, 4],
[5, 6],
[6, 7],
[7, 8],
[9, 10],
[10, 11],
[11, 12],
[13, 14],
[14, 15],
[15, 16],
[17, 18],
[18, 19],
[19, 20]
]
}
CONFIG['body15'] = {'nJoints': 15, 'root': 8,
'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 1],
[ 6, 5],
[ 7, 6],
[ 8, 1],
[ 9, 8],
[10, 9],
[11, 10],
[12, 8],
[13, 12],
[14, 13]], 'root': 8,}
CONFIG['body15']['joint_names'] = CONFIG['body25']['joint_names'][:15]
CONFIG['body15']['skeleton'] = {key: val for key, val in CONFIG['body25']['skeleton'].items() if key[0] < 15 and key[1] < 15}
CONFIG['body15']['kintree_order'] = CONFIG['body25']['kintree_order'][:14]
CONFIG['body15']['colors'] = CONFIG['body25']['colors'][:15]
CONFIG['body19'] = {'nJoints': 19, 'kintree': [[i, j] for (i, j) in CONFIG['body25']['kintree'] if i < 19 and j < 19]}
CONFIG['body19']['skeleton'] = {key: val for key, val in CONFIG['body25']['skeleton'].items() if key[0] < 19 and key[1] < 19}
CONFIG['panoptic'] = {
'nJoints': 19,
'joint_names': ['Neck', 'Nose', 'MidHip', 'LShoulder', 'LElbow', 'LWrist', 'LHip', 'LKnee', 'LAnkle', 'RShoulder','RElbow', 'RWrist', 'RHip','RKnee', 'RAnkle', 'LEye', 'LEar', 'REye', 'REar'],
'kintree': [[0, 1],
[0, 2],
[0, 3],
[3, 4],
[4, 5],
[0, 9],
[9, 10],
[10, 11],
[2, 6],
[2, 12],
[6, 7],
[7, 8],
[12, 13],
[13, 14]],
'colors': ['b' for _ in range(19)]
}
CONFIG['panoptic15'] = {
'nJoints': 15,
'root': 2,
'joint_names': CONFIG['panoptic']['joint_names'][:15],
'kintree': [[i, j] for (i, j) in CONFIG['panoptic']['kintree'] if i < 15 and j < 15],
'limb_mean': [0.1129,0.4957,0.1382,0.2547,0.2425,0.1374,0.2549,0.2437,0.1257,0.1256, 0.4641,0.4580,0.4643,0.4589],
'limb_std': [0.0164,0.0333,0.0078,0.0237,0.0233,0.0085,0.0233,0.0237,0.0076,0.0076, 0.0273,0.0247,0.0272,0.0242],
'colors': CONFIG['panoptic']['colors'][:15]
}
CONFIG['mpii_16'] = {
'nJoints': 16,
'joint_names': ['rankle', 'rknee', 'rhip', 'lhip', 'lknee', 'lankle', 'pelvis', 'thorax', 'upper_neck', 'head_top', 'rwrist', 'relbow', 'rshoulder', 'lshoulder', 'lelbow', 'lwrist'],
'kintree': [[0, 1], [1, 2], [2, 6], [6, 3], [3, 4], [4, 5], [6, 7], [7, 8], [8, 9], [10, 11], [11, 12], [12, 7], [13, 14], [14, 15], [13, 7]],
'colors': ['b' for _ in range(16)]
}
CONFIG['ochuman_19'] = {
'nJoints': 19,
'joint_names': ["right_shoulder", "right_elbow", "right_wrist",
"left_shoulder", "left_elbow", "left_wrist",
"right_hip", "right_knee", "right_ankle",
"left_hip", "left_knee", "left_ankle",
"head", "neck"] + ['right_ear', 'left_ear', 'nose', 'right_eye', 'left_eye'],
'kintree': [
[0, 1], [1, 2], [3, 4], [4, 5],
[6, 7], [7, 8], [9, 10], [10, 11],
[13, 0], [13, 3], [0, 3], [6, 9],
[12, 16], [16, 13], [16, 17], [16, 18], [18, 15], [17, 14],
],
'colors': ['b' for _ in range(19)]
}
CONFIG['chi3d_25'] = {
'nJoints': 25,
'joint_names': [],
'kintree': [[10, 9], [9, 8], [8, 11], [8, 14], [11, 12], [14, 15], [12, 13], [15, 16],
[8, 7], [7, 0], [0, 1], [0, 4], [1, 2], [4, 5], [2, 3], [5, 6],
[13, 21], [13, 22], [16, 23], [16, 24], [3, 17], [3, 18], [6, 19], [6, 20]],
'colors': ['b' for _ in range(25)]
}
CONFIG['chi3d_17'] = {
'nJoints': 17,
'joint_names': [],
'kintree': [[10, 9], [9, 8], [8, 11], [8, 14], [11, 12], [14, 15], [12, 13], [15, 16],
[8, 7], [7, 0], [0, 1], [0, 4], [1, 2], [4, 5], [2, 3], [5, 6],
],
'colors': ['b' for _ in range(17)]
}
CONFIG['hand'] = {'nJoints': 21, 'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 0],
[ 6, 5],
[ 7, 6],
[ 8, 7],
[ 9, 0],
[10, 9],
[11, 10],
[12, 11],
[13, 0],
[14, 13],
[15, 14],
[16, 15],
[17, 0],
[18, 17],
[19, 18],
[20, 19]],
'colors': [
'_k', '_k', '_k', '_k', '_r', '_r', '_r', '_r',
'_g', '_g', '_g', '_g', '_b', '_b', '_b', '_b',
'_y', '_y', '_y', '_y'],
'colorsrhand': [
'_pink', '_pink', '_pink', '_pink', '_mint', '_mint', '_mint', '_mint',
'_orange', '_orange', '_orange', '_orange', '_mint2', '_mint2', '_mint2', '_mint2',
'purple', 'purple', 'purple', 'purple'],
'joint_names':[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
}
CONFIG['handl'] = CONFIG['hand']
CONFIG['handr'] = CONFIG['hand']
CONFIG['handlr'] = {
'nJoints': 42,
'colors': CONFIG['hand']['colors'] + CONFIG['hand']['colorsrhand'],
'joint_names': CONFIG['hand']['joint_names'] + CONFIG['hand']['joint_names'],
'kintree': np.vstack((np.array(CONFIG['hand']['kintree']), np.array(CONFIG['hand']['kintree'])+21)).tolist()
}
CONFIG['bodyhand'] = {'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 1],
[ 6, 5],
[ 7, 6],
[ 8, 1],
[ 9, 8],
[10, 9],
[11, 10],
[12, 8],
[13, 12],
[14, 13],
[15, 0],
[16, 0],
[17, 15],
[18, 16],
[19, 14],
[20, 19],
[21, 14],
[22, 11],
[23, 22],
[24, 11],
[26, 7], # handl
[27, 26],
[28, 27],
[29, 28],
[30, 7],
[31, 30],
[32, 31],
[33, 32],
[34, 7],
[35, 34],
[36, 35],
[37, 36],
[38, 7],
[39, 38],
[40, 39],
[41, 40],
[42, 7],
[43, 42],
[44, 43],
[45, 44],
[47, 4], # handr
[48, 47],
[49, 48],
[50, 49],
[51, 4],
[52, 51],
[53, 52],
[54, 53],
[55, 4],
[56, 55],
[57, 56],
[58, 57],
[59, 4],
[60, 59],
[61, 60],
[62, 61],
[63, 4],
[64, 63],
[65, 64],
[66, 65]
],
'nJoints': 67,
'colors': CONFIG['body25']['colors'] + CONFIG['hand']['colors'] + CONFIG['hand']['colors'],
'skeleton':{
( 0, 1): {'mean': 0.251, 'std': 0.050},
( 1, 2): {'mean': 0.169, 'std': 0.034},
( 2, 3): {'mean': 0.292, 'std': 0.058},
( 3, 4): {'mean': 0.275, 'std': 0.055},
( 1, 5): {'mean': 0.169, 'std': 0.034},
( 5, 6): {'mean': 0.295, 'std': 0.059},
( 6, 7): {'mean': 0.278, 'std': 0.056},
( 1, 8): {'mean': 0.566, 'std': 0.113},
( 8, 9): {'mean': 0.110, 'std': 0.022},
( 9, 10): {'mean': 0.398, 'std': 0.080},
(10, 11): {'mean': 0.402, 'std': 0.080},
( 8, 12): {'mean': 0.111, 'std': 0.022},
(12, 13): {'mean': 0.395, 'std': 0.079},
(13, 14): {'mean': 0.403, 'std': 0.081},
( 0, 15): {'mean': 0.053, 'std': 0.011},
( 0, 16): {'mean': 0.056, 'std': 0.011},
(15, 17): {'mean': 0.107, 'std': 0.021},
(16, 18): {'mean': 0.107, 'std': 0.021},
(14, 19): {'mean': 0.180, 'std': 0.036},
(19, 20): {'mean': 0.055, 'std': 0.011},
(14, 21): {'mean': 0.065, 'std': 0.013},
(11, 22): {'mean': 0.169, 'std': 0.034},
(22, 23): {'mean': 0.052, 'std': 0.010},
(11, 24): {'mean': 0.061, 'std': 0.012},
( 7, 26): {'mean': 0.045, 'std': 0.009},
(26, 27): {'mean': 0.042, 'std': 0.008},
(27, 28): {'mean': 0.035, 'std': 0.007},
(28, 29): {'mean': 0.029, 'std': 0.006},
( 7, 30): {'mean': 0.102, 'std': 0.020},
(30, 31): {'mean': 0.040, 'std': 0.008},
(31, 32): {'mean': 0.026, 'std': 0.005},
(32, 33): {'mean': 0.023, 'std': 0.005},
( 7, 34): {'mean': 0.101, 'std': 0.020},
(34, 35): {'mean': 0.043, 'std': 0.009},
(35, 36): {'mean': 0.029, 'std': 0.006},
(36, 37): {'mean': 0.024, 'std': 0.005},
( 7, 38): {'mean': 0.097, 'std': 0.019},
(38, 39): {'mean': 0.041, 'std': 0.008},
(39, 40): {'mean': 0.027, 'std': 0.005},
(40, 41): {'mean': 0.024, 'std': 0.005},
( 7, 42): {'mean': 0.095, 'std': 0.019},
(42, 43): {'mean': 0.033, 'std': 0.007},
(43, 44): {'mean': 0.020, 'std': 0.004},
(44, 45): {'mean': 0.018, 'std': 0.004},
( 4, 47): {'mean': 0.043, 'std': 0.009},
(47, 48): {'mean': 0.041, 'std': 0.008},
(48, 49): {'mean': 0.034, 'std': 0.007},
(49, 50): {'mean': 0.028, 'std': 0.006},
( 4, 51): {'mean': 0.101, 'std': 0.020},
(51, 52): {'mean': 0.041, 'std': 0.008},
(52, 53): {'mean': 0.026, 'std': 0.005},
(53, 54): {'mean': 0.024, 'std': 0.005},
( 4, 55): {'mean': 0.100, 'std': 0.020},
(55, 56): {'mean': 0.044, 'std': 0.009},
(56, 57): {'mean': 0.029, 'std': 0.006},
(57, 58): {'mean': 0.023, 'std': 0.005},
( 4, 59): {'mean': 0.096, 'std': 0.019},
(59, 60): {'mean': 0.040, 'std': 0.008},
(60, 61): {'mean': 0.028, 'std': 0.006},
(61, 62): {'mean': 0.023, 'std': 0.005},
( 4, 63): {'mean': 0.094, 'std': 0.019},
(63, 64): {'mean': 0.032, 'std': 0.006},
(64, 65): {'mean': 0.020, 'std': 0.004},
(65, 66): {'mean': 0.018, 'std': 0.004},
}
}
CONFIG['bodyhandface'] = {'kintree':
[[ 1, 0],
[ 2, 1],
[ 3, 2],
[ 4, 3],
[ 5, 1],
[ 6, 5],
[ 7, 6],
[ 8, 1],
[ 9, 8],
[10, 9],
[11, 10],
[12, 8],
[13, 12],
[14, 13],
[15, 0],
[16, 0],
[17, 15],
[18, 16],
[19, 14],
[20, 19],
[21, 14],
[22, 11],
[23, 22],
[24, 11],
[26, 7], # handl
[27, 26],
[28, 27],
[29, 28],
[30, 7],
[31, 30],
[32, 31],
[33, 32],
[34, 7],
[35, 34],
[36, 35],
[37, 36],
[38, 7],
[39, 38],
[40, 39],
[41, 40],
[42, 7],
[43, 42],
[44, 43],
[45, 44],
[47, 4], # handr
[48, 47],
[49, 48],
[50, 49],
[51, 4],
[52, 51],
[53, 52],
[54, 53],
[55, 4],
[56, 55],
[57, 56],
[58, 57],
[59, 4],
[60, 59],
[61, 60],
[62, 61],
[63, 4],
[64, 63],
[65, 64],
[66, 65],
[ 67, 68],
[ 68, 69],
[ 69, 70],
[ 70, 71],
[ 72, 73],
[ 73, 74],
[ 74, 75],
[ 75, 76],
[ 77, 78],
[ 78, 79],
[ 79, 80],
[ 81, 82],
[ 82, 83],
[ 83, 84],
[ 84, 85],
[ 86, 87],
[ 87, 88],
[ 88, 89],
[ 89, 90],
[ 90, 91],
[ 91, 86],
[ 92, 93],
[ 93, 94],
[ 94, 95],
[ 95, 96],
[ 96, 97],
[ 97, 92],
[ 98, 99],
[ 99, 100],
[100, 101],
[101, 102],
[102, 103],
[103, 104],
[104, 105],
[105, 106],
[106, 107],
[107, 108],
[108, 109],
[109, 98],
[110, 111],
[111, 112],
[112, 113],
[113, 114],
[114, 115],
[115, 116],
[116, 117],
[117, 110]
],
'nJoints': 118,
'skeleton':{
( 0, 1): {'mean': 0.251, 'std': 0.050},
( 1, 2): {'mean': 0.169, 'std': 0.034},
( 2, 3): {'mean': 0.292, 'std': 0.058},
( 3, 4): {'mean': 0.275, 'std': 0.055},
( 1, 5): {'mean': 0.169, 'std': 0.034},
( 5, 6): {'mean': 0.295, 'std': 0.059},
( 6, 7): {'mean': 0.278, 'std': 0.056},
( 1, 8): {'mean': 0.566, 'std': 0.113},
( 8, 9): {'mean': 0.110, 'std': 0.022},
( 9, 10): {'mean': 0.398, 'std': 0.080},
(10, 11): {'mean': 0.402, 'std': 0.080},
( 8, 12): {'mean': 0.111, 'std': 0.022},
(12, 13): {'mean': 0.395, 'std': 0.079},
(13, 14): {'mean': 0.403, 'std': 0.081},
( 0, 15): {'mean': 0.053, 'std': 0.011},
( 0, 16): {'mean': 0.056, 'std': 0.011},
(15, 17): {'mean': 0.107, 'std': 0.021},
(16, 18): {'mean': 0.107, 'std': 0.021},
(14, 19): {'mean': 0.180, 'std': 0.036},
(19, 20): {'mean': 0.055, 'std': 0.011},
(14, 21): {'mean': 0.065, 'std': 0.013},
(11, 22): {'mean': 0.169, 'std': 0.034},
(22, 23): {'mean': 0.052, 'std': 0.010},
(11, 24): {'mean': 0.061, 'std': 0.012},
( 7, 26): {'mean': 0.045, 'std': 0.009},
(26, 27): {'mean': 0.042, 'std': 0.008},
(27, 28): {'mean': 0.035, 'std': 0.007},
(28, 29): {'mean': 0.029, 'std': 0.006},
( 7, 30): {'mean': 0.102, 'std': 0.020},
(30, 31): {'mean': 0.040, 'std': 0.008},
(31, 32): {'mean': 0.026, 'std': 0.005},
(32, 33): {'mean': 0.023, 'std': 0.005},
( 7, 34): {'mean': 0.101, 'std': 0.020},
(34, 35): {'mean': 0.043, 'std': 0.009},
(35, 36): {'mean': 0.029, 'std': 0.006},
(36, 37): {'mean': 0.024, 'std': 0.005},
( 7, 38): {'mean': 0.097, 'std': 0.019},
(38, 39): {'mean': 0.041, 'std': 0.008},
(39, 40): {'mean': 0.027, 'std': 0.005},
(40, 41): {'mean': 0.024, 'std': 0.005},
( 7, 42): {'mean': 0.095, 'std': 0.019},
(42, 43): {'mean': 0.033, 'std': 0.007},
(43, 44): {'mean': 0.020, 'std': 0.004},
(44, 45): {'mean': 0.018, 'std': 0.004},
( 4, 47): {'mean': 0.043, 'std': 0.009},
(47, 48): {'mean': 0.041, 'std': 0.008},
(48, 49): {'mean': 0.034, 'std': 0.007},
(49, 50): {'mean': 0.028, 'std': 0.006},
( 4, 51): {'mean': 0.101, 'std': 0.020},
(51, 52): {'mean': 0.041, 'std': 0.008},
(52, 53): {'mean': 0.026, 'std': 0.005},
(53, 54): {'mean': 0.024, 'std': 0.005},
( 4, 55): {'mean': 0.100, 'std': 0.020},
(55, 56): {'mean': 0.044, 'std': 0.009},
(56, 57): {'mean': 0.029, 'std': 0.006},
(57, 58): {'mean': 0.023, 'std': 0.005},
( 4, 59): {'mean': 0.096, 'std': 0.019},
(59, 60): {'mean': 0.040, 'std': 0.008},
(60, 61): {'mean': 0.028, 'std': 0.006},
(61, 62): {'mean': 0.023, 'std': 0.005},
( 4, 63): {'mean': 0.094, 'std': 0.019},
(63, 64): {'mean': 0.032, 'std': 0.006},
(64, 65): {'mean': 0.020, 'std': 0.004},
(65, 66): {'mean': 0.018, 'std': 0.004},
(67, 68): {'mean': 0.012, 'std': 0.002},
(68, 69): {'mean': 0.013, 'std': 0.003},
(69, 70): {'mean': 0.014, 'std': 0.003},
(70, 71): {'mean': 0.012, 'std': 0.002},
(72, 73): {'mean': 0.014, 'std': 0.003},
(73, 74): {'mean': 0.014, 'std': 0.003},
(74, 75): {'mean': 0.015, 'std': 0.003},
(75, 76): {'mean': 0.013, 'std': 0.003},
(77, 78): {'mean': 0.014, 'std': 0.003},
(78, 79): {'mean': 0.014, 'std': 0.003},
(79, 80): {'mean': 0.015, 'std': 0.003},
(81, 82): {'mean': 0.009, 'std': 0.002},
(82, 83): {'mean': 0.010, 'std': 0.002},
(83, 84): {'mean': 0.010, 'std': 0.002},
(84, 85): {'mean': 0.010, 'std': 0.002},
(86, 87): {'mean': 0.009, 'std': 0.002},
(87, 88): {'mean': 0.009, 'std': 0.002},
(88, 89): {'mean': 0.008, 'std': 0.002},
(89, 90): {'mean': 0.008, 'std': 0.002},
(90, 91): {'mean': 0.009, 'std': 0.002},
(86, 91): {'mean': 0.008, 'std': 0.002},
(92, 93): {'mean': 0.009, 'std': 0.002},
(93, 94): {'mean': 0.009, 'std': 0.002},
(94, 95): {'mean': 0.009, 'std': 0.002},
(95, 96): {'mean': 0.009, 'std': 0.002},
(96, 97): {'mean': 0.009, 'std': 0.002},
(92, 97): {'mean': 0.009, 'std': 0.002},
(98, 99): {'mean': 0.016, 'std': 0.003},
(99, 100): {'mean': 0.013, 'std': 0.003},
(100, 101): {'mean': 0.008, 'std': 0.002},
(101, 102): {'mean': 0.008, 'std': 0.002},
(102, 103): {'mean': 0.012, 'std': 0.002},
(103, 104): {'mean': 0.014, 'std': 0.003},
(104, 105): {'mean': 0.015, 'std': 0.003},
(105, 106): {'mean': 0.012, 'std': 0.002},
(106, 107): {'mean': 0.009, 'std': 0.002},
(107, 108): {'mean': 0.009, 'std': 0.002},
(108, 109): {'mean': 0.013, 'std': 0.003},
(98, 109): {'mean': 0.016, 'std': 0.003},
(110, 111): {'mean': 0.021, 'std': 0.004},
(111, 112): {'mean': 0.009, 'std': 0.002},
(112, 113): {'mean': 0.008, 'std': 0.002},
(113, 114): {'mean': 0.019, 'std': 0.004},
(114, 115): {'mean': 0.018, 'std': 0.004},
(115, 116): {'mean': 0.008, 'std': 0.002},
(116, 117): {'mean': 0.009, 'std': 0.002},
(110, 117): {'mean': 0.020, 'std': 0.004},
}
}
CONFIG['face'] = {'nJoints': 70,
'kintree':[ [0,1],[1,2],[2,3],[3,4],[4,5],[5,6],[6,7],[7,8],[8,9],[9,10],[10,11],[11,12],[12,13],[13,14],[14,15],[15,16], #outline (ignored)
[17,18],[18,19],[19,20],[20,21], #right eyebrow
[22,23],[23,24],[24,25],[25,26], #left eyebrow
[27,28],[28,29],[29,30], #nose upper part
[31,32],[32,33],[33,34],[34,35], #nose lower part
[36,37],[37,38],[38,39],[39,40],[40,41],[41,36], #right eye
[42,43],[43,44],[44,45],[45,46],[46,47],[47,42], #left eye
[48,49],[49,50],[50,51],[51,52],[52,53],[53,54],[54,55],[55,56],[56,57],[57,58],[58,59],[59,48], #Lip outline
[60,61],[61,62],[62,63],[63,64],[64,65],[65,66],[66,67],[67,60] #Lip inner line
], 'colors': ['g' for _ in range(100)]}
CONFIG['h36m'] = {
'kintree': [[0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8], [8, 9], [9, 10], [8, 11], [11, 12], [
12, 13], [8, 14], [14, 15], [15, 16]],
'color': ['r', 'r', 'r', 'g', 'g', 'g', 'k', 'k', 'k', 'k', 'g', 'g', 'g', 'r', 'r', 'r'],
'joint_names': [
'hip', # 0
'LHip', # 1
'LKnee', # 2
'LAnkle', # 3
'RHip', # 4
'RKnee', # 5
'RAnkle', # 6
'Spine (H36M)', # 7
'Neck', # 8
'Head (H36M)', # 9
'headtop', # 10
'LShoulder', # 11
'LElbow', # 12
'LWrist', # 13
'RShoulder', # 14
'RElbow', # 15
'RWrist', # 16
],
'nJoints': 17}
CONFIG['h36m_17'] = CONFIG['h36m']
CONFIG['total'] = compose(['body25', 'hand', 'hand', 'face'])
CONFIG['bodyhandface']['joint_names'] = CONFIG['body25']['joint_names']
CONFIG['keypoints2d'] = CONFIG['body25']
CONFIG['handl2d'] = CONFIG['hand']
CONFIG['handr2d'] = CONFIG['hand']
CONFIG['face2d'] = CONFIG['face']
CONFIG['mpbody'] = {}
CONFIG['mpbody']['kintree'] = [
(0, 1),
(0, 4),
(1, 2),
(2, 3),
(3, 7),
(4, 5),
(5, 6),
(6, 8),
(9, 10),
(11, 12),
(11, 13),
(11, 23),
(12, 14),
(12, 24),
(13, 15),
(14, 16),
(15, 17),
(15, 19),
(15, 21),
(16, 18),
(16, 20),
(16, 22),
(17, 19),
(18, 20),
(23, 24),
(23, 25),
(24, 26),
(25, 27),
(26, 28),
(27, 29),
(27, 31),
(28, 30),
(28, 32),
(29, 31),
(30, 32)
]
CONFIG['mpbody']['nJoints'] = 33
CONFIG['mpbody']['colors'] = ['b', 'r', 'b', 'b', 'b', 'r', 'r', 'r', 'k', 'k', 'b', 'b', 'r', 'r', 'b', 'r',
'y', 'r', 'y', 'g', 'b', 'g', 'y', 'g', 'k', 'b', 'r', 'b', 'r', 'b', 'b', 'r', 'r', 'b', 'b']
CONFIG['mpface'] = {}
CONFIG['mpface']['kintree'] = [(270, 409), (176, 149), (37, 0), (84, 17), (318, 324), (293, 334), (386, 385), (7, 163), (33, 246), (17, 314), (374, 380), (251, 389), (390, 373), (267, 269), (295, 285), (389, 356), (173, 133), (33, 7), (377, 152), (158, 157), (405, 321), (54, 103), (263, 466), (324, 308), (67, 109), (409, 291), (157, 173), (454, 323), (388, 387), (78, 191), (148, 176), (311, 310), (39, 37), (249, 390), (144, 145), (402, 318), (80, 81), (310, 415), (153, 154), (384, 398), (397, 365), (234, 127), (103, 67), (282, 295), (338, 297), (378, 400), (127, 162), (321, 375), (375, 291), (317, 402), (81, 82), (154, 155), (91, 181), (334, 296), (297, 332), (269, 270), (150, 136), (109, 10), (356, 454), (58, 132), (312, 311), (152, 148), (415, 308), (161, 160), (296, 336), (65, 55), (61, 146), (78, 95), (380, 381), (398, 362), (361, 288), (246, 161), (162, 21), (0, 267), (82, 13), (132, 93), (314, 405), (10, 338), (178, 87), (387, 386), (381, 382), (70, 63), (61, 185), (14, 317), (105, 66), (300, 293), (382, 362), (88, 178), (185, 40), (46, 53), (284, 251), (400, 377), (136, 172), (323, 361), (13, 312), (21, 54), (172, 58), (373, 374), (163, 144), (276, 283), (53, 52), (365, 379), (379, 378), (146, 91), (263, 249), (283, 282), (87, 14), (145, 153), (155, 133), (93, 234), (66, 107), (95, 88), (159, 158), (52, 65), (332, 284), (40, 39), (191, 80), (63, 105), (181, 84), (466, 388), (149, 150), (288, 397), (160, 159), (385, 384)]
CONFIG['mpface']['nJoints'] = 468
CONFIG['mptotal'] = compose(['mpbody', 'hand', 'hand', 'mpface'])
CONFIG['bodyhandmpface'] = compose(['body25', 'hand', 'hand', 'mpface'])
CONFIG['iris'] = {
'nJoints': 10,
'kintree': [[0, 1], [1, 2], [2, 3], [3, 4]]
}
CONFIG['onepoint'] = {
'nJoints': 1,
'kintree': []
}
CONFIG['up'] = {
'nJoints': 79,
'kintree': []
}
CONFIG['ochuman'] = {
'nJoints': 19,
'kintree': [[0, 1], [1, 2], [3, 4], [4, 5], [6, 7], [7, 8], [9, 10], [10, 11], [12, 13], [14, 17], [15, 18], [17, 16], [18, 16]]
}
CONFIG['mpii'] = {
'nJoints': 16,
'kintree': [[0, 1], [1, 2], [3, 4], [4, 5], [2, 6], [3, 6], [6, 7], [7, 8], [8, 9], [10, 11], [11, 12], [7, 12], [7, 13], \
[13, 14], [14, 15]],
'joint_names': ['rank', 'rkne', 'rhip', 'lhip', 'lkne', 'lank', 'pelv', 'thrx', 'neck', 'head', 'rwri', 'relb', 'rsho', 'lsho', 'lelb', 'lwri'],
}
CONFIG['h36mltri_17'] = {
'kintree': [(0, 1), (1, 2), (2, 6), (5, 4), (4, 3), (3, 6), (6, 7), (7, 8), (8, 16), (9, 16), (8, 12), (11, 12), (10, 11), (8, 13), (13, 14), (14, 15)],
'color': ['r', 'r', 'r', 'g', 'g', 'g', 'k', 'k', 'k', 'k', 'g', 'g', 'g', 'r', 'r', 'r'],
'joint_names': CONFIG['mpii']['joint_names'] + ['Neck/Nose'],
'nJoints': 17}
def load_human(path, pid, nf=0, camnames=None, annot='annots'):
    """Load the 2D keypoints of person `pid` at frame `nf` from every camera.

    Args:
        path: dataset root containing `<annot>/<cam>/<frame>.json`.
        pid: personID to select inside each annotation file.
        nf: frame index.
        camnames: camera sub-folder names (default: empty list).
        annot: name of the annotation sub-folder.

    Returns:
        results: (nViews, nJoints, 3) array, zero rows for missing views.
        lines: body25 kintree for drawing limbs.
    """
    if camnames is None:
        camnames = []  # avoid the mutable-default-argument pitfall
    annot_dir = annot  # keep the folder name; a per-view selection is built below
    points, nvs = [], []
    for nv, sub in enumerate(camnames):
        annotname = join(path, annot_dir, sub, '{:06d}.json'.format(nf))
        if not os.path.exists(annotname):
            print('[Warn] Not exist ', annotname)
            continue
        annots = read_json(annotname)
        if isinstance(annots, dict):
            annots = annots['annots']
        records = [d for d in annots if d['personID'] == pid]
        if len(records) == 0:
            continue
        pts = np.array(records[0]['keypoints'])
        # NOTE(review): relies on a script-level `args`; confirm `args.hand` exists
        if args.hand:
            handl = np.array(records[0]['handl2d'])
            handr = np.array(records[0]['handr2d'])
            pts = np.vstack([pts, handl, handr])
        points.append(pts)
        nvs.append(nv)
    if len(points) == 0:
        # BUGFIX: np.stack([]) raised an opaque ValueError; fail explicitly
        raise FileNotFoundError('no annotation of person {} at frame {}'.format(pid, nf))
    points = np.stack(points)
    results = np.zeros((len(camnames), *points.shape[1:]))
    results[nvs] = points
    from easymocap.dataset.config import CONFIG
    lines = CONFIG['body25']['kintree']
    return results, lines
13,327 | from easymocap.mytools.debug_utils import myerror, mywarn
from easymocap.mytools.file_utils import myarray2string
import cv2
import numpy as np
import os
from os.path import join
from easymocap.mytools import read_json, merge
from easymocap.mytools import read_camera, plot_points2d
from easymocap.mytools import batch_triangulate, projectN3, Undistort
from tqdm import tqdm
def mywarn(text):
    # Emit `text` at the "warn" level through the shared logging helper.
    # NOTE(review): `myprint` is not defined or imported in this file, and this
    # def shadows the `mywarn` imported from easymocap.mytools.debug_utils —
    # presumably copied from that module; confirm the redefinition is intended.
    myprint(text, 'warn')
def load2d_ground(path, nf=0, camnames=None):
    """Load ground/chessboard keypoints of frame `nf` for every camera.

    Returns (flag, k2ds, k3d): flag is False when an annotation is missing or
    fewer than two views carry confident detections; k2ds is (nViews, N, 3),
    padded per view to the largest point count with zero-confidence rows;
    k3d is taken from the last annotation read.
    """
    camnames = [] if camnames is None else camnames
    k2ds = []
    k3d = None
    max_points = 0
    for cam in sorted(camnames):
        annname = join(path, cam, '{:06d}.json'.format(nf))
        if not os.path.exists(annname):
            # BUGFIX: the original only warned and then crashed inside
            # read_json; a missing view makes the frame unusable, so bail out.
            mywarn(annname + ' not exists')
            return False, None, None
        data = read_json(annname)
        k2d = np.array(data['keypoints2d'], dtype=np.float32)
        k3d = np.array(data['keypoints3d'], dtype=np.float32)
        max_points = max(max_points, k2d.shape[0])
        k2ds.append(k2d)
    if len(k2ds) == 0:
        # BUGFIX: np.stack on an empty list raised; report failure instead
        return False, None, None
    # pad every view to the same point count with (0, 0, 0) rows
    for i, k2d in enumerate(k2ds):
        if k2d.shape[0] < max_points:
            k2ds[i] = np.vstack([k2d, np.zeros((max_points - k2d.shape[0], 3))])
    k2ds = np.stack(k2ds)
    conf = k2ds[:, :, 2].sum(axis=1)
    if (conf > 0).sum() < 2:
        # triangulation needs at least two views with detections
        return False, None, None
    return True, k2ds, k3d
13,328 | from easymocap.mytools.debug_utils import myerror, mywarn
from easymocap.mytools.file_utils import myarray2string
import cv2
import numpy as np
import os
from os.path import join
from easymocap.mytools import read_json, merge
from easymocap.mytools import read_camera, plot_points2d
from easymocap.mytools import batch_triangulate, projectN3, Undistort
from tqdm import tqdm
def read_match2d_file(file, camnames):
    """Read manually matched 2D points (`points_global`) into a dense array.

    Returns (True, match2d, k3d0): match2d is (nViews, nPoints, 3) with
    confidence 1 for annotated entries; k3d0 is a zero 3D initialization.
    """
    points = read_json(file)['points_global']
    match2d = np.zeros((len(camnames), len(points), 3))
    # map camera name -> row once instead of a list scan per annotation
    cam_index = {cam: i for i, cam in enumerate(camnames)}
    for npo, record in enumerate(points):
        for key, (x, y) in record.items():
            if key not in cam_index:
                continue
            match2d[cam_index[key], npo] = [x, y, 1.]
    return True, match2d, np.zeros((match2d.shape[1], 3))
13,329 | from easymocap.mytools.debug_utils import myerror, mywarn
from easymocap.mytools.file_utils import myarray2string
import cv2
import numpy as np
import os
from os.path import join
from easymocap.mytools import read_json, merge
from easymocap.mytools import read_camera, plot_points2d
from easymocap.mytools import batch_triangulate, projectN3, Undistort
from tqdm import tqdm
def check_calib(path, out, vis=False, show=False, debug=False):
    """Evaluate a calibration by triangulating chessboard detections and
    reporting the confidence-weighted mean reprojection error.

    Args:
        path: dataset root with `images/<cam>` and `chessboard/<cam>` folders.
        out: calibration folder containing intri.yml/extri.yml; overlay
            images are written to `<out>/check` when `vis` is True.
        vis: draw detection-vs-reprojection overlays.
        show: display each overlay with cv2.imshow (blocks on a key press).
        debug: print the per-view error of every frame.
    """
    if vis:
        os.makedirs(join(out, 'check'), exist_ok=True)
    cameras = read_camera(join(out, 'intri.yml'), join(out, 'extri.yml'))
    cameras.pop('basenames')
    total_sum, cnt = 0, 0
    for nf in tqdm(range(10000)):
        imgs = []
        k2ds = []
        for cam, camera in cameras.items():
            if vis:
                # BUGFIX: the original appended the image once per existing
                # extension (twice if both jpg and png exist, never if
                # neither does) and its assert could not fire; use the
                # first extension that exists instead.
                imgname = None
                for ext in ['jpg', 'png']:
                    candidate = join(path, 'images', cam, '{:06d}.{}'.format(nf, ext))
                    if os.path.exists(candidate):
                        imgname = candidate
                        break
                assert imgname is not None, 'missing image of {} frame {}'.format(cam, nf)
                img = cv2.imread(imgname)
                img = Undistort.image(img, camera['K'], camera['dist'])
                imgs.append(img)
            annname = join(path, 'chessboard', cam, '{:06d}.json'.format(nf))
            if not os.path.exists(annname):
                # NOTE(review): a partial k2ds here would mismatch Pall below;
                # assumed every frame is annotated for all views — confirm
                break
            data = read_json(annname)
            k2d = np.array(data['keypoints2d'], dtype=np.float32)
            k2d = Undistort.points(k2d, camera['K'], camera['dist'])
            k2ds.append(k2d)
        if len(k2ds) == 0:
            break
        Pall = np.stack([camera['P'] for camera in cameras.values()])
        k2ds = np.stack(k2ds)
        k3d = batch_triangulate(k2ds, Pall)
        kpts_repro = projectN3(k3d, Pall)
        for nv in range(len(k2ds)):
            conf = k2ds[nv][:, -1]
            dist = conf * np.linalg.norm(kpts_repro[nv][:, :2] - k2ds[nv][:, :2], axis=1)
            total_sum += dist.sum()
            cnt += conf.sum()
            if debug:
                print('{:2d}-{:2d}: {:6.2f}/{:2d}'.format(nf, nv, dist.sum(), int(conf.sum())))
            if vis:
                kpts_repro_vis = np.hstack((kpts_repro[nv][:, :2], conf[:, None]))
                plot_points2d(imgs[nv], kpts_repro_vis, [], col=(0, 0, 255), lw=1, putText=False)
                plot_points2d(imgs[nv], k2ds[nv], [], lw=1, putText=False)
                for i in range(kpts_repro_vis.shape[0]):
                    # BUGFIX: cv2.line needs integer pixel tuples, not float arrays
                    pt_repro = tuple(np.round(kpts_repro_vis[i, :2]).astype(int))
                    pt_det = tuple(np.round(k2ds[nv][i, :2]).astype(int))
                    cv2.line(imgs[nv], pt_repro, pt_det, (0, 0, 0), thickness=1)
                if show:
                    cv2.imshow('vis', imgs[nv])
                    cv2.waitKey(0)
        if vis:
            imgout = merge(imgs, resize=False)
            outname = join(out, 'check', '{:06d}.jpg'.format(nf))
            cv2.imwrite(outname, imgout)
    if cnt > 0:
        print('{:.2f}/{} = {:.2f} pixel'.format(total_sum, int(cnt), total_sum/cnt))
    else:
        # BUGFIX: avoid ZeroDivisionError when nothing was evaluated
        print('[Warn] no confident keypoints were found')
13,330 | from easymocap.mytools.debug_utils import myerror, mywarn
from easymocap.mytools.file_utils import myarray2string
import cv2
import numpy as np
import os
from os.path import join
from easymocap.mytools import read_json, merge
from easymocap.mytools import read_camera, plot_points2d
from easymocap.mytools import batch_triangulate, projectN3, Undistort
from tqdm import tqdm
def check_match(path, out):
    """Visualize the reprojection of the manually matched calibration points.

    Triangulates the `calib.json` points with the calibrated cameras and
    writes one overlay image per camera into `out`.
    """
    os.makedirs(out, exist_ok=True)
    cameras = read_camera(join(path, 'intri.yml'), join(path, 'extri.yml'))
    cams = cameras.pop('basenames')
    annots = read_json(join(path, 'calib.json'))
    points_global = annots['points_global']
    # gather the 2D annotations: (nViews, nPoints, 3), conf=1 when labelled
    points2d = np.zeros((len(cams), len(points_global), 3))
    for i, record in enumerate(points_global):
        for cam, (x, y) in record.items():
            points2d[cams.index(cam), i] = (x, y, 1)
    # undistort before triangulation
    for nv in range(points2d.shape[0]):
        camera = cameras[cams[nv]]
        points2d[nv] = Undistort.points(points2d[nv], camera['K'], camera['dist'])
    Pall = np.stack([cameras[cam]['P'] for cam in cams])
    points3d = batch_triangulate(points2d, Pall)
    lines = []
    nf = 0
    for cam, camera in cameras.items():
        imgname = join(path, 'images', cam, '{:06d}.jpg'.format(nf))
        assert os.path.exists(imgname), imgname
        img = cv2.imread(imgname)
        img = Undistort.image(img, camera['K'], camera['dist'])
        kpts_repro = projectN3(points3d, camera['P'][None, :, :])[0]
        plot_points2d(img, kpts_repro, lines, col=(0, 0, 255), lw=1, putText=True)
        k2d = points2d[cams.index(cam)]
        plot_points2d(img, k2d, lines, col=(0, 255, 0), lw=1, putText=True)
        # BUGFIX: the original referenced `kpts_repro_vis` and `imgs[nv]`,
        # names copied from check_calib that do not exist here (NameError);
        # cv2.line also needs integer pixel tuples, not float arrays.
        for i in range(kpts_repro.shape[0]):
            pt_repro = tuple(np.round(kpts_repro[i, :2]).astype(int))
            pt_anno = tuple(np.round(k2d[i, :2]).astype(int))
            cv2.line(img, pt_repro, pt_anno, (0, 0, 0), thickness=1)
        outname = join(out, cam + '.jpg')
        cv2.imwrite(outname, img)
13,331 | import open3d as o3d
import os
import cv2
import numpy as np
from easymocap.mytools.camera_utils import read_cameras
from easymocap.visualize.o3dwrapper import Vector3dVector, create_pcd
from easymocap.mytools.vis_base import generate_colorbar
def transform_cameras(cameras):
    """Apply the user-requested rigid transform (from the script-level
    `args`: trans0 -> rot -> trans) to every camera extrinsic in place.

    Returns the updated `cameras` dict and the 4x4 world transform applied.
    """
    axis_of = {'x': 0, 'y': 1, 'z': 2}
    R_acc = np.eye(3)
    T_acc = np.zeros((3, 1))
    # 1. pre-rotation translation
    if len(args.trans0) == 3:
        T_acc += np.array(args.trans0).reshape(3, 1)
    # 2. axis/angle pairs, e.g. ['x', '90', 'z', '45'], composed left-to-right
    for i in range(len(args.rot) // 2):
        axis = args.rot[2 * i]
        degree = float(args.rot[2 * i + 1])
        rodrigues = np.zeros((3,))
        rodrigues[axis_of[axis]] = np.deg2rad(degree)
        R_acc = cv2.Rodrigues(rodrigues)[0] @ R_acc
    T_acc = R_acc @ T_acc  # rotate the accumulated translation as well
    # 3. post-rotation translation (translate the cameras)
    if len(args.trans) == 3:
        T_acc += np.array(args.trans).reshape(3, 1)
    world = np.eye(4)
    world[:3, :3] = R_acc
    world[:3, 3:] = T_acc
    world_inv = np.linalg.inv(world)
    # compose the inverse world transform into each camera's [R|T]
    for _, cam in cameras.items():
        ext = np.eye(4)
        ext[:3, :3] = cam['R']
        ext[:3, 3:] = cam['T']
        ext = ext @ world_inv
        cam.pop('Rvec', '')
        cam['R'] = ext[:3, :3]
        cam['T'] = ext[:3, 3:]
    return cameras, world
13,332 | import shutil
import random
from easymocap.mytools.debug_utils import log, mywarn
from easymocap.mytools.vis_base import plot_points2d
from easymocap.mytools import write_intri, read_json, Timer
import numpy as np
import cv2
import os
from os.path import join
from glob import glob
from easymocap.annotator.chessboard import get_lines_chessboard
from tqdm import tqdm
def load_chessboards(chessnames, imagenames, max_image, sample_image=-1, out='debug-calib'):
def calib_intri_share(path, image, ext):
    """Calibrate ONE intrinsic shared by all cameras.

    Pools every chessboard detection under `<path>/chessboard/*/` into a
    single cv2.calibrateCamera call, assigns the resulting K/dist to every
    camera folder, writes undistorted previews and `intri.yml` to
    `<path>/output`.
    NOTE(review): relies on the script-level `args` (num, sample, path).
    """
    camnames = sorted(os.listdir(join(path, image)))
    camnames = [cam for cam in camnames if os.path.isdir(join(path, image, cam))]
    imagenames = sorted(glob(join(path, image, '*', '*' + ext)))
    chessnames = sorted(glob(join(path, 'chessboard', '*', '*.json')))
    out_dir = join(path, 'output')
    # BUGFIX: cv2.imwrite fails silently when the directory is missing
    os.makedirs(out_dir, exist_ok=True)
    k3ds_, k2ds_ = load_chessboards(chessnames, imagenames, args.num, args.sample, out=join(args.path, 'output'))
    with Timer('calibrate'):
        print('[Info] start calibration with {} detections'.format(len(k2ds_)))
        gray = cv2.imread(imagenames[0], 0)
        k3ds = k3ds_
        # drop the confidence column; calibrateCamera wants contiguous (N, 2)
        k2ds = [np.ascontiguousarray(k2d[:, :-1]) for k2d in k2ds_]
        ret, K, dist, rvecs, tvecs = cv2.calibrateCamera(
            k3ds, k2ds, gray.shape[::-1], None, None,
            flags=cv2.CALIB_FIX_K3)
    # every camera shares the same intrinsic
    cameras = {cam: {'K': K, 'dist': dist} for cam in camnames}  # dist: (1, 5)
    # write undistorted previews of all input images
    img = cv2.imread(imagenames[0])
    h, w = img.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(K, dist, (w, h), 1, (w, h))
    mapx, mapy = cv2.initUndistortRectifyMap(K, dist, None, newcameramtx, (w, h), 5)
    for imgname in tqdm(imagenames):
        img = cv2.imread(imgname)
        dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)
        # NOTE(review): images from different cameras may share a basename and
        # overwrite each other here — confirm whether that is acceptable
        outname = join(out_dir, os.path.basename(imgname))
        cv2.imwrite(outname, dst)
    write_intri(join(out_dir, 'intri.yml'), cameras)
13,333 | import shutil
import random
from easymocap.mytools.debug_utils import log, mywarn
from easymocap.mytools.vis_base import plot_points2d
from easymocap.mytools import write_intri, read_json, Timer
import numpy as np
import cv2
import os
from os.path import join
from glob import glob
from easymocap.annotator.chessboard import get_lines_chessboard
from tqdm import tqdm
def load_chessboards(chessnames, imagenames, max_image, sample_image=-1, out='debug-calib'):
    """Read chessboard detections and copy the images actually used to `out`.

    Args:
        chessnames: detection json files (aligned with `imagenames`).
        imagenames: source image files.
        max_image: keep at most this many detections (>0 enables pruning
            through the external `pop` helper).
        sample_image: when >0, randomly keep only this many detections.
        out: folder that receives a renumbered copy of every kept image.

    Returns:
        (k3ds, k2ds): lists of 3D board points and 2D detections.
    """
    os.makedirs(out, exist_ok=True)
    # BUGFIX(minor): dropped the unused `imgs` accumulator of the original
    k3ds_, k2ds_ = [], []
    valid_idx = []
    for i, chessname in enumerate(tqdm(chessnames, desc='read')):
        flag, k2d, k3d = read_chess(chessname)  # helper defined elsewhere
        if not flag:
            continue
        k3ds_.append(k3d)
        k2ds_.append(k2d)
        valid_idx.append(i)
        # prune once 10% over budget; `pop` mutates the lists in place
        if max_image > 0 and len(valid_idx) > max_image + int(max_image * 0.1):
            pop(k2ds_, k3ds_, valid_idx, imagenames, max_num=max_image)
    if sample_image > 0:
        mywarn('[calibration] Load {} images, sample {} images'.format(len(k3ds_), sample_image))
        index = list(range(len(k2ds_)))
        index_sample = random.sample(index, min(sample_image, len(index)))
        valid_idx = [valid_idx[i] for i in index_sample]
        k2ds_ = [k2ds_[i] for i in index_sample]
        k3ds_ = [k3ds_[i] for i in index_sample]
    else:
        log('[calibration] Load {} images'.format(len(k3ds_)))
    for ii, idx in enumerate(valid_idx):
        shutil.copyfile(imagenames[idx], join(out, '{:06d}.jpg'.format(ii)))
    return k3ds_, k2ds_
def calib_intri(path, image, ext):
    """Calibrate a separate intrinsic for each camera folder and write them
    all to `<path>/output/intri.yml` (relies on the script-level `args`)."""
    folders = sorted(os.listdir(join(path, image)))
    folders = [name for name in folders if os.path.isdir(join(path, image, name))]
    cameras = {}
    for cam in folders:
        imagenames = sorted(glob(join(path, image, cam, '*' + ext)))
        chessnames = sorted(glob(join(path, 'chessboard', cam, '*.json')))
        k3ds, k2ds_conf = load_chessboards(
            chessnames, imagenames, args.num,
            out=join(args.path, 'output', cam + '_used'))
        # drop the confidence column; calibrateCamera wants contiguous (N, 2)
        k2ds = [np.ascontiguousarray(k2d[:, :-1]) for k2d in k2ds_conf]
        gray = cv2.imread(imagenames[0], 0)
        print('>> Camera {}: {:3d} frames'.format(cam, len(k2ds)))
        with Timer('calibrate'):
            ret, K, dist, rvecs, tvecs = cv2.calibrateCamera(
                k3ds, k2ds, gray.shape[::-1], None, None,
                flags=cv2.CALIB_FIX_K3)
        cameras[cam] = {'K': K, 'dist': dist}  # dist: (1, 5)
    write_intri(join(path, 'output', 'intri.yml'), cameras)
13,334 | from easymocap.mytools.camera_utils import write_intri
import os
from glob import glob
from os.path import join
import numpy as np
import cv2
from easymocap.mytools import read_intri, write_extri, read_json
from easymocap.mytools.debug_utils import mywarn
def init_intri(path, image, ext='.jpg'):
    """Build an initial pinhole intrinsic guess for every camera folder.

    The focal length is guessed as 1.2 * max(height, width), following
    COLMAP's convention; distortion starts at zero.

    Args:
        path: dataset root.
        image: image sub-folder name (one folder per camera inside it).
        ext: image extension to probe (generalized from the hard-coded
            '.jpg'; default preserves the original behavior).

    Returns:
        {camname: {'K': (3,3) array, 'dist': (1,5) zeros}}
    """
    camnames = sorted(os.listdir(join(path, image)))
    cameras = {}
    for cam in camnames:
        imagenames = sorted(glob(join(path, image, cam, '*' + ext)))
        assert len(imagenames) > 0, 'no {} image found in {}'.format(ext, join(path, image, cam))
        # read one image only to get the sensor resolution
        img = cv2.imread(imagenames[0])
        height, width = img.shape[0], img.shape[1]
        focal = 1.2 * max(height, width)  # as colmap
        K = np.array([focal, 0., width/2, 0., focal, height/2, 0., 0., 1.]).reshape(3, 3)
        cameras[cam] = {
            'K': K,
            'dist': np.zeros((1, 5)),
        }
    return cameras
def solvePnP(k3d, k2d, K, dist, flag, tryextri=False):
    """Solve the camera pose from 3D-2D correspondences with cv2.solvePnP.

    Args:
        k3d: (N, 3) object points.
        k2d: (N, 2+) image points; only the first two columns are used.
        K, dist: intrinsic matrix and distortion coefficients.
        flag: a cv2.SOLVEPNP_* method constant.
        tryextri: when True, restart PnP from many initial poses placed on a
            circle around the origin and keep the best one.

    Returns:
        (err, rvec, tvec, kpts_repro): mean reprojection error in pixels,
        Rodrigues rotation vector, translation, reprojected 2D points.
    """
    k2d = np.ascontiguousarray(k2d[:, :2])
    # try different initial values:
    if tryextri:
        def closure(rvec, tvec):
            # refine from the given initial guess (useExtrinsicGuess=True)
            # and score it by mean reprojection error
            ret, rvec, tvec = cv2.solvePnP(k3d, k2d, K, dist, rvec, tvec, True, flags=flag)
            points2d_repro, xxx = cv2.projectPoints(k3d, rvec, tvec, K, dist)
            kpts_repro = points2d_repro.squeeze()
            err = np.linalg.norm(points2d_repro.squeeze() - k2d, axis=1).mean()
            return err, rvec, tvec, kpts_repro
        # create a series of extrinsic parameters looking at the origin
        # NOTE(review): assumes cameras roughly 7m from the origin at ~2.1m
        # height — heuristic guesses; confirm they match the capture setup
        height_guess = 2.1
        radius_guess = 7.
        infos = []
        for theta in np.linspace(0, 2*np.pi, 180):
            st = np.sin(theta)
            ct = np.cos(theta)
            center = np.array([radius_guess*ct, radius_guess*st, height_guess]).reshape(3, 1)
            # rotation that points the optical axis from `center` at the origin
            R = np.array([
                [-st, ct, 0],
                [0, 0, -1],
                [-ct, -st, 0]
            ])
            tvec = - R @ center
            rvec = cv2.Rodrigues(R)[0]
            err, rvec, tvec, kpts_repro = closure(rvec, tvec)
            infos.append({
                'err': err,
                'repro': kpts_repro,
                'rvec': rvec,
                'tvec': tvec
            })
        # keep the candidate with the lowest reprojection error
        infos.sort(key=lambda x:x['err'])
        err, rvec, tvec, kpts_repro = infos[0]['err'], infos[0]['rvec'], infos[0]['tvec'], infos[0]['repro']
    else:
        # single PnP solve without an initial guess
        ret, rvec, tvec = cv2.solvePnP(k3d, k2d, K, dist, flags=flag)
        points2d_repro, xxx = cv2.projectPoints(k3d, rvec, tvec, K, dist)
        kpts_repro = points2d_repro.squeeze()
        err = np.linalg.norm(points2d_repro.squeeze() - k2d, axis=1).mean()
    # print(err)
    return err, rvec, tvec, kpts_repro
def write_intri(intri_name, cameras):
    """Write the intrinsics of `cameras` ({name: {'K', 'dist'}}) to a yml file.

    NOTE(review): `FileStorage` is not imported in this file (it lives in
    easymocap.mytools.camera_utils) — confirm the intended import; this def
    also shadows the `write_intri` imported at the top of the file.
    """
    if not os.path.exists(os.path.dirname(intri_name)):
        os.makedirs(os.path.dirname(intri_name))
    intri = FileStorage(intri_name, True)
    # BUGFIX(minor): removed the unused `results = {}` local of the original
    camnames = list(cameras.keys())
    intri.write('names', camnames, 'list')
    for key_, val in cameras.items():
        key = key_.split('.')[0]  # strip an extension-like suffix from the name
        K, dist = val['K'], val['dist']
        assert K.shape == (3, 3), K.shape
        assert dist.shape == (1, 5) or dist.shape == (5, 1) or dist.shape == (1, 4) or dist.shape == (4, 1), dist.shape
        intri.write('K_{}'.format(key), K)
        # store distortion as a single row regardless of the input layout
        intri.write('dist_{}'.format(key), dist.flatten()[None])
def mywarn(text):
    # Emit `text` at the "warn" level.
    # NOTE(review): `myprint` is not defined or imported in this file and this
    # def shadows the `mywarn` imported from easymocap.mytools.debug_utils;
    # as written, a call raises NameError — confirm the intended source.
    myprint(text, 'warn')
def calib_extri(path, image, intriname, image_id):
    """Solve per-camera extrinsics by PnP on chessboard annotation `image_id`.

    Writes `intri.yml` and `extri.yml` into `path`. Uses the script-level
    `args` (tryfocal) — this is a command-line helper, not a library function.

    Args:
        path: dataset root with `chessboard/<cam>/*.json` annotations.
        image: image sub-folder name (one folder per camera).
        intriname: intrinsic yml to load, or None to start from init_intri.
        image_id: index of the chessboard annotation to use per camera.
    """
    camnames = sorted(os.listdir(join(path, image)))
    camnames = [c for c in camnames if os.path.isdir(join(path, image, c))]
    if intriname is None:
        # no intrinsics given: start from the COLMAP-style guess
        intri = init_intri(path, image)
    else:
        assert os.path.exists(intriname), intriname
        intri = read_intri(intriname)
        if len(intri.keys()) == 1:
            # a single shared intrinsic: replicate it per camera.
            # BUGFIX: dict.copy() is shallow, so every camera shared the SAME
            # K array and the focal sweep below mutated all of them at once;
            # copy each array value instead.
            key0 = list(intri.keys())[0]
            for cam in camnames:
                intri[cam] = {k: (v.copy() if hasattr(v, 'copy') else v)
                              for k, v in intri[key0].items()}
    extri = {}
    # methods = [cv2.SOLVEPNP_ITERATIVE, cv2.SOLVEPNP_P3P, cv2.SOLVEPNP_AP3P, cv2.SOLVEPNP_EPNP, cv2.SOLVEPNP_DLS, cv2.SOLVEPNP_IPPE, cv2.SOLVEPNP_SQPNP]
    methods = [cv2.SOLVEPNP_ITERATIVE]
    for cam in camnames:
        chessnames = sorted(glob(join(path, 'chessboard', cam, '*.json')))
        assert len(chessnames) > 0, cam
        data = read_json(chessnames[image_id])
        k3d = np.array(data['keypoints3d'], dtype=np.float32)
        k2d = np.array(data['keypoints2d'], dtype=np.float32)
        if k3d.shape[0] != k2d.shape[0]:
            mywarn('k3d {} doesnot match k2d {}'.format(k3d.shape, k2d.shape))
            length = min(k3d.shape[0], k2d.shape[0])
            k3d = k3d[:length]
            k2d = k2d[:length]
        valididx = k2d[:, 2] > 0
        if valididx.sum() < 4:
            # Too few confident points for PnP.
            # BUGFIX: the original built a dummy identity pose and immediately
            # popped it again — just report and skip this camera.
            print('[ERROR] Failed to initialize the extrinsic parameters')
            continue
        k3d = k3d[valididx]
        k2d = k2d[valididx]
        if args.tryfocal:
            # grid-search the focal length, keep the lowest reprojection error
            infos = []
            for focal in range(500, 5000, 10):
                dist = intri[cam]['dist']
                K = intri[cam]['K']
                K[0, 0] = focal
                K[1, 1] = focal
                for flag in methods:
                    err, rvec, tvec, kpts_repro = solvePnP(k3d, k2d, K, dist, flag)
                    infos.append({
                        'focal': focal,
                        'err': err,
                        'repro': kpts_repro,
                        'rvec': rvec,
                        'tvec': tvec
                    })
            infos.sort(key=lambda x: x['err'])
            best = infos[0]
            err, rvec, tvec = best['err'], best['rvec'], best['tvec']
            kpts_repro = best['repro']
            # persist the winning focal in the intrinsics written below
            intri[cam]['K'][0, 0] = best['focal']
            intri[cam]['K'][1, 1] = best['focal']
        else:
            K, dist = intri[cam]['K'], intri[cam]['dist']
            err, rvec, tvec, kpts_repro = solvePnP(k3d, k2d, K, dist, flag=cv2.SOLVEPNP_ITERATIVE)
        extri[cam] = {
            'Rvec': rvec,
            'R': cv2.Rodrigues(rvec)[0],
            'T': tvec,
        }
        center = - extri[cam]['R'].T @ tvec  # camera center in world coordinates
        print('{} center => {}, err = {:.3f}'.format(cam, center.squeeze(), err))
    write_intri(join(path, 'intri.yml'), intri)
    write_extri(join(path, 'extri.yml'), extri)
13,335 | import join
from easymocap.annotator.file_utils import save_json
from easymocap.mytools.debug_utils import myerror, run_cmd, mywarn, log
from easymocap.mytools.camera_utils import read_cameras, write_camera
from easymocap.mytools import read_json
from easymocap.mytools import batch_triangulate, projectN3, Undistort
import numpy as np
import cv2
from apps.calibration.calib_extri import solvePnP
def guess_ground(pcdname):
pcd = o3d.io.read_point_cloud(pcdname) | null |
13,336 | os.path import join
from easymocap.annotator.file_utils import save_json
from easymocap.mytools.debug_utils import myerror, run_cmd, mywarn, log
from easymocap.mytools.camera_utils import read_cameras, write_camera
from easymocap.mytools import read_json
from easymocap.mytools import batch_triangulate, projectN3, Undistort
import numpy as np
import cv2
from apps.calibration.calib_extri import solvePnP
def compute_rel(R_src, T_src, R_tgt, T_tgt):
    """Compose the relative pose between two extrinsics.

    Given the (R, T) of a source and a target camera, returns
    (R_src^T @ R_tgt, R_src^T @ (T_tgt - T_src)).
    """
    src_rot_inv = R_src.T
    R_rel = src_rot_inv @ R_tgt
    T_rel = src_rot_inv @ (T_tgt - T_src)
    return R_rel, T_rel
def triangulate(cameras, areas):
    # Triangulate the chessboard keypoints seen by the cameras listed in
    # `areas` (each entry: [cam_name, area, k2d, k3d]); returns the 3D points
    # from batch_triangulate.
    Ps, k2ds = [], []
    for cam, _, k2d, k3d in areas:
        # undistort before building the DLT system; projection P = K [R|T]
        k2d = Undistort.points(k2d, cameras[cam]['K'], cameras[cam]['dist'])
        P = cameras[cam]['K'] @ np.hstack([cameras[cam]['R'], cameras[cam]['T']])
        Ps.append(P)
        k2ds.append(k2d)
    Ps = np.stack(Ps)
    k2ds = np.stack(k2ds)
    # NOTE(review): the loop variable k3d (ground-truth board points) is
    # unused and shadowed here by the triangulated result — confirm intent
    k3d = batch_triangulate(k2ds, Ps)
    return k3d
def best_fit_transform(A, B):
    '''
    Calculates the least-squares best-fit rigid transform that maps
    corresponding points A to B in m spatial dimensions (Kabsch algorithm).
    Input:
      A: Nxm numpy array of corresponding points
      B: Nxm numpy array of corresponding points
    Returns:
      R: mxm rotation matrix
      t: (m,) translation vector
    (The original docstring also promised a homogeneous matrix T that was
    never returned; the documented interface now matches the code.)
    '''
    assert A.shape == B.shape
    # get number of dimensions
    m = A.shape[1]
    # translate points to their centroids
    centroid_A = np.mean(A, axis=0)
    centroid_B = np.mean(B, axis=0)
    AA = A - centroid_A
    BB = B - centroid_B
    # rotation from the SVD of the cross-covariance matrix
    H = AA.T @ BB
    U, S, Vt = np.linalg.svd(H)
    R = Vt.T @ U.T
    # special reflection case: flip the last singular direction
    if np.linalg.det(R) < 0:
        Vt[m-1, :] *= -1
        R = Vt.T @ U.T
    # translation
    t = centroid_B.T - R @ centroid_A.T
    return R, t
def log(text):
    # Emit `text` at the "info" level.
    # NOTE(review): `myprint` is not defined or imported in this file and this
    # def shadows the `log` imported from easymocap.mytools.debug_utils —
    # as written, a call raises NameError; confirm the intended source.
    myprint(text, 'info')
def myerror(text):
    # Emit `text` at the "error" level.
    # NOTE(review): `myprint` is not defined or imported in this file and this
    # def shadows the `myerror` imported from easymocap.mytools.debug_utils —
    # as written, a call raises NameError; confirm the intended source.
    myprint(text, 'error')
def solvePnP(k3d, k2d, K, dist, flag, tryextri=False):
    """Estimate the camera pose from 3D-2D correspondences via cv2.solvePnP.

    NOTE(review): byte-for-byte duplicate of the solvePnP defined earlier in
    this file (and imported from apps.calibration.calib_extri above) —
    consider deduplicating.

    Args:
        k3d: (N, 3) object points.
        k2d: (N, 2+) image points; only the first two columns are used.
        K, dist: intrinsics and distortion coefficients.
        flag: a cv2.SOLVEPNP_* method constant.
        tryextri: when True, restart PnP from many initial poses on a circle
            around the origin and keep the lowest-error one.

    Returns:
        (err, rvec, tvec, kpts_repro): mean reprojection error in pixels,
        Rodrigues rotation vector, translation, reprojected 2D points.
    """
    k2d = np.ascontiguousarray(k2d[:, :2])
    # try different initial values:
    if tryextri:
        def closure(rvec, tvec):
            # refine from the given guess (useExtrinsicGuess=True) and score it
            ret, rvec, tvec = cv2.solvePnP(k3d, k2d, K, dist, rvec, tvec, True, flags=flag)
            points2d_repro, xxx = cv2.projectPoints(k3d, rvec, tvec, K, dist)
            kpts_repro = points2d_repro.squeeze()
            err = np.linalg.norm(points2d_repro.squeeze() - k2d, axis=1).mean()
            return err, rvec, tvec, kpts_repro
        # create a series of extrinsic parameters looking at the origin
        # NOTE(review): ~7m radius / ~2.1m height are heuristic guesses for
        # the capture rig — confirm they fit the actual setup
        height_guess = 2.1
        radius_guess = 7.
        infos = []
        for theta in np.linspace(0, 2*np.pi, 180):
            st = np.sin(theta)
            ct = np.cos(theta)
            center = np.array([radius_guess*ct, radius_guess*st, height_guess]).reshape(3, 1)
            # rotation pointing the optical axis from `center` at the origin
            R = np.array([
                [-st, ct, 0],
                [0, 0, -1],
                [-ct, -st, 0]
            ])
            tvec = - R @ center
            rvec = cv2.Rodrigues(R)[0]
            err, rvec, tvec, kpts_repro = closure(rvec, tvec)
            infos.append({
                'err': err,
                'repro': kpts_repro,
                'rvec': rvec,
                'tvec': tvec
            })
        # keep the candidate with the lowest reprojection error
        infos.sort(key=lambda x:x['err'])
        err, rvec, tvec, kpts_repro = infos[0]['err'], infos[0]['rvec'], infos[0]['tvec'], infos[0]['repro']
    else:
        # single solve without an initial guess
        ret, rvec, tvec = cv2.solvePnP(k3d, k2d, K, dist, flags=flag)
        points2d_repro, xxx = cv2.projectPoints(k3d, rvec, tvec, K, dist)
        kpts_repro = points2d_repro.squeeze()
        err = np.linalg.norm(points2d_repro.squeeze() - k2d, axis=1).mean()
    # print(err)
    return err, rvec, tvec, kpts_repro
def align_by_chessboard(cameras, path):
    """Align COLMAP-style camera extrinsics to the chessboard world frame.

    Picks the view where the chessboard appears largest, resolves the global
    scale from the board's known diagonal length, then maps every camera from
    the COLMAP frame into the chessboard coordinate system.
    Returns (cameras, scale_colmap, inverse 4x4 chessboard->colmap transform).
    NOTE(review): relies on the module-level `args` (scale2d/origin/prefix)
    and on external helpers read_json/triangulate/best_fit_transform.
    """
    camnames = sorted(os.listdir(join(path, 'chessboard')))
    areas = []
    for ic, cam in enumerate(camnames):
        imagename = join(path, 'images', cam, '000000.jpg')
        chessname = join(path, 'chessboard', cam, '000000.json')
        data = read_json(chessname)
        k3d = np.array(data['keypoints3d'], dtype=np.float32)
        k2d = np.array(data['keypoints2d'], dtype=np.float32)
        # TODO
        # pattern = (11, 8)
        if 'pattern' in data.keys():
            pattern = data['pattern']
        else:
            pattern = None
        img = cv2.imread(imagename)
        if args.scale2d is not None:
            # rescale both annotations and image consistently
            k2d[:, :2] *= args.scale2d
            img = cv2.resize(img, None, fx=args.scale2d, fy=args.scale2d)
        if args.origin is not None:
            # rename cameras from the origin prefix convention to the new prefix
            cameras[args.prefix+cam] = cameras.pop(args.origin+cam.replace('VID_', '0000'))
            cam = args.prefix + cam
        if cam not in cameras.keys():
            myerror('camera {} not found in {}'.format(cam, cameras.keys()))
            continue
        cameras[cam]['shape'] = img.shape[:2]
        if k2d[:, -1].sum() < 1:
            continue
        # calculate the area of the chessboard
        mask = np.zeros_like(img[:, :, 0])
        k2d_int = np.round(k2d[:, :2]).astype(int)
        if pattern is not None:
            cv2.fillPoly(mask, [k2d_int[[0, pattern[0]-1, -1, -pattern[0]]]], 1)
        else:
            cv2.fillPoly(mask, [k2d_int[[0, 1, 2, 3, 0]]], 1)
        area = mask.sum()
        print(cam, area)
        areas.append([cam, area, k2d, k3d])
    # prefer the view where the chessboard appears largest
    areas.sort(key=lambda x: -x[1])
    best_cam, area, k2d, k3d = areas[0]
    # first resolve the global scale
    ref_point_id = np.linalg.norm(k3d - k3d[:1], axis=-1).argmax()
    k3d_pre = triangulate(cameras, areas)
    length_gt = np.linalg.norm(k3d[0, :3] - k3d[ref_point_id, :3])
    length = np.linalg.norm(k3d_pre[0, :3] - k3d_pre[ref_point_id, :3])
    log('gt diag={:.3f}, est diag={:.3f}, scale={:.3f}'.format(length_gt, length, length_gt/length))
    scale_colmap = length_gt / length
    for cam, camera in cameras.items():
        camera['T'] *= scale_colmap
    k3d_pre = triangulate(cameras, areas)
    length = np.linalg.norm(k3d_pre[0, :3] - k3d_pre[-1, :3])
    log('gt diag={:.3f}, est diag={:.3f}, scale={:.3f}'.format(length_gt, length, length_gt/length))
    # compute the camera R, T relative to the chessboard
    if False:
        for cam, _, k2d, k3d in areas:
            K, dist = cameras[cam]['K'], cameras[cam]['dist']
            R, T = cameras[cam]['R'], cameras[cam]['T']
            err, rvec, tvec, kpts_repro = solvePnP(k3d, k2d, K, dist, flag=cv2.SOLVEPNP_ITERATIVE)
            # the relative transforms computed from different views should agree
            R_tgt = cv2.Rodrigues(rvec)[0]
            T_tgt = tvec.reshape(3, 1)
            R_rel, T_rel = compute_rel(R, T, R_tgt, T_tgt)
            break
    else:
        # fit a rigid transform between estimated and ground-truth board corners
        X = k3d_pre[:, :3]
        X_gt = k3d[:, :3]
        R_rel, T_rel = best_fit_transform(X_gt, X)
    # map from the chessboard frame to the colmap frame
    T_rel = T_rel.reshape(3, 1)
    centers = []
    for cam, camera in cameras.items():
        camera.pop('Rvec')
        R_old, T_old = camera['R'], camera['T']
        R_new = R_old @ R_rel
        T_new = T_old + R_old @ T_rel
        camera['R'] = R_new
        camera['T'] = T_new
        center = - camera['R'].T @ camera['T']
        centers.append(center)
        print('{}: ({:6.3f}, {:.3f}, {:.3f})'.format(cam, *np.round(center.T[0], 3)))
    # estimate the scale once more using the chessboard
    k3d_pre = triangulate(cameras, areas)
    length = np.linalg.norm(k3d_pre[0, :3] - k3d_pre[ref_point_id, :3])
    log('{} {} {}'.format(length_gt, length, length_gt/length))
    log(k3d_pre)
    transform = np.eye(4)
    transform[:3, :3] = R_rel
    transform[:3, 3:] = T_rel
    return cameras, scale_colmap, np.linalg.inv(transform)
13,337 | from easymocap.annotator.file_utils import getFileList, read_json, save_json
from easymocap.mytools.debug_utils import mywarn
from tqdm import tqdm
from easymocap.annotator import ImageFolder
from easymocap.annotator.chessboard import findChessboardCorners
import numpy as np
from os.path import join
import cv2
import os
import func_timeout
import threading
from easymocap.mytools.debug_utils import log
def create_chessboard(path, image, pattern, gridSize, ext, overwrite=True):
    """Create (or refresh) per-image chessboard annotation templates.

    For every image under path/<image>, writes path/chessboard/<name>.json
    containing the canonical 3D corner positions plus an empty 2D detection.
    With overwrite=True, existing files keep their 2D data but get the 3D
    template refreshed; with overwrite=False existing files are untouched.
    NOTE(review): reads the module-level `args.axis` — confirm it is set.
    """
    print('Create chessboard {}'.format(pattern))
    keypoints3d = getChessboard3d(pattern, gridSize=gridSize, axis=args.axis)
    keypoints2d = np.zeros((keypoints3d.shape[0], 3))
    imgnames = getFileList(join(path, image), ext=ext)
    template = {
        'keypoints3d': keypoints3d.tolist(),
        'keypoints2d': keypoints2d.tolist(),
        'pattern': pattern,
        'grid_size': gridSize,
        'visited': False
    }
    for imgname in tqdm(imgnames, desc='create template chessboard'):
        annname = imgname.replace(ext, '.json')
        annname = join(path, 'chessboard', annname)
        if os.path.exists(annname) and overwrite:
            # overwrite keypoints3d only; keep existing 2D annotations
            data = read_json(annname)
            data['keypoints3d'] = template['keypoints3d']
            save_json(annname, data)
        elif os.path.exists(annname) and not overwrite:
            continue
        else:
            save_json(annname, template)
def _detect_chessboard(datas, path, image, out, pattern):
    """Worker: detect chessboard corners for a list of (image, annot) pairs.

    Updates each annotation JSON in place and writes a visualization image
    under *out* when detection succeeds; a detection timeout counts as a
    failure but the annotation file is still saved.
    """
    for imgname, annotname in datas:
        # imgname, annotname = dataset[i]
        # detect the 2d chessboard
        img = cv2.imread(imgname)
        annots = read_json(annotname)
        try:
            show = findChessboardCorners(img, annots, pattern)
        except func_timeout.exceptions.FunctionTimedOut:
            show = None
        # persist annotations even when detection failed or timed out
        save_json(annotname, annots)
        if show is None:
            mywarn('[Info] Cannot find chessboard in {}'.format(imgname))
            continue
        outname = join(out, imgname.replace(path + '/{}/'.format(image), ''))
        os.makedirs(os.path.dirname(outname), exist_ok=True)
        if isinstance(show, np.ndarray):
            cv2.imwrite(outname, show)
def detect_chessboard(path, image, out, pattern, gridSize, args):
    """Detect chessboard corners on every frame, split across args.mp threads."""
    create_chessboard(path, image, pattern, gridSize, ext=args.ext, overwrite=args.overwrite3d)
    dataset = ImageFolder(path, image=image, annot='chessboard', ext=args.ext)
    dataset.isTmp = False
    trange = list(range(len(dataset)))
    threads = []
    for i in range(args.mp):
        # each thread gets an interleaved, disjoint subset of the frames
        ranges = trange[i::args.mp]
        datas = [dataset[t] for t in ranges]
        thread = threading.Thread(target=_detect_chessboard, args=(datas, path, image, out, pattern)) # subsets are disjoint, so no data races are expected
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()
13,338 | from easymocap.annotator.file_utils import getFileList, read_json, save_json
from easymocap.mytools.debug_utils import mywarn
from tqdm import tqdm
from easymocap.annotator import ImageFolder
from easymocap.annotator.chessboard import findChessboardCorners
import numpy as np
from os.path import join
import cv2
import os
import func_timeout
import threading
from easymocap.mytools.debug_utils import log
def create_chessboard(path, image, pattern, gridSize, ext, overwrite=True):
    """Write a chessboard annotation template JSON next to every image.

    New annotation files get the full template; existing files are either
    left alone (overwrite=False) or get only their 3D template refreshed
    while keeping the 2D detections (overwrite=True).
    """
    print('Create chessboard {}'.format(pattern))
    corners3d = getChessboard3d(pattern, gridSize=gridSize, axis=args.axis)
    corners2d = np.zeros((corners3d.shape[0], 3))
    template = {
        'keypoints3d': corners3d.tolist(),
        'keypoints2d': corners2d.tolist(),
        'pattern': pattern,
        'grid_size': gridSize,
        'visited': False
    }
    for imgname in tqdm(getFileList(join(path, image), ext=ext), desc='create template chessboard'):
        annname = join(path, 'chessboard', imgname.replace(ext, '.json'))
        if not os.path.exists(annname):
            save_json(annname, template)
        elif overwrite:
            # refresh only the canonical 3D corners; keep 2D annotations
            data = read_json(annname)
            data['keypoints3d'] = template['keypoints3d']
            save_json(annname, data)
def _detect_by_search(path, image, out, pattern, sub):
    """Detect chessboards in one camera's frames via interval bisection.

    Coarse intervals of args.max_step frames are proposed; an interval whose
    two endpoint frames both lack a chessboard is marked visited and skipped
    entirely, otherwise it is bisected until shorter than args.min_step.
    NOTE(review): frames past the last full max_step interval are never
    proposed (`nf + init_step < len` check) — confirm this tail skip is intended.
    """
    dataset = ImageFolder(path, sub=sub, annot='chessboard', ext=args.ext)
    dataset.isTmp = False
    nFrames = len(dataset)
    found = np.zeros(nFrames, dtype=bool)
    visited = np.zeros(nFrames, dtype=bool)
    proposals = []
    init_step = args.max_step
    min_step = args.min_step
    for nf in range(0, nFrames, init_step):
        if nf + init_step < len(dataset):
            proposals.append([nf, nf+init_step])
    while len(proposals) > 0:
        left, right = proposals.pop(0)
        print('[detect] {} {:4.1f}% Check [{:5d}, {:5d}]'.format(
            sub, visited.sum()/visited.shape[0]*100, left, right), end=' ')
        for nf in [left, right]:
            if not visited[nf]:
                visited[nf] = True
                imgname, annotname = dataset[nf]
                # detect the 2d chessboard
                img = cv2.imread(imgname)
                annots = read_json(annotname)
                try:
                    show = findChessboardCorners(img, annots, pattern)
                except func_timeout.exceptions.FunctionTimedOut:
                    show = None
                save_json(annotname, annots)
                if show is None:
                    if args.debug:
                        print('[Info] Cannot find chessboard in {}'.format(imgname))
                    found[nf] = False
                    continue
                found[nf] = True
                outname = join(out, imgname.replace(path + '{}{}{}'.format(os.sep, image, os.sep), ''))
                os.makedirs(os.path.dirname(outname), exist_ok=True)
                if isinstance(show, np.ndarray):
                    cv2.imwrite(outname, show)
        print('{}-{}'.format('o' if found[left] else 'x', 'o' if found[right] else 'x'))
        if not found[left] and not found[right]:
            # no board at either end: assume the whole interval is empty
            visited[left:right] = True
            continue
        mid = (left+right)//2
        if mid == left or mid == right:
            continue
        if mid - left > min_step:
            proposals.append((left, mid))
        if right - mid > min_step:
            proposals.append((mid, right))
def read_json(path):
    """Load and return the JSON content of *path*."""
    with open(path, 'r') as fp:
        return json.load(fp)
def log(text):
    # Thin wrapper: emit *text* at 'info' level via the project's myprint.
    myprint(text, 'info')
def detect_chessboard_sequence(path, image, out, pattern, gridSize, args):
    """Run chessboard detection per camera folder in parallel processes,
    then report how many visited frames contain a detected board."""
    create_chessboard(path, image, pattern, gridSize, ext=args.ext, overwrite=args.overwrite3d)
    subs = sorted(os.listdir(join(path, image)))
    subs = [s for s in subs if os.path.isdir(join(path, image, s))]
    if len(subs) == 0:
        subs = [None]
    from multiprocessing import Process
    tasks = []
    for sub in subs:
        task = Process(target=_detect_by_search, args=(path, image, out, pattern, sub))
        task.start()
        tasks.append(task)
    for task in tasks:
        task.join()
    # summarize detection statistics for each camera
    for sub in subs:
        dataset = ImageFolder(path, sub=sub, annot='chessboard', ext=args.ext)
        dataset.isTmp = False
        count, visited = 0, 0
        for nf in range(len(dataset)):
            imgname, annotname = dataset[nf]
            # detect the 2d chessboard
            annots = read_json(annotname)
            if annots['visited']:
                visited += 1
                # a positive confidence on the first corner marks a detection
                if annots['keypoints2d'][0][-1] > 0.01:
                    count += 1
        log('{}: found {:4d}/{:4d} frames'.format(sub, count, visited))
13,339 | from easymocap.annotator.file_utils import getFileList, read_json, save_json
from easymocap.mytools.debug_utils import mywarn
from tqdm import tqdm
from easymocap.annotator import ImageFolder
from easymocap.annotator.chessboard import findChessboardCorners
import numpy as np
from os.path import join
import cv2
import os
import func_timeout
import threading
from easymocap.mytools.debug_utils import log
def mywarn(text):
    # Thin wrapper: emit *text* at 'warn' level via the project's myprint.
    myprint(text, 'warn')
def check_chessboard(path, out):
    """Warn about camera views whose chessboard output folder is missing,
    and print the manual-annotation command for those views."""
    subs_notvalid = [sub for sub in sorted(os.listdir(join(path, 'images')))
                     if not os.path.exists(join(out, sub))]
    print(subs_notvalid)
    mywarn('Cannot find chessboard in view {}'.format(subs_notvalid))
    mywarn('Please annot them manually:')
    mywarn(f'python3 apps/annotation/annot_calib.py {path} --mode chessboard --annot chessboard --sub {" ".join(subs_notvalid)}')
13,340 | import os
from os.path import join
import shutil
from easymocap.mytools.debug_utils import log, myerror, mywarn, run_cmd, mkdir
from easymocap.mytools.colmap_wrapper import colmap_feature_extract, colmap_feature_match
from tqdm import tqdm
def copy_images(data, out, nf=0):
    """Copy frame *nf* of every camera folder under *data* into *out*.

    Each source file data/<sub>/<nf:06d>.jpg is copied to out/<sub>.jpg.
    Returns the list of destination paths, in sorted camera order.
    """
    image_names = []
    for sub in sorted(os.listdir(data)):
        src = join(data, sub, '{:06d}.jpg'.format(nf))
        dst = join(out, '{}.jpg'.format(sub))
        os.makedirs(os.path.dirname(dst), exist_ok=True)
        shutil.copyfile(src, dst)
        image_names.append(dst)
    return image_names
def log(text):
    # Thin wrapper: emit *text* at 'info' level via the project's myprint.
    myprint(text, 'info')
def copy_to_newdir(path, out, num):
    """Copy the static multi-view frame and a sampled subset of scan images.

    Frame 0 of every camera under path/images is copied to out/images/static;
    the scan images under path/scan are subsampled to roughly *num* files
    (num == -1 keeps all) and copied to out/images/scan.
    Returns (static image paths, scan image paths).
    """
    statics = copy_images(join(path, 'images'), join(out, 'images', 'static'), nf=0)
    scannames = sorted(os.listdir(join(path, 'scan')))
    if num != -1:
        log('[copy] sample {} from {} images'.format(num, len(scannames)))
        # BUG fix: len(scannames)//num is 0 when num > len(scannames),
        # which makes the slice step invalid (ValueError) — clamp to 1.
        step = max(1, len(scannames) // num)
        scannames = scannames[::step]
    scans = []
    for name in tqdm(scannames):
        srcname = join(path, 'scan', name)
        dstname = join(out, 'images', 'scan', name)
        os.makedirs(os.path.dirname(dstname), exist_ok=True)
        shutil.copyfile(srcname, dstname)
        scans.append(dstname)
    return statics, scans
13,341 | import os
from os.path import join
import shutil
from easymocap.mytools.debug_utils import log, myerror, mywarn, run_cmd, mkdir
from easymocap.mytools.colmap_wrapper import colmap_feature_extract, colmap_feature_match
from tqdm import tqdm
def run_cmd(cmd, verbo=True, bg=False):
    """Run a shell command.

    With bg=True the command is launched detached via subprocess.Popen and
    the [Popen] handle is returned; otherwise it blocks on os.system and
    returns []. verbo=True logs the command line first.
    """
    if verbo: myprint('[run] ' + cmd, 'run')
    if bg:
        # naive whitespace split — quoted arguments are not handled
        args = cmd.split()
        print(args)
        p = subprocess.Popen(args)
        return [p]
    else:
        os.system(cmd)
        return []
def mkdir(path):
    """Create directory *path* (with parents) unless it already exists.

    Returns 0 when the directory was already present, None after creating it.
    """
    if not os.path.exists(path):
        log('mkdir {}'.format(path))
        os.makedirs(path, exist_ok=True)
    else:
        return 0
def colmap_feature_extract(colmap, path, share_camera, add_mask, gpu=False,
    share_camera_per_folder=False):
    '''Run `colmap feature_extractor` on {path}/images into {path}/database.db.

    share_camera -> --ImageReader.single_camera; share_camera_per_folder
    takes precedence and maps to --ImageReader.single_camera_per_folder;
    add_mask points the reader at {path}/mask; gpu enables SIFT on GPU 0.
    Output is appended to {path}/log.txt.

    COLMAP source reference kept below for the SIFT matching options:
    struct SiftMatchingOptions {
        // Number of threads for feature matching and geometric verification.
        int num_threads = -1;
        // Whether to use the GPU for feature matching.
        bool use_gpu = true;
        // Index of the GPU used for feature matching. For multi-GPU matching,
        // you should separate multiple GPU indices by comma, e.g., "0,1,2,3".
        std::string gpu_index = "-1";
        // Maximum distance ratio between first and second best match.
        double max_ratio = 0.8;
        // Maximum distance to best match.
        double max_distance = 0.7;
        // Whether to enable cross checking in matching.
        bool cross_check = true;
        // Maximum number of matches.
        int max_num_matches = 32768;
        // Maximum epipolar error in pixels for geometric verification.
        double max_error = 4.0;
        // Confidence threshold for geometric verification.
        double confidence = 0.999;
        // Minimum/maximum number of RANSAC iterations. Note that this option
        // overrules the min_inlier_ratio option.
        int min_num_trials = 100;
        int max_num_trials = 10000;
        // A priori assumed minimum inlier ratio, which determines the maximum
        // number of iterations.
        double min_inlier_ratio = 0.25;
        // Minimum number of inliers for an image pair to be considered as
        // geometrically verified.
        int min_num_inliers = 15;
        // Whether to attempt to estimate multiple geometric models per image pair.
        bool multiple_models = false;
        // Whether to perform guided matching, if geometric verification succeeds.
        bool guided_matching = false;
        bool Check() const;
    };
    '''
    cmd = f'{colmap} feature_extractor --database_path {path}/database.db \
        --image_path {path}/images \
        --SiftExtraction.peak_threshold 0.006 \
        --ImageReader.camera_model OPENCV \
        '
    if share_camera and not share_camera_per_folder:
        cmd += ' --ImageReader.single_camera 1'
    elif share_camera_per_folder:
        cmd += ' --ImageReader.single_camera_per_folder 1'
    if gpu:
        cmd += ' --SiftExtraction.use_gpu 1'
        cmd += ' --SiftExtraction.gpu_index 0'
    if add_mask:
        cmd += f' --ImageReader.mask_path {path}/mask'
    cmd += f' >> {path}/log.txt'
    run_cmd(cmd)
def colmap_feature_match(colmap, path, gpu=False):
    """Run `colmap exhaustive_matcher` on {path}/database.db with fixed SIFT
    matching thresholds; output is appended to {path}/log.txt."""
    cmd = f'{colmap} exhaustive_matcher --database_path {path}/database.db \
        --SiftMatching.guided_matching 0 \
        --SiftMatching.max_ratio 0.8 \
        --SiftMatching.max_distance 0.5 \
        --SiftMatching.cross_check 1 \
        --SiftMatching.max_error 4 \
        --SiftMatching.max_num_matches 32768 \
        --SiftMatching.confidence 0.9999 \
        --SiftMatching.max_num_trials 10000 \
        --SiftMatching.min_inlier_ratio 0.25 \
        --SiftMatching.min_num_inliers 30'
    if gpu:
        cmd += ' --SiftMatching.use_gpu 1'
        cmd += ' --SiftMatching.gpu_index 0'
    cmd += f' >> {path}/log.txt'
    run_cmd(cmd)
def sparse_recon(path, statics, scans, colmap):
    """Sparse COLMAP reconstruction: extract features (intrinsics shared per
    folder), match exhaustively, then run the mapper into {path}/sparse.

    NOTE(review): `statics` and `scans` are accepted but unused here; the
    function also reads the module-level `args.gpu`.
    """
    colmap_feature_extract(colmap, path, share_camera=False, add_mask=False, gpu=args.gpu,
        share_camera_per_folder=True)
    colmap_feature_match(colmap, path, gpu=args.gpu)
    mkdir(join(path, 'sparse'))
    cmd = f'{colmap} mapper --database_path {path}/database.db --image_path {path}/images --output_path {path}/sparse \
        --Mapper.ba_refine_principal_point 1 \
        --Mapper.ba_global_max_num_iterations 1000 \
        '
    run_cmd(cmd)
13,344 | import os
import sys
import collections
import numpy as np
import struct
import cv2
def rotmat2qvec(R):
    """Convert a 3x3 rotation matrix to a quaternion (w, x, y, z).

    Builds the symmetric 4x4 matrix K from the entries of R and takes the
    eigenvector of its largest eigenvalue (eigen-decomposition method);
    the returned quaternion always has a non-negative scalar part.
    """
    a, b, c, d, e, f, g, h, i = R.flat
    K = np.array([
        [a - e - i, 0, 0, 0],
        [b + d, e - a - i, 0, 0],
        [c + g, f + h, i - a - e, 0],
        [h - f, c - g, d - b, a + e + i]]) / 3.0
    # eigh reads the lower triangle, so the zero upper entries are fine
    eigvals, eigvecs = np.linalg.eigh(K)
    qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)]
    return qvec if qvec[0] >= 0 else -qvec
13,351 | from easymocap.config import Config, load_object
import open3d as o3d
from easymocap.visualize.o3dwrapper import Vector3dVector, create_mesh, create_coord
import numpy as np
Vector3dVector = o3d.utility.Vector3dVector
def update_vis(vis, mesh, body_model, params):
    """Re-pose *mesh* using *params* via *body_model* and refresh the Open3D
    visualizer *vis*.

    BUG fix: the original called vis.update_geometry(model) with the
    undefined name `model` (NameError); the geometry being updated is `mesh`.
    """
    vertices = body_model(return_verts=True, return_tensor=False, **params)[0]
    mesh.vertices = Vector3dVector(vertices)
    vis.update_geometry(mesh)
    vis.poll_events()
    vis.update_renderer()
13,352 | from easymocap.mytools.reader import read_smpl
import socket
import time
from easymocap.socket.base_client import BaseSocketClient
import os
def send_rand(client):
    """Send one frame of random 3D skeletons for 10 people to *client*,
    jitter the positions once, then close the connection."""
    import numpy as np
    n_person = 10
    datas = []
    for pid in range(n_person):
        offset = (np.random.rand(1, 3) - 0.5) * 3
        kpts = np.random.rand(25, 4)
        kpts[:, :3] += offset
        datas.append({'id': pid, 'keypoints3d': kpts})
    for _ in range(1):
        for pid in range(n_person):
            jitter = (np.random.rand(1, 3) - 0.5) * 0.1
            datas[pid]['keypoints3d'][:, :3] += jitter
        client.send(datas)
        time.sleep(0.005)
    client.close()
13,353 | from easymocap.mytools.reader import read_smpl
import socket
import time
from easymocap.socket.base_client import BaseSocketClient
import os
def read_keypoints3d(filename):
    """Parse one frame's JSON into a list of skeleton records.

    Each record carries the person id, a 'body25' type tag, and any of the
    keypoint arrays found ('keypoints3d', 'handl3d', 'handr3d', 'face3d') as
    float32 arrays; 3-column arrays get a confidence column of ones appended.
    """
    res_ = []
    for item in read_json(filename):
        pid = item['id'] if 'id' in item.keys() else item['personID']
        record = {'id': pid, 'type': 'body25'}
        for key in ['keypoints3d', 'handl3d', 'handr3d', 'face3d']:
            if key not in item.keys():
                continue
            pose3d = np.array(item[key], dtype=np.float32)
            if pose3d.shape[1] == 3:
                # pad a confidence column of ones
                pose3d = np.hstack([pose3d, np.ones((pose3d.shape[0], 1))])
            record[key] = pose3d
        res_.append(record)
    return res_
def read_smpl(filename):
    """Load SMPL parameter records from *filename*.

    Accepts either a list of records or a dict with an 'annots' list; every
    known parameter field is converted in place to a float32 array.
    """
    records = read_json(filename)
    if isinstance(records, dict):
        records = records['annots']
    array_keys = ['Rh', 'Th', 'poses', 'handl', 'handr', 'shapes', 'expression', 'keypoints3d']
    outputs = []
    for rec in records:
        for key in array_keys:
            if key in rec.keys():
                rec[key] = np.array(rec[key], dtype=np.float32)
        # for smplx results
        outputs.append(rec)
    return outputs
def send_dir(client, path, step):
    """Replay saved per-frame results from *path* to *client*.

    Every *step*-th JSON file (sorted) is sent; with args.smpl the files are
    parsed as SMPL parameters and sent via send_smpl, otherwise as 3D
    keypoints via send. A short sleep throttles the stream.
    NOTE(review): depends on the module-level `args`.
    """
    from os.path import join
    from glob import glob
    from tqdm import tqdm
    from easymocap.mytools.reader import read_keypoints3d
    results = sorted(glob(join(path, '*.json')))
    for result in tqdm(results[::step]):
        if args.smpl:
            data = read_smpl(result)
            client.send_smpl(data)
        else:
            data = read_keypoints3d(result)
            client.send(data)
        time.sleep(0.005)
13,354 | import Config, load_object
from tqdm import tqdm
def process(dataset, model, args):
    """Run *model* over every sample of *dataset*.

    With args.num_workers == -1 the dataset is indexed directly in process;
    otherwise a torch DataLoader feeds samples. Per-step results are
    collected and passed to model.at_final unless args.skip_final is set.
    """
    ret_all = []
    print('[Run] dataset has {} samples'.format(len(dataset)))
    if args.num_workers == -1:
        for i in tqdm(range(len(dataset)), desc='[Run]'):
            data = dataset[i]
            ret = model.at_step(data, i)
            # consistency fix: honor skip_final here as the dataloader branch
            # already does, so results are not accumulated (and held in
            # memory) when the final stage will be skipped anyway
            if not args.skip_final:
                ret_all.append(ret)
    else:
        import torch
        dataloader = torch.utils.data.DataLoader(dataset,
            batch_size=1, num_workers=args.num_workers, shuffle=False, collate_fn=lambda x:x, drop_last=False)
        index = 0
        for data in tqdm(dataloader, desc='[Run]'):
            data = data[0]
            ret = model.at_step(data, index)
            if not args.skip_final:
                ret_all.append(ret)
            index += 1
    if not args.skip_final:
        ret_all = model.at_final(ret_all)
def load_cfg_from_file(cfg, args):
    """Load the data and experiment configs referenced by one top-level
    config file, merging its opt overrides and the CLI arguments."""
    main_cfg = Config.load(cfg)
    cfg_data = Config.load(main_cfg.data)
    cfg_data.args.merge_from_other_cfg(main_cfg.data_opts)
    cfg_data = update_data_by_args(cfg_data, args)
    cfg_exp = Config.load(main_cfg.exp)
    cfg_exp.args.merge_from_other_cfg(main_cfg.exp_opts)
    update_exp_by_args(cfg_exp, args)
    return cfg_data, cfg_exp
def load_cfg_from_cmd(args):
    """Load the data and experiment configs directly from the --data/--exp
    CLI options, applying their opt overrides and the CLI arguments."""
    data_cfg = Config.load(args.data, args.opt_data)
    data_cfg = update_data_by_args(data_cfg, args)
    exp_cfg = Config.load(args.exp, args.opt_exp)
    update_exp_by_args(exp_cfg, args)
    return data_cfg, exp_cfg
def main_entrypoint():
    """CLI entry point: parse arguments, resolve the data/exp configs (from
    --cfg or from --data/--exp), dump the resolved configs to the output
    directory, construct the dataset and model, and run `process`.
    NOTE(review): uses `os` — confirm it is imported at module level.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, default=None)
    for name in ['data', 'exp']:
        parser.add_argument('--{}'.format(name), type=str, required=False)
        parser.add_argument('--opt_{}'.format(name), type=str, nargs='+', default=[])
    parser.add_argument('--root', type=str, default=None)
    parser.add_argument('--subs', type=str, default=None, nargs='+')
    parser.add_argument('--subs_vis', type=str, default=None, nargs='+')
    parser.add_argument('--ranges', type=int, default=None, nargs=3)
    parser.add_argument('--cameras', type=str, default=None, help='Camera file path')
    parser.add_argument('--out', type=str, default=None)
    parser.add_argument('--num_workers', type=int, default=-1)
    parser.add_argument('--skip_vis', action='store_true')
    parser.add_argument('--skip_vis_step', action='store_true')
    parser.add_argument('--skip_vis_final', action='store_true')
    parser.add_argument('--skip_final', action='store_true')
    parser.add_argument('--debug', action='store_true')
    args = parser.parse_args()
    if args.cfg is not None:
        cfg_data, cfg_exp = load_cfg_from_file(args.cfg, args)
    else:
        cfg_data, cfg_exp = load_cfg_from_cmd(args)
    if args.out is not None:
        cfg_exp.args.output = args.out
    out = cfg_exp.args.output
    os.makedirs(out, exist_ok=True)
    # persist the resolved configs alongside the outputs for reproducibility
    print(cfg_data, file=open(os.path.join(out, 'cfg_data.yml'), 'w'))
    print(cfg_exp, file=open(os.path.join(out, 'cfg_exp.yml'), 'w'))
    dataset = load_object(cfg_data.module, cfg_data.args)
    print(dataset)
    model = load_object(cfg_exp.module, cfg_exp.args)
    process(dataset, model, args)
13,355 | from operator import imod
import numpy as np
from tqdm import tqdm
from os.path import join
from easymocap.dataset.mv1pmf_mirror import ImageFolderMirror as ImageFolder
from easymocap.mytools import Timer
from easymocap.smplmodel import load_model, merge_params, select_nf
from easymocap.estimator import SPIN, init_with_spin
from easymocap.pipeline.mirror import multi_stage_optimize
def multi_stage_optimize(body_model, body_params, bboxes, keypoints2d, Pall, normal, args):
    """Two-stage mirrored-SMPL fitting.

    Stage 1 freezes the joint poses (global R/T and shape only); stage 2
    frees the poses. args.direct selects the direct mirror formulation over
    the soft one. Returns the optimized body parameters.
    """
    weight = load_weight_mirror(args.model, args.opts)
    config = Config()
    config.device = body_model.device
    config.verbose = args.verbose
    config.OPT_R = True
    config.OPT_T = True
    config.OPT_SHAPE = True
    with Timer('Optimize 2D Pose/{} frames'.format(keypoints2d.shape[1]), not args.verbose):
        if args.direct:
            config.OPT_POSE = False
            body_params = optimizeMirrorDirect(body_model, body_params, bboxes, keypoints2d, Pall, normal, weight, config)
            config.OPT_POSE = True
            body_params = optimizeMirrorDirect(body_model, body_params, bboxes, keypoints2d, Pall, normal, weight, config)
        else:
            config.OPT_POSE = False
            body_params = optimizeMirrorSoft(body_model, body_params, bboxes, keypoints2d, Pall, normal, weight, config)
            config.OPT_POSE = True
            body_params = optimizeMirrorSoft(body_model, body_params, bboxes, keypoints2d, Pall, normal, weight, config)
    return body_params
The provided code snippet includes the necessary dependencies for implementing the `demo_1v1p1f_smpl_mirror` function. Write a Python function `def demo_1v1p1f_smpl_mirror(path, body_model, spin_model, args)` to solve the following problem:
Optimization for a single image
Here is the function:
def demo_1v1p1f_smpl_mirror(path, body_model, spin_model, args):
    """Optimization for single image: per frame, fit SMPL jointly to the real
    person and their mirror reflection (ids 0 and 1), initialized by SPIN."""
    # 0. construct the dataset
    dataset = ImageFolder(path, out=args.out, kpts_type=args.body)
    if args.gtK:
        dataset.gtK = True
        dataset.load_gt_cameras()
    start, end = args.start, min(args.end, len(dataset))
    for nf in tqdm(range(start, end, args.step), desc='Optimizing'):
        image, annots = dataset[nf]
        # need both the person and the reflection
        if len(annots) < 2:
            continue
        annots = annots[:2]
        camera = dataset.camera(nf)
        # initialize the SMPL parameters
        body_params_all = []
        bboxes, keypoints2d, pids = [], [], []
        for i, annot in enumerate(annots):
            assert annot['id'] == i, (i, annot['id'])
            result = init_with_spin(body_model, spin_model, image,
                annot['bbox'], annot['keypoints'], camera)
            body_params_all.append(result['body_params'])
            bboxes.append(annot['bbox'])
            keypoints2d.append(annot['keypoints'])
            pids.append(annot['id'])
        bboxes = np.vstack(bboxes)
        keypoints2d = np.stack(keypoints2d)
        body_params = merge_params(body_params_all)
        # bboxes: (nViews(2), 1, 5); keypoints2d: (nViews(2), 1, nJoints, 3)
        bboxes = bboxes[:, None]
        keypoints2d = keypoints2d[:, None]
        if args.normal:
            normal = dataset.normal(nf)[None, :, :]
        else:
            normal = None
        body_params = multi_stage_optimize(body_model, body_params, bboxes, keypoints2d, Pall=camera['P'], normal=normal, args=args)
        vertices = body_model(return_verts=True, return_tensor=False, **body_params)
        keypoints = body_model(return_verts=False, return_tensor=False, **body_params)
        write_data = [{'id': pids[i], 'keypoints3d': keypoints[i]} for i in range(len(pids))]
        # write out the results
        dataset.write_keypoints3d(write_data, nf)
        for i in range(len(pids)):
            write_data[i].update(select_nf(body_params, i))
        if args.vis_smpl:
            # render the results
            render_data = {pids[i]: {
                'vertices': vertices[i],
                'faces': body_model.faces,
                'vid': 0, 'name': 'human_{}'.format(pids[i])} for i in range(len(pids))}
            dataset.vis_smpl(render_data, image, camera, nf)
        dataset.write_smpl(write_data, nf)
13,356 | from operator import imod
import numpy as np
from tqdm import tqdm
from os.path import join
from easymocap.dataset.mv1pmf_mirror import ImageFolderMirror as ImageFolder
from easymocap.mytools import Timer
from easymocap.smplmodel import load_model, merge_params, select_nf
from easymocap.estimator import SPIN, init_with_spin
from easymocap.pipeline.mirror import multi_stage_optimize
def multi_stage_optimize(body_model, body_params, bboxes, keypoints2d, Pall, normal, args):
def demo_1v1pmf_smpl_mirror(path, body_model, spin_model, args):
    """Fit SMPL jointly over multiple frames for the real and mirrored person
    (ids 0 and 1) in each requested sub-folder, initialized by SPIN."""
    subs = args.sub
    assert len(subs) > 0
    # iterate over all sub-folders
    for sub in subs:
        dataset = ImageFolder(path, subs=[sub], out=args.out, kpts_type=args.body)
        start, end = args.start, min(args.end, len(dataset))
        frames = list(range(start, end, args.step))
        nFrames = len(frames)
        pids = [0, 1]
        body_params_all = {pid:[None for nf in frames] for pid in pids}
        bboxes = {pid:[None for nf in frames] for pid in pids}
        keypoints2d = {pid:[None for nf in frames] for pid in pids}
        for nf in tqdm(frames, desc='loading'):
            image, annots = dataset[nf]
            # if annots are incomplete here we cannot simply skip the frame;
            # the missing entries would need to be filled in
            camera = dataset.camera(nf)
            # initialize each person's SMPL parameters
            for i, annot in enumerate(annots):
                pid = annot['id']
                if pid not in pids:
                    continue
                result = init_with_spin(body_model, spin_model, image,
                    annot['bbox'], annot['keypoints'], camera)
                body_params_all[pid][nf-start] = result['body_params']
                bboxes[pid][nf-start] = annot['bbox']
                keypoints2d[pid][nf-start] = annot['keypoints']
        # stack [p1f1, p1f2, p1f3, ..., p1fn, p2f1, p2f2, p2f3, ..., p2fn]
        # TODO:for missing bbox
        body_params = merge_params([merge_params(body_params_all[pid]) for pid in pids])
        # bboxes: (nViews, nFrames, 5)
        bboxes = np.stack([np.stack(bboxes[pid]) for pid in pids])
        # keypoints: (nViews, nFrames, nJoints, 3)
        keypoints2d = np.stack([np.stack(keypoints2d[pid]) for pid in pids])
        # optimize
        P = dataset.camera(start)['P']
        if args.normal:
            normal = dataset.normal_all(start=start, end=end)
        else:
            normal = None
        body_params = multi_stage_optimize(body_model, body_params, bboxes, keypoints2d, Pall=P, normal=normal, args=args)
        # write
        vertices = body_model(return_verts=True, return_tensor=False, **body_params)
        keypoints = body_model(return_verts=False, return_tensor=False, **body_params)
        dataset.no_img = not args.vis_smpl
        for nf in tqdm(frames, desc='rendering'):
            idx = nf - start
            write_data = [{'id': pids[i], 'keypoints3d': keypoints[i*nFrames+idx]} for i in range(len(pids))]
            dataset.write_keypoints3d(write_data, nf)
            for i in range(len(pids)):
                write_data[i].update(select_nf(body_params, i*nFrames+idx))
            dataset.write_smpl(write_data, nf)
            # save the visualization
            if args.vis_smpl:
                image, annots = dataset[nf]
                camera = dataset.camera(nf)
                render_data = {pids[i]: {
                    'vertices': vertices[i*nFrames+idx],
                    'faces': body_model.faces,
                    'vid': 0, 'name': 'human_{}'.format(pids[i])} for i in range(len(pids))}
                dataset.vis_smpl(render_data, image, camera, nf)
13,357 | from tqdm import tqdm
from easymocap.smplmodel import check_keypoints, load_model, select_nf
from easymocap.mytools import simple_recon_person, Timer, projectN3
from easymocap.pipeline import smpl_from_keypoints3d2d
import os
from os.path import join
import numpy as np
def check_repro_error(keypoints3d, kpts_repro, keypoints2d, P, MAX_REPRO_ERROR):
    """Reject 2D detections with a large reprojection error and re-triangulate.

    A joint counts only where both the 3D point and the 2D detection have
    positive confidence. Detections farther than MAX_REPRO_ERROR pixels from
    their reprojection get their confidence zeroed (keypoints2d is mutated
    in place) and the person is re-triangulated with simple_recon_person.
    Returns (keypoints3d, kpts_repro), unchanged if nothing was rejected.

    Cleanup: removed dead code from the original — an unused `square_diff`
    and a first `conf` assignment that was immediately overwritten.
    """
    conf = (keypoints3d[None, :, -1:] > 0) * (keypoints2d[:, :, -1:] > 0)
    dist = np.sqrt((((kpts_repro[..., :2] - keypoints2d[..., :2])*conf)**2).sum(axis=-1))
    vv, jj = np.where(dist > MAX_REPRO_ERROR)
    if vv.shape[0] > 0:
        keypoints2d[vv, jj, -1] = 0.
        keypoints3d, kpts_repro = simple_recon_person(keypoints2d, P)
    return keypoints3d, kpts_repro
def mv1pmf_skel(dataset, check_repro=True, args=None):
    """Multi-view single-person triangulation over [args.start, args.end).

    Per frame: filter 2D keypoints by confidence, triangulate with all camera
    projections, optionally reject outliers and re-triangulate, then smooth
    the sequence and dump 3D skeletons.
    """
    MIN_CONF_THRES = args.thres2d
    # skip image loading when no visualization is requested
    no_img = not (args.vis_det or args.vis_repro)
    dataset.no_img = no_img
    kp3ds = []
    start, end = args.start, min(args.end, len(dataset))
    kpts_repro = None
    for nf in tqdm(range(start, end), desc='triangulation'):
        images, annots = dataset[nf]
        check_keypoints(annots['keypoints'], WEIGHT_DEBUFF=1, min_conf=MIN_CONF_THRES)
        keypoints3d, kpts_repro = simple_recon_person(annots['keypoints'], dataset.Pall)
        if check_repro:
            keypoints3d, kpts_repro = check_repro_error(keypoints3d, kpts_repro, annots['keypoints'], P=dataset.Pall, MAX_REPRO_ERROR=args.MAX_REPRO_ERROR)
        # keypoints3d, kpts_repro = robust_triangulate(annots['keypoints'], dataset.Pall, config=config, ret_repro=True)
        kp3ds.append(keypoints3d)
        if args.vis_det:
            dataset.vis_detections(images, annots, nf, sub_vis=args.sub_vis)
        if args.vis_repro:
            dataset.vis_repro(images, kpts_repro, nf=nf, sub_vis=args.sub_vis)
    # smooth the skeleton
    if args.smooth3d > 0:
        kp3ds = smooth_skeleton(kp3ds, args.smooth3d)
    for nf in tqdm(range(len(kp3ds)), desc='dump'):
        dataset.write_keypoints3d(kp3ds[nf], nf+start)
13,358 | from tqdm import tqdm
from easymocap.smplmodel import check_keypoints, load_model, select_nf
from easymocap.mytools import simple_recon_person, Timer, projectN3
from easymocap.pipeline import smpl_from_keypoints3d2d
import os
from os.path import join
import numpy as np
def mv1pmf_smpl(dataset, args, weight_pose=None, weight_shape=None):
    """Fit SMPL to pre-triangulated 3D skeletons of a single person.

    Loads 2D keypoints/bboxes and the stored 3D skeletons for
    [args.start, args.end), optimizes SMPL shape and pose against them, then
    writes the parameters (optionally full poses, vertices, renderings and
    reprojection visualizations) per frame.
    """
    dataset.skel_path = args.skel
    kp3ds = []
    start, end = args.start, min(args.end, len(dataset))
    keypoints2d, bboxes = [], []
    dataset.no_img = True
    for nf in tqdm(range(start, end), desc='loading'):
        images, annots = dataset[nf]
        keypoints2d.append(annots['keypoints'])
        bboxes.append(annots['bbox'])
    kp3ds = dataset.read_skeleton(start, end)
    keypoints2d = np.stack(keypoints2d)
    bboxes = np.stack(bboxes)
    kp3ds = check_keypoints(kp3ds, 1)
    # optimize the human shape
    with Timer('Loading {}, {}'.format(args.model, args.gender), not args.verbose):
        body_model = load_model(gender=args.gender, model_type=args.model)
    params = smpl_from_keypoints3d2d(body_model, kp3ds, keypoints2d, bboxes,
        dataset.Pall, config=dataset.config, args=args,
        weight_shape=weight_shape, weight_pose=weight_pose)
    # write out the results
    dataset.no_img = not (args.vis_smpl or args.vis_repro)
    for nf in tqdm(range(start, end), desc='render'):
        images, annots = dataset[nf]
        param = select_nf(params, nf-start)
        dataset.write_smpl(param, nf)
        if args.write_smpl_full:
            param_full = param.copy()
            param_full['poses'] = body_model.full_poses(param['poses'])
            dataset.write_smpl(param_full, nf, mode='smpl_full')
        if args.write_vertices:
            vertices = body_model(return_verts=True, return_tensor=False, **param)
            write_data = [{'id': 0, 'vertices': vertices[0]}]
            dataset.write_vertices(write_data, nf)
        if args.vis_smpl:
            vertices = body_model(return_verts=True, return_tensor=False, **param)
            dataset.vis_smpl(vertices=vertices[0], faces=body_model.faces, images=images, nf=nf, sub_vis=args.sub_vis, add_back=True)
        if args.vis_repro:
            keypoints = body_model(return_verts=False, return_tensor=False, **param)[0]
            kpts_repro = projectN3(keypoints, dataset.Pall)
            dataset.vis_repro(images, kpts_repro, nf=nf, sub_vis=args.sub_vis, mode='repro_smpl')
13,359 | from easymocap.dataset import CONFIG
from easymocap.mytools import Timer
from easymocap.smplmodel import load_model, select_nf
from easymocap.mytools.reader import read_keypoints3d_all
from easymocap.mytools.file_utils import write_smpl
from easymocap.pipeline.weight import load_weight_pose, load_weight_shape
from easymocap.pipeline import smpl_from_keypoints3d
import os
from os.path import join
from tqdm import tqdm
def read_keypoints3d_all(path, key='keypoints3d', pids=[]):
    """Collect per-person tracks of 3D keypoints from a folder of per-frame jsons.

    Args:
        path: folder containing one ``<frame>.json`` per frame.
        key: field to collect from each person record.
        pids: optional whitelist of person ids; empty means keep everyone.

    Returns:
        (results, filenames): results maps pid -> {key: data, 'frames': [...]},
        filenames is the sorted list of json files that were read.
    """
    assert os.path.exists(path), '{} not exists!'.format(path)
    filenames = sorted(glob(join(path, '*.json')))
    results = {}
    for filename in filenames:
        frame = int(os.path.basename(filename).replace('.json', ''))
        for record in read_keypoints3d(filename):
            pid = record['id']
            if len(pids) > 0 and pid not in pids:
                continue
            # NOTE: the sequence's starting frame offset is not considered here
            track = results.setdefault(pid, {key: [], 'frames': []})
            track[key].append(record[key])
            track['frames'].append(frame)
    if key == 'keypoints3d':
        # stack per-frame arrays into a single (nFrames, nJoints, 4) array
        for track in results.values():
            track[key] = np.stack(track[key])
    return results, filenames
def write_smpl(dumpname, results):
    """Dump per-person SMPL(-H/X)/MANO parameters to a json file.

    Only the listed parameter fields are serialized; anything else in each
    result dict is ignored by write_common_results.
    """
    param_keys = ['Rh', 'Th', 'poses', 'handl', 'handr', 'expression', 'shapes']
    write_common_results(dumpname, results, param_keys)
def load_weight_shape(model, opts):
    """Return the loss weights for the shape-fitting stage.

    Args:
        model: body model name ('smpl', 'smplh', 'smplx' or 'mano').
        opts: command-line overrides; only keys already present in the
            default weight dict are honored.

    Raises:
        NotImplementedError: for unknown model names.
    """
    if model == 'mano':
        weight = {'s3d': 1e2, 'reg_shapes': 5e-5}
    elif model in ('smpl', 'smplh', 'smplx'):
        weight = {'s3d': 1., 'reg_shapes': 5e-3}
    else:
        raise NotImplementedError
    # apply user overrides for known weights only
    for key, val in opts.items():
        if key in weight:
            weight[key] = val
    return weight
def load_weight_pose(model, opts):
    """Return the loss weights for the pose-fitting stage.

    Args:
        model: body model name ('smpl', 'smplh', 'smplx' or 'mano').
        opts: command-line overrides; only keys already present in the
            default weight dict are honored.

    Raises:
        NotImplementedError: for unknown model names (the name is printed first).
    """
    defaults = {
        'smpl': {
            'k3d': 1., 'reg_poses_zero': 1e-2, 'smooth_body': 5e0,
            'smooth_poses': 1e0, 'reg_poses': 1e-3,
            'k2d': 1e-4
        },
        'smplh': {
            'k3d': 1., 'k3d_hand': 5.,
            'reg_poses_zero': 1e-2,
            'smooth_body': 5e-1, 'smooth_poses': 1e-1, 'smooth_hand': 1e-3,
            'reg_hand': 1e-4,
            'k2d': 1e-4
        },
        'smplx': {
            'k3d': 1., 'k3d_hand': 5., 'k3d_face': 2.,
            'reg_poses_zero': 1e-2,
            'smooth_body': 5e-1, 'smooth_poses': 1e-1, 'smooth_hand': 1e-3,
            'reg_hand': 1e-4, 'reg_expr': 1e-2, 'reg_head': 1e-2,
            'k2d': 1e-4
        },
        # NOTE: a 'collision' term existed but caused GPU OOM on long sequences
        'mano': {
            'k3d': 1e2, 'k2d': 2e-3,
            'reg_poses': 1e-3, 'smooth_body': 1e2,
        },
    }
    if model not in defaults:
        print(model)
        raise NotImplementedError
    weight = dict(defaults[model])
    # apply user overrides for known weights only
    for key, val in opts.items():
        if key in weight:
            weight[key] = val
    return weight
def smpl_from_skel(path, sub, out, skel3d, args):
    """Fit body-model parameters to previously triangulated 3D skeletons and
    write one json of SMPL parameters per frame into *out*.
    """
    config = CONFIG[args.body]
    results3d, filenames = read_keypoints3d_all(skel3d)
    pids = list(results3d.keys())
    weight_shape = load_weight_shape(args.model, args.opts)
    weight_pose = load_weight_pose(args.model, args.opts)
    with Timer('Loading {}, {}'.format(args.model, args.gender)):
        body_model = load_model(args.gender, model_type=args.model)
    # fit each person's whole track at once
    for pid, track in results3d.items():
        track['body_params'] = smpl_from_keypoints3d(
            body_model, track['keypoints3d'], config, args,
            weight_shape=weight_shape, weight_pose=weight_pose)
    # write per-frame results, keeping the input file names
    for nf, skelname in enumerate(tqdm(filenames, desc='writing')):
        outname = join(out, os.path.basename(skelname))
        frame_results = []
        for pid, track in results3d.items():
            if nf not in track['frames']:
                continue
            local_idx = track['frames'].index(nf)
            entry = {'id': pid}
            entry.update(select_nf(track['body_params'], local_idx))
            frame_results.append(entry)
        write_smpl(outname, frame_results)
13,360 | import os
from os.path import exists
from os.path import join
from easymocap.config import Config, CfgNode
from glob import glob
from easymocap.mytools.debug_utils import run_cmd, check_exists, myerror, log, mywarn
def check_image(path):
    """Ensure <path>/images exists; if only videos are present, extract frames."""
    if check_exists(join(path, 'images')):
        return
    mywarn('Images not found in {}'.format(path))
    if exists(join(path, 'videos')):
        run_cmd('python3 apps/preprocess/extract_image.py {}'.format(path))
def check_camera(path, mode):
    """Verify that camera calibration files exist under *path*.

    'scan' mode needs no calibration and returns 0 immediately.
    Raises FileNotFoundError (after logging) when intri.yml/extri.yml is missing.
    """
    if mode == 'scan':
        return 0
    intri = join(path, 'intri.yml')
    extri = join(path, 'extri.yml')
    if os.path.exists(intri) and os.path.exists(extri):
        return
    myerror('[error] No camera calibration found in {}'.format(path))
    raise FileNotFoundError
def format_subs(subs):
    """Render camera names as a double-quoted python-list literal, e.g.
    ['a','b'] -> "['a', 'b']" (outer quotes included), for shell forwarding."""
    quoted = ["'{}'".format(s) for s in subs]
    return '"[{}]"'.format(', '.join(quoted))
def log(text):
    """Print *text* at the 'info' level via easymocap's myprint."""
    myprint(text, 'info')
def run_cmd(cmd, verbo=True, bg=False):
    """Run a shell command.

    Args:
        cmd: the command line to execute.
        verbo: when True, echo the command through myprint first.
        bg: when True, launch the command in the background and return the
            process handle; otherwise block until it finishes.

    Returns:
        list: [Popen] for background commands, [] for blocking ones.
    """
    # BUGFIX: subprocess was used on the bg=True path but never imported,
    # raising NameError; import it locally here.
    import subprocess
    if verbo: myprint('[run] ' + cmd, 'run')
    if bg:
        args = cmd.split()
        print(args)
        p = subprocess.Popen(args)
        return [p]
    else:
        os.system(cmd)
        return []
def check_exists(path):
    """Heuristic 'output already produced' check.

    True when *path* is an existing file, or a directory holding at least
    10 entries (a partially written output folder counts as missing).
    """
    is_file = os.path.isfile(path) and os.path.exists(path)
    is_populated_dir = os.path.isdir(path) and len(os.listdir(path)) >= 10
    return is_file or is_populated_dir
def mocap_demo(path, mode, exp=None):
    """End-to-end multi-view mocap pipeline driver.

    Stages: image extraction check -> calibration check -> keypoint
    triangulation (skipped if outputs exist) -> SMPL fitting -> video export.
    Relies on the module-level ``args`` (CLI flags) and ``config_dict``.
    Builds and shells out python commands for each stage via run_cmd.
    """
    # check images
    check_image(path)
    # check camera
    check_camera(path, mode)
    # run triangulation
    if mode in ['object3d']:
        dir_k3d = join(path, 'output-object3d')
    else:
        dir_k3d = join(path, 'output-keypoints3d')
    # re-triangulate only when outputs are missing or a restart is forced
    if not check_exists(join(dir_k3d, 'keypoints3d')) or args.restart_mocap:
        # pick the data/experiment config pair for the requested mode
        if 'half' in mode:
            cfg_data = 'config/recon/mv1p.yml'
            cfg_exp = 'config/recon/mv1p-half.yml'
        elif mode == 'object3d':
            cfg_data = 'config/recon/mvobj.yml'
            cfg_exp = 'config/recon/tri-mvobj.yml'
        elif mode.startswith('smpl-3d-mp-wild'):
            cfg_data = 'config/recon/mvmp.yml'
            cfg_exp = 'config/recon/mvmp-wild.yml'
        elif args.mp:
            # In this mode, we just perform triangulation on matched 3d keypoints
            cfg_data = 'config/recon/mvmp.yml'
            cfg_exp = 'config/recon/mvmp-match.yml'
        else:
            cfg_data = 'config/recon/mv1p.yml'
            cfg_exp = 'config/recon/mv1p-total.yml'
        opt_data = f'args.path {path} args.out {dir_k3d}'
        if args.subs is not None:
            opt_data += ' args.subs {}'.format(args.subs)
        if args.subs_vis is not None:
            opt_data += ' args.subs_vis {}'.format(format_subs(args.subs_vis))
        if args.disable_visdetec:
            opt_data += ' args.writer.visdetect.enable False'
        if args.vismatch:
            opt_data += ' args.writer.vismatch.enable True'
        if args.disable_visrepro:
            opt_data += ' args.writer.visrepro.enable False'
        if args.disable_crop:
            opt_data += ' args.writer.vismatch.crop False args.writer.visdetect.crop False '
        if args.ranges is not None:
            opt_data += ' args.ranges {},{},{}'.format(*args.ranges)
        # config for experiment
        opt_exp = ' args.debug {}'.format('True' if args.debug else 'False')
        cmd = 'python3 apps/fit/triangulate1p.py --cfg_data {cfg_data} --opt_data {opt_data} --cfg_exp {cfg_exp} --opt_exp {opt_exp}'.format(
            cfg_data=cfg_data,
            cfg_exp=cfg_exp,
            opt_data=opt_data,
            opt_exp=opt_exp
        )
        run_cmd(cmd)
        # compose videos
        cmd = f'python3 -m easymocap.visualize.ffmpeg_wrapper {dir_k3d}/match --fps 50'
        run_cmd(cmd)
    # TODO: check triangulation
    # run reconstruction
    if mode in ['object3d']:
        return 0
    exp = mode if exp is None else exp
    if not check_exists(join(path, 'output-{}'.format(exp), 'smpl')) or args.restart:
        # load config
        config = config_dict[args.mode]
        cfg_data = config.data
        cfg_model = config.model
        cfg_exp = config.exp
        _config_data = Config.load(cfg_data)
        cmd = f'python3 apps/fit/fit.py --cfg_model {cfg_model} --cfg_data {cfg_data} --cfg_exp {cfg_exp}'
        # opt data
        output = join(path, 'output-{}'.format(exp))
        opt_data = ['args.path', path, 'args.out', output]
        opt_data += args.opt_data
        opt_data += config.get('opt_data', [])
        # only pass the camera path when the data config expects one
        if 'camera' in _config_data.args.keys():
            opt_data.extend(['args.camera', path])
        if args.ranges is not None:
            opt_data.extend(['args.ranges', '{},{},{}'.format(*args.ranges)])
        if args.subs is not None:
            opt_data.extend(["args.subs", "{}".format(args.subs)])
        if args.disable_vismesh:
            opt_data += ['args.writer.render.enable', 'False']
        if args.vis_scale is not None:
            opt_data += ['args.writer.render.scale', '{}'.format(args.vis_scale)]
        if args.vis_mode is not None:
            opt_data += ['args.writer.render.mode', args.vis_mode]
        if args.pids is not None and args.mp:
            opt_data += ['args.pids', ','.join(map(str, args.pids))]
        cmd += ' --opt_data "{}"'.format('" "'.join(opt_data))
        # opt model
        opt_model = config.get('opt_model', [])
        if len(opt_model) > 0:
            cmd += ' --opt_model "{}"'.format('" "'.join(opt_model))
        # opt exp
        opt_exp = ['args.monitor.printloss', "True"] + args.opt_exp
        opt_exp += config.get('opt_exp', [])
        if len(opt_exp) > 0:
            cmd += ' --opt_exp "{}"'.format('" "'.join(opt_exp))
        log(cmd.replace(output, '${output}').replace(path, '${data}'))
        run_cmd(cmd)
    videoname = join(path, 'output-{}'.format(exp), 'smplmesh.mp4')
    if not exists(videoname) or args.restart:
        cmd = 'python3 -m easymocap.visualize.ffmpeg_wrapper {data}/output-{exp}/smplmesh --fps 50'.format(
            data=path, exp=exp
        )
        run_cmd(cmd)
13,361 | import os
from os.path import exists
from os.path import join
from easymocap.config import Config, CfgNode
from glob import glob
from easymocap.mytools.debug_utils import run_cmd, check_exists, myerror, log, mywarn
def check_image(path):
    """Ensure <path>/images exists; if only videos are present, extract frames."""
    if not check_exists(join(path, 'images')):
        mywarn('Images not found in {}'.format(path))
        if exists(join(path, 'videos')):
            cmd = 'python3 apps/preprocess/extract_image.py {}'.format(path)
            run_cmd(cmd)
def format_subs(subs):
    """Render camera names as a double-quoted python-list literal, e.g.
    ['a','b'] -> "['a', 'b']" (outer quotes included), for shell forwarding."""
    subs = ', '.join(list(map(lambda x:"'{}'".format(x), subs)))
    subs = f'''"[{subs}]"'''
    return subs
def log(text):
    """Print *text* at the 'info' level via easymocap's myprint."""
    myprint(text, 'info')
def run_cmd(cmd, verbo=True, bg=False):
    """Run a shell command.

    Args:
        cmd: the command line to execute.
        verbo: when True, echo the command through myprint first.
        bg: when True, launch the command in the background and return the
            process handle; otherwise block until it finishes.

    Returns:
        list: [Popen] for background commands, [] for blocking ones.
    """
    # BUGFIX: subprocess was used on the bg=True path but never imported,
    # raising NameError; import it locally here.
    import subprocess
    if verbo: myprint('[run] ' + cmd, 'run')
    if bg:
        args = cmd.split()
        print(args)
        p = subprocess.Popen(args)
        return [p]
    else:
        os.system(cmd)
        return []
def mono_demo(path, mode, exp=None):
    """Monocular mocap pipeline driver: for each camera sub-folder, run the
    fitting app and render a result video.

    Relies on the module-level ``args`` (CLI flags) and ``config_dict``.
    Skips a sub when its output video already exists (unless args.restart).
    """
    check_image(path)
    # check cameras
    if not os.path.exists(join(path, 'intri.yml')):
        # monocular fitting still needs a (blank) camera file
        cmd = f'python3 apps/calibration/create_blank_camera.py {path}'
        run_cmd(cmd)
    # run reconstruction
    exp = mode if exp is None else exp
    if args.subs is None:
        args.subs = sorted(os.listdir(join(path, 'images')))
    for sub in args.subs:
        outdir = join(path, 'output-{}'.format(exp), 'smplmesh')
        videoname = join(outdir, sub+'.mp4')
        if os.path.exists(videoname) and not args.restart:
            continue
        # load config
        config = config_dict[mode]
        cfg_data = config.data
        cfg_model = config.model
        cfg_exp = config.exp
        cmd = f'python3 apps/fit/fit.py --cfg_model {cfg_model} --cfg_data {cfg_data} --cfg_exp {cfg_exp}'
        _config_data = Config.load(cfg_data)
        # opt data
        output = join(path, 'output-{}'.format(exp))
        opt_data = ['args.path', path, 'args.out', output, 'args.subs', format_subs([sub]).replace('"', '')]
        opt_data += args.opt_data
        opt_data += config.get('opt_data', [])
        # only pass the camera path when the data config expects one
        if 'camera' in _config_data.args.keys():
            opt_data.extend(['args.camera', path])
        if args.ranges is not None:
            opt_data.extend(['args.ranges', '{},{},{}'.format(*args.ranges)])
        if args.vis_scale is not None:
            opt_data += ['args.writer.render.scale', '{}'.format(args.vis_scale)]
        if args.vis_mode is not None:
            opt_data += ['args.writer.render.mode', args.vis_mode]
        if args.pids is not None and args.mp:
            opt_data += ['args.pids', ','.join(map(str, args.pids))]
        if args.render_side:
            opt_data += ['args.writer.render.mode', "left"]
        cmd += ' --opt_data "{}"'.format('" "'.join(opt_data))
        # opt model
        opt_model = config.get('opt_model', [])
        if len(opt_model) > 0:
            cmd += ' --opt_model "{}"'.format('" "'.join(opt_model))
        # opt exp
        opt_exp = [] + args.opt_exp
        if args.debug:
            opt_exp.extend(['args.monitor.printloss', "True", 'args.monitor.check', 'True'])
        opt_exp += config.get('opt_exp', [])
        if len(opt_exp) > 0:
            cmd += ' --opt_exp "{}"'.format('" "'.join(opt_exp))
        log(cmd.replace(output, '${output}').replace(path, '${data}'))
        run_cmd(cmd)
        cmd = 'python3 -m easymocap.visualize.ffmpeg_wrapper {data}/output-{exp}/smplmesh/{sub} --fps {fps}'.format(
            data=path, exp=exp, sub=sub, fps=30
        )
        run_cmd(cmd)
13,362 | import os
from os.path import exists
from os.path import join
from easymocap.config import Config, CfgNode
from glob import glob
from easymocap.mytools.debug_utils import run_cmd, check_exists, myerror, log, mywarn
def run_triangulation(cfg_data, cfg_exp, path, out, args):
def append_mocap_flags(path, output, cfg_data, cfg_model, cfg_exp, config, args):
def log(text):
def mywarn(text):
def run_cmd(cmd, verbo=True, bg=False):
def workflow(work, args):
    """Execute a named mocap workflow defined in config/mocap_workflow*.yml.

    A workflow may define: subs/pids overrides, keypoint extraction,
    calibration, triangulation, fitting and post-processing commands; each
    optional stage is run only when its key is present.
    Relies on the module-level ``config_dict`` and helpers defined elsewhere.
    """
    if not os.path.exists(join(args.path, 'images')):
        mywarn('Images not exists, extract it use default setting')
        cmd = f'python3 apps/preprocess/extract_image.py {args.path}'
        run_cmd(cmd)
    # merge the base workflow file with any mocap_workflow_*.yml extensions
    workflow_dict = Config.load('config/mocap_workflow.yml')
    for filename in glob(join('config', 'mocap_workflow_*.yml')):
        dict_ = Config.load(filename)
        workflow_dict.update(dict_)
    workflow = workflow_dict[work]
    # workflow-level subs/pids override the CLI values
    for key_work in ['subs', 'pids']:
        if key_work in workflow.keys():
            if key_work == 'subs':
                args.subs = workflow[key_work]
            elif key_work == 'pids':
                args.pids = workflow[key_work]
    exp = work if args.exp is None else args.exp
    if 'extract_keypoints' in workflow.keys() and not args.skip_detect:
        if isinstance(workflow['extract_keypoints'], str):
            cmd = workflow['extract_keypoints'].replace('${data}', args.path)
            run_cmd(cmd)
        else:
            pass
    if 'calibration' in workflow.keys() and workflow['calibration'] != 'none':
        cmd = workflow['calibration'].replace('${data}', args.path)
        run_cmd(cmd)
    # check triangulation
    if 'triangulation' in workflow.keys():
        cfg_data = workflow.triangulation.data
        cfg_exp = workflow.triangulation.exp
        out = join(args.path, workflow.triangulation.out)
        # check output
        if not args.restart_mocap and os.path.exists(join(out, 'keypoints3d')) and len(os.listdir(join(out, 'keypoints3d'))) > 10:
            log('[Skip] Triangulation already done, skipping...')
        else:
            run_triangulation(cfg_data, cfg_exp, args.path, out, args)
    if 'fit' in workflow.keys():
        # a string value refers to a named fitting preset in config_dict
        if isinstance(workflow.fit, str):
            workflow.fit = config_dict[workflow.fit]
        cfg_data = workflow.fit.data
        cfg_model = workflow.fit.model
        cfg_exp = workflow.fit.exp
        # check output
        path = args.path
        if 'output' in workflow.keys():
            output = join(args.path, workflow.output)
        else:
            output = join(args.path, 'output-{}'.format(exp))
        append_mocap_flags(path, output, cfg_data, cfg_model, cfg_exp, workflow.fit, args)
    if 'postprocess' in workflow.keys():
        # postprocess commands support ${data}/${exp}/${subs_vis}/${vis_scale} placeholders
        for key, cmd in workflow.postprocess.items():
            cmd = cmd.replace('${data}', args.path).replace('${exp}', args.exp)
            if '${subs_vis}' in cmd:
                cmd = cmd.replace('${subs_vis}', ' '.join(args.subs_vis))
            if '${vis_scale}' in cmd:
                cmd = cmd.replace('${vis_scale}', '{}'.format(args.vis_scale))
            run_cmd(cmd)
13,363 | from easymocap.dataset import CONFIG
from easymocap.dataset import CONFIG
from easymocap.affinity.affinity import ComposedAffinity
from easymocap.assignment.associate import simple_associate
from easymocap.assignment.group import PeopleGroup
from easymocap.mytools import Timer
from tqdm import tqdm
class ComposedAffinity:
    """Combine several pairwise affinity measures between multi-view 2D
    detections into one matrix, optionally refined with SVT matching."""
    def __init__(self, cameras, basenames, cfg):
        # instantiate each affinity term listed in cfg.aff_funcs; every term
        # receives the calibrated cameras and the camera names
        affinity = {}
        for key, args in cfg.aff_funcs.items():
            args['cameras'] = cameras
            args['cams'] = basenames
            affinity[key] = load_object(key, args)
        self.cameras = cameras
        self.affinity = affinity
        self.cfg = cfg
    def __call__(self, annots, images=None):
        """Return (affinity, dimGroups) for the per-view detections *annots*.

        dimGroups gives the cumulative offsets of each view's detections
        inside the flat affinity matrix.
        """
        dimGroups, maptoview = getDimGroups(annots)
        out = {}
        for key, model in self.affinity.items():
            out[key] = model(annots, dimGroups)
        # geometric mean of the individual affinity matrices
        aff = composeAff(out, self.cfg.vis_aff)
        # forbid matches within the same view
        constrain = SimpleConstrain(dimGroups)
        observe = np.ones_like(aff)
        aff = constrain * aff
        if self.cfg.svt_py:
            # low-rank refinement of the affinity matrix
            aff = matchSVT(aff, dimGroups, constrain, observe, self.cfg.svt_args)
        aff[aff<self.cfg.aff_min] = 0
        return aff, dimGroups
def simple_associate(annots, affinity, dimGroups, Pall, group, cfg):
    """Greedy cross-view association and triangulation of 2D detections.

    Detections are visited by decreasing number of supporting views; for each
    unassigned detection, candidate cross-view matchings (proposals) are
    enumerated, triangulated, and scored by reprojection error and the
    configured criterions. Accepted people are added to *group*.
    Returns the (mutated) group.
    """
    nViews = len(annots)
    criterions = load_criterions(cfg.criterions)
    n2D = dimGroups[-1]
    views = views_from_dimGroups(dimGroups)
    # count, for every detection, how many views it has affinity support in
    views_cnt = np.zeros((affinity.shape[0], nViews))
    for nv in range(nViews):
        views_cnt[:, nv] = affinity[:, dimGroups[nv]:dimGroups[nv+1]].sum(axis=1)
    views_cnt = (views_cnt>0.5).sum(axis=1)
    sortidx = np.argsort(-views_cnt)
    p2dAssigned = np.zeros(n2D, dtype=int) - 1
    indices_zero = np.zeros((nViews), dtype=int) - 1
    for idx in sortidx:
        if p2dAssigned[idx] != -1:
            continue
        # build proposals: one candidate detection per view (-1 = unused view);
        # multiple matches in a view fork additional proposals
        proposals = [indices_zero.copy()]
        for nv in range(nViews):
            match = np.where(
                (affinity[idx, dimGroups[nv]:dimGroups[nv+1]] > 0.)
                & (p2dAssigned[dimGroups[nv]:dimGroups[nv+1]] == -1) )[0]
            if len(match) > 0:
                match = match + dimGroups[nv]
                for proposal in proposals:
                    proposal[nv] = match[0]
                if len(match) > 1:
                    proposals_new = []
                    for proposal in proposals:
                        for col in match[1:]:
                            p = proposal.copy()
                            p[nv] = col
                            proposals_new.append(p)
                    proposals += proposals_new
        results = []
        while len(proposals) > 0:
            proposal = proposals.pop()
            # less than two views
            if (proposal != -1).sum() < cfg.min_views:
                continue
            # print('[associate] pop proposal: {}'.format(proposal))
            keypoints2d, bboxes, Pused, Vused = set_keypoints2d(proposal, annots, Pall, dimGroups)
            keypoints3d = batch_triangulate(keypoints2d, Pused)
            kptsRepro = projectN3(keypoints3d, Pused)
            # per-joint reprojection error, normalized by bbox size
            err = ((kptsRepro[:, :, 2]*keypoints2d[:, :, 2]) > 0.) * np.linalg.norm(kptsRepro[:, :, :2] - keypoints2d[:, :, :2], axis=2)
            size = (bboxes[:, [2, 3]] - bboxes[:, [0, 1]]).max(axis=1, keepdims=True)
            err = err / size
            # NOTE(review): `(err>0. + 1e-9)` parses as `err > 1e-9` due to
            # operator precedence — presumably intended as a valid-joint count;
            # confirm against upstream
            err_view = err.sum(axis=1)/((err>0. + 1e-9).sum(axis=1))
            flag = (err_view < cfg.max_repro_error).all()
            err = err.sum()/(err>0 + 1e-9).sum()
            # err_view = err.sum(axis=1)/((err>0.).sum(axis=1))
            # err = err.sum()/(err>0.).sum()
            # flag = err_view.max() < err_view.mean() * 2
            # NOTE(review): this unconditionally overrides the reprojection
            # threshold above — only the criterions below can reject a proposal
            flag = True
            for crit in criterions:
                if not crit(keypoints3d):
                    flag = False
                    break
            if flag:
                # print('[associate]: view {}'.format(Vused))
                results.append({
                    'indices': proposal,
                    'keypoints2d': keypoints2d,
                    'keypoints3d': keypoints3d,
                    'Vused': Vused,
                    'error': err
                })
            else:
                # make new proposals
                outlier_view = Vused[err_view.argmax()]
                proposal[outlier_view] = -1
                proposals.append(proposal)
        if len(results) == 0:
            continue
        if len(results) > 1:
            # print('[associate] More than one avalible results')
            results.sort(key=lambda x:x['error'])
        result = results[0]
        proposal = result['indices']
        Vused = result['Vused']
        # proposal may contain -1 entries, so index the assignment with Vused
        p2dAssigned[proposal[Vused]] = 1
        group.add(result)
    group.dimGroups = dimGroups
    return group
class PeopleGroup(dict):
    """Dict of pid -> Person for one frame of multi-person association.

    NOTE(review): maxid is incremented both in add() and in __setitem__(),
    so person ids advance by 2 per add() — confirm whether intentional.
    NOTE(review): the cfg argument is accepted but never stored/used.
    """
    def __init__(self, Pall, cfg) -> None:
        # maxid: next person id to hand out
        self.maxid = 0
        self.pids = []
        self.current = []
        self.dimGroups = []
        self.Pall = Pall
        # Person shares the projection matrices as a class attribute
        Person.Pall = Pall
    def add(self, info):
        """Create a new Person from an association result and register it."""
        # self.current.append(info)
        pid = self.maxid
        people = Person(pid)
        people.add(**info)
        self.maxid += 1
        self[pid] = people
    def clear(self):
        """Reset ids and drop all people (called once per frame)."""
        self.pids = []
        self.maxid = 0
        super().clear()
    def __setitem__(self, pid, people) -> None:
        self.pids.append(pid)
        self.maxid += 1
        super().__setitem__(pid, people)
    def results(self):
        """Return a serializable list of {'id', 'keypoints3d'} records."""
        results = []
        for pid, people in self.items():
            result = {'id': pid, 'keypoints3d': people.keypoints3d}
            results.append(result)
        return results
    def __str__(self):
        res = ' PeopleDict {:6d}: {}\n'.format(Person.time, ' '.join(map(str, self.pids)))
        for pid in self.pids:
            res += ' {:3d}: {}\n'.format(pid, self[pid])
        return res
class BaseSocketClient:
    """Minimal TCP client streaming encoded keypoint/SMPL payloads.

    Wire protocol: an ASCII decimal length terminated by '\\n', followed by
    the payload bytes.
    """
    def __init__(self, host, port) -> None:
        # 'auto' resolves to this machine's hostname
        if host == 'auto':
            host = socket.gethostname()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((host, port))
        self.s = sock
    def _send_payload(self, val):
        # length header first, then the full payload
        self.s.send('{}\n'.format(len(val)).encode('ascii'))
        self.s.sendall(val)
    def send(self, data):
        """Encode and send detection (keypoint) results."""
        self._send_payload(encode_detect(data))
    def send_smpl(self, data):
        """Encode and send SMPL parameter results."""
        self._send_payload(encode_smpl(data))
    def close(self):
        self.s.close()
def mvposev1(dataset, args, cfg):
    """Multi-view multi-person skeleton reconstruction.

    Per frame: compute cross-view affinities between detections, associate
    them into people, triangulate, and write 2D/3D keypoints. Optionally
    streams 3D results to a visualization server.
    """
    # images are only loaded when some visualization/crop output is requested
    dataset.no_img = not (args.vis_det or args.vis_match or args.vis_repro or args.ret_crop)
    start, end = args.start, min(args.end, len(dataset))
    affinity_model = ComposedAffinity(cameras=dataset.cameras, basenames=dataset.cams, cfg=cfg.affinity)
    group = PeopleGroup(Pall=dataset.Pall, cfg=cfg.group)
    if args.vis3d:
        from easymocap.socket.base_client import BaseSocketClient
        vis3d = BaseSocketClient(args.host, args.port)
    for nf in tqdm(range(start, end), desc='reconstruction'):
        group.clear()
        with Timer('load data', not args.time):
            images, annots = dataset[nf]
        if args.vis_det:
            dataset.vis_detections(images, annots, nf, sub_vis=args.sub_vis)
        # affinity of the detections across different views
        with Timer('compute affinity', not args.time):
            affinity, dimGroups = affinity_model(annots, images=images)
        with Timer('associate', not args.time):
            group = simple_associate(annots, affinity, dimGroups, dataset.Pall, group, cfg=cfg.associate)
        results = group
        if args.vis_match:
            dataset.vis_detections(images, annots, nf, mode='match', sub_vis=args.sub_vis)
        if args.vis_repro:
            dataset.vis_repro(images, results, nf, sub_vis=args.sub_vis)
        dataset.write_keypoints2d(annots, nf)
        dataset.write_keypoints3d(results, nf)
        if args.vis3d:
            # BUGFIX: PeopleGroup.results is a method, not a property; the
            # bound method object itself was being sent instead of the data
            vis3d.send(group.results())
    Timer.report()
13,364 | import os
from os.path import join
from glob import glob
extensions = ['.mp4', '.webm', '.flv', '.MP4', '.MOV', '.mov', '.avi']
def run(cmd):
def extract_images(path, ffmpeg, image):
    """Extract frames from every video under path/videos into path/<image>/<sub>/.

    Relies on the module-level ``args`` for frame count (num), scaling,
    rotation (transpose), name stripping and restart behaviour, and on the
    module-level ``extensions`` and ``run``. Output folders that already look
    complete are skipped unless args.restart is set.
    """
    videos = sorted(sum([
        glob(join(path, 'videos', '*'+ext)) for ext in extensions
        ], [])
    )
    for videoname in videos:
        # sub-folder name = video basename without extension and strip pattern
        sub = '.'.join(os.path.basename(videoname).split('.')[:-1])
        sub = sub.replace(args.strip, '')
        outpath = join(path, image, sub)
        if os.path.exists(outpath) and (len(os.listdir(outpath)) > 10 or len(os.listdir(outpath)) == args.num) and not args.restart:
            continue
        os.makedirs(outpath, exist_ok=True)
        other_cmd = ''
        if args.num != -1:
            other_cmd += '-vframes {}'.format(args.num)
        if args.scale != 1 and args.transpose != -1:
            other_cmd += ' -vf "transpose={transpose},scale=iw/{scale}:ih/{scale}"'.format(scale=args.scale,transpose=args.transpose)
        elif args.scale != 1:
            # BUGFIX: the filter argument was missing its closing double
            # quote, breaking the generated shell command
            other_cmd += ' -vf "scale=iw/{scale}:ih/{scale}"'.format(scale=args.scale)
        elif args.transpose != -1:
            other_cmd += ' -vf transpose={}'.format(args.transpose)
        cmd = '{} -i {} {} -q:v 1 -start_number 0 {}/%06d.jpg'.format(
            ffmpeg, videoname, other_cmd, outpath)
        if not args.debug:
            cmd += ' -loglevel quiet'
        run(cmd)
13,365 | import shutil
from easymocap.mytools.debug_utils import log, mkdir, mywarn, run_cmd
import os
from os.path import join
from tqdm import tqdm
def log(text):
    """Print *text* at the 'info' level via easymocap's myprint."""
    myprint(text, 'info')
def mkdir(path):
    """Create *path* (with parents) when absent.

    Returns 0 when the directory already exists (nothing done); otherwise
    logs the creation and returns None.
    """
    if os.path.exists(path):
        return 0
    log('mkdir {}'.format(path))
    os.makedirs(path, exist_ok=True)
def merge_directories(root, out):
    """Flatten root/<seq>/images/<sub>/<img> into out/images/merge/.

    Each merged file is named '<seq>+<sub>+<img>' and a log.txt manifest
    (one 'seq,sub,img' line per file) is written so the merge can be undone.
    """
    mkdir(join(out, 'images', 'merge'))
    sequence = sorted(os.listdir(root))
    log('>>> Totally {} sub-folders'.format(len(sequence)))
    # enumerate every (sequence, camera, image) triple first
    records = []
    for seq in tqdm(sequence, 'check sequence'):
        for sub in sorted(os.listdir(join(root, seq, 'images'))):
            for imgname in sorted(os.listdir(join(root, seq, 'images', sub))):
                records.append((seq, sub, imgname))
    log('>>> Totally {} records'.format(len(records)))
    for seq, sub, imgname in tqdm(records):
        srcname = join(root, seq, 'images', sub, imgname)
        dstname = join(out, 'images', 'merge', '{}+{}+{}'.format(seq, sub, imgname))
        if not os.path.exists(dstname):
            # symlinking (ln -s) was the original intent; copying is portable
            shutil.copyfile(srcname, dstname)
    with open(join(out, 'log.txt'), 'w') as f:
        for seq, sub, imgname in tqdm(records, 'writing'):
            f.write('{},{},{}\r\n'.format(seq, sub, imgname))
13,366 | import shutil
from easymocap.mytools.debug_utils import log, mkdir, mywarn, run_cmd
import os
from os.path import join
from tqdm import tqdm
def split_directories(root, out):
    """Inverse of merge_directories: scatter annotations back per sequence.

    Reads out/log.txt, and for each 'seq,sub,img' record copies the merged
    annotation out/annots/merge/<seq>+<sub>+<img>.json back to
    root/<seq>/annots/<sub>/<img>.json.
    """
    with open(join(out, 'log.txt'), 'r') as f:
        records = f.readlines()
    for record in tqdm(records):
        seq, sub, imgname = record.strip().split(',')
        jsonname = imgname.replace('.jpg', '.json')
        srcname = join(out, 'annots', 'merge', '{}+{}+{}'.format(seq, sub, jsonname))
        dstname = join(root, seq, 'annots', sub, jsonname)
        os.makedirs(os.path.dirname(dstname), exist_ok=True)
        shutil.copyfile(srcname, dstname)
13,367 | from easymocap.annotator.file_utils import read_json, save_json
from easymocap.config import load_object_from_cmd
import numpy as np
from easymocap.mytools.debug_utils import log, myerror, mywarn, run_cmd
from tqdm import tqdm
import os
from os.path import join
class Tracker:
def __init__(self, missing_frame=10, thres_iou=0.5) -> None:
def step(self):
def init(self, data):
def update(self, data, pid):
def calculate_distance(self, data, infos):
def track(self, data):
def add(self, data):
def report(self):
def read_json(path):
def save_annot(file, data):
def track2d(datas):
    """Greedy IoU tracking over a monocular sequence, then write back ids.

    Args:
        datas: dict with 'annname' (list of per-frame json paths) and
            'annots' (list of per-frame annotation lists). Thresholds come
            from the module-level ``args``.
    """
    # sort the first frame by bbox area so the largest person gets id 0
    annots0 = datas['annots'][0]
    len_first_frame = len(annots0)
    tracker = Tracker(thres_iou=args.thres_iou)
    annots0.sort(key=lambda x:-(x['bbox'][2]-x['bbox'][0])*(x['bbox'][3]-x['bbox'][1]))
    for i, annot in enumerate(annots0):
        annot['personID'] = i
    for nf, annots in enumerate(datas['annots']):
        if nf == 0:
            # seed the tracker with the first frame's people
            for annot in annots:
                flag, pid = tracker.init(annot)
            continue
        # advance the tracker one frame, then greedily match detections
        tracker.step()
        annots0.sort(key=lambda x:-(x['bbox'][2]-x['bbox'][0])*(x['bbox'][3]-x['bbox'][1]))
        for annot in annots:
            flag, pid = tracker.add(annot)
            if flag:
                annot['personID'] = pid
    from easymocap.annotator.file_utils import save_annot
    # BUGFIX: the write-back loop referenced an undefined name `data`;
    # it must read from the `datas` argument.
    nFrames = len(datas['annname'])
    for nf in tqdm(range(nFrames), desc='writing track'):
        annname = datas['annname'][nf]
        annots = datas['annots'][nf]
        annots.sort(key=lambda x:x['personID'])
        annots_origin = read_json(annname)
        annots_origin['annots'] = annots
        save_annot(annname, annots_origin)
    tracker.report()
13,368 | import os
from os.path import join
from tqdm import tqdm
import numpy as np
def load_subs(path, subs):
    """Resolve the list of camera sub-folders under path/images.

    When *subs* is empty, scan path/images for sub-directories; when none
    exist (flat image folder), fall back to a single empty name so callers
    can join() it transparently. A non-empty *subs* is returned unchanged.
    """
    if len(subs) == 0:
        image_root = join(path, 'images')
        candidates = sorted(os.listdir(image_root))
        subs = [c for c in candidates if os.path.isdir(join(image_root, c))]
    if len(subs) == 0:
        subs = ['']
    return subs
13,369 | import os
from os.path import join
from easymocap.mytools.debug_utils import myerror
import torch
from easymocap.config import load_object, Config
import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning import seed_everything
import resource
class plwrapper(pl.LightningModule):
    """PyTorch-Lightning wrapper around an EasyMocap neural-rendering network.

    In 'train'/'trainvis' mode it builds the training dataset; otherwise it
    builds the (possibly split-specific) test renderer and visualizer.

    CONSISTENCY FIX: the methods below previously mixed the module-global
    ``cfg`` with ``self.cfg``; everything now reads ``self.cfg`` (the two are
    the same object when constructed via train(cfg), so behavior is unchanged).
    """
    def __init__(self, cfg, mode='train'):
        super().__init__()
        # load model
        self.cfg = cfg
        self.network = load_object(cfg.network_module, cfg.network_args)
        trainer_args = dict(cfg.trainer_args)
        trainer_args['net'] = self.network
        self.train_renderer = load_object(cfg.trainer_module, trainer_args)
        if mode == 'train' or mode == 'trainvis':
            self.train_dataset = load_object(cfg.data_train_module, cfg.data_train_args)
            # self.val_dataset = load_object(cfg.data_val_module, cfg.data_val_args)
        else:
            # allow per-split overrides, e.g. demo_renderer_module
            if mode + '_renderer_module' in cfg.keys():
                module, args = cfg[mode+'_renderer_module'], cfg[mode+'_renderer_args']
            else:
                module, args = cfg.renderer_module, cfg.renderer_args
            self.test_renderer = load_object(module, args, net=self.network)
            if mode + '_visualizer_module' in cfg.keys():
                module, args = cfg[mode+'_visualizer_module'], cfg[mode+'_visualizer_args']
            else:
                module, args = cfg.visualizer_module, cfg.visualizer_args
            self.visualizer = load_object(module, args)
    def forward(self, batch):
        # in lightning, forward defines the prediction/inference actions
        self.network.train()
        batch['step'] = self.trainer.global_step
        batch['meta']['step'] = self.trainer.global_step
        output = self.test_renderer(batch)
        self.visualizer(output, batch)
        return 0
    def training_step(self, batch, batch_idx):
        batch['step'] = self.trainer.global_step
        batch['meta']['step'] = self.trainer.global_step
        # training_step defines the train loop. It is independent of forward
        output, loss, loss_stats, image_stats = self.train_renderer(batch)
        for key, val in loss_stats.items():
            self.log(key, val)
        return loss
    def train_dataloader(self):
        from easymocap.neuralbody.trainer.dataloader import make_data_sampler, make_batch_data_sampler, make_collator, worker_init_fn
        cfg = self.cfg
        shuffle = True
        is_distributed = len(cfg.gpus) > 1
        is_train = True
        sampler = make_data_sampler(cfg, self.train_dataset, shuffle, is_distributed, is_train)
        batch_size = cfg.train.batch_size
        drop_last = False
        max_iter = cfg.train.ep_iter
        # kept on self so on_train_epoch_end can reseed it for DDP
        self.batch_sampler = make_batch_data_sampler(cfg, sampler, batch_size,
            drop_last, max_iter, is_train)
        num_workers = cfg.train.num_workers
        collator = make_collator(cfg, is_train)
        data_loader = torch.utils.data.DataLoader(self.train_dataset,
            batch_sampler=self.batch_sampler,
            num_workers=num_workers,
            collate_fn=collator,
            worker_init_fn=worker_init_fn)
        return data_loader
    def configure_optimizers(self):
        from easymocap.neuralbody.trainer.optimizer import Optimizer
        from easymocap.neuralbody.trainer.lr_sheduler import Scheduler, set_lr_scheduler
        optimizer = Optimizer(self.network, self.cfg.optimizer)
        scheduler = Scheduler(self.cfg.scheduler, optimizer)
        return [optimizer], [scheduler]
    def on_train_epoch_end(self):
        # reshuffle the distributed sampler each epoch
        if len(self.cfg.gpus) > 1:
            self.batch_sampler.sampler.set_epoch(self.current_epoch)
def train(cfg):
    """Build the Lightning wrapper and run training.

    Handles checkpoint resume, log-dir creation, TensorBoard logging,
    periodic checkpointing and (multi-GPU) DDP setup. Relies on the
    module-level ``args`` for the debug flag.
    """
    model = plwrapper(cfg)
    # resume from the last checkpoint when requested and available
    if cfg.resume and os.path.exists(join(cfg.trained_model_dir, 'last.ckpt')):
        resume_from_checkpoint = join(cfg.trained_model_dir, 'last.ckpt')
    else:
        resume_from_checkpoint = None
    if os.path.exists(cfg.recorder_args.log_dir):
        # os.removedirs(cfg.recorder_args.log_dir)
        pass
    os.makedirs(cfg.recorder_args.log_dir, exist_ok=True)
    # snapshot the full config next to the logs for reproducibility
    print(cfg, file=open(join(cfg.recorder_args.log_dir, 'exp.yml'), 'w'))
    logger = TensorBoardLogger(save_dir=cfg.recorder_args.log_dir, name=cfg.exp)
    ckpt_callback = pl.callbacks.ModelCheckpoint(
        verbose=True,
        dirpath=cfg.trained_model_dir,
        every_n_epochs=5 if not args.debug else 1,
        save_last=True,
        save_top_k=-1,
        monitor='loss',
        filename="{epoch}")
    # Log true learning rate, serves as LR-Scheduler callback
    lr_monitor = pl.callbacks.LearningRateMonitor(logging_interval='step')
    extra_args = {
        # 'num_nodes': len(cfg.gpus),
        'accelerator': 'gpu',
    }
    if len(cfg.gpus) > 0:
        extra_args['strategy'] = 'ddp'
        extra_args['replace_sampler_ddp'] = False
    trainer = pl.Trainer(
        gpus=len(cfg.gpus),
        logger=logger,
        resume_from_checkpoint=resume_from_checkpoint,
        callbacks=[ckpt_callback, lr_monitor],
        max_epochs=cfg.train.epoch,
        # profiler='simple',
        **extra_args
    )
    trainer.fit(model)
13,370 | import os
from os.path import join
from easymocap.mytools.debug_utils import myerror
import torch
from easymocap.config import load_object, Config
import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning import seed_everything
import resource
def load_ckpt(model, ckpt_path, model_name='network'):
    """Load weights of the *model_name* submodule from a checkpoint.

    Supports both raw checkpoints and pytorch-lightning ones (weights nested
    under 'state_dict'). Keys are expected as '<model_name>.<param>'; the
    prefix is stripped before loading. Loading is non-strict, so missing or
    extra keys are tolerated.

    Returns:
        int: the epoch stored in the checkpoint.
    """
    print('Load from {}'.format(ckpt_path))
    checkpoint = torch.load(ckpt_path, map_location=torch.device('cpu'))
    epoch = checkpoint['epoch']
    if 'state_dict' in checkpoint.keys():
        # pytorch-lightning checkpoints nest the weights under 'state_dict'
        checkpoint = checkpoint['state_dict']
    state_dict = {}
    for k, v in checkpoint.items():
        if not k.startswith(model_name):
            continue
        # strip '<model_name>.'; the original also had a dead filter loop
        # over an empty prefix list, removed here (it always fell through)
        state_dict[k[len(model_name)+1:]] = v
    model.load_state_dict(state_dict, strict=False)
    return epoch
13,371 | import os
from os.path import join
from easymocap.mytools.debug_utils import myerror
import torch
from easymocap.config import load_object, Config
import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning import seed_everything
import resource
def parse(args, cfg):
    """Post-process the parsed CLI args and config in place.

    Sets GPU visibility, derives the model/log directories under
    neuralbody/<exp>/, and adjusts visualizer settings for the 'eval' split.
    """
    from os.path import join
    cfg.recorder_args.local_rank = cfg.local_rank
    # on slurm the scheduler controls GPU visibility, not us
    if not args.slurm:
        os.environ['CUDA_VISIBLE_DEVICES'] = ', '.join([str(gpu) for gpu in cfg.gpus])
    assert cfg.exp != "", "Please set the experiement name"
    cfg.trained_model_dir = join('neuralbody', cfg.exp, 'model')
    os.makedirs(cfg.trained_model_dir, exist_ok=True)
    cfg.recorder_args.log_dir = join('neuralbody', cfg.exp, 'record')
    os.makedirs(cfg.recorder_args.log_dir, exist_ok=True)
    # NOTE(review): `exp` is assembled below but never stored or returned —
    # looks like dead code or a missing assignment; confirm against callers
    exp = 'vis'
    if 'keyframe' in cfg.data_val_args:
        exp += '_{}'.format(cfg.data_val_args.keyframe)
    if 'pid' in cfg.data_val_args:
        exp += '_{}'.format(cfg.data_val_args.pid)
    if 'pids' in cfg.data_val_args:
        exp += '_{}'.format('+'.join(list(map(str, cfg.data_val_args.pids))))
    if cfg.split == 'eval':
        # evaluation needs per-camera outputs plus extra maps, no concatenation
        if 'camnf' not in cfg.visualizer_args.format:
            cfg.visualizer_args.format = 'camnf'
        cfg.visualizer_args.concat = 'none'
        cfg.visualizer_args['keys'] = list(cfg.visualizer_args['keys']) + ['rgb', 'instance_map'] + ['raw_depth']
        assert len(cfg.data_val_args.subs) > 0, cfg.data_val_args.subs
        cfg.visualizer_args['subs'] = cfg.data_val_args.subs
13,372 | from os.path import join
from easymocap.mytools.debug_utils import log, run_cmd
from easymocap.config.baseconfig import Config, CN
import os
from glob import glob
from copy import deepcopy
def reload_config(config, data, outdir):
def log(text):
def run_cmd(cmd, verbo=True, bg=False):
def neuralbody_train(data, config, mode, exp=None):
    """Launch a neuralbody train/test run as a subprocess, then encode videos.

    Builds and runs the `train_pl.py` command for experiment *exp* (defaults
    to *mode*); in any of the eval/demo/test/... modes it afterwards collects
    the newest `<split>_<epoch>` result folder and encodes its images to mp4.
    NOTE(review): reads the module-level `args` namespace from the CLI parser.
    """
    # run reconstruction
    exp = mode if exp is None else exp
    outdir = join(args.out, exp)
    cfg_name = join(outdir, 'config.yml')
    cmd = f'python3 apps/neuralbody/train_pl.py --cfg {cfg_name} gpus {args.gpus} distributed True exp {exp}'
    # regenerate the config when explicitly asked, or on a plain training run
    if args.recfg or (not args.test and not args.demo and not args.eval):
        reload_config(config, data, outdir)
    # NOTE(review): this guard tests `args.canonical`/truthiness while the
    # elif below tests `is not None` -- confirm the intended semantics.
    if args.eval or args.demo or args.test or args.trainvis or args.canonical or args.poses is not None:
        if args.test:
            cmd += ' split test'
        elif args.eval:
            cmd += ' split eval'
        elif args.trainvis:
            cmd += ' split trainvis'
        elif args.canonical is not None:
            cmd += ' split canonical data_canonical_args.root {}'.format(args.canonical)
        elif args.poses is not None:
            cmd += ' split novelposes data_novelposes_args.root {}'.format(args.poses)
        elif args.demo:
            cmd += ' split demo'
        print(cmd)
        run_cmd(cmd)
        # generate videos: pick the result folder with the largest epoch number
        split = cmd.split()[cmd.split().index('split')+1]
        find_epoch = lambda x:os.path.basename(x).replace(split+'_', '')
        demolists = [i for i in glob(join(outdir, split+'*')) if os.path.isdir(i) and find_epoch(i).isdigit()]
        demolists = sorted(demolists, key=lambda x:int(find_epoch(x)))
        if len(demolists) == 0:
            log('No demo results found')
        else:
            newest = demolists[-1]
            # encode each rendered channel into an mp4 next to the folder
            for key in ['rgb_map', 'acc_map', 'feat_map']:
                cmd = f'ffmpeg -y -i {newest}/{key}_%06d.jpg -vcodec libx264 -pix_fmt yuv420p {newest}_{key}.mp4 -loglevel quiet'
                run_cmd(cmd)
        return 0
    if args.debug:
        cmd += ' --debug'
    print(cmd)
    run_cmd(cmd)
import myerror, mywarn, run_cmd
from easymocap.mytools.vis_base import plot_line
from easymocap.annotator.basic_annotator import AnnotBase, parse_parser
from easymocap.annotator import ImageFolder
from easymocap.annotator import plot_text
from easymocap.annotator.basic_visualize import capture_screen, resize_to_screen
from easymocap.mytools import read_json, save_json
from easymocap.annotator.basic_keyboard import get_any_move
from os.path import join
import os
import numpy as np
import cv2
class Clips:
    """Per-camera [start, end) clip annotations, persisted to <path>/clips.json.

    ``sub`` is a property: assigning ``clip.sub = name`` selects the camera and
    binds its clip list (as extracted, the @property/@sub.setter decorators had
    been lost, so assignment silently shadowed the accessor and the clip list
    was never loaded).
    """
    def __init__(self, path) -> None:
        self.temp = join(path, 'clips.json')
        if os.path.exists(self.temp):
            self.annots = read_json(self.temp)
        else:
            self.annots = {}
        self.start_ = None  # pending clip start frame
        self.end_ = None    # pending clip end frame
        self.clips = []     # clips of the currently selected camera
        self.sub_ = None    # currently selected camera name

    @property
    def sub(self):
        return self.sub_

    @sub.setter
    def sub(self, value):
        self.sub_ = value
        if value in self.annots.keys():
            self.clips = self.annots[value]
        else:
            self.annots[value] = []
            self.clips = self.annots[value]
        self.print(0)

    def start(self, annotator, **kwargs):
        "mark the start of a new clip at the current frame"
        self.start_ = annotator.frame
        print('>>> Start clip from frame {:6d}'.format(annotator.frame))

    def end(self, annotator, **kwargs):
        "mark the end of the new clip at the current frame"
        self.end_ = annotator.frame
        print('>>> End clip from frame {:6d}'.format(annotator.frame))

    def add(self, annotator, **kwargs):
        "commit the pending [start, end) clip"
        if self.start_ is None:
            print('[clip] Please check the start!')
            return 0
        if self.end_ is None:
            print('[clip] Please check the end!')
            return 0
        print('[{}, {})'.format(self.start_, self.end_))
        self.clips.append([self.start_, self.end_])
        self.start_ = None
        self.end_ = None

    def delete(self, annotator, **kwargs):
        "remove the clip containing the current frame"
        frame = annotator.frame
        ind = -1
        for i, (start, end) in enumerate(self.clips):
            if frame > start and frame < end:
                ind = i
                break
        else:
            print('[clip] current not in any clip')
            return 0
        self.clips.pop(ind)

    def print(self, annotator, **kwargs):
        "list the clips of the current camera"
        print('{}: '.format(self.sub))
        for (start, end) in self.clips:
            print(' - [{}, {})'.format(start, end))

    def save(self):
        # persist all cameras' clips back to clips.json
        save_json(self.temp, self.annots)

    def vis_clips(self, img, frame, nFrames, **kwargs):
        """Draw the clip ranges and the pending start/end markers onto img."""
        COL_CLIP = (0, 0, 255)
        COL_NEW = (0, 0, 255)
        width = img.shape[1]
        pos = lambda x: int(width*(x+1)/nFrames)
        lw = 12
        # draw the annotated clips
        for (start, end) in self.clips:
            plot_line(img, (pos(start), lw/2), (pos(end), lw/2), lw, COL_CLIP)
        # draw the in-progress start/end markers
        if self.start_ is not None:
            top = pos(self.start_)
            pts = np.array([[top, lw], [top-lw, lw*4], [top, lw*4]])
            cv2.fillPoly(img, [pts], COL_NEW)
        if self.end_ is not None:
            top = pos(self.end_)
            pts = np.array([[top, lw], [top, lw*4], [top+lw, lw*4]])
            cv2.fillPoly(img, [pts], COL_NEW)
        return img
def mywarn(text):
    """Emit *text* through ``myprint`` at warning level."""
    level = 'warn'
    myprint(text, level)
class AnnotBase:
    """Interactive annotation loop: shows frames, dispatches key/mouse events.

    Restores the decorators lost during extraction: ``frame`` is a property
    whose setter saves the current annots and loads the target frame (callers
    do ``annotator.frame = value``), and ``clear_working``/``set_param`` are
    staticmethods (they are invoked as ``self.clear_working(param)`` /
    ``self.set_param(param, imgname, annname, nf, ...)``).
    """
    def __init__(self, dataset, key_funcs={}, callbacks=[], vis_funcs=[],
        name = 'main', body='body25',
        start=0, end=100000, step=10, no_window=False) -> None:
        self.name = name
        self.dataset = dataset
        self.nFrames = len(dataset)
        self.step = step
        self.register_keys = register_keys.copy()
        self.register_keys.update(key_funcs)
        self.no_img = False
        # build a NEW list instead of `vis_funcs += [...]`, which mutated the
        # caller's list (and the shared default argument) in place
        if resize_to_screen not in vis_funcs:
            vis_funcs = vis_funcs + [resize_to_screen]
        self.vis_funcs = vis_funcs
        self.start = start
        self.end = end
        self.isOpen = True
        self._frame = self.start
        self.visited_frames = set([self._frame])
        bbox_name, kpts_name = restore_key[body]
        # shared mutable state passed to key handlers and mouse callbacks
        self.param = {
            'frame': 0, 'nFrames': self.nFrames,
            'kpts_name': kpts_name, 'bbox_name': bbox_name,
            'select': {bbox_name: -1, 'corner': -1},
            'click': None,
            'name': name,
            'body': body,
            'capture_screen':False}
        self.set_frame(self.start)
        self.no_window = no_window
        if not no_window:
            cv2.namedWindow(self.name)
            callback = ComposedCallback(processes=callbacks)
            cv2.setMouseCallback(self.name, callback.call, self.param)

    def working(self):
        """Return True while a click/drag or any selection is active."""
        param = self.param
        flag = False
        if param['click'] is not None or param['start'] is not None:
            flag = True
        for key in self.param['select']:
            if self.param['select'][key] != -1:
                flag = True
        return flag

    @staticmethod
    def clear_working(param):
        # reset pending mouse state and any active selection
        param['click'] = None
        param['start'] = None
        param['end'] = None
        for key in param['select']:
            param['select'][key] = -1

    def save_and_quit(self, key=None):
        """Flush the current frame, close the window, and optionally persist
        all visited frames' temporary annotations to their final paths."""
        self.frame = self.frame
        self.isOpen = False
        cv2.destroyWindow(self.name)
        # get the input
        if key is None:
            key = get_valid_yn()
        if key == 'n':
            return 0
        for frame in tqdm(self.visited_frames, desc='writing'):
            self.dataset.isTmp = True
            _, annname = self.dataset[frame]
            self.dataset.isTmp = False
            _, annname_ = self.dataset[frame]
            if annname is not None:
                shutil.copyfile(annname, annname_)

    @property
    def frame(self):
        return self._frame

    def previous(self):
        """Load and return the previous frame's annotations (None at frame 0)."""
        if self.frame == 0:
            print('Reach to the first frame')
            return None
        imgname, annname = self.dataset[self.frame-1]
        annots = load_annot_to_tmp(annname)
        return annots

    @staticmethod
    def set_param(param, imgname, annname, nf, no_img=False):
        """Load frame *nf* (annots and, unless no_img, the image) into *param*."""
        annots = load_annot_to_tmp(annname)
        # clear pending mouse clicks/drags
        for key in ['click', 'start', 'end']:
            param[key] = None
        # clear the selection
        for key in param['select']:
            param['select'][key] = -1
        param['imgname'] = imgname
        param['annname'] = annname
        param['frame'] = nf
        param['annots'] = annots
        if not no_img:
            assert os.path.exists(imgname), imgname
            img0 = cv2.imread(imgname)
            param['img0'] = img0
            # param['pid'] = len(annot['annots'])
            param['scale'] = min(CV_KEY.WINDOW_HEIGHT/img0.shape[0], CV_KEY.WINDOW_WIDTH/img0.shape[1])
            # param['scale'] = 1

    def set_frame(self, nf):
        """Save the current frame's annots, then load frame *nf*."""
        param = self.param
        if 'annots' in param.keys():
            save_annot(param['annname'], param['annots'])
        self.clear_working(param)
        imgname, annname = self.dataset[nf]
        self.set_param(param, imgname, annname, nf, no_img=self.no_img)

    @frame.setter
    def frame(self, value):
        self.visited_frames.add(value)
        self._frame = value
        # save current frames
        save_annot(self.param['annname'], self.param['annots'])
        self.set_frame(value)

    def run(self, key=None, noshow=False):
        """Handle one key event, then redraw through the vis pipeline."""
        if key is None:
            key = chr(get_key())
        if key in self.register_keys.keys():
            self.register_keys[key](self, param=self.param)
        if not self.isOpen:
            return 0
        if noshow:
            return 0
        img = self.param['img0'].copy()
        for func in self.vis_funcs:
            img = func(img, **self.param)
        if not self.no_window:
            cv2.imshow(self.name, img)
        return img
def resize_to_screen(img, scale=1, **kwargs):
    """Uniformly rescale *img* by ``scale``; extra kwargs are ignored."""
    return cv2.resize(img, None, fx=scale, fy=scale)
def capture_screen(img, capture_screen=False, **kwargs):
    """If the flag is set, dump *img* to ./capture/<timestamp>.jpg; return *img*."""
    if not capture_screen:
        return img
    from datetime import datetime
    time_now = datetime.now().strftime("%m-%d-%H:%M:%S")
    outname = join('capture', time_now+'.jpg')
    os.makedirs('capture', exist_ok=True)
    cv2.imwrite(outname, img)
    print('Capture current screen to {}'.format(outname))
    return img
def get_any_move(df):
    """Build a key handler that jumps the annotator by ``df`` frames (clamped)."""
    def move(annotator, **kwargs):
        target = annotator.frame + df
        annotator.frame = max(0, min(annotator.nFrames - 1, target))
    move.__doc__ = '{} frames'.format(df)
    return move
def annot_example(path, sub, skip=False):
    """Run the clip-annotation GUI for camera *sub* under *path*.

    Keys: j/k mark clip start/end, l commits it, x deletes, v lists,
    w/s/f/g jump -10/+10/+100/-100 frames. If *skip* is set and clips
    already exist for this camera, return immediately.
    """
    # define datasets
    # define visualize
    if not os.path.exists(join(path, 'images', sub)):
        mywarn('[annot] No such sub: {}'.format(sub))
        return 0
    clip = Clips(path)
    vis_funcs = [resize_to_screen, plot_text, clip.vis_clips, capture_screen]
    # NOTE(review): relies on Clips.sub acting as a property setter that loads
    # this camera's clip list -- confirm the decorators exist on Clips.
    clip.sub = sub
    if skip and len(clip.clips) > 0:
        return 0
    key_funcs = {
        'j': clip.start,
        'k': clip.end,
        'l': clip.add,
        'x': clip.delete,
        'v': clip.print,
        'w': get_any_move(-10),
        's': get_any_move(10),
        'f': get_any_move(100),
        'g': get_any_move(-100)
    }
    dataset = ImageFolder(path, sub=sub, no_annot=True)
    print('[Info] Totally {} frames'.format(len(dataset)))
    # construct annotations
    annotator = AnnotBase(
        dataset=dataset,
        key_funcs=key_funcs,
        vis_funcs=vis_funcs)
    while annotator.isOpen:
        annotator.run()
    clip.save()
import myerror, mywarn, run_cmd
from easymocap.mytools.vis_base import plot_line
from easymocap.annotator.basic_annotator import AnnotBase, parse_parser
from easymocap.annotator import ImageFolder
from easymocap.annotator import plot_text
from easymocap.annotator.basic_visualize import capture_screen, resize_to_screen
from easymocap.mytools import read_json, save_json
from easymocap.annotator.basic_keyboard import get_any_move
from os.path import join
import os
import numpy as np
import cv2
def log(text):
    """Emit *text* through ``myprint`` at info level."""
    level = 'info'
    myprint(text, level)
def mywarn(text):
    """Emit *text* through ``myprint`` at warning level."""
    level = 'warn'
    myprint(text, level)
def myerror(text):
    """Emit *text* through ``myprint`` at error level."""
    level = 'error'
    myprint(text, level)
def mkdir(path):
    """Create *path* (with parents), logging it; return 0 if it already exists."""
    already_there = os.path.exists(path)
    if already_there:
        return 0
    log('mkdir {}'.format(path))
    os.makedirs(path, exist_ok=True)
def copy_clips(path, out):
    """Materialize the clips annotated in <path>/clips.json as image folders.

    Each clip [start, end) of camera *key* is copied to
    <out>/images/<key>+<start>+<end>, frames renumbered from 0.
    Clips whose source frames are missing are reported and skipped.
    """
    from tqdm import tqdm
    import shutil
    from easymocap.mytools.debug_utils import log, mywarn, mkdir
    temp = join(path, 'clips.json')
    assert os.path.exists(temp), temp
    annots = read_json(temp)
    for key, clips in tqdm(annots.items()):
        for start, end in clips:
            outname = '{}+{:06d}+{:06d}'.format(key, start, end)
            outdir = join(out, 'images', outname)
            # skip clips that were already fully copied
            if os.path.exists(outdir) and len(os.listdir(outdir)) == end - start:
                mywarn('[copy] Skip {}'.format(outname))
                continue
            # validate the first and LAST frame actually copied; the clip is
            # [start, end), so the last copied frame is end-1 (the old check
            # of frame `end` was off by one)
            srcname0 = join(path, 'images', key, '{:06d}.jpg'.format(start))
            srcname1 = join(path, 'images', key, '{:06d}.jpg'.format(end - 1))
            if not os.path.exists(srcname0) or not os.path.exists(srcname1):
                myerror('[copy] No such file: {}, {}'.format(srcname0, srcname1))
                # skip this clip instead of crashing in copyfile below
                continue
            log('[copy] {}'.format(outname))
            mkdir(outdir)
            # copy the images, renumbering from 0
            for nnf, nf in enumerate(tqdm(range(start, end), desc='copy {}'.format(outname))):
                srcname = join(path, 'images', key, '{:06d}.jpg'.format(nf))
                dstname = join(outdir, '{:06d}.jpg'.format(nnf))
                shutil.copyfile(srcname, dstname)
import myerror, mywarn, run_cmd
from easymocap.mytools.vis_base import plot_line
from easymocap.annotator.basic_annotator import AnnotBase, parse_parser
from easymocap.annotator import ImageFolder
from easymocap.annotator import plot_text
from easymocap.annotator.basic_visualize import capture_screen, resize_to_screen
from easymocap.mytools import read_json, save_json
from easymocap.annotator.basic_keyboard import get_any_move
from os.path import join
import os
import numpy as np
import cv2
def run_cmd(cmd, verbo=True, bg=False):
def copy_mv_clips(path, out):
    """For each annotated clip, copy the multi-view dataset via copy_dataset.py.

    Uses the FIRST camera's clip list from <path>/clips.json; each clip is
    exported to `<out or path>+<start>+<end>` by a subprocess.
    NOTE(review): reads the module-level `args` (sub, strip) -- not a parameter.
    """
    temp = join(path, 'clips.json')
    assert os.path.exists(temp), temp
    annots = read_json(temp)
    clips = list(annots.values())[0]
    for start, end in clips:
        if out is None:
            outdir = path + '+{:06d}+{:06d}'.format(start, end)
        else:
            outdir = out + '+{:06d}+{:06d}'.format(start, end)
        print(outdir)
        cmd = f'python3 scripts/preprocess/copy_dataset.py {path} {outdir} --start {start} --end {end}'
        if len(args.sub) > 0:
            cmd += ' --subs {}'.format(' '.join(args.sub))
        if args.strip is not None:
            cmd += ' --strip {}'.format(args.strip)
        run_cmd(cmd)
from easymocap.annotator.basic_visualize import capture_screen, plot_skeleton_factory, resize_to_screen
import os
from os.path import join
import numpy as np
from easymocap.annotator import ImageFolder
from easymocap.annotator import AnnotBase
from easymocap.annotator import callback_select_bbox_corner, callback_select_bbox_center
from easymocap.annotator import plot_text, plot_bbox_body, plot_bbox_factory, vis_active_bbox, vis_bbox, plot_skeleton
from easymocap.annotator.keypoints_callback import callback_select_joints
from easymocap.annotator.keypoints_keyboard import set_unvisible, set_unvisible_according_previous, set_face_unvisible, check_track, mirror_keypoints2d, mirror_keypoints2d_leg
class Estimator:
    """HRNet-based 2D pose estimator used as interactive key handlers.

    Each `detect_*` method re-runs pose estimation for the currently selected
    bbox in the annotator's param dict and overwrites its 'keypoints'.
    """
    def __init__(self) -> None:
        import torch
        device = torch.device('cuda')
        from easymocap.estimator.HRNet import SimpleHRNet
        # HRNet-w48, 17-joint (COCO) configuration
        config = {
            'nof_joints': 17,
            'c': 48,
            'checkpoint_path': 'data/models/pose_hrnet_w48_384x288.pth'
        }
        self.pose_estimator = SimpleHRNet(device=device, **config)
    def _detect_with_bbox(self, param, rot):
        # run the estimator on the selected bbox; `rot` is passed through to
        # SimpleHRNet (presumably an in-plane rotation in degrees -- confirm)
        select = param['select']['bbox']
        if select == -1:
            return 0
        img = param['img0'].copy()
        annots = param['annots']['annots'][select]
        bboxes = [annots['bbox']]
        res = self.pose_estimator(img, bboxes, rot=rot)[0]
        # annots['keypoints'][:19] = res[:19].tolist()
        annots['keypoints'] = res.tolist()
        return res
    def _detect_with_previous(self, annotator, param, sigma):
        # re-detect the selected person, conditioned on the previous frame's
        # keypoints of the same personID; `sigma` controls the prior strength
        select = param['select']['bbox']
        if select == -1:
            return 0
        annots = param['annots']['annots'][select]
        pid = annots['personID']
        previous = annotator.previous()
        found = [d for d in previous['annots'] if d['personID'] == pid]
        if len(found) == 0:
            print('[Info] Not found {} in previous frame'.format(pid))
            return 0
        keypoints = np.array(found[0]['keypoints'])[None]
        bboxes = [annots['bbox']]
        img = param['img0'].copy()
        res = self.pose_estimator.predict_with_previous(img, bboxes, keypoints, sigma)[0]
        # annots['keypoints'][:19] = res[:19].tolist()
        annots['keypoints'] = res.tolist()
        return res
    def detect_with_previous_slow(self, annotator, param):
        "detect_with_previous_slow"
        self._detect_with_previous(annotator, param, sigma=1)
    def detect_with_previous_mid(self, annotator, param):
        "detect_with_previous_mid"
        self._detect_with_previous(annotator, param, sigma=3)
    def detect_with_previous_fast(self, annotator, param):
        "detect_with_previous_fast"
        self._detect_with_previous(annotator, param, sigma=5)
    def detect_with_bbox(self, annotator, param):
        "detect"
        self._detect_with_bbox(param, 0)
    def detect_with_bbox90(self, annotator, param):
        "detect 90"
        self._detect_with_bbox(param, 90)
    def detect_with_bbox180(self, annotator, param):
        "detect 180"
        self._detect_with_bbox(param, 180)
    def detect_with_bbox270(self, annotator, param):
        "detect 270"
        self._detect_with_bbox(param, -90)
def resize_to_screen(img, scale=1, **kwargs):
    """Uniformly rescale *img* by ``scale``; extra kwargs are ignored."""
    return cv2.resize(img, None, fx=scale, fy=scale)
def capture_screen(img, capture_screen=False, **kwargs):
    """If the flag is set, dump *img* to ./capture/<timestamp>.jpg; return *img*."""
    if not capture_screen:
        return img
    from datetime import datetime
    time_now = datetime.now().strftime("%m-%d-%H:%M:%S")
    outname = join('capture', time_now+'.jpg')
    os.makedirs('capture', exist_ok=True)
    cv2.imwrite(outname, img)
    print('Capture current screen to {}'.format(outname))
    return img
def plot_skeleton_factory(body):
    """Return a vis-function that draws the skeleton for the given body type."""
    bbox_name, kpts_name = {
        'body25': ('bbox', 'keypoints'),
        'handl': ('bbox_handl2d', 'handl2d'),
        'handr': ('bbox_handr2d', 'handr2d'),
        'face': ('bbox_face2d', 'face2d'),
    }[body]
    def ret_foo(img, annots, **kwargs):
        return plot_skeleton(img, annots, body, bbox_name, kpts_name)
    return ret_foo
def callback_select_joints(start, end, annots, select, bbox_name='bbox', kpts_name='keypoints', **kwargs):
    """Mouse-drag callback: select the nearest joint, or move the selected one.

    State machine on `select[bbox_name]`/`select['joints']`:
    nothing selected -> find the nearest joint over all people;
    bbox selected only -> find the nearest joint of that person;
    both selected -> move that joint to the drag end point
    (and keep body25 joints 1/8 as midpoints of their neighbors).
    """
    if start is None or end is None:
        select['joints'] = -1
        return 0
    if start[0] == end[0] and start[1] == end[1]:
        select['joints'] = -1
        return 0
    if select['corner'] != -1:
        return 0
    # determine which corner point was selected
    annots = annots['annots']
    # not select a bbox
    if select[bbox_name] == -1 and select['joints'] == -1:
        corners = []
        for annot in annots:
            corners.append(np.array(annot[kpts_name]))
        corners = np.stack(corners)
        flag, minid = findNearestPoint(corners[..., :2], start)
        if flag:
            select[bbox_name] = minid[0]
            select['joints'] = minid[1]
        else:
            select['joints'] = -1
    # have selected a bbox, not select a corner
    elif select[bbox_name] != -1 and select['joints'] == -1:
        i = select[bbox_name]
        corners = np.array(annots[i][kpts_name])[:, :2]
        flag, minid = findNearestPoint(corners, start)
        if flag:
            select['joints'] = minid[0]
    # have selected a bbox, and select a corner
    elif select[bbox_name] != -1 and select['joints'] != -1:
        x, y = end
        # Move the corner
        data = annots[select[bbox_name]]
        nj = select['joints']
        data[kpts_name][nj][0] = x
        data[kpts_name][nj][1] = y
        if kpts_name == 'keypoints': # for body
            # joints 1 (neck) and 8 (mid-hip) are derived, not moved directly
            if nj in [1, 8]:
                return 0
            if nj in [2, 5]:
                data[kpts_name][1][0] = (data[kpts_name][2][0] + data[kpts_name][5][0])/2
                data[kpts_name][1][1] = (data[kpts_name][2][1] + data[kpts_name][5][1])/2
            if nj in [9, 12]:
                data[kpts_name][8][0] = (data[kpts_name][9][0] + data[kpts_name][12][0])/2
                data[kpts_name][8][1] = (data[kpts_name][9][1] + data[kpts_name][12][1])/2
    elif select[bbox_name] == -1 and select['joints'] != -1:
        select['joints'] = -1
def set_unvisible(self, param, **kwargs):
    "set the selected joints unvisible"
    bbox_name = param['bbox_name']
    kpts_name = param['kpts_name']
    sel_bbox = param['select'][bbox_name]
    sel_joint = param['select']['joints']
    if sel_bbox == -1 or sel_joint == -1:
        return 0
    # zero the confidence channel of the selected joint
    param['annots']['annots'][sel_bbox][kpts_name][sel_joint][-1] = 0.
def set_unvisible_according_previous(self, param, **kwargs):
    "set the selected joints unvisible if previous unvisible"
    previous = self.previous()
    bbox_name = param['bbox_name']
    kpts_name = param['kpts_name']
    sel = param['select'][bbox_name]
    if sel == -1:
        return 0
    record = param['annots']['annots'][sel]
    pid = record['personID']
    kpts_now = record[kpts_name]
    # clamp each joint's confidence by the same person's previous-frame value
    for prev_rec in previous['annots']:
        if prev_rec['personID'] != pid:
            continue
        kpts_old = prev_rec[kpts_name]
        for nj in range(len(kpts_old)):
            kpts_now[nj][2] = min(kpts_old[nj][2], kpts_now[nj][2])
def mirror_keypoints2d(self, param, **kwargs):
    "mirror the keypoints2d"
    bbox_name = param['bbox_name']
    kpts_name = param['kpts_name']
    sel = param['select'][bbox_name]
    if sel == -1:
        return 0
    kpts = param['annots']['annots'][sel][kpts_name]
    # body25 left/right pairs: arms, eyes/ears, legs + feet
    swap_pairs = [(2, 5), (3, 6), (4, 7),
                  (15, 16), (17, 18),
                  (9, 12), (10, 13), (11, 14), (21, 24), (19, 22), (20, 23)]
    for i, j in swap_pairs:
        kpts[i], kpts[j] = kpts[j], kpts[i]
def mirror_keypoints2d_leg(self, param, **kwargs):
    "mirror the keypoints2d of legs and feet"
    bbox_name = param['bbox_name']
    kpts_name = param['kpts_name']
    sel = param['select'][bbox_name]
    if sel == -1:
        return 0
    kpts = param['annots']['annots'][sel][kpts_name]
    # body25 left/right pairs for legs and feet only
    for i, j in [(9, 12), (10, 13), (11, 14), (21, 24), (19, 22), (20, 23)]:
        kpts[i], kpts[j] = kpts[j], kpts[i]
def check_track(self, param):
    "check the tracking keypoints"
    if self.frame == 0:
        return 0
    bbox_name, kpts_name = param['bbox_name'], param['kpts_name']
    annots_pre = self.previous()['annots']
    annots = param['annots']['annots']
    # stop when the person count changed between consecutive frames
    if len(annots) == 0 or len(annots_pre) == 0 or len(annots) != len(annots_pre):
        param['stop'] = True
        return 0
    for cur in annots:
        for prev in annots_pre:
            if prev['personID'] != cur['personID']:
                continue
            l, t, r, b, c = prev[bbox_name][:5]
            bbox_size = max(r - l, b - t)
            k_now = np.array(cur[kpts_name])
            k_pre = np.array(prev[kpts_name])
            # confidence-weighted mean joint displacement, normalized by bbox size
            conf = np.sqrt(k_now[:, -1] * k_pre[:, -1])
            diff = np.linalg.norm(k_now[:, :2] - k_pre[:, :2], axis=-1)
            dist = np.sum(diff * conf, axis=-1) / np.sum(conf, axis=-1) / bbox_size
            print('{}: {:.2f}'.format(cur['personID'], dist))
            if dist > 0.05:
                param['stop'] = True
def annot_example(path, sub, image, annot, step, args):
    """Run the keypoint-annotation GUI for camera *sub*.

    Keys: v / V set joints invisible, c checks tracking, m / M mirror
    keypoints; with --hrnet, e/r/t/y re-detect at 0/90/180/270 degrees and
    j re-detects conditioned on the previous frame.
    """
    # define datasets
    dataset = ImageFolder(path, sub=sub, image=image, annot=annot)
    key_funcs = {
        'v': set_unvisible,
        'V': set_unvisible_according_previous,
        # 'f': set_face_unvisible,
        'c': check_track,
        'm': mirror_keypoints2d,
        'M': mirror_keypoints2d_leg,
    }
    if args.hrnet:
        estimator = Estimator()
        key_funcs['e'] = estimator.detect_with_bbox
        key_funcs['r'] = estimator.detect_with_bbox90
        key_funcs['t'] = estimator.detect_with_bbox180
        key_funcs['y'] = estimator.detect_with_bbox270
        # key_funcs['g'] = estimator.detect_with_previous_slow
        key_funcs['j'] = estimator.detect_with_previous_mid
    # callback of bounding box
    callbacks = [callback_select_bbox_corner, callback_select_bbox_center, callback_select_joints]
    # callback of keypoints
    # define visualize
    vis_funcs = [plot_skeleton_factory('body25'), vis_bbox, vis_active_bbox]
    if args.hand:
        vis_funcs += [plot_bbox_factory('bbox_handl2d'), plot_bbox_factory('bbox_handr2d'), plot_bbox_factory('bbox_face2d')]
        vis_funcs += [plot_skeleton_factory('handl'), plot_skeleton_factory('handr'), plot_skeleton_factory('face')]
    vis_funcs += [resize_to_screen, plot_text, capture_screen]
    # construct annotations
    annotator = AnnotBase(
        dataset=dataset,
        key_funcs=key_funcs,
        vis_funcs=vis_funcs,
        callbacks=callbacks,
        name=sub,
        step=step)
    while annotator.isOpen:
        annotator.run()
from easymocap.annotator.basic_visualize import plot_text, resize_to_screen, vis_bbox, vis_line
from easymocap.mytools.debug_utils import mywarn
from easymocap.mytools.vis_base import plot_point
from easymocap.annotator import ImageFolder
from easymocap.annotator import vis_point
from easymocap.annotator import AnnotBase
from easymocap.mytools import read_json, save_json
from easymocap.mytools import plot_cross, plot_line, get_rgb
import numpy as np
import cv2
from tqdm import tqdm
from os.path import join
import os
from easymocap.annotator.chessboard import get_lines_chessboard, colors_chessboard_bar, create_chessboard
from easymocap.annotator.vanish_callback import calc_vanishpoint
class Matcher:
    """Interactive labeller for calibration keypoints (chessboard or custom).

    Keeps a cursor ``self.cnt`` over the nJoints calibration points and a
    small cache of user-drawn lines whose intersection can define a point.
    """
    def __init__(self, path, mode, args) -> None:
        if mode == 'chessboard':
            # regular chessboard: points and connecting lines from the pattern
            pattern = args.pattern
            lines, lines_color = get_lines_chessboard(pattern)
            self.nJoints = pattern[0]*pattern[1]
        else:
            # custom rig: 3D points and lines come from <path>/calib.json
            annots = read_json(join(path, 'calib.json'))
            lines = annots['lines']
            if 'lines_color' in annots.keys():
                lines_color = annots['lines_color']
            else:
                lines_color = [colors_chessboard_bar[i%len(colors_chessboard_bar)] for i in range(len(lines))]
            keypoints3d = np.array(annots['keypoints3d'])
            create_chessboard(path, keypoints3d, out=args.annot)
            self.nJoints = len(keypoints3d)
        self.lines = lines
        self.lines_color = lines_color
        self.cache_lines = []  # up to two user-drawn lines for intersection
        self.cnt = -1          # index of the point currently being labelled
        self.hint()
    def hint(self):
        # advance the cursor (wrapping) and announce the next point to label
        self.cnt = (self.cnt + 1)%self.nJoints
        print('>>> label point {}'.format(self.cnt))
    def back(self, annotator, param, conf=1.):
        "switch to previous points"
        self.cnt -= 2
        self.hint()
    def add(self, annotator, param, conf=1.):
        "switch to next points"
        click = param['click']
        if click is not None:
            # record the clicked position with the given confidence
            param['annots']['keypoints2d'][self.cnt] = [click[0], click[1], conf]
            param['annots']['visited'] = True
            param['click'] = None
            self.hint()
    def add_conf(self, annotator, param):
        """Record the clicked point with a reduced confidence of 0.5."""
        self.add(annotator, param, conf=0.5)
    def add_point_by_2lines(self, annotator, param):
        """Define a click as the intersection of two user-drawn lines."""
        start = param['start']
        end = param['end']
        if start is None:
            return 0
        if len(self.cache_lines) < 2:
            self.cache_lines.append((start, end))
            param['start'] = None
            param['end'] = None
        if len(self.cache_lines) == 2:
            # calculate intersect
            inp = np.zeros((2, 2, 3)) # 2, points, (x, y, c)
            for i in range(len(self.cache_lines)):
                start, end = self.cache_lines[i]
                inp[0, i, 0] = start[0]
                inp[0, i, 1] = start[1]
                inp[0, i, 2] = 1.
                inp[1, i, 0] = end[0]
                inp[1, i, 1] = end[1]
                inp[1, i, 2] = 1.
            intersect = calc_vanishpoint(inp)
            click = (int(intersect[0]), int(intersect[1]))
            param['click'] = click
            self.cache_lines = []
    def clear(self, annotator, param):
        "clear all points"
        for i in range(self.nJoints):
            param['annots']['keypoints2d'][i][2] = 0.
        # reset the cursor so hint() wraps back to point 0
        self.cnt = self.nJoints - 1
        self.hint()
    def clear_point(self, annotator, param):
        "clear current points"
        param['annots']['keypoints2d'][self.cnt][2] = 0.
    def vis(self, img, annots, **kwargs):
        """Overlay labelled points, connecting lines and the current cursor."""
        border = 100
        text_size = 2
        width = 5
        cv2.putText(img, 'Current {}: {:.0f}'.format(self.cnt, annots['keypoints2d'][self.cnt][2]), (border, border), cv2.FONT_HERSHEY_SIMPLEX, text_size, (0, 0, 255), width)
        if kwargs['click'] is not None:
            return img
        lw = max(int(round(img.shape[0]/500)), 1)
        width = lw * 5
        k2d = np.array(annots['keypoints2d'])
        # cache lines
        for nl, (start, end) in enumerate(self.cache_lines):
            plot_line(img, start, end, lw, (255, 255, 200))
        for nl, (i, j) in enumerate(self.lines):
            if k2d[i][2] > 0 and k2d[j][2] > 0:
                plot_line(img, k2d[i], k2d[j], lw, self.lines_color[nl])
        for i, (x, y, c) in enumerate(k2d):
            if c > 0:
                plot_cross(img, x, y, self.lines_color[min(len(self.lines)-1, i)], width=width, lw=lw)
                plot_point(img, x, y, r=lw*2, col=self.lines_color[min(len(self.lines)-1, i)], pid=i)
            if i == self.cnt:
                plot_point(img, x, y, r=lw*16, col=(127, 127, 255), pid=-1, circle_type=lw*2)
        return img
    def print(self, annotator, **kwargs):
        # NOTE(review): `self.annots` is never assigned in __init__ (annots is
        # a local there), so this raises AttributeError -- confirm intent.
        print(self.annots)
    def detect(self, annotator, param):
        "detect chessboard"
        # re-detect inside the user-drawn crop and shift results back
        start = param['start']
        end = param['end']
        if start is None:
            return 0
        import cv2
        crop = param['img0'][start[1]:end[1], start[0]:end[0]]
        from easymocap.annotator.chessboard import findChessboardCorners
        # NOTE(review): uses the module-level `args.pattern`, not a parameter
        pattern = args.pattern
        annots = {'visited':False, 'keypoints2d':np.zeros((pattern[0]*pattern[1], 3))}
        print('Redetect the chessboard...')
        if False:
            show = findChessboardCorners(crop, annots, args.pattern, debug=True)
        else:
            self.detect_charuco(crop, annots)
        k2d = annots['keypoints2d']
        for i in range(self.nJoints):
            param['annots']['keypoints2d'][i][0] = k2d[i][0] + start[0]
            param['annots']['keypoints2d'][i][1] = k2d[i][1] + start[1]
            param['annots']['keypoints2d'][i][2] = k2d[i][2]
    def detect_charuco(self, crop, annots):
        """Detect a hard-coded 6x4 ChArUco board inside *crop*."""
        cfg = {
            'long': 6,
            'short': 4,
            'squareLength': 0.128,
            'aruco_len': 0.1,
            'aruco_type': '4X4_50'
        }
        from easymocap.annotator.chessboard import CharucoBoard
        board = CharucoBoard(**cfg)
        board.detect(crop, annots)
def vis_line(img, start, end, **kwargs):
    """Draw a green segment from *start* to *end*; no-op if either is None."""
    if start is None or end is None:
        return img
    lw = max(2, img.shape[0]//500)
    cv2.line(img, (int(start[0]), int(start[1])),
        (int(end[0]), int(end[1])), (0, 255, 0), lw)
    return img
def vis_bbox(img, start, end, **kwargs):
    """Draw a green rectangle between *start* and *end*; no-op if either is None."""
    if start is None or end is None:
        return img
    lw = max(2, img.shape[0]//500)
    cv2.rectangle(img, (int(start[0]), int(start[1])),
        (int(end[0]), int(end[1])), (0, 255, 0), lw)
    return img
def resize_to_screen(img, scale=1, **kwargs):
    """Uniformly rescale *img* by ``scale``; extra kwargs are ignored."""
    return cv2.resize(img, None, fx=scale, fy=scale)
def plot_text(img, annots, imgname, **kwargs):
    """Overlay the image name, keyframe border, and the frame progress bar."""
    if 'isKeyframe' in annots.keys():
        if annots['isKeyframe']: # key frames get a red border
            cv2.rectangle(img, (0, 0), (img.shape[1], img.shape[0]), (0, 0, 255), img.shape[1]//100)
        else: # non-key frames get a green border
            cv2.rectangle(img, (0, 0), (img.shape[1], img.shape[0]), (0, 255, 0), img.shape[1]//100)
    imgname = '/'.join(imgname.split(os.sep)[-3:])
    text_size = int(max(1, img.shape[0]//1500))
    border = 20 * text_size
    width = 2 * text_size
    cv2.putText(img, '{}'.format(imgname), (border, img.shape[0]-border), cv2.FONT_HERSHEY_SIMPLEX, text_size, (0, 0, 255), width)
    # draw the annotation progress bar:
    if 'frame' in kwargs.keys():
        width = img.shape[1]
        frame, nFrames = kwargs['frame'], kwargs['nFrames']
        lw = 12
        pos = lambda x: int(width*(x+1)/nFrames)
        COL_ALL = (0, 255, 0)
        COL_CUR = (255, 0, 0)
        COL_PIN = (255, 128, 128)
        plot_line(img, (0, lw/2), (width, lw/2), lw, COL_ALL)
        plot_line(img, (0, lw/2), (pos(frame), lw/2), lw, COL_CUR)
        # triangular pin marking the current frame
        top = pos(frame)
        pts = np.array([[top, lw], [top-lw, lw*4], [top+lw, lw*4]])
        cv2.fillPoly(img, [pts], COL_PIN)
    return img
def annot_example(path, sub, args):
    """Run the calibration-point annotation GUI for camera folder *sub*."""
    # labeller state (chessboard pattern or custom calib.json)
    matcher = Matcher(path, mode=args.mode, args=args)
    folder = ImageFolder(path, image=args.image, sub=sub,
        annot=args.annot, no_annot=False, ext=args.ext,
        share_annot=True,
        max_per_folder=-1)
    # drawing pipeline
    drawers = [vis_point, vis_line, vis_bbox, matcher.vis, resize_to_screen, plot_text]
    # keyboard bindings
    hotkeys = {
        ' ': matcher.add,
        'b': matcher.back,
        'z': matcher.add_conf,
        'p': matcher.add_point_by_2lines,
        'c': matcher.clear_point,
        'C': matcher.clear,
        'x': matcher.clear,
        'e': matcher.detect,
    }
    gui = AnnotBase(
        dataset=folder,
        key_funcs=hotkeys,
        vis_funcs=drawers)
    while gui.isOpen:
        gui.run()
from easymocap.annotator.file_utils import read_json, save_annot
from easymocap.annotator import ImageFolder
from easymocap.annotator import plot_text, vis_active_bbox, vis_line, plot_skeleton
from easymocap.annotator import AnnotBase
from easymocap.annotator.vanish_callback import get_record_vanish_lines, get_calc_intrinsic, clear_vanish_points, vanish_point_from_body, copy_edges, clear_body_points
from easymocap.annotator.vanish_visualize import vis_vanish_lines
edges_cache = {}
def copy_edges_from_cache(self, param, **kwargs):
def clear_vanish_points(self, param):
def clear_body_points(self, param):
def get_record_vanish_lines(index):
def vanish_point_from_body(self, param, **kwargs):
def copy_edges(self, param, **kwargs):
def get_calc_intrinsic(mode='xy'):
def vis_vanish_lines(img, annots, **kwargs):
def annot_example(path, annot, sub=None, step=100):
    """Annotate vanishing lines/points for intrinsic calibration.

    Automatically triggers X/Y/Z line recording and 'k' (intrinsic from xy)
    once, then enters the interactive loop; afterwards caches the vanishing
    lines/points into the module-level ``edges_cache``.
    """
    # define datasets
    dataset = ImageFolder(path, sub=sub, annot=annot)
    key_funcs = {
        'X': get_record_vanish_lines(0),
        'Y': get_record_vanish_lines(1),
        'Z': get_record_vanish_lines(2),
        'k': get_calc_intrinsic('xy'),
        'K': get_calc_intrinsic('yz'),
        'b': vanish_point_from_body,
        'C': clear_vanish_points,
        'B': clear_body_points,
        'c': copy_edges_from_cache,
        'v': copy_edges
    }
    # define visualize
    vis_funcs = [vis_line, plot_skeleton, vis_vanish_lines, plot_text]
    # construct annotations
    annotator = AnnotBase(
        dataset=dataset,
        key_funcs=key_funcs,
        vis_funcs=vis_funcs,
        step=step)
    annots = annotator.param['annots']
    print(sub)
    annotator.run('X')
    annotator.run('Y')
    annotator.run('Z')
    annotator.run('k')
    # NOTE(review): `and False` makes this branch dead code, so the function
    # always continues into the interactive loop -- confirm intent.
    if 'K' in annots.keys() and False:
        print('\n'.join([' '.join(['{:7.2f}'.format(i) for i in row]) for row in annots['K']]))
        return 0
    else:
        print('K is not caculated')
    while annotator.isOpen:
        annotator.run()
    # stash results so other views can copy them via copy_edges_from_cache
    for key in ['vanish_line', 'vanish_point']:
        edges_cache[key] = annotator.param['annots'][key]
import os
from os.path import join
from easymocap.annotator import ImageFolder
from easymocap.annotator import plot_text, plot_bbox_body, vis_active_bbox, vis_line
from easymocap.annotator import AnnotBase
from easymocap.annotator import callback_select_bbox_corner, callback_select_bbox_center, auto_pose_track
def annot_example(path, subs, annot, step):
    """Run the bbox-tracking annotation GUI once per camera in *subs*."""
    for cam in subs:
        # dataset for this camera
        folder = ImageFolder(path, sub=cam, annot=annot)
        hotkeys = {
            't': auto_pose_track
        }
        mouse_callbacks = [callback_select_bbox_corner, callback_select_bbox_center]
        # drawing pipeline
        drawers = [vis_line, plot_bbox_body, vis_active_bbox]
        gui = AnnotBase(
            dataset=folder,
            key_funcs=hotkeys,
            vis_funcs=drawers,
            callbacks=mouse_callbacks,
            name=cam,
            step=step)
        while gui.isOpen:
            gui.run()
from easymocap.annotator import ImageFolder
from easymocap.annotator import vis_point, vis_line
from easymocap.annotator import AnnotBase
def annot_example(path):
    """Minimal annotation loop over *path* with point/line visualization only."""
    gui = AnnotBase(
        dataset=ImageFolder(path),
        key_funcs={},
        vis_funcs=[vis_point, vis_line])
    while gui.isOpen:
        gui.run()
from os.path import join
from easymocap.config import Config, load_object
from easymocap.config.baseconfig import load_config_from_index, load_object_from_cmd
from easymocap.mytools.debug_utils import mywarn, log, myerror
from tqdm import tqdm
from easymocap.mytools import Timer
def load_object_from_cmd(cfg, opt):
    """Instantiate the module described by config file *cfg* with CLI overrides *opt*."""
    parsed = Config.load(cfg, opt)
    return load_object(parsed.module, parsed.args)
def vis(cfg):
    """Render fitting results frame by frame and optionally make a video.

    NOTE(review): relies on a module-level ``args.model`` — confirm the
    argument parser runs before this function is called.
    """
    # load the body model, then the result/input/output handlers
    body_model = load_object_from_cmd(args.model, [])
    results = load_object(cfg.result_module, cfg.result_args, body_model=body_model)
    inputs = load_object(cfg.input_module, cfg.input_args)
    outputs = load_object(cfg.output_module, cfg.output_args)
    silent = True
    start = cfg.ranges[0]
    stop = min(cfg.ranges[1], len(results))
    for nf in tqdm(range(start, stop, cfg.ranges[2]), desc='vis'):
        with Timer('result', silent):
            basename, result = results[nf]
        with Timer('inputs', silent):
            images, cameras = inputs(basename)
        with Timer('outputs', silent):
            outputs(images, result, cameras, basename)
    if cfg.make_video:
        video_maker = load_object(cfg.video_module, cfg.video_args)
        video_maker.make_video(cfg.output_args.out)
13,382 | from easymocap.config.baseconfig import load_object, Config
from easymocap.mytools import Timer
from easymocap.mytools.file_utils import save_json, write_keypoints3d, write_vertices
from easymocap.mytools.reader import read_smpl
from easymocap.bodymodel.base import Params
from os.path import join
from glob import glob
from tqdm import tqdm
import os
def write_func(tasks):
    """Execute queued write tasks; each task is (writer, filename, payload)."""
    for writer, name, payload in tqdm(tasks):
        writer(name, payload)
13,383 | import random
import os
import time
import datetime as dt
nfns = 4
nargs = 4
def generate_dummy_code_pybind11(nclasses=10, nfns=4, nargs=4):
    """Emit synthetic C++ source with pybind11 bindings for benchmarking.

    Generates ``nclasses`` classes, each with ``nfns`` methods whose return
    and parameter types are drawn randomly (via the ``random`` module) from
    the generated classes.

    The former module-global constants ``nfns``/``nargs`` are now keyword
    parameters with the same defaults, so existing callers are unaffected.
    """
    decl = ""
    bindings = ""
    # forward declarations so method signatures can reference any class
    for cl in range(nclasses):
        decl += "class cl%03i;\n" % cl
    decl += '\n'
    for cl in range(nclasses):
        decl += "class cl%03i {\n" % cl
        decl += "public:\n"
        bindings += '    py::class_<cl%03i>(m, "cl%03i")\n' % (cl, cl)
        for fn in range(nfns):
            ret = random.randint(0, nclasses - 1)
            params = [random.randint(0, nclasses - 1) for i in range(nargs)]
            decl += "    cl%03i *fn_%03i(" % (ret, fn)
            decl += ", ".join("cl%03i *" % p for p in params)
            decl += ");\n"
            bindings += '        .def("fn_%03i", &cl%03i::fn_%03i)\n' % \
                (fn, cl, fn)
        decl += "};\n\n"
        bindings += '    ;\n'
    result = "#include <pybind11/pybind11.h>\n\n"
    result += "namespace py = pybind11;\n\n"
    result += decl + '\n'
    result += "PYBIND11_MODULE(example, m) {\n"
    result += bindings
    result += "}"
    return result
13,384 | import random
import os
import time
import datetime as dt
nfns = 4
nargs = 4
def generate_dummy_code_boost(nclasses=10, nfns=4, nargs=4):
    """Emit synthetic C++ source with Boost.Python bindings for benchmarking.

    Mirrors :func:`generate_dummy_code_pybind11` but targets Boost.Python;
    the former module-global constants ``nfns``/``nargs`` are now keyword
    parameters with the same defaults, so existing callers are unaffected.
    """
    decl = ""
    bindings = ""
    # forward declarations so method signatures can reference any class
    for cl in range(nclasses):
        decl += "class cl%03i;\n" % cl
    decl += '\n'
    for cl in range(nclasses):
        decl += "class cl%03i {\n" % cl
        decl += "public:\n"
        bindings += '    py::class_<cl%03i>("cl%03i")\n' % (cl, cl)
        for fn in range(nfns):
            ret = random.randint(0, nclasses - 1)
            params = [random.randint(0, nclasses - 1) for i in range(nargs)]
            decl += "    cl%03i *fn_%03i(" % (ret, fn)
            decl += ", ".join("cl%03i *" % p for p in params)
            decl += ");\n"
            bindings += '        .def("fn_%03i", &cl%03i::fn_%03i, py::return_value_policy<py::manage_new_object>())\n' % \
                (fn, cl, fn)
        decl += "};\n\n"
        bindings += '    ;\n'
    result = "#include <boost/python.hpp>\n\n"
    result += "namespace py = boost::python;\n\n"
    result += decl + '\n'
    result += "BOOST_PYTHON_MODULE(example) {\n"
    result += bindings
    result += "}"
    return result
13,385 | import sys
import os
import shlex
import subprocess
def generate_doxygen_xml(app):
    """Run doxygen in the Sphinx config dir to (re)build the XML output.

    Creates the ``.build`` scratch directory if missing. Failures are
    reported on stderr rather than aborting the Sphinx build.
    """
    build_dir = os.path.join(app.confdir, '.build')
    if not os.path.exists(build_dir):
        os.mkdir(build_dir)
    try:
        subprocess.call(['doxygen', '--version'])
        retcode = subprocess.call(['doxygen'], cwd=app.confdir)
        # report any failure, not only death-by-signal (negative codes):
        # a doxygen parse error exits with a positive code and was
        # previously swallowed silently
        if retcode != 0:
            sys.stderr.write("doxygen error code: {}\n".format(retcode))
    except OSError as e:
        sys.stderr.write("doxygen execution failed: {}\n".format(e))
The provided code snippet includes necessary dependencies for implementing the `setup` function. Write a Python function `def setup(app)` to solve the following problem:
Add hook for building doxygen xml when needed
Here is the function:
def setup(app):
    """Add hook for building doxygen xml when needed"""
    # regenerate the doxygen XML as soon as the Sphinx builder is initialized
    app.connect("builder-inited", generate_doxygen_xml)
13,386 | from __future__ import print_function
import argparse
import sys
import sysconfig
from . import get_include
def print_includes():
    """Print compiler ``-I`` flags for the Python and pybind11 headers."""
    candidates = [sysconfig.get_path('include'),
                  sysconfig.get_path('platinclude'),
                  get_include()]
    # de-duplicate while keeping the original order
    unique_dirs = []
    for path in candidates:
        if path not in unique_dirs:
            unique_dirs.append(path)
    print(' '.join('-I' + path for path in unique_dirs))
13,387 | import os
import sys
import platform
import re
import textwrap
from clang import cindex
from clang.cindex import CursorKind
from collections import OrderedDict
from glob import glob
from threading import Thread, Semaphore
from multiprocessing import cpu_count
RECURSE_LIST = [
CursorKind.TRANSLATION_UNIT,
CursorKind.NAMESPACE,
CursorKind.CLASS_DECL,
CursorKind.STRUCT_DECL,
CursorKind.ENUM_DECL,
CursorKind.CLASS_TEMPLATE
]
PRINT_LIST = [
CursorKind.CLASS_DECL,
CursorKind.STRUCT_DECL,
CursorKind.ENUM_DECL,
CursorKind.ENUM_CONSTANT_DECL,
CursorKind.CLASS_TEMPLATE,
CursorKind.FUNCTION_DECL,
CursorKind.FUNCTION_TEMPLATE,
CursorKind.CONVERSION_FUNCTION,
CursorKind.CXX_METHOD,
CursorKind.CONSTRUCTOR,
CursorKind.FIELD_DECL
]
PREFIX_BLACKLIST = [
CursorKind.TRANSLATION_UNIT
]
def d(s):
    """Coerce *s* to ``str``, decoding bytes as UTF-8 when necessary."""
    if isinstance(s, str):
        return s
    return s.decode('utf8')
def sanitize_name(name):
    """Mangle a C++ qualified name into a ``__doc_*`` macro identifier."""
    name = re.sub(r'type-parameter-0-([0-9]+)', r'T\1', name)
    # spell out operators, e.g. operator== -> operator_eq
    for op, alias in CPP_OPERATORS.items():
        name = name.replace('operator%s' % op, 'operator_%s' % alias)
    name = re.sub('<.*>', '', name)  # drop template arguments
    name = ''.join(ch if ch.isalnum() else '_' for ch in name)
    # collapse runs of '_' and strip a trailing one
    name = re.sub('_$', '', re.sub('_+', '_', name))
    return '__doc_' + name
def process_comment(comment):
    """Convert a raw C++ doc comment into reflowed plain text.

    Strips the comment markers, rewrites common Doxygen and HTML/TeX tags
    into Markdown-ish markup, then re-wraps paragraphs to 70 columns while
    leaving fenced ``` code segments untouched.
    """
    result = ''
    # Remove C++ comment syntax
    leading_spaces = float('inf')
    for s in comment.expandtabs(tabsize=4).splitlines():
        s = s.strip()
        if s.startswith('/*'):
            s = s[2:].lstrip('*')
        elif s.endswith('*/'):
            s = s[:-2].rstrip('*')
        elif s.startswith('///'):
            s = s[3:]
        if s.startswith('*'):
            s = s[1:]
        if len(s) > 0:
            leading_spaces = min(leading_spaces, len(s) - len(s.lstrip()))
        result += s + '\n'
    # strip the common indentation discovered above
    if leading_spaces != float('inf'):
        result2 = ""
        for s in result.splitlines():
            result2 += s[leading_spaces:] + '\n'
        result = result2
    # Doxygen tags
    cpp_group = '([\w:]+)'
    param_group = '([\[\w:\]]+)'
    s = result
    s = re.sub(r'\\c\s+%s' % cpp_group, r'``\1``', s)
    s = re.sub(r'\\a\s+%s' % cpp_group, r'*\1*', s)
    s = re.sub(r'\\e\s+%s' % cpp_group, r'*\1*', s)
    s = re.sub(r'\\em\s+%s' % cpp_group, r'*\1*', s)
    s = re.sub(r'\\b\s+%s' % cpp_group, r'**\1**', s)
    s = re.sub(r'\\ingroup\s+%s' % cpp_group, r'', s)
    # a leading '$' marks a heading for the re-flow stage below
    s = re.sub(r'\\param%s?\s+%s' % (param_group, cpp_group),
               r'\n\n$Parameter ``\2``:\n\n', s)
    s = re.sub(r'\\tparam%s?\s+%s' % (param_group, cpp_group),
               r'\n\n$Template parameter ``\2``:\n\n', s)
    for in_, out_ in {
        'return': 'Returns',
        'author': 'Author',
        'authors': 'Authors',
        'copyright': 'Copyright',
        'date': 'Date',
        'remark': 'Remark',
        'sa': 'See also',
        'see': 'See also',
        'extends': 'Extends',
        'throw': 'Throws',
        'throws': 'Throws'
    }.items():
        s = re.sub(r'\\%s\s*' % in_, r'\n\n$%s:\n\n' % out_, s)
    s = re.sub(r'\\details\s*', r'\n\n', s)
    s = re.sub(r'\\brief\s*', r'', s)
    s = re.sub(r'\\short\s*', r'', s)
    s = re.sub(r'\\ref\s*', r'', s)
    s = re.sub(r'\\code\s?(.*?)\s?\\endcode',
               r"```\n\1\n```\n", s, flags=re.DOTALL)
    # HTML/TeX tags
    s = re.sub(r'<tt>(.*?)</tt>', r'``\1``', s, flags=re.DOTALL)
    s = re.sub(r'<pre>(.*?)</pre>', r"```\n\1\n```\n", s, flags=re.DOTALL)
    s = re.sub(r'<em>(.*?)</em>', r'*\1*', s, flags=re.DOTALL)
    s = re.sub(r'<b>(.*?)</b>', r'**\1**', s, flags=re.DOTALL)
    s = re.sub(r'\\f\$(.*?)\\f\$', r'$\1$', s, flags=re.DOTALL)
    s = re.sub(r'<li>', r'\n\n* ', s)
    s = re.sub(r'</?ul>', r'', s)
    s = re.sub(r'</li>', r'\n\n', s)
    s = s.replace('``true``', '``True``')
    s = s.replace('``false``', '``False``')
    # Re-flow text
    wrapper = textwrap.TextWrapper()
    wrapper.expand_tabs = True
    wrapper.replace_whitespace = True
    wrapper.drop_whitespace = True
    wrapper.width = 70
    wrapper.initial_indent = wrapper.subsequent_indent = ''
    result = ''
    in_code_segment = False
    for x in re.split(r'(```)', s):
        if x == '```':
            if not in_code_segment:
                result += '```\n'
            else:
                result += '\n```\n\n'
            in_code_segment = not in_code_segment
        elif in_code_segment:
            # code blocks pass through unwrapped
            result += x.strip()
        else:
            for y in re.split(r'(?: *\n *){2,}', x):
                wrapped = wrapper.fill(re.sub(r'\s+', ' ', y).strip())
                if len(wrapped) > 0 and wrapped[0] == '$':
                    # heading line: drop the '$' and indent what follows
                    result += wrapped[1:] + '\n'
                    wrapper.initial_indent = \
                        wrapper.subsequent_indent = ' ' * 4
                else:
                    if len(wrapped) > 0:
                        result += wrapped + '\n\n'
                    wrapper.initial_indent = wrapper.subsequent_indent = ''
    return result.rstrip().lstrip('\n')
def extract(filename, node, prefix, output):
    """Recursively walk a libclang AST node and collect documentation.

    Appends ``(macro_name, filename, processed_comment)`` tuples to
    *output* for every printable declaration defined inside *filename*.
    """
    # skip declarations pulled in from other files (e.g. via #include)
    if not (node.location.file is None or
            os.path.samefile(d(node.location.file.name), filename)):
        return 0
    if node.kind in RECURSE_LIST:
        # extend the name prefix with this scope (unless blacklisted,
        # e.g. the translation unit itself) and recurse into children
        sub_prefix = prefix
        if node.kind not in PREFIX_BLACKLIST:
            if len(sub_prefix) > 0:
                sub_prefix += '_'
            sub_prefix += d(node.spelling)
        for i in node.get_children():
            extract(filename, i, sub_prefix, output)
    if node.kind in PRINT_LIST:
        comment = d(node.raw_comment) if node.raw_comment is not None else ''
        comment = process_comment(comment)
        sub_prefix = prefix
        if len(sub_prefix) > 0:
            sub_prefix += '_'
        # anonymous declarations (empty spelling) are skipped
        if len(node.spelling) > 0:
            name = sanitize_name(sub_prefix + d(node.spelling))
            output.append((name, filename, comment))
13,388 | import os
import sys
import platform
import re
import textwrap
from clang import cindex
from clang.cindex import CursorKind
from collections import OrderedDict
from glob import glob
from threading import Thread, Semaphore
from multiprocessing import cpu_count
def extract_all(args):
    """Spawn one extraction thread per input file and gather their output."""
    parameters, filenames = read_args(args)
    output = []
    for filename in filenames:
        ExtractionThread(filename, parameters, output).start()
    print('Waiting for jobs to finish ..', file=sys.stderr)
    # each finished job releases the semaphore exactly once
    for _ in range(job_count):
        job_semaphore.acquire()
    return output
def write_header(comments, out_file=sys.stdout):
    """Emit the generated C header: docstring variables plus DOC() macros.

    Duplicate macro names (overloads) get a numeric ``_2``, ``_3`` suffix.
    """
    print('''/*
  This file contains docstrings for the Python bindings.
  Do not edit! These were automatically extracted by mkdoc.py
 */
#define __EXPAND(x)                                      x
#define __COUNT(_1, _2, _3, _4, _5, _6, _7, COUNT, ...)  COUNT
#define __VA_SIZE(...)                                   __EXPAND(__COUNT(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1))
#define __CAT1(a, b)                                     a ## b
#define __CAT2(a, b)                                     __CAT1(a, b)
#define __DOC1(n1)                                       __doc_##n1
#define __DOC2(n1, n2)                                   __doc_##n1##_##n2
#define __DOC3(n1, n2, n3)                               __doc_##n1##_##n2##_##n3
#define __DOC4(n1, n2, n3, n4)                           __doc_##n1##_##n2##_##n3##_##n4
#define __DOC5(n1, n2, n3, n4, n5)                       __doc_##n1##_##n2##_##n3##_##n4##_##n5
#define __DOC6(n1, n2, n3, n4, n5, n6)                   __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6
#define __DOC7(n1, n2, n3, n4, n5, n6, n7)               __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6##_##n7
#define DOC(...)                                         __EXPAND(__EXPAND(__CAT2(__DOC, __VA_SIZE(__VA_ARGS__)))(__VA_ARGS__))
#if defined(__GNUG__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-variable"
#endif
''', file=out_file)
    counter = 1
    previous = None
    for name, _, comment in sorted(comments, key=lambda entry: (entry[0], entry[1])):
        # disambiguate repeated names by appending a running counter
        if name == previous:
            counter += 1
            name = name + "_%i" % counter
        else:
            previous = name
            counter = 1
        separator = '\n' if '\n' in comment else ' '
        print('\nstatic const char *%s =%sR"doc(%s)doc";' %
              (name, separator, comment), file=out_file)
    print('''
#if defined(__GNUG__)
#pragma GCC diagnostic pop
#endif
''', file=out_file)
def mkdoc(args):
    """Entry point: parse an optional ``-o`` flag, extract comments, write header.

    All remaining arguments are forwarded to the clang-based extractor.
    Writes to stdout unless an output path was given.
    """
    args = list(args)
    out_path = None
    for idx, arg in enumerate(args):
        if arg.startswith("-o"):
            args.remove(arg)
            try:
                # accept both "-oFILE" and "-o FILE"
                out_path = arg[2:] or args.pop(idx)
            except IndexError:
                print("-o flag requires an argument")
                exit(-1)
            break
    comments = extract_all(args)
    if out_path:
        try:
            with open(out_path, 'w') as out_file:
                write_header(comments, out_file)
        # explicit BaseException (was a bare except): also covers Ctrl-C,
        # and we always re-raise after cleaning up
        except BaseException:
            # In the event of an error, don't leave a partially-written
            # output file.
            try:
                os.unlink(out_path)
            except OSError:
                # narrow the silent catch: only ignore filesystem errors
                pass
            raise
    else:
        write_header(comments)
13,389 | import os
import shutil
import yaml
from os.path import join
from easymocap.mytools.debug_utils import log, mywarn
# NOTE(review): the definitions below appear truncated in this extract --
# only the signatures survived; the bodies must live elsewhere. Confirm
# against the original source before relying on them.
def compare_files(file1, file2):
    def log(text):
    def mywarn(text):
def copy_node(dir, nodes):
    """Mirror the listed files/dirs from SRC/<dir> into DST/<dir>.

    *nodes* mixes plain names (copied directly) and dicts mapping a
    subdirectory name to its own node list (handled recursively).
    Existing identical files are left alone; differing files are
    overwritten; existing directories are never overwritten.
    """
    for node in nodes:
        if isinstance(node, dict):
            # recurse into each listed subdirectory
            for subdir, children in node.items():
                copy_node(join(dir, subdir), children)
            continue
        srcname = join(SRC, dir, node)
        dstname = join(DST, dir, node)
        if not os.path.exists(srcname):
            mywarn('Not exists {}'.format(srcname))
            continue
        if os.path.exists(dstname):
            if os.path.isdir(srcname):
                mywarn('Current not support overwrite folders: {}'.format(dstname))
            elif not compare_files(srcname, dstname):
                mywarn('Overwrite file: {}'.format(dstname))
                shutil.copyfile(srcname, dstname)
        elif os.path.isdir(srcname):
            log('Copy dir: {}'.format(dstname))
            shutil.copytree(srcname, dstname)
        else:
            os.makedirs(join(DST, dir), exist_ok=True)
            log('Copy file: {}'.format(dstname))
            shutil.copyfile(srcname, dstname)
13,390 | import re
import numpy as np
import os, sys
import cv2
import shutil
from os.path import join
from tqdm import trange, tqdm
from multiprocessing import Pool
import json
def parseImg(imgname):
    """Parse an image filename into its id and timestamp components.

    Arguments:
        imgname {str} -- a name shaped like
            ``<id>_<YYYY>-<MM>-<DD>_<hh>-<mm>-<ss>.<mmm>``

    Returns:
        dict -- integer fields ``id/year/month/day/hour/min/sec/ms`` plus
        ``time``, the time of day expressed in milliseconds.
    """
    # raw string literal: the old non-raw pattern relied on invalid "\d"
    # escapes, which raise SyntaxWarning on Python >= 3.12
    s = re.search(
        r'(?P<id>\d+)_(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})_(?P<hour>\d{2})-(?P<min>\d{2})-(?P<sec>\d{2})\.(?P<ms>\d{3})',
        imgname)
    assert s is not None, imgname
    dic = s.groupdict()
    for key in dic.keys():
        dic[key] = int(dic[key])
    dic['time'] = dic['ms'] + dic['sec'] * 1000 + dic['min'] * 60000 + dic['hour'] * 60000 * 60
    return dic
def getCamNum(x):
    """Return the camera number: everything after the '_B' marker."""
    parts = x.split('_B')
    return parts[1]
def getImgId(x):
    """Return the fifth underscore-separated field of an image name."""
    fields = x.split('_')
    return fields[4]
def findBeginEnd(images_info):
    """Find the common time window covered by every camera.

    Returns (latest first-frame time, earliest last-frame time).
    """
    begin_time, end_time = 0, np.inf
    for cam in images_info.values():
        start = cam[cam['first_frame']]['time']
        stop = cam[cam['last_frame']]['time']
        begin_time = max(begin_time, start)
        end_time = min(end_time, stop)
    return begin_time, end_time
def findRef(images_info):
    """Pick the camera spanning the fewest frame ids (lowest frame rate)."""
    ref_cam, min_span = 0, np.inf
    for key, cam in images_info.items():
        span = cam[cam['last_frame']]['id'] - cam[cam['first_frame']]['id']
        # strict '<' keeps the first camera on ties, like the original scan
        if span < min_span:
            min_span = span
            ref_cam = key
    return ref_cam
def findNearest(cam_info, time):
    """Return the frame name in *cam_info* whose timestamp is closest to *time*.

    Assumes the dict iterates frames in chronological order; non-dict
    entries (metadata such as 'first_frame') are skipped.
    """
    select_frame = ''
    img_pre = None
    for img in cam_info.keys():
        entry = cam_info[img]
        if not isinstance(entry, dict):
            continue
        if entry['time'] < time:
            # remember the last frame strictly before `time`
            img_pre = img
        else:
            select_frame = img
            break
    # the boundary frame before `time` may actually be the closer one
    if img_pre is not None:
        if abs(time - cam_info[img_pre]['time']) < abs(time - cam_info[img]['time']):
            select_frame = img_pre
    return select_frame
from tabulate import tabulate
def get_filelists(path, save_path):
    """Synchronize multi-camera image folders by timestamp and save the result.

    Chooses a reference camera (lowest frame rate unless ``args.ref`` is
    set), matches each of its frames to the temporally nearest frame of
    every other camera, dumps the matching to ``match_info.json`` and
    copies the matched images into *save_path* renamed as ``%06d.jpg``.

    NOTE(review): depends on module-level ``args``, ``listdir`` and
    ``mkdir`` helpers not visible in this extract -- confirm they exist.
    """
    cameralists = sorted(os.listdir(path), key=lambda x: getCamNum(x))
    images_info = {}
    for camname in cameralists:
        images_info[camname] = {}
        imglists = listdir([path, camname])
        imglists.sort(key=lambda x: getImgId(x))
        for imgname in tqdm(imglists, desc=camname):
            images_info[camname][imgname] = parseImg(imgname)
        images_info[camname]['first_frame'] = imglists[0]
        images_info[camname]['last_frame'] = imglists[-1]
    # find the latest common start time and the earliest common end time
    begin_time, end_time = findBeginEnd(images_info)
    print('begin time: {}, end time: {}'.format(begin_time, end_time))
    # use the camera with the lowest frame rate as the reference
    if args.ref is None:
        ref_cam = findRef(images_info)
    else:
        ref_cam = args.ref
    print('The reference camera is {}'.format(ref_cam))
    # for each reference frame, pick the temporally nearest frame per camera
    output_info = {key: [] for key in cameralists}
    for imgname in tqdm(images_info[ref_cam].keys(), 'sync'):
        if isinstance(images_info[ref_cam][imgname], dict):
            cur_time = images_info[ref_cam][imgname]['time']
            if cur_time < begin_time:
                continue
            if cur_time > end_time:
                break
            for cam in cameralists:
                if cam == ref_cam:
                    select = imgname
                else:
                    select = findNearest(images_info[cam], cur_time)
                output_info[cam].append(select)
    # save the matched images
    mkdir(save_path)
    # save the matching information
    # TODO: also record the per-frame time differences as a quality metric
    import json
    with open(join(save_path, 'match_info.json'), 'w') as f:
        json.dump(output_info, f, indent=4)
    for cam in cameralists:
        mkdir(join(save_path, cam))
        for i, imgname in enumerate(tqdm(output_info[cam], desc=cam)):
            src = join(path, cam, imgname)
            dst = join(save_path, cam, '%06d.jpg' % i)
            img = cv2.imread(src)
            # halve 2048px frames to keep the synchronized copies small
            if img.shape[0] == 2048:
                img = cv2.resize(img, (1024, 1024), cv2.INTER_NEAREST)
            cv2.imwrite(dst, img)
13,391 | import re
import numpy as np
import os, sys
import cv2
import shutil
from os.path import join
from tqdm import trange, tqdm
from multiprocessing import Pool
import json
def save_json(file, data):
    """Serialize *data* as pretty-printed JSON to *file*.

    Creates missing parent directories first. Unlike the previous version
    this also accepts bare filenames: ``os.path.dirname`` then returns ''
    and ``os.makedirs('')`` would raise FileNotFoundError.
    """
    dirname = os.path.dirname(file)
    if dirname:
        # exist_ok avoids the check-then-create race of the old code
        os.makedirs(dirname, exist_ok=True)
    with open(file, 'w') as f:
        json.dump(data, f, indent=4)
def getFileDict(path):
    """List the camera folders under *path* and their sorted jpg files.

    Cameras listed in the module-level ``filter_list`` are excluded
    (e.g. B6, whose hardware sync was unreliable). Returns
    ``(camera_names, {camera: [jpg names]})``.
    """
    cams = sorted(os.listdir(path))
    cams = [cam for cam in cams if os.path.isdir(join(path, cam))]
    cams = [cam for cam in cams
            if cam.startswith('Camera') and cam not in filter_list]
    results = {}
    for cam in cams:
        # lightstage image names sort chronologically as plain strings
        jpgs = sorted(name for name in os.listdir(join(path, cam))
                      if name.endswith('.jpg'))
        results[cam] = jpgs
    return cams, results
def sync_by_name(imagelists, times_all, cams):
    """Match frames across cameras by their parsed timestamps.

    Mutates *imagelists* / *times_all* by dropping frames recorded before
    the latest common start, picks a reference camera (the one closest on
    average to all others, unless ``args.ref`` is set), then matches every
    camera's frames to the reference by nearest timestamp and prints a
    per-camera synchronization report.

    Returns ``(matched, matched_time)``: a (n_views, n_frames) index array
    aligned to the reference timeline and the corresponding timestamps.

    NOTE(review): depends on module-level ``args``; the dead ``if False``
    branches reference undefined helpers (``match_dtw``, ``dimGroups``,
    ``MAX_DIST``) and are kept only for reference.
    """
    # choose the start: the latest first-frame time over all cameras
    start = max([t[0] for t in times_all.values()])
    # pop every frame recorded before the common start (in place)
    for cam in cams:
        times = times_all[cam].tolist()
        while times[0] < start:
            times.pop(0)
            imagelists[cam].pop(0)
        times_all[cam] = np.array(times)
    # pick the reference view whose timestamps are closest to all others
    best_distances = []
    for cam in cams:
        # use the second frame of each camera to leave some margin
        ref_time = times_all[cam][1]
        distances = []
        for c in cams:
            dist = np.abs(times_all[c] - ref_time).min()
            distances.append(dist)
        print('{:10s}: {:.2f}'.format(cam, sum(distances)/len(cams)))
        best_distances.append(sum(distances)/len(cams))
    best_distances = np.array(best_distances)
    ref_view = best_distances.argmin()
    if args.ref is None:
        ref_cam = cams[best_distances.argmin()]
    else:
        ref_cam = args.ref
        ref_view = cams.index(ref_cam)
    times_all = [times_all[cam] for cam in cams]
    print('Select reference view: ', cams[ref_view])
    if False:
        # dead branch: DTW-style matching over a pairwise affinity matrix
        distance = np.eye((dimGroups[-1]))
        for nv0 in range(len(times_all)-1):
            for nv1 in range(nv0+1, len(times_all)):
                dist = np.abs(times_all[nv0][:, None] - times_all[nv1][None, :])
                dist = (MAX_DIST - dist)/MAX_DIST
                dist[dist<0] = 0
                distance[dimGroups[nv0]:dimGroups[nv0+1], dimGroups[nv1]:dimGroups[nv1+1]] = dist
                distance[dimGroups[nv1]:dimGroups[nv1+1], dimGroups[nv0]:dimGroups[nv0+1]] = dist.T
        matched, ref_view = match_dtw(distance, dimGroups, debug=args.debug)
    elif True:
        # active branch: per-view nearest-neighbour selection
        matched = []
        for nv in range(len(times_all)):
            dist = np.abs(times_all[ref_view][:, None] - times_all[nv][None, :])
            rows = np.arange(dist.shape[0])
            argmin0 = dist.argmin(axis=1)
            # simply take the nearest frame, then
            # trim duplicated matches at the head
            # NOTE(review): argmin0[i+1] can run past the end if every
            # match duplicates -- relies on breaking early
            for i in range(argmin0.shape[0]):
                if argmin0[i] == argmin0[i+1]:
                    argmin0[i] = -1
                else:
                    break
            # trim duplicated matches at the tail
            for i in range(1, argmin0.shape[0]):
                if argmin0[-i] == argmin0[-i-1]:
                    argmin0[-i] = -1
                else:
                    break
            matched.append(argmin0)
        matched = np.stack(matched)
    elif False:
        # dead branch: greedily grow the matched set view by view,
        # matching each new view against the mean matched timeline
        nViews = len(times_all)
        TIME_STEP = 20
        # offset the reference by a few frames so every camera has started,
        # and stop early so every camera is still recording
        REF_OFFSET = 20
        views_ref = [ref_view]
        matched = {
            ref_view:np.arange(REF_OFFSET, times_all[ref_view].shape[0]-REF_OFFSET)
        }
        while True:
            times_mean = np.stack([times_all[ref][matched[ref]] for ref in matched.keys()])
            times_mean = np.mean(times_mean, axis=0)
            infos = []
            for nv in range(nViews):
                if nv in matched.keys():
                    continue
                if False:
                    dist_all = []
                    for ref, indices in matched.items():
                        dist = np.abs(times_all[ref][indices, None] - times_all[nv][None, :])
                        dist[dist>TIME_STEP] = 10*TIME_STEP
                        dist_all.append(dist)
                    dist = np.stack(dist_all).sum(axis=0)
                    dist = dist / len(matched.keys())
                else:
                    dist = np.abs(times_mean[:, None] - times_all[nv][None, :])
                argmin0 = dist.argmin(axis=1)
                rows = np.arange(dist.shape[0])
                dist_sum = dist.min(axis=1).mean()
                infos.append({
                    'v': nv,
                    'dist_sum': dist_sum,
                    'argmin': argmin0
                })
                print(nv, dist_sum)
            if len(infos) == 0:
                break
            infos.sort(key=lambda x:x['dist_sum'])
            print('Select view: ', infos[0]['v'], infos[0]['dist_sum'])
            matched[infos[0]['v']] = infos[0]['argmin']
        matched = np.stack([matched[nv] for nv in range(nViews)])
    else:
        # dead branch: pick a start frame and a constant per-view offset,
        # assuming a constant frame rate; plots the candidates for debugging
        nViews = len(times_all)
        start_t = max([t[0] for t in times_all])
        # leave 10 frames of margin
        start_f = [np.where(t>start_t)[0][0] + 10 for t in times_all]
        start_t = times_all[ref_view][start_f[ref_view]]
        valid_f = [[np.where(t<start_t)[0][-1],np.where(t>=start_t)[0][0]] for t in times_all]
        from copy import deepcopy
        valid_f_copy = deepcopy(valid_f)
        import matplotlib as mpl
        mpl.use('TkAgg')
        import matplotlib.pyplot as plt
        while True:
            min_v, min_t = -1, 1e10
            min_info, max_info = [], []
            max_v, max_t = -1, -1
            for nv in range(nViews):
                if len(valid_f[nv]) == 1:
                    continue
                # cameras that still have more than one candidate frame
                min_info.append({
                    'v': nv,
                    't': times_all[nv][valid_f[nv][0]]
                })
                max_info.append({
                    'v': nv,
                    't': times_all[nv][valid_f[nv][-1]]
                })
            # decide whether to pop from the min side or the max side
            min_info.sort(key=lambda x:x['t'])
            max_info.sort(key=lambda x:-x['t'])
            if len(min_info) > 1 and len(max_info) > 1:
                delta_max = max_info[0]['t'] - start_t
                delta_min = start_t - min_info[0]['t']
                if delta_max > delta_min:
                    valid_f[max_info[0]['v']].pop(-1)
                else:
                    valid_f[min_info[0]['v']].pop(0)
            else:
                # NOTE(review): valid_f[0]/valid_f[1] here look like they
                # should be valid_f[nv][0]/valid_f[nv][1] -- dead code
                nv = min_info[0]['v']
                t_min = times_all[nv][valid_f[0]]
                t_max = times_all[nv][valid_f[1]]
                delta_min = start_t - t_min
                delta_max = t_max - start_t
                if delta_max > delta_min:
                    valid_f[nv].pop(-1)
                else:
                    valid_f[nv].pop(0)
                break
        plt.plot([0, nViews], [start_t, start_t])
        for nv in range(len(valid_f)):
            if len(valid_f[nv]) > 1:
                start, end = valid_f[nv]
                start, end = times_all[nv][start], times_all[nv][end]
                plt.plot([nv, nv], [start, end])
            else:
                start, end = valid_f_copy[nv][0], valid_f_copy[nv][-1]
                start, end = times_all[nv][start], times_all[nv][end]
                plt.plot([nv, nv], [start, end])
                plt.scatter(nv, times_all[nv][valid_f[nv]])
        plt.show()
        matched = np.arange(times_all[ref_view].shape[0]).reshape(1, -1).repeat(nViews, 0)
        matched = np.arange(2).reshape(1, -1).repeat(nViews, 0)
        start = np.array(valid_f).reshape(-1, 1)
        matched += start
        shape = np.array([t.shape[0] for t in times_all]).reshape(-1, 1) - 10
        matched[matched<0] = -1
    # keep only the columns where every view has a valid match
    matched = matched[:, (matched!=-1).all(axis=0)]
    matched_time = np.zeros_like(matched)
    for nv in range(matched.shape[0]):
        matched_time[nv] = times_all[nv][matched[nv]]
    max_time = matched_time.max(axis=0)
    min_time = matched_time.min(axis=0)
    diff = max_time - min_time
    step = matched_time[:, 1:] - matched_time[:, :-1]
    # per-camera synchronization quality report
    headers = ['camera', 'start', 'end', 'delta_mean', 'delta_min', 'delta_max', 'diff_max', 'diff_min', 'diff_mean']
    infos = []
    dist_to_ref_all = 0
    for nv, cam in enumerate(cams):
        dist_to_ref = (matched_time[nv] - matched_time[ref_view]).tolist()
        dist_to_ref_all += np.abs(dist_to_ref).mean()
        dist_to_ref.sort(key=lambda x: abs(x))
        infos.append([cam, matched_time[nv, 0], matched_time[nv, -1], step[nv].mean(), step[nv].min(), step[nv].max(), dist_to_ref[-1], dist_to_ref[0], np.abs(np.array(dist_to_ref)).mean()])
    print(tabulate(infos, headers=headers))
    print("Max sync difference = {}ms, Mean max sync difference = {:.1f}ms".format(diff.max(), diff.mean()))
    print("Mean sync diff : {}".format(dist_to_ref_all/len(cams)))
    # interactive checkpoint unless explicitly disabled on the CLI
    if not args.nocheck: import ipdb;ipdb.set_trace()
    return matched, matched_time
def copy_with_match(path, out, matched, imagelists, cams, multiple_thread = False):
    """Copy the matched frames into ``<out>/images/<viewname>/%06d.jpg``.

    *matched* is the (n_views, n_frames) index array from sync_by_name.
    Also dumps the per-frame source filenames to ``match_name.json``.

    NOTE(review): relies on module-level ``THREAD_CNT``, ``copy_func`` and
    ``copy_func_batch`` helpers not visible in this extract.
    """
    print('---')
    print('Copy {} to {}'.format(path, out))
    print('---')
    # normalize camera folder names like 'Camera_B3' / 'Camera (3)' to '03'
    pad_2 = lambda x:'{:02d}'.format(int(x))
    remove_cam = lambda x:x.replace('Camera_B', '').replace('Camera_', '').replace('Camera (', '').replace(')', '')
    cvt_viewname = lambda x:pad_2(remove_cam(x))
    reports = [[] for _ in range(matched.shape[1])]
    for nv in tqdm(range(matched.shape[0])):
        outdir = join(out, 'images', cvt_viewname(cams[nv]))
        # skip views that were already fully copied
        if os.path.exists(outdir):
            if matched.shape[1] == len(os.listdir(outdir)):
                print('exists enough images')
                continue
            else:
                print('exists not enough images')
        else:
            os.makedirs(outdir, exist_ok=True)
        # split the copy jobs round-robin over THREAD_CNT buckets
        imgname_old_s = [[] for _ in range(THREAD_CNT)]
        imgname_new_s = [[] for _ in range(THREAD_CNT)]
        for nfnew in range(matched.shape[1]):
            nf = matched[nv, nfnew]
            imgname_old = join(path, cams[nv], imagelists[cams[nv]][nf])
            imgname_old_s[nfnew % THREAD_CNT].append(imgname_old)
            imgname_new_s[nfnew % THREAD_CNT].append(join(outdir, '{:06d}.jpg'.format(nfnew)))
            reports[nfnew].append(imgname_old)
        if multiple_thread:
            import threading
            threads = []
            for i in range(THREAD_CNT):
                # no data races expected: each thread owns its own bucket
                thread = threading.Thread(target=copy_func_batch, args=(imgname_old_s[i], imgname_new_s[i]))
                thread.start()
                threads.append(thread)
            for thread in threads:
                thread.join()
        else:
            for nfnew in tqdm(range(matched.shape[1]), desc='{}'.format(cams[nv])):
                nf = matched[nv, nfnew]
                imgname_old = join(path, cams[nv], imagelists[cams[nv]][nf])
                imgname_new = join(outdir, '{:06d}.jpg'.format(nfnew))
                copy_func(imgname_old, imgname_new)
    save_json(join(out, 'match_name.json'), reports)
from tabulate import tabulate
def parse_time(imagelists, cams):
    """Parse timestamps from image names and print a per-camera summary table.

    Returns ``{cam: np.array of times}`` shifted so that the earliest
    camera starts at 0; the shift mutates the arrays in place, so the
    returned dict shares storage with the shifted values.
    """
    times_all = {}
    headers = ['camera', 'frames', 'mean', 'min', 'max', 'number>mean', 'start', 'end']
    # MAX_STEP appears unused here -- kept for parity with the original
    MAX_STEP = 20
    infos = []
    start_time = -1
    for cam in cams:
        times = []
        for imgname in imagelists[cam]:
            time = parseImg(imgname)['time']
            times.append(time)
        times = np.array(times)
        times_all[cam] = times
        # track the globally earliest timestamp across all cameras
        if start_time < 0:
            start_time = times[0]
        else:
            start_time = min(start_time, times[0])
    print('Start time: {}'.format(start_time))
    for cam in cams:
        times = times_all[cam]
        # in-place shift: also updates the array stored in times_all
        times -= start_time
        delta = times[1:] - times[:-1]
        infos.append([cam, times.shape[0],
            delta.mean(),
            '{}/{}'.format(delta.min(), delta.argmin()),
            '{}/{}'.format(delta.max(), delta.argmax()),
            (delta>delta.mean()).sum(),
            times[0]%60000,
            times[-1]%60000])
    print(tabulate(infos, headers=headers))
    return times_all
def soft_sync(path, out, multiple_thread=False):
    """Synchronize the camera folders under *path* and copy frames to *out*.

    Behaviour depends on the module-level ``args``: ``--static`` keeps a
    single frame per camera, ``--nosync`` keeps one camera untouched,
    otherwise frames are matched across cameras by timestamp and the
    matching metadata is saved alongside the copied images.
    """
    os.makedirs(out, exist_ok=True)
    # collect the per-camera image name lists
    cams, imagelists = getFileDict(path)
    if args.static:
        # static scene: the first frame of every camera is enough.
        # dtype=int replaces the np.int alias, removed in NumPy 1.24
        matched = np.zeros((len(cams), 1), dtype=int)
    elif args.nosync:
        assert len(cams) == 1
        times_all = parse_time(imagelists, cams)
        matched = np.arange(0, len(imagelists[cams[0]])).reshape(1, -1)
    else:
        # match frames across cameras by their parsed timestamps
        times_all = parse_time(imagelists, cams)
        matched, matched_time = sync_by_name(imagelists, times_all, cams)
        matched = matched[:, ::args.step]
        times_all = {key: val.tolist() for key, val in times_all.items()}
        save_json(join(out, 'timestamp.json'), times_all)
        np.savetxt(join(out, 'sync_time.txt'), matched_time - matched_time.min(), fmt='%10d')
    # copy the matched images into the output layout
    copy_with_match(path, out, matched, imagelists, cams, multiple_thread)
13,392 | import re
import numpy as np
import os, sys
import cv2
import shutil
from os.path import join
from tqdm import trange, tqdm
from multiprocessing import Pool
import json
from tabulate import tabulate
def read_json(path):
    """Load and return the JSON document stored at *path*."""
    with open(path) as handle:
        return json.load(handle)
13,393 | import numpy as np
import json
from glob import glob
from os.path import join
import os
from easymocap.mytools import write_camera, read_json, save_json
from easymocap.dataset import CONFIG
import shutil
from tqdm import tqdm, trange
SCALE = 100
def convert_camera(inp, out):
    """Convert a Panoptic Studio calibration json into EasyMocap cameras.

    Keeps only the 'hd' cameras and rescales translations by the
    module-level SCALE (cm -> m).
    """
    camnames = glob(join(inp, '*.json'))
    assert len(camnames) == 1, camnames
    # Load camera calibration parameters
    with open(camnames[0]) as cfile:
        calib = json.load(cfile)
    # cameras are identified by their (panel#, node#) name
    by_name = {entry['name']: entry for entry in calib['cameras']}
    cameras = {}
    for name, cam in by_name.items():
        if cam['type'] != 'hd':
            continue
        cameras[name] = {
            'K': np.array(cam['K']),
            'dist': np.array(cam['distCoef']).reshape(1, -1),
            'R': np.array(cam['R']),
            'T': np.array(cam['t']).reshape((3, 1)) / SCALE,
        }
    write_camera(cameras, out)
13,394 | import numpy as np
import json
from glob import glob
from os.path import join
import os
from easymocap.mytools import write_camera, read_json, save_json
from easymocap.dataset import CONFIG
import shutil
from tqdm import tqdm, trange
def copy_videos(inp, out):
    """Copy the HD videos into ``<out>/videos``, dropping the 'hd_' prefix."""
    outdir = join(out, 'videos')
    os.makedirs(outdir, exist_ok=True)
    for hdname in tqdm(os.listdir(join(inp, 'hdVideos'))):
        destination = join(outdir, hdname.replace('hd_', ''))
        shutil.copy(join(inp, 'hdVideos', hdname), destination)
13,395 | import numpy as np
import json
from glob import glob
from os.path import join
import os
from easymocap.mytools import write_camera, read_json, save_json
from easymocap.dataset import CONFIG
import shutil
from tqdm import tqdm, trange
SCALE = 100
def convert_keypoints3d(inp, out):
    """Convert Panoptic Studio 3D body (and hand) jsons to EasyMocap format.

    Maps the 19-joint coco19 skeleton onto the shared body25 joints and,
    when available, attaches 21-joint left/right hands by matching each
    reconstructed hand to the nearest body wrist. Positions are rescaled
    by the module-level SCALE (cm -> m).
    """
    bodynames = join(inp, 'hdPose3d_stage1_coco19', 'body3DScene_{:08d}.json')
    handnames = join(inp, 'hdHand3d', 'handRecon3D_hd{:08d}.json')
    out = join(out, 'keypoints3d')
    os.makedirs(out, exist_ok=True)
    # indices of the joints shared between coco19 and body25
    names_i = CONFIG['panoptic']['joint_names']
    names_o = CONFIG['body25']['joint_names']
    commons = [i for i in names_o if i in names_i]
    idx_i = [names_i.index(i) for i in commons]
    idx_o = [names_o.index(i) for i in commons]
    use_hand = True
    if use_hand:
        # body25 + left hand (21) + right hand (21), each row (x, y, z, conf)
        zero_body = np.zeros((25 + 21 + 21, 4))
    else:
        zero_body = np.zeros((25, 4))
    for i in trange(10000):
        bodyname = bodynames.format(i)
        if not os.path.exists(bodyname):
            continue
        bodies = read_json(bodyname)
        results = []
        for data in bodies['bodies']:
            pid = data['id']
            joints19 = np.array(data['joints19']).reshape(-1, 4)
            joints19[:, :3] /= SCALE
            keypoints3d = zero_body.copy()
            keypoints3d[idx_o, :] = joints19[idx_i, :]
            results.append({'id': pid, 'keypoints3d': keypoints3d})
        handname = handnames.format(i)
        hands = read_json(handname)
        # wrist positions (body25 indices 7 = left, 4 = right) used to
        # assign each reconstructed hand to a body; 0.2 m is the maximum
        # accepted wrist distance
        lwrists = np.stack([res['keypoints3d'][7] for res in results])
        left_valid = np.zeros(len(results)) + 0.2
        rwrists = np.stack([res['keypoints3d'][4] for res in results])
        right_valid = np.zeros(len(results)) + 0.2
        for data in hands['people']:
            # NOTE(review): pid from data['id'] is immediately overwritten
            # by the nearest-wrist argmin below -- confirm this is intended
            pid = data['id']
            if 'left_hand' in data.keys():
                left_p = np.array(data['left_hand']['landmarks']).reshape((-1,3))
                left_v = np.array(data['left_hand']['averageScore']).reshape((-1,1))
                left = np.hstack((left_p/SCALE, left_v))
                # accept the hand only if enough landmarks have confidence
                if left[0, -1] > 0 and (left_v > 0).sum() > 10:
                    dist = np.linalg.norm(left[:1, :3] - lwrists[:, :3], axis=1)
                    dist_min, pid = dist.min(), dist.argmin()
                    if left_valid[pid] > dist_min:
                        left_valid[pid] = dist_min
                        results[pid]['keypoints3d'][25:25+21, :] = left
            if 'right_hand' in data.keys():
                right_p = np.array(data['right_hand']['landmarks']).reshape((-1,3))
                right_v = np.array(data['right_hand']['averageScore']).reshape((-1,1))
                right = np.hstack((right_p/SCALE, right_v))
                if right[0, -1] > 0 and (right_v > 0).sum() > 10:
                    dist = np.linalg.norm(right[:1, :3] - rwrists[:, :3], axis=1)
                    dist_min, pid = dist.min(), dist.argmin()
                    if right_valid[pid] > dist_min:
                        right_valid[pid] = dist_min
                        results[pid]['keypoints3d'][25+21:25+21+21, :] = right
        # find the correspondent people
        outname = join(out, '{:06d}.json'.format(i))
        for res in results:
            res['keypoints3d'] = res['keypoints3d'].tolist()
        save_json(outname, results)
13,396 | import os
from os.path import join
from glob import glob
import numpy as np
import cv2
from easymocap.mytools.camera_utils import write_camera
# NOTE(review): the two definitions below appear truncated in this
# extract -- only the signatures survived; confirm the bodies in the
# original source before relying on them.
def process_camera(xml_path, seq, act, cams):
def write_camera(camera, path):
def convert_h36m_easymocap(H36M_ROOT, OUT_ROOT, seqs, cams):
    """Export Human3.6M camera calibrations into the EasyMocap layout."""
    xml_path = join(H36M_ROOT, 'metadata.xml')
    for seq in seqs:
        print('convert {}'.format(seq))
        # each .cdf file under MyPoseFeatures names one action
        pose_path = join(H36M_ROOT, seq, 'MyPoseFeatures', 'D3_Positions_mono')
        actions = sorted({os.path.basename(p).split('.')[0]
                          for p in glob(join(pose_path, '*.cdf'))})
        for action in actions:
            print('  ', action)
            outdir = join(OUT_ROOT, seq, action)
            # convert and write the cameras for this sequence/action pair
            write_camera(process_camera(xml_path, seq, action, cams), outdir)
13,397 | from glob import glob
from os.path import join
from urllib.error import URLError
from pytube import YouTube
import os
from easymocap.mytools.debug_utils import log, mkdir, myerror
extensions = ['.mp4', '.webm']
def log(text):
    # Emit `text` as an info-level message via the shared myprint helper
    # (presumably easymocap.mytools.debug_utils.myprint — confirm).
    myprint(text, 'info')
def myerror(text):
    # Emit `text` as an error-level message via the shared myprint helper
    # (presumably easymocap.mytools.debug_utils.myprint — confirm).
    myprint(text, 'error')
def download_youtube(vid, outdir):
    """Download one YouTube video `vid` into `outdir`.

    Candidate streams are ranked by resolution, then fps, then container;
    the first stream that downloads successfully wins. Relies on the
    module-level `args` namespace (restart / only4k / no720).
    Returns 0 on success or skip, 1 on a stream/url error.
    """
    outname = join(outdir, vid)
    url = 'https://www.youtube.com/watch?v={}'.format(vid)
    # skip when an earlier run already produced the file
    for ext in extensions:
        if os.path.exists(outname+ext) and not args.restart:
            log('[Info]: skip video {}'.format(outname+ext))
            return 0
    log('[Info]: start to download video {}'.format(outname))
    log('[Info]: {}'.format(url))
    yt = YouTube(url)
    try:
        streams = yt.streams
    except KeyError:
        myerror('[Error]: not found streams: {}'.format(url))
        return 1
    except URLError:
        myerror('[Error]: Url error: {}'.format(url))
        return 1
    # collect candidate streams in preference order
    streams_valid = []
    res_range = ['2160p', '1440p', '1080p', '720p'] if not args.only4k else ['2160p']
    if args.no720:
        res_range.remove('720p')
    for res in res_range:
        for fps in [60, 50, 30, 25, 24]:
            for ext in ['webm', 'mp4']:
                for stream in streams:
                    if stream.resolution == res and \
                        stream.fps == fps and \
                        stream.mime_type == 'video/{}'.format(ext):
                        streams_valid.append(stream)
    if len(streams_valid) == 0:
        for stream in streams:
            print(stream)
        myerror('[BUG ] Not found valid stream, please check the streams')
        return 0
    title = streams_valid[0].title
    log('[Info]: {}'.format(title))
    for stream in streams_valid:
        res = stream.resolution
        log('[Info]: The resolution is {}, ext={}'.format(res, stream.mime_type))
        filename = '{}.{}'.format(vid, stream.mime_type.split('/')[-1])
        try:
            stream.download(output_path=outdir, filename=filename, max_retries=0)
            log('[Info]: Succeed')
        # BUGFIX: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; narrow it so Ctrl-C still aborts
        except Exception:
            myerror('[BUG ]: Failed')
            continue
        break
13,398 | import os
from os.path import join
import shutil
from tqdm import tqdm
from glob import glob
import cv2
from easymocap.mytools.debug_utils import myerror, mywarn
mkdir = lambda x:os.makedirs(x, exist_ok=True)
import json
def save_json(file, data):
    """Serialize `data` to `file` as indented JSON, creating the parent dir."""
    parent = os.path.dirname(file)
    if not os.path.exists(parent):
        os.makedirs(parent)
    fout = open(file, 'w')
    try:
        json.dump(data, fout, indent=4)
    finally:
        fout.close()
def read_json(path):
    """Parse and return the JSON document stored at `path`."""
    with open(path) as handle:
        return json.load(handle)
def mywarn(text):
    # Emit `text` at warning level via the shared myprint helper
    # (presumably easymocap.mytools.debug_utils.myprint — confirm).
    myprint(text, 'warn')
def myerror(text):
    # Emit `text` at error level via the shared myprint helper
    # (presumably easymocap.mytools.debug_utils.myprint — confirm).
    myprint(text, 'error')
def copy_dataset(inp, out, start, end, step, keys, args):
    """Copy frames [start:end:step] of an EasyMocap dataset from `inp` to
    `out`, renumbering the copied frames from 0.

    Copies camera yml files, the (sliced) match_name/sync_time metadata,
    the shared keypoints3d folder and the per-view folders listed in
    `keys`. Images can optionally be rescaled; a missing frame falls back
    to the multi-file `{frame:06d}_*` naming scheme.
    """
    # file extension used for each per-view folder
    copy_keys = {
        'images': args.ext,
        'annots': '.json',
        'mask-schp': '.png',
    }
    # folders shared across all views (no per-camera subfolder)
    copy_share_keys = {
        'output-keypoints3d/keypoints3d': '.json'
    }
    mkdir(out)
    if os.path.exists(join(inp, 'intri.yml')):
        shutil.copyfile(join(inp, 'intri.yml'), join(out, 'intri.yml'))
        shutil.copyfile(join(inp, 'extri.yml'), join(out, 'extri.yml'))
    if os.path.exists(join(inp, 'match_name.json')):
        names = read_json(join(inp, 'match_name.json'))
        names = names[start:end:step]
        save_json(join(out, 'match_name.json'), names)
    if os.path.exists(join(inp, 'sync_time.txt')):
        import numpy as np
        times = np.loadtxt(join(inp, 'sync_time.txt'))
        times = times.reshape(times.shape[0], -1)
        times = times[:, start:end:step]
        np.savetxt(join(out, 'sync_time.txt'), times, fmt='%10d')
    # leave an empty marker file recording which slice was copied
    # (portable replacement for the former `os.system('touch ...')`)
    open(join(out, '{}-{}-{}'.format(start, end, step)), 'a').close()
    for copy, ext in copy_share_keys.items():
        if not os.path.exists(join(inp, copy)):
            continue
        if len(args.frames) == 0:
            ranges = [i for i in range(start, end, step)]
        else:
            ranges = args.frames
        outdir = join(out, copy)
        # BUGFIX: this used to `pass`, so a complete folder was copied
        # again; skip it like the per-view loop below does
        if os.path.exists(outdir) and len(os.listdir(outdir)) == len(ranges):
            continue
        os.makedirs(outdir, exist_ok=True)
        for nnf, nf in enumerate(tqdm(ranges, desc='{}'.format(copy))):
            oldname = join(inp, copy, '{:06d}{}'.format(nf, ext))
            if not os.path.exists(oldname):
                mywarn('{} not exists'.format(oldname))
                continue
            newname = join(outdir, '{:06d}{}'.format(nnf, ext))
            shutil.copyfile(oldname, newname)
    for copy in keys:
        ext = copy_keys.get(copy, '.json')
        if not os.path.exists(join(inp, copy)):
            continue
        if len(args.subs) == 0:
            subs = sorted(os.listdir(join(inp, copy)))
            subs = [s for s in subs if os.path.isdir(join(inp, copy, s))]
        else:
            subs = args.subs
        for sub in subs:
            if not os.path.exists(join(inp, copy)):
                continue
            outdir = join(out, copy, sub.replace(args.strip, ''))
            os.makedirs(outdir, exist_ok=True)
            if args.end == -1:
                # auto-detect the sequence length from the existing files
                oldnames = sorted(glob(join(inp, copy, sub, '*{}'.format(ext))))
                end = len(oldnames)
                print('{} has {} frames'.format(sub, end))
            if args.sample == -1:
                if len(args.frames) == 0:
                    ranges = [i for i in range(start, end, step)]
                else:
                    ranges = args.frames
            else:
                # uniformly sample args.sample frames, skipping
                # strip_frame frames at both ends of the sequence
                ranges = [(i/args.sample)*(end-start-2*args.strip_frame)+start+args.strip_frame for i in range(args.sample)]
                ranges = [int(i+0.5) for i in ranges]
            if os.path.exists(outdir) and len(os.listdir(outdir)) == len(ranges):
                mywarn('[copy] Skip {}'.format(outdir))
                continue
            for nnf, nf in enumerate(tqdm(ranges, desc='{}:{}'.format(sub, copy))):
                oldname = join(inp, copy, sub, '{:06d}{}'.format(nf, ext))
                if not os.path.exists(oldname):
                    # fall back to split files named `{frame:06d}_*{ext}`
                    oldnames = sorted(glob(join(inp, copy, sub, '{:06d}_*{}'.format(nf, ext))))
                    if len(oldnames) == 0:
                        # BUGFIX: removed a leftover `import ipdb;ipdb.set_trace()`
                        # debug breakpoint; just report and skip the frame
                        myerror('{} not exists'.format(oldname))
                    else:
                        for oldname in oldnames:
                            newname = join(outdir, os.path.basename(oldname).replace('{:06d}'.format(nf), '{:06d}'.format(nnf)))
                            shutil.copyfile(oldname, newname)
                else:
                    newname = join(outdir, '{:06d}{}'.format(nnf, ext))
                    if copy == 'images' and args.scale != 1:
                        img = cv2.imread(oldname)
                        img = cv2.resize(img, None, fx=args.scale, fy=args.scale)
                        cv2.imwrite(newname, img)
                    else:
                        shutil.copyfile(oldname, newname)
    # make videos from the last copied images folder when requested
    # (note: relies on the loop variables `copy`/`subs`/`ext` after the loop)
    if copy == 'images' and args.make_video:
        os.makedirs(join(out, 'videos'), exist_ok=True)
        for sub in subs:
            shell = '{} -y -i {}/images/{}/%06d{} -vcodec libx264 {}/videos/{}.mp4 -loglevel quiet'.format(
                args.ffmpeg, out, sub, ext, out, sub
            )
            print(shell)
            os.system(shell)
13,399 | import os
from os.path import join
import shutil
from tqdm import tqdm
from glob import glob
import cv2
from easymocap.mytools.debug_utils import myerror, mywarn
mkdir = lambda x:os.makedirs(x, exist_ok=True)
import json
def export(root, out, keys):
    """Export selected subdirectories of `root` into `out`.

    The 'videos' key is special-cased: copied when present, otherwise
    encoded from the images folders with ffmpeg. Camera yml files are
    copied at the end when they exist.
    """
    mkdir(out)
    for key in keys:
        src = join(root, key)
        dst = join(out, key)
        if key == 'videos':
            if os.path.exists(src):
                shutil.copytree(src, dst)
            else:
                mkdir(dst)
                subs = sorted(os.listdir(join(root, 'images')))
                for sub in subs:
                    cmd = '{ffmpeg} -r {fps} -i {inp}/%06d.jpg -vcodec libx264 {out}'.format(
                        ffmpeg=args.ffmpeg, fps=50, inp=join(root, 'images', sub),
                        out=join(dst, sub+'.mp4')
                    )
                    os.system(cmd)
            # BUGFIX: 'videos' is fully handled above; without this
            # `continue`, an existing videos folder was copytree'd a
            # second time (raising FileExistsError) and a missing one was
            # spuriously printed after being encoded.
            continue
        if not os.path.exists(src):
            print(src)
            continue
        shutil.copytree(src, dst)
    for name in ['intri.yml', 'extri.yml']:
        if os.path.exists(join(root, name)):
            shutil.copyfile(join(root, name), join(out, name))
13,400 | import os, sys
import cv2
from os.path import join
from tqdm import tqdm
from glob import glob
import numpy as np
import json
def extract_video(videoname, path, start, end, step):
    """Dump frames [start, end) of `videoname` (every `step`-th) as JPGs.

    Frames go to <path>/images/<basename>/<frame:06d>.jpg, keeping the
    original frame index in the filename. Existing non-empty output
    folders are left untouched. Returns the video basename.
    """
    base = os.path.basename(videoname).replace('.mp4', '')
    if not os.path.exists(videoname):
        return base
    outpath = join(path, 'images', base)
    existing = os.listdir(outpath) if os.path.exists(outpath) else []
    if existing:
        print('>> exists {} frames'.format(len(existing)))
        return base
    os.makedirs(outpath, exist_ok=True)
    cap = cv2.VideoCapture(videoname)
    total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    for idx in tqdm(range(total), desc='{:10s}'.format(os.path.basename(videoname))):
        ok, frame = cap.read()
        if idx < start:
            continue
        if idx >= end:
            break
        if not ok:
            continue
        if idx % step == 0:
            cv2.imwrite(join(outpath, '{:06d}.jpg'.format(idx)), frame)
    cap.release()
    return base
13,401 | import os, sys
import cv2
from os.path import join
from tqdm import tqdm
from glob import glob
import numpy as np
import json
def extract_2d(openpose, image, keypoints, render, args):
    """Invoke the OpenPose demo binary on an image folder.

    Skips work when `keypoints` already holds one JSON per input image.
    Appends net-resolution / hand+face / rendering flags according to
    `args`, then chdirs into `openpose` and runs the command.
    """
    done = os.path.exists(keypoints) and \
        len(os.listdir(image)) == len(os.listdir(keypoints))
    if done:
        return
    os.makedirs(keypoints, exist_ok=True)
    if os.name != 'nt':
        cmd = './build/examples/openpose/openpose.bin --image_dir {} --write_json {} --display 0'.format(image, keypoints)
    else:
        cmd = 'bin\\OpenPoseDemo.exe --image_dir {} --write_json {} --display 0'.format(join(os.getcwd(),image), join(os.getcwd(),keypoints))
    if args.highres != 1:
        # openpose requires the net resolution to be a multiple of 16
        cmd += ' --net_resolution -1x{}'.format(int(16*((368*args.highres)//16)))
    if args.handface:
        cmd += ' --hand --face'
    if args.render:
        os.makedirs(join(os.getcwd(), render), exist_ok=True)
        cmd += ' --write_images {}'.format(join(os.getcwd(), render))
    else:
        cmd += ' --render_pose 0'
    os.chdir(openpose)
    os.system(cmd)
13,402 | import os, sys
import cv2
from os.path import join
from tqdm import tqdm
from glob import glob
import numpy as np
import json
def save_json(file, data):
    """Write `data` to `file` as indented JSON, creating the parent directory."""
    folder = os.path.dirname(file)
    if not os.path.exists(folder):
        os.makedirs(folder)
    with open(file, 'w') as fp:
        json.dump(data, fp, indent=4)
def create_annot_file(annotname, imgname):
    """Initialize an empty EasyMocap annotation JSON for image `imgname`.

    Records the image path (relative to the 'images' directory), its
    size, and an empty annots list; saves to `annotname` and returns the dict.
    """
    assert os.path.exists(imgname), imgname
    height, width = cv2.imread(imgname).shape[:2]
    parts = imgname.split(os.sep)
    filename = os.sep.join(parts[parts.index('images'):])
    annot = {
        'filename': filename,
        'height': height,
        'width': width,
        'annots': [],
        'isKeyframe': False
    }
    save_json(annotname, annot)
    return annot
def load_openpose(opname):
    """Read an OpenPose *_keypoints.json file and convert each detected
    person into the project's annot dict (bbox, body/hand/face keypoints)."""
    mapname = {'face_keypoints_2d':'face2d', 'hand_left_keypoints_2d':'handl2d', 'hand_right_keypoints_2d':'handr2d'}
    assert os.path.exists(opname), opname
    data = read_json(opname)
    out = []
    pid = 0
    for i, person in enumerate(data['people']):
        body = np.array(person['pose_keypoints_2d']).reshape(-1, 3)
        annot = {
            'bbox': bbox_from_openpose(body),
            'personID': pid + i,
            'keypoints': body.tolist(),
            'isKeyframe': False
        }
        # optional face/hand channels are only stored when present
        for key, outkey in mapname.items():
            if len(person[key]) == 0:
                continue
            annot[outkey] = np.array(person[key]).reshape(-1, 3).tolist()
        out.append(annot)
    return out
def convert_from_openpose(path_orig, src, dst, annotdir):
    """Convert every OpenPose JSON in `src` into an annot file in `dst`.

    Restores the working directory to `path_orig` first (the OpenPose
    step chdirs away), then pairs each keypoints file with its image.
    """
    os.chdir(path_orig)
    for inp in tqdm(sorted(os.listdir(src)), desc='{:10s}'.format(os.path.basename(dst))):
        annots = load_openpose(join(src, inp))
        base = inp.replace('_keypoints.json', '')
        annotname = join(dst, base+'.json')
        imgname = annotname.replace(annotdir, 'images').replace('.json', '.jpg')
        annot = create_annot_file(annotname, imgname)
        annot['annots'] = annots
        save_json(annotname, annot)
13,403 | import os, sys
import cv2
from os.path import join
from tqdm import tqdm
from glob import glob
import numpy as np
import json
def save_json(file, data):
    """Dump `data` as indented JSON into `file`; missing parent dirs are created."""
    target_dir = os.path.dirname(file)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    with open(file, mode='w') as out:
        json.dump(data, out, indent=4)
def create_annot_file(annotname, imgname):
    """Create and save a fresh annotation dict for `imgname`.

    The stored filename is the image path truncated to start at the
    'images' directory; 'annots' starts empty. Returns the new dict.
    """
    assert os.path.exists(imgname), imgname
    img = cv2.imread(imgname)
    pieces = imgname.split(os.sep)
    rel = os.sep.join(pieces[pieces.index('images'):])
    annot = dict(
        filename=rel,
        height=img.shape[0],
        width=img.shape[1],
        annots=[],
        isKeyframe=False,
    )
    save_json(annotname, annot)
    return annot
def detect_frame(detector, img, pid=0):
    """Run `detector` on one image and pack each detection into an annot
    dict; person IDs are assigned sequentially starting at `pid`."""
    detections = detector.detect([img])[0]
    annots = []
    for i, det in enumerate(detections):
        annots.append({
            'bbox': [float(v) for v in det['bbox']],
            'personID': pid + i,
            'keypoints': det['keypoints'].tolist(),
            'isKeyframe': True,
        })
    return annots
# YOLOv4 + HRNet detector settings for clean captures: higher detection
# confidence and strict person filtering.
config_high = {
    'yolov4': {
        'ckpt_path': 'data/models/yolov4.weights',
        'conf_thres': 0.3,
        'box_nms_thres': 0.5 # NMS IoU threshold (e.g. 0.9 would mean boxes overlapping up to IoU 0.9 are kept)
    },
    'hrnet':{
        'nof_joints': 17,
        'c': 48,
        'checkpoint_path': 'data/models/pose_hrnet_w48_384x288.pth'
    },
    'detect':{
        'MIN_PERSON_JOINTS': 10,
        'MIN_BBOX_AREA': 5000,
        'MIN_JOINTS_CONF': 0.3,
        'MIN_BBOX_LEN': 150
    }
}
# Permissive settings: low confidence threshold, loose NMS and no person
# filtering — keeps nearly every candidate detection.
config_low = {
    'yolov4': {
        'ckpt_path': 'data/models/yolov4.weights',
        'conf_thres': 0.1,
        'box_nms_thres': 0.9 # NMS IoU threshold = 0.9: boxes overlapping up to IoU 0.9 are not suppressed
    },
    'hrnet':{
        'nof_joints': 17,
        'c': 48,
        'checkpoint_path': 'data/models/pose_hrnet_w48_384x288.pth'
    },
    'detect':{
        'MIN_PERSON_JOINTS': 0,
        'MIN_BBOX_AREA': 0,
        'MIN_JOINTS_CONF': 0.0,
        'MIN_BBOX_LEN': 0
    }
}
def extract_yolo_hrnet(image_root, annot_root, ext='jpg', use_low=False):
    """Detect people with YOLO + HRNet in every `ext` image under
    `image_root` and write one annot JSON per image into `annot_root`.

    Detections are sorted by bbox size (largest first) and person IDs
    are re-assigned in that order.
    """
    imgnames = sorted(glob(join(image_root, '*.{}'.format(ext))))
    import torch
    device = torch.device('cuda')
    from easymocap.estimator import Detector
    config = config_low if use_low else config_high
    print(config)
    detector = Detector('yolo', 'hrnet', device, config)
    for imgname in tqdm(imgnames):
        annotname = join(annot_root, os.path.basename(imgname).replace('.{}'.format(ext), '.json'))
        annot = create_annot_file(annotname, imgname)
        annots = detect_frame(detector, cv2.imread(imgname), 0)
        # use the squared longer bbox edge as a size proxy
        for det in annots:
            det['area'] = max(det['bbox'][2] - det['bbox'][0], det['bbox'][3] - det['bbox'][1])**2
        annots.sort(key=lambda det: -det['area'])
        # re-assign person IDs in descending-size order
        for new_id, det in enumerate(annots):
            det['personID'] = new_id
        annot['annots'] = annots
        save_json(annotname, annot)
13,404 | import os
import sys
from os.path import join
import re
import json
import time
import scipy.io as scio
import numpy as np
from tqdm import tqdm
def save_json(output, json_path):
    """Write `output` to `json_path` as indented JSON.

    BUGFIX: creates the parent directory with os.makedirs instead of
    shelling out to `mkdir -p`, which was non-portable and broke on
    paths containing spaces or shell metacharacters.
    """
    dirname = os.path.dirname(json_path)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    with open(json_path, 'w') as f:
        json.dump(output, f, indent=4)
13,405 | import os
import sys
from os.path import join
import re
import json
import time
import scipy.io as scio
import numpy as np
from tqdm import tqdm
def is_right(model_start_point, model_end_point, gt_strat_point, gt_end_point, alpha=0.5):
    """Bone correctness test: True when the mean endpoint error of the
    predicted bone is within `alpha` times the ground-truth bone length."""
    bone_len = np.linalg.norm(gt_end_point - gt_strat_point)
    err_start = np.linalg.norm(gt_strat_point - model_start_point)
    err_end = np.linalg.norm(gt_end_point - model_end_point)
    mean_err = (err_start + err_end) / 2
    return mean_err <= alpha * bone_len
13,406 | import os
import sys
from os.path import join
import re
import json
import time
import scipy.io as scio
import numpy as np
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `openpose2shelf3D` function. Write a Python function `def openpose2shelf3D(pose3d, score)` to solve the following problem:
Transform a COCO-order 3D pose (our method's output) to the Shelf dataset order, with interpolation. :param pose3d: np.array with shape nJx3 :return: 3D pose in Shelf order with shape 14x3
Here is the function:
def openpose2shelf3D(pose3d, score):
    """Transform an OpenPose-order 3D pose into Shelf order (14 joints).

    The first 12 Shelf joints map directly from OpenPose joints; the
    neck (12) and head (13) are interpolated from the shoulder center
    (joint 1) and the nose (joint 0).

    :param pose3d: np.array with shape nJx3
    :param score: per-joint confidence with shape nJx1
    :return: (shelf_pose (14, 3), shelf_score (14, 1))
    """
    shelf_pose = np.zeros((14, 3))
    shelf_score = np.zeros((14, 1))
    openpose2shelf = np.array([11, 10, 9, 12, 13, 14, 4, 3, 2, 5, 6, 7])
    shelf_pose[0: 12] += pose3d[openpose2shelf]
    shelf_score[0: 12] += score[openpose2shelf]
    # BUGFIX: removed the dead `if True:` guard and its unreachable
    # `else` branch (which copied joints 1/0 without interpolation).
    shelf_pose[12] = pose3d[1]  # use middle of shoulder to init
    shelf_pose[13] = pose3d[0]  # use nose to init
    # head: stretch the shoulder->nose offset (z is stretched more)
    shelf_pose[13] = shelf_pose[12] + (shelf_pose[13] - shelf_pose[12]) * np.array([0.75, 0.75, 1.5])
    # neck: halfway between shoulder center and nose
    shelf_pose[12] = shelf_pose[12] + (pose3d[0] - shelf_pose[12]) * np.array([1. / 2., 1. / 2., 1. / 2.])
    shelf_score[12] = score[0]*score[1]
    shelf_score[13] = score[0]*score[1]
    return shelf_pose, shelf_score
13,407 | import os
import sys
from os.path import join
import re
import json
import time
import scipy.io as scio
import numpy as np
from tqdm import tqdm
def convert_openpose_shelf(keypoints3d):
    """Reorder an OpenPose (x, y, z, conf) skeleton into the 15-joint
    Shelf layout and interpolate the neck (12) and head-top (13)."""
    mapping = np.array([11, 10, 9, 12, 13, 14, 4, 3, 2, 5, 6, 7, 1, 0, 8])
    shelf15 = keypoints3d[mapping].copy()
    # interpolate head: facing direction from the torso/shoulder cross product
    face_dir = np.cross(shelf15[12, :3] - shelf15[14, :3], shelf15[8, :3] - shelf15[9, :3])
    face_dir = face_dir / np.linalg.norm(face_dir)
    up = np.array([0., 0., 1.])
    shoulder_center = (keypoints3d[2, :3] + keypoints3d[5, :3]) / 2.
    head_center = (keypoints3d[17, :3] + keypoints3d[18, :3]) / 2.
    shelf15[12, :3] = shoulder_center + (head_center - shoulder_center) * 0.5
    shelf15[13, :3] = shelf15[12, :3] + face_dir * 0.125 + up * 0.145
    return shelf15
13,408 | import os
import sys
from os.path import join
import re
import json
import time
import scipy.io as scio
import numpy as np
from tqdm import tqdm
def convert_shelf_shelfgt(keypoints):
    """Append a hip joint (midpoint of joints 2 and 3) to the GT skeleton."""
    hip = (keypoints[2] + keypoints[3]) / 2
    return np.vstack((keypoints, hip))
13,409 | import os
import sys
from os.path import join
import re
import json
import time
import scipy.io as scio
import numpy as np
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `vectorize_distance` function. Write a Python function `def vectorize_distance(a, b)` to solve the following problem:
Calculate the Euclidean distance between each row of a and each row of b. :param a: Nx... np.array :param b: Mx... np.array :return: NxM np.array of pairwise distances
Here is the function:
def vectorize_distance(a, b):
    """
    Pairwise Euclidean distance between the rows of a and b.
    :param a: Nx... np.array
    :param b: Mx... np.array
    :return: NxM np.array; entry (i, j) is the distance between row i of a and row j of b
    """
    n_a = a.shape[0]
    n_b = b.shape[0]
    flat_a = a.reshape(n_a, -1)
    flat_b = b.reshape(n_b, -1)
    # expand ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y
    sq_a = np.tile(np.sum(flat_a ** 2, axis=1).reshape(-1, 1), (1, n_b))
    sq_b = np.tile(np.sum(flat_b ** 2, axis=1), (n_a, 1))
    sq_dist = sq_a + sq_b - 2 * (flat_a @ flat_b.T)
    return np.sqrt(sq_dist)
13,410 | import os
import sys
from os.path import join
import re
import json
import time
import scipy.io as scio
import numpy as np
from tqdm import tqdm
def distance(a, b, score):
    """Confidence-weighted RMS joint distance between two pose sets.

    a: (N, J, 3) poses; b: (M, J, 3) poses; score: (M, J, 1) weights for b.
    Returns an (M, N) array of weighted root-mean-square distances.
    """
    a_exp = a[None, :, :, :]      # (1, N, J, 3)
    b_exp = b[:, None, :, :]      # (M, 1, J, 3)
    w = score[:, None, :, 0]      # (M, 1, J)
    sq = np.sum((a_exp - b_exp)**2, axis=3) * w
    mean_sq = sq.sum(axis=2) / w.sum(axis=2)
    return np.sqrt(mean_sq)
13,411 | import os
import sys
from os.path import join
import re
import json
import time
import scipy.io as scio
import numpy as np
from tqdm import tqdm
def convert_openpose_shelf1(keypoints3d):
    """Map an OpenPose (x, y, z, conf) skeleton to Shelf's 15-joint order
    and synthesize the neck (12) and head-top (13) from face landmarks."""
    reorder = np.array([11, 10, 9, 12, 13, 14, 4, 3, 2, 5, 6, 7, 1, 0, 8])
    shelf15 = keypoints3d[reorder].copy()
    # interpolate the head joints
    face_dir = np.cross(keypoints3d[1, :3] - keypoints3d[8, :3], keypoints3d[2, :3] - shelf15[5, :3])
    face_dir = face_dir / np.linalg.norm(face_dir)
    up_dir = keypoints3d[1, :3] - keypoints3d[8, :3]
    up_dir = up_dir / np.linalg.norm(up_dir)
    shoulder_center = keypoints3d[1, :3]
    # average offset of ears / eyes / nose from joint 1 gives a head vector
    ear = (keypoints3d[17, :3] + keypoints3d[18, :3]) / 2 - keypoints3d[1, :3]
    eye = (keypoints3d[15, :3] + keypoints3d[16, :3]) / 2 - keypoints3d[1, :3]
    nose = keypoints3d[0, :3] - keypoints3d[1, :3]
    head = (ear + eye + nose) / 3.
    nose_len = np.linalg.norm(head)
    nose_dir = head / nose_len
    head_dir = (nose_dir * 2 + up_dir)
    head_dir = head_dir / np.linalg.norm(head_dir)
    neck = shoulder_center + nose_len * head_dir * 0.5
    shelf15[12, :3] = neck
    shelf15[13, :3] = neck + head_dir * nose_len * 0.8
    return shelf15
def readResult(filePath, range_=None, isA4d=None):
    """Load per-frame results into a dict {frame_id: parsed result}.

    When `range_` is None, one entry per *.txt file found in `filePath`
    is assumed; `isA4d` falls back to the command-line flag `args.a4d`.
    """
    if range_ is None:
        from glob import glob
        range_ = list(range(len(glob(join(filePath, '*.txt')))))
    if isA4d is None:
        isA4d = args.a4d
    res = {}
    for imgId in tqdm(range_):
        res[imgId] = _readResult(join(filePath, '{:06d}.json'.format(imgId)), isA4d)
    return res
class ShelfGT:
    """Accessor for Shelf ground-truth poses (first three actors only)."""
    def __init__(self, actor3D) -> None:
        # only the first three actors carry evaluated annotations
        self.actor3D = actor3D[:3]
    def __getitem__(self, index):
        """Return [{'id', 'keypoints3d'}] for GT frame `index` (offset by 2)."""
        results = []
        for pid, actor in enumerate(self.actor3D):
            gt_pose = actor[index-2][0]
            # skip actors without a valid annotation in this frame
            if gt_pose.shape in ((1, 0), (0, 0)):
                continue
            results.append({'id': pid, 'keypoints3d': convert_shelf_shelfgt(gt_pose)})
        return results
def write_to_csv(filename, results, id_wise=True):
    """Print a summary table of the float metrics in `results` (via
    tabulate, also saved as a .txt twin) and dump all raw rows as CSV.

    With id_wise=True the table holds one averaged row per person id
    plus a final 'Mean' row; otherwise a single 'Mean' row.
    """
    metric_keys = [k for k in results[0].keys() if isinstance(results[0][k], float)]
    header = [''] + [k.replace(' ', '') for k in metric_keys]
    contents = []
    if id_wise:
        ids = list(set(res['id'] for res in results))
        for pid in ids:
            row = ['{}'.format(pid)]
            for k in metric_keys:
                vals = [res[k] for res in results if res['id'] == pid]
                row.append('{:.3f}'.format(sum(vals)/len(vals)))
            contents.append(row)
        # mean over the per-id rows (note: computed from the rounded strings)
        mean_row = ['Mean']
        for i, k in enumerate(metric_keys):
            mean_row.append('{:.3f}'.format(sum(float(r[i+1]) for r in contents)/len(ids)))
        contents.append(mean_row)
    else:
        mean_row = ['Mean']
        for k in metric_keys:
            mean_row.append('{:.3f}'.format(sum(res[k] for res in results)/len(results)))
        contents.append(mean_row)
    import tabulate
    table = tabulate.tabulate(contents, header, tablefmt='fancy_grid')
    print(table)
    print(table, file=open(filename.replace('.csv', '.txt'), 'w'))
    with open(filename, 'w') as f:
        # raw dump keeps every key, not only the float metrics
        raw_header = list(results[0].keys())
        f.write(','.join(raw_header) + '\n')
        for res in results:
            f.write(','.join(['{}'.format(res[k]) for k in raw_header]) + '\n')
def evaluate(actor3D, range_, out):
    """Evaluate estimated Shelf poses in `out` against GT `actor3D`.

    For each frame of `range_`, greedily matches each GT person to the
    nearest estimate (confidence-weighted joint distance) and scores
    every limb as correct when the summed endpoint errors stay below the
    GT limb length (PCP-style). Writes <out>/../report.csv; returns 0.
    """
    shelfgt = ShelfGT(actor3D)
    # NOTE(review): check_result is never written below — appears unused
    check_result = np.zeros ( (len ( actor3D[0] ), len ( actor3D ), 10), dtype=np.int32 )
    result = readResult(out, range_)
    bones = [[0, 1], [1, 2], [3, 4], [4, 5], [6, 7], [7, 8], [9, 10], [10, 11], [12, 13], [12, 14]]
    # limb endpoints (Shelf-15 joint indices), paired with `names` below
    start = [ 9, 8, 10, 7, 3, 2, 4, 1, 12, 12,]
    end = [10, 7, 11, 6, 4, 1, 5, 0, 13, 14]
    names = ["Left Upper Arm", "Right Upper Arm", "Left Lower Arm", "Right Lower Arm", "Left Upper Leg", "Right Upper Leg", "Left Lower Leg", "Right Lower Leg", "Head", "Torso" ]
    results = []
    for img_id in tqdm(range_):
        # convert the raw results into Shelf-ordered model poses
        ests = []
        for res in result[img_id]:
            ests.append({'id': res['id'], 'keypoints3d': convert_openpose_shelf1(res['keypoints3d'])})
        gts = shelfgt[img_id]
        if len(gts) < 1:
            continue
        # confidence-weighted mean joint distance between every GT/est pair
        kpts_gt = np.stack([v['keypoints3d'] for v in gts])
        kpts_dt = np.stack([v['keypoints3d'] for v in ests])
        distances = np.linalg.norm(kpts_gt[:, None, :, :3] - kpts_dt[None, :, :, :3], axis=-1)
        conf = (kpts_gt[:, None, :, -1] > 0) * (kpts_dt[None, :, :, -1] > 0)
        dist = (distances * conf).sum(axis=-1)/conf.sum(axis=-1)
        # greedy matching: each GT takes its closest estimate
        ests_new = []
        for igt, gt in enumerate(gts):
            bestid = np.argmin(dist[igt])
            ests_new.append(ests[bestid])
        ests = ests_new
        # compute the per-limb correctness for each matched pair
        for i, data in enumerate(gts):
            kpts_gt = data['keypoints3d']
            kpts_est = ests[i]['keypoints3d']
            # limb is "right" when the summed endpoint errors are below
            # the GT limb length
            da = np.linalg.norm(kpts_gt[start, :3] - kpts_est[start, :3], axis=1)
            db = np.linalg.norm(kpts_gt[end, :3] - kpts_est[end, :3], axis=1)
            l = np.linalg.norm(kpts_gt[start, :3] - kpts_gt[end, :3], axis=1)
            isright = 1.0*((da + db) < l)
            if args.joint:
                res = {name: isright[i] for i, name in enumerate(names)}
            else:
                res = {}
            res['Mean'] = isright.mean()
            res['nf'] = img_id
            res['id'] = data['id']
            results.append(res)
    write_to_csv(join(out, '..', 'report.csv'), results)
    return 0
13,412 | from glob import glob
from tqdm import tqdm
from os.path import join
import os
import numpy as np
from easymocap.dataset import CONFIG
from easymocap.mytools.reader import read_keypoints3d
from easymocap.mytools import read_camera
from eval_utils import keypoints_error
from pprint import pprint
class Conversion:
    """Map keypoint arrays between two skeleton layouts via shared joints.

    Builds the list of joint names common to `type_i` and `type_o`
    (optionally restricted to `type_e`) and records each common joint's
    index in both layouts; `inp`/`out` then slice arrays accordingly.
    """
    def __init__(self, type_i, type_o, type_e=None):
        names_i = CONFIG[type_i]['joint_names']
        names_o = CONFIG[type_o]['joint_names']
        if type_e is None:
            self.commons = [name for name in names_o if name in names_i]
        else:
            names_e = CONFIG[type_e]['joint_names']
            self.commons = [name for name in names_e if name in names_i and name in names_o]
        self.idx_i = [names_i.index(name) for name in self.commons]
        self.idx_o = [names_o.index(name) for name in self.commons]
    def inp(self, inp):
        """Select the common joints from an input-layout array."""
        return inp[..., self.idx_i, :]
    def out(self, out):
        """Select the common joints from an output-layout array."""
        return out[..., self.idx_o, :]
    def __call__(self, inp, out):
        """Convert both arrays at once; returns (inp_common, out_common)."""
        return inp[..., self.idx_i, :], out[..., self.idx_o, :]
def write_to_csv(filename, results):
    """Print mean values of all float metrics and dump raw rows as CSV."""
    from tabulate import tabulate
    n = len(results)
    headers = [k for k in results[0].keys() if isinstance(results[0][k], float)]
    averages = ['{:.3f}'.format(sum(res[k] for res in results)/n) for k in headers]
    print('>> Totally {} samples:'.format(n))
    print(tabulate([averages], headers, tablefmt='fancy_grid'))
    with open(filename, 'w') as f:
        # the CSV keeps every key, not only the float metrics
        columns = list(results[0].keys())
        f.write(','.join(columns) + '\n')
        for res in results:
            f.write(','.join('{}'.format(res[k]) for k in columns) + '\n')
def read_keypoints3d(filename):
    """Load 3D keypoints from a JSON file into a list of person dicts.

    Each entry carries 'id', 'type' ('body25') and every available
    keypoint field as an (N, 4) float32 array; a confidence column of
    ones is appended when the stored points are only (N, 3).
    """
    records = read_json(filename)
    parsed = []
    for rec in records:
        pid = rec['id'] if 'id' in rec.keys() else rec['personID']
        person = {'id': pid, 'type': 'body25'}
        for field in ['keypoints3d', 'handl3d', 'handr3d', 'face3d']:
            if field not in rec.keys():
                continue
            pts = np.array(rec[field], dtype=np.float32)
            if pts.shape[1] == 3:
                pts = np.hstack([pts, np.ones((pts.shape[0], 1))])
            person[field] = pts
        parsed.append(person)
    return parsed
def keypoints_error(gt, est, names, use_align=False, joint_level=True):
    """Per-joint 3D error metrics between GT and estimated keypoints.

    gt, est: (J, 4) arrays of (x, y, z, conf); `names` labels the joints.
    Reports absolute error in mm ('abs') and PCK@50mm; with
    use_align=True additionally reports root-aligned ('ra') and
    Procrustes-aligned ('pa') errors. Returns a flat dict of
    '<metric>_mean' values (plus per-joint entries when joint_level=True).
    """
    assert gt.shape[-1] == 4
    assert est.shape[-1] == 4
    valid_est = est[..., -1] > 0
    valid_gt = gt[..., -1] > 0
    valid_both = valid_est * valid_gt  # kept: computed but unused in the original
    est = est[..., :-1]
    gt = gt[..., :-1]
    metrics = {}
    metrics['abs'] = np.sqrt(((gt - est)**2).sum(axis=-1)) * 1000
    metrics['pck@50'] = metrics['abs'] < 50
    if use_align:
        l_id = names.index('LHip')
        r_id = names.index('RHip')
        assert valid_est[l_id] and valid_est[r_id]
        assert valid_gt[l_id] and valid_gt[r_id]
        # root-align both skeletons, then MPJPE
        gt, est = align_by_pelvis(gt, names), align_by_pelvis(est, names)
        metrics['ra'] = np.sqrt(((est - gt) ** 2).sum(axis=-1)) * 1000
        # reconstruction error after a similarity (Procrustes) fit
        est_hat = compute_similarity_transform(est, gt)
        metrics['pa'] = np.sqrt(((est_hat - gt) ** 2).sum(axis=-1)) * 1000
    result = {}
    for key in ['abs', 'ra', 'pa', 'pck@50', 'pck@100']:
        if key not in metrics:
            continue
        result[key+'_mean'] = metrics[key].mean()
        if joint_level:
            for j, joint_name in enumerate(names):
                result[key+'_'+joint_name] = metrics[key][j]
    return result
def run_eval_keypoints(inp, out, type_i, type_o, step_gt, mode='single', args=None):
    """Evaluate estimated 3D keypoints in `out` against GT in `inp`.

    GT files are subsampled by `step_gt`; both sides are reduced to their
    common joints via `Conversion`. mode:
      - 'single' / 'matched': frame i of GT pairs with frame i of estimates
      - anything else: greedy nearest matching on confidence-weighted
        mean joint distance
    Writes per-sample metrics to <out>/../report.csv and returns 0.

    BUGFIX: removed an unreachable tail that followed `return 0` — it
    re-tabulated `results` as if it were a dict (it is a list) and would
    have crashed had it ever executed.
    """
    conversion = Conversion(type_i, type_o)
    inplists = sorted(glob(join(inp, '*.json')))[::step_gt]
    outlists = sorted(glob(join(out, '*.json')))[args.start:args.end]
    assert len(inplists) == len(outlists), '{} != {}'.format(len(inplists), len(outlists))
    results = []
    for nf, inpname in enumerate(tqdm(inplists)):
        outname = outlists[nf]
        gts = read_keypoints3d(inpname)
        ests = read_keypoints3d(outname)
        # reduce both sides to the common joints, padding confidence if needed
        for gt in gts:
            gt['keypoints3d'] = conversion.inp(gt['keypoints3d'])
            if gt['keypoints3d'].shape[1] == 3:
                gt['keypoints3d'] = np.hstack([gt['keypoints3d'], np.ones((gt['keypoints3d'].shape[0], 1))])
        for est in ests:
            est['keypoints3d'] = conversion.out(est['keypoints3d'])
            if est['keypoints3d'].shape[1] == 3:
                est['keypoints3d'] = np.hstack([est['keypoints3d'], np.ones((est['keypoints3d'].shape[0], 1))])
        # this step may reorder `ests`
        if mode == 'single':
            pass  # single person: GT and estimate are already paired
        elif mode == 'matched':
            pass  # IDs are already matched upstream
        else:
            # clear the estimated ids, then greedily match each GT to
            # the estimate with the smallest weighted joint distance
            for est in ests:
                est['id'] = -1
            kpts_gt = np.stack([v['keypoints3d'] for v in gts])
            kpts_dt = np.stack([v['keypoints3d'] for v in ests])
            distances = np.linalg.norm(kpts_gt[:, None, :, :3] - kpts_dt[None, :, :, :3], axis=-1)
            conf = (kpts_gt[:, None, :, -1] > 0) * (kpts_dt[None, :, :, -1] > 0)
            dist = (distances * conf).sum(axis=-1)/conf.sum(axis=-1)
            ests_new = []
            for igt, gt in enumerate(gts):
                bestid = np.argmin(dist[igt])
                ests_new.append(ests[bestid])
            ests = ests_new
        # accumulate per-person error metrics
        for i, data in enumerate(gts):
            kpts_gt = data['keypoints3d']
            kpts_est = ests[i]['keypoints3d']
            result = keypoints_error(kpts_gt, kpts_est, conversion.commons, joint_level=args.joint, use_align=args.align)
            result['nf'] = nf
            result['id'] = data['id']
            results.append(result)
    write_to_csv(join(out, '..', 'report.csv'), results)
    return 0
13,413 | from glob import glob
from tqdm import tqdm
from os.path import join
import os
import numpy as np
from easymocap.dataset import CONFIG
from easymocap.mytools.reader import read_keypoints3d
from easymocap.mytools import read_camera
from eval_utils import keypoints_error
from pprint import pprint
class Conversion:
    """Convert keypoint arrays between two skeleton definitions.

    Computes the joint names shared by `type_i` and `type_o` (optionally
    restricted to `type_e`) plus their indices in each layout.
    """
    def __init__(self, type_i, type_o, type_e=None):
        joints_i = CONFIG[type_i]['joint_names']
        joints_o = CONFIG[type_o]['joint_names']
        if type_e is None:
            self.commons = [j for j in joints_o if j in joints_i]
        else:
            joints_e = CONFIG[type_e]['joint_names']
            self.commons = [j for j in joints_e if j in joints_i and j in joints_o]
        self.idx_i = [joints_i.index(j) for j in self.commons]
        self.idx_o = [joints_o.index(j) for j in self.commons]
    def inp(self, inp):
        # keep only the common joints of an input-layout array
        return inp[..., self.idx_i, :]
    def out(self, out):
        # keep only the common joints of an output-layout array
        return out[..., self.idx_o, :]
    def __call__(self, inp, out):
        # convert both arrays at once
        return inp[..., self.idx_i, :], out[..., self.idx_o, :]
def write_to_csv(filename, results):
    """Show the mean of every float metric and write the raw rows to CSV."""
    from tabulate import tabulate
    summary_keys, summary_vals = [], []
    for key in results[0].keys():
        if not isinstance(results[0][key], float):
            continue
        summary_keys.append(key)
        summary_vals.append('{:.3f}'.format(sum(r[key] for r in results)/len(results)))
    print('>> Totally {} samples:'.format(len(results)))
    print(tabulate([summary_vals], summary_keys, tablefmt='fancy_grid'))
    with open(filename, 'w') as f:
        cols = list(results[0].keys())
        f.write(','.join(cols) + '\n')
        for r in results:
            f.write(','.join('{}'.format(r[c]) for c in cols) + '\n')
def read_keypoints3d(filename):
    """Parse a keypoints3d JSON file into person dicts with (N, 4) arrays."""
    entries = read_json(filename)
    people = []
    for entry in entries:
        person_id = entry['id'] if 'id' in entry.keys() else entry['personID']
        person = {'id': person_id, 'type': 'body25'}
        for key in ['keypoints3d', 'handl3d', 'handr3d', 'face3d']:
            if key not in entry.keys():
                continue
            arr = np.array(entry[key], dtype=np.float32)
            if arr.shape[1] == 3:
                # pad a confidence column of ones
                arr = np.hstack([arr, np.ones((arr.shape[0], 1))])
            person[key] = arr
        people.append(person)
    return people
def keypoints_error(gt, est, names, use_align=False, joint_level=True):
    """Compute per-joint 3D error metrics between GT and estimate.

    Both inputs are (J, 4) arrays of (x, y, z, conf). Always reports the
    absolute error in mm ('abs') and PCK@50mm; with use_align=True also
    root-aligned ('ra') and Procrustes-aligned ('pa') errors. Returns a
    flat dict of means (and per-joint values when joint_level=True).
    """
    assert gt.shape[-1] == 4
    assert est.shape[-1] == 4
    ok_est = est[..., -1] > 0
    ok_gt = gt[..., -1] > 0
    ok_both = ok_est * ok_gt  # kept: computed but unused in the original
    est = est[..., :-1]
    gt = gt[..., :-1]
    errs = {}
    errs['abs'] = np.sqrt(((gt - est)**2).sum(axis=-1)) * 1000
    errs['pck@50'] = errs['abs'] < 50
    if use_align:
        l_id = names.index('LHip')
        r_id = names.index('RHip')
        assert ok_est[l_id] and ok_est[r_id]
        assert ok_gt[l_id] and ok_gt[r_id]
        # root alignment, then MPJPE
        gt, est = align_by_pelvis(gt, names), align_by_pelvis(est, names)
        errs['ra'] = np.sqrt(((est - gt) ** 2).sum(axis=-1)) * 1000
        # reconstruction error after a similarity transform
        est_hat = compute_similarity_transform(est, gt)
        errs['pa'] = np.sqrt(((est_hat - gt) ** 2).sum(axis=-1)) * 1000
    flat = {}
    for key in ['abs', 'ra', 'pa', 'pck@50', 'pck@100']:
        if key not in errs:
            continue
        flat[key+'_mean'] = errs[key].mean()
        if joint_level:
            for j, jname in enumerate(names):
                flat[key+'_'+jname] = errs[key][j]
    return flat
def run_eval_keypoints_mono(inp, out, type_i, type_o, type_e, step_gt, cam_path, mode='single'):
    """Evaluate monocular per-view 3D keypoint results against shared GT.

    For each view folder under `out`, every per-frame result JSON is compared
    with the GT JSON of the same basename under `inp`: the GT is transformed
    from world coordinates into the view's camera frame, per-person keypoint
    errors are computed via `keypoints_error`, and all rows are written to
    `<out>/../report.csv`.

    Args:
        inp: directory of GT keypoint JSON files (one per frame).
        out: results directory; one sub-folder per view, each holding
            per-frame JSON files named like the GT files.
        type_i, type_o, type_e: skeleton types for input/output/evaluation,
            passed to `Conversion`.
        step_gt: subsampling step over the GT file list.
        cam_path: directory holding 'intri.yml' / 'extri.yml'.
        mode: unused in this function — TODO confirm callers' expectations.

    Relies on the module-level `args` (args.sub, args.cam_res, args.joint).
    """
    conversion = Conversion(type_i, type_o, type_e)
    # NOTE(review): inplists is computed but never referenced below —
    # frames are enumerated from each view's result folder instead; confirm.
    inplists = sorted(glob(join(inp, '*.json')))[::step_gt]
    # TODO:only evaluate a subset of views
    if len(args.sub) == 0:
        views = sorted(os.listdir(out))
    else:
        views = args.sub
    # read camera
    cameras = read_camera(join(cam_path, 'intri.yml'), join(cam_path, 'extri.yml'), views)
    cameras = {key:cameras[key] for key in views}
    if args.cam_res is not None:
        # NOTE(review): cameras_res is loaded but never used below — confirm intent.
        cameras_res = read_camera(join(args.cam_res, 'intri.yml'), join(args.cam_res, 'extri.yml'), views)
        cameras_res = {key:cameras_res[key] for key in views}
    results = []
    for view in views:
        outlists = sorted(glob(join(out, view, '*.json')))
        # world -> camera transform for this view; presumably 3x4 (the
        # transformed points are hstacked back to 4 columns) — verify.
        RT = cameras[view]['RT']
        for outname in outlists:
            basename = os.path.basename(outname)
            gtname = join(inp, basename)
            gts = read_keypoints3d(gtname)
            ests = read_keypoints3d(outname)
            # Transform GT keypoints from world into the current camera frame.
            for gt in gts:
                keypoints3d = conversion.inp(gt['keypoints3d'])
                conf = keypoints3d[:, -1:].copy()
                # Replace the confidence column with 1 so the points are
                # homogeneous for the RT multiply; restore conf afterwards.
                keypoints3d[:, -1] = 1
                keypoints3d = (RT @ keypoints3d.T).T
                gt['keypoints3d'] = np.hstack([keypoints3d, conf])
            for est in ests:
                est['keypoints3d'] = conversion.out(est['keypoints3d'])
                if est['keypoints3d'].shape[1] == 3:
                    # Append a confidence column of ones.
                    est['keypoints3d'] = np.hstack([est['keypoints3d'], np.ones((est['keypoints3d'].shape[0], 1))])
            # Compute errors per person.
            # NOTE(review): GT and estimates are matched by list index —
            # assumes identical person ordering/count in both files; confirm.
            for i, data in enumerate(gts):
                kpts_gt = data['keypoints3d']
                kpts_est = ests[i]['keypoints3d']
                # Compute all error metrics and collect them as one dict row.
                result = keypoints_error(kpts_gt, kpts_est, conversion.commons, joint_level=args.joint, use_align=True)
                result['pid'] = data['id']
                result['view'] = view
                results.append(result)
    write_to_csv(join(out, '..', 'report.csv'), results)
13,414 | import numpy as np
def compute_similarity_transform(S1, S2):
    """
    Align S1 to S2 with a similarity transform (scale s, rotation R,
    translation t) that solves the orthogonal Procrustes problem, and
    return the transformed copy of S1. Accepts points as (3, N) / (2, N),
    or (N, d), which is transposed internally and transposed back.
    """
    was_row_major = False
    if S1.shape[0] != 3 and S1.shape[0] != 2:
        S1, S2 = S1.T, S2.T
        was_row_major = True
    assert S2.shape[1] == S1.shape[1]

    # 1. Center both point sets.
    mean1 = S1.mean(axis=1, keepdims=True)
    mean2 = S2.mean(axis=1, keepdims=True)
    C1 = S1 - mean1
    C2 = S2 - mean2

    # 2. Variance of the centered source, used to recover the scale.
    var1 = (C1 ** 2).sum()

    # 3-4. Cross-covariance; its SVD yields the optimal rotation.
    K = C1 @ C2.T
    U, _, Vh = np.linalg.svd(K)
    V = Vh.T
    # Force a proper rotation: flip the last axis if det would be -1.
    D = np.eye(U.shape[0])
    D[-1, -1] *= np.sign(np.linalg.det(U @ V.T))
    R = V @ (D @ U.T)

    # 5-6. Recover scale and translation.
    scale = np.trace(R @ K) / var1
    t = mean2 - scale * (R @ mean1)

    # 7. Apply the transform to the (uncentered) source points.
    aligned = scale * (R @ S1) + t
    return aligned.T if was_row_major else aligned
The provided code snippet includes necessary dependencies for implementing the `reconstruction_error` function. Write a Python function `def reconstruction_error(S1, S2, reduction='mean')` to solve the following problem:
Do Procrustes alignment and compute reconstruction error.
Here is the function:
def reconstruction_error(S1, S2, reduction='mean'):
    """Procrustes-align S1 to S2 and return the per-point L2 error.

    reduction: 'mean' or 'sum' collapses the per-point distances; any
    other value returns the raw per-point distance array unchanged.
    """
    aligned = compute_similarity_transform(S1, S2)
    err = np.sqrt(((aligned - S2) ** 2).sum(axis=-1))
    if reduction == 'mean':
        return err.mean()
    if reduction == 'sum':
        return err.sum()
    return err
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.