id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
180,811 | import os, sys, shutil
import os.path as osp
import multiprocessing as mp
import numpy as np
import cv2
import pickle
import json
def save_mesh_to_obj(obj_path, verts, faces=None):
    """Write a triangle mesh to a Wavefront .obj file.

    Args:
        obj_path: output file path.
        verts: (V, 3) np.ndarray of vertex positions.
        faces: optional (F, 3) np.ndarray of 0-based triangle indices.
            Previously this function asserted `faces` was an ndarray even
            though the default is None, so calling without faces crashed.
    """
    assert isinstance(verts, np.ndarray)
    if faces is not None:
        assert isinstance(faces, np.ndarray)
    with open(obj_path, 'w') as out_f:
        # write verts
        for v in verts:
            out_f.write(f"v {v[0]:.4f} {v[1]:.4f} {v[2]:.4f}\n")
        # write faces (.obj indices are 1-based, hence the +1)
        if faces is not None:
            faces = faces.copy() + 1
            for f in faces:
                out_f.write(f"f {f[0]} {f[1]} {f[2]}\n")
180,812 | import os, sys, shutil
import os.path as osp
import multiprocessing as mp
import numpy as np
import cv2
import pickle
import json
def renew_dir(target_dir):
    """Reset target_dir to an empty directory.

    Any existing directory tree at target_dir is removed first, then an
    empty directory is created in its place.
    """
    already_there = osp.exists(target_dir)
    if already_there:
        shutil.rmtree(target_dir)
    os.makedirs(target_dir)
180,813 | import os, sys, shutil
import os.path as osp
import multiprocessing as mp
import numpy as np
import cv2
import pickle
import json
def update_extension(file_path, new_extension):
    """Return file_path with its final extension replaced by new_extension.

    Args:
        file_path: input path, e.g. "a/b/c.png".
        new_extension: replacement extension including the dot, e.g. ".jpg".

    The previous implementation used str.replace, which substituted EVERY
    occurrence of the old extension substring anywhere in the path
    (e.g. "x.png.png" -> "x.jpg.jpg", and directory names containing the
    extension were mangled). Only the trailing extension is changed now.
    """
    assert new_extension[0] == '.'
    stem, _old_extension = osp.splitext(file_path)
    return stem + new_extension
180,814 | import os, sys, shutil
import os.path as osp
import multiprocessing as mp
import numpy as np
import cv2
import pickle
import json
def remove_swp(in_dir):
    """Recursively delete all Vim swap (*.swp) files under in_dir.

    (Removed the unused `remove_files` accumulator from the original.)
    """
    for subdir, dirs, files in os.walk(in_dir):
        for file_name in files:
            if file_name.endswith('.swp'):
                os.remove(osp.join(subdir, file_name))
180,815 | import os, sys, shutil
import os.path as osp
import multiprocessing as mp
import numpy as np
import cv2
import pickle
import json
def remove_pyc(in_dir):
    """Recursively delete all compiled Python (*.pyc) files under in_dir.

    (Removed the unused `remove_files` accumulator from the original.)
    """
    for subdir, dirs, files in os.walk(in_dir):
        for file_name in files:
            if file_name.endswith('.pyc'):
                os.remove(osp.join(subdir, file_name))
180,816 | import os, sys, shutil
import os.path as osp
import multiprocessing as mp
import numpy as np
import cv2
import pickle
import json
def md5sum(file_path):
    """Return the hex MD5 digest of a file.

    Reads in 1 MiB chunks instead of slurping the whole file, so large
    files no longer need to fit in memory. The digest is unchanged.
    """
    import hashlib
    hash_md5 = hashlib.md5()
    with open(file_path, 'rb') as in_f:
        for chunk in iter(lambda: in_f.read(1 << 20), b''):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()
180,817 | import os, sys, shutil
import os.path as osp
import multiprocessing as mp
import numpy as np
import cv2
import pickle
import json
def load_npz(npz_file):
    """Load every array stored in an .npz archive into a plain dict."""
    assert npz_file.endswith(".npz")
    archive = np.load(npz_file, mmap_mode='r')
    return {name: archive[name] for name in archive.files}
180,818 | import os, sys, shutil
import os.path as osp
import multiprocessing as mp
import numpy as np
import cv2
import pickle
import json
def update_npz_file(npz_file, new_key, new_data):
    """Add (or overwrite) one array in an existing .npz archive in place.

    The archive is fully reloaded and rewritten, since .npz files cannot
    be appended to.
    """
    assert npz_file.endswith(".npz")
    archive = np.load(npz_file, mmap_mode='r')
    merged = {name: archive[name] for name in archive.files}
    merged[new_key] = new_data
    np.savez(npz_file, **merged)
180,819 | import torch
import torch.nn as nn
import numpy as np
import torchgeometry as tgm
def flip_hand_pose(pose):
    """Flip an axis-angle hand pose left<->right by negating the y and z
    rotation components.

    Args:
        pose: np.ndarray, either flat (3*J,) or (J, 3).

    Returns:
        Flipped pose with the same layout as the input. The input array is
        left untouched — previously this version mutated its argument in
        place, unlike the duplicate implementation elsewhere in this project
        which copies first; they now agree.
    """
    pose = pose.copy()  # do not mutate the caller's array
    if len(pose.shape) == 1:
        pose = pose.reshape(-1, 3)
        pose[:, 1] *= -1
        pose[:, 2] *= -1
        return pose.reshape(-1,)
    else:
        assert len(pose.shape) == 2
        pose[:, 1] *= -1
        pose[:, 2] *= -1
        return pose
180,820 | import torch
import torch.nn as nn
import numpy as np
import torchgeometry as tgm
def flip_hand_joints_3d(joints_3d):
    """Mirror (J, 3) hand joints across the YZ plane (negate x).

    Equivalent to left-multiplying each joint by diag(-1, 1, 1).
    """
    assert joints_3d.shape[1] == 3
    assert len(joints_3d.shape) == 2
    mirror = np.diag([-1, 1, 1])
    # (mirror @ J.T).T == J @ mirror for a symmetric diagonal matrix
    return joints_3d.dot(mirror)
180,821 | import torch
import torch.nn as nn
import numpy as np
import torchgeometry as tgm
pi = torch.Tensor([3.14159265358979323846])
The provided code snippet includes necessary dependencies for implementing the `rad2deg` function. Write a Python function `def rad2deg(tensor)` to solve the following problem:
r"""Function that converts angles from radians to degrees. See :class:`~torchgeometry.RadToDeg` for details. Args: tensor (Tensor): Tensor of arbitrary shape. Returns: Tensor: Tensor with same shape as input. Example: >>> input = tgm.pi * torch.rand(1, 3, 3) >>> output = tgm.rad2deg(input)
Here is the function:
def rad2deg(tensor):
    r"""Function that converts angles from radians to degrees.
    See :class:`~torchgeometry.RadToDeg` for details.
    Args:
        tensor (Tensor): Tensor of arbitrary shape.
    Returns:
        Tensor: Tensor with same shape as input.
    Example:
        >>> input = tgm.pi * torch.rand(1, 3, 3)
        >>> output = tgm.rad2deg(input)
    """
    if not torch.is_tensor(tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}"
                        .format(type(tensor)))
    # Dividing by the python float math.pi keeps the input's dtype/device and
    # avoids the per-call device transfer of the module-level `pi` tensor.
    return 180. * tensor / math.pi
180,822 | import torch
import torch.nn as nn
import numpy as np
import torchgeometry as tgm
pi = torch.Tensor([3.14159265358979323846])
The provided code snippet includes necessary dependencies for implementing the `deg2rad` function. Write a Python function `def deg2rad(tensor)` to solve the following problem:
r"""Function that converts angles from degrees to radians. See :class:`~torchgeometry.DegToRad` for details. Args: tensor (Tensor): Tensor of arbitrary shape. Returns: Tensor: Tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = tgm.deg2rad(input)
Here is the function:
def deg2rad(tensor):
    r"""Function that converts angles from degrees to radians.
    See :class:`~torchgeometry.DegToRad` for details.
    Args:
        tensor (Tensor): Tensor of arbitrary shape.
    Returns:
        Tensor: Tensor with same shape as input.
    Examples::
        >>> input = 360. * torch.rand(1, 3, 3)
        >>> output = tgm.deg2rad(input)
    """
    if not torch.is_tensor(tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}"
                        .format(type(tensor)))
    # math.pi keeps dtype/device and avoids transferring the module-level
    # `pi` tensor to the input's device on every call.
    return tensor * math.pi / 180.
180,823 | import torch
import torch.nn as nn
import numpy as np
import torchgeometry as tgm
The provided code snippet includes necessary dependencies for implementing the `convert_points_from_homogeneous` function. Write a Python function `def convert_points_from_homogeneous(points)` to solve the following problem:
r"""Function that converts points from homogeneous to Euclidean space. See :class:`~torchgeometry.ConvertPointsFromHomogeneous` for details. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = tgm.convert_points_from_homogeneous(input) # BxNx2
Here is the function:
def convert_points_from_homogeneous(points):
    r"""Function that converts points from homogeneous to Euclidean space.
    See :class:`~torchgeometry.ConvertPointsFromHomogeneous` for details.
    Examples::
        >>> input = torch.rand(2, 4, 3)  # BxNx3
        >>> output = tgm.convert_points_from_homogeneous(input)  # BxNx2
    """
    if not torch.is_tensor(points):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(points)))
    if len(points.shape) < 2:
        raise ValueError("Input must be at least a 2D tensor. Got {}".format(
            points.shape))
    # Divide the leading coordinates by the trailing homogeneous coordinate.
    euclidean = points[..., :-1]
    w = points[..., -1:]
    return euclidean / w
180,824 | import torch
import torch.nn as nn
import numpy as np
import torchgeometry as tgm
The provided code snippet includes necessary dependencies for implementing the `convert_points_to_homogeneous` function. Write a Python function `def convert_points_to_homogeneous(points)` to solve the following problem:
r"""Function that converts points from Euclidean to homogeneous space. See :class:`~torchgeometry.ConvertPointsToHomogeneous` for details. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = tgm.convert_points_to_homogeneous(input) # BxNx4
Here is the function:
def convert_points_to_homogeneous(points):
    r"""Function that converts points from Euclidean to homogeneous space.
    See :class:`~torchgeometry.ConvertPointsToHomogeneous` for details.
    Examples::
        >>> input = torch.rand(2, 4, 3)  # BxNx3
        >>> output = tgm.convert_points_to_homogeneous(input)  # BxNx4
    """
    if not torch.is_tensor(points):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(points)))
    if len(points.shape) < 2:
        raise ValueError("Input must be at least a 2D tensor. Got {}".format(
            points.shape))
    # Append a trailing homogeneous coordinate of 1 (same dtype/device).
    ones = torch.ones_like(points[..., :1])
    return torch.cat([points, ones], dim=-1)
180,825 | import torch
import torch.nn as nn
import numpy as np
import torchgeometry as tgm
def angle_axis_to_rotation_matrix(angle_axis):
    """Convert 3d vector of axis-angle rotation to 4x4 rotation matrix
    Args:
        angle_axis (Tensor): tensor of 3d vector of axis-angle rotations.
    Returns:
        Tensor: tensor of 4x4 rotation matrices.
    Shape:
        - Input: :math:`(N, 3)`
        - Output: :math:`(N, 4, 4)`
    Example:
        >>> input = torch.rand(1, 3)  # Nx3
        >>> output = tgm.angle_axis_to_rotation_matrix(input)  # Nx4x4
    """
    def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6):
        # Rodrigues' rotation formula, written out element by element.
        # We want to be careful to only evaluate the square root if the
        # norm of the angle_axis vector is greater than zero. Otherwise
        # we get a division by zero.
        k_one = 1.0
        theta = torch.sqrt(theta2)
        wxyz = angle_axis / (theta + eps)  # unit rotation axis
        wx, wy, wz = torch.chunk(wxyz, 3, dim=1)
        cos_theta = torch.cos(theta)
        sin_theta = torch.sin(theta)
        r00 = cos_theta + wx * wx * (k_one - cos_theta)
        r10 = wz * sin_theta + wx * wy * (k_one - cos_theta)
        r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta)
        r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta
        r11 = cos_theta + wy * wy * (k_one - cos_theta)
        r21 = wx * sin_theta + wy * wz * (k_one - cos_theta)
        r02 = wy * sin_theta + wx * wz * (k_one - cos_theta)
        r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta)
        r22 = cos_theta + wz * wz * (k_one - cos_theta)
        rotation_matrix = torch.cat(
            [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1)
        return rotation_matrix.view(-1, 3, 3)

    def _compute_rotation_matrix_taylor(angle_axis):
        # First-order approximation (I + skew(r)) used for near-zero angles,
        # where the normalization in the exact formula would be unstable.
        rx, ry, rz = torch.chunk(angle_axis, 3, dim=1)
        k_one = torch.ones_like(rx)
        rotation_matrix = torch.cat(
            [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1)
        return rotation_matrix.view(-1, 3, 3)

    # stolen from ceres/rotation.h
    # theta2 holds the squared rotation angle for each batch element.
    _angle_axis = torch.unsqueeze(angle_axis, dim=1)
    theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2))
    theta2 = torch.squeeze(theta2, dim=1)
    # compute rotation matrices (both variants, full batch each)
    rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2)
    rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis)
    # create mask to handle both cases: exact formula where theta2 > eps,
    # Taylor expansion otherwise.
    eps = 1e-6
    mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device)
    mask_pos = (mask).type_as(theta2)
    mask_neg = (mask == False).type_as(theta2)  # noqa
    # create output pose matrix (identity, so the 4th row/col stay [0,0,0,1])
    batch_size = angle_axis.shape[0]
    rotation_matrix = torch.eye(4).to(angle_axis.device).type_as(angle_axis)
    rotation_matrix = rotation_matrix.view(1, 4, 4).repeat(batch_size, 1, 1)
    # fill output matrix with masked values
    rotation_matrix[..., :3, :3] = \
        mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor
    return rotation_matrix  # Nx4x4
The provided code snippet includes necessary dependencies for implementing the `rtvec_to_pose` function. Write a Python function `def rtvec_to_pose(rtvec)` to solve the following problem:
Convert axis-angle rotation and translation vector to 4x4 pose matrix Args: rtvec (Tensor): Rodrigues vector transformations Returns: Tensor: transformation matrices Shape: - Input: :math:`(N, 6)` - Output: :math:`(N, 4, 4)` Example: >>> input = torch.rand(3, 6) # Nx6 >>> output = tgm.rtvec_to_pose(input) # Nx4x4
Here is the function:
def rtvec_to_pose(rtvec):
    """
    Convert axis-angle rotation and translation vector to 4x4 pose matrix
    Args:
        rtvec (Tensor): Rodrigues vector transformations
    Returns:
        Tensor: transformation matrices
    Shape:
        - Input: :math:`(N, 6)`
        - Output: :math:`(N, 4, 4)`
    Example:
        >>> input = torch.rand(3, 6)  # Nx6
        >>> output = tgm.rtvec_to_pose(input)  # Nx4x4
    """
    assert rtvec.shape[-1] == 6, 'rtvec=[rx, ry, rz, tx, ty, tz]'
    rot_part = rtvec[..., :3]
    trans_part = rtvec[..., 3:]
    pose = angle_axis_to_rotation_matrix(rot_part)
    pose[..., :3, 3] = trans_part
    return pose
180,826 | import torch
import torch.nn as nn
import numpy as np
import torchgeometry as tgm
def rotation_matrix_to_angle_axis(rotation_matrix):
    """Convert 3x4 rotation matrix to Rodrigues vector
    Args:
        rotation_matrix (Tensor): rotation matrix.
    Returns:
        Tensor: Rodrigues vector transformation.
    Shape:
        - Input: :math:`(N, 3, 4)`
        - Output: :math:`(N, 3)`
    Example:
        >>> input = torch.rand(2, 3, 4)  # Nx3x4
        >>> output = tgm.rotation_matrix_to_angle_axis(input)  # Nx3
    """
    # todo add check that matrix is a valid rotation matrix
    # NOTE(review): rotation_matrix_to_quaternion / quaternion_to_angle_axis
    # are defined elsewhere in this module (not visible in this chunk).
    quaternion = rotation_matrix_to_quaternion(rotation_matrix)
    return quaternion_to_angle_axis(quaternion)
The provided code snippet includes necessary dependencies for implementing the `rotmat3x3_to_angle_axis` function. Write a Python function `def rotmat3x3_to_angle_axis(init_pred_rotmat)` to solve the following problem:
init_pred_rotmat: torch.tensor with (1, N,3,3) dimension output: (1, N,3)
Here is the function:
def rotmat3x3_to_angle_axis(init_pred_rotmat):
    """
    Convert a batch of 3x3 rotation matrices to axis-angle vectors.

    init_pred_rotmat: torch.tensor with (1, N,3,3) dimension
    output: (1, N,3)
    """
    assert init_pred_rotmat.shape[0] == 1, "Sould be fixed to handle general batch size.. not confirmed yet"
    device = init_pred_rotmat.device
    jointNum = init_pred_rotmat.shape[1]
    # Append a fixed fourth column to each 3x3 so it matches the (N,3,4)
    # input expected by rotation_matrix_to_angle_axis.
    ones = torch.tensor([0,0,1], dtype=torch.float32,).view(1, 3, 1).expand(jointNum, -1, -1).to(device)
    pred_rotmat_hom = torch.cat([ init_pred_rotmat.view(-1, 3, 3),ones ], dim=-1) #24,3,4
    pred_aa = rotation_matrix_to_angle_axis(pred_rotmat_hom).contiguous().view(1, -1) #[1,72]
    # tgm.rotation_matrix_to_angle_axis returns NaN for 0 rotation, so manually hack it
    pred_aa[torch.isnan(pred_aa)] = 0.0 #[1,72]
    pred_aa = pred_aa.view(1,jointNum,3)
    return pred_aa
180,827 | import sys
import torch
import numpy as np
import scipy.misc
import cv2
from torchvision.transforms import Normalize
def convert_smpl_to_bbox_perspective(data3D, scale_ori, trans_ori, focalLeng, scaleFactor=1.0):
    """Map SMPL-space 3D points into a perspective-camera frame derived from
    weak-perspective bbox camera parameters.

    Args:
        data3D: (V, 3) np.ndarray of points; not modified (a copy is returned).
        scale_ori: weak-perspective scale (w.r.t. the 224x224 crop).
        trans_ori: (2,) weak-perspective translation (crop-normalized).
        focalLeng: focal length used to place the points at the depth where
            the perspective projection matches the weak-perspective one.
        scaleFactor: extra uniform scale applied at the end.

    Removed the original's two dead `if False:` debug branches (weak
    perspective and fixed-depth rescaling); only the always-taken paths
    remain, so behavior is unchanged.
    """
    data3D = data3D.copy()
    resnet_input_size_half = 224 * 0.5
    # Undo the crop normalization of the weak-perspective camera.
    scale = scale_ori * resnet_input_size_half
    trans = trans_ori * resnet_input_size_half
    # Current projection already considers the camera center during rendering,
    # thus no need to consider the principal axis here.
    delta = trans / scale
    data3D[:, 0:2] += delta
    # Shift the points to the depth where f/z equals the weak-persp. scale.
    newZ = focalLeng / scale
    deltaZ = newZ - np.mean(data3D[:, 2])
    data3D[:, 2] += deltaZ
    data3D *= scaleFactor
    return data3D
180,828 | import sys
import torch
import numpy as np
import scipy.misc
import cv2
from torchvision.transforms import Normalize
The provided code snippet includes necessary dependencies for implementing the `bbox_from_openpose` function. Write a Python function `def bbox_from_openpose(openpose_file, rescale=1.2, detection_thresh=0.2)` to solve the following problem:
Get center and scale for bounding box from openpose detections.
Here is the function:
def bbox_from_openpose(openpose_file, rescale=1.2, detection_thresh=0.2):
    """Get center and scale for bounding box from openpose detections.

    Args:
        openpose_file: path to an OpenPose JSON result file.
        rescale: factor loosening the bbox around the detected keypoints.
        detection_thresh: minimum keypoint confidence to be counted.

    Returns:
        (center, scale) of a square bbox, or (None, None) when no person
        was detected. scale maps a 200px reference box to the bbox size.
    """
    import json  # this record's module-level imports do not include json

    with open(openpose_file, 'r') as f:
        data = json.load(f)
    if 'people' not in data or len(data['people']) == 0:
        return None, None
    keypoints = data['people'][0]['pose_keypoints_2d']
    keypoints = np.reshape(np.array(keypoints), (-1, 3))
    valid = keypoints[:, -1] > detection_thresh
    valid_keypoints = keypoints[valid][:, :-1]  # (num_valid, 2)
    center = valid_keypoints.mean(axis=0)
    bbox_size = (valid_keypoints.max(axis=0) - valid_keypoints.min(axis=0)).max()
    # adjust bounding box tightness
    scale = bbox_size / 200.0
    scale *= rescale
    return center, scale
180,829 | import sys
import torch
import numpy as np
import scipy.misc
import cv2
from torchvision.transforms import Normalize
The provided code snippet includes necessary dependencies for implementing the `bbox_from_keypoint2d` function. Write a Python function `def bbox_from_keypoint2d(keypoints, rescale=1.2, detection_thresh=0.2)` to solve the following problem:
output: center: bbox center scale: scale_n2o: 224x224 -> original bbox size (max length if not a square bbox)
Here is the function:
def bbox_from_keypoint2d(keypoints, rescale=1.2, detection_thresh=0.2):
    """Compute a square bbox (center, scale) from 2D keypoints.

    output:
        center: bbox center
        scale: scale_n2o: 224x224 -> original bbox size
               (max side length if the bbox is not square)
    """
    xy_only = len(keypoints.shape) == 2 and keypoints.shape[1] == 2  # (X, 2)
    if xy_only:
        valid_keypoints = keypoints
    else:
        # (x, y, confidence) triplets: keep only confident detections.
        kps = np.reshape(np.array(keypoints), (-1, 3))
        confident = kps[:, -1] > detection_thresh
        valid_keypoints = kps[confident][:, :-1]
    center = valid_keypoints.mean(axis=0)
    extent = valid_keypoints.max(axis=0) - valid_keypoints.min(axis=0)
    scale = (extent.max() / 200.0) * rescale
    return center, scale
180,830 | import sys
import torch
import numpy as np
import scipy.misc
import cv2
from torchvision.transforms import Normalize
The provided code snippet includes necessary dependencies for implementing the `bbox_from_keypoints` function. Write a Python function `def bbox_from_keypoints(keypoints, rescale=1.2, detection_thresh=0.2, imageHeight= None)` to solve the following problem:
Get center and scale for bounding box from openpose detections.
Here is the function:
def bbox_from_keypoints(keypoints, rescale=1.2, detection_thresh=0.2, imageHeight=None):
    """Get center and scale for bounding box from openpose detections.

    Args:
        keypoints: flat or (N, 3) array of (x, y, confidence) triplets.
            When imageHeight is given, indices 10/13 and 11/14 are consulted
            (presumably OpenPose BODY_25 knees/feet — TODO confirm ordering).
        rescale: bbox loosening factor.
        detection_thresh: minimum confidence for a keypoint to count.
        imageHeight: if given, the bbox is extended downward (clamped to the
            image) when knees or feet were not detected, to cover a likely
            truncated lower body.

    Returns:
        (center, scale, bbox_XYWH), or (None, None, None) when fewer than
        two keypoints pass the threshold.

    (The original's dead `if False:` limb-count check was removed; behavior
    is unchanged.)
    """
    keypoints = np.reshape(np.array(keypoints), (-1, 3))
    valid = keypoints[:, -1] > detection_thresh
    valid_keypoints = keypoints[valid][:, :-1]  # (num_valid, 2)
    if len(valid_keypoints) < 2:
        return None, None, None

    min_pt = np.min(valid_keypoints, axis=0)
    max_pt = np.max(valid_keypoints, axis=0)
    bbox = [min_pt[0], min_pt[1], max_pt[0] - min_pt[0], max_pt[1] - min_pt[1]]

    if imageHeight is not None:
        if valid[10] == False and valid[13] == False:    # no knees detected
            # Extend by the full current height, clamped to the image bottom.
            max_pt[1] = min(max_pt[1] + (max_pt[1] - min_pt[1]), imageHeight)
            bbox = [min_pt[0], min_pt[1], max_pt[0] - min_pt[0], max_pt[1] - min_pt[1]]
            valid_keypoints = np.vstack((valid_keypoints, np.array(max_pt)))
        elif valid[11] == False and valid[14] == False:  # no feet detected
            max_pt[1] = min(max_pt[1] + (max_pt[1] - min_pt[1]) * 0.2, imageHeight)
            bbox = [min_pt[0], min_pt[1], max_pt[0] - min_pt[0], max_pt[1] - min_pt[1]]
            valid_keypoints = np.vstack((valid_keypoints, np.array(max_pt)))

    center = valid_keypoints.mean(axis=0)
    bbox_size = (valid_keypoints.max(axis=0) - valid_keypoints.min(axis=0)).max()
    # adjust bounding box tightness
    scale = bbox_size / 200.0
    scale *= rescale
    return center, scale, bbox
180,831 | import sys
import torch
import numpy as np
import scipy.misc
import cv2
from torchvision.transforms import Normalize
The provided code snippet includes necessary dependencies for implementing the `bbox_from_bbr` function. Write a Python function `def bbox_from_bbr(bbox_XYWH, rescale=1.2, detection_thresh=0.2, imageHeight= None)` to solve the following problem:
Get center and scale for bounding box from openpose detections.
Here is the function:
def bbox_from_bbr(bbox_XYWH, rescale=1.2, detection_thresh=0.2, imageHeight=None):
    """Get center and scale from a (minX, minY, width, height) bbox array.

    Note: detection_thresh and imageHeight are unused; they are kept for
    signature compatibility with the other bbox_from_* helpers.
    """
    half_extent = 0.5 * bbox_XYWH[2:]
    center = bbox_XYWH[:2] + half_extent
    longest_side = max(bbox_XYWH[2:])
    scale = (longest_side / 200.0) * rescale
    return center, scale
180,832 | import sys
import torch
import numpy as np
import scipy.misc
import cv2
from torchvision.transforms import Normalize
The provided code snippet includes necessary dependencies for implementing the `bbox_from_json` function. Write a Python function `def bbox_from_json(bbox_file)` to solve the following problem:
Get center and scale of bounding box from bounding box annotations. The expected format is [top_left(x), top_left(y), width, height].
Here is the function:
def bbox_from_json(bbox_file):
    """Get center and scale of bounding box from bounding box annotations.
    The expected format is [top_left(x), top_left(y), width, height].
    """
    import json  # this record's module-level imports do not include json

    with open(bbox_file, 'r') as f:
        bbox = np.array(json.load(f)['bbox']).astype(np.float32)
    ul_corner = bbox[:2]
    center = ul_corner + 0.5 * bbox[2:]
    width = max(bbox[2], bbox[3])
    scale = width / 200.0
    # make sure the bounding box is rectangular
    return center, scale
180,833 | import os, sys, shutil
import os.path as osp
import numpy as np
import torch
from torch.nn import functional as F
import cv2
import numpy.matlib as npm
import mocap_utils.geometry_utils_torch as gut
def flip_hand_pose(pose):
    """Flip an axis-angle hand pose left<->right by negating the y and z
    rotation components.

    Accepts either a flat (3*J,) vector or a (J, 3) array; the input array
    is not modified and the result keeps the input's layout.
    """
    flipped = pose.copy()
    if flipped.ndim == 1:
        flipped = flipped.reshape(-1, 3)
        flipped[:, 1:] *= -1  # negate y and z components together
        return flipped.reshape(-1,)
    assert flipped.ndim == 2
    flipped[:, 1:] *= -1
    return flipped
180,834 | import os, sys, shutil
import os.path as osp
import numpy as np
import torch
from torch.nn import functional as F
import cv2
import numpy.matlib as npm
import mocap_utils.geometry_utils_torch as gut
def flip_hand_joints_3d(joints_3d):
    """Mirror (J, 3) hand joints left<->right by negating the x coordinate."""
    assert joints_3d.shape[1] == 3
    assert len(joints_3d.shape) == 2
    # Element-wise sign flip of column 0 == multiplying by diag(-1, 1, 1).
    return joints_3d * np.array([-1, 1, 1])
180,835 | import os, sys, shutil
import os.path as osp
import numpy as np
import torch
from torch.nn import functional as F
import cv2
import numpy.matlib as npm
import mocap_utils.geometry_utils_torch as gut
The provided code snippet includes necessary dependencies for implementing the `rot6d_to_rotmat` function. Write a Python function `def rot6d_to_rotmat(x)` to solve the following problem:
Convert 6D rotation representation to 3x3 rotation matrix. Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019 Input: (B,6) Batch of 6-D rotation representations Output: (B,3,3) Batch of corresponding rotation matrices
Here is the function:
def rot6d_to_rotmat(x):
    """Convert 6D rotation representation to 3x3 rotation matrix.
    Based on Zhou et al., "On the Continuity of Rotation Representations in
    Neural Networks", CVPR 2019.

    Input:
        (B,6) Batch of 6-D rotation representations
    Output:
        (B,3,3) Batch of corresponding rotation matrices
    """
    assert isinstance(x, torch.Tensor), "Current version only supports torch.tensor"
    x = x.view(-1, 3, 2)
    a1 = x[:, :, 0]
    a2 = x[:, :, 1]
    # Gram-Schmidt: b1 is normalized a1; b2 is a2 minus its b1 component.
    b1 = F.normalize(a1)
    b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1)
    # Explicit dim=1: relying on torch.cross's implicit dim selection is
    # deprecated; the result is identical for (B, 3) inputs.
    b3 = torch.cross(b1, b2, dim=1)
    return torch.stack((b1, b2, b3), dim=-1)
180,836 | import os, sys, shutil
import os.path as osp
import numpy as np
import torch
from torch.nn import functional as F
import cv2
import numpy.matlib as npm
import mocap_utils.geometry_utils_torch as gut
def angle_axis_to_rotation_matrix(angle_axis):
    """Convert axis-angle vectors to rotation matrices.

    Accepts either a torch.Tensor or a np.ndarray and returns the same kind;
    the actual conversion lives in __angle_axis_to_rotation_matrix_torch
    (defined elsewhere in this module, not visible in this chunk).
    """
    aa = angle_axis
    if isinstance(aa, torch.Tensor):
        return __angle_axis_to_rotation_matrix_torch(aa)
    else:
        assert isinstance(aa, np.ndarray)
        # numpy path: round-trip through torch, then back to numpy.
        aa_torch = torch.from_numpy(aa)
        rotmat_torch = __angle_axis_to_rotation_matrix_torch(aa_torch)
        return rotmat_torch.numpy()
def angle_axis_to_rot6d(aa):
    """Convert a batch (B, 3) of axis-angle vectors to the 6D rotation
    representation (Zhou et al., CVPR 2019).

    NOTE(review): the return value is the (B, 3, 2) first-two-columns slice
    of the rotation matrix, not a flattened (B, 6) tensor — confirm callers
    expect this shape.
    """
    assert aa.dim() == 2
    assert aa.size(1) == 3
    bs = aa.size(0)  # batch size (currently unused)
    rotmat = angle_axis_to_rotation_matrix(aa)
    rot6d = rotmat[:, :3, :2]
    return rot6d
180,837 | import cv2
import numpy as np
def draw_bbox(image, bbox, color=(0,0,255), thickness=3):
    """Draw a rectangle on a copy of image; bbox is (x0, y0, x1, y1) pixels."""
    pt_min = (int(bbox[0]), int(bbox[1]))
    pt_max = (int(bbox[2]), int(bbox[3]))
    canvas = cv2.rectangle(image.copy(), pt_min, pt_max, color=color, thickness=thickness)
    return canvas.astype(np.uint8)
def draw_raw_bbox(img, bboxes):
    """Draw each (x, y, w, h) bbox on a copy of img using the default color."""
    canvas = img.copy()
    for x0, y0, w, h in bboxes:
        canvas = draw_bbox(canvas, (x0, y0, x0 + w, y0 + h))
    return canvas
180,838 | import cv2
import numpy as np
def draw_bbox(image, bbox, color=(0,0,255), thickness=3):
    """Draw the (x0, y0, x1, y1) rectangle bbox onto a copy of image."""
    corner_a = (int(bbox[0]), int(bbox[1]))
    corner_b = (int(bbox[2]), int(bbox[3]))
    drawn = cv2.rectangle(image.copy(), corner_a, corner_b, color=color, thickness=thickness)
    return drawn.astype(np.uint8)
def draw_body_bbox(img, body_bbox_list):
    """Draw every non-None (x, y, w, h) body bbox on a copy of img."""
    canvas = img.copy()
    for body_bbox in body_bbox_list:
        if body_bbox is None:
            continue
        x0, y0, w, h = body_bbox
        canvas = draw_bbox(canvas, (x0, y0, x0 + w, y0 + h))
    return canvas
180,839 | import cv2
import numpy as np
def draw_keypoints(image, kps, color=(0,0,255), radius=5, check_exist=False):
    """Draw filled circles at 2D keypoints on a copy of image.

    color may be 'red' / 'green' / 'blue' or a BGR tuple. When check_exist
    is True, kps[:, 2] is read as a confidence score and only keypoints
    with score > 0 are drawn; otherwise every keypoint is drawn.
    """
    named_colors = {'red': (0, 0, 255), 'green': (0, 255, 0), 'blue': (255, 0, 0)}
    if isinstance(color, str) and color in named_colors:
        color = named_colors[color]
    else:
        assert isinstance(color, tuple) and len(color) == 3
    canvas = image.copy()
    for kp in kps:
        x, y = kp[:2].astype(np.int32)
        score = kp[2] if check_exist else 1.0
        if score > 0.0:
            cv2.circle(canvas, (x, y), radius=radius, color=color, thickness=-1)
    return canvas.astype(np.uint8)
def draw_arm_pose(img, body_pose_list):
    """Highlight arm joints of each pose: rows 6:8 in blue (BGR 255,0,0) and
    rows 3:5 in red (BGR 0,0,255) — presumably left/right arms; confirm the
    joint ordering against the pose source."""
    canvas = img.copy()
    for body_pose in body_pose_list:
        canvas = draw_keypoints(canvas, body_pose[6:8, :], radius=10, color=(255, 0, 0))
        canvas = draw_keypoints(canvas, body_pose[3:5, :], radius=10, color=(0, 0, 255))
    return canvas
180,840 | import cv2
import numpy as np
def draw_bbox(image, bbox, color=(0,0,255), thickness=3):
    """Render bbox = (x0, y0, x1, y1) as a rectangle on a copy of image."""
    x_min, y_min, x_max, y_max = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
    out = cv2.rectangle(image.copy(), (x_min, y_min), (x_max, y_max),
                        color=color, thickness=thickness)
    return out.astype(np.uint8)
def draw_hand_bbox(img, hand_bbox_list):
    """Draw hand bboxes given as (x, y, w, h): 'left_hand' in BGR blue,
    anything else (e.g. 'right_hand') in green. None entries are skipped."""
    canvas = img.copy()
    for hand_bboxes in hand_bbox_list:
        if hand_bboxes is None:
            continue
        for key, bbox in hand_bboxes.items():
            if bbox is None:
                continue
            x0, y0, w, h = bbox
            box_color = (255, 0, 0) if key == 'left_hand' else (0, 255, 0)
            canvas = draw_bbox(canvas, (x0, y0, x0 + w, y0 + h), color=box_color)
    return canvas
180,841 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
g_ambientLight = (0.35, 0.35, 0.35, 1.0)
g_diffuseLight = (0.75, 0.75, 0.75, 0.7)
g_specular = (0.2, 0.2, 0.2, 1.0)
g_specref = (0.5, 0.5, 0.5, 1.0)
from collections import deque
import timeit
from multiprocessing import Pool
import scipy.io as sio
def init_minimum():
    """Minimal OpenGL state setup: white clear color, depth test, and a
    single light (GL_LIGHT0) with color-material tracking enabled."""
    #global width
    #global height
    glClearColor(1.0, 1.0, 1.0, 1.0)  # white background
    # Enable depth testing
    glEnable(GL_DEPTH_TEST)
    # One light, using the module-level ambient/diffuse/specular constants.
    glEnable(GL_LIGHTING)
    glLightfv(GL_LIGHT0, GL_AMBIENT, g_ambientLight)
    glLightfv(GL_LIGHT0, GL_DIFFUSE, g_diffuseLight)
    glLightfv(GL_LIGHT0, GL_SPECULAR, g_specular)
    glEnable(GL_LIGHT0)
    # Let glColor drive the material's ambient+diffuse; fixed specular term.
    glEnable(GL_COLOR_MATERIAL)
    glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE)
    glMaterialfv(GL_FRONT, GL_SPECULAR, g_specref)
    glMateriali(GL_FRONT, GL_SHININESS, 128)
180,842 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
g_xTrans = 0.
g_yTrans = 0.
g_zTrans = 0.
g_zoom = 600.
g_xRotate = 59.
g_yRotate = -41.
g_zRotate = 0.
from collections import deque
import timeit
from multiprocessing import Pool
import scipy.io as sio
def setFree3DView():
    """Apply the free-navigation view transform from the module-level
    g_zoom / g_*Rotate / g_*Trans state. Call with the modelview matrix
    current; successive GL transforms compose in reverse order."""
    glTranslatef(0,0,g_zoom)               # move along the view axis (zoom)
    glRotatef( -g_yRotate, 1.0, 0.0, 0.0)  # rotate about x
    glRotatef( -g_xRotate, 0.0, 1.0, 0.0)  # rotate about y
    glRotatef( g_zRotate, 0.0, 0.0, 1.0)   # rotate about z
    glTranslatef( g_xTrans, 0.0, 0.0 )
    glTranslatef( 0.0, g_yTrans, 0.0)
    glTranslatef( 0.0, 0, g_zTrans)
180,843 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_camView_K = None
import timeit
from multiprocessing import Pool
import scipy.io as sio
def setCamView_K(K):
    """Store the camera intrinsics matrix used by the camera-view projection."""
    global g_camView_K
    g_camView_K = K
180,844 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
g_bOrthoCam = False
from collections import deque
import timeit
from multiprocessing import Pool
import scipy.io as sio
def SetOrthoCamera(bOrtho=True):
    """Toggle the orthographic-camera flag used by the renderer."""
    global g_bOrthoCam
    g_bOrthoCam = bOrtho
180,845 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_renderOutputSize = None
import timeit
from multiprocessing import Pool
import scipy.io as sio
def setRenderOutputSize(imWidth, imHeight):
    """Record the (width, height) in pixels of the render output buffer."""
    global g_renderOutputSize
    g_renderOutputSize = (imWidth, imHeight)
180,846 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_cameraPoses = None
g_cameraRots = None
import timeit
from multiprocessing import Pool
import scipy.io as sio
def SetCameraPoses(camRots, camPoses):
    """Register camera poses for frustum visualization.

    Args:
        camRots: iterable of 3x3 rotation matrices (world->cam); each is
            transposed here because drawing the camera needs the inverse.
        camPoses: camera positions, stored as-is.
    """
    global g_cameraPoses, g_cameraRots
    g_cameraPoses = camPoses
    g_cameraRots = []
    for rot in camRots:
        ext = np.eye(4, dtype=rot.dtype)
        ext[:3, :3] = rot.T  # invert rotation for visualization
        g_cameraRots.append(ext)
180,847 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_ptCloud =None
g_ptCloudColor =None
import timeit
from multiprocessing import Pool
import scipy.io as sio
def SetPtCloud(ptCloud, ptCloudColor=None):
    """Register a point cloud (and optional per-point colors) for drawing."""
    global g_ptCloud, g_ptCloudColor
    g_ptCloud = ptCloud
    g_ptCloudColor = ptCloudColor
180,848 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_meshColor = (0.53, 0.53, 0.8)
import timeit
from multiprocessing import Pool
import scipy.io as sio
def SetMeshColor(colorName='blue'):
    """Select the global mesh tint by name.

    'blue' is used for predictions, 'red' for targets; any other name is a
    programming error (asserts).
    """
    global g_meshColor
    if colorName == 'blue':
        g_meshColor = (0.53, 0.53, 0.8)  # prediction: blue
    elif colorName == 'red':
        g_meshColor = (0.7, 0.5, 0.5)    # target: red
    else:
        assert False  # unknown color name
180,849 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_bApplyRootOffset = False
ROOT_OFFSET_DIST = 160
g_bodyNormals = None
g_frameIdx = 0
g_speech = None
g_speechGT = None
g_colors = [ (255,0,0), (0, 255, 127), (170, 170, 0), (0, 0, 128), (153, 50, 204), (60, 20, 220),
(0, 128, 0), (180, 130, 70), (147, 20, 255), (128, 128, 240), (154, 250, 0), (128, 0, 0),
(30, 105, 210), (0, 165, 255), (170, 178, 32), (238, 104, 123)]
g_colors = [(0, 255, 127), (255,0,0), (170, 170, 0), (0, 0, 128), (153, 50, 204), (60, 20, 220),
(0, 128, 0), (180, 130, 70), (147, 20, 255), (128, 128, 240), (154, 250, 0), (128, 0, 0),
(30, 105, 210), (0, 165, 255), (170, 178, 32), (238, 104, 123)]
g_colors = [ (0,0,255), (255,0,0), (0, 255, 127), (170, 170, 0), (0, 0, 128), (153, 50, 204), (60, 20, 220),
(0, 128, 0), (180, 130, 70), (147, 20, 255), (128, 128, 240), (154, 250, 0), (128, 0, 0),
(30, 105, 210), (0, 165, 255), (170, 178, 32), (238, 104, 123)]
import timeit
from multiprocessing import Pool
def drawbody_SMC19(joints, color, normal=None):
    """Draw a Panoptic-Studio SMC19 skeleton (19 joints) with GL primitives.

    Args:
        joints: flat sequence of length 3*jointNum, (x, y, z) per joint.
        color: (r, g, b) ints in 0..255 for joints and bones.
        normal: optional 3-vector; if given, a ray of length 50 is drawn
            from joint 1 (presumably the neck/face joint -- TODO confirm).
    """
    # Joints as small spheres.
    glColor3ub(color[0], color[1], color[2])
    for i in range(int(len(joints)/3)):
        # Simple-head mode hides face-detail joints (indices >= 15 and 1).
        if g_bSimpleHead and (i>=15 or i==1):
            continue
        glPushMatrix()
        glTranslate(joints[3*i], joints[3*i+1], joints[3*i+2])
        glutSolidSphere(2, 10, 10)
        glPopMatrix()
    connMat = g_connMat_smc19
    # Bones as cones from parent joint toward child joint.
    for conn in connMat:
        # (x0, y0, z0): coordinate of the base point.
        x0 = joints[3*conn[0]]
        y0 = joints[3*conn[0]+1]
        z0 = joints[3*conn[0]+2]
        # (x1, y1, z1): vector from the base to the target joint.
        x1 = joints[3*conn[1]] - x0
        y1 = joints[3*conn[1]+1] - y0
        z1 = joints[3*conn[1]+2] - z0
        # Shorten the neck->head bone in simple-head mode.
        if g_bSimpleHead and conn[0] == 0 and conn[1]==1:
            x1 = x1*0.5
            y1 = y1*0.5
            z1 = z1*0.5
        # Convert the bone vector to spherical angles for glRotatef.
        # NOTE(review): raises on zero-length bones (coincident joints).
        length = math.sqrt(x1*x1 + y1*y1 + z1*z1)
        theta = math.degrees(math.acos(z1/length))
        phi = math.degrees(math.atan2(y1, x1))
        glPushMatrix()
        glTranslate(x0, y0, z0)
        glRotatef(phi, 0, 0, 1)
        glRotatef(theta, 0, 1, 0)
        glutSolidCone(2, length, 10, 10)
        glPopMatrix()
    # Optional face-normal ray (cyan sphere + line segment).
    if normal is not None:
        i=1
        facePt = joints[(3*i):(3*i+3)]
        normalPt = facePt + normal*50
        glColor3ub(0, 255, 255)
        glPushMatrix()
        glTranslate(normalPt[0], normalPt[1], normalPt[2])
        glutSolidSphere(1, 10, 10)
        glPopMatrix()
        glBegin(GL_LINES)
        glVertex3f(facePt[0], facePt[1], facePt[2])
        glVertex3f(normalPt[0], normalPt[1], normalPt[2])
        glEnd()
def drawbody_SMPLCOCO_TotalCap26(joints, color, normal=None):
    """Draw a 26-joint SMPL-COCO/TotalCapture skeleton with GL primitives.

    Args:
        joints: flat sequence of length 78 (26 joints * xyz).
        color: (r, g, b) ints in 0..255.
        normal: optional 3-vector; if given, a ray of length 50 is drawn
            from joint 1 -- TODO confirm which joint that is in this format.
    """
    # Bone connectivity (parent, child), zero-indexed.
    connMat = [ [12,2], [2,1], [1,0], #Right leg
                [12,3], [3,4], [4,5], #Left leg
                [12,9], [9,10], [10,11], #Left Arm
                [12,8], [8,7], [7,6], #Right shoulder
                [12,14],[14,16],[16,18], #Neck(12)->Nose(14)->rightEye(16)->rightEar(18)
                [14,15],[15,17], #Nose(14)->leftEye(15)->leftEar(17).
                [14,13], #Nose->headMidle(13)
                [12,19], #headTop19
                [5,20], [5,21], [5,22], #leftFoot
                [0,23], [0,24], [0,25] #rightFoot
                ]
    connMat = np.array(connMat, dtype=int) #zero Idx
    # Joints as small spheres.
    glColor3ub(color[0], color[1], color[2])
    for i in range(int(len(joints)/3)):
        # Simple-head mode hides face-detail joints.
        if g_bSimpleHead and (i>=15 or i==1):
            continue
        glPushMatrix()
        glTranslate(joints[3*i], joints[3*i+1], joints[3*i+2])
        glutSolidSphere(2, 10, 10)
        glPopMatrix()
    # Bones as cones from parent joint toward child joint.
    for conn in connMat:
        # (x0, y0, z0): coordinate of the base point.
        x0 = joints[3*conn[0]]
        y0 = joints[3*conn[0]+1]
        z0 = joints[3*conn[0]+2]
        # (x1, y1, z1): vector from the base to the target joint.
        x1 = joints[3*conn[1]] - x0
        y1 = joints[3*conn[1]+1] - y0
        z1 = joints[3*conn[1]+2] - z0
        if g_bSimpleHead and conn[0] == 0 and conn[1]==1:
            x1 = x1*0.5
            y1 = y1*0.5
            z1 = z1*0.5
        # Spherical angles for glRotatef; raises on zero-length bones.
        length = math.sqrt(x1*x1 + y1*y1 + z1*z1)
        theta = math.degrees(math.acos(z1/length))
        phi = math.degrees(math.atan2(y1, x1))
        glPushMatrix()
        glTranslate(x0, y0, z0)
        glRotatef(phi, 0, 0, 1)
        glRotatef(theta, 0, 1, 0)
        glutSolidCone(2, length, 10, 10)
        glPopMatrix()
    # Optional face-normal ray (cyan sphere + line segment).
    if normal is not None:
        i=1
        facePt = joints[(3*i):(3*i+3)]
        normalPt = facePt + normal*50
        glColor3ub(0, 255, 255)
        glPushMatrix()
        glTranslate(normalPt[0], normalPt[1], normalPt[2])
        glutSolidSphere(1, 10, 10)
        glPopMatrix()
        glBegin(GL_LINES)
        glVertex3f(facePt[0], facePt[1], facePt[2])
        glVertex3f(normalPt[0], normalPt[1], normalPt[2])
        glEnd()
def drawbody_joint_ptOnly(joints, color, normal=None):
    """Draw each joint as a small solid sphere; no bones are rendered.

    `normal` is accepted only for signature compatibility with the other
    drawbody_* helpers and is unused here.
    """
    glColor3ub(color[0], color[1], color[2])
    for x, y, z in zip(joints[0::3], joints[1::3], joints[2::3]):
        glPushMatrix()
        glTranslate(x, y, z)
        glutSolidSphere(2, 10, 10)
        glPopMatrix()
def drawbody_joint32_human36m(joints, color, normal=None):
    """Draw a Human3.6M 32-joint skeleton; right-side bones are black.

    Args:
        joints: flat sequence of length 96 (32 joints * xyz).
        color: (r, g, b) ints in 0..255, used for joints and left-side bones.
        normal: unused; kept for signature compatibility.

    NOTE(review): relies on module-level g_connMat_joint32_human36m, which is
    defined elsewhere in the file -- bone count must match bBoneIsLeft below.
    """
    bBoneIsLeft = [0 ,0, 0, 0, 0,
                1, 1, 1, 1, 1,
                1, 1, 1, 1,
                1, 1, 1, 1, 1,
                0, 0, 0, 0, 0] #To draw left as different color. Torso is treated as left
    # Joints as small spheres.
    glColor3ub(color[0], color[1], color[2])
    for i in range(int(len(joints)/3)):
        glPushMatrix()
        glTranslate(joints[3*i], joints[3*i+1], joints[3*i+2])
        glutSolidSphere(2, 10, 10)
        glPopMatrix()
    # Bones as cones; left side uses the given color, right side black.
    for i, conn in enumerate(g_connMat_joint32_human36m):
        if bBoneIsLeft[i]: #Left as a color
            glColor3ub(color[0], color[1], color[2])
        else: #Right as black
            glColor3ub(0,0,0)
        # (x0, y0, z0): coordinate of the base point.
        x0 = joints[3*conn[0]]
        y0 = joints[3*conn[0]+1]
        z0 = joints[3*conn[0]+2]
        # (x1, y1, z1): vector from the base to the target joint.
        x1 = joints[3*conn[1]] - x0
        y1 = joints[3*conn[1]+1] - y0
        z1 = joints[3*conn[1]+2] - z0
        # Spherical angles for glRotatef; raises on zero-length bones.
        length = math.sqrt(x1*x1 + y1*y1 + z1*z1)
        theta = math.degrees(math.acos(z1/length))
        phi = math.degrees(math.atan2(y1, x1))
        glPushMatrix()
        glTranslate(x0, y0, z0)
        glRotatef(phi, 0, 0, 1)
        glRotatef(theta, 0, 1, 0)
        glutSolidCone(2, length, 10, 10)
        glPopMatrix()
def drawbody_joint31(joints, color, normal=None):
    """Draw a CMU-Mocap 31-joint skeleton: spheres at joints, cones for bones.

    Args:
        joints: flat sequence of length 93 (31 joints * xyz).
        color: (r, g, b) ints in 0..255.
        normal: unused; kept for signature compatibility.

    NOTE(review): relies on module-level g_connMat_coco31 defined elsewhere
    in the file.
    """
    # Joints as small spheres.
    glColor3ub(color[0], color[1], color[2])
    for i in range(int(len(joints)/3)):
        glPushMatrix()
        glTranslate(joints[3*i], joints[3*i+1], joints[3*i+2])
        glutSolidSphere(2, 10, 10)
        glPopMatrix()
    # Bones as cones from parent joint toward child joint.
    for conn in g_connMat_coco31:
        # (x0, y0, z0): coordinate of the base point.
        x0 = joints[3*conn[0]]
        y0 = joints[3*conn[0]+1]
        z0 = joints[3*conn[0]+2]
        # (x1, y1, z1): vector from the base to the target joint.
        x1 = joints[3*conn[1]] - x0
        y1 = joints[3*conn[1]+1] - y0
        z1 = joints[3*conn[1]+2] - z0
        # Spherical angles for glRotatef; raises on zero-length bones.
        length = math.sqrt(x1*x1 + y1*y1 + z1*z1)
        theta = math.degrees(math.acos(z1/length))
        phi = math.degrees(math.atan2(y1, x1))
        glPushMatrix()
        glTranslate(x0, y0, z0)
        glRotatef(phi, 0, 0, 1)
        glRotatef(theta, 0, 1, 0)
        glutSolidCone(2, length, 10, 10)
        glPopMatrix()
def drawbody_joint22(joints, color, normal=None, ignore_root=False):
    """Draw a Holden-format 22-joint skeleton, plus a root->ground line.

    Args:
        joints: flat sequence of length 66 (22 joints * xyz); joint 0 is
            presumably the projected root on the ground -- TODO confirm.
        color: (r, g, b) ints in 0..255.
        normal: unused; kept for signature compatibility.
        ignore_root: unused in the current implementation.

    NOTE(review): relies on module-level g_connMat_coco22 defined elsewhere
    in the file.
    """
    # Joints as small spheres (joint 0, the root, is skipped).
    glColor3ub(color[0], color[1], color[2])
    for i in range(1,int(len(joints)/3)):
        glPushMatrix()
        glTranslate(joints[3*i], joints[3*i+1], joints[3*i+2])
        glutSolidSphere(2, 10, 10)
        glPopMatrix()
    connMat_coco22 = g_connMat_coco22
    # Bones as cones from parent joint toward child joint.
    for conn in connMat_coco22:
        # (x0, y0, z0): coordinate of the base point.
        x0 = joints[3*conn[0]]
        y0 = joints[3*conn[0]+1]
        z0 = joints[3*conn[0]+2]
        # (x1, y1, z1): vector from the base to the target joint.
        x1 = joints[3*conn[1]] - x0
        y1 = joints[3*conn[1]+1] - y0
        z1 = joints[3*conn[1]+2] - z0
        # Spherical angles for glRotatef; raises on zero-length bones.
        length = math.sqrt(x1*x1 + y1*y1 + z1*z1)
        theta = math.degrees(math.acos(z1/length))
        phi = math.degrees(math.atan2(y1, x1))
        glPushMatrix()
        glTranslate(x0, y0, z0)
        glRotatef(phi, 0, 0, 1)
        glRotatef(theta, 0, 1, 0)
        glutSolidCone(2, length, 10, 10)
        glPopMatrix()
    # Line from joint 0 to joint 1 (spine-to-ground projection).
    conn = [0,1]
    x0 = joints[3*conn[0]]
    y0 = joints[3*conn[0]+1]
    z0 = joints[3*conn[0]+2]
    x1 = joints[3*conn[1]]
    y1 = joints[3*conn[1]+1]
    z1 = joints[3*conn[1]+2]
    glBegin(GL_LINES)
    glVertex3f(x0, y0, z0)
    glVertex3f(x1, y1, z1)
    glEnd()
import scipy.io as sio
def DrawSkeletonsGT():
    """Deprecated: draw the ground-truth skeleton set (g_skeletons_GT).

    Unreachable in practice -- the leading `assert False` fires on every
    call. Kept for reference: it dispatches on the flattened joint dimension
    (78/57/96/66/93) to the matching drawbody_* helper, applying an optional
    per-human root offset (ROOT_OFFSET_DIST) to spread people apart.
    """
    assert False #Deprecated
    global g_colors
    global g_speech,g_speechGT
    global g_bApplyRootOffset
    global g_bodyNormals
    if g_skeletons_GT is None:
        return
    for humanIdx in range(len(g_skeletons_GT)):
        # Skip humans whose sequence ended before the current frame.
        if(g_frameIdx >= g_skeletons_GT[humanIdx].shape[1]):
            continue
        skel = g_skeletons_GT[humanIdx][:, g_frameIdx]
        if g_bApplyRootOffset:
            # Copy before shifting so the stored data stays untouched.
            skel = skel.copy()
            skel[0::3] = skel[0::3]+ ROOT_OFFSET_DIST *humanIdx
        # Dispatch on the flattened joint dimension.
        if skel.shape[0]==78: #SMPlCOCO19 + headtop (19) + (leftFoot --toe20 pink21 heel22) + (rightFoot--toe23-pink24-heel25)
            drawbody_SMPLCOCO_TotalCap26(skel, [0,255,0])
        elif skel.shape[0]==57: #Panoptic Studio (SMC19) with 19 joints. Note SMC21 includes headtop
            drawbody_SMC19(skel, g_colors[humanIdx % len(g_colors)])
        elif skel.shape[0]==96: #human36
            drawbody_joint32_human36m(skel, g_colors[humanIdx % len(g_colors)])
        elif skel.shape[0]==66: #Holden's converted form
            drawbody_joint22(skel, g_colors[humanIdx % len(g_colors)])
        elif skel.shape[0]==93: #CMU Mocap Raw data (31joints)
            drawbody_joint31(skel, g_colors[humanIdx % len(g_colors)])
        else:
            drawbody_joint_ptOnly(skel, g_colors[humanIdx % len(g_colors)])
180,850 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
import timeit
from multiprocessing import Pool
g_bSimpleHead = False
if g_bSimpleHead==False:
g_connMat_smc19 = [ [1,2], [1,4], [4,5], [5,6], [1,3], [3,7], [7,8], [8,9], [3,13],[13,14], [14,15], [1,10], [10, 11], [11, 12], [2, 16], [16, 17], [2, 18], [18, 19] ]
else:
g_connMat_smc19 = [ [1,2], [1,4], [4,5], [5,6], [1,3], [3,7], [7,8], [8,9], [3,13],[13,14], [14,15], [1,10], [10, 11], [11, 12]]
g_connMat_coco14 = [ [13,3], [3,2], [2,1], #Right leg
[13,4], [4,5], [5,6], #Left leg
[13,10], [10,11], [11,12], #Left Arm
[13,9], [9,8], [8,7], #Right shoulder
]
g_connMat_coco14 = np.array(g_connMat_coco14, dtype=int) - 1
import scipy.io as sio
def drawbody_joint14(joints, color, normal=None):
    """Draw a 14-joint COCO skeleton: spheres at joints, cones for bones.

    Args:
        joints: flat sequence of length 3*jointNum, (x, y, z) per joint.
        color: (r, g, b) ints in 0..255, used for joints and bones.
        normal: optional 3-vector; if given, a ray of length 50 is drawn
            from joint 1 (presumably the face/neck joint -- TODO confirm).
    """
    # Joints as small spheres.
    glColor3ub(color[0], color[1], color[2])
    for i in range(int(len(joints)/3)):
        # Simple-head mode hides face-detail joints (indices >= 15 and 1).
        if g_bSimpleHead and (i>=15 or i==1):
            continue
        glPushMatrix()
        glTranslate(joints[3*i], joints[3*i+1], joints[3*i+2])
        glutSolidSphere(2, 10, 10)
        glPopMatrix()
    connMat_coco14 = g_connMat_coco14
    # Bones as cones from the parent joint toward the child joint.
    for conn in connMat_coco14:
        # (x0, y0, z0): base point of the bone.
        x0 = joints[3*conn[0]]
        y0 = joints[3*conn[0]+1]
        z0 = joints[3*conn[0]+2]
        # (x1, y1, z1): vector from the base to the target joint.
        x1 = joints[3*conn[1]] - x0
        y1 = joints[3*conn[1]+1] - y0
        z1 = joints[3*conn[1]+2] - z0
        # Shorten the neck->head bone in simple-head mode.
        if g_bSimpleHead and conn[0] == 0 and conn[1]==1:
            x1 = x1*0.5
            y1 = y1*0.5
            z1 = z1*0.5
        length = math.sqrt(x1*x1 + y1*y1 + z1*z1)
        if length < 1e-8:
            # Degenerate bone (coincident joints): acos/atan2 below would
            # divide by zero, so skip drawing it.
            continue
        # Clamp against float rounding pushing |z1/length| above 1,
        # which would make math.acos raise a domain error.
        theta = math.degrees(math.acos(max(-1.0, min(1.0, z1/length))))
        phi = math.degrees(math.atan2(y1, x1))
        glPushMatrix()
        glTranslate(x0, y0, z0)
        glRotatef(phi, 0, 0, 1)
        glRotatef(theta, 0, 1, 0)
        glutSolidCone(2, length, 10, 10)
        glPopMatrix()
    # Optional face-normal ray (cyan sphere + line segment).
    if normal is not None:
        i=1
        facePt = joints[(3*i):(3*i+3)]
        normalPt = facePt + normal*50
        glColor3ub(0, 255, 255)
        glPushMatrix()
        glTranslate(normalPt[0], normalPt[1], normalPt[2])
        glutSolidSphere(1, 10, 10)
        glPopMatrix()
        glBegin(GL_LINES)
        glVertex3f(facePt[0], facePt[1], facePt[2])
        glVertex3f(normalPt[0], normalPt[1], normalPt[2])
        glEnd()
180,851 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
import timeit
from multiprocessing import Pool
def RenderString(str):
    """Draw *str* at the current raster position with the GLUT Helvetica-18
    bitmap font (one glutBitmapCharacter call per character)."""
    for ch in str:
        glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, ord(ch))
import scipy.io as sio
def draw_speaking_joint19(joints, bSpeak, word, color, normal=None):
    """Annotate a 19-joint skeleton with a 'speaking' marker and label.

    Draws a small sphere slightly above joint 1 (presumably the face/neck
    joint -- TODO confirm), renders `word` (or 'speaking' when word is None)
    next to it, and connects marker and joint with a line. No-op when
    bSpeak is falsy. `normal` is unused.
    """
    # Visualize Speaking signal
    if bSpeak:
        i=1
        facePt = joints[(3*i):(3*i+3)]
        # Offset the marker 20 units along -y (up in this coordinate frame).
        normalPt = facePt + np.array([0,-1,0])*20
        glColor3ub(color[0], color[1], color[2])
        glPushMatrix()
        glTranslate(normalPt[0], normalPt[1], normalPt[2])
        glutSolidSphere(1, 10, 10)
        #Render String
        if word is not None:
            RenderString(word)
        else:
            RenderString('speaking')
        glPopMatrix()
        glBegin(GL_LINES)
        glVertex3f(facePt[0], facePt[1], facePt[2])
        glVertex3f(normalPt[0], normalPt[1], normalPt[2])
        glEnd()
180,852 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
import timeit
from multiprocessing import Pool
def RenderString(str):
import scipy.io as sio
def draw_speaking_joint22(joints, bSpeak, word, color, offset=20, normal=None):
    """Annotate a Holden 22-joint skeleton with a 'speaking' marker and label.

    Same idea as draw_speaking_joint19 but anchored at joint 13 (presumably
    the head joint in this format -- TODO confirm), with a configurable
    vertical `offset`. No-op when bSpeak is falsy. `normal` is unused.
    """
    # Visualize Speaking signal
    if bSpeak:
        i=13
        facePt = joints[(3*i):(3*i+3)]
        # Offset the marker along -y (up in this coordinate frame).
        normalPt = facePt + np.array([0,-1,0])*offset
        glColor3ub(color[0], color[1], color[2])
        glPushMatrix()
        glTranslate(normalPt[0], normalPt[1], normalPt[2])
        glutSolidSphere(1, 10, 10)
        #Render String
        if word is not None:
            RenderString(word)
        else:
            RenderString('speaking')
        glPopMatrix()
        glBegin(GL_LINES)
        glVertex3f(facePt[0], facePt[1], facePt[2])
        glVertex3f(normalPt[0], normalPt[1], normalPt[2])
        glEnd()
180,853 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
HOLDEN_DATA_SCALING = 5
import timeit
def reshape(width, height):
    """GLUT reshape callback: record the new window size and update the
    GL viewport to cover the whole window. Projection setup is handled
    elsewhere."""
    global g_Width, g_Height
    g_Width, g_Height = width, height
    glViewport(0, 0, g_Width, g_Height)
from multiprocessing import Pool
def setTrajectory(traj_list):
    """Register root-trajectory data for rendering.

    Args:
        traj_list: list of 2-D arrays shaped (dim, frameNum).

    Side effect: grows the global frame limit to at least the shortest
    trajectory length.
    """
    global g_trajectory, g_frameLimit
    g_trajectory = traj_list
    shortest = min(t.shape[1] for t in g_trajectory)
    g_frameLimit = max(g_frameLimit, shortest)
import scipy.io as sio
def set_Holden_Trajectory_3(traj_list, initRot = None, initTrans=None ):
    """Integrate Holden-style root trajectories and register them for drawing.

    Each trajectory in traj_list is a (3, frames) array of per-frame root
    velocity (root_x, root_z) and rotational velocity (root_r). The deltas
    are accumulated into world-space positions (plus a forward direction
    point), scaled by HOLDEN_DATA_SCALING, and handed to setTrajectory.

    Args:
        traj_list: list of (3, frames) arrays.
        initRot: optional list of initial rotations (one per trajectory);
            defaults to identity.
        initTrans: optional list of initial translations; defaults to origin.

    NOTE(review): `Quaternions` is not imported in this chunk -- presumably
    provided elsewhere in the module (Holden's motion utils); verify.
    """
    global HOLDEN_DATA_SCALING
    traj_list_output = []
    for ai in range(len(traj_list)):
        root_x, root_z, root_r = traj_list[ai][0,:], traj_list[ai][1,:], traj_list[ai][2,:]
        if initRot is None:
            rotation = Quaternions.id(1)
        else:
            rotation = initRot[ai]
        offsets = []
        if initTrans is None:
            translation = np.array([[0,0,0]]) #1x3
        else:
            translation = np.array(initTrans[ai])
            if translation.shape[0]==3:
                translation = np.swapaxes(translation,0,1)
        joints = np.zeros((len(root_x),2,3)) #(frames, 2,3) for original and directionPt
        joints[:,1,2] = 10 #Normal direction
        # Integrate per-frame deltas: rotate, translate, then advance state.
        for i in range(len(joints)):
            joints[i,:,:] = rotation * joints[i]
            joints[i,:,0] = joints[i,:,0] + translation[0,0]
            joints[i,:,2] = joints[i,:,2] + translation[0,2]
            rotation = Quaternions.from_angle_axis(-root_r[i], np.array([0,1,0])) * rotation
            offsets.append(rotation * np.array([0,0,1]))
            translation = translation + rotation * np.array([root_x[i], 0, root_z[i]])
        #Reshaping
        joints = joints.reshape(joints.shape[0], joints.shape[1]*joints.shape[2]) # (frameNum,jointDim,3) -> (frameNum, jointDim*3)
        joints = np.swapaxes(joints, 0, 1) # jointDim*3 x frameNum
        joints = joints*HOLDEN_DATA_SCALING
        traj_list_output.append(joints)
    traj_list_output = np.asarray(traj_list_output)
    setTrajectory(traj_list_output) #(trajNum, joitnDim*3, frames)
180,854 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_speech = None
import timeit
from multiprocessing import Pool
import scipy.io as sio
def setSpeech_binary(speech_list):
    """Register per-frame speaking indicators (one entry per human).

    Bare 1-D indicator arrays are wrapped in-place into dicts of the form
    {'indicator': array, 'word': [None]*frames} so the draw code can always
    look up an optional word per frame.
    """
    global g_speech
    for idx in range(len(speech_list)):
        entry = speech_list[idx]
        if len(entry.shape) == 1:
            speech_list[idx] = {'indicator': entry, 'word': [None] * len(entry)}
    g_speech = speech_list
180,855 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_speechGT = None
import timeit
from multiprocessing import Pool
import scipy.io as sio
def setSpeechGT_binary(speech_list):
    """Register ground-truth per-frame speaking indicators.

    Same in-place wrapping as setSpeech_binary: bare 1-D indicator arrays
    become {'indicator': array, 'word': [None]*frames} dicts.
    """
    global g_speechGT
    for idx in range(len(speech_list)):
        entry = speech_list[idx]
        if len(entry.shape) == 1:
            speech_list[idx] = {'indicator': entry, 'word': [None] * len(entry)}
    g_speechGT = speech_list
180,856 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_speechGT = None
import timeit
from multiprocessing import Pool
import scipy.io as sio
def setSpeechGT(speech_list):
    """Register ground-truth speech annotations as-is (no conversion)."""
    global g_speechGT
    g_speechGT = speech_list
180,857 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_faces = None
g_frameLimit = -1
import timeit
from multiprocessing import Pool
import scipy.io as sio
def showFace(face_list):
    """Register face data for rendering.

    Args:
        face_list: list of (faceDim, frameNum) arrays.

    Side effect: grows the global frame limit to at least the shortest
    face sequence length.
    """
    global g_faces, g_frameLimit
    g_faces = face_list
    shortest = min(f.shape[1] for f in g_faces)
    g_frameLimit = max(g_frameLimit, shortest)
180,858 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_posOnly = None
g_frameLimit = -1
import timeit
from multiprocessing import Pool
import scipy.io as sio
def setPosOnly(pos_list):
    """Register root-position-only trajectories (copies the input arrays).

    2-row inputs are interpreted as ground-plane (x, z) and lifted to 3-D
    with y fixed at 0. Grows the global frame limit to at least the
    shortest sequence length.
    """
    global g_posOnly, g_frameLimit
    g_posOnly = [x.copy() for x in pos_list]
    for i, p in enumerate(g_posOnly):
        if p.shape[0] == 2:
            lifted = np.zeros((3, p.shape[1]))
            lifted[0, :] = p[0, :]
            lifted[2, :] = p[1, :]  # y (row 1) stays 0: ground plane
            g_posOnly[i] = lifted
    shortest = min(l.shape[1] for l in g_posOnly)
    g_frameLimit = max(g_frameLimit, shortest)
180,859 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
import timeit
from multiprocessing import Pool
def setSkeleton(skel_list, jointType=None, colorRGB=None, bReset= True):
    """Replace (or extend) the global skeleton list used by the renderer.

    Args:
        skel_list: list of (skelDim, frameNum) arrays, or a single 2-D
            array, which is promoted to a one-element batch.
        jointType: optional format tag stored with each skeleton.
        colorRGB: optional (r, g, b); None lets the drawer pick colors.
        bReset: when True, discard any previously registered skeletons.
    """
    global g_skeletons
    # A bare 2-D array means a single skeleton: promote to a batch of one.
    if not isinstance(skel_list, list) and len(skel_list.shape) == 2:
        skel_list = skel_list[np.newaxis, :]
    if g_skeletons is None or bReset:
        g_skeletons = []
    for skel in skel_list:
        g_skeletons.append({"skeleton": skel, "color": colorRGB, "type": jointType})
    # Recompute the shared animation frame limit from all registered data.
    setFrameLimit()
import scipy.io as sio
def addSkeleton(skel_list, jointType=None, colorRGB=None):
    """Append skeletons to the current global list without resetting it."""
    setSkeleton(skel_list, jointType=jointType, colorRGB=colorRGB, bReset=False)
180,860 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_skeletons = None
import timeit
from multiprocessing import Pool
import scipy.io as sio
def resetSkeleton():
    """Discard all registered skeletons."""
    global g_skeletons
    g_skeletons = []
180,861 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_skeletons = None
g_frameLimit = -1
import timeit
from multiprocessing import Pool
import scipy.io as sio
def showSkeleton(skel_list):
    """Register skeleton data (legacy entry point; see setSkeleton).

    Args:
        skel_list: list of (skelDim, frameNum) arrays, stored as-is.

    Side effect: grows the global frame limit to at least the shortest
    skeleton sequence length.
    """
    global g_skeletons, g_frameLimit
    g_skeletons = skel_list
    shortest = min(s.shape[1] for s in g_skeletons)
    g_frameLimit = max(g_frameLimit, shortest)
180,862 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_frameLimit = -1
import timeit
from multiprocessing import Pool
import scipy.io as sio
def resetFrameLimit():
    """Reset the shared animation frame limit so new data recomputes it."""
    global g_frameLimit
    g_frameLimit = 0
180,863 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_meshes = None
import timeit
from multiprocessing import Pool
import scipy.io as sio
def resetMeshData():
    """Discard all registered meshes."""
    global g_meshes
    g_meshes = []
180,864 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_meshes = None
import timeit
from multiprocessing import Pool
def setMeshData(mesh_list, bComputeNormal = False):
    """Replace the global mesh list used by the renderer.

    Args:
        mesh_list: list of dicts with keys 'ver' -- (verNum, 3) or
            (N, verNum, 3) vertex array -- and 'f' (faces); optionally
            'normal'. Dicts are shallow-copied before modification.
        bComputeNormal: when True, (re)compute per-face normals for every
            mesh via ComputeNormal.

    Caps the list at 40 meshes with a warning, promotes 2-D vertex arrays
    to a batch of one frame, then recomputes the shared frame limit.
    """
    global g_meshes
    g_meshes = [ d.copy() for d in mesh_list]
    if len(g_meshes)==0:
        return
    # Cap the mesh count to keep rendering responsive.
    # (The original code repeated this truncation block twice; once is enough.)
    if len(g_meshes)>40:
        print("Warning: too many meshes ({})".format(len(g_meshes)))
        g_meshes = g_meshes[:40]
    for element in g_meshes:
        # Promote a single-frame (verNum, 3) array to (1, verNum, 3).
        if len(element['ver'].shape) ==2:
            element['ver'] = element['ver'][np.newaxis,:,:]
            if 'normal' in element.keys():
                element['normal'] = element['normal'][np.newaxis,:,:]
    if bComputeNormal:
        for element in g_meshes:
            element['normal'] = ComputeNormal(element['ver'], element['f'])
    # Recompute the shared animation frame limit from all registered data.
    setFrameLimit()
import scipy.io as sio
def addMeshData(mesh_list, bComputeNormal = False):
    """Append meshes to the current global mesh list.

    NOTE(review): when bComputeNormal is True, normals are recomputed for
    the already-registered meshes as well -- redundant but harmless.
    """
    combined = mesh_list if g_meshes is None else g_meshes + mesh_list
    setMeshData(combined, bComputeNormal)
180,865 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
import timeit
g_faceModel = None
from multiprocessing import Pool
import scipy.io as sio
def GetFaceMesh(faceModel, faceParam_list, bComputeNormal = True, bApplyRot = False, bApplyTrans = False, bShowFaceId = False, bApplyRotFlip=False):
    """Reconstruct per-frame face meshes from linear face-model parameters.

    Args:
        faceModel: dict loaded from the total face model .mat file with
            'v_template' (11510,3), 'trifaces' (22800,3), 'U_id' (34530,150),
            'U_exp' (34530,200).
        faceParam_list: one dict per person with 'face_exp' (200, frames) and
            optionally 'face_id', 'trans' (3, frames), 'rot' (3, frames),
            'rot_pivot' (3, frames).
        bComputeNormal: also compute per-vertex normals on the CPU.
        bApplyRot: apply per-frame global rotation (branch starts with
            `assert False` — deliberately disabled; confirm before enabling).
        bApplyTrans: add per-frame global translation.
        bShowFaceId: add the identity-subspace component as well.
        bApplyRotFlip: extra fixed flip before the per-frame rotation.

    Returns:
        list of dicts {'ver': (frames,11510,3), 'normal', 'f', 'centers'}.

    NOTE(review): 'face_exp' coefficients 5+ are zeroed below (debug leftover)
    and a hard-coded flip + translation is applied in the non-bApplyRot path.
    """
    MoshParam_list = []
    v_template = faceModel['v_template'] #11510 x 3
    v_template_flat = v_template.flatten() #(34530,)
    v_template_flat = v_template_flat[:,np.newaxis] #(34530,1) for broadcasting
    trifaces = faceModel['trifaces'] #22800 x 3
    U_id = faceModel['U_id'] #34530 x 150
    U_exp = faceModel['U_exp'] #34530 x 200
    for i, faceParam in enumerate(faceParam_list):
        print('processing: humanIdx{0}/{1}'.format(i, len(faceParam_list) ))
        #faceParam = faceParam_all[humanIdx]
        #Debug: only use the first 5
        faceParam['face_exp'][5:,:]=0
        """Computing face vertices for all frames simultaneously"""
        face_exp_component = np.matmul(U_exp, faceParam['face_exp']) #(34,530 x 200) x (200 x frames)
        v_face_allFrames = v_template_flat + face_exp_component #(34530 x frames). Ignore face Identity information
        if bShowFaceId:
            face_id_component = np.matmul(U_id, faceParam['face_id']) #(34,530 x 150) x (150 x frames)
            v_face_allFrames += face_id_component #(34530 x frames)
        #v_face_allFrames = v_template_flat+ face_id_component +face_exp_component #(34530 x frames)
        v_face_allFrames = v_face_allFrames.swapaxes(0,1) # (frames, 34530)
        v_face_allFrames = np.reshape(v_face_allFrames,[v_face_allFrames.shape[0], -1, 3]) # (frames, 11510, 3)
        faceMassCenter = np.zeros((3,faceParam['face_exp'].shape[1]))#*0.0 #(3, frames)
        # Recenter vertices around the rotation pivot before any rotation.
        if 'rot_pivot' in faceParam.keys():
            rot_pivot = np.swapaxes(faceParam['rot_pivot'],0,1) #(frames,3)
            rot_pivot = np.expand_dims(rot_pivot,1) #(frames,1, 3)
            v_face_allFrames = v_face_allFrames - rot_pivot # (frames, 11510, 3)
        if bApplyRot:
            assert False, "This code cannot be run."
            from modelViewer.batch_lbs import batch_rodrigues
            #Apply rotationsvf
            global_rot = None
            #computing global rotation
            global_rot = batch_rodrigues(np.swapaxes(faceParam['rot'],0,1)) #input (Nx3), output: (N,3,3)
            #global_rot *( v_face_allFrames - rot_pivot)
            for f in range(v_face_allFrames.shape[0]):
                pts = np.swapaxes(v_face_allFrames[f,:,:],0,1) # (3,11510)
                if bApplyRotFlip:
                    #Flip
                    rot = np.array( [ 0, -1, 0, 1, 0, 0, 0, 0, 1])
                    rot = np.reshape(rot,(3,3))
                    pts = np.matmul( rot, pts) # (3,3) x (11510,3) =>(3,11510)
                    pts = np.matmul( rot, pts) # (3,3) x (11510,3) =>(3,11510)
                    pts *= 0.94
                # rot = np.array( [ 0, -1, 0, 1, 0, 0,0, 0, 1])
                # rot = np.reshape(rot,(3,3))
                rot = global_rot[f,:,:]
                pts = np.matmul( rot, pts) # (3,3) x (11510,3) =>(3,11510)
                v_face_allFrames[f,:,:] = pts.transpose()
        else: #Rotate 180 degrees to flip y axis
            #global_rot = batch_rodrigues(np.swapaxes(faceParam['rot'],0,1)) #input (Nx3), output: (N,3,3)
            #global_rot *( v_face_allFrames - rot_pivot)
            for f in range(v_face_allFrames.shape[0]):
                pts = np.swapaxes(v_face_allFrames[f,:,:],0,1) # (3,11510)
                #rot = np.array( [ 1, 0, 0, 0, 1, 0, 0, 0, -1])
                #rot = np.array( [ 1, 0, 0, 0, 1, 0, 0, 0, -1])
                rot = np.array( [ 0, -1, 0, 1, 0, 0, 0, 0, 1])
                #rot = np.array( [ 0, 1, 0, 1, 0, 0, 0, 0, 1])
                rot = np.reshape(rot,(3,3))
                #rot = global_rot[f,:,:]
                # Applied twice: two 90-degree z-rotations = 180 degrees.
                pts = np.matmul( rot, pts) # (3,3) x (11510,3) =>(3,11510)
                pts = np.matmul( rot, pts) # (3,3) x (11510,3) =>(3,11510)
                #trans = np.array([[0.0,-1.5,0.2]])
                trans = np.array([[0.0,-1,0.2]])
                v_face_allFrames[f,:,:] = pts.transpose() +trans
        if bApplyTrans:
            trans = np.swapaxes(faceParam['trans'],0,1) #(frames,3)
            trans = np.expand_dims(trans,1) #(frames,1, 3)
            v_face_allFrames = v_face_allFrames + trans # (frames, 11510, 3)
            faceMassCenter += faceParam['trans'] #(3, frames)
        if bComputeNormal==False:
            #Debug. no normal
            faceMassCenter = v_face_allFrames[:,5885,:] #5885th vertex. around the head top.
            MoshParam = {'ver': v_face_allFrames, 'normal': [], 'f': trifaces, 'centers': faceMassCenter} # support rendering two models together
            MoshParam_list.append(MoshParam)
            continue
        # # CPU version
        start = time.time()
        vertex_normals = ComputeNormal(v_face_allFrames, trifaces)
        print("CPU: normal computing time: {}".format( time.time() - start))
        # GPU version
        # start = time.time()
        # vertex_normals = ComputeNormal_gpu(v_face_allFrames, trifaces)
        # print("GPU: normal computing time: {}".format( time.time() - start))
        faceMassCenter = v_face_allFrames[:,5885,:] #5885th vertex. around the head top.
        #faceMassCenter = np.swapaxes(faceMassCenter,0,1) # (3,frames) - >(frames,3)
        MoshParam = {'ver': v_face_allFrames, 'normal': vertex_normals, 'f': trifaces, 'centers': faceMassCenter} # support rendering two models together
        MoshParam_list.append(MoshParam)
    return MoshParam_list
def setFaceParmData(faceParam_list, bComputeNormal = True):
    """Convert raw face-expression parameter arrays into meshes and register
    them for rendering.

    Args:
        faceParam_list: list of numpy arrays, each (expDim, frames) with
            expDim <= 200. Arrays with fewer than 200 coefficients are
            zero-padded to match the U_exp basis of the total face model.
        bComputeNormal: forward to GetFaceMesh to also compute vertex normals.
    """
    global g_faceModel
    if g_faceModel is None:
        import scipy.io as sio
        g_faceModel = sio.loadmat('/ssd/data/totalmodel/face_model_totalAligned.mat')
    #check the dimension. If dim <200, padding zeros
    for i, f in enumerate(faceParam_list):
        if f.shape[0] < 200:
            newData = np.zeros((200, f.shape[1]))
            newData[:f.shape[0], :] = f
            faceParam_list[i] = newData
    faceParam_list = [{'face_exp': f} for f in faceParam_list]
    faceMesh_list = GetFaceMesh(g_faceModel, faceParam_list, bComputeNormal=bComputeNormal)
    # Fix: was 'etMeshData(...)' (NameError); the sibling helpers in this
    # module register meshes via setMeshData().
    setMeshData(faceMesh_list)
180,866 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
import timeit
g_faceModel = None
from multiprocessing import Pool
import scipy.io as sio
def GetFaceMesh(faceModel, faceParam_list, bComputeNormal = True, bApplyRot = False, bApplyTrans = False, bShowFaceId = False, bApplyRotFlip=False):
def setFaceParmDataWithTrans(faceParam_list, bComputeNormal = True, trans= None, rot = None):
    """Convert face-expression parameters (plus optional per-frame translation
    and rotation) into meshes and register them for rendering.

    Args:
        faceParam_list: list of (expDim, frames) arrays; zero-padded to 200
            rows to match the U_exp basis.
        bComputeNormal: forward to GetFaceMesh.
        trans: optional list of (3, frames) translations; values are scaled
            by 0.01 (presumably cm -> m; confirm against caller).
        rot: optional list of (3, frames) axis-angle rotations.
    """
    global g_faceModel
    if g_faceModel is None:
        import scipy.io as sio
        g_faceModel = sio.loadmat('/ssd/data/totalmodel/face_model_totalAligned.mat')
    #check the dimension. If dim <200, padding zeros
    for i, f in enumerate(faceParam_list):
        if f.shape[0] < 200:
            newData = np.zeros((200, f.shape[1]))
            newData[:f.shape[0], :] = f
            faceParam_list[i] = newData
    faceParam_list_new = []
    for i in range(len(faceParam_list)):
        data = dict()
        data['face_exp'] = faceParam_list[i]  # (200, frames)
        if trans is not None:
            data['trans'] = trans[i]  # (3, frames)
            # Clamp both streams to the common frame length.
            frameLen = min(data['face_exp'].shape[1], data['trans'].shape[1])
            data['face_exp'] = data['face_exp'][:, :frameLen]
            data['trans'] = data['trans'][:, :frameLen] * 0.01
            # Fixed rotation pivot in model coordinates (empirical constant).
            ROT_PIVOT = np.array([0.003501, 0.475611, 0.115576])
            ROT_PIVOT[2] -= 0.1
            #faceMassCenter = np.array([-0.002735 , -1.44728992, 0.2565446 ])#*100
            data['rot_pivot'] = data['trans'] * 0 + ROT_PIVOT[:, np.newaxis]
        if rot is not None:
            data['rot'] = rot[i]
            frameLen = min(data['face_exp'].shape[1], data['rot'].shape[1])
            data['face_exp'] = data['face_exp'][:, :frameLen]
            if trans is not None:
                data['trans'] = data['trans'][:, :frameLen]
            data['rot'] = data['rot'][:, :frameLen]
        faceParam_list_new.append(data)
    faceMesh_list = GetFaceMesh(g_faceModel, faceParam_list_new, bComputeNormal=bComputeNormal,
                                bApplyTrans=(trans is not None), bApplyRot=(rot is not None),
                                bApplyRotFlip=(rot is not None))
    # Fix: was 'etMeshData(...)' (NameError); the sibling helpers in this
    # module register meshes via setMeshData().
    setMeshData(faceMesh_list)
180,867 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
import timeit
from multiprocessing import Pool
import scipy.io as sio
def ComputeFaceNormal(face_list):
    """Estimate a horizontal facing direction per frame from face landmarks.

    Args:
        face_list: list of dicts, each with 'face70' of shape (210, frames)
            (70 landmarks x 3 coords stacked). Landmarks 45/36/33 are used
            (presumably left eye corner / right eye corner / nose tip in the
            68/70-point layout — confirm against the landmark spec).

    Returns:
        list of (3, frames) unit normals projected onto the x-z plane.

    Note: the previous version imported sklearn inside the loop just for
    row-wise L2 normalization; replaced with an equivalent numpy helper
    (zero rows stay zero, matching sklearn.preprocessing.normalize).
    """
    def _unit_rows(a):
        norms = np.linalg.norm(a, axis=1, keepdims=True)
        norms[norms == 0] = 1.0  # leave zero vectors untouched
        return a / norms

    faceNormal_list = []
    for s in face_list:
        leftEye = s['face70'][(45*3):(45*3+3), :].transpose()   # (frames, 3)
        rightEye = s['face70'][(36*3):(36*3+3), :].transpose()  # (frames, 3)
        nose = s['face70'][(33*3):(33*3+3), :].transpose()      # (frames, 3)
        left2Right = _unit_rows(rightEye - leftEye)
        right2nose = _unit_rows(nose - rightEye)
        faceNormal = np.cross(left2Right, right2nose)
        faceNormal[:, 1] = 0  # Project on x-z plane, ignoring y axis
        faceNormal = _unit_rows(faceNormal)
        faceNormal_list.append(faceNormal.transpose())          # (3, frames)
    return faceNormal_list
180,868 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
import timeit
from multiprocessing import Pool
import scipy.io as sio
def ComputeBodyNormal_panoptic(body_list):
    """Estimate a horizontal body-facing direction per frame from Panoptic
    'joints19' skeletons.

    Args:
        body_list: list of dicts with 'joints19' of shape (57, frames)
            (19 joints x 3 coords stacked). Joints 3/9/2 are used
            (left shoulder / right shoulder / body center per the indexing
            below — confirm against the joints19 layout).

    Returns:
        list of (3, frames) unit normals projected onto the x-z plane.

    Note: the previous version imported sklearn inside the loop just for
    row-wise L2 normalization; replaced with an equivalent numpy helper
    (zero rows stay zero, matching sklearn.preprocessing.normalize).
    """
    def _unit_rows(a):
        norms = np.linalg.norm(a, axis=1, keepdims=True)
        norms[norms == 0] = 1.0  # leave zero vectors untouched
        return a / norms

    bodyNormal_list = []
    for s in body_list:
        leftShoulder = s['joints19'][(3*3):(3*3+3), :].transpose()   # (frames, 3)
        rightShoulder = s['joints19'][(9*3):(9*3+3), :].transpose()  # (frames, 3)
        bodyCenter = s['joints19'][(2*3):(2*3+3), :].transpose()     # (frames, 3)
        left2Right = _unit_rows(rightShoulder - leftShoulder)
        right2center = _unit_rows(bodyCenter - rightShoulder)
        bodyNormal = np.cross(left2Right, right2center)
        bodyNormal[:, 1] = 0  # Project on x-z plane, ignoring y axis
        bodyNormal = _unit_rows(bodyNormal)
        bodyNormal_list.append(bodyNormal.transpose())               # (3, frames)
    return bodyNormal_list
180,869 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_meshes = None
import timeit
from multiprocessing import Pool
import scipy.io as sio
def getFaceRootCenter():
    """Return the per-frame root-center array of every registered mesh."""
    return [mesh['centers'] for mesh in g_meshes]
180,870 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_bSaveOnlyMode = False
import timeit
from multiprocessing import Pool
import scipy.io as sio
def setSaveOnlyMode(mode):
    """Set the module-wide save-only flag (truthy mode disables interactive display)."""
    global g_bSaveOnlyMode
    g_bSaveOnlyMode = mode
180,871 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_bSaveToFile = False
import timeit
from multiprocessing import Pool
import scipy.io as sio
def setSave(mode):
    """Set the module-wide flag that makes the renderer write frames to disk."""
    global g_bSaveToFile
    g_bSaveToFile = mode
180,872 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
import timeit
from multiprocessing import Pool
import scipy.io as sio
def ComputeNormal(vertices, trifaces):
    """Compute per-vertex unit normals for every frame of a mesh sequence.

    Args:
        vertices: (frames, numVertices, 3) array.
        trifaces: (numFaces, 3) zero-based triangle vertex indices.

    Returns:
        (frames, numVertices, 3) unit vertex normals, or None when the frame
        count exceeds 5000 or a NaN is detected (warnings are printed, as
        before).

    Fixes vs. previous version:
      * The fast accumulation path indexed `faceNormals[...][0]`, i.e. it
        computed normals for frame 0 only and returned shape (1, V, 3)
        regardless of the frame count — the slow (reference) path handled all
        frames. Now `np.add.reduceat(..., axis=1)` accumulates every frame.
      * Row normalization no longer needs sklearn; the numpy helper below is
        equivalent (zero rows stay zero).
    """
    def _unit(a):
        # L2-normalize along the last axis; zero vectors are left as zeros
        # (matches sklearn.preprocessing.normalize, which this replaces).
        norms = np.linalg.norm(a, axis=-1, keepdims=True)
        norms[norms == 0] = 1.0
        return a / norms

    if vertices.shape[0] > 5000:
        print('ComputeNormal: Warning: too big to compute {0}'.format(vertices.shape) )
        return
    # Face normals for all frames at once.
    U = vertices[:, trifaces[:, 1], :] - vertices[:, trifaces[:, 0], :]  # frames x faceNum x 3
    V = vertices[:, trifaces[:, 2], :] - vertices[:, trifaces[:, 1], :]  # frames x faceNum x 3
    faceNormals = np.cross(U, V)  # frames x faceNum x 3
    if np.isnan(np.max(faceNormals)):
        print('ComputeNormal: Warning nan is detected {0}')
        return
    faceNormals = _unit(faceNormals)

    # Accumulate each face normal into its three vertices. Sorting the
    # (vertexIdx, faceIdx) pairs by vertex lets np.add.reduceat sum runs of
    # faces incident to the same vertex in one vectorized pass.
    index = np.vstack((np.ravel(trifaces), np.repeat(np.arange(len(trifaces)), 3))).T
    index_sorted = index[index[:, 0].argsort()]
    cuts = np.concatenate(([0], np.cumsum(np.unique(index_sorted[:, 0],
                                                    return_counts=True)[1])[:-1]))
    vertex_normals = np.add.reduceat(faceNormals[:, index_sorted[:, 1], :], cuts, axis=1)
    vertex_normals = vertex_normals.astype(np.float64)
    return _unit(vertex_normals)
def LoadObjMesh(filename):
    """Load a Wavefront OBJ file and return a mesh dict.

    Returns {'vertices': (1, V, 3), 'normals': per-vertex normals,
    'faces': (F, 3) zero-based indices}.
    """
    import pywavefront
    wavefront = pywavefront.Wavefront(filename, collect_faces=True)
    # pywavefront already yields zero-based face indices.
    faces = np.array(wavefront.mesh_list[0].faces)          # (#Faces, 3)
    vertices = np.array(wavefront.vertices)[np.newaxis, :]  # 1 x #Vertices x 3
    normal_all = ComputeNormal(vertices, faces)
    return {'vertices': vertices, 'normals': normal_all, 'faces': faces}
180,873 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
g_zoom = 600.
from collections import deque
g_bShowFloor = False
import timeit
from multiprocessing import Pool
import scipy.io as sio
def setupRotationView():
    """Switch to the rotating-view preset: square 1000x1000 window, no floor,
    and a fixed (large) zoom."""
    global g_bShowFloor,g_zoom
    glutReshapeWindow(1000,1000)
    g_bShowFloor = False
    # g_zoom = 220
    g_zoom = 2220
180,874 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_saveImageName_last = None
import timeit
from multiprocessing import Pool
import scipy.io as sio
def show_SMPL_cameraView(bSaveToFile = False, bResetSaveImgCnt=True, countImg = True, bShowBG = True, bReturnRendered= False):
    """Render from the camera viewpoint; optionally reload and return the
    image saved by the renderer (None when unavailable)."""
    show_SMPL(bSaveToFile=bSaveToFile, bResetSaveImgCnt=bResetSaveImgCnt,
              countImg=countImg, bShowBG=bShowBG, mode='camera')
    if not bReturnRendered or g_saveImageName_last is None:
        return None
    return cv2.imread(g_saveImageName_last)
def show_SMPL_sideView(bSaveToFile = False, bResetSaveImgCnt=True, countImg = True, bReturnRendered= True):
    """Render via the camera-view path with the background disabled;
    optionally reload and return the saved image (None when unavailable)."""
    show_SMPL_cameraView(bSaveToFile=bSaveToFile, bResetSaveImgCnt=bResetSaveImgCnt,
                         countImg=countImg, bShowBG=False)
    if bReturnRendered and g_saveImageName_last is not None:
        return cv2.imread(g_saveImageName_last)
    return None
180,875 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_saveImageName_last = None
import timeit
from multiprocessing import Pool
import scipy.io as sio
def show_SMPL(bSaveToFile = False, bResetSaveImgCnt = True, countImg = True, bShowBG = True, zoom = 230, mode = 'camera'):
    """Render the currently registered meshes/skeletons by manually pumping
    the GLUT loop via glutMainLoopEvent.

    Args:
        bSaveToFile: if True, loop until the display callback reports the
            frame was written (g_bSaveToFile_done).
        bResetSaveImgCnt: reset g_saveFrameIdx to 0 before rendering.
        countImg: increment g_saveFrameIdx after rendering.
        bShowBG: stored into g_bShowBackground (show the background texture).
        zoom: free-view zoom; only read by presets that reference it.
        mode: 'camera' | 'youtube' | 'side' | 'init'.

    NOTE(review): inside the 'youtube' branch the first `elif True:` always
    wins, so only the hard-coded "cook/Bengio" camera preset is ever applied;
    the other presets (and `zoom` in them) are dead code.
    """
    init_gl_util()
    if mode == 'init':
        #Setup for rendering
        keyboard('c',0,0)
    global g_bSaveToFile, g_bSaveToFile_done, g_bShowSkeleton, g_bShowFloor, g_viewMode, g_saveFrameIdx
    global g_xTrans, g_yTrans, g_zoom, g_xRotate, g_yRotate, g_zRotate
    g_bSaveToFile_done = False
    g_bSaveToFile = bSaveToFile
    # g_bShowSkeleton = False
    g_bShowFloor = False
    if mode == 'youtube':
        # bShowBG = True
        g_bShowFloor = True
        if False: #Original
            g_xTrans= -86.0
            g_yTrans= 0.0
            g_zoom= zoom
            g_xRotate = 34.0
            g_yRotate= -32.0
            g_zRotate= 0.0
            g_viewMode = 'free'
        elif True: # cook (always taken; see NOTE in docstring)
            g_xTrans= 170
            g_yTrans= 0.0
            # g_zoom= 1600 #Cook
            # g_zoom= 1000 #Comedian
            g_zoom= 800 #Bengio
            g_xRotate = 34.0
            g_yRotate= -32.0
            g_zRotate= 0.0
            g_viewMode = 'free'
        elif True: # almost size (unreachable)
            g_xTrans= 0
            g_yTrans= 0
            g_zoom= zoom #230
            g_xRotate = 61
            g_yRotate= 3
            g_zRotate= 0.0
            g_viewMode = 'free'
    elif mode =="side":
        bShowBG = False
        g_bShowFloor = False
        # global g_xTrans, g_yTrans, g_zoom, g_xRotate, g_yRotate, g_zRotate
        # g_xTrans= 170
        # g_yTrans= 0.0
        # g_zoom= 1600 #Cook
        # g_zoom= 1000 #Comedian
        g_zoom= 266 #Bengio
        g_xRotate = 90
        g_yRotate= 0
        g_zRotate= 0.0
        g_viewMode = 'free'
    elif mode == 'camera' or mode == 'init':
        g_viewMode = 'camView'
    global g_bShowBackground
    g_bShowBackground = bShowBG
    if bResetSaveImgCnt:
        g_saveFrameIdx = 0 #+= 1 #always save as: scene_00000000.png
    if mode == 'init':
        global g_stopMainLoop
        g_stopMainLoop=False
        # while True:
        while g_rotateView_counter*g_rotateInterval<360:
            glutPostRedisplay()
            if bool(glutMainLoopEvent)==False:
                continue
            glutMainLoopEvent()
            break
            # NOTE(review): unreachable after the unconditional break above
            # (indentation reconstructed; confirm against the original file).
            if g_stopMainLoop:
                break
    else:
        if g_bSaveToFile:
            # Keep pumping events until the display callback saved the frame.
            while g_bSaveToFile_done == False:
                glutPostRedisplay()
                if bool(glutMainLoopEvent)==False:
                    continue
                glutMainLoopEvent()
        else:
            for i in range(6): ##Render more than one to be safer
                glutPostRedisplay()
                if bool(glutMainLoopEvent)==False:
                    continue
                glutMainLoopEvent()
    if countImg:
        g_saveFrameIdx +=1
    g_bSaveToFile = False
def show_SMPL_youtubeView(bSaveToFile = False, bResetSaveImgCnt=True, countImg = True, zoom = 230, bReturnRendered= False):
    """Render with the 'youtube' (free view + floor) preset; optionally reload
    and return the saved image (None when unavailable)."""
    show_SMPL(bSaveToFile=bSaveToFile, bResetSaveImgCnt=bResetSaveImgCnt,
              countImg=countImg, zoom=zoom, mode='youtube')
    if not bReturnRendered or g_saveImageName_last is None:
        return None
    return cv2.imread(g_saveImageName_last)
180,876 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
g_bShowBackground = True
import timeit
def setWindowSize(new_width, new_height):
    """Resize the render window, shrinking by 30% once if the requested
    height exceeds the 1600px screen limit."""
    global g_Width, g_Height
    if new_height > 1600:  # taller than the max screen height
        new_width = int(new_width * 0.7)
        new_height = int(new_height * 0.7)
    if (new_width, new_height) != (g_Width, g_Height):
        g_Width, g_Height = new_width, new_height
        #reshape(g_Width, g_Height)
        if g_bGlInitDone:
            glutReshapeWindow(g_Width, g_Height)
def setBackgroundTexture(img):
    """Set the image used as the renderer's background texture."""
    global g_textureData#, g_textureImgOriginal
    g_textureData = img
    #In MTC, the background should be always 1920x1080
    # g_textureData = np.ones( (1080, 1920, 3), dtype=img.dtype)*0 #dtype==np.unit8
    # g_textureData[:img.shape[0],:img.shape[1] ] = img
    # g_textureImgOriginal = img #keep the original image
    # import cv2
    # cv2.imshow('here??',img)
    # cv2.waitKey(0)
from multiprocessing import Pool
import scipy.io as sio
def setSaveFolderName(folderName):
    """Set the directory where rendered frames are written."""
    global g_saveFolderName
    g_saveFolderName = folderName
def setSaveImgName(imgName):
    """Set the filename used for the next saved rendered frame."""
    global g_saveImageName
    g_saveImageName = imgName
def show_SMPL(bSaveToFile = False, bResetSaveImgCnt = True, countImg = True, bShowBG = True, zoom = 230, mode = 'camera'):
    """Render the currently registered meshes/skeletons by manually pumping
    the GLUT loop via glutMainLoopEvent.

    Args:
        bSaveToFile: if True, loop until the display callback reports the
            frame was written (g_bSaveToFile_done).
        bResetSaveImgCnt: reset g_saveFrameIdx to 0 before rendering.
        countImg: increment g_saveFrameIdx after rendering.
        bShowBG: stored into g_bShowBackground (show the background texture).
        zoom: free-view zoom; only read by presets that reference it.
        mode: 'camera' | 'youtube' | 'side' | 'init'.

    NOTE(review): inside the 'youtube' branch the first `elif True:` always
    wins, so only the hard-coded "cook/Bengio" camera preset is ever applied;
    the other presets (and `zoom` in them) are dead code.
    """
    init_gl_util()
    if mode == 'init':
        #Setup for rendering
        keyboard('c',0,0)
    global g_bSaveToFile, g_bSaveToFile_done, g_bShowSkeleton, g_bShowFloor, g_viewMode, g_saveFrameIdx
    global g_xTrans, g_yTrans, g_zoom, g_xRotate, g_yRotate, g_zRotate
    g_bSaveToFile_done = False
    g_bSaveToFile = bSaveToFile
    # g_bShowSkeleton = False
    g_bShowFloor = False
    if mode == 'youtube':
        # bShowBG = True
        g_bShowFloor = True
        if False: #Original
            g_xTrans= -86.0
            g_yTrans= 0.0
            g_zoom= zoom
            g_xRotate = 34.0
            g_yRotate= -32.0
            g_zRotate= 0.0
            g_viewMode = 'free'
        elif True: # cook (always taken; see NOTE in docstring)
            g_xTrans= 170
            g_yTrans= 0.0
            # g_zoom= 1600 #Cook
            # g_zoom= 1000 #Comedian
            g_zoom= 800 #Bengio
            g_xRotate = 34.0
            g_yRotate= -32.0
            g_zRotate= 0.0
            g_viewMode = 'free'
        elif True: # almost size (unreachable)
            g_xTrans= 0
            g_yTrans= 0
            g_zoom= zoom #230
            g_xRotate = 61
            g_yRotate= 3
            g_zRotate= 0.0
            g_viewMode = 'free'
    elif mode =="side":
        bShowBG = False
        g_bShowFloor = False
        # global g_xTrans, g_yTrans, g_zoom, g_xRotate, g_yRotate, g_zRotate
        # g_xTrans= 170
        # g_yTrans= 0.0
        # g_zoom= 1600 #Cook
        # g_zoom= 1000 #Comedian
        g_zoom= 266 #Bengio
        g_xRotate = 90
        g_yRotate= 0
        g_zRotate= 0.0
        g_viewMode = 'free'
    elif mode == 'camera' or mode == 'init':
        g_viewMode = 'camView'
    global g_bShowBackground
    g_bShowBackground = bShowBG
    if bResetSaveImgCnt:
        g_saveFrameIdx = 0 #+= 1 #always save as: scene_00000000.png
    if mode == 'init':
        global g_stopMainLoop
        g_stopMainLoop=False
        # while True:
        while g_rotateView_counter*g_rotateInterval<360:
            glutPostRedisplay()
            if bool(glutMainLoopEvent)==False:
                continue
            glutMainLoopEvent()
            break
            # NOTE(review): unreachable after the unconditional break above
            # (indentation reconstructed; confirm against the original file).
            if g_stopMainLoop:
                break
    else:
        if g_bSaveToFile:
            # Keep pumping events until the display callback saved the frame.
            while g_bSaveToFile_done == False:
                glutPostRedisplay()
                if bool(glutMainLoopEvent)==False:
                    continue
                glutMainLoopEvent()
        else:
            for i in range(6): ##Render more than one to be safer
                glutPostRedisplay()
                if bool(glutMainLoopEvent)==False:
                    continue
                glutMainLoopEvent()
    if countImg:
        g_saveFrameIdx +=1
    g_bSaveToFile = False
def render_on_image(saveDir, saveFileName, inputImage, scaleFactor=1, is_showBackground=True):
    """Render the current scene over inputImage and save it as
    saveDir/saveFileName.

    Args:
        saveDir: output directory.
        saveFileName: output file name.
        inputImage: BGR image used as the background texture; also sets the
            window size (scaled by scaleFactor).
        scaleFactor: window scale relative to the image size.
        is_showBackground: whether the background texture is drawn.
    """
    # Fix: the original assigned `g_bShowBackground` without a `global`
    # declaration (creating a dead local), and show_SMPL then overwrote the
    # module flag with its default bShowBG=True anyway — so the
    # is_showBackground argument never had any effect. Declare the global and
    # pass the flag through to show_SMPL.
    global g_bShowBackground
    g_bShowBackground = is_showBackground
    setBackgroundTexture(inputImage)
    setWindowSize(inputImage.shape[1] * scaleFactor, inputImage.shape[0] * scaleFactor)
    setSaveFolderName(saveDir)
    setSaveImgName(saveFileName)
    show_SMPL(bSaveToFile=True, bResetSaveImgCnt=False, countImg=True,
              bShowBG=is_showBackground, mode='camera')
180,877 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
from collections import deque
import timeit
from multiprocessing import Pool
def setSkeleton(skel_list, jointType=None, colorRGB=None, bReset= True):
    """Register skeleton sequences for rendering.

    Args:
        skel_list: list of (skelDim, frameNum) arrays; a single 2-D array is
            also accepted and wrapped into a one-element list.
        jointType: skeleton layout tag stored with each entry (e.g. 'smplcoco').
        colorRGB: per-skeleton color; None lets the renderer choose.
        bReset: if True (default), discard previously registered skeletons;
            otherwise append to the existing ones.
    """
    global g_skeletons,g_frameLimit #nparray: (skelNum, skelDim, frameNum)
    # global g_skeletons_GT #nparray: (skelNum, skelDim, frameNum)
    #If skel_list is not a list
    if isinstance(skel_list,list) == False and len(skel_list.shape)==2:
        skel_list = skel_list[np.newaxis,:]
    #add joint type
    if g_skeletons is None or bReset:
        g_skeletons =[]
    # if color is None:
    #     color = (255,0,0)
    #color can None
    # NOTE(review): indentation reconstructed — the append loop is placed at
    # function level so bReset=False appends to existing skeletons; confirm
    # against the original file.
    for s in skel_list:
        g_skeletons.append({"skeleton":s, "color":colorRGB, "type":jointType})
    # else:# bisGT == False: TODO: no need to have g_skeletons_GT anymore?
    #     g_skeletons_GT =[]
    #     for s in skel_list:
    #         g_skeletons_GT.append({"skeleton":s, "color":(255,0,0), "type":jointType})
    # if jointType =='smpl':
    #     print("Use smplcoco instead of smpl!")
    #     assert(False)
    # if bIsGT==False:
    #     #Add Skeleton Data
    #     # if len(skel_list)>1:
    #     #     lens = [len(l) for l in skel_list]
    #     #     minLeng=max(lens)
    #     #     for i in range(0,len(skel_list)):
    #     #         skel_list[i] = skel_list[i][:,:minLeng]
    #     #g_skeletons = np.asarray(skel_list) #no effect if skel_list is already np.array
    #     g_skeletons = skel_list #List of 2dim np.array
    #     #g_frameLimit = g_skeletons.shape[2]
    #     # frameLens = [l.shape[1] for l in g_skeletons]
    #     # g_frameLimit = max(g_frameLimit,min(frameLens))
    # else:
    #     #Add Skeleton Data
    #     g_skeletons_GT = skel_list #List of 2dim np.array
    #     #g_frameLimit = g_skeletons.shape[2]
    #     # frameLens = [l.shape[1] for l in g_skeletons_GT]
    #     # g_frameLimit = max(g_frameLimit,min(frameLens))
    setFrameLimit()
import scipy.io as sio
def show(maxIter=-10):
    """Start the interactive GL loop (negative maxIter presumably means run
    indefinitely — confirm in init_gl)."""
    init_gl(maxIter)
def VisSkeleton_single(skel):
    """Visualize a single skeleton given as a flat (N,) array (one frame)."""
    frame_col = skel[:, np.newaxis]                       # (N, 1)
    setSkeleton([frame_col], jointType='smplcoco')        # (skelNum, dim, frames)
    show()
180,878 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import json
import numpy as np
from PIL import Image, ImageOps
import cv2
import numpy as np
import sys, math
import threading
import time
import pickle
from renderer.render_utils import ComputeNormal
g_nearPlane = 0.01
from collections import deque
import timeit
from multiprocessing import Pool
import scipy.io as sio
def setNearPlane(p):
    """Set the near clipping plane distance used by the projection setup."""
    global g_nearPlane
    g_nearPlane = p
180,879 | import numpy as np
def ComputeNormal_gpu(vertices, trifaces):
    """GPU (CUDA) variant of ComputeNormal: per-vertex normals for all frames.

    Args:
        vertices: (frames, numVertices, 3) numpy array.
        trifaces: (numFaces, 3) zero-based triangle vertex indices.

    Returns:
        numpy array of unit vertex normals, or None when frames > 5000.

    NOTE(review): the `[0]` slice in the reduceat call below keeps only
    frame 0 of faceNormals, and `np.add.reduceat` is applied to a CUDA
    tensor (which normally cannot be converted to numpy implicitly) —
    both look suspect for multi-frame input; confirm before relying on this.
    """
    import torch
    import torch.nn.functional as F
    if vertices.shape[0] > 5000:
        # Guard is on the frame count (axis 0), not the vertex count.
        print('ComputeNormal: Warning: too big to compute {0}'.format(vertices.shape) )
        return
    #compute vertex Normals for all frames
    #trifaces_cuda = torch.from_numpy(trifaces.astype(np.long)).cuda()
    vertices_cuda = torch.from_numpy(vertices.astype(np.float32)).cuda()
    U_cuda = vertices_cuda[:,trifaces[:,1],:] - vertices_cuda[:,trifaces[:,0],:] #frames x faceNum x 3
    V_cuda = vertices_cuda[:,trifaces[:,2],:] - vertices_cuda[:,trifaces[:,1],:] #frames x faceNum x 3
    originalShape = list(U_cuda.size()) #remember: frames x faceNum x 3
    U_cuda = torch.reshape(U_cuda, [-1,3])#.astype(np.float32)
    V_cuda = torch.reshape(V_cuda, [-1,3])#.astype(np.float32)
    faceNormals = U_cuda.cross(V_cuda)
    faceNormals = F.normalize(faceNormals,dim=1)
    faceNormals = torch.reshape(faceNormals, originalShape)
    # trifaces has duplicated vertex index, so cannot be parallazied
    # vertex_normals = torch.zeros(vertices.shape,dtype=torch.float32).cuda() #(frames x 11510) x 3
    # for fIdx, vIdx in enumerate(trifaces[:,0]):
    #     vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
    # for fIdx, vIdx in enumerate(trifaces[:,1]):
    #     vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
    # for fIdx, vIdx in enumerate(trifaces[:,2]):
    #     vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
    # Computing vertex normals, much faster (and obscure) replacement
    index = np.vstack((np.ravel(trifaces), np.repeat(np.arange(len(trifaces)), 3))).T
    index_sorted = index[index[:,0].argsort()]
    vertex_normals = np.add.reduceat(faceNormals[:,index_sorted[:, 1],:][0],
        np.concatenate(([0], np.cumsum(np.unique(index_sorted[:, 0],
        return_counts=True)[1])[:-1])))[None, :]
    vertex_normals = torch.from_numpy(vertex_normals).float().cuda()
    vertex_normals = F.normalize(vertex_normals,dim=2)
    vertex_normals = vertex_normals.data.cpu().numpy() #(batch, chunksize, dim)
    return vertex_normals
180,880 | import os
from OpenGL.GL import *
def findFileOrThrow(strBasename):
def loadShader(shaderType, shaderFile):
    """Compile a GLSL shader of the given type from shaderFile.

    Args:
        shaderType: GL_VERTEX_SHADER, GL_GEOMETRY_SHADER or GL_FRAGMENT_SHADER.
        shaderFile: shader source file name, resolved via findFileOrThrow.

    Returns:
        The shader handle. On compile failure the info log is printed and the
        handle is still returned (best-effort, matching the original behavior).
    """
    # check if file exists, get full path name
    strFilename = findFileOrThrow(shaderFile)
    with open(strFilename, 'r') as f:
        shaderData = f.read()

    shader = glCreateShader(shaderType)
    glShaderSource(shader, shaderData)  # note that this is a simpler function call than in C

    # This shader compilation is more explicit than the one used in
    # framework.cpp, which relies on a glutil wrapper function.
    # This is made explicit here mainly to decrease dependence on pyOpenGL
    # utilities and wrappers, which docs caution may change in future versions.
    glCompileShader(shader)

    status = glGetShaderiv(shader, GL_COMPILE_STATUS)
    if status == GL_FALSE:
        # Note that getting the error log is much simpler in Python than in C/C++
        # and does not require explicit handling of the string buffer
        strInfoLog = glGetShaderInfoLog(shader)
        # Fix: the original compared enum values with 'is' (identity), which is
        # unreliable for numeric constants; use equality via a lookup table.
        typeNames = {
            GL_VERTEX_SHADER: "vertex",
            GL_GEOMETRY_SHADER: "geometry",
            GL_FRAGMENT_SHADER: "fragment",
        }
        strShaderType = typeNames.get(shaderType, "")
        print("Compilation failure for " + strShaderType + " shader:\n" + str(strInfoLog))
    return shader
180,881 | import os
from OpenGL.GL import *
def createProgram(shaderList):
    """Link the given compiled shaders into a GL program.

    The linker log is printed on failure; shaders are detached afterwards in
    either case. Returns the program handle.
    """
    program = glCreateProgram()

    for sh in shaderList:
        glAttachShader(program, sh)

    glLinkProgram(program)

    status = glGetProgramiv(program, GL_LINK_STATUS)
    if status == GL_FALSE:
        # Getting the error log is much simpler in Python than in C/C++ —
        # no explicit string-buffer handling needed.
        infoLog = glGetProgramInfoLog(program)
        print("Linker failure: \n" + str(infoLog))

    for sh in shaderList:
        glDetachShader(program, sh)

    return program
180,882 | import numpy as np
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GL import *
from renderer.shaders.framework import createProgram, loadShader
from renderer.render_utils import ComputeNormal
import cv2
# The imports above provide the dependencies needed by `loadSMPL`, which
# converts SMPL parameters to vertices (with DensePose UV coloring).
# The function is defined below.
def loadSMPL():
    """Build a DensePose-colored SMPL mesh from hard-coded shape/pose params.

    Loads the neutral SMPL model and the DensePose UV lookup table from
    hard-coded local paths, evaluates one body with the constants below,
    and returns (dp_vertex, faces, vertexColor):
        dp_vertex: (7829, 3) vertices in cm, duplicated per DensePose part.
        faces: (13774, 3) zero-based triangle indices into dp_vertex.
        vertexColor: (13774, 3) per-face color from the normalized V coords.
    """
    from modelViewer.batch_smpl import SMPL
    smplWrapper = SMPL("/home/hjoo/codes/glViewer/models/neutral_smpl_with_cocoplus_reg.pkl")
    # Hard-coded example shape coefficients (10-dim beta).
    #beta = np.array([ 0.60774645, 0.76093562, -0.46162634, 0.0948126 , 0.05115048, 0.18798076, 0.02297921, -0.2294132 , 0.14359247, 0.07730228])
    beta = np.array([-0.2723351 , 0.24493244, 0.66188693, 3.080746 , 1.803318 ,-0.06044015, -0.19139446, 0.07565568, 0.9439081 , -0.51000655])
    # Hard-coded example pose (72-dim axis-angle); the second assignment below
    # overrides this one, so only the second pose is actually used.
    pose = np.array([ 1.21531341, 1.11861894, 1.1508407 , 0.03090198, 0.07568664,
        0.05786929, -0.01946101, -0.04791601, 0.00566624, -0.01975956,
        0.04040587, -0.02909228, 0.17217339, -0.18295232, -0.05333628,
        0.24321426, 0.16652959, 0.01652244, 0.184938 , -0.08139203,
        0.08136881, -0.09354749, 0.22522661, -0.07165985, -0.08359848,
        -0.27758324, 0.00502749, -0.17570865, -0.00369029, -0.0219912,
        -0.34913435, -0.05382582, 0.22288936, 0.10101145, 0.32377259,
        -0.08444951, -0.03223499, -0.07053581, -0.08183003, -0.1110253 ,
        0.00895658, -0.38919476, -0.00748763, -0.02522146, 0.5864923 ,
        0.58635307, -0.00583143, -0.03246076, -0.10047339, -0.92346576,
        -0.36538482, 0.2815331 , 0.24593229, 0.79902594, -0.17193863,
        -2.14336745, 0.39068873, -0.15159283, 0.2525081 , -0.02509047,
        0.08939309, -0.0801741 , 0.40276617, -0.03815543, -0.05893454,
        -0.07858882, -0.24278936, -0.3096408 , -0.55118646, -0.09647344,
        0.45875036, 0.42067384])
    pose = np.array([ 9.0558e-01, 6.4592e-01, -2.8690e+00, 3.1094e-01, 1.0175e-01,
        5.6915e-02, 3.7163e-01, -1.1514e-01, -3.2411e-02, 9.1940e-02,
        1.3573e-02, 5.8944e-03, -3.4365e-02, -1.7157e-01, 2.0417e-02,
        -6.8286e-02, 1.2189e-03, -2.1876e-02, 1.2365e-01, -6.9564e-02,
        4.4505e-03, -6.7063e-02, 1.0760e-01, 7.0232e-02, -1.2466e-01,
        -1.3891e-01, -1.2108e-01, -1.6219e-02, -5.5884e-02, -9.7147e-03,
        -4.0098e-02, 1.6649e-01, -1.4749e-01, 1.7493e-01, -3.9301e-02,
        2.2233e-01, 2.2567e-01, -1.9609e-01, 1.3878e-02, 1.2296e-01,
        7.6158e-04, -5.5521e-01, 5.7593e-02, 7.3970e-02, 5.6500e-01,
        7.9010e-02, -2.0025e-01, -3.3629e-02, 4.0182e-02, -1.7911e-01,
        -8.7417e-01, 1.4417e-01, 1.3365e-01, 9.1869e-01, 2.1439e-01,
        -2.9541e-01, 8.5324e-02, -5.2092e-02, 2.0730e-01, -1.1425e-01,
        -6.1498e-02, 7.6002e-02, -2.3677e-01, -3.5770e-02, -7.9627e-02,
        1.5318e-01, -1.4370e-01, -4.8611e-02, -1.3202e-01, -6.9166e-02,
        1.3943e-01, 1.9009e-01])
    # pose = np.array([ 2.2487712, 0.28050578,-2.1792502 , -0.10493116, 0.01239435, 0.02972716, 0.08953293,-0.10654334, -0.00504329, 0.1593982 , 0.01969572, 0.08852343, 0.09914812, 0.12574932, -0.02512331, -0.01473788, -0.04562924, 0.04665173, 0.0474331 , -0.0616711 , -0.00967203, 0.05010046, 0.1775912 , -0.08904129, -0.06684269, -0.14769007, 0.10105508, 0.0688806 , -0.02561731, 0.00964942, -0.1680568 , 0.14983022, 0.20799895, 0.06796098, 0.10919931, -0.20863819, 0.00823393,-0.17863278, 0.09926094, 0.01495223, -0.08837841, -0.28607178,-0.11105742, 0.24558525, 0.06441574, 0.299364 , -0.15079273, 0.02175152, 0.20322715, -0.45768845, -0.9899641 , -0.06223915, 0.5227556 , 0.6171622 , 0.1368894 , -1.3889741 , 0.19389033,-0.24303943, 1.1106223 , -0.2655932 , -0.6844785 , -0.17720126,-0.1870633 , -0.30705413, 0.08231031, 0.1118647 , 0.02531371, 0.00614487, -0.05623743, -0.01657844, 0.07361342, 0.04853413])
    """Converting SMPL parameters to vertices"""
    beta = beta[np.newaxis,:]
    pose = pose[np.newaxis,:]
    v, j,_ = smplWrapper(beta, pose) #beta: (N,10), pose: (N, 72)... return: v:(N,6890,3), j(N,19,3)
    # Convert from meters to centimeters (viewer units).
    v *=100
    j *=100
    #Load densepose data
    import scipy.io as sio
    densepose_info = sio.loadmat('/home/hjoo/codes/glViewer/densepose_uv_data/UV_Processed.mat') #All_FaceIndices (13774), All_Faces(13774), All_U(7829), All_U_norm(7829), All_V(7829), All_V_norm (7829), All_vertices (7829)
    # All_FaceIndices - part labels for each face
    # All_Faces - vertex indices for each face
    # All_vertices - SMPL vertex IDs for all vertices (note that one SMPL vertex can be shared across parts and thus appear in faces with different part labels)
    # All_U - U coordinates for all vertices
    # All_V - V coordinates for all vertices
    # All_U_norm - normalization factor for U coordinates to map them to [0, 1] interval
    # All_V_norm - normalization factor for V coordinates to map them to [0, 1] interval
    # vertexColor = densepose_info['All_U_norm']*255
    # vertexColor = np.zeros((v.shape[1], 3))
    # vertexColor[:,0] = densepose_info['All_U_norm'][:v.shape[1]].flatten() #(6890,3)
    # vertexColor[:,1] = densepose_info['All_V_norm'][:v.shape[1]].flatten() #(6890,3)
    # faces = smplWrapper.f
    v =v[0] #(6890,3)
    # Expand SMPL vertices to the DensePose vertex list (one SMPL vertex may
    # appear multiple times, once per part). The table is 1-based.
    dp_vertex = v[densepose_info['All_vertices']-1] #(1,7829,3) #Considering repeatation
    faces =densepose_info['All_Faces']-1 #0~7828
    # vertexColor = densepose_info['All_FaceIndices'] #(13774,1)
    # vertexColor = np.repeat(vertexColor,3,axis=1) /24.0 #(13774,3)
    # vertexColor = densepose_info['All_U_norm'] #(13774,1)
    vertexColor = densepose_info['All_V_norm'] #(13774,1)
    vertexColor = np.repeat(vertexColor,3,axis=1)
    # vertexColor[vertexColor!=2]*=0
    # vertexColor[vertexColor==2]=24
    return dp_vertex, faces, vertexColor
180,883 | import sys
import numpy as np
import cv2
import pdb
from PIL import Image, ImageDraw
from opendr.camera import ProjectPoints
from opendr.renderer import ColoredRenderer
from opendr.lighting import LambertianPointLight
def _create_renderer(w=640,
                     h=480,
                     rt=np.zeros(3),
                     t=np.zeros(3),
                     f=None,
                     c=None,
                     k=None,
                     near=.5,
                     far=10.):
    """Build an opendr ColoredRenderer with a pinhole camera and view frustum.

    Args:
        w, h: output image size in pixels.
        rt, t: camera rotation (Rodrigues) and translation.
        f: focal lengths (fx, fy); defaults to w/2 for both.
        c: principal point; defaults to the image center.
        k: distortion coefficients; defaults to zeros (no distortion).
        near, far: clipping planes of the frustum.
    """
    if f is None:
        f = np.array([w, w]) / 2.
    if c is None:
        c = np.array([w, h]) / 2.
    if k is None:
        k = np.zeros(5)
    renderer = ColoredRenderer()
    renderer.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k)
    renderer.frustum = {'near': near, 'far': far, 'height': h, 'width': w}
    return renderer
def simple_renderer(rn,
                    verts,
                    faces,
                    yrot=np.radians(70),
                    color=np.array([0, 0, 255])):
    """Render a colored mesh lit by three Lambertian point lights.

    The mesh color is used as albedo; back, left and right lights are
    accumulated into the renderer's vertex colors. Returns the rendered image.
    """
    rn.set(v=verts, f=faces, vc=color, bgcolor=np.ones(3))
    albedo = rn.vc
    # (light position before yrot, light color) for back / left / right lights.
    light_setups = [
        (np.array([-200, -100, -100]), np.array([1, 1, 1])),
        (np.array([800, 10, 300]), np.array([1, 1, 1])),
        (np.array([-500, 500, 1000]), np.array([.7, .7, .7])),
    ]
    for idx, (pos, light_color) in enumerate(light_setups):
        term = LambertianPointLight(
            f=rn.f,
            v=rn.v,
            num_verts=len(rn.v),
            light_pos=_rotateY(pos, yrot),
            vc=albedo,
            light_color=light_color)
        # First light replaces the flat albedo; the rest accumulate.
        if idx == 0:
            rn.vc = term
        else:
            rn.vc += term
    return rn.r
def get_alpha(imtmp, bgval=1.):
    """Append an alpha channel: pixels equal to `bgval` in every channel are
    made transparent, everything else opaque.

    Fixes:
      - for uint8 images, opaque alpha is now 255 (was 0/1, which made the
        whole image nearly transparent) — consistent with `append_alpha`;
      - the cv2 split/merge round-trip is replaced by a single numpy stack
        (removes the unused h/w locals as well).

    Args:
        imtmp: (H, W, 3) image array.
        bgval: scalar background value compared against all channels.
    Returns:
        (H, W, 4) array of the same dtype with the alpha channel appended.
    """
    alpha = (~np.all(imtmp == bgval, axis=2)).astype(imtmp.dtype)
    if np.issubdtype(imtmp.dtype, np.uint8):
        alpha = alpha * 255  # opaque == 255 for byte images
    return np.dstack((imtmp[:, :, :3], alpha))
def append_alpha(imtmp):
    """Append a fully-opaque alpha channel (255 for uint8 images, 1 otherwise).

    Improvement: drops the needless cv2.split/cv2.merge round-trip and the
    redundant astype (np.ones_like already matches the input dtype); a single
    numpy stack produces the same result.

    Args:
        imtmp: (H, W, 3) image array.
    Returns:
        (H, W, 4) array of the same dtype.
    """
    alpha = np.ones_like(imtmp[:, :, 0])
    if np.issubdtype(imtmp.dtype, np.uint8):
        alpha = alpha * 255
    return np.dstack((imtmp, alpha))
def render_model(verts,
                 faces,
                 w,
                 h,
                 cam,
                 near=0.5,
                 far=25,
                 img=None,
                 do_alpha=False,
                 color=None):
    """Render a mesh with the given camera; optionally composite over `img`
    and/or append an alpha channel (transparent background when no `img`)."""
    rn = _create_renderer(
        w=w, h=h, near=near, far=far, rt=cam.rt, t=cam.t, f=cam.f, c=cam.c)
    if img is not None:
        # Use img as the render background; normalize if it looks like 0-255.
        rn.background_image = img / 255. if img.max() > 1.1 else img
    rendered = simple_renderer(rn, verts, faces, color=color)
    if do_alpha:
        # White background -> transparent; real background -> opaque alpha.
        rendered = get_alpha(rendered) if img is None else append_alpha(rendered)
    return rendered
180,884 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def __ValidateNumpyImg(inputImg):
veryFirstImShow = True
def ImgSC(inputImg, waitTime=1, bConvRGB2BGR=False, name='image', scale=1.0):
    """Rescale image intensities to 0-255 and display in an OpenCV window.

    Fixes:
      - divide-by-zero when the image is constant (max == min);
      - cv2.resize dsize must be (width, height) = (shape[1], shape[0]); the
        old code swapped the axes and truncated the scale via int(scale)
        (scale=0.5 requested a 0x0 resize);
      - cv2.imshow interprets float arrays as 0-1; convert to uint8 since the
        data was scaled to 0-255.
    """
    inputImg = __ValidateNumpyImg(inputImg)
    minVal = np.min(inputImg)
    maxVal = np.max(inputImg)
    denom = (maxVal - minVal) if maxVal > minVal else 1.0
    inputImg = (inputImg - minVal) / denom * 255
    if scale != 1.0:
        new_size = (int(inputImg.shape[1] * scale), int(inputImg.shape[0] * scale))
        inputImg = cv2.resize(inputImg, new_size)
    if bConvRGB2BGR:
        inputImg = cv2.cvtColor(inputImg, cv2.COLOR_RGB2BGR)
    cv2.imshow(name, np.uint8(inputImg))
    global veryFirstImShow
    if veryFirstImShow:
        print(">> Press any key to move on")
        cv2.waitKey(0)  # the initial one is always blank... why?
        veryFirstImShow = 0
    else:
        cv2.waitKey(waitTime)
180,885 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def Vis_Bbox_XYXY(inputImg, bbox_xyxy, color=None):
    """Draw a bounding box given as (xmin, ymin, xmax, ymax); red by default."""
    top_left = (int(bbox_xyxy[0]), int(bbox_xyxy[1]))
    bottom_right = (int(bbox_xyxy[2]), int(bbox_xyxy[3]))
    box_color = (0, 0, 255) if color is None else color
    cv2.rectangle(inputImg, top_left, bottom_right, box_color, 3)
    return inputImg
180,886 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def __ValidateNumpyImg(inputImg):
    """Coerce a PIL Image to a numpy array; numpy inputs pass through as-is."""
    if isinstance(inputImg, Image):
        return np.array(inputImg)
    return inputImg
def Vis_CocoBbox(inputImg, coco_annot):
    """Draw the COCO annotation bbox ([x, y, w, h]) as a white rectangle."""
    inputImg = __ValidateNumpyImg(inputImg)
    x, y, bw, bh = np.round(coco_annot['bbox'])
    cv2.rectangle(inputImg,
                  (int(x), int(y)),
                  (int(x + bw), int(y + bh)),
                  (255, 255, 255), 3)
    return inputImg
180,887 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def __ValidateNumpyImg(inputImg):
def Vis_CocoSkeleton(keypoints, image=None):
    """Draw COCO-17 keypoints and bones (left side red, right side gray).

    keypoints may be flat with 51 values (x, y, label per joint) or 34 values
    (x, y per joint). Joints with x == 0 are treated as not annotated.
    """
    if not isinstance(image, np.ndarray):  # no canvas given: white 1000x1000
        image = np.ones((1000, 1000, 3), np.uint8) * 255
    image = __ValidateNumpyImg(image)
    # Bones over the original COCO-17 ordering, paired with a left-side flag.
    link2D = [[0, 1], [1, 3],
              [0, 5], [5, 7], [7, 9],
              [0, 11], [11, 13], [13, 15],
              [0, 2], [2, 4],
              [0, 6], [6, 8], [8, 10],
              [0, 12], [12, 14], [14, 16]]
    bLeft = [1, 1,
             1, 1, 1,
             1, 1, 1,
             0, 0,
             0, 0, 0,
             0, 0, 0]
    if keypoints.shape[0] == 51:
        keypoints = np.reshape(keypoints, (-1, 3))  # (17, 3): x, y, label
    else:
        keypoints = np.reshape(keypoints, (-1, 2))  # (17, 2): x, y
    radius = 4
    for pt in keypoints:
        cv2.circle(image, (int(pt[0]), int(pt[1])), radius, (0, 0, 255), -1)
    for (parent, child), is_left in zip(link2D, bLeft):
        if keypoints[parent][0] == 0 or keypoints[child][0] == 0:  # not annotated
            continue
        c = (0, 0, 255) if is_left else (200, 200, 200)
        cv2.line(image,
                 (int(keypoints[parent][0]), int(keypoints[parent][1])),
                 (int(keypoints[child][0]), int(keypoints[child][1])),
                 c, radius - 2)
    return image
180,888 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def __ValidateNumpyImg(inputImg):
    """Coerce a PIL Image to a numpy array; numpy inputs pass through as-is."""
    if isinstance(inputImg, Image):
        return np.array(inputImg)
    return inputImg
# DensePose part labels in dp_I index order (index 1..24 as used by the
# DensePose annotation format).
_DP_PART_NAMES = (
    'Torso_Back', 'Torso_Front', 'RHand', 'LHand', 'LFoot', 'RFoot',
    'R_upperLeg_back', 'L_upperLeg_back', 'R_upperLeg_front', 'L_upperLeg_front',
    'R_lowerLeg_back', 'L_lowerLeg_back', 'R_lowerLeg_front', 'L_lowerLeg_front',
    'L_upperArm_front', 'R_upperArm_front', 'L_upperArm_back', 'R_upperArm_back',
    'L_lowerArm_back', 'R_lowerArm_back', 'L_lowerArm_front', 'R_lowerArm_front',
    'RFace', 'LFace',
)
# Map each part name to its 1-based dp_I index.
DP_partIdx = {name: idx for idx, name in enumerate(_DP_PART_NAMES, start=1)}
def Vis_Densepose(inputImg, coco_annot):
    """Visualize DensePose annotations on an image.

    Draws bounding boxes for the right hand (red), left hand (green) and face
    (blue) derived from the dp_I part labels, then draws every sampled surface
    point colored by its part index through a jet colormap.

    Improvement: the three copy-pasted per-part bbox extraction blocks are
    deduplicated into a single `_part_bbox` helper (same output, including the
    degenerate [-1, -1, -1, -1] box when a part has no points — the original
    drew that degenerate rectangle too, and this keeps that behavior).

    Args:
        inputImg: image (numpy array or PIL Image).
        coco_annot: COCO-style annotation dict with 'bbox' and optional
            'dp_x'/'dp_y'/'dp_I' DensePose fields.
    Returns:
        The image with the visualization drawn on it.
    """
    inputImg = __ValidateNumpyImg(inputImg)
    import sys
    sys.path.append('/home/hjoo/data/DensePose/detectron/utils/')
    import densepose_methods as dp_utils
    DP = dp_utils.DensePoseMethods()  # kept for parity (IUV -> vertex lookups)
    if 'dp_x' not in coco_annot.keys():
        print("## Warning: No Densepose coco_annotation")
        return inputImg
    bbr = np.round(coco_annot['bbox'])  # [leftTop_x, leftTop_y, width, height]
    # dp_x/dp_y are normalized to a 255x255 box; map them into image coords.
    Point_x = np.array(coco_annot['dp_x']) / 255. * bbr[2] + bbr[0]
    Point_y = np.array(coco_annot['dp_y']) / 255. * bbr[3] + bbr[1]

    def _part_bbox(part_ids):
        # [x, y, w, h] around points whose dp_I label is in part_ids;
        # [-1, -1, -1, -1] when the part has no sampled points.
        idxs = [i for i, x in enumerate(coco_annot['dp_I']) if x in part_ids]
        if not idxs:
            return [-1, -1, -1, -1]
        minX, maxX = min(Point_x[idxs]), max(Point_x[idxs])
        minY, maxY = min(Point_y[idxs]), max(Point_y[idxs])
        return [minX, minY, maxX - minX, maxY - minY]

    RhandBBox = _part_bbox([DP_partIdx['RHand']])
    LhandBBox = _part_bbox([DP_partIdx['LHand']])
    FaceBBox = _part_bbox([DP_partIdx['RFace'], DP_partIdx['LFace']])
    # RHand red, LHand green, Face blue (BGR).
    for bbox, box_color in ((RhandBBox, (0, 0, 255)),
                            (LhandBBox, (0, 255, 0)),
                            (FaceBBox, (255, 0, 0))):
        pt1 = (int(bbox[0]), int(bbox[1]))
        pt2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
        cv2.rectangle(inputImg, pt1, pt2, box_color, 2)
    # Color each sampled point by its part index (1..24) via a jet colormap.
    tempColorIdx = np.uint8(np.array(coco_annot['dp_I']) / 24 * 255)
    tempColorIdx = cv2.applyColorMap(tempColorIdx, cv2.COLORMAP_JET)
    for px, py, pc in zip(Point_x, Point_y, tempColorIdx):
        cv2.circle(inputImg, (int(px), int(py)), 5,
                   pc[0].astype(np.int32).tolist(), -1)
    return inputImg
180,889 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def Vis_Skeleton_2D_H36m(pt2d, image=None, color=None):
    """Draw an H36M skeleton with 16, 17 or 32 (mocap) joints.

    Joints are green dots; bones flagged "left" are red, the rest black,
    unless a single `color` is forced for all bones.
    """
    pt2d = np.reshape(pt2d, [-1, 2])
    if not isinstance(image, np.ndarray):  # no canvas given: white 1000x1000
        image = np.ones((1000, 1000, 3), np.uint8) * 255
    radius = 4
    if pt2d.shape[0] == 16:
        print("Vis_Skeleton_2D_H36m: {} joints".format(16))
        # 16-joint variant (no nose).
        link2D = [[0, 1], [1, 2], [2, 3],
                  [0, 4], [4, 5], [5, 6],
                  [0, 7], [7, 8], [8, 9],
                  [8, 10], [10, 11], [11, 12],
                  [8, 13], [13, 14], [14, 15]]
        bLeft = [0, 0, 0,
                 1, 1, 1,
                 1, 1, 1,
                 1, 1, 1,
                 0, 0, 0]
    elif pt2d.shape[0] == 17:
        print("Vis_Skeleton_2D_H36m: {} joints".format(17))
        # 17-joint variant (with nose).
        link2D = [[0, 1], [1, 2], [2, 3],
                  [0, 4], [4, 5], [5, 6],
                  [0, 7], [7, 8], [8, 9], [9, 10],
                  [8, 11], [11, 12], [12, 13],
                  [8, 14], [14, 15], [15, 16]]
        bLeft = [0, 0, 0,
                 1, 1, 1,
                 1, 1, 1, 1,
                 1, 1, 1,
                 0, 0, 0]
    else:
        print("Vis_Skeleton_2D_H36m: {} joints".format(32))
        # Full 32-joint H36M mocap skeleton.
        link2D = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5],
                  [0, 6], [6, 7], [7, 8], [8, 9], [9, 10],
                  [11, 12], [12, 13], [13, 14], [14, 15],
                  [16, 17], [17, 18], [18, 19], [20, 21], [20, 22],
                  [24, 25], [25, 26], [26, 27], [27, 29], [27, 30]]
        bLeft = [0, 0, 0, 0, 0,
                 1, 1, 1, 1, 1,
                 1, 1, 1, 1,
                 1, 1, 1, 1, 1,
                 0, 0, 0, 0, 0]
    for jx, jy in pt2d:
        cv2.circle(image, (int(jx), int(jy)), radius, (0, 255, 0), -1)
    for (parent, child), is_left in zip(link2D, bLeft):
        if color is not None:
            c = color
        else:
            c = (0, 0, 255) if is_left else (0, 0, 0)
        cv2.line(image,
                 (int(pt2d[parent][0]), int(pt2d[parent][1])),
                 (int(pt2d[child][0]), int(pt2d[child][1])),
                 c, radius - 2)
    return image
180,890 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def Vis_Skeleton_2D_SMC19(pt2d, image=None, color=None):
    """Draw a 19-joint SMC skeleton (left side red, right side black)."""
    pt2d = np.reshape(pt2d, [-1, 2])
    if not isinstance(image, np.ndarray):  # no canvas given: white 1000x1000
        image = np.ones((1000, 1000, 3), np.uint8) * 255
    radius = 4
    assert pt2d.shape[0] == 19
    print("Vis_Skeleton_2D_H36m: {} joints".format(16))
    # Bones paired with a left-side flag.
    link2D = [[0, 1], [0, 2],                 # neck->nose, neck->bodyCenter
              [0, 3], [3, 4], [4, 5],        # left arm
              [2, 6], [6, 7], [7, 8],        # left leg
              [2, 12], [12, 13], [13, 14],   # right leg
              [0, 9], [9, 10], [10, 11],     # right arm
              [1, 15], [15, 16],             # left eye/ear
              [1, 17], [17, 18]]             # right eye/ear
    bLeft = [1, 1,
             1, 1, 1,
             1, 1, 1,
             0, 0, 0,
             0, 0, 0,
             1, 1,
             0, 0]
    for jx, jy in pt2d:
        cv2.circle(image, (int(jx), int(jy)), radius, (0, 255, 0), -1)
    for (parent, child), is_left in zip(link2D, bLeft):
        if color is not None:
            c = color
        else:
            c = (0, 0, 255) if is_left else (0, 0, 0)
        cv2.line(image,
                 (int(pt2d[parent][0]), int(pt2d[parent][1])),
                 (int(pt2d[child][0]), int(pt2d[child][1])),
                 c, radius - 2)
    return image
180,892 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def Vis_Skeleton_2D_Hand(pt2d, image=None, color=None):
    """Draw a 21-joint hand skeleton (wrist + 4 joints per finger), yellow bones."""
    pt2d = np.reshape(pt2d, [-1, 2])
    if not isinstance(image, np.ndarray):  # no canvas given: white 1000x1000
        image = np.ones((1000, 1000, 3), np.uint8) * 255
    radius = 4
    print("Vis_Skeleton_2D_H36m: {} joints".format(16))
    # Wrist (0) to each finger chain: thumb, index, middle, ring, pinky.
    link2D = [[0, 1], [1, 2], [2, 3], [3, 4],
              [0, 5], [5, 6], [6, 7], [7, 8],
              [0, 9], [9, 10], [10, 11], [11, 12],
              [0, 13], [13, 14], [14, 15], [15, 16],
              [0, 17], [17, 18], [18, 19], [19, 20]]
    for jx, jy in pt2d:
        cv2.circle(image, (int(jx), int(jy)), radius, (0, 255, 0), -1)
    for parent, child in link2D:
        c = color if color is not None else (0, 255, 255)
        cv2.line(image,
                 (int(pt2d[parent][0]), int(pt2d[parent][1])),
                 (int(pt2d[child][0]), int(pt2d[child][1])),
                 c, radius - 2)
    return image
180,893 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def ImShow(inputImg, waitTime=1, bConvRGB2BGR=False, name='image', scale=1.0):
    """Show an image in an OpenCV window and pump the event loop.

    BUG FIX: cv2.resize takes dsize=(width, height) = (shape[1], shape[0]);
    the old code swapped the axes AND truncated the scale via int(scale)
    (e.g. scale=0.5 requested a 0x0 resize).
    """
    inputImg = __ValidateNumpyImg(inputImg)
    if scale != 1.0:
        new_size = (int(inputImg.shape[1] * scale), int(inputImg.shape[0] * scale))
        inputImg = cv2.resize(inputImg, new_size)
    if bConvRGB2BGR:
        inputImg = cv2.cvtColor(inputImg, cv2.COLOR_RGB2BGR)
    cv2.imshow(name, inputImg)
    global veryFirstImShow
    if False:  # veryFirstImShow: first-show blocking wait, intentionally disabled
        print(">> Press any key to move on")
        cv2.waitKey(0)  # the initial one is always blank... why?
        veryFirstImShow = 0
    else:
        cv2.waitKey(waitTime)
def Vis_Bbox_minmaxPt(inputImg, min_pt, max_pt, color=None):
    """Draw a box given min/max corners (delegates to Vis_Bbox as [x, y, w, h])."""
    bbox_xyhw = [min_pt[0], min_pt[1],
                 max_pt[0] - min_pt[0], max_pt[1] - min_pt[1]]
    return Vis_Bbox(inputImg, bbox_xyhw, color)
def Vis_Skeleton_2D_smplCOCO(pt2d, pt2d_visibility=None, image=None, blankImSize=1000, bVis=False, color=None, bBoxWidth=None):
    """Draw an SMPL-COCO skeleton (18, 19/20 or 26-joint layouts).

    pt2d_visibility may be one flag per joint or interleaved per coordinate
    (len == 2 * num_joints); the interleaved form is collapsed to per-joint.
    Optionally draws a [0,0]-[bBoxWidth,bBoxWidth] box and shows the result.
    """
    pt2d = np.reshape(pt2d, [-1, 2])
    if pt2d_visibility is not None and len(pt2d_visibility) == len(pt2d) * 2:
        pt2d_visibility = pt2d_visibility[::2]  # one flag per joint
    if not isinstance(image, np.ndarray):  # no canvas given: white canvas
        image = np.ones((blankImSize, blankImSize, 3), np.uint8) * 255
    radius = 4
    num_joints = pt2d.shape[0]
    if num_joints in (19, 20):
        link2D = [[12, 2], [2, 1], [1, 0],        # right leg
                  [12, 3], [3, 4], [4, 5],        # left leg
                  [12, 9], [9, 10], [10, 11],     # left arm
                  [12, 8], [8, 7], [7, 6],        # right arm
                  [12, 14], [14, 16], [16, 18],   # neck->nose->rEye->rEar
                  [14, 15], [15, 17],             # nose->lEye->lEar
                  [14, 13]]                       # nose->headTop
        bLeft = [0, 0, 0,
                 1, 1, 1,
                 0, 0, 0,
                 1, 1, 1,
                 1, 1, 1,
                 11, 0, 11, 0]
    elif num_joints == 18:  # no headTop joint
        link2D = [[12, 2], [2, 1], [1, 0],
                  [12, 3], [3, 4], [4, 5],
                  [12, 9], [9, 10], [10, 11],
                  [12, 8], [8, 7], [7, 6],
                  [12, 13], [13, 15], [15, 17],
                  [13, 14], [14, 16]]
        bLeft = [0, 0, 0,
                 1, 1, 1,
                 1, 1, 1,
                 0, 0, 0,
                 1, 0, 0,
                 1, 1]
    elif num_joints == 26:  # SMPLCOCO total-capture 26 (extra head + foot tips)
        link2D = [[12, 2], [2, 1], [1, 0],
                  [12, 3], [3, 4], [4, 5],
                  [12, 9], [9, 10], [10, 11],
                  [12, 8], [8, 7], [7, 6],
                  [12, 14], [14, 16], [16, 18],
                  [14, 15], [15, 17],
                  [12, 19],
                  [5, 20], [5, 21], [5, 22],
                  [0, 23], [0, 24], [0, 25]]
        bLeft = [0, 0, 0,
                 1, 1, 1,
                 1, 1, 1,
                 0, 0, 0,
                 1, 0, 0,
                 1, 1,
                 1,
                 1, 1, 1,
                 0, 0, 0]
    else:
        assert False
    for j, (jx, jy) in enumerate(pt2d):
        if pt2d_visibility is None or pt2d_visibility[j]:
            cv2.circle(image, (int(jx), int(jy)), radius, (0, 255, 0), -1)
    for (parent, child), is_left in zip(link2D, bLeft):
        if color is not None:
            c = color
        else:
            c = (0, 0, 255) if is_left else (0, 0, 0)
        if pt2d_visibility is not None and not (pt2d_visibility[parent] and pt2d_visibility[child]):
            continue
        cv2.line(image,
                 (int(pt2d[parent][0]), int(pt2d[parent][1])),
                 (int(pt2d[child][0]), int(pt2d[child][1])),
                 c, radius - 2)
    if bBoxWidth is not None:
        image = Vis_Bbox_minmaxPt(image, [0, 0], [bBoxWidth, bBoxWidth])
    if bVis:
        ImShow(image, name='Vis_Skeleton_2D_smplCOCO')
    return image
180,894 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def ImShow(inputImg, waitTime=1, bConvRGB2BGR=False, name='image', scale=1.0):
    """Show an image in an OpenCV window and pump the event loop.

    BUG FIX: cv2.resize takes dsize=(width, height) = (shape[1], shape[0]);
    the old code swapped the axes AND truncated the scale via int(scale)
    (e.g. scale=0.5 requested a 0x0 resize).
    """
    inputImg = __ValidateNumpyImg(inputImg)
    if scale != 1.0:
        new_size = (int(inputImg.shape[1] * scale), int(inputImg.shape[0] * scale))
        inputImg = cv2.resize(inputImg, new_size)
    if bConvRGB2BGR:
        inputImg = cv2.cvtColor(inputImg, cv2.COLOR_RGB2BGR)
    cv2.imshow(name, inputImg)
    global veryFirstImShow
    if False:  # veryFirstImShow: first-show blocking wait, intentionally disabled
        print(">> Press any key to move on")
        cv2.waitKey(0)  # the initial one is always blank... why?
        veryFirstImShow = 0
    else:
        cv2.waitKey(waitTime)
def Vis_Skeleton_2D_smpl24(pt2d, image=None, bVis=False, color=None):
    """Draw the 24 SMPL LBS joints; bones red unless a color is forced.

    (The original carried a left/right table that was never consulted —
    every bone was drawn red — so all bones use one color here too.)
    """
    pt2d = np.reshape(pt2d, (-1, 2))
    if not isinstance(image, np.ndarray):  # no canvas given: white 1000x1000
        image = np.ones((1000, 1000, 3), np.uint8) * 255
    radius = 4
    link2D = [[0, 3], [3, 6], [6, 9], [9, 12], [12, 15],       # root -> head
              [9, 13], [13, 16], [16, 18], [18, 20], [20, 22], # left arm
              [9, 14], [14, 17], [17, 19], [19, 21], [21, 23], # right arm
              [0, 1], [1, 4], [4, 7], [7, 10],                 # left leg
              [0, 2], [2, 5], [5, 8], [8, 11]]                 # right leg
    for jx, jy in pt2d:
        cv2.circle(image, (int(jx), int(jy)), radius, (0, 255, 0), -1)
    for parent, child in link2D:
        c = color if color is not None else (0, 0, 255)
        cv2.line(image,
                 (int(pt2d[parent][0]), int(pt2d[parent][1])),
                 (int(pt2d[child][0]), int(pt2d[child][1])),
                 c, radius - 2)
    if bVis:
        ImShow(image)
    return image
180,895 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def ImShow(inputImg, waitTime=1, bConvRGB2BGR=False, name='image', scale=1.0):
    """Show an image in an OpenCV window and pump the event loop.

    BUG FIX: cv2.resize takes dsize=(width, height) = (shape[1], shape[0]);
    the old code swapped the axes AND truncated the scale via int(scale)
    (e.g. scale=0.5 requested a 0x0 resize).
    """
    inputImg = __ValidateNumpyImg(inputImg)
    if scale != 1.0:
        new_size = (int(inputImg.shape[1] * scale), int(inputImg.shape[0] * scale))
        inputImg = cv2.resize(inputImg, new_size)
    if bConvRGB2BGR:
        inputImg = cv2.cvtColor(inputImg, cv2.COLOR_RGB2BGR)
    cv2.imshow(name, inputImg)
    global veryFirstImShow
    if False:  # veryFirstImShow: first-show blocking wait, intentionally disabled
        print(">> Press any key to move on")
        cv2.waitKey(0)  # the initial one is always blank... why?
        veryFirstImShow = 0
    else:
        cv2.waitKey(waitTime)
def Vis_Skeleton_2D_smpl45(pt2d, image=None, bVis=False, color=None):
    """Draw an SMPL skeleton using the first 24 LBS joints of a 45-joint set.

    Bones are red unless a color is forced. (The original's left/right table
    was never consulted — every bone was drawn red — so one color is used.)
    """
    pt2d = np.reshape(pt2d, (-1, 2))
    if not isinstance(image, np.ndarray):  # no canvas given: white 1000x1000
        image = np.ones((1000, 1000, 3), np.uint8) * 255
    radius = 4
    link2D = [[0, 3], [3, 6], [6, 9], [9, 12], [12, 15],       # root -> head
              [9, 13], [13, 16], [16, 18], [18, 20], [20, 22], # left arm
              [9, 14], [14, 17], [17, 19], [19, 21], [21, 23], # right arm
              [0, 1], [1, 4], [4, 7], [7, 10],                 # left leg
              [0, 2], [2, 5], [5, 8], [8, 11]]                 # right leg
    for jx, jy in pt2d:
        cv2.circle(image, (int(jx), int(jy)), radius, (0, 255, 0), -1)
    for parent, child in link2D:
        c = color if color is not None else (0, 0, 255)
        cv2.line(image,
                 (int(pt2d[parent][0]), int(pt2d[parent][1])),
                 (int(pt2d[child][0]), int(pt2d[child][1])),
                 c, radius - 2)
    if bVis:
        ImShow(image)
    return image
180,896 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def ImShow(inputImg, waitTime=1, bConvRGB2BGR=False, name='image', scale=1.0):
    """Show an image in an OpenCV window and pump the event loop.

    BUG FIX: cv2.resize takes dsize=(width, height) = (shape[1], shape[0]);
    the old code swapped the axes AND truncated the scale via int(scale)
    (e.g. scale=0.5 requested a 0x0 resize).
    """
    inputImg = __ValidateNumpyImg(inputImg)
    if scale != 1.0:
        new_size = (int(inputImg.shape[1] * scale), int(inputImg.shape[0] * scale))
        inputImg = cv2.resize(inputImg, new_size)
    if bConvRGB2BGR:
        inputImg = cv2.cvtColor(inputImg, cv2.COLOR_RGB2BGR)
    cv2.imshow(name, inputImg)
    global veryFirstImShow
    if False:  # veryFirstImShow: first-show blocking wait, intentionally disabled
        print(">> Press any key to move on")
        cv2.waitKey(0)  # the initial one is always blank... why?
        veryFirstImShow = 0
    else:
        cv2.waitKey(waitTime)
def Vis_Skeleton_2D_MPII(pt2d, pt2d_visibility=None, image=None, bVis=False, color=None):
    """Draw a 16-joint MPII skeleton; left side red, right side black.

    Joints/bones with a falsy visibility flag are skipped when
    pt2d_visibility is given.
    """
    pt2d = np.reshape(pt2d, (-1, 2))
    if not isinstance(image, np.ndarray):  # no canvas given: white 1000x1000
        image = np.ones((1000, 1000, 3), np.uint8) * 255
    radius = 4
    link2D = [[6, 7], [7, 8], [8, 9],        # pelvis -> thorax -> neck -> head
              [7, 12], [12, 11], [11, 10],   # right arm
              [7, 13], [13, 14], [14, 15],   # left arm
              [6, 2], [2, 1], [1, 0],        # right leg
              [6, 3], [3, 4], [4, 5]]        # left leg
    bLeft = [1, 1, 1,
             0, 0, 0,
             1, 1, 1,
             0, 0, 0,
             1, 1, 1]
    for j, (jx, jy) in enumerate(pt2d):
        if pt2d_visibility is None or pt2d_visibility[j]:
            cv2.circle(image, (int(jx), int(jy)), radius, (0, 255, 0), -1)
    for (parent, child), is_left in zip(link2D, bLeft):
        if color is not None:
            c = color
        else:
            c = (0, 0, 255) if is_left else (0, 0, 0)
        if pt2d_visibility is not None and not (pt2d_visibility[parent] and pt2d_visibility[child]):
            continue
        cv2.line(image,
                 (int(pt2d[parent][0]), int(pt2d[parent][1])),
                 (int(pt2d[child][0]), int(pt2d[child][1])),
                 c, radius - 2)
    if bVis:
        ImShow(image)
    return image
180,897 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def ImShow(inputImg, waitTime=1, bConvRGB2BGR=False, name='image', scale=1.0):
    """Show an image in an OpenCV window and pump the event loop.

    BUG FIX: cv2.resize takes dsize=(width, height) = (shape[1], shape[0]);
    the old code swapped the axes AND truncated the scale via int(scale)
    (e.g. scale=0.5 requested a 0x0 resize).
    """
    inputImg = __ValidateNumpyImg(inputImg)
    if scale != 1.0:
        new_size = (int(inputImg.shape[1] * scale), int(inputImg.shape[0] * scale))
        inputImg = cv2.resize(inputImg, new_size)
    if bConvRGB2BGR:
        inputImg = cv2.cvtColor(inputImg, cv2.COLOR_RGB2BGR)
    cv2.imshow(name, inputImg)
    global veryFirstImShow
    if False:  # veryFirstImShow: first-show blocking wait, intentionally disabled
        print(">> Press any key to move on")
        cv2.waitKey(0)  # the initial one is always blank... why?
        veryFirstImShow = 0
    else:
        cv2.waitKey(waitTime)
def Vis_Skeleton_2D_foot(pt2d, pt2d_visibility=None, image=None, bVis=False, color=None):
    """Draw 6 foot keypoints (3 per foot); left chain red, right chain black."""
    pt2d = np.reshape(pt2d, (-1, 2))
    if not isinstance(image, np.ndarray):  # no canvas given: white 1000x1000
        image = np.ones((1000, 1000, 3), np.uint8) * 255
    radius = 4
    link2D = [[0, 1], [1, 2],   # first foot chain
              [3, 4], [4, 5]]   # second foot chain
    bLeft = [1, 1,
             0, 0]
    for j, (jx, jy) in enumerate(pt2d):
        if pt2d_visibility is None or pt2d_visibility[j]:
            cv2.circle(image, (int(jx), int(jy)), radius, (0, 255, 0), -1)
    for (parent, child), is_left in zip(link2D, bLeft):
        if color is not None:
            c = color
        else:
            c = (0, 0, 255) if is_left else (0, 0, 0)
        if pt2d_visibility is not None and not (pt2d_visibility[parent] and pt2d_visibility[child]):
            continue
        cv2.line(image,
                 (int(pt2d[parent][0]), int(pt2d[parent][1])),
                 (int(pt2d[child][0]), int(pt2d[child][1])),
                 c, radius - 2)
    if bVis:
        ImShow(image)
    return image
180,898 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def Vis_Skeleton_2D_SPIN49(pt2d, pt2d_visibility = None, image = None, bVis = False, color=None):
def Vis_Skeleton_2D_Openpose25(pt2d, pt2d_visibility=None, image=None, bVis=False, color=None):
    """Draw an OpenPose BODY_25 skeleton; left limbs blue, right limbs black.

    49-point inputs (SPIN: 25 openpose + 24 superset joints) are routed to
    the SPIN-49 drawer.
    """
    pt2d = np.reshape(pt2d, (-1, 2))
    if pt2d.shape[0] == 49:
        return Vis_Skeleton_2D_SPIN49(pt2d, pt2d_visibility, image, bVis, color)
    if not isinstance(image, np.ndarray):  # no canvas given: white 1000x1000
        image = np.ones((1000, 1000, 3), np.uint8) * 255
    radius = 4
    link_openpose = [[8, 1], [1, 0], [0, 16], [16, 18], [0, 15], [15, 17],  # spine/head
                     [1, 2], [2, 3], [3, 4],                                # right arm
                     [1, 5], [5, 6], [6, 7],                                # left arm
                     [8, 12], [12, 13], [13, 14], [14, 21], [14, 19], [14, 20],  # left leg/foot
                     [8, 9], [9, 10], [10, 11], [11, 24], [11, 22], [11, 23]]    # right leg/foot
    bLeft = [1, 1, 1, 1, 0, 0,
             0, 0, 0,
             1, 1, 1,
             1, 1, 1, 1, 1, 1,
             0, 0, 0, 0, 0, 0]
    for j, (jx, jy) in enumerate(pt2d):
        if pt2d_visibility is None or pt2d_visibility[j]:
            cv2.circle(image, (int(jx), int(jy)), radius, (0, 255, 0), -1)
    for (parent, child), is_left in zip(link_openpose, bLeft):
        if color is not None:
            c = color
        else:
            c = (255, 0, 0) if is_left else (0, 0, 0)
        if pt2d_visibility is not None and not (pt2d_visibility[parent] and pt2d_visibility[child]):
            continue
        cv2.line(image,
                 (int(pt2d[parent][0]), int(pt2d[parent][1])),
                 (int(pt2d[child][0]), int(pt2d[child][1])),
                 c, radius - 2)
    return image
180,899 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def Vis_Skeleton_2D_Openpose_hand(pt2d, pt2d_visibility=None, image=None, bVis=False, color=None):
    """Draw an OpenPose 21-joint hand skeleton; bones blue unless a color is given."""
    pt2d = np.reshape(pt2d, (-1, 2))
    if not isinstance(image, np.ndarray):  # no canvas given: white 1000x1000
        image = np.ones((1000, 1000, 3), np.uint8) * 255
    radius = 4
    # Wrist (0) to each finger chain: thumb, index, middle, ring, pinky.
    link_openpose = [[0, 1], [1, 2], [2, 3], [3, 4],
                     [0, 5], [5, 6], [6, 7], [7, 8],
                     [0, 9], [9, 10], [10, 11], [11, 12],
                     [0, 13], [13, 14], [14, 15], [15, 16],
                     [0, 17], [17, 18], [18, 19], [19, 20]]
    for j, (jx, jy) in enumerate(pt2d):
        if pt2d_visibility is None or pt2d_visibility[j]:
            cv2.circle(image, (int(jx), int(jy)), radius, (0, 255, 0), -1)
    for parent, child in link_openpose:
        c = color if color is not None else (255, 0, 0)
        if pt2d_visibility is not None and not (pt2d_visibility[parent] and pt2d_visibility[child]):
            continue
        cv2.line(image,
                 (int(pt2d[parent][0]), int(pt2d[parent][1])),
                 (int(pt2d[child][0]), int(pt2d[child][1])),
                 c, radius - 2)
    return image
180,900 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def Vis_Skeleton_2D_Openpose18(pt2d, pt2d_visibility=None, image=None, bVis=False, color=None):
    """Draw an OpenPose COCO-18 skeleton; left-flagged bones blue, rest black."""
    pt2d = np.reshape(pt2d, (-1, 2))
    if not isinstance(image, np.ndarray):  # no canvas given: white 1000x1000
        image = np.ones((1000, 1000, 3), np.uint8) * 255
    radius = 4
    link_openpose = [[1, 0], [0, 14], [14, 16], [0, 15], [15, 17],  # head/eyes/ears
                     [1, 2], [2, 3], [3, 4],                        # right arm
                     [1, 5], [5, 6], [6, 7],                        # left arm
                     [1, 11], [11, 12], [12, 13],                   # left leg
                     [8, 1], [8, 9], [9, 10]]                       # right leg
    bLeft = [1, 1, 1, 1, 1,
             0, 0, 0,
             1, 1, 1,
             1, 1, 1,
             0, 0, 0]
    for j, (jx, jy) in enumerate(pt2d):
        if pt2d_visibility is None or pt2d_visibility[j]:
            cv2.circle(image, (int(jx), int(jy)), radius, (0, 255, 0), -1)
    for (parent, child), is_left in zip(link_openpose, bLeft):
        if color is not None:
            c = color
        else:
            c = (255, 0, 0) if is_left else (0, 0, 0)
        if pt2d_visibility is not None and not (pt2d_visibility[parent] and pt2d_visibility[child]):
            continue
        cv2.line(image,
                 (int(pt2d[parent][0]), int(pt2d[parent][1])),
                 (int(pt2d[child][0]), int(pt2d[child][1])),
                 c, radius - 2)
    return image
180,901 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def ImShow(inputImg, waitTime=1, bConvRGB2BGR=False,name='image', scale=1.0):
    """Display an image in an OpenCV window, optionally rescaled / RGB->BGR.

    Args:
        inputImg: image to display (validated via __ValidateNumpyImg).
        waitTime: cv2.waitKey delay in ms (1 -> non-blocking refresh).
        bConvRGB2BGR: convert from RGB to BGR before display when True.
        name: window title.
        scale: uniform resize factor applied before display.
    """
    inputImg = __ValidateNumpyImg(inputImg)

    if scale != 1.0:
        # BUGFIX: cv2.resize takes dsize as (width, height) = (shape[1], shape[0]),
        # and the scale must be applied before truncating to int. The original
        # `shape[0]*int(scale)` swapped the axes and collapsed any 1<scale<2 to 1x.
        new_w = int(inputImg.shape[1] * scale)
        new_h = int(inputImg.shape[0] * scale)
        inputImg = cv2.resize(inputImg, (new_w, new_h))

    if bConvRGB2BGR:
        inputImg = cv2.cvtColor(inputImg, cv2.COLOR_RGB2BGR)

    cv2.imshow(name, inputImg)

    global veryFirstImShow
    if False:  # veryFirstImShow:
        print(">> Press any key to move on")
        cv2.waitKey(0)  # the initial one is always blank... why?
        veryFirstImShow = 0
    else:
        cv2.waitKey(waitTime)
def Vis_Skeleton_2D_SPIN24(pt2d, pt2d_visibility = None, image = None, bVis = False, color=None):
    """Draw the SPIN "superset" 24 joints of a 49-joint (25 openpose + 24 SPIN)
    2D skeleton in red; the openpose part (indices 0-24) is intentionally not
    drawn. Shows the image when bVis is True and returns it."""
    pt2d = np.reshape(pt2d, (-1, 2))  # defensive reshape to (N, 2)

    # No image supplied -> draw on a blank white canvas.
    if not isinstance(image, np.ndarray):
        image = np.ones((1000, 1000, 3), np.uint8) * 255

    radius = 4

    # SPIN-24 kinematic tree, shifted by 25 to index past the openpose joints.
    spin24_bones = np.array([
        [14, 16], [16, 12], [12, 17], [17, 18],
        [12, 9], [9, 10], [10, 11],   # right arm
        [12, 8], [8, 7], [7, 6],      # left arm
        [14, 3], [3, 4], [4, 5],
        [14, 2], [2, 1], [1, 0],
    ]) + 25

    joint_color = color if color is not None else (0, 0, 255)  # BGR red
    # Only the SPIN superset joints (25..N-1) get circles.
    for k in range(25, len(pt2d)):
        if pt2d_visibility is None or pt2d_visibility[k]:
            cv2.circle(image, (int(pt2d[k][0]), int(pt2d[k][1])),
                       radius, joint_color, -1)

    bone_color = color if color is not None else (0, 0, 255)  # BGR red
    for parent, child in spin24_bones:
        if pt2d_visibility is not None and not (
                pt2d_visibility[parent] and pt2d_visibility[child]):
            continue
        cv2.line(image,
                 (int(pt2d[parent][0]), int(pt2d[parent][1])),
                 (int(pt2d[child][0]), int(pt2d[child][1])),
                 bone_color, radius - 2)

    if bVis:
        ImShow(image)
    return image
180,902 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def ImShow(inputImg, waitTime=1, bConvRGB2BGR=False,name='image', scale=1.0):
    """Display an image in an OpenCV window, optionally rescaled / RGB->BGR.

    Args:
        inputImg: image to display (validated via __ValidateNumpyImg).
        waitTime: cv2.waitKey delay in ms (1 -> non-blocking refresh).
        bConvRGB2BGR: convert from RGB to BGR before display when True.
        name: window title.
        scale: uniform resize factor applied before display.
    """
    inputImg = __ValidateNumpyImg(inputImg)

    if scale != 1.0:
        # BUGFIX: cv2.resize takes dsize as (width, height) = (shape[1], shape[0]),
        # and the scale must be applied before truncating to int. The original
        # `shape[0]*int(scale)` swapped the axes and collapsed any 1<scale<2 to 1x.
        new_w = int(inputImg.shape[1] * scale)
        new_h = int(inputImg.shape[0] * scale)
        inputImg = cv2.resize(inputImg, (new_w, new_h))

    if bConvRGB2BGR:
        inputImg = cv2.cvtColor(inputImg, cv2.COLOR_RGB2BGR)

    cv2.imshow(name, inputImg)

    global veryFirstImShow
    if False:  # veryFirstImShow:
        print(">> Press any key to move on")
        cv2.waitKey(0)  # the initial one is always blank... why?
        veryFirstImShow = 0
    else:
        cv2.waitKey(waitTime)
def Vis_Skeleton_2D_coco(pt2d, pt2d_visibility = None, image = None, bVis = False, color=None , offsetXY =None):
    """Draw a 17-joint COCO skeleton: joints red, bones blue (or both in
    `color` when it is given). `offsetXY` is accepted but not applied,
    matching the original signature. Shows the image when bVis is True."""
    pt2d = np.reshape(pt2d, (-1, 2))  # defensive reshape to (N, 2)

    # No image supplied -> draw on a blank white canvas.
    if not isinstance(image, np.ndarray):
        image = np.ones((1000, 1000, 3), np.uint8) * 255

    radius = 4

    # COCO keypoint bones: face, arms, then legs.
    coco_bones = [
        (0, 1), (1, 3), (0, 2), (2, 4),
        (6, 8), (8, 10),               # right arm
        (5, 7), (7, 9),                # left arm
        (15, 13), (13, 11), (11, 5),   # left leg
        (16, 14), (14, 12), (12, 6),   # right leg
    ]

    joint_color = color if color is not None else (0, 0, 255)  # BGR red
    for idx in range(len(pt2d)):
        if pt2d_visibility is None or pt2d_visibility[idx]:
            cv2.circle(image, (int(pt2d[idx][0]), int(pt2d[idx][1])),
                       radius, joint_color, -1)

    bone_color = color if color is not None else (255, 0, 0)  # BGR blue
    for parent, child in coco_bones:
        if pt2d_visibility is not None and not (
                pt2d_visibility[parent] and pt2d_visibility[child]):
            continue
        cv2.line(image,
                 (int(pt2d[parent][0]), int(pt2d[parent][1])),
                 (int(pt2d[child][0]), int(pt2d[child][1])),
                 bone_color, radius - 2)

    if bVis:
        ImShow(image)
    return image
180,903 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def ImShow(inputImg, waitTime=1, bConvRGB2BGR=False,name='image', scale=1.0):
    """Display an image in an OpenCV window, optionally rescaled / RGB->BGR.

    Args:
        inputImg: image to display (validated via __ValidateNumpyImg).
        waitTime: cv2.waitKey delay in ms (1 -> non-blocking refresh).
        bConvRGB2BGR: convert from RGB to BGR before display when True.
        name: window title.
        scale: uniform resize factor applied before display.
    """
    inputImg = __ValidateNumpyImg(inputImg)

    if scale != 1.0:
        # BUGFIX: cv2.resize takes dsize as (width, height) = (shape[1], shape[0]),
        # and the scale must be applied before truncating to int. The original
        # `shape[0]*int(scale)` swapped the axes and collapsed any 1<scale<2 to 1x.
        new_w = int(inputImg.shape[1] * scale)
        new_h = int(inputImg.shape[0] * scale)
        inputImg = cv2.resize(inputImg, (new_w, new_h))

    if bConvRGB2BGR:
        inputImg = cv2.cvtColor(inputImg, cv2.COLOR_RGB2BGR)

    cv2.imshow(name, inputImg)

    global veryFirstImShow
    if False:  # veryFirstImShow:
        print(">> Press any key to move on")
        cv2.waitKey(0)  # the initial one is always blank... why?
        veryFirstImShow = 0
    else:
        cv2.waitKey(waitTime)
def Vis_Skeleton_2D_SPIN49(pt2d, pt2d_visibility = None, image = None, bVis = False, color=None):
    """Draw a SPIN 49-joint 2D skeleton: openpose-25 bones in blue, SPIN-24
    bones in red, red circles on the SPIN superset joints (indices 25..48)
    only. A non-None `color` overrides every color. Shows the image when
    bVis is True and returns it."""
    pt2d = np.reshape(pt2d, (-1, 2))  # defensive reshape to (N, 2)

    # No image supplied -> draw on a blank white canvas.
    if not isinstance(image, np.ndarray):
        image = np.ones((1000, 1000, 3), np.uint8) * 255

    radius = 4

    # Openpose-25 tree (indices 0-24): 0 nose, 1 neck, 2-4 R arm, 5-7 L arm,
    # 8 mid-hip, 9-11 R leg, 12-14 L leg, 15-18 eyes/ears, 19-24 toes/heels.
    openpose_bones = [
        (8, 1), (1, 0), (0, 16), (16, 18), (0, 15), (15, 17),
        (1, 2), (2, 3), (3, 4),                                   # right arm
        (1, 5), (5, 6), (6, 7),                                   # left arm
        (8, 12), (12, 13), (13, 14), (14, 19), (19, 20), (20, 21),  # left leg
        (8, 9), (9, 10), (10, 11), (11, 22), (22, 23), (23, 24),    # right leg
    ]
    # SPIN-24 tree, shifted by 25 to index past the openpose joints.
    spin24_bones = np.array([
        [14, 16], [16, 12], [12, 17], [17, 18],
        [12, 9], [9, 10], [10, 11],   # right arm
        [12, 8], [8, 7], [7, 6],      # left arm
        [14, 3], [3, 4], [4, 5],
        [14, 2], [2, 1], [1, 0],
    ]) + 25

    def _visible(*ids):
        # All given joints visible (or no visibility mask supplied).
        return pt2d_visibility is None or all(pt2d_visibility[i] for i in ids)

    def _pt(i):
        return (int(pt2d[i][0]), int(pt2d[i][1]))

    joint_color = color if color is not None else (0, 0, 255)  # BGR red
    for k in range(25, len(pt2d)):
        if _visible(k):
            cv2.circle(image, _pt(k), radius, joint_color, -1)

    op_color = color if color is not None else (255, 0, 0)  # BGR blue
    for parent, child in openpose_bones:
        if _visible(parent, child):
            cv2.line(image, _pt(parent), _pt(child), op_color, radius - 2)

    sp_color = color if color is not None else (0, 0, 255)  # BGR red
    for parent, child in spin24_bones:
        if _visible(parent, child):
            cv2.line(image, _pt(parent), _pt(child), sp_color, radius - 2)

    if bVis:
        ImShow(image)
    return image
def Vis_Skeleton_2D_general(pt2d, pt2d_visibility = None, image = None, bVis = False, color=None , offsetXY =None):
    """Draw generic 2D joints as green dots; 49-joint input is dispatched to
    the SPIN-49 drawer instead. An optional (x, y) offset is applied first.
    Note: pt2d_visibility is only honored on the SPIN-49 path (original
    behavior)."""
    pt2d = np.reshape(pt2d, (-1, 2))  # defensive reshape to (N, 2)
    if offsetXY is not None:
        pt2d = pt2d + np.array(offsetXY)

    # SPIN 25 (openpose) + 24 (superset) joints -> specialized drawer.
    if pt2d.shape[0] == 49:
        return Vis_Skeleton_2D_SPIN49(pt2d, pt2d_visibility, image, bVis, color)

    # No image supplied -> draw on a blank white canvas.
    if not isinstance(image, np.ndarray):
        image = np.ones((1000, 1000, 3), np.uint8) * 255

    radius = 4
    for joint in pt2d:
        cv2.circle(image, (int(joint[0]), int(joint[1])), radius, (0, 255, 0), -1)

    if bVis:
        ImShow(image)
    return image
180,904 | import cv2
import numpy as np
import PIL
from PIL.Image import Image
def ImShow(inputImg, waitTime=1, bConvRGB2BGR=False,name='image', scale=1.0):
    """Display an image in an OpenCV window, optionally rescaled / RGB->BGR.

    Args:
        inputImg: image to display (validated via __ValidateNumpyImg).
        waitTime: cv2.waitKey delay in ms (1 -> non-blocking refresh).
        bConvRGB2BGR: convert from RGB to BGR before display when True.
        name: window title.
        scale: uniform resize factor applied before display.
    """
    inputImg = __ValidateNumpyImg(inputImg)

    if scale != 1.0:
        # BUGFIX: cv2.resize takes dsize as (width, height) = (shape[1], shape[0]),
        # and the scale must be applied before truncating to int. The original
        # `shape[0]*int(scale)` swapped the axes and collapsed any 1<scale<2 to 1x.
        new_w = int(inputImg.shape[1] * scale)
        new_h = int(inputImg.shape[0] * scale)
        inputImg = cv2.resize(inputImg, (new_w, new_h))

    if bConvRGB2BGR:
        inputImg = cv2.cvtColor(inputImg, cv2.COLOR_RGB2BGR)

    cv2.imshow(name, inputImg)

    global veryFirstImShow
    if False:  # veryFirstImShow:
        print(">> Press any key to move on")
        cv2.waitKey(0)  # the initial one is always blank... why?
        veryFirstImShow = 0
    else:
        cv2.waitKey(waitTime)
def Vis_Skeleton_2D_SPIN49(pt2d, pt2d_visibility = None, image = None, bVis = False, color=None):
    """Render a SPIN 49-joint 2D skeleton. Bones of the openpose-25 part are
    blue, bones of the SPIN-24 part are red, and red circles mark the SPIN
    superset joints (indices 25..48). `color`, when given, overrides all
    colors. Shows the result when bVis is True; returns the image."""
    pt2d = np.reshape(pt2d, (-1, 2))  # defensive reshape to (N, 2)

    if not isinstance(image, np.ndarray):
        # Fall back to a blank white 1000x1000 canvas.
        image = np.ones((1000, 1000, 3), np.uint8) * 255

    radius = 4

    # Openpose-25 links (indices 0-24): nose/neck chain, arms, legs with toes.
    op_links = [
        (8, 1), (1, 0), (0, 16), (16, 18), (0, 15), (15, 17),
        (1, 2), (2, 3), (3, 4),                                     # right arm
        (1, 5), (5, 6), (6, 7),                                     # left arm
        (8, 12), (12, 13), (13, 14), (14, 19), (19, 20), (20, 21),  # left leg
        (8, 9), (9, 10), (10, 11), (11, 22), (22, 23), (23, 24),    # right leg
    ]
    # SPIN-24 links, offset by the 25 openpose joints.
    sp_links = [(a + 25, b + 25) for a, b in [
        (14, 16), (16, 12), (12, 17), (17, 18),
        (12, 9), (9, 10), (10, 11),   # right arm
        (12, 8), (8, 7), (7, 6),      # left arm
        (14, 3), (3, 4), (4, 5),
        (14, 2), (2, 1), (1, 0),
    ]]

    def shown(*joint_ids):
        # True when no visibility mask is given or every joint is visible.
        return pt2d_visibility is None or all(pt2d_visibility[j] for j in joint_ids)

    def as_int_xy(j):
        return (int(pt2d[j][0]), int(pt2d[j][1]))

    circle_color = color if color is not None else (0, 0, 255)  # BGR red
    for j in range(25, len(pt2d)):
        if shown(j):
            cv2.circle(image, as_int_xy(j), radius, circle_color, -1)

    blue = color if color is not None else (255, 0, 0)  # BGR blue
    for a, b in op_links:
        if shown(a, b):
            cv2.line(image, as_int_xy(a), as_int_xy(b), blue, radius - 2)

    red = color if color is not None else (0, 0, 255)  # BGR red
    for a, b in sp_links:
        if shown(a, b):
            cv2.line(image, as_int_xy(a), as_int_xy(b), red, radius - 2)

    if bVis:
        ImShow(image)
    return image
def Vis_Skeleton_3Dto2D_general(pt2d, pt2d_visibility = None, image = None, bVis = False, color=None, offsetXY =None):
    """Draw 3D joints on a 2D image by dropping the z coordinate.

    49-joint input is dispatched to the SPIN-49 drawer; otherwise the (x, y)
    of every joint is drawn as a green dot. Returns the image.

    NOTE(review): offsetXY is accepted but never applied, matching the
    original signature — confirm whether callers expect an offset here.
    """
    pt3d = np.reshape(pt2d, (-1, 3))  # defensive reshape to (N, 3)
    # BUGFIX: keep only (x, y). The original passed the (N, 3) array straight
    # to Vis_Skeleton_2D_SPIN49, whose internal reshape to (-1, 2) raises for
    # 49 joints (49*3 = 147 is not divisible by 2).
    pt2d_xy = pt3d[:, :2]

    if pt3d.shape[0] == 49:  # SPIN 25 (openpose) + 24 (superset) joints
        return Vis_Skeleton_2D_SPIN49(pt2d_xy, pt2d_visibility, image, bVis, color)

    # No image supplied -> draw on a blank white canvas.
    if not isinstance(image, np.ndarray):
        image = np.ones((1000, 1000, 3), np.uint8) * 255

    radius = 4
    for joint in pt2d_xy:
        cv2.circle(image, (int(joint[0]), int(joint[1])), radius, (0, 255, 0), -1)

    if bVis:
        ImShow(image)
    return image
180,905 | import os
import sys
import os.path as osp
import torch
import numpy as np
import cv2
import argparse
import json
import pickle
import smplx
from datetime import datetime
from demo.demo_options import DemoOptions
from bodymocap.body_mocap_api import BodyMocap
import mocap_utils.demo_utils as demo_utils
import mocap_utils.general_utils as gnu
from bodymocap.models import SMPL, SMPLX
from handmocap.hand_modules.h3dw_model import extract_hand_output
from mocap_utils.coordconv import convert_smpl_to_bbox, convert_bbox_to_oriIm
def __get_data_type(pkl_files):
    """Return (demo_type, smpl_type) recorded in the first prediction pkl.

    Returns None implicitly when pkl_files is empty (original behavior).
    """
    first_pkl = next(iter(pkl_files), None)
    if first_pkl is None:
        return None
    data = gnu.load_pkl(first_pkl)
    return data['demo_type'], data['smpl_type']
180,906 | import os
import sys
import os.path as osp
import torch
import numpy as np
import cv2
import argparse
import json
import pickle
import smplx
from datetime import datetime
from demo.demo_options import DemoOptions
from bodymocap.body_mocap_api import BodyMocap
import mocap_utils.demo_utils as demo_utils
import mocap_utils.general_utils as gnu
from bodymocap.models import SMPL, SMPLX
from handmocap.hand_modules.h3dw_model import extract_hand_output
from mocap_utils.coordconv import convert_smpl_to_bbox, convert_bbox_to_oriIm
def __get_smpl_model(demo_type, smpl_type):
    """Build the body model matching the demo and model type.

    Args:
        demo_type: 'hand' selects the original smplx-package SMPL-X model;
            any other value selects the body-module models.
        smpl_type: 'smplx' or 'smpl' (body path only; asserted).

    Returns:
        The instantiated model (batch size 1, neutral, 10 betas, no PCA).
    """
    smplx_model_path = './extra_data/smpl/SMPLX_NEUTRAL.pkl'
    smpl_model_path = './extra_data/smpl//basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'

    if demo_type == 'hand':
        # Hand demo: use the original smpl-x implementation from smplx.
        return smplx.create(
            smplx_model_path,
            model_type="smplx",
            batch_size=1,
            gender='neutral',
            num_betas=10,
            use_pca=False,
            ext='pkl',
        )

    if smpl_type == 'smplx':
        # Body demo with the modified SMPL-X from the body module.
        return SMPLX(
            smplx_model_path,
            batch_size=1,
            num_betas=10,
            use_pca=False,
            create_transl=False,
        )

    # Body demo with the modified SMPL from the body module.
    assert smpl_type == 'smpl'
    return SMPL(
        smpl_model_path,
        batch_size=1,
        create_transl=False,
    )
180,907 | import os
import sys
import os.path as osp
import torch
import numpy as np
import cv2
import argparse
import json
import pickle
import smplx
from datetime import datetime
from demo.demo_options import DemoOptions
from bodymocap.body_mocap_api import BodyMocap
import mocap_utils.demo_utils as demo_utils
import mocap_utils.general_utils as gnu
from bodymocap.models import SMPL, SMPLX
from handmocap.hand_modules.h3dw_model import extract_hand_output
from mocap_utils.coordconv import convert_smpl_to_bbox, convert_bbox_to_oriIm
def __calc_mesh(demo_type, smpl_type, smpl_model, img_shape, pred_output_list):
    """Rebuild SMPL/SMPL-X vertices for each prediction, in place.

    For every non-None entry of pred_output_list this fills in
    'pred_vertices_smpl' (model space) and 'pred_vertices_img' (original
    image space); the body path also stores 'faces'.

    Args:
        demo_type: 'hand' for hand-only predictions, anything else for body.
        smpl_type: 'smpl' or 'smplx' (forwarded to _calc_body_mesh).
        smpl_model: model forwarded to __calc_hand_mesh / _calc_body_mesh.
        img_shape: (height, width) of the original image.
        pred_output_list: list of prediction dicts, modified in place.
    """
    for pred_output in pred_output_list:
        if pred_output is not None:
            # hand
            if demo_type == 'hand':
                assert 'left_hand' in pred_output and 'right_hand' in pred_output
                for hand_type in pred_output:
                    hand_pred = pred_output[hand_type]
                    if hand_pred is not None:
                        pose_params = torch.from_numpy(hand_pred['pred_hand_pose'])
                        betas = torch.from_numpy(hand_pred['pred_hand_betas'])
                        pred_verts, hand_faces = __calc_hand_mesh(hand_type, pose_params, betas, smpl_model)
                        hand_pred['pred_vertices_smpl'] = pred_verts
                        # pred_camera layout appears to be [scale, trans...]
                        # — TODO confirm against the regressor output.
                        cam_scale = hand_pred['pred_camera'][0]
                        cam_trans = hand_pred['pred_camera'][1:]
                        # NOTE: bAppTransFirst=True only on the hand path
                        # (body path below uses False).
                        vert_bboxcoord = convert_smpl_to_bbox(
                            pred_verts, cam_scale, cam_trans, bAppTransFirst=True) # SMPL space -> bbox space
                        bbox_scale_ratio = hand_pred['bbox_scale_ratio']
                        bbox_top_left = hand_pred['bbox_top_left']
                        vert_imgcoord = convert_bbox_to_oriIm(
                            vert_bboxcoord, bbox_scale_ratio, bbox_top_left,
                            img_shape[1], img_shape[0])
                        pred_output[hand_type]['pred_vertices_img'] = vert_imgcoord
            # body
            else:
                pose_params = torch.from_numpy(pred_output['pred_body_pose'])
                betas = torch.from_numpy(pred_output['pred_betas'])
                # Missing hand poses default to zeros (shape (1, 45)).
                if 'pred_right_hand_pose' in pred_output:
                    pred_right_hand_pose = torch.from_numpy(pred_output['pred_right_hand_pose'])
                else:
                    pred_right_hand_pose = torch.zeros((1, 45), dtype=torch.float32)
                if 'pred_left_hand_pose' in pred_output:
                    pred_left_hand_pose = torch.from_numpy(pred_output['pred_left_hand_pose'])
                else:
                    pred_left_hand_pose = torch.zeros((1, 45), dtype=torch.float32)
                pred_verts, faces = _calc_body_mesh(
                    smpl_type, smpl_model, pose_params, betas, pred_left_hand_pose, pred_right_hand_pose)
                pred_output['pred_vertices_smpl'] = pred_verts
                pred_output['faces'] = faces
                cam_scale = pred_output['pred_camera'][0]
                cam_trans = pred_output['pred_camera'][1:]
                vert_bboxcoord = convert_smpl_to_bbox(
                    pred_verts, cam_scale, cam_trans, bAppTransFirst=False) # SMPL space -> bbox space
                bbox_scale_ratio = pred_output['bbox_scale_ratio']
                bbox_top_left = pred_output['bbox_top_left']
                vert_imgcoord = convert_bbox_to_oriIm(
                    vert_bboxcoord, bbox_scale_ratio, bbox_top_left,
                    img_shape[1], img_shape[0])
                pred_output['pred_vertices_img'] = vert_imgcoord
def visualize_prediction(args, demo_type, smpl_type, smpl_model, pkl_files, visualizer):
    """Re-render saved mocap predictions and write result images.

    Args:
        args: parsed demo options (out_dir, save_pred_pkl, ...).
        demo_type: initial demo type; overridden per pkl file.
        smpl_type: 'smpl' or 'smplx'; must match what was saved (asserted).
        smpl_model: model used by __calc_mesh when meshes were not saved.
        pkl_files: prediction pkl paths produced by the demo.
        visualizer: renderer with a .visualize(...) method.
    """
    for pkl_file in pkl_files:
        # load data
        saved_data = gnu.load_pkl(pkl_file)

        image_path = saved_data['image_path']
        img_original_bgr = cv2.imread(image_path)
        if img_original_bgr is None:
            print(f"{image_path} does not exists, skip")
            # BUGFIX: the original printed "skip" but fell through, then
            # crashed using the None image below.
            continue
        print("--------------------------------------")

        demo_type = saved_data['demo_type']
        assert saved_data['smpl_type'] == smpl_type

        hand_bbox_list = saved_data['hand_bbox_list']
        body_bbox_list = saved_data['body_bbox_list']
        pred_output_list = saved_data['pred_output_list']

        if not saved_data['save_mesh']:
            # meshes not saved -> rebuild vertices from the stored parameters
            __calc_mesh(
                demo_type, smpl_type, smpl_model, img_original_bgr.shape[:2], pred_output_list)
        else:
            pass

        pred_mesh_list = demo_utils.extract_mesh_from_output(pred_output_list)

        # visualization
        res_img = visualizer.visualize(
            img_original_bgr,
            pred_mesh_list = pred_mesh_list,
            body_bbox_list = body_bbox_list,
            hand_bbox_list = hand_bbox_list)

        # save result image
        demo_utils.save_res_img(args.out_dir, image_path, res_img)

        # save predictions to pkl
        if args.save_pred_pkl:
            args.use_smplx = smpl_type == 'smplx'
            demo_utils.save_pred_to_pkl(
                args, demo_type, image_path, body_bbox_list, hand_bbox_list, pred_output_list)
180,908 | import os, sys, shutil
import os.path as osp
import numpy as np
import cv2
import json
import torch
from torchvision.transforms import Normalize
from demo.demo_options import DemoOptions
import mocap_utils.general_utils as gnu
import mocap_utils.demo_utils as demo_utils
from handmocap.hand_mocap_api import HandMocap
from handmocap.hand_bbox_detector import HandBboxDetector
import renderer.image_utils as imu
from renderer.viewer2D import ImShow
import time
def ImShow(inputImg, waitTime=1, bConvRGB2BGR=False,name='image', scale=1.0):
    """Display an image in an OpenCV window, optionally rescaled / RGB->BGR.

    Args:
        inputImg: image to display (validated via __ValidateNumpyImg).
        waitTime: cv2.waitKey delay in ms (1 -> non-blocking refresh).
        bConvRGB2BGR: convert from RGB to BGR before display when True.
        name: window title.
        scale: uniform resize factor applied before display.
    """
    inputImg = __ValidateNumpyImg(inputImg)

    if scale != 1.0:
        # BUGFIX: cv2.resize takes dsize as (width, height) = (shape[1], shape[0]),
        # and the scale must be applied before truncating to int. The original
        # `shape[0]*int(scale)` swapped the axes and collapsed any 1<scale<2 to 1x.
        new_w = int(inputImg.shape[1] * scale)
        new_h = int(inputImg.shape[0] * scale)
        inputImg = cv2.resize(inputImg, (new_w, new_h))

    if bConvRGB2BGR:
        inputImg = cv2.cvtColor(inputImg, cv2.COLOR_RGB2BGR)

    cv2.imshow(name, inputImg)

    global veryFirstImShow
    if False:  # veryFirstImShow:
        print(">> Press any key to move on")
        cv2.waitKey(0)  # the initial one is always blank... why?
        veryFirstImShow = 0
    else:
        cv2.waitKey(waitTime)
def run_hand_mocap(args, bbox_detector, hand_mocap, visualizer):
    """Main hand-mocap demo loop over image-dir / bbox-dir / video / webcam input.

    Per frame: obtain hand bounding boxes (pre-computed, full-frame crop, or
    detected), regress hand pose, render, and optionally save the result
    image / bbox json / prediction pkl. Stops when input is exhausted or
    cur_frame passes args.end_frame.
    """
    #Set up input data (images or webcam)
    input_type, input_data = demo_utils.setup_input(args)

    assert args.out_dir is not None, "Please specify output dir to store the results"
    cur_frame = args.start_frame
    video_frame = 0  # counts frames actually consumed from video/webcam

    while True:
        # load data
        load_bbox = False

        if input_type =='image_dir':
            if cur_frame < len(input_data):
                image_path = input_data[cur_frame]
                img_original_bgr = cv2.imread(image_path)
            else:
                img_original_bgr = None

        elif input_type == 'bbox_dir':
            if cur_frame < len(input_data):
                print("Use pre-computed bounding boxes")
                image_path = input_data[cur_frame]['image_path']
                hand_bbox_list = input_data[cur_frame]['hand_bbox_list']
                body_bbox_list = input_data[cur_frame]['body_bbox_list']
                img_original_bgr = cv2.imread(image_path)
                load_bbox = True
            else:
                img_original_bgr = None

        elif input_type == 'video':
            _, img_original_bgr = input_data.read()
            # Skip already-read frames until reaching the requested start frame.
            if video_frame < cur_frame:
                video_frame += 1
                continue
            # save the obtained video frames
            image_path = osp.join(args.out_dir, "frames", f"{cur_frame:05d}.jpg")
            if img_original_bgr is not None:
                video_frame += 1
                if args.save_frame:
                    gnu.make_subdir(image_path)
                    cv2.imwrite(image_path, img_original_bgr)

        elif input_type == 'webcam':
            _, img_original_bgr = input_data.read()
            if video_frame < cur_frame:
                video_frame += 1
                continue
            # save the obtained video frames
            image_path = osp.join(args.out_dir, "frames", f"scene_{cur_frame:05d}.jpg")
            if img_original_bgr is not None:
                video_frame += 1
                if args.save_frame:
                    gnu.make_subdir(image_path)
                    cv2.imwrite(image_path, img_original_bgr)
        else:
            assert False, "Unknown input_type"

        cur_frame +=1
        if img_original_bgr is None or cur_frame > args.end_frame:
            break
        print("--------------------------------------")

        # bbox detection
        if load_bbox:
            # bboxes came from the bbox_dir input above
            body_pose_list = None
            raw_hand_bboxes = None
        elif args.crop_type == 'hand_crop':
            # hand already cropped, thererore, no need for detection
            img_h, img_w = img_original_bgr.shape[:2]
            body_pose_list = None
            raw_hand_bboxes = None
            # the whole image is treated as a right-hand bbox (x, y, w, h)
            hand_bbox_list = [ dict(right_hand = np.array([0, 0, img_w, img_h])) ]
        else:
            # Input images has other body part or hand not cropped.
            # Use hand detection model & body detector for hand detection
            assert args.crop_type == 'no_crop'
            detect_output = bbox_detector.detect_hand_bbox(img_original_bgr.copy())
            body_pose_list, body_bbox_list, hand_bbox_list, raw_hand_bboxes = detect_output

        # save the obtained body & hand bbox to json file
        # NOTE(review): body_bbox_list is never assigned on the 'hand_crop'
        # path, so the json saving and the asserts below would raise
        # NameError there — confirm that path is only used without
        # save_bbox_output.
        if args.save_bbox_output:
            demo_utils.save_info_to_json(args, image_path, body_bbox_list, hand_bbox_list)

        if len(hand_bbox_list) < 1:
            print(f"No hand deteced: {image_path}")
            continue

        # Hand Pose Regression
        pred_output_list = hand_mocap.regress(
            img_original_bgr, hand_bbox_list, add_margin=True)
        assert len(hand_bbox_list) == len(body_bbox_list)
        assert len(body_bbox_list) == len(pred_output_list)

        # extract mesh for rendering (vertices in image space and faces) from pred_output_list
        pred_mesh_list = demo_utils.extract_mesh_from_output(pred_output_list)

        # visualize
        res_img = visualizer.visualize(
            img_original_bgr,
            pred_mesh_list = pred_mesh_list,
            hand_bbox_list = hand_bbox_list)

        # show result in the screen
        if not args.no_display:
            res_img = res_img.astype(np.uint8)
            ImShow(res_img)

        # save the image (we can make an option here)
        if args.out_dir is not None:
            demo_utils.save_res_img(args.out_dir, image_path, res_img)

        # save predictions to pkl
        if args.save_pred_pkl:
            demo_type = 'hand'
            demo_utils.save_pred_to_pkl(
                args, demo_type, image_path, body_bbox_list, hand_bbox_list, pred_output_list)

        print(f"Processed : {image_path}")

    #save images as a video
    if not args.no_video_out and input_type in ['video', 'webcam']:
        demo_utils.gen_video_out(args.out_dir, args.seq_name)

    # When everything done, release the capture
    if input_type =='webcam' and input_data is not None:
        input_data.release()
    cv2.destroyAllWindows()
180,909 | import os
import sys
import os.path as osp
import torch
from torchvision.transforms import Normalize
import numpy as np
import cv2
import argparse
import json
import pickle
from datetime import datetime
from demo.demo_options import DemoOptions
from bodymocap.body_mocap_api import BodyMocap
from bodymocap.body_bbox_detector import BodyPoseEstimator
import mocap_utils.demo_utils as demo_utils
import mocap_utils.general_utils as gnu
from mocap_utils.timer import Timer
import renderer.image_utils as imu
from renderer.viewer2D import ImShow
class Timer(object):
def __init__(self):
def tic(self):
def toc(self, average=True, bPrint=False,title="Time"):
def reset(self):
def ImShow(inputImg, waitTime=1, bConvRGB2BGR=False,name='image', scale=1.0):
def run_body_mocap(args, body_bbox_detector, body_mocap, visualizer):
    """Main body-mocap demo loop over image-dir / bbox-dir / video / webcam input.

    Per frame: detect (or load) body bounding boxes, sort them big-to-small,
    regress body pose, render, and optionally save the result image / bbox
    json / prediction pkl. Stops when input is exhausted or cur_frame passes
    args.end_frame.
    """
    #Setup input data to handle different types of inputs
    input_type, input_data = demo_utils.setup_input(args)

    cur_frame = args.start_frame
    video_frame = 0  # counts frames actually consumed from video/webcam
    timer = Timer()
    while True:
        timer.tic()
        # load data
        load_bbox = False

        if input_type =='image_dir':
            if cur_frame < len(input_data):
                image_path = input_data[cur_frame]
                img_original_bgr = cv2.imread(image_path)
            else:
                img_original_bgr = None

        elif input_type == 'bbox_dir':
            if cur_frame < len(input_data):
                print("Use pre-computed bounding boxes")
                image_path = input_data[cur_frame]['image_path']
                hand_bbox_list = input_data[cur_frame]['hand_bbox_list']
                body_bbox_list = input_data[cur_frame]['body_bbox_list']
                img_original_bgr = cv2.imread(image_path)
                load_bbox = True
            else:
                img_original_bgr = None

        elif input_type == 'video':
            _, img_original_bgr = input_data.read()
            # Skip already-read frames until reaching the requested start frame.
            if video_frame < cur_frame:
                video_frame += 1
                continue
            # save the obtained video frames
            image_path = osp.join(args.out_dir, "frames", f"{cur_frame:05d}.jpg")
            if img_original_bgr is not None:
                video_frame += 1
                if args.save_frame:
                    gnu.make_subdir(image_path)
                    cv2.imwrite(image_path, img_original_bgr)

        elif input_type == 'webcam':
            _, img_original_bgr = input_data.read()
            if video_frame < cur_frame:
                video_frame += 1
                continue
            # save the obtained video frames
            image_path = osp.join(args.out_dir, "frames", f"scene_{cur_frame:05d}.jpg")
            if img_original_bgr is not None:
                video_frame += 1
                if args.save_frame:
                    gnu.make_subdir(image_path)
                    cv2.imwrite(image_path, img_original_bgr)
        else:
            assert False, "Unknown input_type"

        cur_frame +=1
        if img_original_bgr is None or cur_frame > args.end_frame:
            break
        print("--------------------------------------")

        if load_bbox:
            # bboxes came from the bbox_dir input above
            body_pose_list = None
        else:
            body_pose_list, body_bbox_list = body_bbox_detector.detect_body_pose(
                img_original_bgr)
            hand_bbox_list = [None, ] * len(body_bbox_list)

        # save the obtained body & hand bbox to json file
        if args.save_bbox_output:
            demo_utils.save_info_to_json(args, image_path, body_bbox_list, hand_bbox_list)

        if len(body_bbox_list) < 1:
            print(f"No body deteced: {image_path}")
            continue

        #Sort the bbox using bbox size
        # (to make the order as consistent as possible without tracking)
        bbox_size =  [ (x[2] * x[3]) for x in body_bbox_list]
        idx_big2small = np.argsort(bbox_size)[::-1]
        body_bbox_list = [ body_bbox_list[i] for i in idx_big2small ]
        if args.single_person and len(body_bbox_list)>0:
            # keep only the largest detection
            body_bbox_list = [body_bbox_list[0], ]

        # Body Pose Regression
        pred_output_list = body_mocap.regress(img_original_bgr, body_bbox_list)
        assert len(body_bbox_list) == len(pred_output_list)

        # extract mesh for rendering (vertices in image space and faces) from pred_output_list
        pred_mesh_list = demo_utils.extract_mesh_from_output(pred_output_list)

        # visualization
        res_img = visualizer.visualize(
            img_original_bgr,
            pred_mesh_list = pred_mesh_list,
            body_bbox_list = body_bbox_list)

        # show result in the screen
        if not args.no_display:
            res_img = res_img.astype(np.uint8)
            ImShow(res_img)

        # save result image
        if args.out_dir is not None:
            demo_utils.save_res_img(args.out_dir, image_path, res_img)

        # save predictions to pkl
        if args.save_pred_pkl:
            demo_type = 'body'
            demo_utils.save_pred_to_pkl(
                args, demo_type, image_path, body_bbox_list, hand_bbox_list, pred_output_list)

        timer.toc(bPrint=True,title="Time")
        print(f"Processed : {image_path}")

    #save images as a video
    if not args.no_video_out and input_type in ['video', 'webcam']:
        demo_utils.gen_video_out(args.out_dir, args.seq_name)

    if input_type =='webcam' and input_data is not None:
        input_data.release()
    cv2.destroyAllWindows()
180,910 | import os
import sys
import os.path as osp
import torch
from torchvision.transforms import Normalize
import numpy as np
import cv2
import argparse
import json
import pickle
from demo.demo_options import DemoOptions
from bodymocap.body_mocap_api import BodyMocap
from handmocap.hand_mocap_api import HandMocap
import mocap_utils.demo_utils as demo_utils
import mocap_utils.general_utils as gnu
from mocap_utils.timer import Timer
from datetime import datetime
from bodymocap.body_bbox_detector import BodyPoseEstimator
from handmocap.hand_bbox_detector import HandBboxDetector
from integration.copy_and_paste import integration_copy_paste
import renderer.image_utils as imu
from renderer.viewer2D import ImShow
def run_regress(
    args, img_original_bgr,
    body_bbox_list, hand_bbox_list, bbox_detector,
    body_mocap, hand_mocap
):
    """Regress body and hand pose for a single frame and fuse the results.

    Two execution paths:
      * slow path (pre-computed bboxes supplied, or fast mode disabled):
        run the joint hand/body detector if needed, then regress hands and
        body independently.
      * fast path: detect body bboxes only, regress the body first, then
        derive the hand bboxes from the predicted body joints before hand
        regression.

    Returns:
        (body_bbox_list, hand_bbox_list, integral_output_list); three empty
        lists when no body is found in the frame.
    """
    has_precomputed_bbox = len(body_bbox_list) > 0 and len(hand_bbox_list) > 0
    use_slow_path = has_precomputed_bbox or not args.frankmocap_fast_mode

    # use pre-computed bbox or use slow detection mode
    if use_slow_path:
        if has_precomputed_bbox:
            print("Use pre-computed bounding boxes")
            assert len(body_bbox_list) == len(hand_bbox_list)
        else:
            # no bboxes supplied: run detection to obtain them
            _, body_bbox_list, hand_bbox_list, _ = \
                bbox_detector.detect_hand_bbox(img_original_bgr.copy())

        if not body_bbox_list:
            return list(), list(), list()

        # sort bboxes by size; keep only the largest when single_person is set
        body_bbox_list, hand_bbox_list = __filter_bbox_list(
            body_bbox_list, hand_bbox_list, args.single_person)

        # hand & body pose regression
        pred_hand_list = hand_mocap.regress(
            img_original_bgr, hand_bbox_list, add_margin=True)
        pred_body_list = body_mocap.regress(img_original_bgr, body_bbox_list)
        assert len(hand_bbox_list) == len(pred_hand_list)
        assert len(pred_hand_list) == len(pred_body_list)
    else:
        # fast path: body detection only
        _, body_bbox_list = bbox_detector.detect_body_bbox(img_original_bgr.copy())

        if not body_bbox_list:
            return list(), list(), list()

        # sort bboxes by size; keep only the largest when single_person is set
        hand_bbox_list = [None] * len(body_bbox_list)
        body_bbox_list, _ = __filter_bbox_list(
            body_bbox_list, hand_bbox_list, args.single_person)

        # body regression first
        pred_body_list = body_mocap.regress(img_original_bgr, body_bbox_list)
        assert len(body_bbox_list) == len(pred_body_list)

        # hand bboxes are derived from the predicted body joints
        hand_bbox_list = body_mocap.get_hand_bboxes(
            pred_body_list, img_original_bgr.shape[:2])
        assert len(pred_body_list) == len(hand_bbox_list)

        # hand regression
        pred_hand_list = hand_mocap.regress(
            img_original_bgr, hand_bbox_list, add_margin=True)
        assert len(hand_bbox_list) == len(pred_hand_list)

    # integration by copy-and-paste
    integral_output_list = integration_copy_paste(
        pred_body_list, pred_hand_list, body_mocap.smpl, img_original_bgr.shape)

    return body_bbox_list, hand_bbox_list, integral_output_list
def ImShow(inputImg, waitTime=1, bConvRGB2BGR=False, name='image', scale=1.0):
    """Display an image in an OpenCV window.

    Args:
        inputImg: image to display (validated/normalized by __ValidateNumpyImg).
        waitTime (int): milliseconds passed to cv2.waitKey (1 = non-blocking).
        bConvRGB2BGR (bool): convert from RGB to BGR before display.
        name (str): OpenCV window name.
        scale (float): uniform resize factor applied before display.
    """
    inputImg = __ValidateNumpyImg(inputImg)

    if scale != 1.0:
        # cv2.resize expects dsize=(width, height), while shape is
        # (height, width, ...). The previous code swapped the two axes and
        # truncated `scale` to int (so e.g. scale=0.5 resized to 0x0).
        new_w = int(inputImg.shape[1] * scale)
        new_h = int(inputImg.shape[0] * scale)
        inputImg = cv2.resize(inputImg, (new_w, new_h))

    if bConvRGB2BGR:
        inputImg = cv2.cvtColor(inputImg, cv2.COLOR_RGB2BGR)

    cv2.imshow(name, inputImg)
    cv2.waitKey(waitTime)
def run_frank_mocap(args, bbox_detector, body_mocap, hand_mocap, visualizer):
    """Run the whole-body (body + hand) mocap demo over the input stream.

    Iterates frames from an image directory, bbox directory, video file, or
    webcam (chosen by demo_utils.setup_input), regresses body and hand pose
    per frame via run_regress, renders the meshes, and optionally saves
    result images, bbox json, and pkl predictions. For video/webcam inputs
    the rendered frames are assembled into an output video at the end.

    Args:
        args: parsed demo options (input/output paths, frame range, flags).
        bbox_detector: combined body/hand bounding-box detector.
        body_mocap: body pose regressor (BodyMocap).
        hand_mocap: hand pose regressor (HandMocap).
        visualizer: renderer used to overlay predicted meshes on the frame.
    """
    # Setup input data to handle different types of inputs
    input_type, input_data = demo_utils.setup_input(args)

    cur_frame = args.start_frame
    video_frame = 0  # index of the next frame pulled from a video/webcam stream
    while True:
        # load data
        load_bbox = False

        if input_type =='image_dir':
            if cur_frame < len(input_data):
                image_path = input_data[cur_frame]
                img_original_bgr  = cv2.imread(image_path)
            else:
                img_original_bgr = None

        elif input_type == 'bbox_dir':
            if cur_frame < len(input_data):
                image_path = input_data[cur_frame]['image_path']
                hand_bbox_list = input_data[cur_frame]['hand_bbox_list']
                body_bbox_list = input_data[cur_frame]['body_bbox_list']
                img_original_bgr  = cv2.imread(image_path)
                load_bbox = True
            else:
                img_original_bgr = None

        elif input_type == 'video':
            _, img_original_bgr = input_data.read()
            # keep reading (and discarding) until we reach start_frame
            if video_frame < cur_frame:
                video_frame += 1
                continue
            # save the obtained video frames
            image_path = osp.join(args.out_dir, "frames", f"{cur_frame:05d}.jpg")
            if img_original_bgr is not None:
                video_frame += 1
                if args.save_frame:
                    gnu.make_subdir(image_path)
                    cv2.imwrite(image_path, img_original_bgr)

        elif input_type == 'webcam':
            _, img_original_bgr = input_data.read()
            if video_frame < cur_frame:
                video_frame += 1
                continue
            # save the obtained video frames
            image_path = osp.join(args.out_dir, "frames", f"scene_{cur_frame:05d}.jpg")
            if img_original_bgr is not None:
                video_frame += 1
                if args.save_frame:
                    gnu.make_subdir(image_path)
                    cv2.imwrite(image_path, img_original_bgr)
        else:
            assert False, "Unknown input_type"

        cur_frame +=1
        if img_original_bgr is None or cur_frame > args.end_frame:
            break
        print("--------------------------------------")

        # bbox detection: start empty unless bboxes were loaded from disk
        if not load_bbox:
            body_bbox_list, hand_bbox_list = list(), list()

        # regression (includes integration)
        body_bbox_list, hand_bbox_list, pred_output_list = run_regress(
            args, img_original_bgr,
            body_bbox_list, hand_bbox_list, bbox_detector,
            body_mocap, hand_mocap)

        # save the obtained body & hand bbox to json file
        if args.save_bbox_output:
            demo_utils.save_info_to_json(args, image_path, body_bbox_list, hand_bbox_list)

        if len(body_bbox_list) < 1:
            # fixed typo in the user-facing message ("deteced" -> "detected")
            print(f"No body detected: {image_path}")
            continue

        pred_mesh_list = demo_utils.extract_mesh_from_output(pred_output_list)

        # visualization
        res_img = visualizer.visualize(
            img_original_bgr,
            pred_mesh_list = pred_mesh_list,
            body_bbox_list = body_bbox_list,
            hand_bbox_list = hand_bbox_list)

        # show result in the screen
        if not args.no_display:
            res_img = res_img.astype(np.uint8)
            ImShow(res_img)

        # save result image
        if args.out_dir is not None:
            demo_utils.save_res_img(args.out_dir, image_path, res_img)

        # save predictions to pkl
        if args.save_pred_pkl:
            demo_type = 'frank'
            demo_utils.save_pred_to_pkl(
                args, demo_type, image_path, body_bbox_list, hand_bbox_list, pred_output_list)

        print(f"Processed : {image_path}")

    # save images as a video
    if not args.no_video_out and input_type in ['video', 'webcam']:
        demo_utils.gen_video_out(args.out_dir, args.seq_name)

    if input_type =='webcam' and input_data is not None:
        input_data.release()
    cv2.destroyAllWindows()
import torch
import torch.nn as nn
from torch.nn import init
import functools
import numpy as np
from . import resnet
def weights_init(m):
    """Initialize a module's weights in-place (intended for ``net.apply(weights_init)``).

    Conv* layers get weights drawn from N(0, 0.02); BatchNorm2d layers get
    weights drawn from N(1.0, 0.02). Biases are zeroed when present. Modules
    whose class name matches neither pattern are left untouched.

    Args:
        m: a torch.nn.Module visited by ``Module.apply``.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        if m.weight is not None:
            m.weight.data.normal_(0.0, 0.02)
        # bias may be None (e.g. Conv2d(..., bias=False)); hasattr(None, 'data') is False
        if hasattr(m.bias, 'data'):
            m.bias.data.fill_(0)
    elif classname.find('BatchNorm2d') != -1:
        # BatchNorm2d(..., affine=False) has weight/bias set to None;
        # the unguarded version raised AttributeError on such layers.
        if m.weight is not None:
            m.weight.data.normal_(1.0, 0.02)
        if m.bias is not None:
            m.bias.data.fill_(0)
# (non-code residue from a dataset-viewer page, commented out:)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.