id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
14,231 | from math import sqrt
from functools import partial, lru_cache
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
def wn_linear(in_dim, out_dim):
    """Build a Linear layer wrapped with weight normalization."""
    linear = nn.Linear(in_dim, out_dim)
    return nn.utils.weight_norm(linear)
14,232 | from math import sqrt
from functools import partial, lru_cache
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
def shift_down(input, size=1):
    """Shift a (N, C, H, W) map down by `size` rows, zero-filling the top.

    Zero rows are padded above the height axis and the result is cropped
    back to the original height, so the bottom `size` rows fall off.
    """
    height = input.shape[2]
    padded = F.pad(input, [0, 0, size, 0])
    return padded[:, :, :height, :]
14,233 | from math import sqrt
from functools import partial, lru_cache
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
def shift_right(input, size=1):
    """Shift a (N, C, H, W) map right by `size` columns, zero-filling the left.

    Zero columns are padded on the left of the width axis and the result is
    cropped back to the original width, so the rightmost `size` columns fall off.
    """
    width = input.shape[3]
    padded = F.pad(input, [size, 0, 0, 0])
    return padded[:, :, :, :width]
14,234 | from math import sqrt
from functools import partial, lru_cache
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
def causal_mask(size):
    """Build a causal attention mask for a sequence of `size` positions.

    Returns:
        tuple: ``(mask, start_mask)`` where ``mask`` is a (1, size, size)
        uint8 tensor that is 1 strictly below the diagonal (each position
        may attend only to earlier positions), and ``start_mask`` is a
        (size, 1) float32 tensor zeroing out the first position.
    """
    strictly_upper = np.triu(np.ones([size, size]), k=1)
    lower = strictly_upper.astype(np.uint8).T
    start = np.ones(size, dtype=np.float32)
    start[0] = 0
    mask_tensor = torch.from_numpy(lower).unsqueeze(0)
    start_tensor = torch.from_numpy(start).unsqueeze(1)
    return mask_tensor, start_tensor
14,235 | from math import cos, pi, floor, sin
from torch.optim import lr_scheduler
def anneal_linear(start, end, proportion):
    """Linearly interpolate from `start` to `end` at `proportion` in [0, 1]."""
    delta = end - start
    return start + delta * proportion
14,236 | from math import cos, pi, floor, sin
from torch.optim import lr_scheduler
def anneal_cos(start, end, proportion):
    """Cosine-anneal from `start` to `end` at `proportion` in [0, 1]."""
    # (cos(pi*p) + 1) / 2 goes smoothly from 1 at p=0 to 0 at p=1
    weight = (cos(pi * proportion) + 1) / 2
    return end + (start - end) * weight
14,237 | import math
import pickle
import torch
from torch import distributed as dist
from torch.utils import data
def get_rank():
    """Return this process's rank, or 0 when torch.distributed is unusable."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0


def is_primary():
    """Return True on the primary (rank-0) process."""
    return get_rank() == 0
14,238 | import math
import pickle
import torch
from torch import distributed as dist
from torch.utils import data
LOCAL_PROCESS_GROUP = None


def get_rank():
    """Global rank of this process; 0 when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0


def get_local_rank():
    """Rank of this process within its machine-local process group.

    Raises:
        ValueError: if distributed is initialized but LOCAL_PROCESS_GROUP
            has not been set up.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    if LOCAL_PROCESS_GROUP is None:
        raise ValueError("tensorfn.distributed.LOCAL_PROCESS_GROUP is None")
    return dist.get_rank(group=LOCAL_PROCESS_GROUP)
14,239 | import math
import pickle
import torch
from torch import distributed as dist
from torch.utils import data
def get_world_size():
    """Number of distributed processes; 1 when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1


def all_reduce(tensor, op=dist.ReduceOp.SUM):
    """All-reduce `tensor` in place across processes.

    A no-op in single-process runs; always returns `tensor` for chaining.
    """
    if get_world_size() > 1:
        dist.all_reduce(tensor, op=op)
    return tensor
14,240 | import math
import pickle
import torch
from torch import distributed as dist
from torch.utils import data
def get_world_size():
    # Number of processes in the default group; falls back to 1 when
    # torch.distributed is unavailable or uninitialized.
    if not dist.is_available():
        return 1
    if not dist.is_initialized():
        return 1
    return dist.get_world_size()
def all_gather(data):
    """Gather an arbitrary picklable object from every process.

    Each rank pickles ``data`` into a byte tensor, all ranks exchange
    payload sizes, pad to the common maximum, all-gather on CUDA, then
    unpickle each peer's payload. Returns a list with one entry per rank
    (just ``[data]`` in a single-process run).
    NOTE(review): the multi-process path moves tensors to "cuda" — assumes
    GPUs are present whenever world_size > 1; confirm with callers.
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")
    # exchange payload sizes so every rank can pad to the max length
    local_size = torch.IntTensor([tensor.numel()]).to("cuda")
    size_list = [torch.IntTensor([1]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
    # pad the local payload so all gathered tensors share one shape
    if local_size != max_size:
        padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
        tensor = torch.cat((tensor, padding), 0)
    dist.all_gather(tensor_list, tensor)
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        # strip the padding before unpickling each peer's payload
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list | null |
14,241 | import math
import pickle
import torch
from torch import distributed as dist
from torch.utils import data
def get_rank():
    """Global rank; 0 when torch.distributed is not in use."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0


def get_world_size():
    """World size; 1 when torch.distributed is not in use."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1


def reduce_dict(input_dict, average=True):
    """Reduce the tensor values of `input_dict` onto rank 0.

    Keys are visited in sorted order so every rank stacks values
    identically. On rank 0 the values are summed (and averaged when
    `average` is True); other ranks receive reduce partials. A
    single-process run returns `input_dict` unchanged.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = sorted(input_dict.keys())
        stacked = torch.stack([input_dict[k] for k in names], 0)
        dist.reduce(stacked, dst=0)
        if average and dist.get_rank() == 0:
            stacked /= world_size
        return dict(zip(names, stacked))
14,242 | import math
import pickle
import torch
from torch import distributed as dist
from torch.utils import data
def data_sampler(dataset, shuffle, distributed):
    """Pick a sampler: DistributedSampler, then Random/Sequential by `shuffle`."""
    if distributed:
        return data.distributed.DistributedSampler(dataset, shuffle=shuffle)
    sampler_cls = data.RandomSampler if shuffle else data.SequentialSampler
    return sampler_cls(dataset)
14,243 | import os
import torch
from torch import distributed as dist
from torch import multiprocessing as mp
import distributed as dist_fn
def find_free_port():
def distributed_worker(
local_rank, fn, world_size, n_gpu_per_machine, machine_rank, dist_url, args
):
def launch(fn, n_gpu_per_machine, n_machine=1, machine_rank=0, dist_url=None, args=()):
    """Launch `fn` across one process per GPU, or run it inline when the
    total world size is 1.

    Args:
        fn: callable executed by every worker with ``*args``.
        n_gpu_per_machine: processes (GPUs) spawned per machine.
        n_machine: number of machines participating in the job.
        machine_rank: index of this machine within the job.
        dist_url: init-method URL; ``"auto"`` picks a free local TCP port
            (single-machine jobs only).
        args: positional arguments forwarded to ``fn``.
    """
    world_size = n_machine * n_gpu_per_machine
    if world_size > 1:
        # avoid oversubscribing CPU threads when many workers share a host
        if "OMP_NUM_THREADS" not in os.environ:
            os.environ["OMP_NUM_THREADS"] = "1"
        if dist_url == "auto":
            if n_machine != 1:
                raise ValueError('dist_url="auto" not supported in multi-machine jobs')
            port = find_free_port()
            dist_url = f"tcp://127.0.0.1:{port}"
        if n_machine > 1 and dist_url.startswith("file://"):
            raise ValueError(
                "file:// is not a reliable init method in multi-machine jobs. Prefer tcp://"
            )
        # mp.spawn prepends local_rank as the first argument passed to
        # distributed_worker
        mp.spawn(
            distributed_worker,
            nprocs=n_gpu_per_machine,
            args=(fn, world_size, n_gpu_per_machine, machine_rank, dist_url, args),
            daemon=False,
        )
    else:
        fn(*args) | null |
14,244 | import argparse
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from dataset import LMDBDataset
from pixelsnail import PixelSNAIL
from scheduler import CycleScheduler
def train(args, epoch, loader, model, optimizer, scheduler, device):
    """Run one training epoch of a PixelSNAIL prior.

    Depending on ``args.hier``, trains the 'top' prior on top codes alone
    or the 'bottom' prior conditioned on top codes. Progress (loss,
    accuracy, learning rate) is reported on the tqdm bar.
    """
    loader = tqdm(loader)
    criterion = nn.CrossEntropyLoss()
    for i, (top, bottom, label) in enumerate(loader):
        model.zero_grad()
        top = top.to(device)
        if args.hier == 'top':
            target = top
            out, _ = model(top)
        elif args.hier == 'bottom':
            bottom = bottom.to(device)
            target = bottom
            out, _ = model(bottom, condition=top)
        loss = criterion(out, target)
        loss.backward()
        # NOTE(review): scheduler.step() runs before optimizer.step() here;
        # confirm this ordering is intended for the scheduler in use.
        if scheduler is not None:
            scheduler.step()
        optimizer.step()
        # per-pixel accuracy over the whole code map
        _, pred = out.max(1)
        correct = (pred == target).float()
        accuracy = correct.sum() / target.numel()
        lr = optimizer.param_groups[0]['lr']
        loader.set_description(
            (
                f'epoch: {epoch + 1}; loss: {loss.item():.5f}; '
                f'acc: {accuracy:.5f}; lr: {lr:.5f}'
            )
        ) | null |
14,245 | from setuptools import find_packages, setup
def readme():
    """Read and return the contents of README.md as UTF-8 text."""
    with open('README.md', encoding='utf-8') as f:
        return f.read()
14,246 | from setuptools import find_packages, setup
def get_version():
    """Return ``__version__`` defined in mmhuman3d/version.py.

    The version file is executed in a dedicated namespace. The previous
    implementation exec'd into the function's ``locals()`` and read the
    result back via ``locals()['__version__']``, which the language does
    not guarantee to work (it is a CPython implementation detail).
    """
    version_file = 'mmhuman3d/version.py'
    namespace = {}
    with open(version_file, 'r', encoding='utf-8') as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
14,247 | from setuptools import find_packages, setup
try:
import torch
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
cmd_class = {'build_ext': BuildExtension}
except ModuleNotFoundError:
cmd_class = {}
print('Skip building ext ops due to the absence of torch.')
def get_extensions():
    """Collect CUDA extension modules to build, when torch + CUDA are usable.

    Returns an empty list (printing the reason) when CUDA is unavailable
    or extension construction fails, so setup() can proceed without the
    compiled rasterizer ops. Relies on ``torch``/``CUDAExtension`` bound
    by the module-level try/except import block.
    """
    extensions = []
    try:
        if torch.cuda.is_available():
            ext_ops = CUDAExtension(
                'mmhuman3d.core.renderer.mpr_renderer.cuda.rasterizer', # noqa: E501
                [
                    'mmhuman3d/core/renderer/mpr_renderer/cuda/rasterizer.cpp', # noqa: E501
                    'mmhuman3d/core/renderer/mpr_renderer/cuda/rasterizer_kernel.cu', # noqa: E501
                ])
            extensions.append(ext_ops)
    except Exception as e:
        # best-effort: building the extension is optional
        print(f'Skip building ext ops: {e}')
    return extensions | null |
14,248 | from setuptools import find_packages, setup
The provided code snippet includes necessary dependencies for implementing the `parse_requirements` function. Write a Python function `def parse_requirements(fname='requirements.txt', with_version=True)` to solve the following problem:
Parse the package dependencies listed in a requirements file, optionally stripping version specifiers. Args: fname (str): path to requirements file with_version (bool, default=True): if True include version specs Returns: List[str]: list of requirements items CommandLine: python -c "import setup; print(setup.parse_requirements())"
Here is the function:
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file.

    Args:
        fname (str): path to requirements file
        with_version (bool, default=True): if True include version specs
    Returns:
        List[str]: list of requirements items
    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import re
    import sys
    from os.path import exists
    require_fpath = fname

    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            info = {'line': line}
            if line.startswith('-e '):
                info['package'] = line.split('#egg=')[1]
            else:
                # Split the package name from its version specifier. The
                # two-character operators must precede their one-character
                # prefixes in the alternation so '>=' is not split as
                # '>' + '='. ('~=', '<=' and '<' were previously missing.)
                pat = '(' + '|'.join(['>=', '==', '~=', '<=', '>', '<']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle platform specific dependencies
                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                        # split only on the first ';' so extra semicolons
                        # in the marker cannot break the unpacking
                        version, platform_deps = map(str.strip,
                                                     rest.split(';', 1))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest  # NOQA
                    info['version'] = (op, version)
            yield info

    def parse_require_file(fpath):
        """Yield parsed entries for every non-comment line of `fpath`."""
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    for info in parse_line(line):
                        yield info

    def gen_packages_items():
        """Render parsed entries back into requirement strings."""
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # apparently package_deps are broken in 3.4
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item

    packages = list(gen_packages_items())
    return packages
14,249 | import os
import sys
import pytorch_sphinx_theme
version_file = '../mmhuman3d/version.py'
def get_version():
    """Return ``__version__`` from the module-level ``version_file`` path.

    The file is executed in an explicit namespace dict instead of relying
    on ``exec`` mutating ``locals()`` inside a function and reading it
    back, which the language does not guarantee (CPython-specific).
    """
    namespace = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
14,250 | import argparse
import time
from collections import deque
from queue import Queue
from threading import Event, Lock, Thread
import cv2
import mmcv
import numpy as np
import torch
from mmhuman3d.apis import inference_image_based_model, init_model
from mmhuman3d.core.renderer.mpr_renderer.smpl_realrender import \
VisualizerMeshSMPL
from mmhuman3d.models.body_models.builder import build_body_model
from mmhuman3d.utils.demo_utils import (
StopWatch,
convert_verts_to_cam_coord,
process_mmdet_results,
)
def parse_args():
    """Build and parse the CLI arguments for the webcam SMPL demo.

    Fix: ``--synchronous`` previously used ``type=str`` with default
    ``True``, so any explicit value — including ``False`` — parsed as a
    non-empty (truthy) string. It now uses a proper boolean converter
    while keeping the same flag spelling and default.
    """

    def str2bool(value):
        # accept the bool default unchanged and common textual spellings
        if isinstance(value, bool):
            return value
        return value.lower() in ('1', 'true', 'yes', 'y', 'on')

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--mesh_reg_config',
        type=str,
        default='configs/pare/hrnet_w32_conv_pare_coco.py',
        help='Config file for mesh regression')
    parser.add_argument(
        '--mesh_reg_checkpoint',
        type=str,
        default='data/checkpoints/hrnet_w32_conv_pare_mosh.pth',
        help='Checkpoint file for mesh regression')
    parser.add_argument('--cam-id', type=str, default='0')
    parser.add_argument(
        '--det_config',
        type=str,
        default='demo/mmdetection_cfg/'
        'ssdlite_mobilenetv2_scratch_600e_coco.py',
        help='Config file for detection')
    parser.add_argument(
        '--det_checkpoint',
        type=str,
        default='https://download.openmmlab.com/mmdetection/v2.0/ssd/'
        'ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_'
        'scratch_600e_coco_20210629_110627-974d9307.pth',
        help='Checkpoint file for detection')
    parser.add_argument(
        '--det_cat_id',
        type=int,
        default=1,
        help='Category id for bounding box detection model. '
        'Default: 1 for human')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--body_model_dir',
        type=str,
        default='data/body_models/smpl',
        help='Body models file path')
    parser.add_argument(
        '--bbox_thr',
        type=float,
        default=0.6,
        help='Bounding box score threshold')
    parser.add_argument(
        '--output',
        type=str,
        default=None,
        help='Record the video into a file. This may reduce the frame rate')
    parser.add_argument(
        '--out_video_fps',
        type=int,
        default=20,
        help='Set the FPS of the output video file.')
    parser.add_argument(
        '--input_video_fps',
        type=int,
        default=30,
        help='The FPS of the input video file.')
    parser.add_argument(
        '--buffer_size',
        type=int,
        default=-1,
        help='Frame buffer size. If set -1, the buffer size will be '
        'automatically inferred from the display delay time. Default: -1')
    parser.add_argument(
        '--inference_fps',
        type=int,
        default=10,
        help='Maximum inference FPS. This is to limit the resource consuming '
        'especially when the detection and pose model are lightweight and '
        'very fast. Default: 10.')
    parser.add_argument(
        '--display_delay',
        type=int,
        default=0,
        help='Delay the output video in milliseconds. This can be used to '
        'align the output video and inference results. The delay can be '
        'disabled by setting a non-positive delay time. Default: 0')
    parser.add_argument(
        '--synchronous',
        type=str2bool,
        default=True,
        help='If True, the video I/O and inference will be temporally '
        'aligned. Note that this will reduce the display FPS.')
    return parser.parse_args()
14,251 | import argparse
import time
from collections import deque
from queue import Queue
from threading import Event, Lock, Thread
import cv2
import mmcv
import numpy as np
import torch
from mmhuman3d.apis import inference_image_based_model, init_model
from mmhuman3d.core.renderer.mpr_renderer.smpl_realrender import \
VisualizerMeshSMPL
from mmhuman3d.models.body_models.builder import build_body_model
from mmhuman3d.utils.demo_utils import (
StopWatch,
convert_verts_to_cam_coord,
process_mmdet_results,
)
def read_camera():
    """Producer thread: read camera frames into the shared queues.

    Pushes ``(timestamp, frame)`` onto the inference ``input_queue`` and
    the display ``frame_buffer`` until ``event_exit`` is set or the video
    stream ends. Relies on module-level globals: ``args``, ``event_exit``,
    ``event_inference_done``, ``input_queue``, ``input_queue_mutex`` and
    ``frame_buffer``.
    """
    # init video reader
    print('Thread "input" started')
    cam_id = args.cam_id
    # a purely numeric cam id selects a device index instead of a path/URL
    if cam_id.isdigit():
        cam_id = int(cam_id)
    vid_cap = cv2.VideoCapture(cam_id)
    if not vid_cap.isOpened():
        print(f'Cannot open camera (ID={cam_id})')
        exit()
    while not event_exit.is_set():
        # capture a camera frame
        ret_val, frame = vid_cap.read()
        if ret_val:
            ts_input = time.time()
            event_inference_done.clear()
            with input_queue_mutex:
                input_queue.append((ts_input, frame))
            # in synchronous mode, block until inference consumed the frame
            if args.synchronous:
                event_inference_done.wait()
            frame_buffer.put((ts_input, frame))
        else:
            # input ending signal
            frame_buffer.put((None, None))
            break
    vid_cap.release() | null |
14,252 | import argparse
import time
from collections import deque
from queue import Queue
from threading import Event, Lock, Thread
import cv2
import mmcv
import numpy as np
import torch
from mmhuman3d.apis import inference_image_based_model, init_model
from mmhuman3d.core.renderer.mpr_renderer.smpl_realrender import \
VisualizerMeshSMPL
from mmhuman3d.models.body_models.builder import build_body_model
from mmhuman3d.utils.demo_utils import (
StopWatch,
convert_verts_to_cam_coord,
process_mmdet_results,
)
class StopWatch:
    r"""A helper class to measure FPS and detailed time consuming of each phase
    in a video processing loop or similar scenarios.

    Args:
        window (int): The sliding window size to calculate the running average
            of the time consuming.

    Example:
        >>> from mmpose.utils import StopWatch
        >>> import time
        >>> stop_watch = StopWatch(window=10)
        >>> with stop_watch.timeit('total'):
        >>>     time.sleep(0.1)
        >>>     # 'timeit' support nested use
        >>>     with stop_watch.timeit('phase1'):
        >>>         time.sleep(0.1)
        >>>     with stop_watch.timeit('phase2'):
        >>>         time.sleep(0.2)
        >>>     time.sleep(0.2)
        >>> report = stop_watch.report()
    """

    class _TimerScope:
        """Context manager that times one scope and records it on exit."""

        def __init__(self, owner, name):
            self._owner = owner
            self._name = name

        def __enter__(self):
            self._owner._timer_stack.append((self._name, Timer()))
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            # pop even on exception so nested timers stay balanced
            name, timer = self._owner._timer_stack.pop()
            self._owner._record[name].update(timer.since_start())
            return False

    def __init__(self, window=1):
        self.window = window
        # per-name running averages over the sliding window
        self._record = defaultdict(partial(RunningAverage, window=self.window))
        self._timer_stack = []

    def timeit(self, timer_name='_FPS_'):
        """Time a code snippet with an assigned name.

        Args:
            timer_name (str): The unique name of the interested code snippet
                to handle multiple timers and generate reports. Note that
                '_FPS_' is a special key that the measurement will be in
                `fps` instead of `millisecond`. Also see `report` and
                `report_strings`. Default: '_FPS_'.

        Note:
            Must be used in a ``with`` statement. The previous version was a
            bare generator (it yielded without being decorated with
            ``contextlib.contextmanager``) and therefore was not a valid
            context manager; an explicit context-manager object keeps the
            same push/pop nesting semantics and fixes that.
        """
        return StopWatch._TimerScope(self, timer_name)

    def report(self, key=None):
        """Report timing information.

        Returns:
            dict: The key is the timer name and the value is the
                corresponding average time consuming (milliseconds, or
                frames-per-second for the special '_FPS_' timer).
        """
        result = {
            name: r.average() * 1000.
            for name, r in self._record.items()
        }
        if '_FPS_' in result:
            # convert the average frame period (ms) into frames per second
            result['_FPS_'] = 1000. / result.pop('_FPS_')
        if key is None:
            return result
        return result[key]

    def report_strings(self):
        """Report timing information as display strings.

        Returns:
            list(str): Each element is the information string of a timed
                event, in format of '{timer_name}: {time_in_ms}'.
                Specially, if timer_name is '_FPS_', the result will
                be converted to fps.
        """
        result = self.report()
        strings = []
        if '_FPS_' in result:
            strings.append(f'FPS: {result["_FPS_"]:>5.1f}')
        # NOTE(review): '_FPS_' is still present in `result` here, so it is
        # also listed by the generic formatting below — confirm intended.
        strings += [f'{name}: {val:>3.0f}' for name, val in result.items()]
        return strings

    def reset(self):
        """Drop all recorded timings and any in-flight timers."""
        # Recreate the same structures as __init__. The previous version
        # reset _record to defaultdict(list) (whose values lack the
        # .update() semantics used above) and cleared a non-existent
        # _active_timer_stack attribute, breaking reuse after reset().
        self._record = defaultdict(partial(RunningAverage, window=self.window))
        self._timer_stack = []
def inference_detection():
    """Worker thread: run the person detector on frames from input_queue.

    Consumes ``(timestamp, frame)`` items, runs ``inference_detector``
    (mmdet) with the global ``det_model`` and appends results to the
    shared ``det_result_queue``. Also caps the detection rate at
    ``args.inference_fps``. The loop never exits on its own; the process
    relies on daemon-thread teardown.
    """
    print('Thread "det" started')
    stop_watch = StopWatch(window=10)
    min_interval = 1.0 / args.inference_fps
    _ts_last = None  # timestamp when last inference was done
    while True:
        # busy-wait (with a short sleep) until a frame is queued
        while len(input_queue) < 1:
            time.sleep(0.001)
        with input_queue_mutex:
            ts_input, frame = input_queue.popleft()
        # inference detection
        with stop_watch.timeit('Det'):
            mmdet_results = inference_detector(det_model, frame)
        t_info = stop_watch.report_strings()
        with det_result_queue_mutex:
            det_result_queue.append((ts_input, frame, t_info, mmdet_results))
        # limit the inference FPS
        _ts = time.time()
        if _ts_last is not None and _ts - _ts_last < min_interval:
            time.sleep(min_interval - _ts + _ts_last)
        _ts_last = time.time() | null |
14,253 | import argparse
import time
from collections import deque
from queue import Queue
from threading import Event, Lock, Thread
import cv2
import mmcv
import numpy as np
import torch
from mmhuman3d.apis import inference_image_based_model, init_model
from mmhuman3d.core.renderer.mpr_renderer.smpl_realrender import \
VisualizerMeshSMPL
from mmhuman3d.models.body_models.builder import build_body_model
from mmhuman3d.utils.demo_utils import (
StopWatch,
convert_verts_to_cam_coord,
process_mmdet_results,
)
def process_mmdet_results(mmdet_results, cat_id=1, bbox_thr=None):
    """Extract person bounding boxes from mmdet output.

    Args:
        mmdet_results (list|tuple): mmdet results; a tuple carries the
            bbox results in its first element.
        cat_id (int): 1-based category id (default: 1 for human).
        bbox_thr (float): if given, keep only boxes whose score (column
            4) exceeds this threshold.

    Returns:
        list[dict]: one ``{'bbox': ndarray}`` entry per kept detection.
    """
    if isinstance(mmdet_results, tuple):
        det_results = mmdet_results[0]
    else:
        det_results = mmdet_results
    bboxes = np.array(det_results[cat_id - 1])
    if bbox_thr is not None:
        assert bboxes.shape[-1] == 5
        bboxes = bboxes[bboxes[:, 4] > bbox_thr]
    return [{'bbox': bbox} for bbox in bboxes]
class StopWatch:
    r"""A helper class to measure FPS and detailed time consuming of each phase
    in a video processing loop or similar scenarios.

    Args:
        window (int): The sliding window size to calculate the running average
            of the time consuming.

    Example:
        >>> from mmpose.utils import StopWatch
        >>> import time
        >>> stop_watch = StopWatch(window=10)
        >>> with stop_watch.timeit('total'):
        >>>     time.sleep(0.1)
        >>>     # 'timeit' support nested use
        >>>     with stop_watch.timeit('phase1'):
        >>>         time.sleep(0.1)
        >>>     with stop_watch.timeit('phase2'):
        >>>         time.sleep(0.2)
        >>>     time.sleep(0.2)
        >>> report = stop_watch.report()
    """

    class _TimerScope:
        """Context manager that times one scope and records it on exit."""

        def __init__(self, owner, name):
            self._owner = owner
            self._name = name

        def __enter__(self):
            self._owner._timer_stack.append((self._name, Timer()))
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            # pop even on exception so nested timers stay balanced
            name, timer = self._owner._timer_stack.pop()
            self._owner._record[name].update(timer.since_start())
            return False

    def __init__(self, window=1):
        self.window = window
        # per-name running averages over the sliding window
        self._record = defaultdict(partial(RunningAverage, window=self.window))
        self._timer_stack = []

    def timeit(self, timer_name='_FPS_'):
        """Time a code snippet with an assigned name.

        Args:
            timer_name (str): The unique name of the interested code snippet
                to handle multiple timers and generate reports. Note that
                '_FPS_' is a special key that the measurement will be in
                `fps` instead of `millisecond`. Also see `report` and
                `report_strings`. Default: '_FPS_'.

        Note:
            Must be used in a ``with`` statement. The previous version was a
            bare generator (it yielded without being decorated with
            ``contextlib.contextmanager``) and therefore was not a valid
            context manager; an explicit context-manager object keeps the
            same push/pop nesting semantics and fixes that.
        """
        return StopWatch._TimerScope(self, timer_name)

    def report(self, key=None):
        """Report timing information.

        Returns:
            dict: The key is the timer name and the value is the
                corresponding average time consuming (milliseconds, or
                frames-per-second for the special '_FPS_' timer).
        """
        result = {
            name: r.average() * 1000.
            for name, r in self._record.items()
        }
        if '_FPS_' in result:
            # convert the average frame period (ms) into frames per second
            result['_FPS_'] = 1000. / result.pop('_FPS_')
        if key is None:
            return result
        return result[key]

    def report_strings(self):
        """Report timing information as display strings.

        Returns:
            list(str): Each element is the information string of a timed
                event, in format of '{timer_name}: {time_in_ms}'.
                Specially, if timer_name is '_FPS_', the result will
                be converted to fps.
        """
        result = self.report()
        strings = []
        if '_FPS_' in result:
            strings.append(f'FPS: {result["_FPS_"]:>5.1f}')
        # NOTE(review): '_FPS_' is still present in `result` here, so it is
        # also listed by the generic formatting below — confirm intended.
        strings += [f'{name}: {val:>3.0f}' for name, val in result.items()]
        return strings

    def reset(self):
        """Drop all recorded timings and any in-flight timers."""
        # Recreate the same structures as __init__. The previous version
        # reset _record to defaultdict(list) (whose values lack the
        # .update() semantics used above) and cleared a non-existent
        # _active_timer_stack attribute, breaking reuse after reset().
        self._record = defaultdict(partial(RunningAverage, window=self.window))
        self._timer_stack = []
def inference_mesh():
    """Worker thread: estimate SMPL meshes for detected people.

    Consumes detector output from ``det_result_queue``, runs the global
    ``mesh_model`` via ``inference_image_based_model``, pushes results to
    ``mesh_result_queue`` and signals ``event_inference_done`` for the
    synchronous capture mode. The loop never exits on its own.
    """
    print('Thread "mesh" started')
    stop_watch = StopWatch(window=10)
    while True:
        # busy-wait (with a short sleep) for detector output
        while len(det_result_queue) < 1:
            time.sleep(0.001)
        with det_result_queue_mutex:
            ts_input, frame, t_info, mmdet_results = det_result_queue.popleft()
        with stop_watch.timeit('Mesh'):
            det_results = process_mmdet_results(
                mmdet_results, cat_id=args.det_cat_id, bbox_thr=args.bbox_thr)
            mesh_results = inference_image_based_model(
                mesh_model,
                frame,
                det_results,
                bbox_thr=args.bbox_thr,
                format='xyxy')
        t_info += stop_watch.report_strings()
        with mesh_result_queue_mutex:
            mesh_result_queue.append((ts_input, t_info, mesh_results))
        event_inference_done.set() | null |
14,254 | import argparse
import time
from collections import deque
from queue import Queue
from threading import Event, Lock, Thread
import cv2
import mmcv
import numpy as np
import torch
from mmhuman3d.apis import inference_image_based_model, init_model
from mmhuman3d.core.renderer.mpr_renderer.smpl_realrender import \
VisualizerMeshSMPL
from mmhuman3d.models.body_models.builder import build_body_model
from mmhuman3d.utils.demo_utils import (
StopWatch,
convert_verts_to_cam_coord,
process_mmdet_results,
)
def convert_verts_to_cam_coord(verts,
                               pred_cams,
                               bboxes_xy,
                               focal_length=5000.,
                               bbox_scale_factor=1.25,
                               bbox_format='xyxy'):
    """Convert vertices from the world coordinate to camera coordinate.
    Args:
        verts ([np.ndarray]): The vertices in the world coordinate.
            The shape is (frame,num_person,6890,3), (frame,6890,3),
            or (6890,3).
        pred_cams ([np.ndarray]): Camera parameters estimated by HMR or SPIN.
            The shape is (frame,num_person,3), (frame,3), or (3,).
        bboxes_xy ([np.ndarray]): (frame, num_person, 4|5), (frame, 4|5),
            or (4|5,)
        focal_length ([float],optional): Defined same as your training.
        bbox_scale_factor (float): scale factor for expanding the bbox.
        bbox_format (Literal['xyxy', 'xywh'] ): 'xyxy' means the left-up point
            and right-bottom point of the bbox.
            'xywh' means the left-up point and the width and height of the
            bbox.
    Returns:
        np.ndarray: The vertices in the camera coordinate.
            The shape is (frame,num_person,6890,3) or (frame,6890,3).
        np.ndarray: The intrinsic parameters of the pred_cam.
            The shape is (num_frame, 3, 3).
    """
    # intrinsics of the 224x224 HMR crop (K0) and of the original-image
    # bbox (K1); both come from project helpers defined elsewhere
    K0 = get_default_hmr_intrinsic(
        focal_length=focal_length, det_height=224, det_width=224)
    K1 = convert_bbox_to_intrinsic(
        bboxes_xy,
        bbox_scale_factor=bbox_scale_factor,
        bbox_format=bbox_format)
    # K1K0(RX+T)-> K0(K0_inv K1K0)
    Ks = np.linalg.inv(K0) @ K1 @ K0
    # convert vertices from world to camera
    # weak-perspective params (s, tx, ty) -> translation (tx, ty,
    # 2f / (224 * s)); the 1e-9 guards against a degenerate zero scale
    cam_trans = np.concatenate([
        pred_cams[..., [1]], pred_cams[..., [2]], 2 * focal_length /
        (224 * pred_cams[..., [0]] + 1e-9)
    ], -1)
    verts = verts + cam_trans[..., None, :]
    # apply the per-frame (and per-person) intrinsic correction; a bare
    # (6890, 3) input is promoted to a single frame first
    if verts.ndim == 4:
        verts = np.einsum('fnij,fnkj->fnki', Ks, verts)
    elif verts.ndim == 3:
        verts = np.einsum('fij,fkj->fki', Ks, verts)
    elif verts.ndim == 2:
        verts = np.einsum('fij,fkj->fki', Ks, verts[None])
    return verts, K0 | null |
class StopWatch:
    r"""A helper class to measure FPS and detailed time consuming of each phase
    in a video processing loop or similar scenarios.

    Args:
        window (int): The sliding window size to calculate the running average
            of the time consuming.

    Example:
        >>> from mmpose.utils import StopWatch
        >>> import time
        >>> stop_watch = StopWatch(window=10)
        >>> with stop_watch.timeit('total'):
        >>>     time.sleep(0.1)
        >>>     # 'timeit' support nested use
        >>>     with stop_watch.timeit('phase1'):
        >>>         time.sleep(0.1)
        >>>     with stop_watch.timeit('phase2'):
        >>>         time.sleep(0.2)
        >>>     time.sleep(0.2)
        >>> report = stop_watch.report()
    """

    class _TimerScope:
        """Context manager that times one scope and records it on exit."""

        def __init__(self, owner, name):
            self._owner = owner
            self._name = name

        def __enter__(self):
            self._owner._timer_stack.append((self._name, Timer()))
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            # pop even on exception so nested timers stay balanced
            name, timer = self._owner._timer_stack.pop()
            self._owner._record[name].update(timer.since_start())
            return False

    def __init__(self, window=1):
        self.window = window
        # per-name running averages over the sliding window
        self._record = defaultdict(partial(RunningAverage, window=self.window))
        self._timer_stack = []

    def timeit(self, timer_name='_FPS_'):
        """Time a code snippet with an assigned name.

        Args:
            timer_name (str): The unique name of the interested code snippet
                to handle multiple timers and generate reports. Note that
                '_FPS_' is a special key that the measurement will be in
                `fps` instead of `millisecond`. Also see `report` and
                `report_strings`. Default: '_FPS_'.

        Note:
            Must be used in a ``with`` statement. The previous version was a
            bare generator (it yielded without being decorated with
            ``contextlib.contextmanager``) and therefore was not a valid
            context manager; an explicit context-manager object keeps the
            same push/pop nesting semantics and fixes that.
        """
        return StopWatch._TimerScope(self, timer_name)

    def report(self, key=None):
        """Report timing information.

        Returns:
            dict: The key is the timer name and the value is the
                corresponding average time consuming (milliseconds, or
                frames-per-second for the special '_FPS_' timer).
        """
        result = {
            name: r.average() * 1000.
            for name, r in self._record.items()
        }
        if '_FPS_' in result:
            # convert the average frame period (ms) into frames per second
            result['_FPS_'] = 1000. / result.pop('_FPS_')
        if key is None:
            return result
        return result[key]

    def report_strings(self):
        """Report timing information as display strings.

        Returns:
            list(str): Each element is the information string of a timed
                event, in format of '{timer_name}: {time_in_ms}'.
                Specially, if timer_name is '_FPS_', the result will
                be converted to fps.
        """
        result = self.report()
        strings = []
        if '_FPS_' in result:
            strings.append(f'FPS: {result["_FPS_"]:>5.1f}')
        # NOTE(review): '_FPS_' is still present in `result` here, so it is
        # also listed by the generic formatting below — confirm intended.
        strings += [f'{name}: {val:>3.0f}' for name, val in result.items()]
        return strings

    def reset(self):
        """Drop all recorded timings and any in-flight timers."""
        # Recreate the same structures as __init__. The previous version
        # reset _record to defaultdict(list) (whose values lack the
        # .update() semantics used above) and cleared a non-existent
        # _active_timer_stack attribute, breaking reuse after reset().
        self._record = defaultdict(partial(RunningAverage, window=self.window))
        self._timer_stack = []
def display():
    """Consumer thread: render, annotate and show frames; optionally record.

    Reads frames from ``frame_buffer``, overlays the latest mesh results
    from ``mesh_result_queue`` through the global ``renderer``, draws
    timing and system information, optionally records to ``args.output``,
    and sets ``event_exit`` on shutdown. Relies on module-level globals:
    ``args``, ``frame_buffer``, ``mesh_result_queue``,
    ``mesh_result_queue_mutex``, ``renderer``, ``psutil_proc`` and
    ``event_exit``.
    """
    print('Thread "display" started')
    stop_watch = StopWatch(window=10)
    # initialize result status
    ts_inference = None  # timestamp of the latest inference result
    fps_inference = 0.  # inference FPS
    t_delay_inference = 0.  # inference result time delay
    mesh_results = None
    t_info = []  # upstream time information (list[str])
    # initialize visualization and output
    text_color = (228, 183, 61)  # text color to show time/system information
    vid_out = None  # video writer
    # show instructions
    # NOTE(review): a "v" shortcut is advertised below but no handler for
    # it exists in the key-dispatch at the bottom — confirm.
    print('Keyboard shortcuts: ')
    print('"v": Toggle the visualization of bounding boxes and meshes.')
    print('"Q", "q" or Esc: Exit.')
    while True:
        with stop_watch.timeit('_FPS_'):
            # acquire a frame from buffer
            ts_input, frame = frame_buffer.get()
            # input ending signal
            if ts_input is None:
                break
            img = frame
            # get mesh estimation results
            if len(mesh_result_queue) > 0:
                with mesh_result_queue_mutex:
                    _result = mesh_result_queue.popleft()
                    _ts_input, t_info, mesh_results = _result
                _ts = time.time()
                if ts_inference is not None:
                    fps_inference = 1.0 / (_ts - ts_inference)
                ts_inference = _ts
                t_delay_inference = (_ts - _ts_input) * 1000
            # the most recent mesh result is re-drawn on every frame until
            # a newer one arrives
            if mesh_results:
                pred_cams = mesh_results[0]['camera']
                verts = mesh_results[0]['vertices']
                bboxes_xyxy = mesh_results[0]['bbox']
                verts, _ = convert_verts_to_cam_coord(
                    verts, pred_cams, bboxes_xyxy, focal_length=5000.)
                # show bounding boxes
                mmcv.imshow_bboxes(
                    img,
                    bboxes_xyxy[None],
                    colors='green',
                    top_k=-1,
                    thickness=2,
                    show=False)
                # visualize smpl
                if isinstance(verts, np.ndarray):
                    verts = torch.tensor(verts).to(args.device).squeeze()
                img = renderer(verts, img)
            # delay control
            if args.display_delay > 0:
                t_sleep = args.display_delay * 0.001 - (time.time() - ts_input)
                print(t_sleep)
                if t_sleep > 0:
                    time.sleep(t_sleep)
            t_delay = (time.time() - ts_input) * 1000
            # show time information
            t_info_display = stop_watch.report_strings()  # display fps
            t_info_display.append(f'Inference FPS: {fps_inference:>5.1f}')
            t_info_display.append(f'Delay: {t_delay:>3.0f}')
            t_info_display.append(
                f'Inference Delay: {t_delay_inference:>3.0f}')
            t_info_str = ' | '.join(t_info_display + t_info)
            cv2.putText(img, t_info_str, (20, 20), cv2.FONT_HERSHEY_DUPLEX,
                        0.3, text_color, 1)
            # collect system information
            sys_info = [
                f'RES: {img.shape[1]}x{img.shape[0]}',
                f'Buffer: {frame_buffer.qsize()}/{frame_buffer.maxsize}'
            ]
            if psutil_proc is not None:
                sys_info += [
                    f'CPU: {psutil_proc.cpu_percent():.1f}%',
                    f'MEM: {psutil_proc.memory_percent():.1f}%'
                ]
            sys_info_str = ' | '.join(sys_info)
            cv2.putText(img, sys_info_str, (20, 40), cv2.FONT_HERSHEY_DUPLEX,
                        0.3, text_color, 1)
            # save the output video frame
            if args.output is not None:
                if vid_out is None:
                    # lazily create the writer with the first frame's size
                    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
                    fps = args.out_video_fps
                    frame_size = (img.shape[1], img.shape[0])
                    vid_out = cv2.VideoWriter(args.output, fourcc, fps,
                                              frame_size)
                vid_out.write(img)
            # display
            cv2.imshow('mmhuman3d webcam demo', img)
            keyboard_input = cv2.waitKey(1)
            if keyboard_input in (27, ord('q'), ord('Q')):
                break
    cv2.destroyAllWindows()
    if vid_out is not None:
        vid_out.release()
    event_exit.set() | null |
14,255 | import argparse
import copy
import os
import os.path as osp
import shutil
from pathlib import Path
import cv2
import mmcv
import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from mmhuman3d.apis import init_model
from mmhuman3d.core.visualization.visualize_smpl import visualize_smpl_vibe
from mmhuman3d.data.data_structures.human_data import HumanData
from mmhuman3d.data.datasets import build_dataset
from mmhuman3d.utils.demo_utils import (
convert_crop_cam_to_orig_img,
prepare_frames,
process_mmdet_results,
)
from mmhuman3d.utils.ffmpeg_utils import video_to_images
from mmhuman3d.utils.transforms import rotmat_to_aa
def process_tracking_results(tracking_results_all_frames):
    """Flatten per-frame mmtracking results.

    Returns:
        tuple: ``(tracklet, final_results, max_instance)`` where
        ``tracklet`` holds the frame index of each detection,
        ``final_results`` wraps each detection in its own single-element
        list, and ``max_instance`` is the largest per-frame detection
        count.
    """
    tracklet = []
    final_results = []
    max_instance = 0
    for frame_id, frame_results in enumerate(tracking_results_all_frames):
        max_instance = max(max_instance, len(frame_results))
        for result in frame_results:
            tracklet.append(frame_id)
            final_results.append([result])
    return tracklet, final_results, max_instance
def process_mmdet_results(mmdet_results, cat_id=1, bbox_thr=None):
    """Process mmdet results, and return a list of bboxes.

    Args:
        mmdet_results (list|tuple): mmdet results. A tuple is assumed to
            be ``(det_results, segm_results)``; only det_results is used.
        cat_id (int): category id (default: 1 for human)
        bbox_thr (float): threshold for bounding boxes.

    Returns:
        person_results (list): a list of detected bounding boxes; each
            item is a dict with key ``'bbox'`` holding an
            ``(x1, y1, x2, y2, score)`` row.
    """
    if isinstance(mmdet_results, tuple):
        det_results = mmdet_results[0]
    else:
        det_results = mmdet_results
    # mmdet per-class results are 0-indexed while cat_id is 1-based
    bboxes = np.asarray(det_results[cat_id - 1])
    # Guard the threshold branch against empty detections: an empty list
    # becomes a (0,)-shaped array and the shape[-1] == 5 assert would
    # otherwise fail.
    if bbox_thr is not None and bboxes.size > 0:
        assert bboxes.shape[-1] == 5
        bboxes = bboxes[bboxes[:, 4] > bbox_thr]
    return [{'bbox': bbox} for bbox in bboxes]
def video_to_images(input_path: str,
                    output_folder: str,
                    resolution: Optional[Union[Tuple[int, int],
                                               Tuple[float, float]]] = None,
                    img_format: str = '%06d.png',
                    start: int = 0,
                    end: Optional[int] = None,
                    disable_log: bool = False) -> None:
    """Convert a video to a folder of images.
    Args:
        input_path (str): video file path
        output_folder (str): output folder to store the images
        resolution (Optional[Tuple[int, int]], optional):
            (height, width) of output. defaults to None.
        img_format (str, optional): format of images to be read.
            Defaults to '%06d.png'.
        start (int, optional): start frame index. Inclusive.
            If < 0, will be converted to frame_index range in [0, frame_num].
            Defaults to 0.
        end (int, optional): end frame index. Exclusive.
            Could be positive int or negative int or None.
            If None, all frames from start till the last frame are included.
            Defaults to None.
        disable_log (bool, optional): whether close the ffmpeg command info.
            Defaults to False.
    Raises:
        FileNotFoundError: check the input path
        FileNotFoundError: check the output path
    Returns:
        None
    """
    check_input_path(
        input_path,
        allowed_suffix=['.mp4'],
        tag='input video',
        path_type='file')
    prepare_output_path(
        output_folder,
        allowed_suffix=[],
        tag='output image folder',
        path_type='dir',
        overwrite=True)
    info = vid_info_reader(input_path)
    num_frames = int(info['nb_frames'])
    # Wrap negative indices into [0, num_frames) and clamp to the last
    # frame; `end` stays exclusive and defaults to the full frame count.
    start = (min(start, num_frames - 1) + num_frames) % num_frames
    end = (min(end, num_frames - 1) +
           num_frames) % num_frames if end is not None else num_frames
    # The trim filter keeps frames in [start, end); output images are
    # numbered from 0 using `img_format`.
    command = [
        'ffmpeg', '-i', input_path, '-filter_complex',
        f'[0]trim=start_frame={start}:end_frame={end}[v0]', '-map', '[v0]',
        '-f', 'image2', '-v', 'error', '-start_number', '0', '-threads', '1',
        f'{output_folder}/{img_format}'
    ]
    if resolution:
        height, width = resolution
        # NOTE(review): '-s' is inserted after the input file, so ffmpeg
        # treats it as an output size option — confirm this is intended.
        command.insert(3, '-s')
        command.insert(4, '%dx%d' % (width, height))
    if not disable_log:
        print(f'Running \"{" ".join(command)}\"')
    subprocess.call(command)
def prepare_data_with_mmpose_detection(args, frames_iter):
    """Detect and track humans with MMDetection + MMPose and collect
    whole-body 2D keypoints for each tracked instance.

    Args:
        args: parsed CLI namespace; reads det_config, det_checkpoint,
            pose_config, pose_checkpoint, device, mmpose_bbox_thr,
            input_path and output_path.
        frames_iter: iterable of frames (images), one per video frame.

    Returns:
        tuple: (joints2d, frames_idx, wb_kps, image_folder, max_instance)
            joints2d: per-instance whole-body keypoint arrays.
            frames_idx: frame index of each instance entry.
            wb_kps: dict of per-instance hand/face keypoint slices.
            image_folder: folder holding the extracted/input images.
            max_instance: max number of instances in any single frame.
    """
    det_model = init_detector(
        args.det_config, args.det_checkpoint, device=args.device.lower())
    pose_model = init_pose_model(
        args.pose_config, args.pose_checkpoint, device=args.device.lower())
    dataset = pose_model.cfg.data['test']['type']
    # optional
    return_heatmap = False
    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None
    next_id = 0
    pose_results = []
    all_results = []
    for frame_id, img in tqdm(
            enumerate(mmcv.track_iter_progress(frames_iter))):
        # previous frame's poses are needed by the tracker below
        pose_results_last = pose_results
        mmdet_results = inference_detector(det_model, img)
        # keep the person class bounding boxes.
        person_results = process_mmdet_results(mmdet_results)
        pose_results, returned_outputs = inference_top_down_pose_model(
            pose_model,
            img,
            person_results,
            bbox_thr=args.mmpose_bbox_thr,
            format='xyxy',
            dataset=dataset,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)
        # get track id for each person instance
        pose_results, next_id = get_track_id(pose_results, pose_results_last,
                                             next_id)
        all_results.append(pose_results.copy())
    joints2d = []
    person_id_list = []
    wb_kps = {
        'joints2d_lhand': [],
        'joints2d_rhand': [],
        'joints2d_face': [],
    }
    frames_idx, final_results, max_instance = process_tracking_results(
        all_results)
    # Index ranges below follow the COCO-WholeBody layout (face 23:91,
    # left hand 91:112, right hand 112:133) — TODO(review): confirm this
    # matches the pose model's keypoint convention.
    for results in final_results:
        joints2d.append(results[0]['keypoints'])
        person_id_list.append(results[0]['track_id'])
        wb_kps['joints2d_lhand'].append(results[0]['keypoints'][91:112])
        wb_kps['joints2d_rhand'].append(results[0]['keypoints'][112:133])
        wb_kps['joints2d_face'].append(results[0]['keypoints'][23:91])
    # NOTE(review): if input_path is neither a file nor a directory,
    # `image_folder` is never assigned and the return raises NameError.
    if Path(args.input_path).is_file():
        image_folder = osp.join(args.output_path, 'images')
        os.makedirs(image_folder, exist_ok=True)
        video_to_images(args.input_path, image_folder)
    elif Path(args.input_path).is_dir():
        image_folder = args.input_path
    return joints2d, frames_idx, wb_kps, image_folder, max_instance
14,256 | import argparse
import copy
import os
import os.path as osp
import shutil
from pathlib import Path
import cv2
import mmcv
import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from mmhuman3d.apis import init_model
from mmhuman3d.core.visualization.visualize_smpl import visualize_smpl_vibe
from mmhuman3d.data.data_structures.human_data import HumanData
from mmhuman3d.data.datasets import build_dataset
from mmhuman3d.utils.demo_utils import (
convert_crop_cam_to_orig_img,
prepare_frames,
process_mmdet_results,
)
from mmhuman3d.utils.ffmpeg_utils import video_to_images
from mmhuman3d.utils.transforms import rotmat_to_aa
def video_to_images(input_path: str,
                    output_folder: str,
                    resolution: Optional[Union[Tuple[int, int],
                                               Tuple[float, float]]] = None,
                    img_format: str = '%06d.png',
                    start: int = 0,
                    end: Optional[int] = None,
                    disable_log: bool = False) -> None:
    """Convert a video to a folder of images.
    Args:
        input_path (str): video file path
        output_folder (str): output folder to store the images
        resolution (Optional[Tuple[int, int]], optional):
            (height, width) of output. defaults to None.
        img_format (str, optional): format of images to be read.
            Defaults to '%06d.png'.
        start (int, optional): start frame index. Inclusive.
            If < 0, will be converted to frame_index range in [0, frame_num].
            Defaults to 0.
        end (int, optional): end frame index. Exclusive.
            Could be positive int or negative int or None.
            If None, all frames from start till the last frame are included.
            Defaults to None.
        disable_log (bool, optional): whether close the ffmpeg command info.
            Defaults to False.
    Raises:
        FileNotFoundError: check the input path
        FileNotFoundError: check the output path
    Returns:
        None
    """
    check_input_path(
        input_path,
        allowed_suffix=['.mp4'],
        tag='input video',
        path_type='file')
    prepare_output_path(
        output_folder,
        allowed_suffix=[],
        tag='output image folder',
        path_type='dir',
        overwrite=True)
    info = vid_info_reader(input_path)
    num_frames = int(info['nb_frames'])
    # Wrap negative indices into [0, num_frames) and clamp to the last
    # frame; `end` stays exclusive and defaults to the full frame count.
    start = (min(start, num_frames - 1) + num_frames) % num_frames
    end = (min(end, num_frames - 1) +
           num_frames) % num_frames if end is not None else num_frames
    # The trim filter keeps frames in [start, end); output images are
    # numbered from 0 using `img_format`.
    command = [
        'ffmpeg', '-i', input_path, '-filter_complex',
        f'[0]trim=start_frame={start}:end_frame={end}[v0]', '-map', '[v0]',
        '-f', 'image2', '-v', 'error', '-start_number', '0', '-threads', '1',
        f'{output_folder}/{img_format}'
    ]
    if resolution:
        height, width = resolution
        # NOTE(review): '-s' is inserted after the input file, so ffmpeg
        # treats it as an output size option — confirm this is intended.
        command.insert(3, '-s')
        command.insert(4, '%dx%d' % (width, height))
    if not disable_log:
        print(f'Running \"{" ".join(command)}\"')
    subprocess.call(command)
def prepare_data_with_pifpaf_detection(args, frames_iter):
    """Detect humans with OpenPifPaf and collect whole-body 2D keypoints.

    Args:
        args: parsed CLI namespace; reads input_path, output_path,
            openpifpaf_checkpoint, openpifpaf_threshold plus the
            OpenPifPaf network/decoder options.
        frames_iter: sequence of frames; only its length is used here —
            pixels are read by the OpenPifPaf Stream/loader.

    Returns:
        tuple: (joints2d, frames, wb_kps, image_folder, max_instance)
    """
    max_instance = 0
    num_frames = len(frames_iter)
    # pifpaf person detection
    pp_args = copy.deepcopy(args)
    pp_args.force_complete_pose = True
    ppdecoder.configure(pp_args)
    ppnetwork.Factory.configure(pp_args)
    ppnetwork.Factory.checkpoint = pp_args.openpifpaf_checkpoint
    Predictor.configure(pp_args)
    Stream.configure(pp_args)
    Predictor.batch_size = 1
    Predictor.loader_workers = 1
    predictor = Predictor()
    if Path(args.input_path).is_file():
        # video input: extract frames to disk and stream the video
        image_folder = osp.join(args.output_path, 'images')
        os.makedirs(image_folder, exist_ok=True)
        video_to_images(args.input_path, image_folder)
        capture = Stream(args.input_path, preprocess=predictor.preprocess)
        capture = predictor.dataset(capture)
    elif Path(args.input_path).is_dir():
        # image-folder input: feed the sorted png/jpg files directly
        image_folder = args.input_path
        image_file_names = sorted([
            osp.join(args.input_path, x) for x in os.listdir(args.input_path)
            if x.endswith('.png') or x.endswith('.jpg')
        ])
        capture = predictor.images(image_file_names)
    tracking_results = {}
    for preds, _, meta in tqdm(capture, total=num_frames):
        num_person = 0
        for pid, ann in enumerate(preds):
            if ann.score > args.openpifpaf_threshold:
                num_person += 1
                frame_i = meta['frame_i'] - 1 if 'frame_i' in meta else meta[
                    'dataset_index']
                file_name = meta[
                    'file_name'] if 'file_name' in meta else image_folder
                # '<stem>_f<frame>_p<pid>' is unique per detection, so
                # tracking_results accumulates every detection.
                person_id = file_name.split('/')[-1].split(
                    '.')[0] + '_f' + str(frame_i) + '_p' + str(pid)
                det_wb_kps = ann.data
                # Face keypoints are rolled by 17 below —
                # TODO(review): confirm the target face convention.
                det_face_kps = det_wb_kps[23:91]
                tracking_results[person_id] = {
                    'frames': [frame_i],
                    'joints2d': [det_wb_kps[:17]],
                    'joints2d_lhand': [det_wb_kps[91:112]],
                    'joints2d_rhand': [det_wb_kps[112:133]],
                    'joints2d_face':
                    [np.concatenate([det_face_kps[17:], det_face_kps[:17]])],
                }
        if num_person > max_instance:
            max_instance = num_person
    joints2d = []
    frames = []
    wb_kps = {
        'joints2d_lhand': [],
        'joints2d_rhand': [],
        'joints2d_face': [],
    }
    person_id_list = list(tracking_results.keys())
    for person_id in person_id_list:
        joints2d.extend(tracking_results[person_id]['joints2d'])
        wb_kps['joints2d_lhand'].extend(
            tracking_results[person_id]['joints2d_lhand'])
        wb_kps['joints2d_rhand'].extend(
            tracking_results[person_id]['joints2d_rhand'])
        wb_kps['joints2d_face'].extend(
            tracking_results[person_id]['joints2d_face'])
        frames.extend(tracking_results[person_id]['frames'])
    return joints2d, frames, wb_kps, image_folder, max_instance
14,257 | import argparse
import copy
import os
import os.path as osp
import shutil
from pathlib import Path
import cv2
import mmcv
import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from mmhuman3d.apis import init_model
from mmhuman3d.core.visualization.visualize_smpl import visualize_smpl_vibe
from mmhuman3d.data.data_structures.human_data import HumanData
from mmhuman3d.data.datasets import build_dataset
from mmhuman3d.utils.demo_utils import (
convert_crop_cam_to_orig_img,
prepare_frames,
process_mmdet_results,
)
from mmhuman3d.utils.ffmpeg_utils import video_to_images
from mmhuman3d.utils.transforms import rotmat_to_aa
def init_openpifpaf(parser):
    """Register all OpenPifPaf command-line options on ``parser``."""
    # Same registration order as the individual cli() calls it replaces.
    for register_cli in (ppnetwork.Factory.cli, ppdecoder.cli,
                         Predictor.cli, Stream.cli):
        register_cli(parser)
14,258 | import os
import os.path as osp
import shutil
from argparse import ArgumentParser
import mmcv
import numpy as np
import torch
from mmhuman3d.apis import (
feature_extract,
inference_image_based_model,
init_model,
)
from mmhuman3d.core.visualization.visualize_smpl import visualize_smpl_hmr
from mmhuman3d.data.data_structures.human_data import HumanData
from mmhuman3d.utils.demo_utils import (
prepare_frames,
process_mmdet_results,
process_mmtracking_results,
smooth_process,
)
from mmhuman3d.utils.ffmpeg_utils import array_to_images
from mmhuman3d.utils.transforms import rotmat_to_aa
def get_detection_result(args, frames_iter, mesh_model, extractor):
    """Run person detection (and optional feature extraction) per frame.

    Args:
        args: parsed CLI namespace; reads det_config, det_checkpoint,
            device, det_cat_id, bbox_thr and draw_bbox.
        frames_iter: iterable of frames (images).
        mesh_model: mesh estimator; only cfg.model.type is consulted.
        extractor: optional feature extractor for video-based models.

    Returns:
        tuple(list, list): indices of frames that had detections, and
        the corresponding per-frame detection result lists.
    """
    person_det_model = init_detector(
        args.det_config, args.det_checkpoint, device=args.device.lower())
    frame_id_list = []
    result_list = []
    for i, frame in enumerate(mmcv.track_iter_progress(frames_iter)):
        mmdet_results = inference_detector(person_det_model, frame)
        # keep the person class bounding boxes.
        results = process_mmdet_results(
            mmdet_results, cat_id=args.det_cat_id, bbox_thr=args.bbox_thr)
        # extract features from the input video or image sequences
        if mesh_model.cfg.model.type == 'VideoBodyModelEstimator' \
                and extractor is not None:
            results = feature_extract(
                extractor, frame, results, args.bbox_thr, format='xyxy')
        # drop the frame with no detected results
        if results == []:
            continue
        # vis bboxes
        if args.draw_bbox:
            bboxes = [res['bbox'] for res in results]
            bboxes = np.vstack(bboxes)
            mmcv.imshow_bboxes(
                frame, bboxes, top_k=-1, thickness=2, show=False)
        frame_id_list.append(i)
        result_list.append(results)
    return frame_id_list, result_list
def visualize_smpl_hmr(cam_transl,
                       bbox=None,
                       kp2d=None,
                       focal_length=5000,
                       det_width=224,
                       det_height=224,
                       bbox_format='xyxy',
                       **kwargs) -> None:
    """Simplest way to visualize HMR or SPIN or Smplify pred smpl with origin
    frames and predicted cameras.

    Args:
        cam_transl: predicted weak-perspective camera — presumably
            (scale, tx, ty) per frame; TODO(review) confirm layout.
        bbox: detection boxes used to build per-frame intrinsics;
            recomputed from kp2d when kp2d is given.
        kp2d: 2D keypoints from which boxes are derived when provided.
        focal_length: focal length for the default HMR intrinsic.
        det_width / det_height: size of the detection crop.
        bbox_format: layout of the boxes ('xyxy' or 'xywh').
        **kwargs: forwarded to render_smpl; keys already bound in the
            partial below are dropped.
    """
    if kp2d is not None:
        bbox = convert_kp2d_to_bbox(kp2d, bbox_format=bbox_format)
    Ks = convert_bbox_to_intrinsic(bbox, bbox_format=bbox_format)
    K = torch.Tensor(
        get_default_hmr_intrinsic(
            focal_length=focal_length,
            det_height=det_height,
            det_width=det_width))
    func = partial(
        render_smpl,
        projection='perspective',
        convention='opencv',
        in_ndc=False,
        K=None,
        R=None,
        orig_cam=None,
    )
    if isinstance(cam_transl, np.ndarray):
        cam_transl = torch.Tensor(cam_transl)
    # Weak-perspective (s, tx, ty) to camera translation:
    # tz = 2 * focal / (det_width * s); 1e-9 avoids division by zero.
    T = torch.cat([
        cam_transl[..., [1]], cam_transl[..., [2]], 2 * focal_length /
        (det_width * cam_transl[..., [0]] + 1e-9)
    ], -1)
    # Drop caller kwargs already bound in the partial so render_smpl
    # does not receive duplicate keyword arguments.
    for k in func.keywords.keys():
        if k in kwargs:
            kwargs.pop(k)
    return func(Ks=Ks, K=K, T=T, **kwargs)
class HumanData(dict):
logger = None
SUPPORTED_KEYS = _HumanData_SUPPORTED_KEYS
WARNED_KEYS = []
def __new__(cls: _HumanData, *args: Any, **kwargs: Any) -> _HumanData:
"""New an instance of HumanData.
Args:
cls (HumanData): HumanData class.
Returns:
HumanData: An instance of HumanData.
"""
ret_human_data = super().__new__(cls, args, kwargs)
setattr(ret_human_data, '__data_len__', -1)
setattr(ret_human_data, '__key_strict__', False)
setattr(ret_human_data, '__keypoints_compressed__', False)
return ret_human_data
    def set_logger(cls, logger: Union[logging.Logger, str, None] = None):
        """Set logger of HumanData class.

        Takes ``cls`` — presumably decorated with @classmethod in the
        full source (decorator not visible in this excerpt).

        Args:
            logger (logging.Logger | str | None, optional):
                The way to print summary.
                See `mmcv.utils.print_log()` for details.
                Defaults to None.
        """
        # Class attribute: shared by every HumanData instance.
        cls.logger = logger
def fromfile(cls, npz_path: str) -> _HumanData:
"""Construct a HumanData instance from an npz file.
Args:
npz_path (str):
Path to a dumped npz file.
Returns:
HumanData:
A HumanData instance load from file.
"""
ret_human_data = cls()
ret_human_data.load(npz_path)
return ret_human_data
def new(cls,
source_dict: dict = None,
key_strict: bool = False) -> _HumanData:
"""Construct a HumanData instance from a dict.
Args:
source_dict (dict, optional):
A dict with items in HumanData fashion.
Defaults to None.
key_strict (bool, optional):
Whether to raise error when setting unsupported keys.
Defaults to False.
Returns:
HumanData:
A HumanData instance.
"""
if source_dict is None:
ret_human_data = cls()
else:
ret_human_data = cls(source_dict)
ret_human_data.set_key_strict(key_strict)
return ret_human_data
    def get_key_strict(self) -> bool:
        """Get value of attribute key_strict.

        Returns:
            bool:
                Whether to raise error when setting unsupported keys.
        """
        # Initialized to False in __new__; toggled by set_key_strict().
        return self.__key_strict__
def set_key_strict(self, value: bool):
"""Set value of attribute key_strict.
Args:
value (bool, optional):
Whether to raise error when setting unsupported keys.
Defaults to True.
"""
former__key_strict__ = self.__key_strict__
self.__key_strict__ = value
if former__key_strict__ is False and \
value is True:
self.pop_unsupported_items()
    def check_keypoints_compressed(self) -> bool:
        """Check whether the keypoints are compressed.

        Returns:
            bool:
                Whether the keypoints are compressed.
        """
        # Flag initialized in __new__ and restored by load().
        return self.__keypoints_compressed__
    def load(self, npz_path: str):
        """Load data from npz_path and update them to self.

        Args:
            npz_path (str):
                Path to a dumped npz file.
        """
        supported_keys = self.__class__.SUPPORTED_KEYS
        with np.load(npz_path, allow_pickle=True) as npz_file:
            tmp_data_dict = dict(npz_file)
            for key, value in list(tmp_data_dict.items()):
                if isinstance(value, np.ndarray) and\
                        len(value.shape) == 0:
                    # value is not an ndarray before dump
                    value = value.item()
                elif key in supported_keys and\
                        type(value) != supported_keys[key]['type']:
                    # restore the declared type lost in the npz round-trip
                    value = supported_keys[key]['type'](value)
                if value is None:
                    tmp_data_dict.pop(key)
                elif key == '__key_strict__' or \
                        key == '__data_len__' or\
                        key == '__keypoints_compressed__':
                    self.__setattr__(key, value)
                    # pop the attributes to keep dict clean
                    tmp_data_dict.pop(key)
                elif key == 'bbox_xywh' and value.shape[1] == 4:
                    # legacy bbox without confidence: append a column of ones
                    value = np.hstack([value, np.ones([value.shape[0], 1])])
                    tmp_data_dict[key] = value
                else:
                    tmp_data_dict[key] = value
        self.update(tmp_data_dict)
        self.__set_default_values__()
def dump(self, npz_path: str, overwrite: bool = True):
"""Dump keys and items to an npz file.
Args:
npz_path (str):
Path to a dumped npz file.
overwrite (bool, optional):
Whether to overwrite if there is already a file.
Defaults to True.
Raises:
ValueError:
npz_path does not end with '.npz'.
FileExistsError:
When overwrite is False and file exists.
"""
if not check_path_suffix(npz_path, ['.npz']):
raise ValueError('Not an npz file.')
if not overwrite:
if check_path_existence(npz_path, 'file') == Existence.FileExist:
raise FileExistsError
dict_to_dump = {
'__key_strict__': self.__key_strict__,
'__data_len__': self.__data_len__,
'__keypoints_compressed__': self.__keypoints_compressed__,
}
dict_to_dump.update(self)
np.savez_compressed(npz_path, **dict_to_dump)
    def get_sliced_cache(self, slice_size=10) -> List:
        """Slice the whole HumanData into pieces for HumanDataCacheWriter.

        Args:
            slice_size (int, optional):
                The length of each unit in HumanData cache.
                Defaults to 10.

        Returns:
            List:
                Two dicts for HumanDataCacheWriter.
                Init HumanDataCacheWriter by HumanDataCacheWriter(**Returns[0])
                and set data by
                human_data_cache_writer.update_sliced_dict(Returns[1]).
        """
        keypoints_info = {}
        non_sliced_data = {}
        sliced_data = {}
        slice_num = ceil(self.__data_len__ / slice_size)
        for slice_index in range(slice_num):
            sliced_data[str(slice_index)] = {}
        # dim is None (don't slice), a dict (per-sub-key dims) or an int axis
        dim_dict = self.__get_slice_dim__()
        for key, dim in dim_dict.items():
            # no dim to slice
            if dim is None:
                if key.startswith('keypoints') and\
                        (key.endswith('_mask') or
                         key.endswith('_convention')):
                    keypoints_info[key] = self[key]
                else:
                    non_sliced_data[key] = self[key]
            elif isinstance(dim, dict):
                value_dict = self.get_raw_value(key)
                non_sliced_sub_dict = {}
                for sub_key in value_dict.keys():
                    sub_value = value_dict[sub_key]
                    if dim[sub_key] is None:
                        non_sliced_sub_dict[sub_key] = sub_value
                    else:
                        sub_dim = dim[sub_key]
                        for slice_index in range(slice_num):
                            slice_start = slice_index * slice_size
                            slice_end = min((slice_index + 1) * slice_size,
                                            self.__data_len__)
                            slice_range = slice(slice_start, slice_end)
                            sliced_sub_value = \
                                HumanData.__get_sliced_result__(
                                    sub_value, sub_dim, slice_range
                                )
                            if key not in sliced_data[str(slice_index)]:
                                sliced_data[str(slice_index)][key] = {}
                            sliced_data[str(slice_index)][key][sub_key] = \
                                sliced_sub_value
                if len(non_sliced_sub_dict) > 0:
                    non_sliced_data[key] = non_sliced_sub_dict
            else:
                value = self.get_raw_value(key)
                # slice as ndarray
                if isinstance(value, np.ndarray):
                    slice_list = [
                        slice(None),
                    ] * len(value.shape)
                    for slice_index in range(slice_num):
                        slice_start = slice_index * slice_size
                        slice_end = min((slice_index + 1) * slice_size,
                                        self.__data_len__)
                        # only the slicing axis changes per iteration
                        slice_list[dim] = slice(slice_start, slice_end)
                        sliced_value = value[tuple(slice_list)]
                        sliced_data[str(slice_index)][key] = sliced_value
                # slice as list/tuple
                else:
                    for slice_index in range(slice_num):
                        slice_start = slice_index * slice_size
                        slice_end = min((slice_index + 1) * slice_size,
                                        self.__data_len__)
                        sliced_value = value[slice(slice_start, slice_end)]
                        sliced_data[str(slice_index)][key] = sliced_value
        writer_args_dict = {
            'slice_size': slice_size,
            'keypoints_info': keypoints_info,
            'data_len': self.data_len,
            'non_sliced_data': non_sliced_data,
            'key_strict': self.get_key_strict()
        }
        return writer_args_dict, sliced_data
    def to(self,
           device: Optional[Union[torch.device, str]] = _CPU_DEVICE,
           dtype: Optional[torch.dtype] = None,
           non_blocking: Optional[bool] = False,
           copy: Optional[bool] = False,
           memory_format: Optional[torch.memory_format] = None) -> dict:
        """Convert values in numpy.ndarray type to torch.Tensor, and move
        Tensors to the target device. All keys will exist in the returned dict.

        Args:
            device (Union[torch.device, str], optional):
                A specified device. Defaults to CPU_DEVICE.
            dtype (torch.dtype, optional):
                The data type of the expected torch.Tensor.
                If dtype is None, it is decided according to numpy.ndarry.
                Defaults to None.
            non_blocking (bool, optional):
                When non_blocking, tries to convert asynchronously with
                respect to the host if possible, e.g.,
                converting a CPU Tensor with pinned memory to a CUDA Tensor.
                Defaults to False.
            copy (bool, optional):
                When copy is set, a new Tensor is created even when
                the Tensor already matches the desired conversion.
                No matter what value copy is, Tensor constructed from numpy
                will not share the same memory with the source numpy.ndarray.
                Defaults to False.
            memory_format (torch.memory_format, optional):
                The desired memory format of returned Tensor.
                Not supported by pytorch-cpu.
                Defaults to None.

        Returns:
            dict:
                A dict with all numpy.ndarray values converted into
                torch.Tensor and all Tensors moved to the target device.
        """
        ret_dict = {}
        for key in self.keys():
            raw_value = self.get_raw_value(key)
            tensor_value = None
            if isinstance(raw_value, np.ndarray):
                # clone() detaches the tensor from the numpy buffer so the
                # returned dict never aliases the stored arrays
                tensor_value = torch.from_numpy(raw_value).clone()
            elif isinstance(raw_value, torch.Tensor):
                tensor_value = raw_value
            if tensor_value is None:
                # non-array values are passed through untouched
                ret_dict[key] = raw_value
            else:
                if memory_format is None:
                    ret_dict[key] = \
                        tensor_value.to(device, dtype,
                                        non_blocking, copy)
                else:
                    ret_dict[key] = \
                        tensor_value.to(device, dtype,
                                        non_blocking, copy,
                                        memory_format=memory_format)
        return ret_dict
    def __getitem__(self, key: _KT) -> _VT:
        """Get value defined by HumanData. This function will be called by
        self[key]. In keypoints_compressed mode, if the key contains
        'keypoints', an array with zero-padding at absent keypoint will be
        returned. Call self.get_raw_value(k) to get value without padding.

        Args:
            key (_KT):
                Key in HumanData.

        Returns:
            _VT:
                Value to the key.
        """
        value = super().__getitem__(key)
        if self.__keypoints_compressed__:
            mask_key = f'{key}_mask'
            if key in self and \
                    isinstance(value, np.ndarray) and \
                    'keypoints' in key and \
                    mask_key in self:
                # re-insert zero rows for masked-out keypoints so callers
                # always see the full convention-sized array
                mask_array = np.asarray(super().__getitem__(mask_key))
                value = \
                    self.__class__.__add_zero_pad__(value, mask_array)
        return value
def get_raw_value(self, key: _KT) -> _VT:
"""Get raw value from the dict. It acts the same as
dict.__getitem__(k).
Args:
key (_KT):
Key in dict.
Returns:
_VT:
Value to the key.
"""
value = super().__getitem__(key)
return value
    def get_value_in_shape(self,
                           key: _KT,
                           shape: Union[list, tuple],
                           padding_constant: int = 0) -> np.ndarray:
        """Get value in a specific shape. For each dim, if the required shape
        is smaller than current shape, ndarray will be sliced. Otherwise, it
        will be padded with padding_constant at the end.

        Args:
            key (_KT):
                Key in dict. The value of this key must be
                an instance of numpy.ndarray.
            shape (Union[list, tuple]):
                Shape of the returned array. Its length
                must be equal to value.ndim. Set -1 for
                a dimension if you do not want to edit it.
            padding_constant (int, optional):
                The value to set the padded values for each axis.
                Defaults to 0.

        Raises:
            ValueError:
                A value in shape is neither positive integer nor -1.

        Returns:
            np.ndarray:
                An array in required shape.
        """
        value = self.get_raw_value(key)
        assert isinstance(value, np.ndarray)
        assert value.ndim == len(shape)
        pad_width_list = []
        slice_list = []
        for dim_index in range(len(shape)):
            if shape[dim_index] == -1:
                # no pad or slice
                pad_width_list.append((0, 0))
                slice_list.append(slice(None))
            elif shape[dim_index] > 0:
                # valid shape value
                wid = shape[dim_index] - value.shape[dim_index]
                if wid > 0:
                    # target larger than current: pad `wid` at the end
                    pad_width_list.append((0, wid))
                else:
                    # target smaller or equal: slicing below handles it
                    pad_width_list.append((0, 0))
                slice_list.append(slice(0, shape[dim_index]))
            else:
                # invalid
                raise ValueError
        # pad first, then slice down to the requested shape
        pad_value = np.pad(
            value,
            pad_width=pad_width_list,
            mode='constant',
            constant_values=padding_constant)
        return pad_value[tuple(slice_list)]
    # NOTE(review): the three stubs below look like typing @overload
    # declarations for get_slice; the decorators are not visible in this
    # excerpt — confirm against the original module.
    def get_slice(self, stop: int):
        """Slice [0, stop, 1] of all sliceable values."""
        ...
    def get_slice(self, start: int, stop: int):
        """Slice [start, stop, 1] of all sliceable values."""
        ...
    def get_slice(self, start: int, stop: int, step: int):
        """Slice [start, stop, step] of all sliceable values."""
        ...
    def get_slice(self,
                  arg_0: int,
                  arg_1: Union[int, Any] = None,
                  step: int = 1) -> _HumanData:
        """Slice all sliceable values along major_dim dimension.

        Args:
            arg_0 (int):
                When arg_1 is None, arg_0 is stop and start=0.
                When arg_1 is not None, arg_0 is start.
            arg_1 (Union[int, Any], optional):
                None or where to stop.
                Defaults to None.
            step (int, optional):
                Length of step. Defaults to 1.

        Returns:
            HumanData:
                A new HumanData instance with sliced values.
        """
        ret_human_data = \
            HumanData.new(key_strict=self.get_key_strict())
        # mimic the built-in slice()/range() argument convention
        if arg_1 is None:
            start = 0
            stop = arg_0
        else:
            start = arg_0
            stop = arg_1
        slice_index = slice(start, stop, step)
        dim_dict = self.__get_slice_dim__()
        for key, dim in dim_dict.items():
            # keys not expected be sliced
            if dim is None:
                ret_human_data[key] = self[key]
            elif isinstance(dim, dict):
                # dict values carry a per-sub-key slice dim
                value_dict = self.get_raw_value(key)
                sliced_dict = {}
                for sub_key in value_dict.keys():
                    sub_value = value_dict[sub_key]
                    if dim[sub_key] is None:
                        sliced_dict[sub_key] = sub_value
                    else:
                        sub_dim = dim[sub_key]
                        sliced_sub_value = \
                            HumanData.__get_sliced_result__(
                                sub_value, sub_dim, slice_index)
                        sliced_dict[sub_key] = sliced_sub_value
                ret_human_data[key] = sliced_dict
            else:
                value = self[key]
                sliced_value = \
                    HumanData.__get_sliced_result__(
                        value, dim, slice_index)
                ret_human_data[key] = sliced_value
        # check keypoints compressed
        if self.check_keypoints_compressed():
            ret_human_data.compress_keypoints_by_mask()
        return ret_human_data
    def __get_slice_dim__(self) -> dict:
        """For each key in this HumanData, get the dimension for slicing. 0 for
        default, if no other value specified.

        Returns:
            dict:
                Keys are self.keys().
                Values indicate where to slice.
                None for not expected to be sliced or
                failed.
        """
        supported_keys = self.__class__.SUPPORTED_KEYS
        ret_dict = {}
        for key in self.keys():
            # keys not expected be sliced
            if key in supported_keys and \
                    'dim' in supported_keys[key] and \
                    supported_keys[key]['dim'] is None:
                ret_dict[key] = None
            else:
                value = self[key]
                if isinstance(value, dict) and len(value) > 0:
                    ret_dict[key] = {}
                    for sub_key in value.keys():
                        try:
                            sub_value_len = len(value[sub_key])
                            # NOTE(review): reads 'dim' from the stored data
                            # dict itself, not from SUPPORTED_KEYS — confirm
                            # this is the intended source of the dim.
                            if 'dim' in value:
                                ret_dict[key][sub_key] = value['dim']
                            elif sub_value_len != self.__data_len__:
                                ret_dict[key][sub_key] = None
                            else:
                                ret_dict[key][sub_key] = 0
                        except TypeError:
                            # sub_value has no len(); cannot be sliced
                            ret_dict[key][sub_key] = None
                    continue
                # instance cannot be sliced without len method
                try:
                    value_len = len(value)
                except TypeError:
                    ret_dict[key] = None
                    continue
                # slice on dim 0 by default
                slice_dim = 0
                if key in supported_keys and \
                        'dim' in supported_keys[key]:
                    slice_dim = \
                        supported_keys[key]['dim']
                data_len = value_len if slice_dim == 0 \
                    else value.shape[slice_dim]
                # dim not for slice
                if data_len != self.__data_len__:
                    ret_dict[key] = None
                    continue
                else:
                    ret_dict[key] = slice_dim
        return ret_dict
    def __setitem__(self, key: _KT, val: _VT) -> None:
        """Set self[key] to value. Only be called when using
        human_data[key] = val. Methods like update won't call __setitem__.
        In keypoints_compressed mode, if the key contains 'keypoints',
        and f'{key}_mask' is in self.keys(), invalid zeros
        will be removed before setting value.

        Args:
            key (_KT):
                Key in HumanData.
                Better be an element in HumanData.SUPPORTED_KEYS.
                If not, an Error will be raised in key_strict mode.
            val (_VT):
                Value to the key.

        Raises:
            KeyError:
                self.get_key_strict() is True and
                key cannot be found in
                HumanData.SUPPORTED_KEYS.
            ValueError:
                Value is supported but doesn't match definition.
            ValueError:
                self.check_keypoints_compressed() is True and
                mask of a keypoint item is missing.
        """
        self.__check_key__(key)
        self.__check_value__(key, val)
        # if it can be compressed by mask
        if self.__keypoints_compressed__:
            class_logger = self.__class__.logger
            if 'keypoints' in key and \
                    '_mask' in key:
                # masks define the compression layout and must stay fixed
                msg = 'Mask cannot be modified ' +\
                    'in keypoints_compressed mode.'
                print_log(msg=msg, logger=class_logger, level=logging.WARN)
                return
            elif isinstance(val, np.ndarray) and \
                    'keypoints' in key and \
                    '_mask' not in key:
                mask_key = f'{key}_mask'
                if mask_key in self:
                    # strip zero rows for absent keypoints before storing
                    mask_array = np.asarray(super().__getitem__(mask_key))
                    val = \
                        self.__class__.__remove_zero_pad__(val, mask_array)
                else:
                    msg = f'Mask for {key} has not been set.' +\
                        f' Please set {mask_key} before compression.'
                    print_log(
                        msg=msg, logger=class_logger, level=logging.ERROR)
                    raise ValueError
        dict.__setitem__(self, key, val)
    def set_raw_value(self, key: _KT, val: _VT) -> None:
        """Set the raw value of self[key] to val after key check. It acts the
        same as dict.__setitem__(self, key, val) if the key satisfied
        constraints.

        Args:
            key (_KT):
                Key in dict.
            val (_VT):
                Value to the key.

        Raises:
            KeyError:
                self.get_key_strict() is True and
                key cannot be found in
                HumanData.SUPPORTED_KEYS.
            ValueError:
                Value is supported but doesn't match definition.
        """
        self.__check_key__(key)
        self.__check_value__(key, val)
        # dict.__setitem__ skips the keypoint-compression handling that
        # self.__setitem__ would apply
        dict.__setitem__(self, key, val)
def pop_unsupported_items(self) -> None:
"""Find every item with a key not in HumanData.SUPPORTED_KEYS, and pop
it to save memory."""
for key in list(self.keys()):
if key not in self.__class__.SUPPORTED_KEYS:
self.pop(key)
    def __check_key__(self, key: Any) -> _KeyCheck:
        """Check whether the key matches definition in
        HumanData.SUPPORTED_KEYS.

        Args:
            key (Any):
                Key in HumanData.

        Returns:
            _KeyCheck:
                PASS, WARN or ERROR.

        Raises:
            KeyError:
                self.get_key_strict() is True and
                key cannot be found in
                HumanData.SUPPORTED_KEYS.
        """
        ret_key_check = _KeyCheck.PASS
        if self.get_key_strict():
            if key not in self.__class__.SUPPORTED_KEYS:
                ret_key_check = _KeyCheck.ERROR
        else:
            if key not in self.__class__.SUPPORTED_KEYS and \
                    key not in self.__class__.WARNED_KEYS:
                # log warning message at the first time
                # (WARNED_KEYS is class-level: one warning per key per process)
                ret_key_check = _KeyCheck.WARN
                self.__class__.WARNED_KEYS.append(key)
        if ret_key_check == _KeyCheck.ERROR:
            raise KeyError(self.__class__.__get_key_error_msg__(key))
        elif ret_key_check == _KeyCheck.WARN:
            class_logger = self.__class__.logger
            if class_logger == 'silent':
                pass
            else:
                print_log(
                    msg=self.__class__.__get_key_warn_msg__(key),
                    logger=class_logger,
                    level=logging.WARN)
        return ret_key_check
def __check_value__(self, key: Any, val: Any) -> bool:
"""Check whether the value matches definition in
HumanData.SUPPORTED_KEYS.
Args:
key (Any):
Key in HumanData.
val (Any):
Value to the key.
Returns:
bool:
True for matched, ortherwise False.
Raises:
ValueError:
Value is supported but doesn't match definition.
"""
ret_bool = self.__check_value_type__(key, val) and\
self.__check_value_shape__(key, val) and\
self.__check_value_len__(key, val)
if not ret_bool:
raise ValueError(self.__class__.__get_value_error_msg__())
return ret_bool
    def __check_value_type__(self, key: Any, val: Any) -> bool:
        """Check whether the type of val matches definition in
        HumanData.SUPPORTED_KEYS.

        Args:
            key (Any):
                Key in HumanData.
            val (Any):
                Value to the key.

        Returns:
            bool:
                If type doesn't match, return False.
                Else return True.
        """
        ret_bool = True
        supported_keys = self.__class__.SUPPORTED_KEYS
        # check definition
        if key in supported_keys:
            # check type
            # NOTE(review): exact type match (not isinstance) — subclasses
            # are rejected; confirm this strictness is intended.
            if type(val) != supported_keys[key]['type']:
                ret_bool = False
        if not ret_bool:
            expected_type = supported_keys[key]['type']
            err_msg = 'Type check Failed:\n'
            err_msg += f'key={str(key)}\n'
            err_msg += f'type(val)={type(val)}\n'
            err_msg += f'expected type={expected_type}\n'
            print_log(
                msg=err_msg, logger=self.__class__.logger, level=logging.ERROR)
        return ret_bool
    def __check_value_shape__(self, key: Any, val: Any) -> bool:
        """Check whether the shape of val matches definition in
        HumanData.SUPPORTED_KEYS.

        Args:
            key (Any):
                Key in HumanData.
            val (Any):
                Value to the key.

        Returns:
            bool:
                If expected shape is defined and doesn't match,
                return False.
                Else return True.
        """
        ret_bool = True
        supported_keys = self.__class__.SUPPORTED_KEYS
        # check definition
        if key in supported_keys:
            # check shape
            if 'shape' in supported_keys[key]:
                val_shape = val.shape
                for shape_ind in range(len(supported_keys[key]['shape'])):
                    # length not match
                    if shape_ind >= len(val_shape):
                        ret_bool = False
                        break
                    expect_val = supported_keys[key]['shape'][shape_ind]
                    # value not match; non-positive entries (-1) act as
                    # wildcards for that axis
                    if expect_val > 0 and \
                            expect_val != val_shape[shape_ind]:
                        ret_bool = False
                        break
        if not ret_bool:
            expected_shape = str(supported_keys[key]['shape'])
            expected_shape = expected_shape.replace('-1', 'Any')
            err_msg = 'Shape check Failed:\n'
            err_msg += f'key={str(key)}\n'
            err_msg += f'val.shape={val_shape}\n'
            err_msg += f'expected shape={expected_shape}\n'
            print_log(
                msg=err_msg, logger=self.__class__.logger, level=logging.ERROR)
        return ret_bool
# NOTE(review): upstream exposes this as a @property (paired with the
# setter below); the decorator appears to be lost in this dump — confirm.
def data_len(self) -> int:
    """Get the temporal length of this HumanData instance.

    Returns:
        int:
            Number of frames related to this instance.
    """
    # __data_len__ is initialized to -1 in __new__, meaning "not set yet"
    return self.__data_len__
# NOTE(review): upstream decorates this with @data_len.setter; the
# decorator appears to be lost in this dump — confirm.
def data_len(self, value: int):
    """Set the temporal length of this HumanData instance.

    Args:
        value (int):
            Number of frames related to this instance.
    """
    self.__data_len__ = value
def __check_value_len__(self, key: Any, val: Any) -> bool:
    """Check whether the temporal length of val matches other values.

    Args:
        key (Any):
            Key in HumanData.
        val (Any):
            Value to the key.

    Returns:
        bool:
            If temporal dim is defined and temporal length doesn't match,
            return False.
            Else return True.
    """
    ret_bool = True
    supported_keys = self.__class__.SUPPORTED_KEYS
    # check definition
    if key in supported_keys:
        # check temporal length only for keys that declare a slice dim
        if 'dim' in supported_keys[key] and \
                supported_keys[key]['dim'] is not None:
            val_slice_dim = supported_keys[key]['dim']
            if supported_keys[key]['type'] == dict:
                # dict values are measured through their 'slice_key' entry
                slice_key = supported_keys[key]['slice_key']
                val_data_len = val[slice_key].shape[val_slice_dim]
            else:
                val_data_len = val.shape[val_slice_dim]
            if self.data_len < 0:
                # no data_len yet, assign a new one
                self.data_len = val_data_len
            else:
                # check if val_data_len matches recorded data_len
                if self.data_len != val_data_len:
                    ret_bool = False
    if not ret_bool:
        err_msg = 'Temporal check Failed:\n'
        err_msg += f'key={str(key)}\n'
        err_msg += f'val\'s data_len={val_data_len}\n'
        err_msg += f'expected data_len={self.data_len}\n'
        print_log(
            msg=err_msg, logger=self.__class__.logger, level=logging.ERROR)
    return ret_bool
def generate_mask_from_confidence(self, keys=None) -> None:
    """Generate mask from keypoints' confidence. Keypoints that have zero
    confidence in all occurrences will have a zero mask. Note that the last
    value of the keypoint is assumed to be confidence.

    Args:
        keys: None, str, or list of str.
            None: all keys with `keypoints` in it will have mask
                generated from their confidence.
            str: key of the keypoint, the mask has name f'{key}_mask'.
            list of str: a list of keys of the keypoints.
                Generate mask for multiple keypoints.
            Defaults to None.

    Returns:
        None

    Raises:
        KeyError:
            A key is not found.
    """
    if keys is None:
        # collect every keypoints array; masks themselves are skipped
        keys = []
        for key in self.keys():
            val = self.get_raw_value(key)
            if isinstance(val, np.ndarray) and \
                    'keypoints' in key and \
                    '_mask' not in key:
                keys.append(key)
    elif isinstance(keys, str):
        keys = [keys]
    elif isinstance(keys, list):
        for key in keys:
            assert isinstance(key, str)
    else:
        raise TypeError(f'`Keys` must be None, str, or list of str, '
                        f'got {type(keys)}.')
    update_dict = {}
    for kpt_key in keys:
        kpt_array = self.get_raw_value(kpt_key)
        num_joints = kpt_array.shape[-2]
        # if all conf of a joint are zero, this joint is masked;
        # flatten every leading axis so each column is one joint
        joint_conf = kpt_array[..., -1].reshape(-1, num_joints)
        mask_array = (joint_conf > 0).astype(np.uint8).max(axis=0)
        assert len(mask_array) == num_joints
        # generate mask
        update_dict[f'{kpt_key}_mask'] = mask_array
    self.update(update_dict)
def compress_keypoints_by_mask(self) -> None:
    """If a key contains 'keypoints', and f'{key}_mask' is in self.keys(),
    invalid zeros will be removed and f'{key}_mask' will be locked.

    Raises:
        KeyError:
            A key contains 'keypoints' has been found
            but its corresponding mask is missing.
    """
    # compressing twice is a programming error
    assert self.__keypoints_compressed__ is False
    key_pairs = []
    for key in self.keys():
        mask_key = f'{key}_mask'
        val = self.get_raw_value(key)
        if isinstance(val, np.ndarray) and \
                'keypoints' in key and \
                '_mask' not in key:
            if mask_key in self:
                key_pairs.append([key, mask_key])
            else:
                msg = f'Mask for {key} has not been set.' +\
                    f'Please set {mask_key} before compression.'
                raise KeyError(msg)
    compressed_dict = {}
    for kpt_key, mask_key in key_pairs:
        kpt_array = self.get_raw_value(kpt_key)
        mask_array = np.asarray(self.get_raw_value(mask_key))
        compressed_kpt = \
            self.__class__.__remove_zero_pad__(kpt_array, mask_array)
        compressed_dict[kpt_key] = compressed_kpt
    # set value after all pairs are compressed
    self.update(compressed_dict)
    self.__keypoints_compressed__ = True
def decompress_keypoints(self) -> None:
    """If a key contains 'keypoints', and f'{key}_mask' is in self.keys(),
    invalid zeros will be inserted to the right places and f'{key}_mask'
    will be unlocked.

    Raises:
        KeyError:
            A key contains 'keypoints' has been found
            but its corresponding mask is missing.
    """
    # only meaningful after compress_keypoints_by_mask() was called
    assert self.__keypoints_compressed__ is True
    key_pairs = []
    for key in self.keys():
        mask_key = f'{key}_mask'
        val = self.get_raw_value(key)
        if isinstance(val, np.ndarray) and \
                'keypoints' in key and \
                '_mask' not in key:
            if mask_key in self:
                key_pairs.append([key, mask_key])
            else:
                class_logger = self.__class__.logger
                msg = f'Mask for {key} has not been found.' +\
                    f'Please remove {key} before decompression.'
                print_log(
                    msg=msg, logger=class_logger, level=logging.ERROR)
                raise KeyError
    decompressed_dict = {}
    for kpt_key, mask_key in key_pairs:
        mask_array = np.asarray(self.get_raw_value(mask_key))
        compressed_kpt = self.get_raw_value(kpt_key)
        kpt_array = \
            self.__class__.__add_zero_pad__(compressed_kpt, mask_array)
        decompressed_dict[kpt_key] = kpt_array
    # set value after all pairs are decompressed
    self.update(decompressed_dict)
    self.__keypoints_compressed__ = False
def dump_by_pickle(self, pkl_path: str, overwrite: bool = True) -> None:
    """Dump keys and items to a pickle file. It's a secondary dump method,
    when a HumanData instance is too large to be dumped by self.dump()

    Args:
        pkl_path (str):
            Path to a dumped pickle file.
        overwrite (bool, optional):
            Whether to overwrite if there is already a file.
            Defaults to True.

    Raises:
        ValueError:
            pkl_path does not end with '.pkl'.
        FileExistsError:
            When overwrite is False and file exists.
    """
    if not check_path_suffix(pkl_path, ['.pkl']):
        raise ValueError('Not an pkl file.')
    if not overwrite:
        if check_path_existence(pkl_path, 'file') == Existence.FileExist:
            raise FileExistsError
    # instance attributes are stored alongside the dict payload so that
    # load_by_pickle() can restore them
    dict_to_dump = {
        '__key_strict__': self.__key_strict__,
        '__data_len__': self.__data_len__,
        '__keypoints_compressed__': self.__keypoints_compressed__,
    }
    dict_to_dump.update(self)
    with open(pkl_path, 'wb') as f_writeb:
        pickle.dump(
            dict_to_dump, f_writeb, protocol=pickle.HIGHEST_PROTOCOL)
def load_by_pickle(self, pkl_path: str) -> None:
    """Load data from pkl_path and update them to self.

    When a HumanData Instance was dumped by
    self.dump_by_pickle(), use this to load.

    Args:
        pkl_path (str):
            Path to a dumped pickle file.
    """
    with open(pkl_path, 'rb') as f_readb:
        tmp_data_dict = pickle.load(f_readb)
        for key, value in list(tmp_data_dict.items()):
            if value is None:
                # drop empty entries
                tmp_data_dict.pop(key)
            elif key == '__key_strict__' or \
                    key == '__data_len__' or\
                    key == '__keypoints_compressed__':
                # restore instance attributes saved by dump_by_pickle()
                self.__setattr__(key, value)
                # pop the attributes to keep dict clean
                tmp_data_dict.pop(key)
            elif key == 'bbox_xywh' and value.shape[1] == 4:
                # legacy bbox without confidence: append a ones column
                value = np.hstack([value, np.ones([value.shape[0], 1])])
                tmp_data_dict[key] = value
            else:
                tmp_data_dict[key] = value
        self.update(tmp_data_dict)
    self.__set_default_values__()
def __set_default_values__(self) -> None:
    """For older versions of HumanData, call this method to apply missing
    values (also attributes)."""
    supported_keys = self.__class__.SUPPORTED_KEYS
    # infer data_len from the first sliceable value when it was never set
    if self.__data_len__ == -1:
        for key in supported_keys:
            if key in self and \
                    'dim' in supported_keys[key] and\
                    supported_keys[key]['dim'] is not None:
                if 'slice_key' in supported_keys[key] and\
                        supported_keys[key]['type'] == dict:
                    # dict values are measured via their slice_key entry
                    sub_key = supported_keys[key]['slice_key']
                    slice_dim = supported_keys[key]['dim']
                    self.__data_len__ = \
                        self[key][sub_key].shape[slice_dim]
                else:
                    slice_dim = supported_keys[key]['dim']
                    self.__data_len__ = self[key].shape[slice_dim]
                break
    # older files may lack per-keypoints convention entries; default them
    for key in list(self.keys()):
        convention_key = f'{key}_convention'
        if key.startswith('keypoints') and \
                not key.endswith('_mask') and \
                not key.endswith('_convention') and \
                convention_key not in self:
            self[convention_key] = 'human_data'
# NOTE(review): upstream decorates this with @classmethod; the decorator
# appears to be lost in this dump — confirm.
def concatenate(cls, human_data_0: _HumanData,
                human_data_1: _HumanData) -> _HumanData:
    """Concatenate two human_data. All keys will be kept it the returned
    human_data. If either value from human_data_0 or human_data_1 matches
    data_len from its HumanData, the two values will be concatenated as a
    single value. If not, postfix will be added to the key to specify
    source of the value.

    Args:
        human_data_0 (_HumanData)
        human_data_1 (_HumanData)

    Returns:
        _HumanData:
            A new human_data instance with all concatenated data.
    """
    # key_strict=False: the result may contain postfixed keys that are
    # not in SUPPORTED_KEYS
    ret_human_data = cls.new(key_strict=False)
    set_0 = set(human_data_0.keys())
    set_1 = set(human_data_1.keys())
    common_keys = set_0.intersection(set_1)
    # per-key slice dims; None means "not sliceable along data_len"
    dim_dict_0 = human_data_0.__get_slice_dim__()
    dim_dict_1 = human_data_1.__get_slice_dim__()
    for key in common_keys:
        value_0 = human_data_0[key]
        value_1 = human_data_1[key]
        # align type
        value_0 = list(value_0) if isinstance(value_0, tuple)\
            else value_0
        value_1 = list(value_1) if isinstance(value_1, tuple)\
            else value_1
        assert type(value_0) == type(value_1)
        # align convention: conventions must agree to concatenate
        if key.startswith('keypoints') and\
                key.endswith('_convention'):
            assert value_0 == value_1
            ret_human_data[key] = value_0
            continue
        # mask_0 and mask_1: a joint is valid only if valid in both
        elif key.startswith('keypoints') and\
                key.endswith('_mask'):
            new_mask = value_0 * value_1
            ret_human_data[key] = new_mask
            continue
        # go through the sub dict
        if isinstance(value_0, dict):
            sub_dict = {}
            for sub_key, sub_value_0 in value_0.items():
                # only found in value_0
                if sub_key not in value_1:
                    sub_dict[sub_key] = sub_value_0
                # found in both values
                else:
                    sub_value_1 = value_1[sub_key]
                    concat_sub_dict = cls.__concat_value__(
                        key=sub_key,
                        value_0=sub_value_0,
                        dim_0=dim_dict_0[key][sub_key],
                        value_1=sub_value_1,
                        dim_1=dim_dict_1[key][sub_key])
                    sub_dict.update(concat_sub_dict)
            for sub_key, sub_value_1 in value_1.items():
                if sub_key not in value_0:
                    sub_dict[sub_key] = sub_value_1
            ret_human_data[key] = sub_dict
        # try concat
        else:
            concat_dict = cls.__concat_value__(
                key=key,
                value_0=value_0,
                dim_0=dim_dict_0[key],
                value_1=value_1,
                dim_1=dim_dict_1[key])
            ret_human_data.update(concat_dict)
    # check exclusive keys
    for key, value in human_data_0.items():
        if key not in common_keys:
            # value not for concat and slice
            if dim_dict_0[key] is None:
                ret_human_data[key] = value
            # value aligned with data_len of HumanData_0
            else:
                ret_human_data[f'{key}_0'] = value
    for key, value in human_data_1.items():
        if key not in common_keys:
            # same as above
            if dim_dict_1[key] is None:
                ret_human_data[key] = value
            else:
                ret_human_data[f'{key}_1'] = value
    return ret_human_data
def __concat_value__(cls, key: Any, value_0: Any, value_1: Any,
                     dim_0: Union[None, int], dim_1: Union[None,
                                                           int]) -> dict:
    """Concat two values from two different HumanData.

    Args:
        key (Any):
            The common key of the two values.
        value_0 (Any):
            Value from 0.
        value_1 (Any):
            Value from 1.
        dim_0 (Union[None, int]):
            The dim for concat and slice. None for N/A.
        dim_1 (Union[None, int]):
            The dim for concat and slice. None for N/A.

    Returns:
        dict:
            Dict for concatenated result.
    """
    # A value without a slice dim cannot be concatenated — keep both
    # originals, distinguished by a source postfix.
    if dim_0 is None or dim_1 is None:
        return {f'{key}_0': value_0, f'{key}_1': value_1}
    if isinstance(value_0, list):
        merged = value_0 + value_1
    else:
        # ndarray (or ndarray-like): join along the declared slice dim
        merged = np.concatenate((value_0, value_1), axis=dim_0)
    return {key: merged}
def __add_zero_pad__(cls, compressed_array: np.ndarray,
                     mask_array: np.ndarray) -> np.ndarray:
    """Pad zeros to a compressed keypoints array.

    Args:
        compressed_array (np.ndarray):
            A compressed keypoints array, shape (frames, valid_joints, dim).
        mask_array (np.ndarray):
            The mask records compression relationship.

    Returns:
        np.ndarray:
            A keypoints array in full-size.
    """
    # columns of the full array that hold real (non-padded) joints
    valid_indices = np.flatnonzero(mask_array == 1)
    assert len(valid_indices) == compressed_array.shape[1]
    n_frames, _, n_dim = compressed_array.shape
    full_shape = (n_frames, mask_array.shape[0], n_dim)
    padded = np.zeros(full_shape, dtype=compressed_array.dtype)
    # scatter the compressed joints back to their original slots;
    # masked-out joints stay all-zero
    padded[:, valid_indices, :] = compressed_array
    return padded
def __remove_zero_pad__(cls, zero_pad_array: np.ndarray,
                        mask_array: np.ndarray) -> np.ndarray:
    """Remove zero-padding from a full-size keypoints array.

    Args:
        zero_pad_array (np.ndarray):
            A keypoints array in full-size.
        mask_array (np.ndarray):
            The mask records compression relationship.

    Returns:
        np.ndarray:
            A compressed keypoints array.
    """
    assert mask_array.shape[0] == zero_pad_array.shape[1]
    # keep only the joint columns whose mask is 1
    valid_indices = np.flatnonzero(mask_array == 1)
    return zero_pad_array.take(valid_indices, axis=1)
def __get_key_warn_msg__(cls, key: Any) -> str:
    """Build the warning message emitted when a key is missing from
    SUPPORTED_KEYS but key_strict is off.

    Args:
        key (Any):
            The key with wrong.

    Returns:
        str:
            The warning message.
    """
    # Same wording as before: state the problem, then how to silence
    # or harden the check.
    return (f'{key} is absent in'
            f' {cls.__name__}.SUPPORTED_KEYS.\n'
            'Ignore this if you know exactly'
            ' what you are doing.\n'
            'Otherwise, Call self.set_key_strict(True)'
            ' to avoid wrong keys.\n')
def __get_key_error_msg__(cls, key: Any) -> str:
    """Build the error message raised when a key is missing from
    SUPPORTED_KEYS and key_strict is on.

    Args:
        key (Any):
            The key with wrong.

    Returns:
        str:
            The error message.
    """
    # Same wording as before: the offending key plus the opt-out hint.
    return (f'{key} is absent in'
            f' {cls.__name__}.SUPPORTED_KEYS.\n'
            'Call self.set_key_strict(False)'
            ' to allow unsupported keys.\n')
def __get_value_error_msg__(cls) -> str:
    """Get the error message when a value fails the check.

    Returns:
        str:
            The error message.
    """
    # Fixed grammar in the user-facing message: "An supported" -> "A
    # supported". Details of the failure are logged separately by the
    # individual check methods.
    error_message = \
        'A supported value doesn\'t ' +\
        'match definition.\n'
    suggestion_message = \
        'See error log for details.\n'
    return error_message + suggestion_message
def __get_sliced_result__(
        cls, input_data: Union[np.ndarray, list, tuple], slice_dim: int,
        slice_range: slice) -> Union[np.ndarray, list, tuple]:
    """Slice input_data along slice_dim with slice_range.

    Args:
        input_data (Union[np.ndarray, list, tuple]):
            Data to be sliced.
        slice_dim (int):
            Dimension to be sliced.
        slice_range (slice):
            An instance of class slice.

    Returns:
        Union[np.ndarray, list, tuple]:
            A slice of input_data.
    """
    if not isinstance(input_data, np.ndarray):
        # list/tuple can only be sliced along their first dimension
        return input_data[slice_range]
    # ndarray: take everything on every axis except slice_dim
    index = [slice(None)] * input_data.ndim
    index[slice_dim] = slice_range
    return input_data[tuple(index)]
def smooth_process(x,
                   smooth_type='savgol',
                   cfg_base_dir='configs/_base_/post_processing/'):
    """Smooth the array with the specified smoothing type.

    Args:
        x (np.ndarray): Shape should be (frame,num_person,K,C)
            or (frame,K,C).
        smooth_type (str, optional): Smooth type.
            choose in ['oneeuro', 'gaus1d', 'savgol','smoothnet',
            'smoothnet_windowsize8','smoothnet_windowsize16',
            'smoothnet_windowsize32','smoothnet_windowsize64'].
            Defaults to 'savgol'. 'smoothnet' is default with windowsize=8.
        cfg_base_dir (str, optional): Config base dir,
            default configs/_base_/post_processing/

    Raises:
        ValueError: check the input smoothing type.

    Returns:
        np.ndarray: Smoothed data. The shape should be
            (frame,num_person,K,C) or (frame,K,C).
    """
    # 'smoothnet' is an alias for the smallest window size
    if smooth_type == 'smoothnet':
        smooth_type = 'smoothnet_windowsize8'
    assert smooth_type in [
        'oneeuro', 'gaus1d', 'savgol', 'smoothnet_windowsize8',
        'smoothnet_windowsize16', 'smoothnet_windowsize32',
        'smoothnet_windowsize64'
    ]
    # every smooth type has a config file of the same name under
    # cfg_base_dir
    cfg = os.path.join(cfg_base_dir, smooth_type + '.py')
    if isinstance(cfg, str):
        cfg = mmcv.Config.fromfile(cfg)
    elif not isinstance(cfg, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(cfg)}')
    # copy so the caller's array is not modified in place
    x = x.copy()
    assert x.ndim == 3 or x.ndim == 4
    smooth_func = build_post_processing(dict(cfg['smooth_cfg']))
    if x.ndim == 4:
        # 4D input: smooth each person track independently
        for i in range(x.shape[1]):
            x[:, i] = smooth_func(x[:, i])
    elif x.ndim == 3:
        x = smooth_func(x)
    return x
def array_to_images(
    image_array: np.ndarray,
    output_folder: str,
    img_format: str = '%06d.png',
    resolution: Optional[Union[Tuple[int, int], Tuple[float, float]]] = None,
    disable_log: bool = False,
) -> None:
    """Convert an array to images directly.

    Args:
        image_array (np.ndarray): shape should be (f * h * w * 3).
        output_folder (str): output folder for the images.
        img_format (str, optional): format of the images.
            Defaults to '%06d.png'.
        resolution (Optional[Union[Tuple[int, int], Tuple[float, float]]],
            optional): resolution(height, width) of output.
            Defaults to None.
        disable_log (bool, optional): whether close the ffmpeg command info.
            Defaults to False.

    Raises:
        FileNotFoundError: check output folder.
        TypeError: check input array.

    Returns:
        None
    """
    prepare_output_path(
        output_folder,
        allowed_suffix=[],
        tag='output image folder',
        path_type='dir',
        overwrite=True)
    if not isinstance(image_array, np.ndarray):
        raise TypeError('Input should be np.ndarray.')
    assert image_array.ndim == 4
    assert image_array.shape[-1] == 3
    # NOTE(review): frames are NOT resized here; the resolution is only
    # declared to ffmpeg. If it differs from image_array's actual
    # height/width the raw buffer will be misinterpreted — confirm
    # callers always pass a matching resolution.
    if resolution:
        height, width = resolution
    else:
        height, width = image_array.shape[1], image_array.shape[2]
    command = [
        'ffmpeg',
        '-y',  # (optional) overwrite output file if it exists
        '-f',
        'rawvideo',
        '-s',
        f'{int(width)}x{int(height)}',  # size of one frame
        '-pix_fmt',
        'bgr24',  # bgr24 for matching OpenCV
        '-loglevel',
        'error',
        '-threads',
        '4',
        '-i',
        '-',  # The input comes from a pipe
        '-f',
        'image2',
        '-start_number',
        '0',
        os.path.join(output_folder, img_format),
    ]
    if not disable_log:
        print(f'Running \"{" ".join(command)}\"')
    process = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stderr=subprocess.PIPE,
        bufsize=10**8,
        close_fds=True)
    if process.stdin is None or process.stderr is None:
        raise BrokenPipeError('No buffer received.')
    index = 0
    # stream frames one by one through ffmpeg's stdin
    while True:
        if index >= image_array.shape[0]:
            break
        process.stdin.write(image_array[index].tobytes())
        index += 1
    process.stdin.close()
    process.stderr.close()
    process.wait()
def rotmat_to_aa(
    matrix: Union[torch.Tensor, numpy.ndarray]
) -> Union[torch.Tensor, numpy.ndarray]:
    """Convert rotation matrixs to axis angles.

    Args:
        matrix (Union[torch.Tensor, numpy.ndarray]): input shape
            should be (..., 3, 3). ndim of input is unlimited.

    Returns:
        Union[torch.Tensor, numpy.ndarray]: shape would be (..., 3).

    Raises:
        ValueError: If the trailing two dims of `matrix` are not (3, 3).
    """
    if matrix.shape[-1] != 3 or matrix.shape[-2] != 3:
        # fixed message: a stray 'f' before the shape was previously
        # printed ("shape f(...)")
        raise ValueError(f'Invalid rotation matrix shape {matrix.shape}.')
    # convert via quaternions to avoid axis-angle singularities
    t = Compose([matrix_to_quaternion, quaternion_to_axis_angle])
    return t(matrix)
The provided code snippet includes necessary dependencies for implementing the `single_person_with_mmdet` function. Write a Python function `def single_person_with_mmdet(args, frames_iter)` to solve the following problem:
Estimate smplx parameters from single-person images with mmdetection Args: args (object): object of argparse.Namespace. frames_iter (np.ndarray,): prepared frames
Here is the function:
def single_person_with_mmdet(args, frames_iter):
    """Estimate smplx parameters from single-person
    images with mmdetection

    Args:
        args (object): object of argparse.Namespace.
        frames_iter (np.ndarray,): prepared frames
    """
    mesh_model, extractor = init_model(
        args.mesh_reg_config,
        args.mesh_reg_checkpoint,
        device=args.device.lower())
    # per-frame SMPL-X parameter predictions, appended inside the loop
    smplx_results = dict(
        global_orient=[],
        body_pose=[],
        betas=[],
        left_hand_pose=[],
        right_hand_pose=[],
        jaw_pose=[],
        expression=[])
    pred_cams, bboxes_xyxy = [], []
    frame_id_list, result_list = get_detection_result(args, frames_iter,
                                                      mesh_model, extractor)
    frame_num = len(frame_id_list)
    for i, result in enumerate(mmcv.track_iter_progress(result_list)):
        frame_id = frame_id_list[i]
        if mesh_model.cfg.model.type == 'SMPLXImageBodyModelEstimator':
            mesh_results = inference_image_based_model(
                mesh_model,
                frames_iter[frame_id],
                result,
                bbox_thr=args.bbox_thr,
                format='xyxy')
        else:
            raise Exception(
                f'{mesh_model.cfg.model.type} is not supported yet')
        # single-person: only the first result of each frame is kept
        for key in smplx_results:
            smplx_results[key].append(
                mesh_results[0]['param'][key].cpu().numpy())
        pred_cams.append(mesh_results[0]['camera'])
        bboxes_xyxy.append(mesh_results[0]['bbox'])
    for key in smplx_results:
        smplx_results[key] = np.array(smplx_results[key])
    pred_cams = np.array(pred_cams)
    bboxes_xyxy = np.array(bboxes_xyxy)
    # release GPU memory
    del mesh_model
    del extractor
    torch.cuda.empty_cache()
    # smooth
    if args.smooth_type is not None:
        for key in smplx_results:
            # betas/expression are shape/expression coefficients, not
            # rotations — they are not smoothed
            if key not in ['betas', 'expression']:
                dim = smplx_results[key].shape[1]
                # flatten each 3x3 rotation matrix into a 9-dim vector
                # for smoothing, then restore the (dim, 3, 3) layout
                smplx_results[key] = smooth_process(
                    smplx_results[key].reshape(frame_num, -1, dim, 9),
                    smooth_type=args.smooth_type).reshape(
                        frame_num, dim, 3, 3)
        pred_cams = smooth_process(
            pred_cams[:, np.newaxis],
            smooth_type=args.smooth_type).reshape(frame_num, 3)
    # rotation matrices -> axis-angle, expected input is (N, 21, 3, 3)
    if smplx_results['body_pose'].shape[1:] == (21, 3, 3):
        for key in smplx_results:
            if key not in ['betas', 'expression']:
                smplx_results[key] = rotmat_to_aa(smplx_results[key])
    else:
        raise Exception('Wrong shape of `body_pose`')
    # assemble the 55-joint SMPL-X fullpose:
    # global(1) + body(21) + jaw(1) + eyes(2) + hands(15+15)
    fullpose = np.concatenate(
        (
            smplx_results['global_orient'].reshape(frame_num, 1, 3),
            smplx_results['body_pose'].reshape(frame_num, 21, 3),
            smplx_results['jaw_pose'].reshape(frame_num, 1, 3),
            # Use zero for leye_pose and reye_pose
            np.zeros((frame_num, 2, 3), dtype=smplx_results['jaw_pose'].dtype),
            smplx_results['left_hand_pose'].reshape(frame_num, 15, 3),
            smplx_results['right_hand_pose'].reshape(frame_num, 15, 3),
        ),
        axis=1)
    if args.output is not None:
        os.makedirs(args.output, exist_ok=True)
        human_data = HumanData()
        smplx = {}
        smplx['fullpose'] = fullpose
        smplx['betas'] = smplx_results['betas']
        human_data['smplx'] = smplx
        human_data['pred_cams'] = pred_cams
        human_data.dump(osp.join(args.output, 'inference_result.npz'))
    if args.show_path is not None:
        # dump frames to disk so the renderer can overlay on them
        frames_folder = osp.join(args.show_path, 'images')
        os.makedirs(frames_folder, exist_ok=True)
        array_to_images(
            np.array(frames_iter)[frame_id_list], output_folder=frames_folder)
        # create body model
        body_model_config = dict(
            type='smplx',
            num_betas=10,
            use_face_contour=True,
            use_pca=False,
            flat_hand_mean=True,
            model_path=args.body_model_dir,
            keypoint_src='smplx',
            keypoint_dst='smplx',
        )
        visualize_smpl_hmr(
            poses=fullpose.reshape(-1, 1, 165),
            betas=smplx_results['betas'],
            cam_transl=pred_cams,
            bbox=bboxes_xyxy,
            output_path=os.path.join(args.show_path, 'smplx.mp4'),
            render_choice=args.render_choice,
            resolution=frames_iter[0].shape[:2],
            origin_frames=frames_folder,
            body_model_config=body_model_config,
            overwrite=True)
shutil.rmtree(frames_folder) | Estimate smplx parameters from single-person images with mmdetection Args: args (object): object of argparse.Namespace. frames_iter (np.ndarray,): prepared frames |
14,259 | import os
import os.path as osp
import shutil
from argparse import ArgumentParser
import mmcv
import numpy as np
import torch
from mmhuman3d.apis import (
feature_extract,
inference_image_based_model,
init_model,
)
from mmhuman3d.core.visualization.visualize_smpl import visualize_smpl_hmr
from mmhuman3d.data.data_structures.human_data import HumanData
from mmhuman3d.utils.demo_utils import (
prepare_frames,
process_mmdet_results,
process_mmtracking_results,
smooth_process,
)
from mmhuman3d.utils.ffmpeg_utils import array_to_images
from mmhuman3d.utils.transforms import rotmat_to_aa
def get_tracking_result(args, frames_iter, mesh_model, extractor):
    """Run multi-object tracking over frames and collect per-frame person
    detections.

    Args:
        args: argparse.Namespace with tracking_config, device, bbox_thr
            and draw_bbox attributes.
        frames_iter: iterable of frames (images).
        mesh_model: mesh estimator; its cfg decides whether features
            are extracted per frame.
        extractor: feature extractor model, may be None.

    Returns:
        tuple: (max_track_id, max_instance, frame_id_list, result_list)
            where frames with no detection are dropped from both lists.
    """
    tracking_model = init_tracking_model(
        args.tracking_config, None, device=args.device.lower())
    max_track_id = 0
    max_instance = 0
    result_list = []
    frame_id_list = []
    for i, frame in enumerate(mmcv.track_iter_progress(frames_iter)):
        mmtracking_results = inference_mot(tracking_model, frame, frame_id=i)
        # keep the person class bounding boxes.
        result, max_track_id, instance_num = \
            process_mmtracking_results(
                mmtracking_results,
                max_track_id=max_track_id,
                bbox_thr=args.bbox_thr)
        # extract features from the input video or image sequences
        if mesh_model.cfg.model.type == 'VideoBodyModelEstimator' \
                and extractor is not None:
            result = feature_extract(
                extractor, frame, result, args.bbox_thr, format='xyxy')
        # drop the frame with no detected results
        if result == []:
            continue
        # update max_instance
        if instance_num > max_instance:
            max_instance = instance_num
        # vis bboxes
        if args.draw_bbox:
            bboxes = [res['bbox'] for res in result]
            bboxes = np.vstack(bboxes)
            mmcv.imshow_bboxes(
                frame, bboxes, top_k=-1, thickness=2, show=False)
        result_list.append(result)
        frame_id_list.append(i)
    return max_track_id, max_instance, frame_id_list, result_list
def visualize_smpl_hmr(cam_transl,
                       bbox=None,
                       kp2d=None,
                       focal_length=5000,
                       det_width=224,
                       det_height=224,
                       bbox_format='xyxy',
                       **kwargs) -> None:
    """Simplest way to visualize HMR or SPIN or Smplify pred smpl with origin
    frames and predicted cameras.

    Args:
        cam_transl: predicted weak-perspective camera (s, tx, ty) per frame;
            np.ndarray inputs are converted to torch.Tensor.
        bbox: bounding boxes used to build per-frame intrinsics; ignored
            when kp2d is given.
        kp2d: 2D keypoints; when given, bbox is derived from them.
        focal_length (int): focal length of the assumed HMR camera.
        det_width (int): width of the detection crop.
        det_height (int): height of the detection crop.
        bbox_format (str): format of bbox/kp2d-derived bbox, e.g. 'xyxy'.
        **kwargs: forwarded to render_smpl; keys that would clash with the
            pre-bound projection arguments are dropped.
    """
    if kp2d is not None:
        bbox = convert_kp2d_to_bbox(kp2d, bbox_format=bbox_format)
    # per-frame intrinsics from the bboxes, plus the global HMR intrinsic
    Ks = convert_bbox_to_intrinsic(bbox, bbox_format=bbox_format)
    K = torch.Tensor(
        get_default_hmr_intrinsic(
            focal_length=focal_length,
            det_height=det_height,
            det_width=det_width))
    func = partial(
        render_smpl,
        projection='perspective',
        convention='opencv',
        in_ndc=False,
        K=None,
        R=None,
        orig_cam=None,
    )
    if isinstance(cam_transl, np.ndarray):
        cam_transl = torch.Tensor(cam_transl)
    # weak-perspective (s, tx, ty) -> camera translation (tx, ty, tz);
    # 1e-9 guards against division by a zero scale
    T = torch.cat([
        cam_transl[..., [1]], cam_transl[..., [2]], 2 * focal_length /
        (det_width * cam_transl[..., [0]] + 1e-9)
    ], -1)
    # drop caller kwargs already fixed by the partial to avoid duplicates
    for k in func.keywords.keys():
        if k in kwargs:
            kwargs.pop(k)
    return func(Ks=Ks, K=K, T=T, **kwargs)
class HumanData(dict):
logger = None
SUPPORTED_KEYS = _HumanData_SUPPORTED_KEYS
WARNED_KEYS = []
def __new__(cls: _HumanData, *args: Any, **kwargs: Any) -> _HumanData:
    """New an instance of HumanData.

    Args:
        cls (HumanData): HumanData class.

    Returns:
        HumanData: An instance of HumanData.
    """
    # NOTE(review): args/kwargs are passed to dict.__new__ as two
    # positional objects; dict.__new__ ignores extra arguments — confirm
    # this is intentional upstream.
    ret_human_data = super().__new__(cls, args, kwargs)
    # bookkeeping lives in instance attributes, not in the dict payload
    setattr(ret_human_data, '__data_len__', -1)
    setattr(ret_human_data, '__key_strict__', False)
    setattr(ret_human_data, '__keypoints_compressed__', False)
    return ret_human_data
# NOTE(review): upstream decorates this with @classmethod; the decorator
# appears to be lost in this dump — confirm.
def set_logger(cls, logger: Union[logging.Logger, str, None] = None):
    """Set logger of HumanData class.

    Args:
        logger (logging.Logger | str | None, optional):
            The way to print summary.
            See `mmcv.utils.print_log()` for details.
            Defaults to None.
    """
    cls.logger = logger
def fromfile(cls, npz_path: str) -> _HumanData:
    """Construct a HumanData instance from an npz file.

    Args:
        npz_path (str):
            Path to a dumped npz file.

    Returns:
        HumanData:
            A HumanData instance load from file.
    """
    ret_human_data = cls()
    ret_human_data.load(npz_path)
    return ret_human_data
def new(cls,
        source_dict: dict = None,
        key_strict: bool = False) -> _HumanData:
    """Construct a HumanData instance from a dict.

    Args:
        source_dict (dict, optional):
            A dict with items in HumanData fashion.
            Defaults to None.
        key_strict (bool, optional):
            Whether to raise error when setting unsupported keys.
            Defaults to False.

    Returns:
        HumanData:
            A HumanData instance.
    """
    if source_dict is None:
        ret_human_data = cls()
    else:
        ret_human_data = cls(source_dict)
    # apply strictness after construction so source_dict keys are kept
    # or pruned according to key_strict
    ret_human_data.set_key_strict(key_strict)
    return ret_human_data
def get_key_strict(self) -> bool:
    """Get value of attribute key_strict.

    Returns:
        bool:
            Whether to raise error when setting unsupported keys.
    """
    return self.__key_strict__
def set_key_strict(self, value: bool):
    """Set value of attribute key_strict.

    Args:
        value (bool, optional):
            Whether to raise error when setting unsupported keys.
            Defaults to True.
    """
    former__key_strict__ = self.__key_strict__
    self.__key_strict__ = value
    # when switching from permissive to strict, drop keys that would no
    # longer be accepted
    if former__key_strict__ is False and \
            value is True:
        self.pop_unsupported_items()
def check_keypoints_compressed(self) -> bool:
    """Check whether the keypoints are compressed.

    Returns:
        bool:
            Whether the keypoints are compressed.
    """
    return self.__keypoints_compressed__
def load(self, npz_path: str):
    """Load data from npz_path and update them to self.

    Args:
        npz_path (str):
            Path to a dumped npz file.
    """
    supported_keys = self.__class__.SUPPORTED_KEYS
    with np.load(npz_path, allow_pickle=True) as npz_file:
        tmp_data_dict = dict(npz_file)
        for key, value in list(tmp_data_dict.items()):
            if isinstance(value, np.ndarray) and\
                    len(value.shape) == 0:
                # value is not an ndarray before dump
                value = value.item()
            elif key in supported_keys and\
                    type(value) != supported_keys[key]['type']:
                # coerce back to the declared type (e.g. dict)
                value = supported_keys[key]['type'](value)
            if value is None:
                tmp_data_dict.pop(key)
            elif key == '__key_strict__' or \
                    key == '__data_len__' or\
                    key == '__keypoints_compressed__':
                # restore instance attributes saved by dump()
                self.__setattr__(key, value)
                # pop the attributes to keep dict clean
                tmp_data_dict.pop(key)
            elif key == 'bbox_xywh' and value.shape[1] == 4:
                # legacy bbox without confidence: append a ones column
                value = np.hstack([value, np.ones([value.shape[0], 1])])
                tmp_data_dict[key] = value
            else:
                tmp_data_dict[key] = value
        self.update(tmp_data_dict)
    self.__set_default_values__()
def dump(self, npz_path: str, overwrite: bool = True):
    """Dump keys and items to an npz file.

    Args:
        npz_path (str):
            Path to a dumped npz file.
        overwrite (bool, optional):
            Whether to overwrite if there is already a file.
            Defaults to True.

    Raises:
        ValueError:
            npz_path does not end with '.npz'.
        FileExistsError:
            When overwrite is False and file exists.
    """
    if not check_path_suffix(npz_path, ['.npz']):
        raise ValueError('Not an npz file.')
    if not overwrite:
        if check_path_existence(npz_path, 'file') == Existence.FileExist:
            raise FileExistsError
    # instance attributes are stored alongside the dict payload so that
    # load() can restore them
    dict_to_dump = {
        '__key_strict__': self.__key_strict__,
        '__data_len__': self.__data_len__,
        '__keypoints_compressed__': self.__keypoints_compressed__,
    }
    dict_to_dump.update(self)
    np.savez_compressed(npz_path, **dict_to_dump)
def get_sliced_cache(self, slice_size=10) -> List:
    """Slice the whole HumanData into pieces for HumanDataCacheWriter.

    Args:
        slice_size (int, optional):
            The length of each unit in HumanData cache.
            Defaults to 10.

    Returns:
        List:
            Two dicts for HumanDataCacheWriter.
            Init HumanDataCacheWriter by HumanDataCacheWriter(**Returns[0])
            and set data by
            human_data_cache_writer.update_sliced_dict(Returns[1]).
    """
    keypoints_info = {}
    non_sliced_data = {}
    sliced_data = {}
    slice_num = ceil(self.__data_len__ / slice_size)
    # pre-create one bucket per slice, keyed by its stringified index
    for slice_index in range(slice_num):
        sliced_data[str(slice_index)] = {}
    dim_dict = self.__get_slice_dim__()
    for key, dim in dim_dict.items():
        # no dim to slice
        if dim is None:
            if key.startswith('keypoints') and\
                    (key.endswith('_mask') or
                     key.endswith('_convention')):
                keypoints_info[key] = self[key]
            else:
                non_sliced_data[key] = self[key]
        elif isinstance(dim, dict):
            # dict value: each sub-key has its own slice dim (or None)
            value_dict = self.get_raw_value(key)
            non_sliced_sub_dict = {}
            for sub_key in value_dict.keys():
                sub_value = value_dict[sub_key]
                if dim[sub_key] is None:
                    non_sliced_sub_dict[sub_key] = sub_value
                else:
                    sub_dim = dim[sub_key]
                    for slice_index in range(slice_num):
                        slice_start = slice_index * slice_size
                        # last slice may be shorter than slice_size
                        slice_end = min((slice_index + 1) * slice_size,
                                        self.__data_len__)
                        slice_range = slice(slice_start, slice_end)
                        sliced_sub_value = \
                            HumanData.__get_sliced_result__(
                                sub_value, sub_dim, slice_range
                            )
                        if key not in sliced_data[str(slice_index)]:
                            sliced_data[str(slice_index)][key] = {}
                        sliced_data[str(slice_index)][key][sub_key] = \
                            sliced_sub_value
            if len(non_sliced_sub_dict) > 0:
                non_sliced_data[key] = non_sliced_sub_dict
        else:
            value = self.get_raw_value(key)
            # slice as ndarray
            if isinstance(value, np.ndarray):
                slice_list = [
                    slice(None),
                ] * len(value.shape)
                for slice_index in range(slice_num):
                    slice_start = slice_index * slice_size
                    slice_end = min((slice_index + 1) * slice_size,
                                    self.__data_len__)
                    slice_list[dim] = slice(slice_start, slice_end)
                    sliced_value = value[tuple(slice_list)]
                    sliced_data[str(slice_index)][key] = sliced_value
            # slice as list/tuple
            else:
                for slice_index in range(slice_num):
                    slice_start = slice_index * slice_size
                    slice_end = min((slice_index + 1) * slice_size,
                                    self.__data_len__)
                    sliced_value = value[slice(slice_start, slice_end)]
                    sliced_data[str(slice_index)][key] = sliced_value
    writer_args_dict = {
        'slice_size': slice_size,
        'keypoints_info': keypoints_info,
        'data_len': self.data_len,
        'non_sliced_data': non_sliced_data,
        'key_strict': self.get_key_strict()
    }
    # NOTE(review): returns a 2-tuple although annotated -> List — confirm.
    return writer_args_dict, sliced_data
def to(self,
device: Optional[Union[torch.device, str]] = _CPU_DEVICE,
dtype: Optional[torch.dtype] = None,
non_blocking: Optional[bool] = False,
copy: Optional[bool] = False,
memory_format: Optional[torch.memory_format] = None) -> dict:
"""Convert values in numpy.ndarray type to torch.Tensor, and move
Tensors to the target device. All keys will exist in the returned dict.
Args:
device (Union[torch.device, str], optional):
A specified device. Defaults to CPU_DEVICE.
dtype (torch.dtype, optional):
The data type of the expected torch.Tensor.
If dtype is None, it is decided according to numpy.ndarry.
Defaults to None.
non_blocking (bool, optional):
When non_blocking, tries to convert asynchronously with
respect to the host if possible, e.g.,
converting a CPU Tensor with pinned memory to a CUDA Tensor.
Defaults to False.
copy (bool, optional):
When copy is set, a new Tensor is created even when
the Tensor already matches the desired conversion.
No matter what value copy is, Tensor constructed from numpy
will not share the same memory with the source numpy.ndarray.
Defaults to False.
memory_format (torch.memory_format, optional):
The desired memory format of returned Tensor.
Not supported by pytorch-cpu.
Defaults to None.
Returns:
dict:
A dict with all numpy.ndarray values converted into
torch.Tensor and all Tensors moved to the target device.
"""
ret_dict = {}
for key in self.keys():
raw_value = self.get_raw_value(key)
tensor_value = None
if isinstance(raw_value, np.ndarray):
tensor_value = torch.from_numpy(raw_value).clone()
elif isinstance(raw_value, torch.Tensor):
tensor_value = raw_value
if tensor_value is None:
ret_dict[key] = raw_value
else:
if memory_format is None:
ret_dict[key] = \
tensor_value.to(device, dtype,
non_blocking, copy)
else:
ret_dict[key] = \
tensor_value.to(device, dtype,
non_blocking, copy,
memory_format=memory_format)
return ret_dict
    def __getitem__(self, key: _KT) -> _VT:
        """Get value defined by HumanData. This function will be called by
        self[key]. In keypoints_compressed mode, if the key contains
        'keypoints', an array with zero-padding at absent keypoint will be
        returned. Call self.get_raw_value(k) to get value without padding.

        Args:
            key (_KT):
                Key in HumanData.

        Returns:
            _VT:
                Value to the key.
        """
        value = super().__getitem__(key)
        if self.__keypoints_compressed__:
            # In compressed mode keypoints are stored without masked-out
            # joints; re-insert zero rows so callers always see the
            # full-size array.
            mask_key = f'{key}_mask'
            if key in self and \
                    isinstance(value, np.ndarray) and \
                    'keypoints' in key and \
                    mask_key in self:
                mask_array = np.asarray(super().__getitem__(mask_key))
                value = \
                    self.__class__.__add_zero_pad__(value, mask_array)
        return value
def get_raw_value(self, key: _KT) -> _VT:
"""Get raw value from the dict. It acts the same as
dict.__getitem__(k).
Args:
key (_KT):
Key in dict.
Returns:
_VT:
Value to the key.
"""
value = super().__getitem__(key)
return value
def get_value_in_shape(self,
key: _KT,
shape: Union[list, tuple],
padding_constant: int = 0) -> np.ndarray:
"""Get value in a specific shape. For each dim, if the required shape
is smaller than current shape, ndarray will be sliced. Otherwise, it
will be padded with padding_constant at the end.
Args:
key (_KT):
Key in dict. The value of this key must be
an instance of numpy.ndarray.
shape (Union[list, tuple]):
Shape of the returned array. Its length
must be equal to value.ndim. Set -1 for
a dimension if you do not want to edit it.
padding_constant (int, optional):
The value to set the padded values for each axis.
Defaults to 0.
Raises:
ValueError:
A value in shape is neither positive integer nor -1.
Returns:
np.ndarray:
An array in required shape.
"""
value = self.get_raw_value(key)
assert isinstance(value, np.ndarray)
assert value.ndim == len(shape)
pad_width_list = []
slice_list = []
for dim_index in range(len(shape)):
if shape[dim_index] == -1:
# no pad or slice
pad_width_list.append((0, 0))
slice_list.append(slice(None))
elif shape[dim_index] > 0:
# valid shape value
wid = shape[dim_index] - value.shape[dim_index]
if wid > 0:
pad_width_list.append((0, wid))
else:
pad_width_list.append((0, 0))
slice_list.append(slice(0, shape[dim_index]))
else:
# invalid
raise ValueError
pad_value = np.pad(
value,
pad_width=pad_width_list,
mode='constant',
constant_values=padding_constant)
return pad_value[tuple(slice_list)]
    # Typing overload stub (one-argument form: start=0, step=1).
    # NOTE(review): the @overload decorator is not visible in this chunk —
    # confirm it is present in the full file.
    def get_slice(self, stop: int):
        """Slice [0, stop, 1] of all sliceable values."""
        ...
    # Typing overload stub (two-argument form: step=1).
    # NOTE(review): the @overload decorator is not visible in this chunk —
    # confirm it is present in the full file.
    def get_slice(self, start: int, stop: int):
        """Slice [start, stop, 1] of all sliceable values."""
        ...
    # Typing overload stub (three-argument form).
    # NOTE(review): the @overload decorator is not visible in this chunk —
    # confirm it is present in the full file.
    def get_slice(self, start: int, stop: int, step: int):
        """Slice [start, stop, step] of all sliceable values."""
        ...
    def get_slice(self,
                  arg_0: int,
                  arg_1: Union[int, Any] = None,
                  step: int = 1) -> _HumanData:
        """Slice all sliceable values along major_dim dimension.

        Args:
            arg_0 (int):
                When arg_1 is None, arg_0 is stop and start=0.
                When arg_1 is not None, arg_0 is start.
            arg_1 (Union[int, Any], optional):
                None or where to stop.
                Defaults to None.
            step (int, optional):
                Length of step. Defaults to 1.

        Returns:
            HumanData:
                A new HumanData instance with sliced values.
        """
        ret_human_data = \
            HumanData.new(key_strict=self.get_key_strict())
        # mimic range()/slice(): a single positional argument means stop
        if arg_1 is None:
            start = 0
            stop = arg_0
        else:
            start = arg_0
            stop = arg_1
        slice_index = slice(start, stop, step)
        dim_dict = self.__get_slice_dim__()
        for key, dim in dim_dict.items():
            # keys not expected be sliced
            if dim is None:
                ret_human_data[key] = self[key]
            elif isinstance(dim, dict):
                # dict value: slice each sub-value along its own dim
                value_dict = self.get_raw_value(key)
                sliced_dict = {}
                for sub_key in value_dict.keys():
                    sub_value = value_dict[sub_key]
                    if dim[sub_key] is None:
                        sliced_dict[sub_key] = sub_value
                    else:
                        sub_dim = dim[sub_key]
                        sliced_sub_value = \
                            HumanData.__get_sliced_result__(
                                sub_value, sub_dim, slice_index)
                        sliced_dict[sub_key] = sliced_sub_value
                ret_human_data[key] = sliced_dict
            else:
                value = self[key]
                sliced_value = \
                    HumanData.__get_sliced_result__(
                        value, dim, slice_index)
                ret_human_data[key] = sliced_value
        # keep the compression state consistent with the source instance
        if self.check_keypoints_compressed():
            ret_human_data.compress_keypoints_by_mask()
        return ret_human_data
def __get_slice_dim__(self) -> dict:
"""For each key in this HumanData, get the dimension for slicing. 0 for
default, if no other value specified.
Returns:
dict:
Keys are self.keys().
Values indicate where to slice.
None for not expected to be sliced or
failed.
"""
supported_keys = self.__class__.SUPPORTED_KEYS
ret_dict = {}
for key in self.keys():
# keys not expected be sliced
if key in supported_keys and \
'dim' in supported_keys[key] and \
supported_keys[key]['dim'] is None:
ret_dict[key] = None
else:
value = self[key]
if isinstance(value, dict) and len(value) > 0:
ret_dict[key] = {}
for sub_key in value.keys():
try:
sub_value_len = len(value[sub_key])
if 'dim' in value:
ret_dict[key][sub_key] = value['dim']
elif sub_value_len != self.__data_len__:
ret_dict[key][sub_key] = None
else:
ret_dict[key][sub_key] = 0
except TypeError:
ret_dict[key][sub_key] = None
continue
# instance cannot be sliced without len method
try:
value_len = len(value)
except TypeError:
ret_dict[key] = None
continue
# slice on dim 0 by default
slice_dim = 0
if key in supported_keys and \
'dim' in supported_keys[key]:
slice_dim = \
supported_keys[key]['dim']
data_len = value_len if slice_dim == 0 \
else value.shape[slice_dim]
# dim not for slice
if data_len != self.__data_len__:
ret_dict[key] = None
continue
else:
ret_dict[key] = slice_dim
return ret_dict
    def __setitem__(self, key: _KT, val: _VT) -> None:
        """Set self[key] to value. Only be called when using
        human_data[key] = val. Methods like update won't call __setitem__.
        In keypoints_compressed mode, if the key contains 'keypoints',
        and f'{key}_mask' is in self.keys(), invalid zeros
        will be removed before setting value.

        Args:
            key (_KT):
                Key in HumanData.
                Better be an element in HumanData.SUPPORTED_KEYS.
                If not, an Error will be raised in key_strict mode.
            val (_VT):
                Value to the key.

        Raises:
            KeyError:
                self.get_key_strict() is True and
                key cannot be found in
                HumanData.SUPPORTED_KEYS.
            ValueError:
                Value is supported but doesn't match definition.
            ValueError:
                self.check_keypoints_compressed() is True and
                mask of a keypoint item is missing.
        """
        self.__check_key__(key)
        self.__check_value__(key, val)
        # if it can be compressed by mask
        if self.__keypoints_compressed__:
            class_logger = self.__class__.logger
            if 'keypoints' in key and \
                    '_mask' in key:
                # masks are locked while compressed: they define the
                # stored layout of every compressed keypoints array
                msg = 'Mask cannot be modified ' +\
                    'in keypoints_compressed mode.'
                print_log(msg=msg, logger=class_logger, level=logging.WARN)
                return
            elif isinstance(val, np.ndarray) and \
                    'keypoints' in key and \
                    '_mask' not in key:
                # strip zero-padded joints so the stored array matches
                # the compressed layout
                mask_key = f'{key}_mask'
                if mask_key in self:
                    mask_array = np.asarray(super().__getitem__(mask_key))
                    val = \
                        self.__class__.__remove_zero_pad__(val, mask_array)
                else:
                    msg = f'Mask for {key} has not been set.' +\
                        f' Please set {mask_key} before compression.'
                    print_log(
                        msg=msg, logger=class_logger, level=logging.ERROR)
                    raise ValueError
        dict.__setitem__(self, key, val)
    def set_raw_value(self, key: _KT, val: _VT) -> None:
        """Set the raw value of self[key] to val after key check. It acts the
        same as dict.__setitem__(self, key, val) if the key satisfied
        constraints. Unlike self[key] = val, no compression handling is
        applied to keypoints values.

        Args:
            key (_KT):
                Key in dict.
            val (_VT):
                Value to the key.

        Raises:
            KeyError:
                self.get_key_strict() is True and
                key cannot be found in
                HumanData.SUPPORTED_KEYS.
            ValueError:
                Value is supported but doesn't match definition.
        """
        # validate, then bypass __setitem__ to skip compression logic
        self.__check_key__(key)
        self.__check_value__(key, val)
        dict.__setitem__(self, key, val)
def pop_unsupported_items(self) -> None:
"""Find every item with a key not in HumanData.SUPPORTED_KEYS, and pop
it to save memory."""
for key in list(self.keys()):
if key not in self.__class__.SUPPORTED_KEYS:
self.pop(key)
    def __check_key__(self, key: Any) -> _KeyCheck:
        """Check whether the key matches definition in
        HumanData.SUPPORTED_KEYS.

        Args:
            key (Any):
                Key in HumanData.

        Returns:
            _KeyCheck:
                PASS, WARN or ERROR.

        Raises:
            KeyError:
                self.get_key_strict() is True and
                key cannot be found in
                HumanData.SUPPORTED_KEYS.
        """
        ret_key_check = _KeyCheck.PASS
        if self.get_key_strict():
            # strict mode: unsupported keys are an error
            if key not in self.__class__.SUPPORTED_KEYS:
                ret_key_check = _KeyCheck.ERROR
        else:
            if key not in self.__class__.SUPPORTED_KEYS and \
                    key not in self.__class__.WARNED_KEYS:
                # log warning message at the first time
                ret_key_check = _KeyCheck.WARN
                self.__class__.WARNED_KEYS.append(key)
        if ret_key_check == _KeyCheck.ERROR:
            raise KeyError(self.__class__.__get_key_error_msg__(key))
        elif ret_key_check == _KeyCheck.WARN:
            class_logger = self.__class__.logger
            if class_logger == 'silent':
                # 'silent' sentinel suppresses the warning entirely
                pass
            else:
                print_log(
                    msg=self.__class__.__get_key_warn_msg__(key),
                    logger=class_logger,
                    level=logging.WARN)
        return ret_key_check
def __check_value__(self, key: Any, val: Any) -> bool:
"""Check whether the value matches definition in
HumanData.SUPPORTED_KEYS.
Args:
key (Any):
Key in HumanData.
val (Any):
Value to the key.
Returns:
bool:
True for matched, ortherwise False.
Raises:
ValueError:
Value is supported but doesn't match definition.
"""
ret_bool = self.__check_value_type__(key, val) and\
self.__check_value_shape__(key, val) and\
self.__check_value_len__(key, val)
if not ret_bool:
raise ValueError(self.__class__.__get_value_error_msg__())
return ret_bool
    def __check_value_type__(self, key: Any, val: Any) -> bool:
        """Check whether the type of val matches definition in
        HumanData.SUPPORTED_KEYS.

        Args:
            key (Any):
                Key in HumanData.
            val (Any):
                Value to the key.

        Returns:
            bool:
                If type doesn't match, return False.
                Else return True.
        """
        ret_bool = True
        supported_keys = self.__class__.SUPPORTED_KEYS
        # check definition
        if key in supported_keys:
            # check type
            # NOTE(review): exact type comparison rejects subclasses of
            # the expected type — presumably intentional to keep stored
            # values homogeneous; confirm before switching to isinstance.
            if type(val) != supported_keys[key]['type']:
                ret_bool = False
        if not ret_bool:
            expected_type = supported_keys[key]['type']
            err_msg = 'Type check Failed:\n'
            err_msg += f'key={str(key)}\n'
            err_msg += f'type(val)={type(val)}\n'
            err_msg += f'expected type={expected_type}\n'
            print_log(
                msg=err_msg, logger=self.__class__.logger, level=logging.ERROR)
        return ret_bool
    def __check_value_shape__(self, key: Any, val: Any) -> bool:
        """Check whether the shape of val matches definition in
        HumanData.SUPPORTED_KEYS.

        Args:
            key (Any):
                Key in HumanData.
            val (Any):
                Value to the key.

        Returns:
            bool:
                If expected shape is defined and doesn't match,
                return False.
                Else return True.
        """
        ret_bool = True
        supported_keys = self.__class__.SUPPORTED_KEYS
        # check definition
        if key in supported_keys:
            # check shape
            if 'shape' in supported_keys[key]:
                val_shape = val.shape
                for shape_ind in range(len(supported_keys[key]['shape'])):
                    # length not match: val has fewer dims than expected
                    if shape_ind >= len(val_shape):
                        ret_bool = False
                        break
                    expect_val = supported_keys[key]['shape'][shape_ind]
                    # value not match; non-positive entries (-1) act as
                    # wildcards and match any size on that axis
                    if expect_val > 0 and \
                            expect_val != val_shape[shape_ind]:
                        ret_bool = False
                        break
        if not ret_bool:
            # ret_bool can only be False inside the shape branch above,
            # so val_shape is always bound here
            expected_shape = str(supported_keys[key]['shape'])
            expected_shape = expected_shape.replace('-1', 'Any')
            err_msg = 'Shape check Failed:\n'
            err_msg += f'key={str(key)}\n'
            err_msg += f'val.shape={val_shape}\n'
            err_msg += f'expected shape={expected_shape}\n'
            print_log(
                msg=err_msg, logger=self.__class__.logger, level=logging.ERROR)
        return ret_bool
    # NOTE(review): the class reads `self.data_len` as an attribute
    # elsewhere (e.g. __check_value_len__), so this appears to be a
    # @property getter whose decorator is not visible in this chunk —
    # confirm against the full file.
    def data_len(self) -> int:
        """Get the temporal length of this HumanData instance.

        Returns:
            int:
                Number of frames related to this instance.
        """
        return self.__data_len__
    # NOTE(review): appears to be the @data_len.setter counterpart of the
    # property above — the decorator is not visible in this chunk; confirm
    # against the full file.
    def data_len(self, value: int):
        """Set the temporal length of this HumanData instance.

        Args:
            value (int):
                Number of frames related to this instance.
        """
        self.__data_len__ = value
    def __check_value_len__(self, key: Any, val: Any) -> bool:
        """Check whether the temporal length of val matches other values.

        Args:
            key (Any):
                Key in HumanData.
            val (Any):
                Value to the key.

        Returns:
            bool:
                If temporal dim is defined and temporal length doesn't match,
                return False.
                Else return True.
        """
        ret_bool = True
        supported_keys = self.__class__.SUPPORTED_KEYS
        # check definition
        if key in supported_keys:
            # check temporal length
            if 'dim' in supported_keys[key] and \
                    supported_keys[key]['dim'] is not None:
                val_slice_dim = supported_keys[key]['dim']
                if supported_keys[key]['type'] == dict:
                    # dict values carry their temporal axis on one
                    # designated sub value named by 'slice_key'
                    slice_key = supported_keys[key]['slice_key']
                    val_data_len = val[slice_key].shape[val_slice_dim]
                else:
                    val_data_len = val.shape[val_slice_dim]
                if self.data_len < 0:
                    # no data_len yet, assign a new one
                    self.data_len = val_data_len
                else:
                    # check if val_data_len matches recorded data_len
                    if self.data_len != val_data_len:
                        ret_bool = False
        if not ret_bool:
            err_msg = 'Temporal check Failed:\n'
            err_msg += f'key={str(key)}\n'
            err_msg += f'val\'s data_len={val_data_len}\n'
            err_msg += f'expected data_len={self.data_len}\n'
            print_log(
                msg=err_msg, logger=self.__class__.logger, level=logging.ERROR)
        return ret_bool
    def generate_mask_from_confidence(self, keys=None) -> None:
        """Generate mask from keypoints' confidence. Keypoints that have zero
        confidence in all occurrences will have a zero mask. Note that the last
        value of the keypoint is assumed to be confidence.

        Args:
            keys: None, str, or list of str.
                None: all keys with `keypoint` in it will have mask
                    generated from their confidence.
                str: key of the keypoint; the generated mask is stored
                    under f'{key}_mask'.
                list of str: a list of keys of the keypoints.
                    Generate mask for multiple keypoints.
                Defaults to None.

        Returns:
            None

        Raises:
            TypeError:
                `keys` is neither None, str, nor list of str.
        """
        if keys is None:
            # auto-discover every keypoints array (excluding masks)
            keys = []
            for key in self.keys():
                val = self.get_raw_value(key)
                if isinstance(val, np.ndarray) and \
                        'keypoints' in key and \
                        '_mask' not in key:
                    keys.append(key)
        elif isinstance(keys, str):
            keys = [keys]
        elif isinstance(keys, list):
            for key in keys:
                assert isinstance(key, str)
        else:
            raise TypeError(f'`Keys` must be None, str, or list of str, '
                            f'got {type(keys)}.')
        update_dict = {}
        for kpt_key in keys:
            kpt_array = self.get_raw_value(kpt_key)
            num_joints = kpt_array.shape[-2]
            # if all conf of a joint are zero, this joint is masked
            joint_conf = kpt_array[..., -1].reshape(-1, num_joints)
            mask_array = (joint_conf > 0).astype(np.uint8).max(axis=0)
            assert len(mask_array) == num_joints
            # generate mask
            update_dict[f'{kpt_key}_mask'] = mask_array
        self.update(update_dict)
def compress_keypoints_by_mask(self) -> None:
"""If a key contains 'keypoints', and f'{key}_mask' is in self.keys(),
invalid zeros will be removed and f'{key}_mask' will be locked.
Raises:
KeyError:
A key contains 'keypoints' has been found
but its corresponding mask is missing.
"""
assert self.__keypoints_compressed__ is False
key_pairs = []
for key in self.keys():
mask_key = f'{key}_mask'
val = self.get_raw_value(key)
if isinstance(val, np.ndarray) and \
'keypoints' in key and \
'_mask' not in key:
if mask_key in self:
key_pairs.append([key, mask_key])
else:
msg = f'Mask for {key} has not been set.' +\
f'Please set {mask_key} before compression.'
raise KeyError(msg)
compressed_dict = {}
for kpt_key, mask_key in key_pairs:
kpt_array = self.get_raw_value(kpt_key)
mask_array = np.asarray(self.get_raw_value(mask_key))
compressed_kpt = \
self.__class__.__remove_zero_pad__(kpt_array, mask_array)
compressed_dict[kpt_key] = compressed_kpt
# set value after all pairs are compressed
self.update(compressed_dict)
self.__keypoints_compressed__ = True
    def decompress_keypoints(self) -> None:
        """If a key contains 'keypoints', and f'{key}_mask' is in self.keys(),
        invalid zeros will be inserted to the right places and f'{key}_mask'
        will be unlocked.

        Raises:
            KeyError:
                A key contains 'keypoints' has been found
                but its corresponding mask is missing.
        """
        assert self.__keypoints_compressed__ is True
        key_pairs = []
        for key in self.keys():
            mask_key = f'{key}_mask'
            val = self.get_raw_value(key)
            if isinstance(val, np.ndarray) and \
                    'keypoints' in key and \
                    '_mask' not in key:
                if mask_key in self:
                    key_pairs.append([key, mask_key])
                else:
                    # without the mask the original joint layout cannot
                    # be reconstructed
                    class_logger = self.__class__.logger
                    msg = f'Mask for {key} has not been found.' +\
                        f'Please remove {key} before decompression.'
                    print_log(
                        msg=msg, logger=class_logger, level=logging.ERROR)
                    raise KeyError
        decompressed_dict = {}
        for kpt_key, mask_key in key_pairs:
            mask_array = np.asarray(self.get_raw_value(mask_key))
            compressed_kpt = self.get_raw_value(kpt_key)
            kpt_array = \
                self.__class__.__add_zero_pad__(compressed_kpt, mask_array)
            decompressed_dict[kpt_key] = kpt_array
        # set value after all pairs are decompressed
        self.update(decompressed_dict)
        self.__keypoints_compressed__ = False
def dump_by_pickle(self, pkl_path: str, overwrite: bool = True) -> None:
"""Dump keys and items to a pickle file. It's a secondary dump method,
when a HumanData instance is too large to be dumped by self.dump()
Args:
pkl_path (str):
Path to a dumped pickle file.
overwrite (bool, optional):
Whether to overwrite if there is already a file.
Defaults to True.
Raises:
ValueError:
npz_path does not end with '.pkl'.
FileExistsError:
When overwrite is False and file exists.
"""
if not check_path_suffix(pkl_path, ['.pkl']):
raise ValueError('Not an pkl file.')
if not overwrite:
if check_path_existence(pkl_path, 'file') == Existence.FileExist:
raise FileExistsError
dict_to_dump = {
'__key_strict__': self.__key_strict__,
'__data_len__': self.__data_len__,
'__keypoints_compressed__': self.__keypoints_compressed__,
}
dict_to_dump.update(self)
with open(pkl_path, 'wb') as f_writeb:
pickle.dump(
dict_to_dump, f_writeb, protocol=pickle.HIGHEST_PROTOCOL)
    def load_by_pickle(self, pkl_path: str) -> None:
        """Load data from pkl_path and update them to self.

        When a HumanData Instance was dumped by
        self.dump_by_pickle(), use this to load.

        Args:
            pkl_path (str):
                Path to a dumped pickle file.
        """
        with open(pkl_path, 'rb') as f_readb:
            tmp_data_dict = pickle.load(f_readb)
        # iterate over a copy: the dict is mutated while looping
        for key, value in list(tmp_data_dict.items()):
            if value is None:
                tmp_data_dict.pop(key)
            elif key == '__key_strict__' or \
                    key == '__data_len__' or\
                    key == '__keypoints_compressed__':
                # restore private attributes dumped by dump_by_pickle()
                self.__setattr__(key, value)
                # pop the attributes to keep dict clean
                tmp_data_dict.pop(key)
            elif key == 'bbox_xywh' and value.shape[1] == 4:
                # legacy files lack the confidence column; append ones
                value = np.hstack([value, np.ones([value.shape[0], 1])])
                tmp_data_dict[key] = value
            else:
                tmp_data_dict[key] = value
        self.update(tmp_data_dict)
        self.__set_default_values__()
    def __set_default_values__(self) -> None:
        """For older versions of HumanData, call this method to apply missing
        values (also attributes)."""
        supported_keys = self.__class__.SUPPORTED_KEYS
        if self.__data_len__ == -1:
            # data_len was never recorded: infer it from the first
            # supported key that has a slice dim
            for key in supported_keys:
                if key in self and \
                        'dim' in supported_keys[key] and\
                        supported_keys[key]['dim'] is not None:
                    if 'slice_key' in supported_keys[key] and\
                            supported_keys[key]['type'] == dict:
                        # dict values carry their temporal axis on the
                        # sub value named by 'slice_key'
                        sub_key = supported_keys[key]['slice_key']
                        slice_dim = supported_keys[key]['dim']
                        self.__data_len__ = \
                            self[key][sub_key].shape[slice_dim]
                    else:
                        slice_dim = supported_keys[key]['dim']
                        self.__data_len__ = self[key].shape[slice_dim]
                    break
        # every keypoints array gets a convention; default to 'human_data'
        for key in list(self.keys()):
            convention_key = f'{key}_convention'
            if key.startswith('keypoints') and \
                    not key.endswith('_mask') and \
                    not key.endswith('_convention') and \
                    convention_key not in self:
                self[convention_key] = 'human_data'
    # NOTE(review): takes `cls` first — appears to be a @classmethod whose
    # decorator is not visible in this chunk; confirm against the full file.
    def concatenate(cls, human_data_0: _HumanData,
                    human_data_1: _HumanData) -> _HumanData:
        """Concatenate two human_data. All keys will be kept it the returned
        human_data. If either value from human_data_0 or human_data_1 matches
        data_len from its HumanData, the two values will be concatenated as a
        single value. If not, postfix will be added to the key to specify
        source of the value.

        Args:
            human_data_0 (_HumanData)
            human_data_1 (_HumanData)

        Returns:
            _HumanData:
                A new human_data instance with all concatenated data.
        """
        ret_human_data = cls.new(key_strict=False)
        set_0 = set(human_data_0.keys())
        set_1 = set(human_data_1.keys())
        common_keys = set_0.intersection(set_1)
        dim_dict_0 = human_data_0.__get_slice_dim__()
        dim_dict_1 = human_data_1.__get_slice_dim__()
        for key in common_keys:
            value_0 = human_data_0[key]
            value_1 = human_data_1[key]
            # align type
            value_0 = list(value_0) if isinstance(value_0, tuple)\
                else value_0
            value_1 = list(value_1) if isinstance(value_1, tuple)\
                else value_1
            assert type(value_0) == type(value_1)
            # align convention: both sides must agree on keypoint layout
            if key.startswith('keypoints') and\
                    key.endswith('_convention'):
                assert value_0 == value_1
                ret_human_data[key] = value_0
                continue
            # mask_0 and mask_1: elementwise product keeps a joint valid
            # only when it is valid in both instances
            elif key.startswith('keypoints') and\
                    key.endswith('_mask'):
                new_mask = value_0 * value_1
                ret_human_data[key] = new_mask
                continue
            # go through the sub dict
            if isinstance(value_0, dict):
                sub_dict = {}
                for sub_key, sub_value_0 in value_0.items():
                    # only found in value_0
                    if sub_key not in value_1:
                        sub_dict[sub_key] = sub_value_0
                    # found in both values
                    else:
                        sub_value_1 = value_1[sub_key]
                        concat_sub_dict = cls.__concat_value__(
                            key=sub_key,
                            value_0=sub_value_0,
                            dim_0=dim_dict_0[key][sub_key],
                            value_1=sub_value_1,
                            dim_1=dim_dict_1[key][sub_key])
                        sub_dict.update(concat_sub_dict)
                for sub_key, sub_value_1 in value_1.items():
                    if sub_key not in value_0:
                        sub_dict[sub_key] = sub_value_1
                ret_human_data[key] = sub_dict
            # try concat
            else:
                concat_dict = cls.__concat_value__(
                    key=key,
                    value_0=value_0,
                    dim_0=dim_dict_0[key],
                    value_1=value_1,
                    dim_1=dim_dict_1[key])
                ret_human_data.update(concat_dict)
        # check exclusive keys
        for key, value in human_data_0.items():
            if key not in common_keys:
                # value not for concat and slice
                if dim_dict_0[key] is None:
                    ret_human_data[key] = value
                # value aligned with data_len of HumanData_0
                else:
                    ret_human_data[f'{key}_0'] = value
        for key, value in human_data_1.items():
            if key not in common_keys:
                # same as above
                if dim_dict_1[key] is None:
                    ret_human_data[key] = value
                else:
                    ret_human_data[f'{key}_1'] = value
        return ret_human_data
def __concat_value__(cls, key: Any, value_0: Any, value_1: Any,
dim_0: Union[None, int], dim_1: Union[None,
int]) -> dict:
"""Concat two values from two different HumanData.
Args:
key (Any):
The common key of the two values.
value_0 (Any):
Value from 0.
value_1 (Any):
Value from 1.
dim_0 (Union[None, int]):
The dim for concat and slice. None for N/A.
dim_1 (Union[None, int]):
The dim for concat and slice. None for N/A.
Returns:
dict:
Dict for concatenated result.
"""
ret_dict = {}
if dim_0 is None or dim_1 is None:
ret_dict[f'{key}_0'] = value_0
ret_dict[f'{key}_1'] = value_1
elif isinstance(value_0, list):
ret_dict[key] = value_0 + value_1
# elif isinstance(value_0, np.ndarray):
else:
ret_dict[key] = np.concatenate((value_0, value_1), axis=dim_0)
return ret_dict
def __add_zero_pad__(cls, compressed_array: np.ndarray,
mask_array: np.ndarray) -> np.ndarray:
"""Pad zeros to a compressed keypoints array.
Args:
compressed_array (np.ndarray):
A compressed keypoints array.
mask_array (np.ndarray):
The mask records compression relationship.
Returns:
np.ndarray:
A keypoints array in full-size.
"""
assert mask_array.sum() == compressed_array.shape[1]
data_len, _, dim = compressed_array.shape
mask_len = mask_array.shape[0]
ret_value = np.zeros(
shape=[data_len, mask_len, dim], dtype=compressed_array.dtype)
valid_mask_index = np.where(mask_array == 1)[0]
ret_value[:, valid_mask_index, :] = compressed_array
return ret_value
def __remove_zero_pad__(cls, zero_pad_array: np.ndarray,
mask_array: np.ndarray) -> np.ndarray:
"""Remove zero-padding from a full-size keypoints array.
Args:
zero_pad_array (np.ndarray):
A keypoints array in full-size.
mask_array (np.ndarray):
The mask records compression relationship.
Returns:
np.ndarray:
A compressed keypoints array.
"""
assert mask_array.shape[0] == zero_pad_array.shape[1]
valid_mask_index = np.where(mask_array == 1)[0]
ret_value = np.take(zero_pad_array, valid_mask_index, axis=1)
return ret_value
def __get_key_warn_msg__(cls, key: Any) -> str:
"""Get the warning message when a key fails the check.
Args:
key (Any):
The key with wrong.
Returns:
str:
The warning message.
"""
class_name = cls.__name__
warn_message = \
f'{key} is absent in' +\
f' {class_name}.SUPPORTED_KEYS.\n'
suggestion_message = \
'Ignore this if you know exactly' +\
' what you are doing.\n' +\
'Otherwise, Call self.set_key_strict(True)' +\
' to avoid wrong keys.\n'
return warn_message + suggestion_message
def __get_key_error_msg__(cls, key: Any) -> str:
"""Get the error message when a key fails the check.
Args:
key (Any):
The key with wrong.
Returns:
str:
The error message.
"""
class_name = cls.__name__
absent_message = \
f'{key} is absent in' +\
f' {class_name}.SUPPORTED_KEYS.\n'
suggestion_message = \
'Call self.set_key_strict(False)' +\
' to allow unsupported keys.\n'
return absent_message + suggestion_message
def __get_value_error_msg__(cls) -> str:
"""Get the error message when a value fails the check.
Returns:
str:
The error message.
"""
error_message = \
'An supported value doesn\'t ' +\
'match definition.\n'
suggestion_message = \
'See error log for details.\n'
return error_message + suggestion_message
def __get_sliced_result__(
cls, input_data: Union[np.ndarray, list, tuple], slice_dim: int,
slice_range: slice) -> Union[np.ndarray, list, tuple]:
"""Slice input_data along slice_dim with slice_range.
Args:
input_data (Union[np.ndarray, list, tuple]):
Data to be sliced.
slice_dim (int):
Dimension to be sliced.
slice_range (slice):
An instance of class slice.
Returns:
Union[np.ndarray, list, tuple]:
A slice of input_data.
"""
if isinstance(input_data, np.ndarray):
slice_list = [
slice(None),
] * len(input_data.shape)
slice_list[slice_dim] = slice_range
sliced_data = input_data[tuple(slice_list)]
else:
sliced_data = \
input_data[slice_range]
return sliced_data
def smooth_process(x,
                   smooth_type='savgol',
                   cfg_base_dir='configs/_base_/post_processing/'):
    """Smooth the array with the specified smoothing type.

    Args:
        x (np.ndarray): Shape should be (frame,num_person,K,C)
            or (frame,K,C).
        smooth_type (str, optional): Smooth type.
            choose in ['oneeuro', 'gaus1d', 'savgol','smoothnet',
            'smoothnet_windowsize8','smoothnet_windowsize16',
            'smoothnet_windowsize32','smoothnet_windowsize64'].
            Defaults to 'savgol'. 'smoothnet' is default with windowsize=8.
        cfg_base_dir (str, optional): Config base dir,
            default configs/_base_/post_processing/

    Raises:
        ValueError: check the input smoothing type.

    Returns:
        np.ndarray: Smoothed data. The shape should be
            (frame,num_person,K,C) or (frame,K,C).
    """
    # 'smoothnet' is an alias for the smallest window size
    if smooth_type == 'smoothnet':
        smooth_type = 'smoothnet_windowsize8'
    assert smooth_type in [
        'oneeuro', 'gaus1d', 'savgol', 'smoothnet_windowsize8',
        'smoothnet_windowsize16', 'smoothnet_windowsize32',
        'smoothnet_windowsize64'
    ]
    cfg = os.path.join(cfg_base_dir, smooth_type + '.py')
    # NOTE(review): cfg is always a str at this point, so the elif branch
    # below appears unreachable — confirm before removing.
    if isinstance(cfg, str):
        cfg = mmcv.Config.fromfile(cfg)
    elif not isinstance(cfg, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(cfg)}')
    # copy so the caller's array is not mutated in place
    x = x.copy()
    assert x.ndim == 3 or x.ndim == 4
    smooth_func = build_post_processing(dict(cfg['smooth_cfg']))
    if x.ndim == 4:
        # smooth each person's trajectory independently
        for i in range(x.shape[1]):
            x[:, i] = smooth_func(x[:, i])
    elif x.ndim == 3:
        x = smooth_func(x)
    return x
def array_to_images(
    image_array: np.ndarray,
    output_folder: str,
    img_format: str = '%06d.png',
    resolution: Optional[Union[Tuple[int, int], Tuple[float, float]]] = None,
    disable_log: bool = False,
) -> None:
    """Write a frame array to image files by piping raw frames into ffmpeg.

    Args:
        image_array (np.ndarray): shape should be (f * h * w * 3).
        output_folder (str): output folder for the images.
        img_format (str, optional): format of the images.
            Defaults to '%06d.png'.
        resolution (Optional[Union[Tuple[int, int], Tuple[float, float]]],
            optional): resolution(height, width) of output.
            Defaults to None.
        disable_log (bool, optional): whether close the ffmepg command info.
            Defaults to False.

    Raises:
        FileNotFoundError: check output folder.
        TypeError: check input array.

    Returns:
        None
    """
    prepare_output_path(
        output_folder,
        allowed_suffix=[],
        tag='output image folder',
        path_type='dir',
        overwrite=True)
    if not isinstance(image_array, np.ndarray):
        raise TypeError('Input should be np.ndarray.')
    assert image_array.ndim == 4
    assert image_array.shape[-1] == 3
    if resolution:
        height, width = resolution
    else:
        # default to the frames' own (h, w)
        height, width = image_array.shape[1:3]
    command = [
        'ffmpeg',
        '-y',  # (optional) overwrite output file if it exists
        '-f',
        'rawvideo',
        '-s',
        f'{int(width)}x{int(height)}',  # size of one frame
        '-pix_fmt',
        'bgr24',  # bgr24 for matching OpenCV
        '-loglevel',
        'error',
        '-threads',
        '4',
        '-i',
        '-',  # The input comes from a pipe
        '-f',
        'image2',
        '-start_number',
        '0',
        os.path.join(output_folder, img_format),
    ]
    if not disable_log:
        print(f'Running \"{" ".join(command)}\"')
    process = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stderr=subprocess.PIPE,
        bufsize=10**8,
        close_fds=True)
    if process.stdin is None or process.stderr is None:
        raise BrokenPipeError('No buffer received.')
    # stream the frames in order through ffmpeg's stdin pipe
    for frame in image_array:
        process.stdin.write(frame.tobytes())
    process.stdin.close()
    process.stderr.close()
    process.wait()
def rotmat_to_aa(
    matrix: Union[torch.Tensor, numpy.ndarray]
) -> Union[torch.Tensor, numpy.ndarray]:
    """Convert rotation matrixs to axis angles.

    Args:
        matrix (Union[torch.Tensor, numpy.ndarray]): input shape
            should be (..., 3, 3). ndim of input is unlimited.

    Returns:
        Union[torch.Tensor, numpy.ndarray]: shape would be (..., 3).

    Raises:
        ValueError: if the trailing two dims of matrix are not (3, 3).
    """
    if matrix.shape[-1] != 3 or matrix.shape[-2] != 3:
        # BUGFIX: the message previously read 'shape f{matrix.shape}' —
        # a stray literal 'f' left over from an f-string typo.
        raise ValueError(f'Invalid rotation matrix shape {matrix.shape}.')
    # convert via quaternions: matrix -> quaternion -> axis-angle
    t = Compose([matrix_to_quaternion, quaternion_to_axis_angle])
    return t(matrix)
The provided code snippet includes necessary dependencies for implementing the `multi_person_with_mmtracking` function. Write a Python function `def multi_person_with_mmtracking(args, frames_iter)` to solve the following problem:
Estimate smplx parameters from multi-person images with mmtracking Args: args (object): object of argparse.Namespace. frames_iter (np.ndarray,): prepared frames
Here is the function:
def multi_person_with_mmtracking(args, frames_iter):
    """Estimate SMPL-X parameters for multiple people using mmtracking.

    Pipeline: track people across all frames, regress SMPL-X parameters per
    tracked person per frame, optionally smooth over time, convert rotation
    matrices to axis-angle, dump results to ``args.output`` and render a
    visualization video under ``args.show_path``.

    Args:
        args (object): object of argparse.Namespace. Fields read here:
            mesh_reg_config, mesh_reg_checkpoint, device, bbox_thr,
            smooth_type, output, show_path, body_model_dir, render_choice.
        frames_iter (np.ndarray,): prepared frames
    """
    mesh_model, extractor = init_model(
        args.mesh_reg_config,
        args.mesh_reg_checkpoint,
        device=args.device.lower())
    max_track_id, max_instance, frame_id_list, result_list = \
        get_tracking_result(args, frames_iter, mesh_model, extractor)
    frame_num = len(frame_id_list)
    # Dense result buffers indexed by [frame, track_id]; rotations are kept
    # as 3x3 matrices until the axis-angle conversion below.
    smplx_results = dict(
        global_orient=np.zeros([frame_num, max_track_id + 1, 1, 3, 3]),
        body_pose=np.zeros([frame_num, max_track_id + 1, 21, 3, 3]),
        betas=np.zeros([frame_num, max_track_id + 1, 10]),
        left_hand_pose=np.zeros([frame_num, max_track_id + 1, 15, 3, 3]),
        right_hand_pose=np.zeros([frame_num, max_track_id + 1, 15, 3, 3]),
        jaw_pose=np.zeros([frame_num, max_track_id + 1, 1, 3, 3]),
        expression=np.zeros([frame_num, max_track_id + 1, 10]))
    pred_cams = np.zeros([frame_num, max_track_id + 1, 3])
    bboxes_xyxy = np.zeros([frame_num, max_track_id + 1, 5])
    track_ids_lists = []
    for i, result in enumerate(mmcv.track_iter_progress(result_list)):
        frame_id = frame_id_list[i]
        if mesh_model.cfg.model.type == 'SMPLXImageBodyModelEstimator':
            mesh_results = inference_image_based_model(
                mesh_model,
                frames_iter[frame_id],
                result,
                bbox_thr=args.bbox_thr,
                format='xyxy')
        else:
            raise Exception(
                f'{mesh_model.cfg.model.type} is not supported yet')
        track_ids = []
        # Scatter this frame's per-person predictions into the dense buffers
        # by track id; absent tracks stay zero.
        for mesh_result in mesh_results:
            instance_id = mesh_result['track_id']
            bboxes_xyxy[i, instance_id] = mesh_result['bbox']
            pred_cams[i, instance_id] = mesh_result['camera']
            for key in smplx_results:
                smplx_results[key][
                    i, instance_id] = mesh_result['param'][key].cpu().numpy()
            track_ids.append(instance_id)
        track_ids_lists.append(track_ids)
    # release GPU memory
    del mesh_model
    del extractor
    torch.cuda.empty_cache()
    # smooth (rotations are flattened to 9-vectors for the smoother, then
    # reshaped back to 3x3)
    if args.smooth_type is not None:
        for key in smplx_results:
            if key not in ['betas', 'expression']:
                dim = smplx_results[key].shape[2]
                smplx_results[key] = smooth_process(
                    smplx_results[key].reshape(frame_num, -1, dim, 9),
                    smooth_type=args.smooth_type).reshape(
                        frame_num, -1, dim, 3, 3)
        pred_cams = smooth_process(
            pred_cams[:, np.newaxis],
            smooth_type=args.smooth_type).reshape(frame_num, -1, 3)
    if smplx_results['body_pose'].shape[2:] == (21, 3, 3):
        for key in smplx_results:
            if key not in ['betas', 'expression']:
                smplx_results[key] = rotmat_to_aa(smplx_results[key])
    else:
        raise Exception('Wrong shape of `body_pose`')
    # Assemble the 55-joint SMPL-X full pose in axis-angle:
    # 1 global + 21 body + 1 jaw + 2 eyes (zeroed) + 15 + 15 hands.
    fullpose = np.concatenate(
        (
            smplx_results['global_orient'],
            smplx_results['body_pose'],
            smplx_results['jaw_pose'],
            # Use zero for leye_pose and reye_pose
            np.zeros((frame_num, max_track_id + 1, 2, 3),
                     dtype=smplx_results['jaw_pose'].dtype),
            smplx_results['left_hand_pose'],
            smplx_results['right_hand_pose'],
        ),
        axis=2)
    if args.output is not None:
        os.makedirs(args.output, exist_ok=True)
        human_data = HumanData()
        smplx = {}
        smplx['fullpose'] = fullpose
        smplx['betas'] = smplx_results['betas']
        human_data['smplx'] = smplx
        human_data['pred_cams'] = pred_cams
        human_data.dump(osp.join(args.output, 'inference_result.npz'))
    # To compress vertices array: re-pack sparse per-track slots into dense
    # per-frame arrays of at most max_instance people.
    compressed_cams = np.zeros([frame_num, max_instance, 3])
    compressed_bboxs = np.zeros([frame_num, max_instance, 5])
    compressed_fullpose = np.zeros([frame_num, max_instance, 55, 3])
    compressed_betas = np.zeros([frame_num, max_instance, 10])
    for i, track_ids_list in enumerate(track_ids_lists):
        instance_num = len(track_ids_list)
        compressed_cams[i, :instance_num] = pred_cams[i, track_ids_list]
        compressed_bboxs[i, :instance_num] = bboxes_xyxy[i, track_ids_list]
        compressed_fullpose[i, :instance_num] = fullpose[i, track_ids_list]
        compressed_betas[i, :instance_num] = smplx_results['betas'][
            i, track_ids_list]
    assert len(frame_id_list) > 0
    if args.show_path is not None:
        # Dump the kept frames as images so the renderer can read them back.
        frames_folder = osp.join(args.show_path, 'images')
        os.makedirs(frames_folder, exist_ok=True)
        array_to_images(
            np.array(frames_iter)[frame_id_list], output_folder=frames_folder)
        # create body model
        body_model_config = dict(
            type='smplx',
            num_betas=10,
            use_face_contour=True,
            use_pca=False,
            flat_hand_mean=True,
            model_path=args.body_model_dir,
            keypoint_src='smplx',
            keypoint_dst='smplx',
        )
        visualize_smpl_hmr(
            poses=compressed_fullpose.reshape(-1, max_instance, 165),
            betas=compressed_betas,
            cam_transl=compressed_cams,
            bbox=compressed_bboxs,
            output_path=os.path.join(args.show_path, 'smplx.mp4'),
            render_choice=args.render_choice,
            resolution=frames_iter[0].shape[:2],
            origin_frames=frames_folder,
            body_model_config=body_model_config,
            overwrite=True,
            read_frames_batch=True)
        shutil.rmtree(frames_folder)
import os
import os.path as osp
import shutil
import warnings
from argparse import ArgumentParser
from pathlib import Path
import mmcv
import numpy as np
import torch
from mmhuman3d.apis import (
feature_extract,
inference_image_based_model,
inference_video_based_model,
init_model,
)
from mmhuman3d.core.visualization.visualize_smpl import visualize_smpl_hmr
from mmhuman3d.data.data_structures.human_data import HumanData
from mmhuman3d.utils.demo_utils import (
extract_feature_sequence,
get_speed_up_interval,
prepare_frames,
process_mmdet_results,
process_mmtracking_results,
smooth_process,
speed_up_interpolate,
speed_up_process,
)
from mmhuman3d.utils.ffmpeg_utils import array_to_images
def process_mmtracking_results(mmtracking_results,
                               max_track_id,
                               bbox_thr=None):
    """Process mmtracking results into per-person dicts.

    Args:
        mmtracking_results (dict): output of an mmtracking model; must hold
            the per-frame tracked boxes under 'track_bboxes' (new API) or
            'track_results' (old API).
        max_track_id (int): the maximum track id seen so far.
        bbox_thr (float, optional): score threshold for bounding boxes.
            When given, each track row must carry a confidence score in
            column 5. Defaults to None (keep all tracks).

    Returns:
        person_results ([list]): tracked bounding boxes sorted by track id;
            each item is a dict with 'track_id' and 'bbox'.
        max_track_id (int): the updated maximum track id.
        instance_num (int): the number of instances in this frame.

    Raises:
        KeyError: neither 'track_bboxes' nor 'track_results' is present.
    """
    person_results = []
    # 'track_results' is changed to 'track_bboxes'
    # in https://github.com/open-mmlab/mmtracking/pull/300
    if 'track_bboxes' in mmtracking_results:
        tracking_results = mmtracking_results['track_bboxes'][0]
    elif 'track_results' in mmtracking_results:
        tracking_results = mmtracking_results['track_results'][0]
    else:
        # Fix: previously this fell through with `tracking_results` unbound,
        # raising a confusing UnboundLocalError below.
        raise KeyError(
            "mmtracking_results must contain either 'track_bboxes' "
            "or 'track_results'")
    tracking_results = np.array(tracking_results)
    if bbox_thr is not None:
        # rows are (track_id, x1, y1, x2, y2, score)
        assert tracking_results.shape[-1] == 6
        valid_idx = np.where(tracking_results[:, 5] > bbox_thr)[0]
        tracking_results = tracking_results[valid_idx]
    for track in tracking_results:
        person = {}
        person['track_id'] = int(track[0])
        max_track_id = max(max_track_id, int(track[0]))
        person['bbox'] = track[1:]
        person_results.append(person)
    person_results = sorted(person_results, key=lambda x: x.get('track_id', 0))
    instance_num = len(person_results)
    return person_results, max_track_id, instance_num
def get_tracking_result(args, frames_iter, mesh_model, extractor):
    """Run multi-object tracking over all frames and collect results.

    Args:
        args (object): argparse.Namespace with tracking_config, device,
            bbox_thr and draw_bbox fields.
        frames_iter: iterable of frames.
        mesh_model: initialized mesh estimator; its config decides whether
            per-bbox features must be extracted.
        extractor: feature extractor for video-based estimators, or None.

    Returns:
        tuple: (max_track_id, max_instance, frame_id_list, result_list) —
            frames with no detections are dropped from both lists.
    """
    # NOTE(review): init_tracking_model/inference_mot come from mmtracking
    # and are not in this module's visible imports — confirm.
    tracking_model = init_tracking_model(
        args.tracking_config, None, device=args.device.lower())
    max_track_id = 0
    max_instance = 0
    result_list = []
    frame_id_list = []
    for i, frame in enumerate(mmcv.track_iter_progress(frames_iter)):
        mmtracking_results = inference_mot(tracking_model, frame, frame_id=i)
        # keep the person class bounding boxes.
        result, max_track_id, instance_num = \
            process_mmtracking_results(
                mmtracking_results,
                max_track_id=max_track_id,
                bbox_thr=args.bbox_thr)
        # extract features from the input video or image sequences
        if mesh_model.cfg.model.type == 'VideoBodyModelEstimator' \
                and extractor is not None:
            result = feature_extract(
                extractor, frame, result, args.bbox_thr, format='xyxy')
        # drop the frame with no detected results
        if result == []:
            continue
        # update max_instance
        if instance_num > max_instance:
            max_instance = instance_num
        # vis bboxes (draws on `frame` in place)
        if args.draw_bbox:
            bboxes = [res['bbox'] for res in result]
            bboxes = np.vstack(bboxes)
            mmcv.imshow_bboxes(
                frame, bboxes, top_k=-1, thickness=2, show=False)
        result_list.append(result)
        frame_id_list.append(i)
    return max_track_id, max_instance, frame_id_list, result_list
import os
import os.path as osp
import shutil
import warnings
from argparse import ArgumentParser
from pathlib import Path
import mmcv
import numpy as np
import torch
from mmhuman3d.apis import (
feature_extract,
inference_image_based_model,
inference_video_based_model,
init_model,
)
from mmhuman3d.core.visualization.visualize_smpl import visualize_smpl_hmr
from mmhuman3d.data.data_structures.human_data import HumanData
from mmhuman3d.utils.demo_utils import (
extract_feature_sequence,
get_speed_up_interval,
prepare_frames,
process_mmdet_results,
process_mmtracking_results,
smooth_process,
speed_up_interpolate,
speed_up_process,
)
from mmhuman3d.utils.ffmpeg_utils import array_to_images
def get_detection_result(args, frames_iter, mesh_model, extractor):
    """Detect one person per frame and temporally smooth the boxes.

    Only the first detection per frame (``results[0]``) is smoothed and
    used, so this path effectively assumes a single subject.

    Args:
        args (object): argparse.Namespace with det_config, det_checkpoint,
            device, det_cat_id, bbox_thr and draw_bbox fields.
        frames_iter: iterable of frames.
        mesh_model: initialized mesh estimator.
        extractor: feature extractor for video-based estimators, or None.

    Returns:
        tuple: (frame_id_list, result_list) with empty frames dropped.
    """
    person_det_model = init_detector(
        args.det_config, args.det_checkpoint, device=args.device.lower())
    frame_id_list = []
    result_list = []
    pre_bbox = None
    for i, frame in enumerate(mmcv.track_iter_progress(frames_iter)):
        mmdet_results = inference_detector(person_det_model, frame)
        # keep the person class bounding boxes.
        results = process_mmdet_results(
            mmdet_results, cat_id=args.det_cat_id, bbox_thr=args.bbox_thr)
        # smooth: blend current corners toward the previous frame's box,
        # with a motion-dependent blending ratio.
        # NOTE(review): nonlinearWeight is not defined in this module —
        # presumably maps a relative squared displacement to [0.8, 1];
        # verify against its definition.
        if pre_bbox is not None:
            cur_bbox = results[0]['bbox']
            dist_tl = np.array([(cur_bbox[0] - pre_bbox[0])**2,
                                (cur_bbox[1] - pre_bbox[1])**2])
            delta_tl = np.array(dist_tl /
                                (np.array(pre_bbox[:2]) + 1e-7)).sum()
            ratio_tl = nonlinearWeight(delta_tl, 0, 0.2, 0.8, 120, 1)
            dist_br = np.array([(cur_bbox[2] - pre_bbox[2])**2,
                                (cur_bbox[3] - pre_bbox[3])**2])
            delta_br = np.array(dist_br /
                                (np.array(pre_bbox[2:4]) + 1e-7)).sum()
            ratio_br = nonlinearWeight(delta_br, 0, 0.2, 0.8, 120, 1)
            results[0]['bbox'] = np.array([
                ratio_tl * cur_bbox[0] + (1 - ratio_tl) * pre_bbox[0],
                ratio_tl * cur_bbox[1] + (1 - ratio_tl) * pre_bbox[1],
                ratio_br * cur_bbox[2] + (1 - ratio_br) * pre_bbox[2],
                ratio_br * cur_bbox[3] + (1 - ratio_br) * pre_bbox[3],
                cur_bbox[4]
            ])
        pre_bbox = results[0]['bbox']
        # extract features from the input video or image sequences
        if mesh_model.cfg.model.type == 'VideoBodyModelEstimator' \
                and extractor is not None:
            results = feature_extract(
                extractor, frame, results, args.bbox_thr, format='xyxy')
        # drop the frame with no detected results
        if results == []:
            continue
        # vis bboxes (draws on `frame` in place)
        if args.draw_bbox:
            bboxes = [res['bbox'] for res in results]
            bboxes = np.vstack(bboxes)
            mmcv.imshow_bboxes(
                frame, bboxes, top_k=-1, thickness=2, show=False)
        frame_id_list.append(i)
        result_list.append(results)
    # Second smoothing pass over the whole sequence (Savitzky-Golay).
    frame_num = len(result_list)
    x = np.array([i[0]['bbox'] for i in result_list])
    x = smooth_process(
        x[:, np.newaxis], smooth_type='savgol').reshape(frame_num, 5)
    for idx, result in enumerate(result_list):
        result[0]['bbox'] = x[idx, :]
    return frame_id_list, result_list
def visualize_smpl_hmr(cam_transl,
                       bbox=None,
                       kp2d=None,
                       focal_length=5000,
                       det_width=224,
                       det_height=224,
                       bbox_format='xyxy',
                       **kwargs) -> None:
    """Simplest way to visualize HMR or SPIN or Smplify pred smpl with origin
    frames and predicted cameras.

    Args:
        cam_transl: predicted weak-perspective cameras; last dim is
            (scale, tx, ty). numpy arrays are converted to torch.
        bbox: per-person bounding boxes used to build per-instance
            intrinsics; ignored when kp2d is given.
        kp2d: 2D keypoints; when given, boxes are derived from them.
        focal_length (int): focal length of the default HMR intrinsic.
        det_width (int)/det_height (int): detection crop size.
        bbox_format (str): box layout, e.g. 'xyxy'.
        **kwargs: forwarded to render_smpl; keys pinned below are dropped.
    """
    if kp2d is not None:
        bbox = convert_kp2d_to_bbox(kp2d, bbox_format=bbox_format)
    Ks = convert_bbox_to_intrinsic(bbox, bbox_format=bbox_format)
    K = torch.Tensor(
        get_default_hmr_intrinsic(
            focal_length=focal_length,
            det_height=det_height,
            det_width=det_width))
    # Pin projection settings; K/R/orig_cam are pinned to None so only the
    # explicit Ks/K/T below are used.
    func = partial(
        render_smpl,
        projection='perspective',
        convention='opencv',
        in_ndc=False,
        K=None,
        R=None,
        orig_cam=None,
    )
    if isinstance(cam_transl, np.ndarray):
        cam_transl = torch.Tensor(cam_transl)
    # Weak-perspective (s, tx, ty) -> camera translation (tx, ty, tz) with
    # tz = 2 * f / (det_width * s); 1e-9 guards division by zero.
    T = torch.cat([
        cam_transl[..., [1]], cam_transl[..., [2]], 2 * focal_length /
        (det_width * cam_transl[..., [0]] + 1e-9)
    ], -1)
    # Drop caller kwargs that would collide with the pinned partial keywords.
    for k in func.keywords.keys():
        if k in kwargs:
            kwargs.pop(k)
    return func(Ks=Ks, K=K, T=T, **kwargs)
class HumanData(dict):
logger = None
SUPPORTED_KEYS = _HumanData_SUPPORTED_KEYS
WARNED_KEYS = []
    def __new__(cls: _HumanData, *args: Any, **kwargs: Any) -> _HumanData:
        """New an instance of HumanData.

        Args:
            cls (HumanData): HumanData class.

        Returns:
            HumanData: An instance of HumanData.
        """
        # NOTE(review): the args/kwargs containers are forwarded to
        # dict.__new__ as two positional arguments, which dict ignores —
        # effectively dict.__new__(cls); confirm intent.
        ret_human_data = super().__new__(cls, args, kwargs)
        # Per-instance state flags used throughout the class:
        # __data_len__: temporal length (-1 = unset yet);
        # __key_strict__: reject unsupported keys when True;
        # __keypoints_compressed__: keypoints stored without zero-padding.
        setattr(ret_human_data, '__data_len__', -1)
        setattr(ret_human_data, '__key_strict__', False)
        setattr(ret_human_data, '__keypoints_compressed__', False)
        return ret_human_data
    # NOTE(review): takes `cls` — presumably an @classmethod whose decorator
    # was lost in extraction; confirm against the original file.
    def set_logger(cls, logger: Union[logging.Logger, str, None] = None):
        """Set logger of HumanData class.

        Args:
            logger (logging.Logger | str | None, optional):
                The way to print summary.
                See `mmcv.utils.print_log()` for details.
                Defaults to None.
        """
        cls.logger = logger
    # NOTE(review): takes `cls` — presumably an @classmethod whose decorator
    # was lost in extraction; confirm against the original file.
    def fromfile(cls, npz_path: str) -> _HumanData:
        """Construct a HumanData instance from an npz file.

        Args:
            npz_path (str):
                Path to a dumped npz file.

        Returns:
            HumanData:
                A HumanData instance load from file.
        """
        ret_human_data = cls()
        ret_human_data.load(npz_path)
        return ret_human_data
    # NOTE(review): takes `cls` — presumably an @classmethod whose decorator
    # was lost in extraction; confirm against the original file.
    def new(cls,
            source_dict: dict = None,
            key_strict: bool = False) -> _HumanData:
        """Construct a HumanData instance from a dict.

        Args:
            source_dict (dict, optional):
                A dict with items in HumanData fashion.
                Defaults to None.
            key_strict (bool, optional):
                Whether to raise error when setting unsupported keys.
                Defaults to False.

        Returns:
            HumanData:
                A HumanData instance.
        """
        if source_dict is None:
            ret_human_data = cls()
        else:
            ret_human_data = cls(source_dict)
        ret_human_data.set_key_strict(key_strict)
        return ret_human_data
def get_key_strict(self) -> bool:
"""Get value of attribute key_strict.
Returns:
bool:
Whether to raise error when setting unsupported keys.
"""
return self.__key_strict__
def set_key_strict(self, value: bool):
"""Set value of attribute key_strict.
Args:
value (bool, optional):
Whether to raise error when setting unsupported keys.
Defaults to True.
"""
former__key_strict__ = self.__key_strict__
self.__key_strict__ = value
if former__key_strict__ is False and \
value is True:
self.pop_unsupported_items()
def check_keypoints_compressed(self) -> bool:
"""Check whether the keypoints are compressed.
Returns:
bool:
Whether the keypoints are compressed.
"""
return self.__keypoints_compressed__
    def load(self, npz_path: str):
        """Load data from npz_path and update them to self.

        Dumped 0-d arrays are unwrapped back to scalars, values are cast to
        their declared types, private ``__*__`` attributes are restored onto
        the instance, and legacy 4-column bbox_xywh arrays get a confidence
        column of ones appended.

        Args:
            npz_path (str):
                Path to a dumped npz file.
        """
        supported_keys = self.__class__.SUPPORTED_KEYS
        with np.load(npz_path, allow_pickle=True) as npz_file:
            tmp_data_dict = dict(npz_file)
            for key, value in list(tmp_data_dict.items()):
                if isinstance(value, np.ndarray) and\
                        len(value.shape) == 0:
                    # value is not an ndarray before dump
                    value = value.item()
                elif key in supported_keys and\
                        type(value) != supported_keys[key]['type']:
                    value = supported_keys[key]['type'](value)
                if value is None:
                    tmp_data_dict.pop(key)
                elif key == '__key_strict__' or \
                        key == '__data_len__' or\
                        key == '__keypoints_compressed__':
                    self.__setattr__(key, value)
                    # pop the attributes to keep dict clean
                    tmp_data_dict.pop(key)
                elif key == 'bbox_xywh' and value.shape[1] == 4:
                    # legacy 4-column boxes: append confidence = 1.0
                    value = np.hstack([value, np.ones([value.shape[0], 1])])
                    tmp_data_dict[key] = value
                else:
                    tmp_data_dict[key] = value
            self.update(tmp_data_dict)
            self.__set_default_values__()
    def dump(self, npz_path: str, overwrite: bool = True):
        """Dump keys and items to an npz file.

        Args:
            npz_path (str):
                Path to a dumped npz file.
            overwrite (bool, optional):
                Whether to overwrite if there is already a file.
                Defaults to True.

        Raises:
            ValueError:
                npz_path does not end with '.npz'.
            FileExistsError:
                When overwrite is False and file exists.
        """
        if not check_path_suffix(npz_path, ['.npz']):
            raise ValueError('Not an npz file.')
        if not overwrite:
            if check_path_existence(npz_path, 'file') == Existence.FileExist:
                raise FileExistsError
        # Private instance attributes are dumped alongside the data so that
        # load() can restore them.
        dict_to_dump = {
            '__key_strict__': self.__key_strict__,
            '__data_len__': self.__data_len__,
            '__keypoints_compressed__': self.__keypoints_compressed__,
        }
        dict_to_dump.update(self)
        np.savez_compressed(npz_path, **dict_to_dump)
    def get_sliced_cache(self, slice_size=10) -> List:
        """Slice the whole HumanData into pieces for HumanDataCacheWriter.

        Args:
            slice_size (int, optional):
                The length of each unit in HumanData cache.
                Defaults to 10.

        Returns:
            List:
                Two dicts for HumanDataCacheWriter.
                Init HumanDataCacheWriter by HumanDataCacheWriter(**Returns[0])
                and set data by
                human_data_cache_writer.update_sliced_dict(Returns[1]).
        """
        keypoints_info = {}
        non_sliced_data = {}
        sliced_data = {}
        slice_num = ceil(self.__data_len__ / slice_size)
        for slice_index in range(slice_num):
            sliced_data[str(slice_index)] = {}
        # __get_slice_dim__ maps each key to the axis to slice along
        # (None = not sliceable, dict = per-sub-key axes).
        dim_dict = self.__get_slice_dim__()
        for key, dim in dim_dict.items():
            # no dim to slice
            if dim is None:
                if key.startswith('keypoints') and\
                        (key.endswith('_mask') or
                         key.endswith('_convention')):
                    keypoints_info[key] = self[key]
                else:
                    non_sliced_data[key] = self[key]
            elif isinstance(dim, dict):
                # dict value: slice each sub-value along its own axis
                value_dict = self.get_raw_value(key)
                non_sliced_sub_dict = {}
                for sub_key in value_dict.keys():
                    sub_value = value_dict[sub_key]
                    if dim[sub_key] is None:
                        non_sliced_sub_dict[sub_key] = sub_value
                    else:
                        sub_dim = dim[sub_key]
                        for slice_index in range(slice_num):
                            slice_start = slice_index * slice_size
                            slice_end = min((slice_index + 1) * slice_size,
                                            self.__data_len__)
                            slice_range = slice(slice_start, slice_end)
                            sliced_sub_value = \
                                HumanData.__get_sliced_result__(
                                    sub_value, sub_dim, slice_range
                                )
                            if key not in sliced_data[str(slice_index)]:
                                sliced_data[str(slice_index)][key] = {}
                            sliced_data[str(slice_index)][key][sub_key] = \
                                sliced_sub_value
                if len(non_sliced_sub_dict) > 0:
                    non_sliced_data[key] = non_sliced_sub_dict
            else:
                value = self.get_raw_value(key)
                # slice as ndarray
                if isinstance(value, np.ndarray):
                    slice_list = [
                        slice(None),
                    ] * len(value.shape)
                    for slice_index in range(slice_num):
                        slice_start = slice_index * slice_size
                        slice_end = min((slice_index + 1) * slice_size,
                                        self.__data_len__)
                        slice_list[dim] = slice(slice_start, slice_end)
                        sliced_value = value[tuple(slice_list)]
                        sliced_data[str(slice_index)][key] = sliced_value
                # slice as list/tuple
                else:
                    for slice_index in range(slice_num):
                        slice_start = slice_index * slice_size
                        slice_end = min((slice_index + 1) * slice_size,
                                        self.__data_len__)
                        sliced_value = value[slice(slice_start, slice_end)]
                        sliced_data[str(slice_index)][key] = sliced_value
        writer_args_dict = {
            'slice_size': slice_size,
            'keypoints_info': keypoints_info,
            'data_len': self.data_len,
            'non_sliced_data': non_sliced_data,
            'key_strict': self.get_key_strict()
        }
        return writer_args_dict, sliced_data
    def to(self,
           device: Optional[Union[torch.device, str]] = _CPU_DEVICE,
           dtype: Optional[torch.dtype] = None,
           non_blocking: Optional[bool] = False,
           copy: Optional[bool] = False,
           memory_format: Optional[torch.memory_format] = None) -> dict:
        """Convert values in numpy.ndarray type to torch.Tensor, and move
        Tensors to the target device. All keys will exist in the returned dict.

        Args:
            device (Union[torch.device, str], optional):
                A specified device. Defaults to CPU_DEVICE.
            dtype (torch.dtype, optional):
                The data type of the expected torch.Tensor.
                If dtype is None, it is decided according to numpy.ndarry.
                Defaults to None.
            non_blocking (bool, optional):
                When non_blocking, tries to convert asynchronously with
                respect to the host if possible, e.g.,
                converting a CPU Tensor with pinned memory to a CUDA Tensor.
                Defaults to False.
            copy (bool, optional):
                When copy is set, a new Tensor is created even when
                the Tensor already matches the desired conversion.
                No matter what value copy is, Tensor constructed from numpy
                will not share the same memory with the source numpy.ndarray.
                Defaults to False.
            memory_format (torch.memory_format, optional):
                The desired memory format of returned Tensor.
                Not supported by pytorch-cpu.
                Defaults to None.

        Returns:
            dict:
                A dict with all numpy.ndarray values converted into
                torch.Tensor and all Tensors moved to the target device.
        """
        ret_dict = {}
        for key in self.keys():
            raw_value = self.get_raw_value(key)
            tensor_value = None
            if isinstance(raw_value, np.ndarray):
                # .clone() detaches the tensor from the source ndarray memory
                tensor_value = torch.from_numpy(raw_value).clone()
            elif isinstance(raw_value, torch.Tensor):
                tensor_value = raw_value
            if tensor_value is None:
                # non-array values are passed through unchanged
                ret_dict[key] = raw_value
            else:
                if memory_format is None:
                    ret_dict[key] = \
                        tensor_value.to(device, dtype,
                                        non_blocking, copy)
                else:
                    ret_dict[key] = \
                        tensor_value.to(device, dtype,
                                        non_blocking, copy,
                                        memory_format=memory_format)
        return ret_dict
    def __getitem__(self, key: _KT) -> _VT:
        """Get value defined by HumanData. This function will be called by
        self[key]. In keypoints_compressed mode, if the key contains
        'keypoints', an array with zero-padding at absent keypoint will be
        returned. Call self.get_raw_value(k) to get value without padding.

        Args:
            key (_KT):
                Key in HumanData.

        Returns:
            _VT:
                Value to the key.
        """
        value = super().__getitem__(key)
        if self.__keypoints_compressed__:
            mask_key = f'{key}_mask'
            # Only pad keypoint arrays that have an accompanying mask.
            if key in self and \
                    isinstance(value, np.ndarray) and \
                    'keypoints' in key and \
                    mask_key in self:
                mask_array = np.asarray(super().__getitem__(mask_key))
                value = \
                    self.__class__.__add_zero_pad__(value, mask_array)
        return value
def get_raw_value(self, key: _KT) -> _VT:
"""Get raw value from the dict. It acts the same as
dict.__getitem__(k).
Args:
key (_KT):
Key in dict.
Returns:
_VT:
Value to the key.
"""
value = super().__getitem__(key)
return value
def get_value_in_shape(self,
key: _KT,
shape: Union[list, tuple],
padding_constant: int = 0) -> np.ndarray:
"""Get value in a specific shape. For each dim, if the required shape
is smaller than current shape, ndarray will be sliced. Otherwise, it
will be padded with padding_constant at the end.
Args:
key (_KT):
Key in dict. The value of this key must be
an instance of numpy.ndarray.
shape (Union[list, tuple]):
Shape of the returned array. Its length
must be equal to value.ndim. Set -1 for
a dimension if you do not want to edit it.
padding_constant (int, optional):
The value to set the padded values for each axis.
Defaults to 0.
Raises:
ValueError:
A value in shape is neither positive integer nor -1.
Returns:
np.ndarray:
An array in required shape.
"""
value = self.get_raw_value(key)
assert isinstance(value, np.ndarray)
assert value.ndim == len(shape)
pad_width_list = []
slice_list = []
for dim_index in range(len(shape)):
if shape[dim_index] == -1:
# no pad or slice
pad_width_list.append((0, 0))
slice_list.append(slice(None))
elif shape[dim_index] > 0:
# valid shape value
wid = shape[dim_index] - value.shape[dim_index]
if wid > 0:
pad_width_list.append((0, wid))
else:
pad_width_list.append((0, 0))
slice_list.append(slice(0, shape[dim_index]))
else:
# invalid
raise ValueError
pad_value = np.pad(
value,
pad_width=pad_width_list,
mode='constant',
constant_values=padding_constant)
return pad_value[tuple(slice_list)]
    # NOTE(review): looks like an @overload stub whose decorator was lost in
    # extraction; as written it is shadowed by the later definition.
    def get_slice(self, stop: int):
        """Slice [0, stop, 1] of all sliceable values."""
        ...
    # NOTE(review): looks like an @overload stub whose decorator was lost in
    # extraction; as written it is shadowed by the later definition.
    def get_slice(self, start: int, stop: int):
        """Slice [start, stop, 1] of all sliceable values."""
        ...
    # NOTE(review): looks like an @overload stub whose decorator was lost in
    # extraction; as written it is shadowed by the later definition.
    def get_slice(self, start: int, stop: int, step: int):
        """Slice [start, stop, step] of all sliceable values."""
        ...
    def get_slice(self,
                  arg_0: int,
                  arg_1: Union[int, Any] = None,
                  step: int = 1) -> _HumanData:
        """Slice all sliceable values along major_dim dimension.

        Args:
            arg_0 (int):
                When arg_1 is None, arg_0 is stop and start=0.
                When arg_1 is not None, arg_0 is start.
            arg_1 (Union[int, Any], optional):
                None or where to stop.
                Defaults to None.
            step (int, optional):
                Length of step. Defaults to 1.

        Returns:
            HumanData:
                A new HumanData instance with sliced values.
        """
        ret_human_data = \
            HumanData.new(key_strict=self.get_key_strict())
        # range(stop) vs range(start, stop) style argument handling
        if arg_1 is None:
            start = 0
            stop = arg_0
        else:
            start = arg_0
            stop = arg_1
        slice_index = slice(start, stop, step)
        dim_dict = self.__get_slice_dim__()
        for key, dim in dim_dict.items():
            # keys not expected be sliced
            if dim is None:
                ret_human_data[key] = self[key]
            elif isinstance(dim, dict):
                # dict value: slice each sub-value along its own axis
                value_dict = self.get_raw_value(key)
                sliced_dict = {}
                for sub_key in value_dict.keys():
                    sub_value = value_dict[sub_key]
                    if dim[sub_key] is None:
                        sliced_dict[sub_key] = sub_value
                    else:
                        sub_dim = dim[sub_key]
                        sliced_sub_value = \
                            HumanData.__get_sliced_result__(
                                sub_value, sub_dim, slice_index)
                        sliced_dict[sub_key] = sliced_sub_value
                ret_human_data[key] = sliced_dict
            else:
                value = self[key]
                sliced_value = \
                    HumanData.__get_sliced_result__(
                        value, dim, slice_index)
                ret_human_data[key] = sliced_value
        # check keypoints compressed: preserve compression state on the copy
        if self.check_keypoints_compressed():
            ret_human_data.compress_keypoints_by_mask()
        return ret_human_data
    def __get_slice_dim__(self) -> dict:
        """For each key in this HumanData, get the dimension for slicing. 0 for
        default, if no other value specified.

        Returns:
            dict:
                Keys are self.keys().
                Values indicate where to slice.
                None for not expected to be sliced or
                failed.
        """
        supported_keys = self.__class__.SUPPORTED_KEYS
        ret_dict = {}
        for key in self.keys():
            # keys not expected be sliced
            if key in supported_keys and \
                    'dim' in supported_keys[key] and \
                    supported_keys[key]['dim'] is None:
                ret_dict[key] = None
            else:
                value = self[key]
                if isinstance(value, dict) and len(value) > 0:
                    ret_dict[key] = {}
                    for sub_key in value.keys():
                        try:
                            sub_value_len = len(value[sub_key])
                            # NOTE(review): 'dim' is looked up on the value
                            # dict itself and applied to every sub_key —
                            # presumably a per-dict slice-axis override;
                            # confirm against HumanDataCacheWriter usage.
                            if 'dim' in value:
                                ret_dict[key][sub_key] = value['dim']
                            elif sub_value_len != self.__data_len__:
                                ret_dict[key][sub_key] = None
                            else:
                                ret_dict[key][sub_key] = 0
                        except TypeError:
                            ret_dict[key][sub_key] = None
                    continue
                # instance cannot be sliced without len method
                try:
                    value_len = len(value)
                except TypeError:
                    ret_dict[key] = None
                    continue
                # slice on dim 0 by default
                slice_dim = 0
                if key in supported_keys and \
                        'dim' in supported_keys[key]:
                    slice_dim = \
                        supported_keys[key]['dim']
                data_len = value_len if slice_dim == 0 \
                    else value.shape[slice_dim]
                # dim not for slice
                if data_len != self.__data_len__:
                    ret_dict[key] = None
                    continue
                else:
                    ret_dict[key] = slice_dim
        return ret_dict
    def __setitem__(self, key: _KT, val: _VT) -> None:
        """Set self[key] to value. Only be called when using
        human_data[key] = val. Methods like update won't call __setitem__.
        In keypoints_compressed mode, if the key contains 'keypoints',
        and f'{key}_mask' is in self.keys(), invalid zeros
        will be removed before setting value.

        Args:
            key (_KT):
                Key in HumanData.
                Better be an element in HumanData.SUPPORTED_KEYS.
                If not, an Error will be raised in key_strict mode.
            val (_VT):
                Value to the key.

        Raises:
            KeyError:
                self.get_key_strict() is True and
                key cannot be found in
                HumanData.SUPPORTED_KEYS.
            ValueError:
                Value is supported but doesn't match definition.
            ValueError:
                self.check_keypoints_compressed() is True and
                mask of a keypoint item is missing.
        """
        self.__check_key__(key)
        self.__check_value__(key, val)
        # if it can be compressed by mask
        if self.__keypoints_compressed__:
            class_logger = self.__class__.logger
            if 'keypoints' in key and \
                    '_mask' in key:
                # masks are frozen while compressed: silently refuse the write
                msg = 'Mask cannot be modified ' +\
                    'in keypoints_compressed mode.'
                print_log(msg=msg, logger=class_logger, level=logging.WARN)
                return
            elif isinstance(val, np.ndarray) and \
                    'keypoints' in key and \
                    '_mask' not in key:
                mask_key = f'{key}_mask'
                if mask_key in self:
                    # strip zero-padded (masked-out) keypoints before storing
                    mask_array = np.asarray(super().__getitem__(mask_key))
                    val = \
                        self.__class__.__remove_zero_pad__(val, mask_array)
                else:
                    msg = f'Mask for {key} has not been set.' +\
                        f' Please set {mask_key} before compression.'
                    print_log(
                        msg=msg, logger=class_logger, level=logging.ERROR)
                    raise ValueError
        dict.__setitem__(self, key, val)
    def set_raw_value(self, key: _KT, val: _VT) -> None:
        """Set the raw value of self[key] to val after key check. It acts the
        same as dict.__setitem__(self, key, val) if the key satisfied
        constraints.

        Args:
            key (_KT):
                Key in dict.
            val (_VT):
                Value to the key.

        Raises:
            KeyError:
                self.get_key_strict() is True and
                key cannot be found in
                HumanData.SUPPORTED_KEYS.
            ValueError:
                Value is supported but doesn't match definition.
        """
        self.__check_key__(key)
        self.__check_value__(key, val)
        # dict.__setitem__ bypasses the keypoint-compression logic that
        # self.__setitem__ would apply.
        dict.__setitem__(self, key, val)
def pop_unsupported_items(self) -> None:
"""Find every item with a key not in HumanData.SUPPORTED_KEYS, and pop
it to save memory."""
for key in list(self.keys()):
if key not in self.__class__.SUPPORTED_KEYS:
self.pop(key)
    def __check_key__(self, key: Any) -> _KeyCheck:
        """Check whether the key matches definition in
        HumanData.SUPPORTED_KEYS.

        Args:
            key (Any):
                Key in HumanData.

        Returns:
            _KeyCheck:
                PASS, WARN or ERROR.

        Raises:
            KeyError:
                self.get_key_strict() is True and
                key cannot be found in
                HumanData.SUPPORTED_KEYS.
        """
        ret_key_check = _KeyCheck.PASS
        if self.get_key_strict():
            if key not in self.__class__.SUPPORTED_KEYS:
                ret_key_check = _KeyCheck.ERROR
        else:
            # non-strict: unsupported keys are tolerated but warned once,
            # tracked via the class-level WARNED_KEYS list
            if key not in self.__class__.SUPPORTED_KEYS and \
                    key not in self.__class__.WARNED_KEYS:
                # log warning message at the first time
                ret_key_check = _KeyCheck.WARN
                self.__class__.WARNED_KEYS.append(key)
        if ret_key_check == _KeyCheck.ERROR:
            raise KeyError(self.__class__.__get_key_error_msg__(key))
        elif ret_key_check == _KeyCheck.WARN:
            class_logger = self.__class__.logger
            if class_logger == 'silent':
                pass
            else:
                print_log(
                    msg=self.__class__.__get_key_warn_msg__(key),
                    logger=class_logger,
                    level=logging.WARN)
        return ret_key_check
def __check_value__(self, key: Any, val: Any) -> bool:
"""Check whether the value matches definition in
HumanData.SUPPORTED_KEYS.
Args:
key (Any):
Key in HumanData.
val (Any):
Value to the key.
Returns:
bool:
True for matched, ortherwise False.
Raises:
ValueError:
Value is supported but doesn't match definition.
"""
ret_bool = self.__check_value_type__(key, val) and\
self.__check_value_shape__(key, val) and\
self.__check_value_len__(key, val)
if not ret_bool:
raise ValueError(self.__class__.__get_value_error_msg__())
return ret_bool
    def __check_value_type__(self, key: Any, val: Any) -> bool:
        """Check whether the type of val matches definition in
        HumanData.SUPPORTED_KEYS.

        Args:
            key (Any):
                Key in HumanData.
            val (Any):
                Value to the key.

        Returns:
            bool:
                If type doesn't match, return False.
                Else return True.
        """
        ret_bool = True
        supported_keys = self.__class__.SUPPORTED_KEYS
        # check definition; unsupported keys pass unchecked
        if key in supported_keys:
            # check type (exact match, subclasses intentionally rejected)
            if type(val) != supported_keys[key]['type']:
                ret_bool = False
        if not ret_bool:
            expected_type = supported_keys[key]['type']
            err_msg = 'Type check Failed:\n'
            err_msg += f'key={str(key)}\n'
            err_msg += f'type(val)={type(val)}\n'
            err_msg += f'expected type={expected_type}\n'
            print_log(
                msg=err_msg, logger=self.__class__.logger, level=logging.ERROR)
        return ret_bool
    def __check_value_shape__(self, key: Any, val: Any) -> bool:
        """Check whether the shape of val matches definition in
        HumanData.SUPPORTED_KEYS.

        Args:
            key (Any):
                Key in HumanData.
            val (Any):
                Value to the key.

        Returns:
            bool:
                If expected shape is defined and doesn't match,
                return False.
                Else return True.
        """
        ret_bool = True
        supported_keys = self.__class__.SUPPORTED_KEYS
        # check definition; keys without a declared shape pass unchecked
        if key in supported_keys:
            # check shape: -1 (or any non-positive entry) means "any size"
            if 'shape' in supported_keys[key]:
                val_shape = val.shape
                for shape_ind in range(len(supported_keys[key]['shape'])):
                    # length not match
                    if shape_ind >= len(val_shape):
                        ret_bool = False
                        break
                    expect_val = supported_keys[key]['shape'][shape_ind]
                    # value not match
                    if expect_val > 0 and \
                            expect_val != val_shape[shape_ind]:
                        ret_bool = False
                        break
        if not ret_bool:
            expected_shape = str(supported_keys[key]['shape'])
            expected_shape = expected_shape.replace('-1', 'Any')
            err_msg = 'Shape check Failed:\n'
            err_msg += f'key={str(key)}\n'
            err_msg += f'val.shape={val_shape}\n'
            err_msg += f'expected shape={expected_shape}\n'
            print_log(
                msg=err_msg, logger=self.__class__.logger, level=logging.ERROR)
        return ret_bool
    # NOTE(review): presumably an @property whose decorator was lost in
    # extraction (other methods compare `self.data_len < 0`); confirm.
    def data_len(self) -> int:
        """Get the temporal length of this HumanData instance.

        Returns:
            int:
                Number of frames related to this instance.
        """
        return self.__data_len__
    # NOTE(review): presumably the @data_len.setter whose decorator was lost
    # in extraction; as written it shadows the getter above — confirm.
    def data_len(self, value: int):
        """Set the temporal length of this HumanData instance.

        Args:
            value (int):
                Number of frames related to this instance.
        """
        self.__data_len__ = value
def __check_value_len__(self, key: Any, val: Any) -> bool:
    """Check whether the temporal length of val matches other values.

    Args:
        key (Any):
            Key in HumanData.
        val (Any):
            Value to the key.

    Returns:
        bool:
            If temporal dim is defined and temporal length doesn't match,
            return False.
            Else return True.
    """
    ret_bool = True
    supported_keys = self.__class__.SUPPORTED_KEYS
    # check definition
    if key in supported_keys:
        # check temporal length
        if 'dim' in supported_keys[key] and \
                supported_keys[key]['dim'] is not None:
            val_slice_dim = supported_keys[key]['dim']
            # dict-valued keys carry their temporal axis on a sub-array
            # selected by 'slice_key'
            if supported_keys[key]['type'] == dict:
                slice_key = supported_keys[key]['slice_key']
                val_data_len = val[slice_key].shape[val_slice_dim]
            else:
                val_data_len = val.shape[val_slice_dim]
            if self.data_len < 0:
                # no data_len yet, assign a new one
                self.data_len = val_data_len
            else:
                # check if val_data_len matches recorded data_len
                if self.data_len != val_data_len:
                    ret_bool = False
            if not ret_bool:
                err_msg = 'Temporal check Failed:\n'
                err_msg += f'key={str(key)}\n'
                err_msg += f'val\'s data_len={val_data_len}\n'
                err_msg += f'expected data_len={self.data_len}\n'
                print_log(
                    msg=err_msg,
                    logger=self.__class__.logger,
                    level=logging.ERROR)
    return ret_bool
def generate_mask_from_confidence(self, keys=None) -> None:
    """Generate mask from keypoints' confidence. Keypoints that have zero
    confidence in all occurrences will have a zero mask. Note that the last
    value of the keypoint is assumed to be confidence.

    Args:
        keys: None, str, or list of str.
            None: all keys with `keypoints` in it (excluding masks) will
            have a mask generated from their confidence.
            str: key of the keypoint; the mask is stored as f'{key}_mask'.
            list of str: a list of keys of the keypoints.
            Generate mask for multiple keypoints.
            Defaults to None.

    Returns:
        None

    Raises:
        TypeError:
            `keys` is not None, str, or a list of str.
    """
    if keys is None:
        # collect every raw keypoint array, skipping existing masks
        keys = []
        for key in self.keys():
            val = self.get_raw_value(key)
            if isinstance(val, np.ndarray) and \
                    'keypoints' in key and \
                    '_mask' not in key:
                keys.append(key)
    elif isinstance(keys, str):
        keys = [keys]
    elif isinstance(keys, list):
        for key in keys:
            assert isinstance(key, str)
    else:
        raise TypeError(f'`Keys` must be None, str, or list of str, '
                        f'got {type(keys)}.')
    update_dict = {}
    for kpt_key in keys:
        kpt_array = self.get_raw_value(kpt_key)
        num_joints = kpt_array.shape[-2]
        # if all conf of a joint are zero, this joint is masked;
        # the last channel is treated as confidence
        joint_conf = kpt_array[..., -1].reshape(-1, num_joints)
        mask_array = (joint_conf > 0).astype(np.uint8).max(axis=0)
        assert len(mask_array) == num_joints
        # generate mask
        update_dict[f'{kpt_key}_mask'] = mask_array
    self.update(update_dict)
def compress_keypoints_by_mask(self) -> None:
    """If a key contains 'keypoints', and f'{key}_mask' is in self.keys(),
    invalid zeros will be removed and f'{key}_mask' will be locked.

    Raises:
        KeyError:
            A key containing 'keypoints' has been found
            but its corresponding mask is missing.
    """
    # compressing twice would corrupt the data
    assert self.__keypoints_compressed__ is False
    key_pairs = []
    for key in self.keys():
        mask_key = f'{key}_mask'
        val = self.get_raw_value(key)
        if isinstance(val, np.ndarray) and \
                'keypoints' in key and \
                '_mask' not in key:
            if mask_key in self:
                key_pairs.append([key, mask_key])
            else:
                msg = f'Mask for {key} has not been set.' +\
                    f'Please set {mask_key} before compression.'
                raise KeyError(msg)
    compressed_dict = {}
    for kpt_key, mask_key in key_pairs:
        kpt_array = self.get_raw_value(kpt_key)
        mask_array = np.asarray(self.get_raw_value(mask_key))
        # drop joint columns whose mask is zero
        compressed_kpt = \
            self.__class__.__remove_zero_pad__(kpt_array, mask_array)
        compressed_dict[kpt_key] = compressed_kpt
    # set value after all pairs are compressed
    self.update(compressed_dict)
    self.__keypoints_compressed__ = True
def decompress_keypoints(self) -> None:
    """If a key contains 'keypoints', and f'{key}_mask' is in self.keys(),
    invalid zeros will be inserted to the right places and f'{key}_mask'
    will be unlocked.

    Raises:
        KeyError:
            A key containing 'keypoints' has been found
            but its corresponding mask is missing.
    """
    # only meaningful on previously-compressed data
    assert self.__keypoints_compressed__ is True
    key_pairs = []
    for key in self.keys():
        mask_key = f'{key}_mask'
        val = self.get_raw_value(key)
        if isinstance(val, np.ndarray) and \
                'keypoints' in key and \
                '_mask' not in key:
            if mask_key in self:
                key_pairs.append([key, mask_key])
            else:
                class_logger = self.__class__.logger
                msg = f'Mask for {key} has not been found.' +\
                    f'Please remove {key} before decompression.'
                print_log(
                    msg=msg, logger=class_logger, level=logging.ERROR)
                raise KeyError
    decompressed_dict = {}
    for kpt_key, mask_key in key_pairs:
        mask_array = np.asarray(self.get_raw_value(mask_key))
        compressed_kpt = self.get_raw_value(kpt_key)
        # scatter the kept joints back and zero-fill the masked ones
        kpt_array = \
            self.__class__.__add_zero_pad__(compressed_kpt, mask_array)
        decompressed_dict[kpt_key] = kpt_array
    # set value after all pairs are decompressed
    self.update(decompressed_dict)
    self.__keypoints_compressed__ = False
def dump_by_pickle(self, pkl_path: str, overwrite: bool = True) -> None:
    """Dump keys and items to a pickle file. It's a secondary dump method,
    when a HumanData instance is too large to be dumped by self.dump()

    Args:
        pkl_path (str):
            Path to a dumped pickle file.
        overwrite (bool, optional):
            Whether to overwrite if there is already a file.
            Defaults to True.

    Raises:
        ValueError:
            pkl_path does not end with '.pkl'.
        FileExistsError:
            When overwrite is False and file exists.
    """
    if not check_path_suffix(pkl_path, ['.pkl']):
        raise ValueError('Not an pkl file.')
    if not overwrite:
        if check_path_existence(pkl_path, 'file') == Existence.FileExist:
            raise FileExistsError
    # the three bookkeeping attributes are stored as ordinary keys so
    # load_by_pickle can restore them
    dict_to_dump = {
        '__key_strict__': self.__key_strict__,
        '__data_len__': self.__data_len__,
        '__keypoints_compressed__': self.__keypoints_compressed__,
    }
    dict_to_dump.update(self)
    with open(pkl_path, 'wb') as f_writeb:
        pickle.dump(
            dict_to_dump, f_writeb, protocol=pickle.HIGHEST_PROTOCOL)
def load_by_pickle(self, pkl_path: str) -> None:
    """Load data from pkl_path and update them to self.

    When a HumanData Instance was dumped by
    self.dump_by_pickle(), use this to load.

    Args:
        pkl_path (str):
            Path to a dumped pickle file.
    """
    with open(pkl_path, 'rb') as f_readb:
        tmp_data_dict = pickle.load(f_readb)
    # iterate over a copy so keys may be popped while looping
    for key, value in list(tmp_data_dict.items()):
        if value is None:
            tmp_data_dict.pop(key)
        elif key == '__key_strict__' or \
                key == '__data_len__' or\
                key == '__keypoints_compressed__':
            self.__setattr__(key, value)
            # pop the attributes to keep dict clean
            tmp_data_dict.pop(key)
        elif key == 'bbox_xywh' and value.shape[1] == 4:
            # legacy bboxes lack the confidence column; append conf=1
            value = np.hstack([value, np.ones([value.shape[0], 1])])
            tmp_data_dict[key] = value
        else:
            tmp_data_dict[key] = value
    self.update(tmp_data_dict)
    self.__set_default_values__()
def __set_default_values__(self) -> None:
    """For older versions of HumanData, call this method to apply missing
    values (also attributes)."""
    supported_keys = self.__class__.SUPPORTED_KEYS
    if self.__data_len__ == -1:
        # infer data_len from the first present supported key that
        # declares a temporal slicing dim
        for key in supported_keys:
            if key in self and \
                    'dim' in supported_keys[key] and\
                    supported_keys[key]['dim'] is not None:
                if 'slice_key' in supported_keys[key] and\
                        supported_keys[key]['type'] == dict:
                    sub_key = supported_keys[key]['slice_key']
                    slice_dim = supported_keys[key]['dim']
                    self.__data_len__ = \
                        self[key][sub_key].shape[slice_dim]
                else:
                    slice_dim = supported_keys[key]['dim']
                    self.__data_len__ = self[key].shape[slice_dim]
                break
    for key in list(self.keys()):
        convention_key = f'{key}_convention'
        # older files may miss conventions; default to 'human_data'
        if key.startswith('keypoints') and \
                not key.endswith('_mask') and \
                not key.endswith('_convention') and \
                convention_key not in self:
            self[convention_key] = 'human_data'
def concatenate(cls, human_data_0: _HumanData,
                human_data_1: _HumanData) -> _HumanData:
    """Concatenate two human_data. All keys will be kept in the returned
    human_data. If either value from human_data_0 or human_data_1 matches
    data_len from its HumanData, the two values will be concatenated as a
    single value. If not, a postfix will be added to the key to specify
    the source of the value.

    NOTE(review): first parameter is ``cls`` — this looks like a
    ``@classmethod`` whose decorator was lost in extraction; confirm
    against the original file.

    Args:
        human_data_0 (_HumanData)
        human_data_1 (_HumanData)

    Returns:
        _HumanData:
            A new human_data instance with all concatenated data.
    """
    ret_human_data = cls.new(key_strict=False)
    set_0 = set(human_data_0.keys())
    set_1 = set(human_data_1.keys())
    common_keys = set_0.intersection(set_1)
    # per-key temporal dims (None means not sliceable/concatenable)
    dim_dict_0 = human_data_0.__get_slice_dim__()
    dim_dict_1 = human_data_1.__get_slice_dim__()
    for key in common_keys:
        value_0 = human_data_0[key]
        value_1 = human_data_1[key]
        # align type: tuples are normalized to lists before comparing
        value_0 = list(value_0) if isinstance(value_0, tuple)\
            else value_0
        value_1 = list(value_1) if isinstance(value_1, tuple)\
            else value_1
        assert type(value_0) == type(value_1)
        # align convention: conventions must agree, keep one copy
        if key.startswith('keypoints') and\
                key.endswith('_convention'):
            assert value_0 == value_1
            ret_human_data[key] = value_0
            continue
        # mask_0 and mask_1: a joint stays valid only if valid in both
        elif key.startswith('keypoints') and\
                key.endswith('_mask'):
            new_mask = value_0 * value_1
            ret_human_data[key] = new_mask
            continue
        # go through the sub dict
        if isinstance(value_0, dict):
            sub_dict = {}
            for sub_key, sub_value_0 in value_0.items():
                # only found in value_0
                if sub_key not in value_1:
                    sub_dict[sub_key] = sub_value_0
                # found in both values
                else:
                    sub_value_1 = value_1[sub_key]
                    concat_sub_dict = cls.__concat_value__(
                        key=sub_key,
                        value_0=sub_value_0,
                        dim_0=dim_dict_0[key][sub_key],
                        value_1=sub_value_1,
                        dim_1=dim_dict_1[key][sub_key])
                    sub_dict.update(concat_sub_dict)
            for sub_key, sub_value_1 in value_1.items():
                if sub_key not in value_0:
                    sub_dict[sub_key] = sub_value_1
            ret_human_data[key] = sub_dict
        # try concat
        else:
            concat_dict = cls.__concat_value__(
                key=key,
                value_0=value_0,
                dim_0=dim_dict_0[key],
                value_1=value_1,
                dim_1=dim_dict_1[key])
            ret_human_data.update(concat_dict)
    # check exclusive keys
    for key, value in human_data_0.items():
        if key not in common_keys:
            # value not for concat and slice
            if dim_dict_0[key] is None:
                ret_human_data[key] = value
            # value aligned with data_len of HumanData_0
            else:
                ret_human_data[f'{key}_0'] = value
    for key, value in human_data_1.items():
        if key not in common_keys:
            # same as above
            if dim_dict_1[key] is None:
                ret_human_data[key] = value
            else:
                ret_human_data[f'{key}_1'] = value
    return ret_human_data
def __concat_value__(cls, key: Any, value_0: Any, value_1: Any,
                     dim_0: Union[None, int], dim_1: Union[None,
                                                           int]) -> dict:
    """Concat two values from two different HumanData.

    Args:
        key (Any):
            The common key of the two values.
        value_0 (Any):
            Value from 0.
        value_1 (Any):
            Value from 1.
        dim_0 (Union[None, int]):
            The dim for concat and slice. None for N/A.
        dim_1 (Union[None, int]):
            The dim for concat and slice. None for N/A.

    Returns:
        dict:
            Dict for concatenated result. Either a single concatenated
            entry under ``key``, or two postfixed entries
            (``{key}_0`` / ``{key}_1``) when either dim is undefined.
    """
    ret_dict = {}
    if dim_0 is None or dim_1 is None:
        # at least one side is not concatenable: keep both, postfixed
        ret_dict[f'{key}_0'] = value_0
        ret_dict[f'{key}_1'] = value_1
    elif isinstance(value_0, list):
        ret_dict[key] = value_0 + value_1
    # elif isinstance(value_0, np.ndarray):
    else:
        # arrays are joined along dim_0 (assumed equal to dim_1)
        ret_dict[key] = np.concatenate((value_0, value_1), axis=dim_0)
    return ret_dict
def __add_zero_pad__(cls, compressed_array: np.ndarray,
                     mask_array: np.ndarray) -> np.ndarray:
    """Expand a compressed keypoints array back to full size.

    Columns of the compressed array are scattered back to the joint
    indices whose mask value is 1; every other joint is zero-filled.

    Args:
        compressed_array (np.ndarray):
            A compressed keypoints array, shape (data_len, n_kept, dim).
        mask_array (np.ndarray):
            The mask records compression relationship.

    Returns:
        np.ndarray:
            A keypoints array in full-size,
            shape (data_len, len(mask_array), dim).
    """
    # number of kept joints must match the compressed width
    assert mask_array.sum() == compressed_array.shape[1]
    num_frames, _, point_dim = compressed_array.shape
    full_array = np.zeros(
        shape=[num_frames, mask_array.shape[0], point_dim],
        dtype=compressed_array.dtype)
    kept_indices = np.where(mask_array == 1)[0]
    full_array[:, kept_indices, :] = compressed_array
    return full_array
def __remove_zero_pad__(cls, zero_pad_array: np.ndarray,
                        mask_array: np.ndarray) -> np.ndarray:
    """Remove zero-padding from a full-size keypoints array.

    Keeps only the joint columns whose mask value is 1.

    Args:
        zero_pad_array (np.ndarray):
            A keypoints array in full-size.
        mask_array (np.ndarray):
            The mask records compression relationship.

    Returns:
        np.ndarray:
            A compressed keypoints array.
    """
    # the mask must describe exactly the joint axis of the input
    assert mask_array.shape[0] == zero_pad_array.shape[1]
    kept_indices = np.where(mask_array == 1)[0]
    return zero_pad_array.take(kept_indices, axis=1)
def __get_key_warn_msg__(cls, key: Any) -> str:
    """Build the warning message for a key that fails the check.

    Args:
        key (Any):
            The offending key.

    Returns:
        str:
            The warning message.
    """
    return (
        f'{key} is absent in {cls.__name__}.SUPPORTED_KEYS.\n'
        'Ignore this if you know exactly what you are doing.\n'
        'Otherwise, Call self.set_key_strict(True) to avoid wrong keys.\n'
    )
def __get_key_error_msg__(cls, key: Any) -> str:
    """Build the error message for a key that fails the check.

    Args:
        key (Any):
            The offending key.

    Returns:
        str:
            The error message.
    """
    return (
        f'{key} is absent in {cls.__name__}.SUPPORTED_KEYS.\n'
        'Call self.set_key_strict(False) to allow unsupported keys.\n'
    )
def __get_value_error_msg__(cls) -> str:
    """Get the error message when a value fails the check.

    Returns:
        str:
            The error message.
    """
    # fix: the original message read 'An supported value' (grammar)
    error_message = \
        'A supported value doesn\'t ' +\
        'match definition.\n'
    suggestion_message = \
        'See error log for details.\n'
    return error_message + suggestion_message
def __get_sliced_result__(
        cls, input_data: Union[np.ndarray, list, tuple], slice_dim: int,
        slice_range: slice) -> Union[np.ndarray, list, tuple]:
    """Slice input_data along slice_dim with slice_range.

    Args:
        input_data (Union[np.ndarray, list, tuple]):
            Data to be sliced.
        slice_dim (int):
            Dimension to be sliced.
        slice_range (slice):
            An instance of class slice.

    Returns:
        Union[np.ndarray, list, tuple]:
            A slice of input_data.
    """
    if not isinstance(input_data, np.ndarray):
        # builtin sequences can only be sliced along their first axis
        return input_data[slice_range]
    # build a full-dimension indexer, then narrow the requested axis
    indexer = [slice(None)] * len(input_data.shape)
    indexer[slice_dim] = slice_range
    return input_data[tuple(indexer)]
def smooth_process(x,
                   smooth_type='savgol',
                   cfg_base_dir='configs/_base_/post_processing/'):
    """Smooth the array with the specified smoothing type.

    Args:
        x (np.ndarray): Shape should be (frame,num_person,K,C)
            or (frame,K,C).
        smooth_type (str, optional): Smooth type.
            Choose from ['oneeuro', 'gaus1d', 'savgol', 'smoothnet',
            'smoothnet_windowsize8', 'smoothnet_windowsize16',
            'smoothnet_windowsize32', 'smoothnet_windowsize64'].
            Defaults to 'savgol'. 'smoothnet' is an alias for
            'smoothnet_windowsize8'.
        cfg_base_dir (str, optional): Config base dir.
            Defaults to 'configs/_base_/post_processing/'.

    Raises:
        TypeError: The resolved config is neither a filename nor an
            ``mmcv.Config``.

    Returns:
        np.ndarray: Smoothed data. The shape should be
        (frame,num_person,K,C) or (frame,K,C).
    """
    # 'smoothnet' is shorthand for its default window size
    if smooth_type == 'smoothnet':
        smooth_type = 'smoothnet_windowsize8'
    valid_types = ['oneeuro', 'gaus1d', 'savgol'] + [
        f'smoothnet_windowsize{window}' for window in (8, 16, 32, 64)
    ]
    assert smooth_type in valid_types
    cfg = os.path.join(cfg_base_dir, smooth_type + '.py')
    if isinstance(cfg, str):
        cfg = mmcv.Config.fromfile(cfg)
    elif not isinstance(cfg, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(cfg)}')
    # copy so the caller's array is left untouched
    x = x.copy()
    assert x.ndim == 3 or x.ndim == 4
    smooth_func = build_post_processing(dict(cfg['smooth_cfg']))
    if x.ndim == 4:
        # smooth every person's track independently
        for person_idx in range(x.shape[1]):
            x[:, person_idx] = smooth_func(x[:, person_idx])
    else:
        x = smooth_func(x)
    return x
def speed_up_process(x,
                     speed_up_type='deciwatch',
                     cfg_base_dir='configs/_base_/post_processing/'):
    """Speed up the process with the specified speed up type.

    Args:
        x (torch.Tensor): Shape should be (frame,num_person,K,C)
            or (frame,K,C). Note: despite an earlier docstring claiming
            np.ndarray, the code calls ``.clone()``/``.device``/``.cpu()``,
            so a torch.Tensor is required; an np.ndarray is returned.
        speed_up_type (str, optional): Speed up type.
            choose in ['deciwatch',
            'deciwatch_interval5_q1',
            'deciwatch_interval5_q2',
            'deciwatch_interval5_q3',
            'deciwatch_interval5_q4',
            'deciwatch_interval5_q5',
            'deciwatch_interval10_q1',
            'deciwatch_interval10_q2',
            'deciwatch_interval10_q3',
            'deciwatch_interval10_q4',
            'deciwatch_interval10_q5',]. Defaults to 'deciwatch'.
        cfg_base_dir (str, optional): Config base dir.
            Defaults to 'configs/_base_/post_processing/'

    Raises:
        TypeError: The resolved config is neither a filename nor an
            ``mmcv.Config``.

    Returns:
        np.ndarray: Completed data. The shape should be
        (frame,num_person,K,C) or (frame,K,C).
    """
    # 'deciwatch' is shorthand for its default interval/quality
    if speed_up_type == 'deciwatch':
        speed_up_type = 'deciwatch_interval5_q3'
    assert speed_up_type in [
        'deciwatch_interval5_q1',
        'deciwatch_interval5_q2',
        'deciwatch_interval5_q3',
        'deciwatch_interval5_q4',
        'deciwatch_interval5_q5',
        'deciwatch_interval10_q1',
        'deciwatch_interval10_q2',
        'deciwatch_interval10_q3',
        'deciwatch_interval10_q4',
        'deciwatch_interval10_q5',
    ]
    cfg = os.path.join(cfg_base_dir, speed_up_type + '.py')
    if isinstance(cfg, str):
        cfg = mmcv.Config.fromfile(cfg)
    elif not isinstance(cfg, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(cfg)}')
    # clone so the caller's tensor is left untouched
    x = x.clone()
    assert x.ndim == 4 or x.ndim == 5
    cfg_dict = cfg['speed_up_cfg']
    cfg_dict['device'] = x.device
    speed_up_func = build_post_processing(cfg_dict)
    if x.ndim == 5:
        # process every person's track independently
        for i in range(x.shape[1]):
            x[:, i] = speed_up_func(x[:, i])
    elif x.ndim == 4:
        x = speed_up_func(x)
    return np.array(x.cpu())
def get_speed_up_interval(speed_up_type,
                          cfg_base_dir='configs/_base_/post_processing/'):
    """Get the interval of a specific speed up type.

    Args:
        speed_up_type (str): Speed up type.
            Choose from ['deciwatch',
            'deciwatch_interval{5,10}_q{1..5}'].
            'deciwatch' is an alias for 'deciwatch_interval5_q3'.
        cfg_base_dir (str, optional): Config base dir.
            Defaults to 'configs/_base_/post_processing/'.

    Raises:
        TypeError: The resolved config is neither a filename nor an
            ``mmcv.Config``.

    Returns:
        int: speed up interval
    """
    # resolve the alias before validation
    if speed_up_type == 'deciwatch':
        speed_up_type = 'deciwatch_interval5_q3'
    valid_types = [
        f'deciwatch_interval{interval}_q{quality}'
        for interval in (5, 10) for quality in range(1, 6)
    ]
    assert speed_up_type in valid_types
    cfg = os.path.join(cfg_base_dir, speed_up_type + '.py')
    if isinstance(cfg, str):
        cfg = mmcv.Config.fromfile(cfg)
    elif not isinstance(cfg, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(cfg)}')
    return cfg['speed_up_cfg']['interval']
def speed_up_interpolate(selected_frames, speed_up_frames, smpl_poses,
                         smpl_betas, pred_cams, bboxes_xyxy):
    """Interpolate smpl_betas, pred_cams, and bboxes_xyxy for speed up.

    NOTE(review): despite the parameter name, ``smpl_poses`` is returned
    unchanged here — pose completion is handled separately (see
    ``speed_up_process``). Only betas, cams and bboxes are interpolated
    in place.

    NOTE(review): the in-place slice ``[:speed_up_frames, :]`` is filled
    from ``np.arange(0, max(selected_frames))`` — this assumes
    ``max(selected_frames) == speed_up_frames``; confirm with callers.

    Args:
        selected_frames (np.ndarray): Shape should be
            (selected frame number).
        speed_up_frames (int): Total speed up frame number.
        smpl_poses (np.ndarray): selected frame smpl poses parameter.
        smpl_betas (np.ndarray): selected frame smpl shape parameter.
        pred_cams (np.ndarray): selected frame camera parameter.
        bboxes_xyxy (np.ndarray): selected frame bbox.

    Returns:
        smpl_poses (np.ndarray): unchanged smpl poses parameter.
        smpl_betas (np.ndarray): interpolated frame smpl shape parameter.
        pred_cams (np.ndarray): interpolated frame camera parameter.
        bboxes_xyxy (np.ndarray): interpolated frame bbox.
    """
    # drop sample indices beyond the speed-up range
    selected_frames = selected_frames[selected_frames <= speed_up_frames]
    pred_cams[:speed_up_frames, :] = interpolate.interp1d(
        selected_frames, pred_cams[selected_frames, :], kind='linear', axis=0)(
            np.arange(0, max(selected_frames)))
    bboxes_xyxy[:speed_up_frames, :] = interpolate.interp1d(
        selected_frames,
        bboxes_xyxy[selected_frames, :],
        kind='linear',
        axis=0)(
            np.arange(0, max(selected_frames)))
    smpl_betas[:speed_up_frames, :] = interpolate.interp1d(
        selected_frames, smpl_betas[selected_frames, :], kind='linear',
        axis=0)(
            np.arange(0, max(selected_frames)))
    return smpl_poses, smpl_betas, pred_cams, bboxes_xyxy
def extract_feature_sequence(extracted_results,
                             frame_idx,
                             causal,
                             seq_len,
                             step=1):
    """Extract the target frame from person results, and pad the sequence to a
    fixed length.

    Args:
        extracted_results (List[List[Dict]]): Multi-frame feature extraction
            results stored in a nested list. Each element of the outer list
            is the feature extraction results of a single frame, and each
            element of the inner list is the feature information of one
            person, which contains:

                features (ndarray): extracted features
                track_id (int): unique id of each person, required when
                    ``with_track_id==True``
                bbox ((4, ) or (5, )): left, right, top, bottom, [score]

        frame_idx (int): The index of the frame in the original video.
        causal (bool): If True, the target frame is the first frame in
            a sequence. Otherwise, the target frame is in the middle of a
            sequence.
        seq_len (int): The number of frames in the input sequence.
        step (int): Step size to extract frames from the video.

    Returns:
        List[List[Dict]]: Multi-frame feature extraction results stored in a
        nested list with a length of seq_len.

        (Fix: the previous docstring also documented an ``int`` target-frame
        index as a second return value, but only the sequence is returned.)
    """
    if causal:
        # target frame leads the window
        frames_left = 0
        frames_right = seq_len - 1
    else:
        # target frame sits in the middle of the window
        frames_left = (seq_len - 1) // 2
        frames_right = frames_left
    num_frames = len(extracted_results)

    # get the padded sequence; out-of-range positions are padded by
    # repeating the first/last available frame
    pad_left = max(0, frames_left - frame_idx // step)
    pad_right = max(0, frames_right - (num_frames - 1 - frame_idx) // step)
    start = max(frame_idx % step, frame_idx - frames_left * step)
    end = min(num_frames - (num_frames - 1 - frame_idx) % step,
              frame_idx + frames_right * step + 1)
    extracted_results_seq = [extracted_results[0]] * pad_left + \
        extracted_results[start:end:step] + \
        [extracted_results[-1]] * pad_right
    return extracted_results_seq
def array_to_images(
    image_array: np.ndarray,
    output_folder: str,
    img_format: str = '%06d.png',
    resolution: Optional[Union[Tuple[int, int], Tuple[float, float]]] = None,
    disable_log: bool = False,
) -> None:
    """Convert an array to images directly.

    Frames are streamed to an ffmpeg subprocess as raw BGR bytes and
    written as numbered image files.

    Args:
        image_array (np.ndarray): shape should be (f * h * w * 3).
        output_folder (str): output folder for the images.
        img_format (str, optional): format of the images.
            Defaults to '%06d.png'.
        resolution (Optional[Union[Tuple[int, int], Tuple[float, float]]],
            optional): resolution(height, width) of output.
            Defaults to None.
            NOTE(review): frames are piped without resizing, so a
            resolution differing from the array shape would desynchronise
            the raw stream — confirm callers pass the native resolution.
        disable_log (bool, optional): whether close the ffmpeg command info.
            Defaults to False.

    Raises:
        FileNotFoundError: check output folder.
        TypeError: check input array.
        BrokenPipeError: the subprocess pipes could not be opened.

    Returns:
        None
    """
    prepare_output_path(
        output_folder,
        allowed_suffix=[],
        tag='output image folder',
        path_type='dir',
        overwrite=True)
    if not isinstance(image_array, np.ndarray):
        raise TypeError('Input should be np.ndarray.')
    assert image_array.ndim == 4
    assert image_array.shape[-1] == 3
    if resolution:
        height, width = resolution
    else:
        height, width = image_array.shape[1], image_array.shape[2]
    command = [
        'ffmpeg',
        '-y',  # (optional) overwrite output file if it exists
        '-f',
        'rawvideo',
        '-s',
        f'{int(width)}x{int(height)}',  # size of one frame
        '-pix_fmt',
        'bgr24',  # bgr24 for matching OpenCV
        '-loglevel',
        'error',
        '-threads',
        '4',
        '-i',
        '-',  # The input comes from a pipe
        '-f',
        'image2',
        '-start_number',
        '0',
        os.path.join(output_folder, img_format),
    ]
    if not disable_log:
        print(f'Running \"{" ".join(command)}\"')
    process = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stderr=subprocess.PIPE,
        bufsize=10**8,
        close_fds=True)
    if process.stdin is None or process.stderr is None:
        raise BrokenPipeError('No buffer received.')
    # stream frames one by one in order
    for frame in image_array:
        process.stdin.write(frame.tobytes())
    process.stdin.close()
    process.stderr.close()
    process.wait()
# The definitions above provide the dependencies needed to implement the
# `single_person_with_mmdet` function, which estimates SMPL parameters from
# single-person images with mmdetection. The function follows below.
def single_person_with_mmdet(args, frames_iter):
    """Estimate smpl parameters from single-person
    images with mmdetection.

    Pipeline: detect the person per frame, regress SMPL parameters
    (image- or video-based model), optionally speed up / smooth the
    results, then dump a HumanData archive and/or render a visualization.

    Args:
        args (object): object of argparse.Namespace.
        frames_iter (np.ndarray,): prepared frames

    NOTE(review): ``rotmat_to_aa`` is imported by this module but never
    applied here, while ``smpl_poses`` holds (24, 3, 3) rotation matrices
    (see the zero placeholder and the (frame, 24, 3, 3) reshape below);
    the later ``reshape((-1, 23, 3))`` therefore reinterprets rotation
    matrices as axis-angle — verify against the upstream demo, which
    converts to axis-angle before dumping.
    """
    mesh_model, extractor = init_model(
        args.mesh_reg_config,
        args.mesh_reg_checkpoint,
        device=args.device.lower())
    pred_cams, verts, smpl_poses, smpl_betas, bboxes_xyxy = \
        [], [], [], [], []
    frame_id_list, result_list = \
        get_detection_result(args, frames_iter, mesh_model, extractor)
    frame_num = len(frame_id_list)
    # speed up: only every `speed_up_interval`-th frame is regressed,
    # the rest are filled in afterwards
    if args.speed_up_type:
        speed_up_interval = get_speed_up_interval(args.speed_up_type)
        speed_up_frames = (frame_num -
                           1) // speed_up_interval * speed_up_interval
    for i, result in enumerate(mmcv.track_iter_progress(result_list)):
        frame_id = frame_id_list[i]
        if mesh_model.cfg.model.type == 'VideoBodyModelEstimator':
            if args.speed_up_type:
                warnings.warn(
                    'Video based models do not support speed up. '
                    'By default we will inference with original speed.',
                    UserWarning)
            feature_results_seq = extract_feature_sequence(
                result_list, frame_idx=i, causal=True, seq_len=16, step=1)
            mesh_results = inference_video_based_model(
                mesh_model,
                extracted_results=feature_results_seq,
                with_track_id=False)
        elif mesh_model.cfg.model.type == 'ImageBodyModelEstimator':
            if args.speed_up_type and i % speed_up_interval != 0 \
                    and i <= speed_up_frames:
                # skipped frame: insert zero placeholders to be
                # reconstructed later by speed_up_process/interpolate
                mesh_results = [{
                    'bbox': np.zeros((5)),
                    'camera': np.zeros((3)),
                    'smpl_pose': np.zeros((24, 3, 3)),
                    'smpl_beta': np.zeros((10)),
                    'vertices': np.zeros((6890, 3)),
                    'keypoints_3d': np.zeros((17, 3)),
                }]
            else:
                mesh_results = inference_image_based_model(
                    mesh_model,
                    frames_iter[frame_id],
                    result,
                    bbox_thr=args.bbox_thr,
                    format='xyxy')
        else:
            raise Exception(
                f'{mesh_model.cfg.model.type} is not supported yet')
        smpl_betas.append(mesh_results[0]['smpl_beta'])
        smpl_pose = mesh_results[0]['smpl_pose']
        smpl_poses.append(smpl_pose)
        pred_cams.append(mesh_results[0]['camera'])
        verts.append(mesh_results[0]['vertices'])
        bboxes_xyxy.append(mesh_results[0]['bbox'])

    smpl_poses = np.array(smpl_poses)
    smpl_betas = np.array(smpl_betas)
    pred_cams = np.array(pred_cams)
    verts = np.array(verts)
    bboxes_xyxy = np.array(bboxes_xyxy)

    # release GPU memory
    del mesh_model
    del extractor
    torch.cuda.empty_cache()

    # speed up: reconstruct the skipped frames
    if args.speed_up_type:
        smpl_poses = speed_up_process(
            torch.tensor(smpl_poses).to(args.device.lower()),
            args.speed_up_type)

        selected_frames = np.arange(0, len(frames_iter), speed_up_interval)
        smpl_poses, smpl_betas, pred_cams, bboxes_xyxy = speed_up_interpolate(
            selected_frames, speed_up_frames, smpl_poses, smpl_betas,
            pred_cams, bboxes_xyxy)

    # smooth: poses are flattened to (frame, 24, 9) for the filter,
    # then restored to rotation-matrix form
    if args.smooth_type is not None:
        smpl_poses = smooth_process(
            smpl_poses.reshape(frame_num, 24, 9),
            smooth_type=args.smooth_type).reshape(frame_num, 24, 3, 3)
        verts = smooth_process(verts, smooth_type=args.smooth_type)
        pred_cams = smooth_process(
            pred_cams[:, np.newaxis],
            smooth_type=args.smooth_type).reshape(frame_num, 3)

    if args.output is not None:
        # dump per-frame results into a HumanData archive
        body_pose_, global_orient_, smpl_betas_, verts_, pred_cams_, \
            bboxes_xyxy_, image_path_, person_id_, frame_id_ = \
            [], [], [], [], [], [], [], [], []
        human_data = HumanData()
        frames_folder = osp.join(args.output, 'images')
        os.makedirs(frames_folder, exist_ok=True)
        array_to_images(
            np.array(frames_iter)[frame_id_list], output_folder=frames_folder)

        for i, img_i in enumerate(sorted(os.listdir(frames_folder))):
            # pose index 0 is the global orientation; 1..23 body joints
            body_pose_.append(smpl_poses[i][1:])
            global_orient_.append(smpl_poses[i][:1])
            smpl_betas_.append(smpl_betas[i])
            verts_.append(verts[i])
            pred_cams_.append(pred_cams[i])
            bboxes_xyxy_.append(bboxes_xyxy[i])
            image_path_.append(os.path.join('images', img_i))
            person_id_.append(0)
            frame_id_.append(frame_id_list[i])

        smpl = {}
        smpl['body_pose'] = np.array(body_pose_).reshape((-1, 23, 3))
        smpl['global_orient'] = np.array(global_orient_).reshape((-1, 3))
        smpl['betas'] = np.array(smpl_betas_).reshape((-1, 10))
        human_data['smpl'] = smpl
        human_data['verts'] = verts_
        human_data['pred_cams'] = pred_cams_
        human_data['bboxes_xyxy'] = bboxes_xyxy_
        human_data['image_path'] = image_path_
        human_data['person_id'] = person_id_
        human_data['frame_id'] = frame_id_
        human_data.dump(osp.join(args.output, 'inference_result.npz'))

    if args.show_path is not None:
        if args.output is not None:
            frames_folder = os.path.join(args.output, 'images')
        else:
            # no output dir requested: use a temporary folder next to
            # the visualization, removed after rendering
            frames_folder = osp.join(Path(args.show_path).parent, 'images')
            os.makedirs(frames_folder, exist_ok=True)
            array_to_images(
                np.array(frames_iter)[frame_id_list],
                output_folder=frames_folder)
        # NOTE(review): body model type is 'star' here while vertices are
        # 6890-dim (SMPL) — confirm this is intended.
        body_model_config = dict(model_path='data/body_models', type='star')
        visualize_smpl_hmr(
            poses=smpl_poses,
            betas=smpl_betas,
            cam_transl=pred_cams,
            bbox=bboxes_xyxy,
            output_path=args.show_path,
            render_choice=args.render_choice,
            resolution=frames_iter[0].shape[:2],
            origin_frames=frames_folder,
            body_model_config=body_model_config,
            overwrite=True,
            palette=args.palette,
            read_frames_batch=True)
        if args.output is None:
            shutil.rmtree(frames_folder)
import os
import os.path as osp
import shutil
import warnings
from argparse import ArgumentParser
from pathlib import Path
import mmcv
import numpy as np
import torch
from mmhuman3d.apis import (
feature_extract,
inference_image_based_model,
inference_video_based_model,
init_model,
)
from mmhuman3d.core.visualization.visualize_smpl import visualize_smpl_hmr
from mmhuman3d.data.data_structures.human_data import HumanData
from mmhuman3d.utils.demo_utils import (
extract_feature_sequence,
get_speed_up_interval,
prepare_frames,
process_mmdet_results,
process_mmtracking_results,
smooth_process,
speed_up_interpolate,
speed_up_process,
)
from mmhuman3d.utils.ffmpeg_utils import array_to_images
from mmhuman3d.utils.transforms import rotmat_to_aa
def get_detection_result(args, frames_iter, mesh_model, extractor):
    """Run person detection (and optional feature extraction) per frame.

    Args:
        args (object): argparse.Namespace with det_config, det_checkpoint,
            device, det_cat_id, bbox_thr and draw_bbox attributes.
        frames_iter: iterable of frames (images).
        mesh_model: body model estimator; its cfg decides whether features
            are extracted for video-based inference.
        extractor: feature extractor used for 'VideoBodyModelEstimator',
            may be None.

    Returns:
        tuple:
            frame_id_list (list[int]): indices of frames with detections.
            result_list (list): per-frame detection (or feature) results.
    """
    person_det_model = init_detector(
        args.det_config, args.det_checkpoint, device=args.device.lower())
    frame_id_list = []
    result_list = []
    for i, frame in enumerate(mmcv.track_iter_progress(frames_iter)):
        mmdet_results = inference_detector(person_det_model, frame)
        # keep the person class bounding boxes.
        results = process_mmdet_results(
            mmdet_results, cat_id=args.det_cat_id, bbox_thr=args.bbox_thr)
        # extract features from the input video or image sequences
        if mesh_model.cfg.model.type == 'VideoBodyModelEstimator' \
                and extractor is not None:
            results = feature_extract(
                extractor, frame, results, args.bbox_thr, format='xyxy')
        # drop the frame with no detected results
        if results == []:
            continue
        # vis bboxes
        if args.draw_bbox:
            bboxes = [res['bbox'] for res in results]
            bboxes = np.vstack(bboxes)
            mmcv.imshow_bboxes(
                frame, bboxes, top_k=-1, thickness=2, show=False)
        frame_id_list.append(i)
        result_list.append(results)
    return frame_id_list, result_list
def visualize_smpl_hmr(cam_transl,
                       bbox=None,
                       kp2d=None,
                       focal_length=5000,
                       det_width=224,
                       det_height=224,
                       bbox_format='xyxy',
                       **kwargs) -> None:
    """Simplest way to visualize HMR or SPIN or Smplify pred smpl with origin
    frames and predicted cameras.

    Args:
        cam_transl (np.ndarray | torch.Tensor): predicted weak-perspective
            camera parameters; last dim is indexed as [0], [1], [2] below
            (presumably (scale, tx, ty) — TODO confirm against the HMR
            convention).
        bbox: per-frame bounding boxes, used to derive intrinsics when
            kp2d is not given.
        kp2d: 2D keypoints; if given, bboxes are derived from them.
        focal_length (int): focal length of the default HMR intrinsics.
        det_width (int), det_height (int): detector crop size.
        bbox_format (str): bbox layout, e.g. 'xyxy'.
        **kwargs: forwarded to ``render_smpl`` (conflicting keys that the
            partial already fixes are dropped).
    """
    if kp2d is not None:
        bbox = convert_kp2d_to_bbox(kp2d, bbox_format=bbox_format)
    Ks = convert_bbox_to_intrinsic(bbox, bbox_format=bbox_format)
    K = torch.Tensor(
        get_default_hmr_intrinsic(
            focal_length=focal_length,
            det_height=det_height,
            det_width=det_width))
    # fix projection settings; these keys are stripped from kwargs below
    func = partial(
        render_smpl,
        projection='perspective',
        convention='opencv',
        in_ndc=False,
        K=None,
        R=None,
        orig_cam=None,
    )
    if isinstance(cam_transl, np.ndarray):
        cam_transl = torch.Tensor(cam_transl)
    # convert weak-perspective params to a camera translation; the small
    # epsilon guards against division by a zero scale
    T = torch.cat([
        cam_transl[..., [1]], cam_transl[..., [2]], 2 * focal_length /
        (det_width * cam_transl[..., [0]] + 1e-9)
    ], -1)
    # drop kwargs already fixed by the partial to avoid duplicates
    for k in func.keywords.keys():
        if k in kwargs:
            kwargs.pop(k)
    return func(Ks=Ks, K=K, T=T, **kwargs)
class HumanData(dict):
logger = None
SUPPORTED_KEYS = _HumanData_SUPPORTED_KEYS
WARNED_KEYS = []
def __new__(cls: _HumanData, *args: Any, **kwargs: Any) -> _HumanData:
    """New an instance of HumanData.

    Args:
        cls (HumanData): HumanData class.

    Returns:
        HumanData: An instance of HumanData.
    """
    ret_human_data = super().__new__(cls, args, kwargs)
    # bookkeeping attributes: unknown temporal length, permissive keys,
    # keypoints stored uncompressed
    setattr(ret_human_data, '__data_len__', -1)
    setattr(ret_human_data, '__key_strict__', False)
    setattr(ret_human_data, '__keypoints_compressed__', False)
    return ret_human_data
def set_logger(cls, logger: Union[logging.Logger, str, None] = None):
"""Set logger of HumanData class.
Args:
logger (logging.Logger | str | None, optional):
The way to print summary.
See `mmcv.utils.print_log()` for details.
Defaults to None.
"""
cls.logger = logger
def fromfile(cls, npz_path: str) -> _HumanData:
"""Construct a HumanData instance from an npz file.
Args:
npz_path (str):
Path to a dumped npz file.
Returns:
HumanData:
A HumanData instance load from file.
"""
ret_human_data = cls()
ret_human_data.load(npz_path)
return ret_human_data
def new(cls,
source_dict: dict = None,
key_strict: bool = False) -> _HumanData:
"""Construct a HumanData instance from a dict.
Args:
source_dict (dict, optional):
A dict with items in HumanData fashion.
Defaults to None.
key_strict (bool, optional):
Whether to raise error when setting unsupported keys.
Defaults to False.
Returns:
HumanData:
A HumanData instance.
"""
if source_dict is None:
ret_human_data = cls()
else:
ret_human_data = cls(source_dict)
ret_human_data.set_key_strict(key_strict)
return ret_human_data
def get_key_strict(self) -> bool:
"""Get value of attribute key_strict.
Returns:
bool:
Whether to raise error when setting unsupported keys.
"""
return self.__key_strict__
    def set_key_strict(self, value: bool):
        """Set value of attribute key_strict.

        Args:
            value (bool):
                Whether to raise error when setting unsupported keys.
                (No default — the original docstring's "Defaults to True"
                did not match the signature.)
        """
        former__key_strict__ = self.__key_strict__
        self.__key_strict__ = value
        # Turning strict mode on drops already-stored unsupported keys.
        if former__key_strict__ is False and \
                value is True:
            self.pop_unsupported_items()
def check_keypoints_compressed(self) -> bool:
"""Check whether the keypoints are compressed.
Returns:
bool:
Whether the keypoints are compressed.
"""
return self.__keypoints_compressed__
    def load(self, npz_path: str):
        """Load data from npz_path and update them to self.

        Args:
            npz_path (str):
                Path to a dumped npz file.
        """
        supported_keys = self.__class__.SUPPORTED_KEYS
        with np.load(npz_path, allow_pickle=True) as npz_file:
            tmp_data_dict = dict(npz_file)
            for key, value in list(tmp_data_dict.items()):
                if isinstance(value, np.ndarray) and\
                        len(value.shape) == 0:
                    # value is not an ndarray before dump
                    value = value.item()
                elif key in supported_keys and\
                        type(value) != supported_keys[key]['type']:
                    # restore the declared container type (npz stores
                    # everything as ndarray)
                    value = supported_keys[key]['type'](value)
                if value is None:
                    tmp_data_dict.pop(key)
                elif key == '__key_strict__' or \
                        key == '__data_len__' or\
                        key == '__keypoints_compressed__':
                    # bookkeeping values become attributes, not items
                    self.__setattr__(key, value)
                    # pop the attributes to keep dict clean
                    tmp_data_dict.pop(key)
                elif key == 'bbox_xywh' and value.shape[1] == 4:
                    # legacy dumps lack the confidence column; pad with 1s
                    value = np.hstack([value, np.ones([value.shape[0], 1])])
                    tmp_data_dict[key] = value
                else:
                    tmp_data_dict[key] = value
        self.update(tmp_data_dict)
        self.__set_default_values__()
    def dump(self, npz_path: str, overwrite: bool = True):
        """Dump keys and items to an npz file.

        Args:
            npz_path (str):
                Path to a dumped npz file.
            overwrite (bool, optional):
                Whether to overwrite if there is already a file.
                Defaults to True.

        Raises:
            ValueError:
                npz_path does not end with '.npz'.
            FileExistsError:
                When overwrite is False and file exists.
        """
        if not check_path_suffix(npz_path, ['.npz']):
            raise ValueError('Not an npz file.')
        if not overwrite:
            if check_path_existence(npz_path, 'file') == Existence.FileExist:
                raise FileExistsError
        # Persist the bookkeeping attributes alongside the data so load()
        # can restore them (it pops these keys back into attributes).
        dict_to_dump = {
            '__key_strict__': self.__key_strict__,
            '__data_len__': self.__data_len__,
            '__keypoints_compressed__': self.__keypoints_compressed__,
        }
        dict_to_dump.update(self)
        np.savez_compressed(npz_path, **dict_to_dump)
    def get_sliced_cache(self, slice_size=10) -> List:
        """Slice the whole HumanData into pieces for HumanDataCacheWriter.

        Args:
            slice_size (int, optional):
                The length of each unit in HumanData cache.
                Defaults to 10.

        Returns:
            List:
                Two dicts for HumanDataCacheWriter.
                Init HumanDataCacheWriter by HumanDataCacheWriter(**Returns[0])
                and set data by
                human_data_cache_writer.update_sliced_dict(Returns[1]).
        """
        keypoints_info = {}
        non_sliced_data = {}
        sliced_data = {}
        slice_num = ceil(self.__data_len__ / slice_size)
        # Pre-create one sub-dict per slice, keyed by stringified index.
        for slice_index in range(slice_num):
            sliced_data[str(slice_index)] = {}
        dim_dict = self.__get_slice_dim__()
        for key, dim in dim_dict.items():
            # no dim to slice
            if dim is None:
                # masks/conventions travel separately as keypoints_info
                if key.startswith('keypoints') and\
                        (key.endswith('_mask') or
                         key.endswith('_convention')):
                    keypoints_info[key] = self[key]
                else:
                    non_sliced_data[key] = self[key]
            elif isinstance(dim, dict):
                # dict value: each sub-key has its own slice dim (or None)
                value_dict = self.get_raw_value(key)
                non_sliced_sub_dict = {}
                for sub_key in value_dict.keys():
                    sub_value = value_dict[sub_key]
                    if dim[sub_key] is None:
                        non_sliced_sub_dict[sub_key] = sub_value
                    else:
                        sub_dim = dim[sub_key]
                        for slice_index in range(slice_num):
                            slice_start = slice_index * slice_size
                            # last slice may be shorter than slice_size
                            slice_end = min((slice_index + 1) * slice_size,
                                            self.__data_len__)
                            slice_range = slice(slice_start, slice_end)
                            sliced_sub_value = \
                                HumanData.__get_sliced_result__(
                                    sub_value, sub_dim, slice_range
                                )
                            if key not in sliced_data[str(slice_index)]:
                                sliced_data[str(slice_index)][key] = {}
                            sliced_data[str(slice_index)][key][sub_key] = \
                                sliced_sub_value
                if len(non_sliced_sub_dict) > 0:
                    non_sliced_data[key] = non_sliced_sub_dict
            else:
                value = self.get_raw_value(key)
                # slice as ndarray
                if isinstance(value, np.ndarray):
                    slice_list = [
                        slice(None),
                    ] * len(value.shape)
                    for slice_index in range(slice_num):
                        slice_start = slice_index * slice_size
                        slice_end = min((slice_index + 1) * slice_size,
                                        self.__data_len__)
                        slice_list[dim] = slice(slice_start, slice_end)
                        sliced_value = value[tuple(slice_list)]
                        sliced_data[str(slice_index)][key] = sliced_value
                # slice as list/tuple
                else:
                    for slice_index in range(slice_num):
                        slice_start = slice_index * slice_size
                        slice_end = min((slice_index + 1) * slice_size,
                                        self.__data_len__)
                        sliced_value = value[slice(slice_start, slice_end)]
                        sliced_data[str(slice_index)][key] = sliced_value
        # ``data_len`` is presumably a property in the full file
        # (decorator not visible here) — TODO confirm.
        writer_args_dict = {
            'slice_size': slice_size,
            'keypoints_info': keypoints_info,
            'data_len': self.data_len,
            'non_sliced_data': non_sliced_data,
            'key_strict': self.get_key_strict()
        }
        return writer_args_dict, sliced_data
    def to(self,
           device: Optional[Union[torch.device, str]] = _CPU_DEVICE,
           dtype: Optional[torch.dtype] = None,
           non_blocking: Optional[bool] = False,
           copy: Optional[bool] = False,
           memory_format: Optional[torch.memory_format] = None) -> dict:
        """Convert values in numpy.ndarray type to torch.Tensor, and move
        Tensors to the target device. All keys will exist in the returned dict.

        Args:
            device (Union[torch.device, str], optional):
                A specified device. Defaults to CPU_DEVICE.
            dtype (torch.dtype, optional):
                The data type of the expected torch.Tensor.
                If dtype is None, it is decided according to numpy.ndarry.
                Defaults to None.
            non_blocking (bool, optional):
                When non_blocking, tries to convert asynchronously with
                respect to the host if possible, e.g.,
                converting a CPU Tensor with pinned memory to a CUDA Tensor.
                Defaults to False.
            copy (bool, optional):
                When copy is set, a new Tensor is created even when
                the Tensor already matches the desired conversion.
                No matter what value copy is, Tensor constructed from numpy
                will not share the same memory with the source numpy.ndarray.
                Defaults to False.
            memory_format (torch.memory_format, optional):
                The desired memory format of returned Tensor.
                Not supported by pytorch-cpu.
                Defaults to None.

        Returns:
            dict:
                A dict with all numpy.ndarray values converted into
                torch.Tensor and all Tensors moved to the target device.
        """
        ret_dict = {}
        for key in self.keys():
            raw_value = self.get_raw_value(key)
            tensor_value = None
            if isinstance(raw_value, np.ndarray):
                # clone() so the Tensor never shares memory with the
                # source ndarray (see the ``copy`` note above)
                tensor_value = torch.from_numpy(raw_value).clone()
            elif isinstance(raw_value, torch.Tensor):
                tensor_value = raw_value
            if tensor_value is None:
                # non-array values are passed through untouched
                ret_dict[key] = raw_value
            else:
                if memory_format is None:
                    ret_dict[key] = \
                        tensor_value.to(device, dtype,
                                        non_blocking, copy)
                else:
                    ret_dict[key] = \
                        tensor_value.to(device, dtype,
                                        non_blocking, copy,
                                        memory_format=memory_format)
        return ret_dict
    def __getitem__(self, key: _KT) -> _VT:
        """Get value defined by HumanData. This function will be called by
        self[key]. In keypoints_compressed mode, if the key contains
        'keypoints', an array with zero-padding at absent keypoint will be
        returned. Call self.get_raw_value(k) to get value without padding.

        Args:
            key (_KT):
                Key in HumanData.

        Returns:
            _VT:
                Value to the key.
        """
        value = super().__getitem__(key)
        if self.__keypoints_compressed__:
            mask_key = f'{key}_mask'
            # only pad real keypoint arrays that have a stored mask
            if key in self and \
                    isinstance(value, np.ndarray) and \
                    'keypoints' in key and \
                    mask_key in self:
                mask_array = np.asarray(super().__getitem__(mask_key))
                value = \
                    self.__class__.__add_zero_pad__(value, mask_array)
        return value
def get_raw_value(self, key: _KT) -> _VT:
"""Get raw value from the dict. It acts the same as
dict.__getitem__(k).
Args:
key (_KT):
Key in dict.
Returns:
_VT:
Value to the key.
"""
value = super().__getitem__(key)
return value
    def get_value_in_shape(self,
                           key: _KT,
                           shape: Union[list, tuple],
                           padding_constant: int = 0) -> np.ndarray:
        """Get value in a specific shape. For each dim, if the required shape
        is smaller than current shape, ndarray will be sliced. Otherwise, it
        will be padded with padding_constant at the end.

        Args:
            key (_KT):
                Key in dict. The value of this key must be
                an instance of numpy.ndarray.
            shape (Union[list, tuple]):
                Shape of the returned array. Its length
                must be equal to value.ndim. Set -1 for
                a dimension if you do not want to edit it.
            padding_constant (int, optional):
                The value to set the padded values for each axis.
                Defaults to 0.

        Raises:
            ValueError:
                A value in shape is neither positive integer nor -1.

        Returns:
            np.ndarray:
                An array in required shape.
        """
        value = self.get_raw_value(key)
        assert isinstance(value, np.ndarray)
        assert value.ndim == len(shape)
        pad_width_list = []
        slice_list = []
        for dim_index in range(len(shape)):
            if shape[dim_index] == -1:
                # no pad or slice
                pad_width_list.append((0, 0))
                slice_list.append(slice(None))
            elif shape[dim_index] > 0:
                # valid shape value
                wid = shape[dim_index] - value.shape[dim_index]
                if wid > 0:
                    pad_width_list.append((0, wid))
                else:
                    pad_width_list.append((0, 0))
                slice_list.append(slice(0, shape[dim_index]))
            else:
                # invalid
                raise ValueError
        # pad first (grows short axes only), then slice down long axes
        pad_value = np.pad(
            value,
            pad_width=pad_width_list,
            mode='constant',
            constant_values=padding_constant)
        return pad_value[tuple(slice_list)]
    # NOTE(review): these three stubs look like typing.overload signatures
    # for get_slice below — the @overload decorators are not visible here.
    # As plain defs each one is immediately shadowed by the next.
    def get_slice(self, stop: int):
        """Slice [0, stop, 1] of all sliceable values."""
        ...
    def get_slice(self, start: int, stop: int):
        """Slice [start, stop, 1] of all sliceable values."""
        ...
    def get_slice(self, start: int, stop: int, step: int):
        """Slice [start, stop, step] of all sliceable values."""
        ...
    def get_slice(self,
                  arg_0: int,
                  arg_1: Union[int, Any] = None,
                  step: int = 1) -> _HumanData:
        """Slice all sliceable values along major_dim dimension.

        Args:
            arg_0 (int):
                When arg_1 is None, arg_0 is stop and start=0.
                When arg_1 is not None, arg_0 is start.
            arg_1 (Union[int, Any], optional):
                None or where to stop.
                Defaults to None.
            step (int, optional):
                Length of step. Defaults to 1.

        Returns:
            HumanData:
                A new HumanData instance with sliced values.
        """
        ret_human_data = \
            HumanData.new(key_strict=self.get_key_strict())
        # range()-style argument handling: (stop) vs (start, stop)
        if arg_1 is None:
            start = 0
            stop = arg_0
        else:
            start = arg_0
            stop = arg_1
        slice_index = slice(start, stop, step)
        dim_dict = self.__get_slice_dim__()
        for key, dim in dim_dict.items():
            # keys not expected be sliced
            if dim is None:
                ret_human_data[key] = self[key]
            elif isinstance(dim, dict):
                # dict value: slice each sub-value along its own dim
                value_dict = self.get_raw_value(key)
                sliced_dict = {}
                for sub_key in value_dict.keys():
                    sub_value = value_dict[sub_key]
                    if dim[sub_key] is None:
                        sliced_dict[sub_key] = sub_value
                    else:
                        sub_dim = dim[sub_key]
                        sliced_sub_value = \
                            HumanData.__get_sliced_result__(
                                sub_value, sub_dim, slice_index)
                        sliced_dict[sub_key] = sliced_sub_value
                ret_human_data[key] = sliced_dict
            else:
                value = self[key]
                sliced_value = \
                    HumanData.__get_sliced_result__(
                        value, dim, slice_index)
                ret_human_data[key] = sliced_value
        # check keypoints compressed: keep the new instance in the same
        # compression state as self
        if self.check_keypoints_compressed():
            ret_human_data.compress_keypoints_by_mask()
        return ret_human_data
    def __get_slice_dim__(self) -> dict:
        """For each key in this HumanData, get the dimension for slicing. 0 for
        default, if no other value specified.

        Returns:
            dict:
                Keys are self.keys().
                Values indicate where to slice.
                None for not expected to be sliced or
                failed.
        """
        supported_keys = self.__class__.SUPPORTED_KEYS
        ret_dict = {}
        for key in self.keys():
            # keys not expected be sliced
            if key in supported_keys and \
                    'dim' in supported_keys[key] and \
                    supported_keys[key]['dim'] is None:
                ret_dict[key] = None
            else:
                value = self[key]
                if isinstance(value, dict) and len(value) > 0:
                    # dict value: decide a slice dim per sub-key
                    ret_dict[key] = {}
                    for sub_key in value.keys():
                        try:
                            sub_value_len = len(value[sub_key])
                            if 'dim' in value:
                                # an explicit 'dim' item overrides all
                                ret_dict[key][sub_key] = value['dim']
                            elif sub_value_len != self.__data_len__:
                                ret_dict[key][sub_key] = None
                            else:
                                ret_dict[key][sub_key] = 0
                        except TypeError:
                            # sub-value has no len(): not sliceable
                            ret_dict[key][sub_key] = None
                    continue
                # instance cannot be sliced without len method
                try:
                    value_len = len(value)
                except TypeError:
                    ret_dict[key] = None
                    continue
                # slice on dim 0 by default
                slice_dim = 0
                if key in supported_keys and \
                        'dim' in supported_keys[key]:
                    slice_dim = \
                        supported_keys[key]['dim']
                data_len = value_len if slice_dim == 0 \
                    else value.shape[slice_dim]
                # dim not for slice
                if data_len != self.__data_len__:
                    ret_dict[key] = None
                    continue
                else:
                    ret_dict[key] = slice_dim
        return ret_dict
    def __setitem__(self, key: _KT, val: _VT) -> None:
        """Set self[key] to value. Only be called when using
        human_data[key] = val. Methods like update won't call __setitem__.
        In keypoints_compressed mode, if the key contains 'keypoints',
        and f'{key}_mask' is in self.keys(), invalid zeros
        will be removed before setting value.

        Args:
            key (_KT):
                Key in HumanData.
                Better be an element in HumanData.SUPPORTED_KEYS.
                If not, an Error will be raised in key_strict mode.
            val (_VT):
                Value to the key.

        Raises:
            KeyError:
                self.get_key_strict() is True and
                key cannot be found in
                HumanData.SUPPORTED_KEYS.
            ValueError:
                Value is supported but doesn't match definition.
            ValueError:
                self.check_keypoints_compressed() is True and
                mask of a keypoint item is missing.
        """
        self.__check_key__(key)
        self.__check_value__(key, val)
        # if it can be compressed by mask
        if self.__keypoints_compressed__:
            class_logger = self.__class__.logger
            if 'keypoints' in key and \
                    '_mask' in key:
                # masks are locked while compressed: the stored data
                # layout depends on them
                msg = 'Mask cannot be modified ' +\
                    'in keypoints_compressed mode.'
                print_log(msg=msg, logger=class_logger, level=logging.WARN)
                return
            elif isinstance(val, np.ndarray) and \
                    'keypoints' in key and \
                    '_mask' not in key:
                mask_key = f'{key}_mask'
                if mask_key in self:
                    # store the compressed form of the incoming array
                    mask_array = np.asarray(super().__getitem__(mask_key))
                    val = \
                        self.__class__.__remove_zero_pad__(val, mask_array)
                else:
                    msg = f'Mask for {key} has not been set.' +\
                        f' Please set {mask_key} before compression.'
                    print_log(
                        msg=msg, logger=class_logger, level=logging.ERROR)
                    raise ValueError
        dict.__setitem__(self, key, val)
def set_raw_value(self, key: _KT, val: _VT) -> None:
"""Set the raw value of self[key] to val after key check. It acts the
same as dict.__setitem__(self, key, val) if the key satisfied
constraints.
Args:
key (_KT):
Key in dict.
val (_VT):
Value to the key.
Raises:
KeyError:
self.get_key_strict() is True and
key cannot be found in
HumanData.SUPPORTED_KEYS.
ValueError:
Value is supported but doesn't match definition.
"""
self.__check_key__(key)
self.__check_value__(key, val)
dict.__setitem__(self, key, val)
def pop_unsupported_items(self) -> None:
"""Find every item with a key not in HumanData.SUPPORTED_KEYS, and pop
it to save memory."""
for key in list(self.keys()):
if key not in self.__class__.SUPPORTED_KEYS:
self.pop(key)
    def __check_key__(self, key: Any) -> _KeyCheck:
        """Check whether the key matches definition in
        HumanData.SUPPORTED_KEYS.

        Args:
            key (Any):
                Key in HumanData.

        Returns:
            _KeyCheck:
                PASS, WARN or ERROR.

        Raises:
            KeyError:
                self.get_key_strict() is True and
                key cannot be found in
                HumanData.SUPPORTED_KEYS.
        """
        ret_key_check = _KeyCheck.PASS
        if self.get_key_strict():
            if key not in self.__class__.SUPPORTED_KEYS:
                ret_key_check = _KeyCheck.ERROR
        else:
            # non-strict mode: warn once per unsupported key
            if key not in self.__class__.SUPPORTED_KEYS and \
                    key not in self.__class__.WARNED_KEYS:
                # log warning message at the first time
                ret_key_check = _KeyCheck.WARN
                self.__class__.WARNED_KEYS.append(key)
        if ret_key_check == _KeyCheck.ERROR:
            raise KeyError(self.__class__.__get_key_error_msg__(key))
        elif ret_key_check == _KeyCheck.WARN:
            class_logger = self.__class__.logger
            if class_logger == 'silent':
                # the string 'silent' suppresses the warning entirely
                pass
            else:
                print_log(
                    msg=self.__class__.__get_key_warn_msg__(key),
                    logger=class_logger,
                    level=logging.WARN)
        return ret_key_check
def __check_value__(self, key: Any, val: Any) -> bool:
"""Check whether the value matches definition in
HumanData.SUPPORTED_KEYS.
Args:
key (Any):
Key in HumanData.
val (Any):
Value to the key.
Returns:
bool:
True for matched, ortherwise False.
Raises:
ValueError:
Value is supported but doesn't match definition.
"""
ret_bool = self.__check_value_type__(key, val) and\
self.__check_value_shape__(key, val) and\
self.__check_value_len__(key, val)
if not ret_bool:
raise ValueError(self.__class__.__get_value_error_msg__())
return ret_bool
    def __check_value_type__(self, key: Any, val: Any) -> bool:
        """Check whether the type of val matches definition in
        HumanData.SUPPORTED_KEYS.

        Args:
            key (Any):
                Key in HumanData.
            val (Any):
                Value to the key.

        Returns:
            bool:
                If type doesn't match, return False.
                Else return True.
        """
        ret_bool = True
        supported_keys = self.__class__.SUPPORTED_KEYS
        # check definition
        if key in supported_keys:
            # check type
            # exact type comparison (not isinstance) — presumably
            # intentional to reject subclasses; TODO confirm
            if type(val) != supported_keys[key]['type']:
                ret_bool = False
        if not ret_bool:
            expected_type = supported_keys[key]['type']
            err_msg = 'Type check Failed:\n'
            err_msg += f'key={str(key)}\n'
            err_msg += f'type(val)={type(val)}\n'
            err_msg += f'expected type={expected_type}\n'
            print_log(
                msg=err_msg, logger=self.__class__.logger, level=logging.ERROR)
        return ret_bool
    def __check_value_shape__(self, key: Any, val: Any) -> bool:
        """Check whether the shape of val matches definition in
        HumanData.SUPPORTED_KEYS.

        Args:
            key (Any):
                Key in HumanData.
            val (Any):
                Value to the key.

        Returns:
            bool:
                If expected shape is defined and doesn't match,
                return False.
                Else return True.
        """
        ret_bool = True
        supported_keys = self.__class__.SUPPORTED_KEYS
        # check definition
        if key in supported_keys:
            # check shape
            if 'shape' in supported_keys[key]:
                val_shape = val.shape
                for shape_ind in range(len(supported_keys[key]['shape'])):
                    # length not match
                    if shape_ind >= len(val_shape):
                        ret_bool = False
                        break
                    expect_val = supported_keys[key]['shape'][shape_ind]
                    # value not match
                    # (non-positive expected values, e.g. -1, accept any size)
                    if expect_val > 0 and \
                            expect_val != val_shape[shape_ind]:
                        ret_bool = False
                        break
        if not ret_bool:
            # val_shape is always bound here: ret_bool only turns False
            # inside the branch that assigned it
            expected_shape = str(supported_keys[key]['shape'])
            expected_shape = expected_shape.replace('-1', 'Any')
            err_msg = 'Shape check Failed:\n'
            err_msg += f'key={str(key)}\n'
            err_msg += f'val.shape={val_shape}\n'
            err_msg += f'expected shape={expected_shape}\n'
            print_log(
                msg=err_msg, logger=self.__class__.logger, level=logging.ERROR)
        return ret_bool
    # NOTE(review): data_len is presumably a property in the full file
    # (@property / @data_len.setter decorators not visible here) — other
    # methods read/assign ``self.data_len`` without parentheses.
    def data_len(self) -> int:
        """Get the temporal length of this HumanData instance.

        Returns:
            int:
                Number of frames related to this instance.
        """
        return self.__data_len__
    def data_len(self, value: int):
        """Set the temporal length of this HumanData instance.

        Args:
            value (int):
                Number of frames related to this instance.
        """
        self.__data_len__ = value
    def __check_value_len__(self, key: Any, val: Any) -> bool:
        """Check whether the temporal length of val matches other values.

        Note: as a side effect, the first temporally-dimensioned value
        seen defines self.data_len for all later checks.

        Args:
            key (Any):
                Key in HumanData.
            val (Any):
                Value to the key.

        Returns:
            bool:
                If temporal dim is defined and temporal length doesn't match,
                return False.
                Else return True.
        """
        ret_bool = True
        supported_keys = self.__class__.SUPPORTED_KEYS
        # check definition
        if key in supported_keys:
            # check temporal length
            if 'dim' in supported_keys[key] and \
                    supported_keys[key]['dim'] is not None:
                val_slice_dim = supported_keys[key]['dim']
                if supported_keys[key]['type'] == dict:
                    # dict values carry their sliceable array at 'slice_key'
                    slice_key = supported_keys[key]['slice_key']
                    val_data_len = val[slice_key].shape[val_slice_dim]
                else:
                    val_data_len = val.shape[val_slice_dim]
                if self.data_len < 0:
                    # no data_len yet, assign a new one
                    self.data_len = val_data_len
                else:
                    # check if val_data_len matches recorded data_len
                    if self.data_len != val_data_len:
                        ret_bool = False
        if not ret_bool:
            err_msg = 'Temporal check Failed:\n'
            err_msg += f'key={str(key)}\n'
            err_msg += f'val\'s data_len={val_data_len}\n'
            err_msg += f'expected data_len={self.data_len}\n'
            print_log(
                msg=err_msg, logger=self.__class__.logger, level=logging.ERROR)
        return ret_bool
    def generate_mask_from_confidence(self, keys=None) -> None:
        """Generate mask from keypoints' confidence. Keypoints that have zero
        confidence in all occurrences will have a zero mask. Note that the last
        value of the keypoint is assumed to be confidence.

        Args:
            keys: None, str, or list of str.
                None: all keys with `keypoints` in it will have mask
                generated from their confidence.
                str: key of the keypoint; the mask is stored under
                f'{key}_mask'.
                list of str: a list of keys of the keypoints.
                Generate mask for multiple keypoints.
                Defaults to None.

        Returns:
            None

        Raises:
            KeyError:
                A key is not found.
            TypeError:
                `keys` is neither None, str nor list of str.
        """
        if keys is None:
            # collect every keypoint array (skip the masks themselves)
            keys = []
            for key in self.keys():
                val = self.get_raw_value(key)
                if isinstance(val, np.ndarray) and \
                        'keypoints' in key and \
                        '_mask' not in key:
                    keys.append(key)
        elif isinstance(keys, str):
            keys = [keys]
        elif isinstance(keys, list):
            for key in keys:
                assert isinstance(key, str)
        else:
            raise TypeError(f'`Keys` must be None, str, or list of str, '
                            f'got {type(keys)}.')
        update_dict = {}
        for kpt_key in keys:
            kpt_array = self.get_raw_value(kpt_key)
            num_joints = kpt_array.shape[-2]
            # if all conf of a joint are zero, this joint is masked
            joint_conf = kpt_array[..., -1].reshape(-1, num_joints)
            mask_array = (joint_conf > 0).astype(np.uint8).max(axis=0)
            assert len(mask_array) == num_joints
            # generate mask
            update_dict[f'{kpt_key}_mask'] = mask_array
        self.update(update_dict)
    def compress_keypoints_by_mask(self) -> None:
        """If a key contains 'keypoints', and f'{key}_mask' is in self.keys(),
        invalid zeros will be removed and f'{key}_mask' will be locked.

        Raises:
            KeyError:
                A key contains 'keypoints' has been found
                but its corresponding mask is missing.
        """
        assert self.__keypoints_compressed__ is False
        # first pass: verify every keypoint array has a mask
        key_pairs = []
        for key in self.keys():
            mask_key = f'{key}_mask'
            val = self.get_raw_value(key)
            if isinstance(val, np.ndarray) and \
                    'keypoints' in key and \
                    '_mask' not in key:
                if mask_key in self:
                    key_pairs.append([key, mask_key])
                else:
                    msg = f'Mask for {key} has not been set.' +\
                        f'Please set {mask_key} before compression.'
                    raise KeyError(msg)
        # second pass: compress, so a failure above leaves self untouched
        compressed_dict = {}
        for kpt_key, mask_key in key_pairs:
            kpt_array = self.get_raw_value(kpt_key)
            mask_array = np.asarray(self.get_raw_value(mask_key))
            compressed_kpt = \
                self.__class__.__remove_zero_pad__(kpt_array, mask_array)
            compressed_dict[kpt_key] = compressed_kpt
        # set value after all pairs are compressed
        # (update() bypasses __setitem__, so values are stored as-is)
        self.update(compressed_dict)
        self.__keypoints_compressed__ = True
    def decompress_keypoints(self) -> None:
        """If a key contains 'keypoints', and f'{key}_mask' is in self.keys(),
        invalid zeros will be inserted to the right places and f'{key}_mask'
        will be unlocked.

        Raises:
            KeyError:
                A key contains 'keypoints' has been found
                but its corresponding mask is missing.
        """
        assert self.__keypoints_compressed__ is True
        # first pass: verify every keypoint array has a mask
        key_pairs = []
        for key in self.keys():
            mask_key = f'{key}_mask'
            val = self.get_raw_value(key)
            if isinstance(val, np.ndarray) and \
                    'keypoints' in key and \
                    '_mask' not in key:
                if mask_key in self:
                    key_pairs.append([key, mask_key])
                else:
                    class_logger = self.__class__.logger
                    msg = f'Mask for {key} has not been found.' +\
                        f'Please remove {key} before decompression.'
                    print_log(
                        msg=msg, logger=class_logger, level=logging.ERROR)
                    raise KeyError
        # second pass: re-insert zeros at the masked-out joint slots
        decompressed_dict = {}
        for kpt_key, mask_key in key_pairs:
            mask_array = np.asarray(self.get_raw_value(mask_key))
            compressed_kpt = self.get_raw_value(kpt_key)
            kpt_array = \
                self.__class__.__add_zero_pad__(compressed_kpt, mask_array)
            decompressed_dict[kpt_key] = kpt_array
        # set value after all pairs are decompressed
        self.update(decompressed_dict)
        self.__keypoints_compressed__ = False
    def dump_by_pickle(self, pkl_path: str, overwrite: bool = True) -> None:
        """Dump keys and items to a pickle file. It's a secondary dump method,
        when a HumanData instance is too large to be dumped by self.dump()

        Args:
            pkl_path (str):
                Path to a dumped pickle file.
            overwrite (bool, optional):
                Whether to overwrite if there is already a file.
                Defaults to True.

        Raises:
            ValueError:
                pkl_path does not end with '.pkl'.
            FileExistsError:
                When overwrite is False and file exists.
        """
        if not check_path_suffix(pkl_path, ['.pkl']):
            raise ValueError('Not an pkl file.')
        if not overwrite:
            if check_path_existence(pkl_path, 'file') == Existence.FileExist:
                raise FileExistsError
        # Persist bookkeeping attributes with the data, mirroring dump().
        dict_to_dump = {
            '__key_strict__': self.__key_strict__,
            '__data_len__': self.__data_len__,
            '__keypoints_compressed__': self.__keypoints_compressed__,
        }
        dict_to_dump.update(self)
        with open(pkl_path, 'wb') as f_writeb:
            pickle.dump(
                dict_to_dump, f_writeb, protocol=pickle.HIGHEST_PROTOCOL)
    def load_by_pickle(self, pkl_path: str) -> None:
        """Load data from pkl_path and update them to self.
        When a HumanData Instance was dumped by
        self.dump_by_pickle(), use this to load.

        Args:
            pkl_path (str):
                Path to a dumped pickle file.
        """
        with open(pkl_path, 'rb') as f_readb:
            tmp_data_dict = pickle.load(f_readb)
            for key, value in list(tmp_data_dict.items()):
                if value is None:
                    tmp_data_dict.pop(key)
                elif key == '__key_strict__' or \
                        key == '__data_len__' or\
                        key == '__keypoints_compressed__':
                    # bookkeeping values become attributes, not items
                    self.__setattr__(key, value)
                    # pop the attributes to keep dict clean
                    tmp_data_dict.pop(key)
                elif key == 'bbox_xywh' and value.shape[1] == 4:
                    # legacy dumps lack the confidence column; pad with 1s
                    value = np.hstack([value, np.ones([value.shape[0], 1])])
                    tmp_data_dict[key] = value
                else:
                    tmp_data_dict[key] = value
            self.update(tmp_data_dict)
            self.__set_default_values__()
    def __set_default_values__(self) -> None:
        """For older versions of HumanData, call this method to apply missing
        values (also attributes)."""
        supported_keys = self.__class__.SUPPORTED_KEYS
        # Recover __data_len__ from the first sliceable supported value
        # when loading a dump that predates the attribute.
        if self.__data_len__ == -1:
            for key in supported_keys:
                if key in self and \
                        'dim' in supported_keys[key] and\
                        supported_keys[key]['dim'] is not None:
                    if 'slice_key' in supported_keys[key] and\
                            supported_keys[key]['type'] == dict:
                        sub_key = supported_keys[key]['slice_key']
                        slice_dim = supported_keys[key]['dim']
                        self.__data_len__ = \
                            self[key][sub_key].shape[slice_dim]
                    else:
                        slice_dim = supported_keys[key]['dim']
                        self.__data_len__ = self[key].shape[slice_dim]
                    break
        # Older dumps may miss convention entries; default to 'human_data'.
        for key in list(self.keys()):
            convention_key = f'{key}_convention'
            if key.startswith('keypoints') and \
                    not key.endswith('_mask') and \
                    not key.endswith('_convention') and \
                    convention_key not in self:
                self[convention_key] = 'human_data'
    # NOTE(review): presumably a classmethod in the full file (decorator
    # not visible here) — the first parameter is ``cls``.
    def concatenate(cls, human_data_0: _HumanData,
                    human_data_1: _HumanData) -> _HumanData:
        """Concatenate two human_data. All keys will be kept it the returned
        human_data. If either value from human_data_0 or human_data_1 matches
        data_len from its HumanData, the two values will be concatenated as a
        single value. If not, postfix will be added to the key to specify
        source of the value.

        Args:
            human_data_0 (_HumanData)
            human_data_1 (_HumanData)

        Returns:
            _HumanData:
                A new human_data instance with all concatenated data.
        """
        ret_human_data = cls.new(key_strict=False)
        set_0 = set(human_data_0.keys())
        set_1 = set(human_data_1.keys())
        common_keys = set_0.intersection(set_1)
        dim_dict_0 = human_data_0.__get_slice_dim__()
        dim_dict_1 = human_data_1.__get_slice_dim__()
        for key in common_keys:
            value_0 = human_data_0[key]
            value_1 = human_data_1[key]
            # align type
            value_0 = list(value_0) if isinstance(value_0, tuple)\
                else value_0
            value_1 = list(value_1) if isinstance(value_1, tuple)\
                else value_1
            assert type(value_0) == type(value_1)
            # align convention
            if key.startswith('keypoints') and\
                    key.endswith('_convention'):
                assert value_0 == value_1
                ret_human_data[key] = value_0
                continue
            # mask_0 and mask_1
            elif key.startswith('keypoints') and\
                    key.endswith('_mask'):
                # element-wise product: a joint stays valid only when it
                # is valid in both sources
                new_mask = value_0 * value_1
                ret_human_data[key] = new_mask
                continue
            # go through the sub dict
            if isinstance(value_0, dict):
                sub_dict = {}
                for sub_key, sub_value_0 in value_0.items():
                    # only found in value_0
                    if sub_key not in value_1:
                        sub_dict[sub_key] = sub_value_0
                    # found in both values
                    else:
                        sub_value_1 = value_1[sub_key]
                        concat_sub_dict = cls.__concat_value__(
                            key=sub_key,
                            value_0=sub_value_0,
                            dim_0=dim_dict_0[key][sub_key],
                            value_1=sub_value_1,
                            dim_1=dim_dict_1[key][sub_key])
                        sub_dict.update(concat_sub_dict)
                for sub_key, sub_value_1 in value_1.items():
                    if sub_key not in value_0:
                        sub_dict[sub_key] = sub_value_1
                ret_human_data[key] = sub_dict
            # try concat
            else:
                concat_dict = cls.__concat_value__(
                    key=key,
                    value_0=value_0,
                    dim_0=dim_dict_0[key],
                    value_1=value_1,
                    dim_1=dim_dict_1[key])
                ret_human_data.update(concat_dict)
        # check exclusive keys
        for key, value in human_data_0.items():
            if key not in common_keys:
                # value not for concat and slice
                if dim_dict_0[key] is None:
                    ret_human_data[key] = value
                # value aligned with data_len of HumanData_0
                else:
                    ret_human_data[f'{key}_0'] = value
        for key, value in human_data_1.items():
            if key not in common_keys:
                # same as above
                if dim_dict_1[key] is None:
                    ret_human_data[key] = value
                else:
                    ret_human_data[f'{key}_1'] = value
        return ret_human_data
    def __concat_value__(cls, key: Any, value_0: Any, value_1: Any,
                         dim_0: Union[None, int], dim_1: Union[None,
                                                               int]) -> dict:
        """Concat two values from two different HumanData.

        Args:
            key (Any):
                The common key of the two values.
            value_0 (Any):
                Value from 0.
            value_1 (Any):
                Value from 1.
            dim_0 (Union[None, int]):
                The dim for concat and slice. None for N/A.
            dim_1 (Union[None, int]):
                The dim for concat and slice. None for N/A.

        Returns:
            dict:
                Dict for concatenated result.
        """
        ret_dict = {}
        if dim_0 is None or dim_1 is None:
            # not concatenable: keep both, tagged by source
            ret_dict[f'{key}_0'] = value_0
            ret_dict[f'{key}_1'] = value_1
        elif isinstance(value_0, list):
            ret_dict[key] = value_0 + value_1
        # elif isinstance(value_0, np.ndarray):
        else:
            # NOTE(review): concatenates along dim_0 only — presumably
            # dim_0 == dim_1 always holds here; confirm with callers.
            ret_dict[key] = np.concatenate((value_0, value_1), axis=dim_0)
        return ret_dict
def __add_zero_pad__(cls, compressed_array: np.ndarray,
mask_array: np.ndarray) -> np.ndarray:
"""Pad zeros to a compressed keypoints array.
Args:
compressed_array (np.ndarray):
A compressed keypoints array.
mask_array (np.ndarray):
The mask records compression relationship.
Returns:
np.ndarray:
A keypoints array in full-size.
"""
assert mask_array.sum() == compressed_array.shape[1]
data_len, _, dim = compressed_array.shape
mask_len = mask_array.shape[0]
ret_value = np.zeros(
shape=[data_len, mask_len, dim], dtype=compressed_array.dtype)
valid_mask_index = np.where(mask_array == 1)[0]
ret_value[:, valid_mask_index, :] = compressed_array
return ret_value
def __remove_zero_pad__(cls, zero_pad_array: np.ndarray,
mask_array: np.ndarray) -> np.ndarray:
"""Remove zero-padding from a full-size keypoints array.
Args:
zero_pad_array (np.ndarray):
A keypoints array in full-size.
mask_array (np.ndarray):
The mask records compression relationship.
Returns:
np.ndarray:
A compressed keypoints array.
"""
assert mask_array.shape[0] == zero_pad_array.shape[1]
valid_mask_index = np.where(mask_array == 1)[0]
ret_value = np.take(zero_pad_array, valid_mask_index, axis=1)
return ret_value
def __get_key_warn_msg__(cls, key: Any) -> str:
"""Get the warning message when a key fails the check.
Args:
key (Any):
The key with wrong.
Returns:
str:
The warning message.
"""
class_name = cls.__name__
warn_message = \
f'{key} is absent in' +\
f' {class_name}.SUPPORTED_KEYS.\n'
suggestion_message = \
'Ignore this if you know exactly' +\
' what you are doing.\n' +\
'Otherwise, Call self.set_key_strict(True)' +\
' to avoid wrong keys.\n'
return warn_message + suggestion_message
def __get_key_error_msg__(cls, key: Any) -> str:
"""Get the error message when a key fails the check.
Args:
key (Any):
The key with wrong.
Returns:
str:
The error message.
"""
class_name = cls.__name__
absent_message = \
f'{key} is absent in' +\
f' {class_name}.SUPPORTED_KEYS.\n'
suggestion_message = \
'Call self.set_key_strict(False)' +\
' to allow unsupported keys.\n'
return absent_message + suggestion_message
def __get_value_error_msg__(cls) -> str:
"""Get the error message when a value fails the check.
Returns:
str:
The error message.
"""
error_message = \
'An supported value doesn\'t ' +\
'match definition.\n'
suggestion_message = \
'See error log for details.\n'
return error_message + suggestion_message
def __get_sliced_result__(
cls, input_data: Union[np.ndarray, list, tuple], slice_dim: int,
slice_range: slice) -> Union[np.ndarray, list, tuple]:
"""Slice input_data along slice_dim with slice_range.
Args:
input_data (Union[np.ndarray, list, tuple]):
Data to be sliced.
slice_dim (int):
Dimension to be sliced.
slice_range (slice):
An instance of class slice.
Returns:
Union[np.ndarray, list, tuple]:
A slice of input_data.
"""
if isinstance(input_data, np.ndarray):
slice_list = [
slice(None),
] * len(input_data.shape)
slice_list[slice_dim] = slice_range
sliced_data = input_data[tuple(slice_list)]
else:
sliced_data = \
input_data[slice_range]
return sliced_data
def smooth_process(x,
                   smooth_type='savgol',
                   cfg_base_dir='configs/_base_/post_processing/'):
    """Smooth the array with the specified smoothing type.

    Args:
        x (np.ndarray): Shape should be (frame,num_person,K,C)
            or (frame,K,C).
        smooth_type (str, optional): Smooth type.
            choose in ['oneeuro', 'gaus1d', 'savgol','smoothnet',
            'smoothnet_windowsize8','smoothnet_windowsize16',
            'smoothnet_windowsize32','smoothnet_windowsize64'].
            Defaults to 'savgol'. 'smoothnet' is default with windowsize=8.
        cfg_base_dir (str, optional): Config base dir,
            default configs/_base_/post_processing/

    Raises:
        ValueError: check the input smoothing type.

    Returns:
        np.ndarray: Smoothed data. The shape should be
            (frame,num_person,K,C) or (frame,K,C).
    """
    if smooth_type == 'smoothnet':
        smooth_type = 'smoothnet_windowsize8'
    assert smooth_type in [
        'oneeuro', 'gaus1d', 'savgol', 'smoothnet_windowsize8',
        'smoothnet_windowsize16', 'smoothnet_windowsize32',
        'smoothnet_windowsize64'
    ]
    # The config path is always a str here, so Config.fromfile applies
    # unconditionally.
    cfg = mmcv.Config.fromfile(os.path.join(cfg_base_dir,
                                            smooth_type + '.py'))

    # Work on a copy so the caller's array is never mutated.
    x = x.copy()
    assert x.ndim in (3, 4)

    smooth_func = build_post_processing(dict(cfg['smooth_cfg']))

    if x.ndim == 4:
        # Smooth each person track independently.
        for person_idx in range(x.shape[1]):
            x[:, person_idx] = smooth_func(x[:, person_idx])
    else:
        x = smooth_func(x)

    return x
def speed_up_process(x,
                     speed_up_type='deciwatch',
                     cfg_base_dir='configs/_base_/post_processing/'):
    """Speed up the process with the specified speed up type.

    Args:
        x (torch.Tensor): Shape should be (frame,num_person,K,C)
            or (frame,K,C).
        speed_up_type (str, optional): Speed up type.
            choose in ['deciwatch',
            'deciwatch_interval5_q1' .. 'deciwatch_interval5_q5',
            'deciwatch_interval10_q1' .. 'deciwatch_interval10_q5'].
            Defaults to 'deciwatch' (alias of 'deciwatch_interval5_q3').
        cfg_base_dir (str, optional): Config base dir.
            Defaults to 'configs/_base_/post_processing/'

    Raises:
        ValueError: check the input speed up type.

    Returns:
        np.ndarray: Completed data, moved back to CPU.
    """
    if speed_up_type == 'deciwatch':
        speed_up_type = 'deciwatch_interval5_q3'
    assert speed_up_type in [
        'deciwatch_interval5_q1',
        'deciwatch_interval5_q2',
        'deciwatch_interval5_q3',
        'deciwatch_interval5_q4',
        'deciwatch_interval5_q5',
        'deciwatch_interval10_q1',
        'deciwatch_interval10_q2',
        'deciwatch_interval10_q3',
        'deciwatch_interval10_q4',
        'deciwatch_interval10_q5',
    ]
    # The config path is always a str here, so Config.fromfile applies
    # unconditionally.
    cfg = mmcv.Config.fromfile(os.path.join(cfg_base_dir,
                                            speed_up_type + '.py'))

    # Work on a copy so the caller's tensor is never mutated.
    x = x.clone()
    assert x.ndim in (4, 5)

    speed_up_cfg = cfg['speed_up_cfg']
    # The post-processing module must run on the same device as the data.
    speed_up_cfg['device'] = x.device
    speed_up_func = build_post_processing(speed_up_cfg)

    if x.ndim == 5:
        # Process each person track independently.
        for person_idx in range(x.shape[1]):
            x[:, person_idx] = speed_up_func(x[:, person_idx])
    else:
        x = speed_up_func(x)

    return np.array(x.cpu())
def get_speed_up_interval(speed_up_type,
                          cfg_base_dir='configs/_base_/post_processing/'):
    """Look up the frame interval configured for a speed up type.

    Args:
        speed_up_type (str, optional): Speed up type.
            choose in ['deciwatch',
            'deciwatch_interval5_q1' .. 'deciwatch_interval5_q5',
            'deciwatch_interval10_q1' .. 'deciwatch_interval10_q5'].
            'deciwatch' is an alias of 'deciwatch_interval5_q3'.
        cfg_base_dir (str, optional): Config base dir,
            default configs/_base_/post_processing/

    Raises:
        ValueError: check the input speed up type.

    Returns:
        int: speed up interval
    """
    if speed_up_type == 'deciwatch':
        speed_up_type = 'deciwatch_interval5_q3'
    assert speed_up_type in [
        'deciwatch_interval5_q1',
        'deciwatch_interval5_q2',
        'deciwatch_interval5_q3',
        'deciwatch_interval5_q4',
        'deciwatch_interval5_q5',
        'deciwatch_interval10_q1',
        'deciwatch_interval10_q2',
        'deciwatch_interval10_q3',
        'deciwatch_interval10_q4',
        'deciwatch_interval10_q5',
    ]
    # The config path is always a str here, so Config.fromfile applies
    # unconditionally.
    cfg = mmcv.Config.fromfile(os.path.join(cfg_base_dir,
                                            speed_up_type + '.py'))
    return cfg['speed_up_cfg']['interval']
def speed_up_interpolate(selected_frames, speed_up_frames, smpl_poses,
                         smpl_betas, pred_cams, bboxes_xyxy):
    """Interpolate smpl_betas, pred_cams, and bboxes_xyxyx for speed up.

    Only the first ``speed_up_frames`` rows are filled in; ``smpl_poses``
    is returned untouched here (pose recovery happens in
    ``speed_up_process``, despite what the Returns section implies).

    Args:
        selected_frames (np.ndarray): Shape should be (selected frame number).
        speed_up_frames (int): Total speed up frame number
        smpl_poses (np.ndarray): selected frame smpl poses parameter
        smpl_betas (np.ndarray): selected frame smpl shape paeameter
        pred_cams (np.ndarray): selected frame camera parameter
        bboxes_xyxy (np.ndarray): selected frame bbox

    Returns:
        smpl_poses (np.ndarray): interpolated frame smpl poses parameter
        smpl_betas (np.ndarray): interpolated frame smpl shape paeameter
        pred_cams (np.ndarray): interpolated frame camera parameter
        bboxes_xyxy (np.ndarray): interpolated frame bbox
    """
    # Keep only key frames inside the speed-up window.
    selected_frames = selected_frames[selected_frames <= speed_up_frames]
    # NOTE(review): each interp1d is evaluated on
    # np.arange(0, max(selected_frames)), which has max(selected_frames)
    # rows, while the target slice has speed_up_frames rows.  This only
    # works when max(selected_frames) == speed_up_frames — confirm that
    # the caller's frame selection guarantees this.
    pred_cams[:speed_up_frames, :] = interpolate.interp1d(
        selected_frames, pred_cams[selected_frames, :], kind='linear', axis=0)(
            np.arange(0, max(selected_frames)))
    bboxes_xyxy[:speed_up_frames, :] = interpolate.interp1d(
        selected_frames,
        bboxes_xyxy[selected_frames, :],
        kind='linear',
        axis=0)(
            np.arange(0, max(selected_frames)))
    smpl_betas[:speed_up_frames, :] = interpolate.interp1d(
        selected_frames, smpl_betas[selected_frames, :], kind='linear',
        axis=0)(
            np.arange(0, max(selected_frames)))
    return smpl_poses, smpl_betas, pred_cams, bboxes_xyxy
def extract_feature_sequence(extracted_results,
                             frame_idx,
                             causal,
                             seq_len,
                             step=1):
    """Build a fixed-length window of per-frame feature results around a
    target frame, replicating the boundary frames where the window runs
    off either end of the video.

    Args:
        extracted_results (List[List[Dict]]): Multi-frame feature
            extraction results; one inner list of per-person dicts per
            frame (each with ``features``, optional ``track_id``, and
            ``bbox``).
        frame_idx (int): The index of the target frame in the original
            video.
        causal (bool): If True, the target frame is the first frame of
            the window; otherwise it sits in the middle.
        seq_len (int): The number of frames in the returned window.
        step (int): Stride between sampled frames.

    Returns:
        List[List[Dict]]: A window of length seq_len, padded at the
            edges with copies of the first/last frame's results.
    """
    if causal:
        frames_before, frames_after = 0, seq_len - 1
    else:
        frames_before = (seq_len - 1) // 2
        frames_after = frames_before

    total = len(extracted_results)

    # How many window slots fall before frame 0 / after the last frame.
    pad_left = max(0, frames_before - frame_idx // step)
    pad_right = max(0, frames_after - (total - 1 - frame_idx) // step)

    # Real frame range that the stride actually visits.
    start = max(frame_idx % step, frame_idx - frames_before * step)
    end = min(total - (total - 1 - frame_idx) % step,
              frame_idx + frames_after * step + 1)

    return ([extracted_results[0]] * pad_left +
            extracted_results[start:end:step] +
            [extracted_results[-1]] * pad_right)
def array_to_images(
    image_array: np.ndarray,
    output_folder: str,
    img_format: str = '%06d.png',
    resolution: Optional[Union[Tuple[int, int], Tuple[float, float]]] = None,
    disable_log: bool = False,
) -> None:
    """Write a frame array straight to image files through an ffmpeg pipe.

    Args:
        image_array (np.ndarray): shape should be (f * h * w * 3);
            channel order is BGR (matching OpenCV).
        output_folder (str): output folder for the images.
        img_format (str, optional): format of the images.
            Defaults to '%06d.png'.
        resolution (Optional[Union[Tuple[int, int], Tuple[float, float]]],
            optional): resolution(height, width) of output.
            Defaults to None.
        disable_log (bool, optional): whether close the ffmepg command info.
            Defaults to False.

    Raises:
        FileNotFoundError: check output folder.
        TypeError: check input array.
        BrokenPipeError: ffmpeg did not expose its stdin/stderr pipes.

    Returns:
        None
    """
    prepare_output_path(
        output_folder,
        allowed_suffix=[],
        tag='output image folder',
        path_type='dir',
        overwrite=True)

    if not isinstance(image_array, np.ndarray):
        raise TypeError('Input should be np.ndarray.')
    assert image_array.ndim == 4
    assert image_array.shape[-1] == 3

    if resolution:
        height, width = resolution
    else:
        height, width = image_array.shape[1], image_array.shape[2]

    command = [
        'ffmpeg',
        '-y',  # (optional) overwrite output file if it exists
        '-f',
        'rawvideo',
        '-s',
        f'{int(width)}x{int(height)}',  # size of one frame
        '-pix_fmt',
        'bgr24',  # bgr24 for matching OpenCV
        '-loglevel',
        'error',
        '-threads',
        '4',
        '-i',
        '-',  # The input comes from a pipe
        '-f',
        'image2',
        '-start_number',
        '0',
        os.path.join(output_folder, img_format),
    ]
    if not disable_log:
        print(f'Running \"{" ".join(command)}\"')

    process = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stderr=subprocess.PIPE,
        bufsize=10**8,
        close_fds=True)
    if process.stdin is None or process.stderr is None:
        raise BrokenPipeError('No buffer received.')

    # Stream each frame's raw bytes into ffmpeg.
    for frame in image_array:
        process.stdin.write(frame.tobytes())

    process.stdin.close()
    process.stderr.close()
    process.wait()
def rotmat_to_aa(
    matrix: Union[torch.Tensor, numpy.ndarray]
) -> Union[torch.Tensor, numpy.ndarray]:
    """Convert rotation matrixs to axis angles.

    Args:
        matrix (Union[torch.Tensor, numpy.ndarray]): input shape
            should be (..., 3, 3). ndim of input is unlimited.

    Returns:
        Union[torch.Tensor, numpy.ndarray]: shape would be (..., 3).

    Raises:
        ValueError: if the trailing two dimensions are not 3 x 3.
    """
    if matrix.shape[-1] != 3 or matrix.shape[-2] != 3:
        # Fixed error message: it previously printed a stray literal 'f'
        # before the shape ("shape f(2, 4)").
        raise ValueError(f'Invalid rotation matrix shape {matrix.shape}.')
    # Convert via quaternions using the converters defined elsewhere in
    # this file; Compose chains the two transforms.
    t = Compose([matrix_to_quaternion, quaternion_to_axis_angle])
    return t(matrix)
The provided code snippet includes necessary dependencies for implementing the `single_person_with_mmdet` function. Write a Python function `def single_person_with_mmdet(args, frames_iter)` to solve the following problem:
Estimate smpl parameters from single-person images with mmdetection Args: args (object): object of argparse.Namespace. frames_iter (np.ndarray,): prepared frames
Here is the function:
def single_person_with_mmdet(args, frames_iter):
    """Estimate smpl parameters from single-person
    images with mmdetection.

    Runs a body-mesh regressor on each frame with a detection result,
    optionally skips frames for speed (DeciWatch) and/or smooths the
    outputs, then dumps and/or renders the results.

    Args:
        args (object): object of argparse.Namespace.
        frames_iter (np.ndarray,): prepared frames
    """
    mesh_model, extractor = init_model(
        args.mesh_reg_config,
        args.mesh_reg_checkpoint,
        device=args.device.lower())

    pred_cams, verts, smpl_poses, smpl_betas, bboxes_xyxy = \
        [], [], [], [], []

    frame_id_list, result_list = \
        get_detection_result(args, frames_iter, mesh_model, extractor)
    frame_num = len(frame_id_list)

    # Speed up: only every `speed_up_interval`-th frame within the first
    # `speed_up_frames` frames is regressed; the rest get placeholder
    # zeros and are recovered below.
    if args.speed_up_type:
        speed_up_interval = get_speed_up_interval(args.speed_up_type)
        speed_up_frames = (frame_num -
                           1) // speed_up_interval * speed_up_interval

    for i, result in enumerate(mmcv.track_iter_progress(result_list)):
        frame_id = frame_id_list[i]
        if mesh_model.cfg.model.type == 'VideoBodyModelEstimator':
            if args.speed_up_type:
                warnings.warn(
                    'Video based models do not support speed up. '
                    'By default we will inference with original speed.',
                    UserWarning)
            feature_results_seq = extract_feature_sequence(
                result_list, frame_idx=i, causal=True, seq_len=16, step=1)
            mesh_results = inference_video_based_model(
                mesh_model,
                extracted_results=feature_results_seq,
                with_track_id=False)
        elif mesh_model.cfg.model.type == 'ImageBodyModelEstimator':
            if args.speed_up_type and i % speed_up_interval != 0\
                    and i <= speed_up_frames:
                # Skipped frame: placeholder zeros, reconstructed later
                # by speed_up_process / speed_up_interpolate.
                mesh_results = [{
                    'bbox': np.zeros((5)),
                    'camera': np.zeros((3)),
                    'smpl_pose': np.zeros((24, 3, 3)),
                    'smpl_beta': np.zeros((10)),
                    'vertices': np.zeros((6890, 3)),
                    'keypoints_3d': np.zeros((17, 3)),
                }]
            else:
                mesh_results = inference_image_based_model(
                    mesh_model,
                    frames_iter[frame_id],
                    result,
                    bbox_thr=args.bbox_thr,
                    format='xyxy')
        else:
            raise Exception(
                f'{mesh_model.cfg.model.type} is not supported yet')

        smpl_betas.append(mesh_results[0]['smpl_beta'])
        smpl_pose = mesh_results[0]['smpl_pose']
        smpl_poses.append(smpl_pose)
        pred_cams.append(mesh_results[0]['camera'])
        verts.append(mesh_results[0]['vertices'])
        bboxes_xyxy.append(mesh_results[0]['bbox'])

    smpl_poses = np.array(smpl_poses)
    smpl_betas = np.array(smpl_betas)
    pred_cams = np.array(pred_cams)
    verts = np.array(verts)
    bboxes_xyxy = np.array(bboxes_xyxy)

    # release GPU memory
    del mesh_model
    del extractor
    torch.cuda.empty_cache()

    # Recover the frames skipped by the speed-up scheme.
    if args.speed_up_type:
        smpl_poses = speed_up_process(
            torch.tensor(smpl_poses).to(args.device.lower()),
            args.speed_up_type)

        selected_frames = np.arange(0, len(frames_iter), speed_up_interval)
        smpl_poses, smpl_betas, pred_cams, bboxes_xyxy = speed_up_interpolate(
            selected_frames, speed_up_frames, smpl_poses, smpl_betas,
            pred_cams, bboxes_xyxy)

    # smooth (poses are smoothed as flattened rotation matrices)
    if args.smooth_type is not None:
        smpl_poses = smooth_process(
            smpl_poses.reshape(frame_num, 24, 9),
            smooth_type=args.smooth_type).reshape(frame_num, 24, 3, 3)
        verts = smooth_process(verts, smooth_type=args.smooth_type)
        pred_cams = smooth_process(
            pred_cams[:, np.newaxis],
            smooth_type=args.smooth_type).reshape(frame_num, 3)

    if smpl_poses.shape[1:] == (24, 3, 3):
        smpl_poses = rotmat_to_aa(smpl_poses)
    elif smpl_poses.shape[1:] == (24, 3):
        # Already axis-angle: nothing to convert.  (Bug fix: this branch
        # previously overwrote the whole sequence with the LAST frame's
        # pose via `smpl_poses = smpl_pose`.)
        pass
    else:
        # Report the shape of the full sequence, not the last frame's pose.
        raise Exception(f'Wrong shape of `smpl_pose`: {smpl_poses.shape}')

    if args.output is not None:
        body_pose_, global_orient_, smpl_betas_, verts_, pred_cams_, \
            bboxes_xyxy_, image_path_, person_id_, frame_id_ = \
            [], [], [], [], [], [], [], [], []
        human_data = HumanData()
        frames_folder = osp.join(args.output, 'images')
        os.makedirs(frames_folder, exist_ok=True)
        array_to_images(
            np.array(frames_iter)[frame_id_list], output_folder=frames_folder)

        for i, img_i in enumerate(sorted(os.listdir(frames_folder))):
            # Split each pose into global orientation (joint 0) and body
            # pose (joints 1..23).
            body_pose_.append(smpl_poses[i][1:])
            global_orient_.append(smpl_poses[i][:1])
            smpl_betas_.append(smpl_betas[i])
            verts_.append(verts[i])
            pred_cams_.append(pred_cams[i])
            bboxes_xyxy_.append(bboxes_xyxy[i])
            image_path_.append(os.path.join('images', img_i))
            person_id_.append(0)
            frame_id_.append(frame_id_list[i])

        smpl = {}
        smpl['body_pose'] = np.array(body_pose_).reshape((-1, 23, 3))
        smpl['global_orient'] = np.array(global_orient_).reshape((-1, 3))
        smpl['betas'] = np.array(smpl_betas_).reshape((-1, 10))
        human_data['smpl'] = smpl
        human_data['verts'] = verts_
        human_data['pred_cams'] = pred_cams_
        human_data['bboxes_xyxy'] = bboxes_xyxy_
        human_data['image_path'] = image_path_
        human_data['person_id'] = person_id_
        human_data['frame_id'] = frame_id_
        human_data.dump(osp.join(args.output, 'inference_result.npz'))

    if args.show_path is not None:
        if args.output is not None:
            frames_folder = os.path.join(args.output, 'images')
        else:
            frames_folder = osp.join(Path(args.show_path).parent, 'images')
            os.makedirs(frames_folder, exist_ok=True)
            array_to_images(
                np.array(frames_iter)[frame_id_list],
                output_folder=frames_folder)
        body_model_config = dict(model_path=args.body_model_dir, type='smpl')
        visualize_smpl_hmr(
            poses=smpl_poses.reshape(-1, 24 * 3),
            betas=smpl_betas,
            cam_transl=pred_cams,
            bbox=bboxes_xyxy,
            output_path=args.show_path,
            render_choice=args.render_choice,
            resolution=frames_iter[0].shape[:2],
            origin_frames=frames_folder,
            body_model_config=body_model_config,
            overwrite=True,
            palette=args.palette,
            read_frames_batch=True)
        if args.output is None:
            # Frames were only extracted for rendering; clean them up.
            shutil.rmtree(frames_folder)
import os
import os.path as osp
import shutil
import warnings
from argparse import ArgumentParser
from pathlib import Path
import mmcv
import numpy as np
import torch
from mmhuman3d.apis import (
feature_extract,
inference_image_based_model,
inference_video_based_model,
init_model,
)
from mmhuman3d.core.visualization.visualize_smpl import visualize_smpl_hmr
from mmhuman3d.data.data_structures.human_data import HumanData
from mmhuman3d.utils.demo_utils import (
extract_feature_sequence,
get_speed_up_interval,
prepare_frames,
process_mmdet_results,
process_mmtracking_results,
smooth_process,
speed_up_interpolate,
speed_up_process,
)
from mmhuman3d.utils.ffmpeg_utils import array_to_images
from mmhuman3d.utils.transforms import rotmat_to_aa
def get_tracking_result(args, frames_iter, mesh_model, extractor):
    """Run multi-object tracking over the frames and collect per-frame
    person results.

    Args:
        args (object): argparse.Namespace; uses tracking_config, device,
            bbox_thr and draw_bbox.
        frames_iter: iterable of frames.
        mesh_model: body model estimator; its cfg decides whether features
            are extracted per frame.
        extractor: feature extractor, may be None.

    Returns:
        tuple: (max_track_id, max_instance, frame_id_list, result_list).
            Frames with no detections are dropped from both lists.
    """
    # NOTE(review): init_tracking_model / inference_mot come from
    # mmtracking — not visible in this file; confirm API versions.
    tracking_model = init_tracking_model(
        args.tracking_config, None, device=args.device.lower())

    max_track_id = 0
    max_instance = 0
    result_list = []
    frame_id_list = []

    for i, frame in enumerate(mmcv.track_iter_progress(frames_iter)):
        mmtracking_results = inference_mot(tracking_model, frame, frame_id=i)

        # keep the person class bounding boxes.
        result, max_track_id, instance_num = \
            process_mmtracking_results(
                mmtracking_results,
                max_track_id=max_track_id,
                bbox_thr=args.bbox_thr)

        # extract features from the input video or image sequences
        if mesh_model.cfg.model.type == 'VideoBodyModelEstimator' \
                and extractor is not None:
            result = feature_extract(
                extractor, frame, result, args.bbox_thr, format='xyxy')

        # drop the frame with no detected results
        if result == []:
            continue

        # update max_instance
        if instance_num > max_instance:
            max_instance = instance_num

        # vis bboxes (draws in place on the frame)
        if args.draw_bbox:
            bboxes = [res['bbox'] for res in result]
            bboxes = np.vstack(bboxes)
            mmcv.imshow_bboxes(
                frame, bboxes, top_k=-1, thickness=2, show=False)

        result_list.append(result)
        frame_id_list.append(i)

    return max_track_id, max_instance, frame_id_list, result_list
def visualize_smpl_hmr(cam_transl,
                       bbox=None,
                       kp2d=None,
                       focal_length=5000,
                       det_width=224,
                       det_height=224,
                       bbox_format='xyxy',
                       **kwargs) -> None:
    """Simplest way to visualize HMR or SPIN or Smplify pred smpl with origin
    frames and predicted cameras.

    Args:
        cam_transl (np.ndarray | torch.Tensor): predicted weak-perspective
            camera parameters per frame.
        bbox: person bounding boxes; derived from kp2d if not given.
        kp2d: 2D keypoints used to build bboxes when bbox is None.
        focal_length (int): focal length of the assumed HMR intrinsic.
        det_width / det_height (int): detector crop size.
        bbox_format (str): bbox layout, e.g. 'xyxy'.
        **kwargs: forwarded to render_smpl (minus keys fixed below).
    """
    if kp2d is not None:
        bbox = convert_kp2d_to_bbox(kp2d, bbox_format=bbox_format)
    # Per-frame intrinsics from the bboxes; K is the shared HMR intrinsic.
    Ks = convert_bbox_to_intrinsic(bbox, bbox_format=bbox_format)
    K = torch.Tensor(
        get_default_hmr_intrinsic(
            focal_length=focal_length,
            det_height=det_height,
            det_width=det_width))
    # Fix the projection setup; callers may not override these keys.
    func = partial(
        render_smpl,
        projection='perspective',
        convention='opencv',
        in_ndc=False,
        K=None,
        R=None,
        orig_cam=None,
    )
    if isinstance(cam_transl, np.ndarray):
        cam_transl = torch.Tensor(cam_transl)
    # Weak-perspective (s, tx, ty) -> camera translation (tx, ty, tz);
    # tz = 2 * f / (w * s), with 1e-9 guarding division by zero.
    T = torch.cat([
        cam_transl[..., [1]], cam_transl[..., [2]], 2 * focal_length /
        (det_width * cam_transl[..., [0]] + 1e-9)
    ], -1)
    # Strip kwargs already fixed by the partial to avoid duplicates.
    for k in func.keywords.keys():
        if k in kwargs:
            kwargs.pop(k)
    return func(Ks=Ks, K=K, T=T, **kwargs)
class HumanData(dict):
    """A dict-like container for human-centric annotation data."""

    # Class-wide logger, bound via set_logger(); None means unset.
    logger = None
    # Schema of supported keys (defined elsewhere in this module).
    SUPPORTED_KEYS = _HumanData_SUPPORTED_KEYS
    # Keys that have already triggered a warning, shared across instances.
    WARNED_KEYS = []
def __new__(cls: _HumanData, *args: Any, **kwargs: Any) -> _HumanData:
    """New an instance of HumanData.

    Args:
        cls (HumanData): HumanData class.

    Returns:
        HumanData: An instance of HumanData.
    """
    # NOTE(review): args/kwargs are passed to dict.__new__ as two plain
    # positional objects (a tuple and a dict), not unpacked with */** —
    # dict.__new__ ignores extra arguments so this works, but unpacking
    # was probably intended. Confirm before changing.
    ret_human_data = super().__new__(cls, args, kwargs)
    # Private bookkeeping stored as attributes (not dict items):
    # __data_len__: frame count, -1 until known;
    # __key_strict__: whether unsupported keys raise;
    # __keypoints_compressed__: whether keypoint arrays are mask-compressed.
    setattr(ret_human_data, '__data_len__', -1)
    setattr(ret_human_data, '__key_strict__', False)
    setattr(ret_human_data, '__keypoints_compressed__', False)
    return ret_human_data
def set_logger(cls, logger: Union[logging.Logger, str, None] = None):
    """Bind a logger to the HumanData class.

    Args:
        logger (logging.Logger | str | None, optional):
            The way to print summary.
            See `mmcv.utils.print_log()` for details.
            Defaults to None.
    """
    cls.logger = logger
def fromfile(cls, npz_path: str) -> _HumanData:
    """Construct a HumanData instance from an npz file.

    Args:
        npz_path (str):
            Path to a dumped npz file.

    Returns:
        HumanData:
            A HumanData instance load from file.
    """
    instance = cls()
    instance.load(npz_path)
    return instance
def new(cls,
        source_dict: dict = None,
        key_strict: bool = False) -> _HumanData:
    """Construct a HumanData instance from a dict.

    Args:
        source_dict (dict, optional):
            A dict with items in HumanData fashion.
            Defaults to None.
        key_strict (bool, optional):
            Whether to raise error when setting unsupported keys.
            Defaults to False.

    Returns:
        HumanData:
            A HumanData instance.
    """
    instance = cls() if source_dict is None else cls(source_dict)
    instance.set_key_strict(key_strict)
    return instance
def get_key_strict(self) -> bool:
    """Report whether strict-key mode is enabled.

    Returns:
        bool:
            True if setting an unsupported key raises an error.
    """
    return self.__key_strict__
def set_key_strict(self, value: bool):
    """Enable or disable strict-key mode.

    Switching from non-strict to strict drops any unsupported keys
    already stored.

    Args:
        value (bool, optional):
            Whether to raise error when setting unsupported keys.
            Defaults to True.
    """
    was_strict = self.__key_strict__
    self.__key_strict__ = value
    # Identity checks preserved: only an exact False -> True transition
    # triggers the cleanup.
    if was_strict is False and value is True:
        self.pop_unsupported_items()
def check_keypoints_compressed(self) -> bool:
    """Tell whether the stored keypoint arrays are mask-compressed.

    Returns:
        bool:
            Whether the keypoints are compressed.
    """
    return self.__keypoints_compressed__
def load(self, npz_path: str):
    """Load data from npz_path and update them to self.

    Bookkeeping entries (``__key_strict__`` etc.) are restored as
    instance attributes rather than dict items.

    Args:
        npz_path (str):
            Path to a dumped npz file.
    """
    supported_keys = self.__class__.SUPPORTED_KEYS
    with np.load(npz_path, allow_pickle=True) as npz_file:
        tmp_data_dict = dict(npz_file)
        for key, value in list(tmp_data_dict.items()):
            if isinstance(value, np.ndarray) and\
                    len(value.shape) == 0:
                # value is not an ndarray before dump
                value = value.item()
            elif key in supported_keys and\
                    type(value) != supported_keys[key]['type']:
                # Cast back to the declared type (np.savez wrapped it
                # into an ndarray on dump).
                value = supported_keys[key]['type'](value)
            if value is None:
                tmp_data_dict.pop(key)
            elif key == '__key_strict__' or \
                    key == '__data_len__' or\
                    key == '__keypoints_compressed__':
                self.__setattr__(key, value)
                # pop the attributes to keep dict clean
                tmp_data_dict.pop(key)
            elif key == 'bbox_xywh' and value.shape[1] == 4:
                # Legacy bboxes without a confidence column: append 1.0.
                value = np.hstack([value, np.ones([value.shape[0], 1])])
                tmp_data_dict[key] = value
            else:
                tmp_data_dict[key] = value
        self.update(tmp_data_dict)
        self.__set_default_values__()
def dump(self, npz_path: str, overwrite: bool = True):
    """Dump keys and items to an npz file.

    Args:
        npz_path (str):
            Path to a dumped npz file.
        overwrite (bool, optional):
            Whether to overwrite if there is already a file.
            Defaults to True.

    Raises:
        ValueError:
            npz_path does not end with '.npz'.
        FileExistsError:
            When overwrite is False and file exists.
    """
    if not check_path_suffix(npz_path, ['.npz']):
        raise ValueError('Not an npz file.')
    if not overwrite:
        if check_path_existence(npz_path, 'file') == Existence.FileExist:
            raise FileExistsError
    # Persist the private bookkeeping attributes alongside the data so
    # that load() can restore them.
    dict_to_dump = {
        '__key_strict__': self.__key_strict__,
        '__data_len__': self.__data_len__,
        '__keypoints_compressed__': self.__keypoints_compressed__,
    }
    dict_to_dump.update(self)
    np.savez_compressed(npz_path, **dict_to_dump)
def get_sliced_cache(self, slice_size=10) -> List:
    """Slice the whole HumanData into pieces for HumanDataCacheWriter.

    Args:
        slice_size (int, optional):
            The length of each unit in HumanData cache.
            Defaults to 10.

    Returns:
        List:
            Two dicts for HumanDataCacheWriter.
            Init HumanDataCacheWriter by HumanDataCacheWriter(**Returns[0])
            and set data by
            human_data_cache_writer.update_sliced_dict(Returns[1]).
    """
    keypoints_info = {}
    non_sliced_data = {}
    sliced_data = {}
    slice_num = ceil(self.__data_len__ / slice_size)
    # Pre-create one sub-dict per slice, keyed by the slice index as str.
    for slice_index in range(slice_num):
        sliced_data[str(slice_index)] = {}
    # dim_dict maps each key to its slice dimension: None (not sliced),
    # an int, or a per-sub-key dict for nested dict values.
    dim_dict = self.__get_slice_dim__()
    for key, dim in dim_dict.items():
        # no dim to slice
        if dim is None:
            if key.startswith('keypoints') and\
                    (key.endswith('_mask') or
                     key.endswith('_convention')):
                # Keypoint metadata travels separately from the data.
                keypoints_info[key] = self[key]
            else:
                non_sliced_data[key] = self[key]
        elif isinstance(dim, dict):
            # Nested dict value: slice each sub-value on its own dim.
            value_dict = self.get_raw_value(key)
            non_sliced_sub_dict = {}
            for sub_key in value_dict.keys():
                sub_value = value_dict[sub_key]
                if dim[sub_key] is None:
                    non_sliced_sub_dict[sub_key] = sub_value
                else:
                    sub_dim = dim[sub_key]
                    # NOTE(review): this loop reuses the name
                    # `slice_index` from the setup loop above —
                    # harmless, but easy to misread.
                    for slice_index in range(slice_num):
                        slice_start = slice_index * slice_size
                        slice_end = min((slice_index + 1) * slice_size,
                                        self.__data_len__)
                        slice_range = slice(slice_start, slice_end)
                        sliced_sub_value = \
                            HumanData.__get_sliced_result__(
                                sub_value, sub_dim, slice_range
                            )
                        if key not in sliced_data[str(slice_index)]:
                            sliced_data[str(slice_index)][key] = {}
                        sliced_data[str(slice_index)][key][sub_key] = \
                            sliced_sub_value
            if len(non_sliced_sub_dict) > 0:
                non_sliced_data[key] = non_sliced_sub_dict
        else:
            value = self.get_raw_value(key)
            # slice as ndarray
            if isinstance(value, np.ndarray):
                slice_list = [
                    slice(None),
                ] * len(value.shape)
                for slice_index in range(slice_num):
                    slice_start = slice_index * slice_size
                    slice_end = min((slice_index + 1) * slice_size,
                                    self.__data_len__)
                    slice_list[dim] = slice(slice_start, slice_end)
                    sliced_value = value[tuple(slice_list)]
                    sliced_data[str(slice_index)][key] = sliced_value
            # slice as list/tuple
            else:
                for slice_index in range(slice_num):
                    slice_start = slice_index * slice_size
                    slice_end = min((slice_index + 1) * slice_size,
                                    self.__data_len__)
                    sliced_value = value[slice(slice_start, slice_end)]
                    sliced_data[str(slice_index)][key] = sliced_value
    writer_args_dict = {
        'slice_size': slice_size,
        'keypoints_info': keypoints_info,
        'data_len': self.data_len,
        'non_sliced_data': non_sliced_data,
        'key_strict': self.get_key_strict()
    }
    return writer_args_dict, sliced_data
def to(self,
       device: Optional[Union[torch.device, str]] = _CPU_DEVICE,
       dtype: Optional[torch.dtype] = None,
       non_blocking: Optional[bool] = False,
       copy: Optional[bool] = False,
       memory_format: Optional[torch.memory_format] = None) -> dict:
    """Convert values in numpy.ndarray type to torch.Tensor, and move
    Tensors to the target device. All keys will exist in the returned
    dict; non-array, non-tensor values are passed through unchanged.

    Args:
        device (Union[torch.device, str], optional):
            A specified device. Defaults to CPU_DEVICE.
        dtype (torch.dtype, optional):
            The data type of the expected torch.Tensor.
            If dtype is None, it is decided according to numpy.ndarry.
            Defaults to None.
        non_blocking (bool, optional):
            When non_blocking, tries to convert asynchronously with
            respect to the host if possible.
            Defaults to False.
        copy (bool, optional):
            When copy is set, a new Tensor is created even when
            the Tensor already matches the desired conversion.
            Defaults to False.
        memory_format (torch.memory_format, optional):
            The desired memory format of returned Tensor.
            Not supported by pytorch-cpu.
            Defaults to None.

    Returns:
        dict:
            A dict with all numpy.ndarray values converted into
            torch.Tensor and all Tensors moved to the target device.
    """
    ret_dict = {}
    for key in self.keys():
        raw = self.get_raw_value(key)
        if isinstance(raw, np.ndarray):
            # Clone so the tensor does not share memory with the array.
            tensor = torch.from_numpy(raw).clone()
        elif isinstance(raw, torch.Tensor):
            tensor = raw
        else:
            # Not convertible: pass through as-is.
            ret_dict[key] = raw
            continue
        extra = {}
        if memory_format is not None:
            extra['memory_format'] = memory_format
        ret_dict[key] = tensor.to(device, dtype, non_blocking, copy, **extra)
    return ret_dict
def __getitem__(self, key: _KT) -> _VT:
    """Get value defined by HumanData. This function will be called by
    self[key]. In keypoints_compressed mode, if the key contains
    'keypoints', an array with zero-padding at absent keypoint will be
    returned. Call self.get_raw_value(k) to get value without padding.

    Args:
        key (_KT):
            Key in HumanData.

    Returns:
        _VT:
            Value to the key.
    """
    value = super().__getitem__(key)
    if self.__keypoints_compressed__:
        mask_key = f'{key}_mask'
        # Only pad keypoint ndarrays that have a companion mask stored;
        # anything else is returned untouched.
        if key in self and \
                isinstance(value, np.ndarray) and \
                'keypoints' in key and \
                mask_key in self:
            mask_array = np.asarray(super().__getitem__(mask_key))
            value = \
                self.__class__.__add_zero_pad__(value, mask_array)
    return value
def get_raw_value(self, key: _KT) -> _VT:
    """Fetch the stored value without any keypoint zero-padding.

    Acts exactly like dict.__getitem__(k).

    Args:
        key (_KT):
            Key in dict.

    Returns:
        _VT:
            Value to the key.
    """
    return super().__getitem__(key)
def get_value_in_shape(self,
key: _KT,
shape: Union[list, tuple],
padding_constant: int = 0) -> np.ndarray:
"""Get value in a specific shape. For each dim, if the required shape
is smaller than current shape, ndarray will be sliced. Otherwise, it
will be padded with padding_constant at the end.
Args:
key (_KT):
Key in dict. The value of this key must be
an instance of numpy.ndarray.
shape (Union[list, tuple]):
Shape of the returned array. Its length
must be equal to value.ndim. Set -1 for
a dimension if you do not want to edit it.
padding_constant (int, optional):
The value to set the padded values for each axis.
Defaults to 0.
Raises:
ValueError:
A value in shape is neither positive integer nor -1.
Returns:
np.ndarray:
An array in required shape.
"""
value = self.get_raw_value(key)
assert isinstance(value, np.ndarray)
assert value.ndim == len(shape)
pad_width_list = []
slice_list = []
for dim_index in range(len(shape)):
if shape[dim_index] == -1:
# no pad or slice
pad_width_list.append((0, 0))
slice_list.append(slice(None))
elif shape[dim_index] > 0:
# valid shape value
wid = shape[dim_index] - value.shape[dim_index]
if wid > 0:
pad_width_list.append((0, wid))
else:
pad_width_list.append((0, 0))
slice_list.append(slice(0, shape[dim_index]))
else:
# invalid
raise ValueError
pad_value = np.pad(
value,
pad_width=pad_width_list,
mode='constant',
constant_values=padding_constant)
return pad_value[tuple(slice_list)]
# NOTE(review): the three stub definitions below look like typing
# @overload signatures for get_slice; the decorators appear to have been
# lost. As written, each plain redefinition shadows the previous one —
# confirm against the original source before relying on them.
def get_slice(self, stop: int):
    """Slice [0, stop, 1] of all sliceable values."""
    ...

def get_slice(self, start: int, stop: int):
    """Slice [start, stop, 1] of all sliceable values."""
    ...

def get_slice(self, start: int, stop: int, step: int):
    """Slice [start, stop, step] of all sliceable values."""
    ...
    def get_slice(self,
                  arg_0: int,
                  arg_1: Union[int, Any] = None,
                  step: int = 1) -> _HumanData:
        """Slice all sliceable values along major_dim dimension.

        Args:
            arg_0 (int):
                When arg_1 is None, arg_0 is stop and start=0.
                When arg_1 is not None, arg_0 is start.
            arg_1 (Union[int, Any], optional):
                None or where to stop.
                Defaults to None.
            step (int, optional):
                Length of step. Defaults to 1.

        Returns:
            HumanData:
                A new HumanData instance with sliced values.
        """
        # The sliced copy inherits this instance's strictness setting.
        ret_human_data = \
            HumanData.new(key_strict=self.get_key_strict())
        # Mimic built-in slice(): one positional arg means stop,
        # two mean start and stop.
        if arg_1 is None:
            start = 0
            stop = arg_0
        else:
            start = arg_0
            stop = arg_1
        slice_index = slice(start, stop, step)
        # Maps each key to its slice dimension: None = copy as-is,
        # dict = per-sub-key dims for dict values, int = axis to slice.
        dim_dict = self.__get_slice_dim__()
        for key, dim in dim_dict.items():
            # keys not expected be sliced
            if dim is None:
                ret_human_data[key] = self[key]
            elif isinstance(dim, dict):
                # dict value: slice each sub-value along its own dim
                value_dict = self.get_raw_value(key)
                sliced_dict = {}
                for sub_key in value_dict.keys():
                    sub_value = value_dict[sub_key]
                    if dim[sub_key] is None:
                        sliced_dict[sub_key] = sub_value
                    else:
                        sub_dim = dim[sub_key]
                        sliced_sub_value = \
                            HumanData.__get_sliced_result__(
                                sub_value, sub_dim, slice_index)
                        sliced_dict[sub_key] = sliced_sub_value
                ret_human_data[key] = sliced_dict
            else:
                value = self[key]
                sliced_value = \
                    HumanData.__get_sliced_result__(
                        value, dim, slice_index)
                ret_human_data[key] = sliced_value
        # check keypoints compressed
        # Preserve compression state: re-compress the result if this
        # instance stores its keypoints compressed.
        if self.check_keypoints_compressed():
            ret_human_data.compress_keypoints_by_mask()
        return ret_human_data
    def __get_slice_dim__(self) -> dict:
        """For each key in this HumanData, get the dimension for slicing. 0 for
        default, if no other value specified.

        Returns:
            dict:
                Keys are self.keys().
                Values indicate where to slice.
                None for not expected to be sliced or
                failed.
        """
        supported_keys = self.__class__.SUPPORTED_KEYS
        ret_dict = {}
        for key in self.keys():
            # keys not expected be sliced
            if key in supported_keys and \
                    'dim' in supported_keys[key] and \
                    supported_keys[key]['dim'] is None:
                ret_dict[key] = None
            else:
                value = self[key]
                if isinstance(value, dict) and len(value) > 0:
                    # dict value: resolve a slice dim per sub-key
                    ret_dict[key] = {}
                    for sub_key in value.keys():
                        try:
                            sub_value_len = len(value[sub_key])
                            if 'dim' in value:
                                # the dict itself declares its slice dim
                                ret_dict[key][sub_key] = value['dim']
                            elif sub_value_len != self.__data_len__:
                                # length mismatch: not sliceable
                                ret_dict[key][sub_key] = None
                            else:
                                ret_dict[key][sub_key] = 0
                        except TypeError:
                            # sub-value has no len(): not sliceable
                            ret_dict[key][sub_key] = None
                    # dict handled; move to next key
                    continue
                # instance cannot be sliced without len method
                try:
                    value_len = len(value)
                except TypeError:
                    ret_dict[key] = None
                    continue
                # slice on dim 0 by default
                slice_dim = 0
                if key in supported_keys and \
                        'dim' in supported_keys[key]:
                    slice_dim = \
                        supported_keys[key]['dim']
                data_len = value_len if slice_dim == 0 \
                    else value.shape[slice_dim]
                # dim not for slice
                if data_len != self.__data_len__:
                    ret_dict[key] = None
                    continue
                else:
                    ret_dict[key] = slice_dim
        return ret_dict
    def __setitem__(self, key: _KT, val: _VT) -> None:
        """Set self[key] to value. Only be called when using
        human_data[key] = val. Methods like update won't call __setitem__.

        In keypoints_compressed mode, if the key contains 'keypoints',
        and f'{key}_mask' is in self.keys(), invalid zeros
        will be removed before setting value.

        Args:
            key (_KT):
                Key in HumanData.
                Better be an element in HumanData.SUPPORTED_KEYS.
                If not, an Error will be raised in key_strict mode.
            val (_VT):
                Value to the key.

        Raises:
            KeyError:
                self.get_key_strict() is True and
                key cannot be found in
                HumanData.SUPPORTED_KEYS.
            ValueError:
                Value is supported but doesn't match definition.
            ValueError:
                self.check_keypoints_compressed() is True and
                mask of a keypoint item is missing.
        """
        self.__check_key__(key)
        self.__check_value__(key, val)
        # if it can be compressed by mask
        if self.__keypoints_compressed__:
            class_logger = self.__class__.logger
            if 'keypoints' in key and \
                    '_mask' in key:
                # masks are frozen while keypoints are compressed: they
                # record how to restore the zero-padded columns
                msg = 'Mask cannot be modified ' +\
                    'in keypoints_compressed mode.'
                print_log(msg=msg, logger=class_logger, level=logging.WARN)
                return
            elif isinstance(val, np.ndarray) and \
                    'keypoints' in key and \
                    '_mask' not in key:
                # incoming keypoints are full-size; strip masked-out
                # columns so stored data stays compressed
                mask_key = f'{key}_mask'
                if mask_key in self:
                    mask_array = np.asarray(super().__getitem__(mask_key))
                    val = \
                        self.__class__.__remove_zero_pad__(val, mask_array)
                else:
                    msg = f'Mask for {key} has not been set.' +\
                        f' Please set {mask_key} before compression.'
                    print_log(
                        msg=msg, logger=class_logger, level=logging.ERROR)
                    raise ValueError
        dict.__setitem__(self, key, val)
    def set_raw_value(self, key: _KT, val: _VT) -> None:
        """Set the raw value of self[key] to val after key check. It acts the
        same as dict.__setitem__(self, key, val) if the key satisfied
        constraints.

        Unlike ``__setitem__``, this skips the keypoints-compression logic.

        Args:
            key (_KT):
                Key in dict.
            val (_VT):
                Value to the key.

        Raises:
            KeyError:
                self.get_key_strict() is True and
                key cannot be found in
                HumanData.SUPPORTED_KEYS.
            ValueError:
                Value is supported but doesn't match definition.
        """
        self.__check_key__(key)
        self.__check_value__(key, val)
        # bypass this class's __setitem__ on purpose
        dict.__setitem__(self, key, val)
def pop_unsupported_items(self) -> None:
"""Find every item with a key not in HumanData.SUPPORTED_KEYS, and pop
it to save memory."""
for key in list(self.keys()):
if key not in self.__class__.SUPPORTED_KEYS:
self.pop(key)
    def __check_key__(self, key: Any) -> _KeyCheck:
        """Check whether the key matches definition in
        HumanData.SUPPORTED_KEYS.

        Args:
            key (Any):
                Key in HumanData.

        Returns:
            _KeyCheck:
                PASS, WARN or ERROR.

        Raises:
            KeyError:
                self.get_key_strict() is True and
                key cannot be found in
                HumanData.SUPPORTED_KEYS.
        """
        ret_key_check = _KeyCheck.PASS
        if self.get_key_strict():
            # strict mode: unknown keys are fatal
            if key not in self.__class__.SUPPORTED_KEYS:
                ret_key_check = _KeyCheck.ERROR
        else:
            if key not in self.__class__.SUPPORTED_KEYS and \
                    key not in self.__class__.WARNED_KEYS:
                # log warning message at the first time
                ret_key_check = _KeyCheck.WARN
                self.__class__.WARNED_KEYS.append(key)
        if ret_key_check == _KeyCheck.ERROR:
            raise KeyError(self.__class__.__get_key_error_msg__(key))
        elif ret_key_check == _KeyCheck.WARN:
            class_logger = self.__class__.logger
            if class_logger == 'silent':
                # 'silent' logger suppresses the warning entirely
                pass
            else:
                print_log(
                    msg=self.__class__.__get_key_warn_msg__(key),
                    logger=class_logger,
                    level=logging.WARN)
        return ret_key_check
def __check_value__(self, key: Any, val: Any) -> bool:
"""Check whether the value matches definition in
HumanData.SUPPORTED_KEYS.
Args:
key (Any):
Key in HumanData.
val (Any):
Value to the key.
Returns:
bool:
True for matched, ortherwise False.
Raises:
ValueError:
Value is supported but doesn't match definition.
"""
ret_bool = self.__check_value_type__(key, val) and\
self.__check_value_shape__(key, val) and\
self.__check_value_len__(key, val)
if not ret_bool:
raise ValueError(self.__class__.__get_value_error_msg__())
return ret_bool
    def __check_value_type__(self, key: Any, val: Any) -> bool:
        """Check whether the type of val matches definition in
        HumanData.SUPPORTED_KEYS.

        Args:
            key (Any):
                Key in HumanData.
            val (Any):
                Value to the key.

        Returns:
            bool:
                If type doesn't match, return False.
                Else return True.
        """
        ret_bool = True
        supported_keys = self.__class__.SUPPORTED_KEYS
        # check definition
        if key in supported_keys:
            # check type
            # deliberate exact-type check (not isinstance): subclasses of
            # the declared type are rejected too
            if type(val) != supported_keys[key]['type']:
                ret_bool = False
        if not ret_bool:
            expected_type = supported_keys[key]['type']
            err_msg = 'Type check Failed:\n'
            err_msg += f'key={str(key)}\n'
            err_msg += f'type(val)={type(val)}\n'
            err_msg += f'expected type={expected_type}\n'
            print_log(
                msg=err_msg, logger=self.__class__.logger, level=logging.ERROR)
        return ret_bool
    def __check_value_shape__(self, key: Any, val: Any) -> bool:
        """Check whether the shape of val matches definition in
        HumanData.SUPPORTED_KEYS.

        Args:
            key (Any):
                Key in HumanData.
            val (Any):
                Value to the key.

        Returns:
            bool:
                If expected shape is defined and doesn't match,
                return False.
                Else return True.
        """
        ret_bool = True
        supported_keys = self.__class__.SUPPORTED_KEYS
        # check definition
        if key in supported_keys:
            # check shape
            if 'shape' in supported_keys[key]:
                val_shape = val.shape
                for shape_ind in range(len(supported_keys[key]['shape'])):
                    # length not match
                    if shape_ind >= len(val_shape):
                        ret_bool = False
                        break
                    expect_val = supported_keys[key]['shape'][shape_ind]
                    # value not match
                    # non-positive entries (-1) act as wildcard dims
                    if expect_val > 0 and \
                            expect_val != val_shape[shape_ind]:
                        ret_bool = False
                        break
        if not ret_bool:
            # ret_bool only turns False inside the shape branch above,
            # so val_shape is always bound here
            expected_shape = str(supported_keys[key]['shape'])
            expected_shape = expected_shape.replace('-1', 'Any')
            err_msg = 'Shape check Failed:\n'
            err_msg += f'key={str(key)}\n'
            err_msg += f'val.shape={val_shape}\n'
            err_msg += f'expected shape={expected_shape}\n'
            print_log(
                msg=err_msg, logger=self.__class__.logger, level=logging.ERROR)
        return ret_bool
    # NOTE(review): later code accesses `self.data_len` without calling it
    # (see __check_value_len__), so this is a @property getter in the
    # original source — the decorator appears lost in this view; confirm.
    def data_len(self) -> int:
        """Get the temporal length of this HumanData instance.

        Returns:
            int:
                Number of frames related to this instance.
        """
        return self.__data_len__
    # NOTE(review): presumably decorated with @data_len.setter in the
    # original source (decorator lost in this view) — confirm upstream.
    def data_len(self, value: int):
        """Set the temporal length of this HumanData instance.

        Args:
            value (int):
                Number of frames related to this instance.
        """
        self.__data_len__ = value
    def __check_value_len__(self, key: Any, val: Any) -> bool:
        """Check whether the temporal length of val matches other values.

        Args:
            key (Any):
                Key in HumanData.
            val (Any):
                Value to the key.

        Returns:
            bool:
                If temporal dim is defined and temporal length doesn't match,
                return False.
                Else return True.
        """
        ret_bool = True
        supported_keys = self.__class__.SUPPORTED_KEYS
        # check definition
        if key in supported_keys:
            # check temporal length
            if 'dim' in supported_keys[key] and \
                    supported_keys[key]['dim'] is not None:
                val_slice_dim = supported_keys[key]['dim']
                if supported_keys[key]['type'] == dict:
                    # dict values delegate their length to one sub-key
                    slice_key = supported_keys[key]['slice_key']
                    val_data_len = val[slice_key].shape[val_slice_dim]
                else:
                    val_data_len = val.shape[val_slice_dim]
                # `data_len` is used as a property here (getter/setter above)
                if self.data_len < 0:
                    # no data_len yet, assign a new one
                    self.data_len = val_data_len
                else:
                    # check if val_data_len matches recorded data_len
                    if self.data_len != val_data_len:
                        ret_bool = False
        if not ret_bool:
            err_msg = 'Temporal check Failed:\n'
            err_msg += f'key={str(key)}\n'
            err_msg += f'val\'s data_len={val_data_len}\n'
            err_msg += f'expected data_len={self.data_len}\n'
            print_log(
                msg=err_msg, logger=self.__class__.logger, level=logging.ERROR)
        return ret_bool
    def generate_mask_from_confidence(self, keys=None) -> None:
        """Generate mask from keypoints' confidence. Keypoints that have zero
        confidence in all occurrences will have a zero mask. Note that the last
        value of the keypoint is assumed to be confidence.

        Args:
            keys: None, str, or list of str.
                None: all keys with `keypoint` in it will have mask
                    generated from their confidence.
                str: key of the keypoint, the mask has name f'{key}_mask'
                list of str: a list of keys of the keypoints.
                    Generate mask for multiple keypoints.
                Defaults to None.

        Returns:
            None

        Raises:
            TypeError:
                `keys` is neither None, str nor list of str.
        """
        if keys is None:
            # auto-discover every keypoints array (excluding masks)
            keys = []
            for key in self.keys():
                val = self.get_raw_value(key)
                if isinstance(val, np.ndarray) and \
                        'keypoints' in key and \
                        '_mask' not in key:
                    keys.append(key)
        elif isinstance(keys, str):
            keys = [keys]
        elif isinstance(keys, list):
            for key in keys:
                assert isinstance(key, str)
        else:
            raise TypeError(f'`Keys` must be None, str, or list of str, '
                            f'got {type(keys)}.')
        update_dict = {}
        for kpt_key in keys:
            kpt_array = self.get_raw_value(kpt_key)
            num_joints = kpt_array.shape[-2]
            # if all conf of a joint are zero, this joint is masked
            # last channel is assumed to be confidence; flatten all leading
            # dims so each column is one joint across every occurrence
            joint_conf = kpt_array[..., -1].reshape(-1, num_joints)
            mask_array = (joint_conf > 0).astype(np.uint8).max(axis=0)
            assert len(mask_array) == num_joints
            # generate mask
            update_dict[f'{kpt_key}_mask'] = mask_array
        self.update(update_dict)
    def compress_keypoints_by_mask(self) -> None:
        """If a key contains 'keypoints', and f'{key}_mask' is in self.keys(),
        invalid zeros will be removed and f'{key}_mask' will be locked.

        Raises:
            KeyError:
                A key contains 'keypoints' has been found
                but its corresponding mask is missing.
        """
        # compressing twice would corrupt data
        assert self.__keypoints_compressed__ is False
        key_pairs = []
        for key in self.keys():
            mask_key = f'{key}_mask'
            val = self.get_raw_value(key)
            if isinstance(val, np.ndarray) and \
                    'keypoints' in key and \
                    '_mask' not in key:
                if mask_key in self:
                    key_pairs.append([key, mask_key])
                else:
                    msg = f'Mask for {key} has not been set.' +\
                        f'Please set {mask_key} before compression.'
                    raise KeyError(msg)
        compressed_dict = {}
        for kpt_key, mask_key in key_pairs:
            kpt_array = self.get_raw_value(kpt_key)
            mask_array = np.asarray(self.get_raw_value(mask_key))
            compressed_kpt = \
                self.__class__.__remove_zero_pad__(kpt_array, mask_array)
            compressed_dict[kpt_key] = compressed_kpt
        # set value after all pairs are compressed
        self.update(compressed_dict)
        # flag set last: __setitem__ behaves differently once active
        self.__keypoints_compressed__ = True
    def decompress_keypoints(self) -> None:
        """If a key contains 'keypoints', and f'{key}_mask' is in self.keys(),
        invalid zeros will be inserted to the right places and f'{key}_mask'
        will be unlocked.

        Raises:
            KeyError:
                A key contains 'keypoints' has been found
                but its corresponding mask is missing.
        """
        # only compressed instances can be decompressed
        assert self.__keypoints_compressed__ is True
        key_pairs = []
        for key in self.keys():
            mask_key = f'{key}_mask'
            val = self.get_raw_value(key)
            if isinstance(val, np.ndarray) and \
                    'keypoints' in key and \
                    '_mask' not in key:
                if mask_key in self:
                    key_pairs.append([key, mask_key])
                else:
                    class_logger = self.__class__.logger
                    msg = f'Mask for {key} has not been found.' +\
                        f'Please remove {key} before decompression.'
                    print_log(
                        msg=msg, logger=class_logger, level=logging.ERROR)
                    raise KeyError
        decompressed_dict = {}
        for kpt_key, mask_key in key_pairs:
            mask_array = np.asarray(self.get_raw_value(mask_key))
            compressed_kpt = self.get_raw_value(kpt_key)
            # restore zero columns where the mask is 0
            kpt_array = \
                self.__class__.__add_zero_pad__(compressed_kpt, mask_array)
            decompressed_dict[kpt_key] = kpt_array
        # set value after all pairs are decompressed
        self.update(decompressed_dict)
        self.__keypoints_compressed__ = False
    def dump_by_pickle(self, pkl_path: str, overwrite: bool = True) -> None:
        """Dump keys and items to a pickle file. It's a secondary dump method,
        when a HumanData instance is too large to be dumped by self.dump()

        Args:
            pkl_path (str):
                Path to a dumped pickle file.
            overwrite (bool, optional):
                Whether to overwrite if there is already a file.
                Defaults to True.

        Raises:
            ValueError:
                pkl_path does not end with '.pkl'.
            FileExistsError:
                When overwrite is False and file exists.
        """
        if not check_path_suffix(pkl_path, ['.pkl']):
            raise ValueError('Not an pkl file.')
        if not overwrite:
            if check_path_existence(pkl_path, 'file') == Existence.FileExist:
                raise FileExistsError
        # bundle the private attributes with the payload so that
        # load_by_pickle can restore them later
        dict_to_dump = {
            '__key_strict__': self.__key_strict__,
            '__data_len__': self.__data_len__,
            '__keypoints_compressed__': self.__keypoints_compressed__,
        }
        dict_to_dump.update(self)
        with open(pkl_path, 'wb') as f_writeb:
            pickle.dump(
                dict_to_dump, f_writeb, protocol=pickle.HIGHEST_PROTOCOL)
    def load_by_pickle(self, pkl_path: str) -> None:
        """Load data from pkl_path and update them to self.

        When a HumanData Instance was dumped by
        self.dump_by_pickle(), use this to load.

        Args:
            pkl_path (str):
                Path to a dumped pickle file.
        """
        with open(pkl_path, 'rb') as f_readb:
            tmp_data_dict = pickle.load(f_readb)
        for key, value in list(tmp_data_dict.items()):
            if value is None:
                # None values are dropped rather than stored
                tmp_data_dict.pop(key)
            elif key == '__key_strict__' or \
                    key == '__data_len__' or\
                    key == '__keypoints_compressed__':
                # restore private attributes saved by dump_by_pickle
                self.__setattr__(key, value)
                # pop the attributes to keep dict clean
                tmp_data_dict.pop(key)
            elif key == 'bbox_xywh' and value.shape[1] == 4:
                # legacy data without confidence: append a column of ones
                value = np.hstack([value, np.ones([value.shape[0], 1])])
                tmp_data_dict[key] = value
            else:
                tmp_data_dict[key] = value
        self.update(tmp_data_dict)
        self.__set_default_values__()
    def __set_default_values__(self) -> None:
        """For older versions of HumanData, call this method to apply missing
        values (also attributes)."""
        supported_keys = self.__class__.SUPPORTED_KEYS
        # infer __data_len__ from the first sliceable supported value
        if self.__data_len__ == -1:
            for key in supported_keys:
                if key in self and \
                        'dim' in supported_keys[key] and\
                        supported_keys[key]['dim'] is not None:
                    if 'slice_key' in supported_keys[key] and\
                            supported_keys[key]['type'] == dict:
                        # dict values measure length via their slice_key
                        sub_key = supported_keys[key]['slice_key']
                        slice_dim = supported_keys[key]['dim']
                        self.__data_len__ = \
                            self[key][sub_key].shape[slice_dim]
                    else:
                        slice_dim = supported_keys[key]['dim']
                        self.__data_len__ = self[key].shape[slice_dim]
                    break
        # supply the default convention for keypoints lacking one
        for key in list(self.keys()):
            convention_key = f'{key}_convention'
            if key.startswith('keypoints') and \
                    not key.endswith('_mask') and \
                    not key.endswith('_convention') and \
                    convention_key not in self:
                self[convention_key] = 'human_data'
    # NOTE(review): first parameter is `cls` — presumably a @classmethod in
    # the original source (decorator lost in this view); confirm upstream.
    def concatenate(cls, human_data_0: _HumanData,
                    human_data_1: _HumanData) -> _HumanData:
        """Concatenate two human_data. All keys will be kept it the returned
        human_data. If either value from human_data_0 or human_data_1 matches
        data_len from its HumanData, the two values will be concatenated as a
        single value. If not, postfix will be added to the key to specify
        source of the value.

        Args:
            human_data_0 (_HumanData)
            human_data_1 (_HumanData)

        Returns:
            _HumanData:
                A new human_data instance with all concatenated data.
        """
        # result is non-strict so postfixed keys like f'{key}_0' are legal
        ret_human_data = cls.new(key_strict=False)
        set_0 = set(human_data_0.keys())
        set_1 = set(human_data_1.keys())
        common_keys = set_0.intersection(set_1)
        dim_dict_0 = human_data_0.__get_slice_dim__()
        dim_dict_1 = human_data_1.__get_slice_dim__()
        for key in common_keys:
            value_0 = human_data_0[key]
            value_1 = human_data_1[key]
            # align type
            value_0 = list(value_0) if isinstance(value_0, tuple)\
                else value_0
            value_1 = list(value_1) if isinstance(value_1, tuple)\
                else value_1
            assert type(value_0) == type(value_1)
            # align convention
            if key.startswith('keypoints') and\
                    key.endswith('_convention'):
                # conventions must agree; keep the shared one
                assert value_0 == value_1
                ret_human_data[key] = value_0
                continue
            # mask_0 and mask_1
            elif key.startswith('keypoints') and\
                    key.endswith('_mask'):
                # element-wise AND of the two 0/1 masks
                new_mask = value_0 * value_1
                ret_human_data[key] = new_mask
                continue
            # go through the sub dict
            if isinstance(value_0, dict):
                sub_dict = {}
                for sub_key, sub_value_0 in value_0.items():
                    # only found in value_0
                    if sub_key not in value_1:
                        sub_dict[sub_key] = sub_value_0
                    # found in both values
                    else:
                        sub_value_1 = value_1[sub_key]
                        concat_sub_dict = cls.__concat_value__(
                            key=sub_key,
                            value_0=sub_value_0,
                            dim_0=dim_dict_0[key][sub_key],
                            value_1=sub_value_1,
                            dim_1=dim_dict_1[key][sub_key])
                        sub_dict.update(concat_sub_dict)
                for sub_key, sub_value_1 in value_1.items():
                    if sub_key not in value_0:
                        sub_dict[sub_key] = sub_value_1
                ret_human_data[key] = sub_dict
            # try concat
            else:
                concat_dict = cls.__concat_value__(
                    key=key,
                    value_0=value_0,
                    dim_0=dim_dict_0[key],
                    value_1=value_1,
                    dim_1=dim_dict_1[key])
                ret_human_data.update(concat_dict)
        # check exclusive keys
        for key, value in human_data_0.items():
            if key not in common_keys:
                # value not for concat and slice
                if dim_dict_0[key] is None:
                    ret_human_data[key] = value
                # value aligned with data_len of HumanData_0
                else:
                    ret_human_data[f'{key}_0'] = value
        for key, value in human_data_1.items():
            if key not in common_keys:
                # same as above
                if dim_dict_1[key] is None:
                    ret_human_data[key] = value
                else:
                    ret_human_data[f'{key}_1'] = value
        return ret_human_data
def __concat_value__(cls, key: Any, value_0: Any, value_1: Any,
dim_0: Union[None, int], dim_1: Union[None,
int]) -> dict:
"""Concat two values from two different HumanData.
Args:
key (Any):
The common key of the two values.
value_0 (Any):
Value from 0.
value_1 (Any):
Value from 1.
dim_0 (Union[None, int]):
The dim for concat and slice. None for N/A.
dim_1 (Union[None, int]):
The dim for concat and slice. None for N/A.
Returns:
dict:
Dict for concatenated result.
"""
ret_dict = {}
if dim_0 is None or dim_1 is None:
ret_dict[f'{key}_0'] = value_0
ret_dict[f'{key}_1'] = value_1
elif isinstance(value_0, list):
ret_dict[key] = value_0 + value_1
# elif isinstance(value_0, np.ndarray):
else:
ret_dict[key] = np.concatenate((value_0, value_1), axis=dim_0)
return ret_dict
def __add_zero_pad__(cls, compressed_array: np.ndarray,
mask_array: np.ndarray) -> np.ndarray:
"""Pad zeros to a compressed keypoints array.
Args:
compressed_array (np.ndarray):
A compressed keypoints array.
mask_array (np.ndarray):
The mask records compression relationship.
Returns:
np.ndarray:
A keypoints array in full-size.
"""
assert mask_array.sum() == compressed_array.shape[1]
data_len, _, dim = compressed_array.shape
mask_len = mask_array.shape[0]
ret_value = np.zeros(
shape=[data_len, mask_len, dim], dtype=compressed_array.dtype)
valid_mask_index = np.where(mask_array == 1)[0]
ret_value[:, valid_mask_index, :] = compressed_array
return ret_value
def __remove_zero_pad__(cls, zero_pad_array: np.ndarray,
mask_array: np.ndarray) -> np.ndarray:
"""Remove zero-padding from a full-size keypoints array.
Args:
zero_pad_array (np.ndarray):
A keypoints array in full-size.
mask_array (np.ndarray):
The mask records compression relationship.
Returns:
np.ndarray:
A compressed keypoints array.
"""
assert mask_array.shape[0] == zero_pad_array.shape[1]
valid_mask_index = np.where(mask_array == 1)[0]
ret_value = np.take(zero_pad_array, valid_mask_index, axis=1)
return ret_value
def __get_key_warn_msg__(cls, key: Any) -> str:
"""Get the warning message when a key fails the check.
Args:
key (Any):
The key with wrong.
Returns:
str:
The warning message.
"""
class_name = cls.__name__
warn_message = \
f'{key} is absent in' +\
f' {class_name}.SUPPORTED_KEYS.\n'
suggestion_message = \
'Ignore this if you know exactly' +\
' what you are doing.\n' +\
'Otherwise, Call self.set_key_strict(True)' +\
' to avoid wrong keys.\n'
return warn_message + suggestion_message
def __get_key_error_msg__(cls, key: Any) -> str:
"""Get the error message when a key fails the check.
Args:
key (Any):
The key with wrong.
Returns:
str:
The error message.
"""
class_name = cls.__name__
absent_message = \
f'{key} is absent in' +\
f' {class_name}.SUPPORTED_KEYS.\n'
suggestion_message = \
'Call self.set_key_strict(False)' +\
' to allow unsupported keys.\n'
return absent_message + suggestion_message
def __get_value_error_msg__(cls) -> str:
"""Get the error message when a value fails the check.
Returns:
str:
The error message.
"""
error_message = \
'An supported value doesn\'t ' +\
'match definition.\n'
suggestion_message = \
'See error log for details.\n'
return error_message + suggestion_message
def __get_sliced_result__(
cls, input_data: Union[np.ndarray, list, tuple], slice_dim: int,
slice_range: slice) -> Union[np.ndarray, list, tuple]:
"""Slice input_data along slice_dim with slice_range.
Args:
input_data (Union[np.ndarray, list, tuple]):
Data to be sliced.
slice_dim (int):
Dimension to be sliced.
slice_range (slice):
An instance of class slice.
Returns:
Union[np.ndarray, list, tuple]:
A slice of input_data.
"""
if isinstance(input_data, np.ndarray):
slice_list = [
slice(None),
] * len(input_data.shape)
slice_list[slice_dim] = slice_range
sliced_data = input_data[tuple(slice_list)]
else:
sliced_data = \
input_data[slice_range]
return sliced_data
def smooth_process(x,
                   smooth_type='savgol',
                   cfg_base_dir='configs/_base_/post_processing/'):
    """Smooth the array with the specified smoothing type.

    Args:
        x (np.ndarray): Shape should be (frame,num_person,K,C)
            or (frame,K,C).
        smooth_type (str, optional): Smooth type.
            choose in ['oneeuro', 'gaus1d', 'savgol','smoothnet',
            'smoothnet_windowsize8','smoothnet_windowsize16',
            'smoothnet_windowsize32','smoothnet_windowsize64'].
            Defaults to 'savgol'. 'smoothnet' is default with windowsize=8.
        cfg_base_dir (str, optional): Config base dir,
            default configs/_base_/post_processing/

    Raises:
        ValueError: check the input smoothing type.

    Returns:
        np.ndarray: Smoothed data. The shape should be
            (frame,num_person,K,C) or (frame,K,C).
    """
    # 'smoothnet' is an alias for the smallest window size
    if smooth_type == 'smoothnet':
        smooth_type = 'smoothnet_windowsize8'
    assert smooth_type in [
        'oneeuro', 'gaus1d', 'savgol', 'smoothnet_windowsize8',
        'smoothnet_windowsize16', 'smoothnet_windowsize32',
        'smoothnet_windowsize64'
    ]
    # each smooth type maps to a config file named after it
    cfg = os.path.join(cfg_base_dir, smooth_type + '.py')
    if isinstance(cfg, str):
        cfg = mmcv.Config.fromfile(cfg)
    elif not isinstance(cfg, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(cfg)}')
    # copy so the caller's array is never modified in place
    x = x.copy()
    assert x.ndim == 3 or x.ndim == 4
    smooth_func = build_post_processing(dict(cfg['smooth_cfg']))
    if x.ndim == 4:
        # smooth each person's sequence independently
        for i in range(x.shape[1]):
            x[:, i] = smooth_func(x[:, i])
    elif x.ndim == 3:
        x = smooth_func(x)
    return x
def speed_up_process(x,
                     speed_up_type='deciwatch',
                     cfg_base_dir='configs/_base_/post_processing/'):
    """Speed up the process with the specified speed up type.

    Args:
        x (torch.Tensor): Input sequence to complete.
            NOTE(review): the original docstring said np.ndarray with shape
            (frame,num_person,K,C) or (frame,K,C), but the code calls
            ``x.clone()`` / ``x.device`` (torch.Tensor API) and asserts
            ndim 4 or 5 — presumably there is an extra leading dim;
            confirm against callers.
        speed_up_type (str, optional): Speed up type.
            choose in ['deciwatch',
            'deciwatch_interval5_q1',
            'deciwatch_interval5_q2',
            'deciwatch_interval5_q3',
            'deciwatch_interval5_q4',
            'deciwatch_interval5_q5',
            'deciwatch_interval10_q1',
            'deciwatch_interval10_q2',
            'deciwatch_interval10_q3',
            'deciwatch_interval10_q4',
            'deciwatch_interval10_q5',]. Defaults to 'deciwatch'.
        cfg_base_dir (str, optional): Config base dir.
            Defaults to 'configs/_base_/post_processing/'

    Raises:
        ValueError: check the input speed up type.

    Returns:
        np.ndarray: Completed data (moved to CPU and converted).
    """
    # 'deciwatch' is an alias for the interval-5 / q3 variant
    if speed_up_type == 'deciwatch':
        speed_up_type = 'deciwatch_interval5_q3'
    assert speed_up_type in [
        'deciwatch_interval5_q1',
        'deciwatch_interval5_q2',
        'deciwatch_interval5_q3',
        'deciwatch_interval5_q4',
        'deciwatch_interval5_q5',
        'deciwatch_interval10_q1',
        'deciwatch_interval10_q2',
        'deciwatch_interval10_q3',
        'deciwatch_interval10_q4',
        'deciwatch_interval10_q5',
    ]
    # each speed up type maps to a config file named after it
    cfg = os.path.join(cfg_base_dir, speed_up_type + '.py')
    if isinstance(cfg, str):
        cfg = mmcv.Config.fromfile(cfg)
    elif not isinstance(cfg, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(cfg)}')
    # clone so the caller's tensor is never modified in place
    x = x.clone()
    assert x.ndim == 4 or x.ndim == 5
    cfg_dict = cfg['speed_up_cfg']
    # run the post-processing model on the same device as the input
    cfg_dict['device'] = x.device
    speed_up_func = build_post_processing(cfg_dict)
    if x.ndim == 5:
        # process each person's sequence independently
        for i in range(x.shape[1]):
            x[:, i] = speed_up_func(x[:, i])
    elif x.ndim == 4:
        x = speed_up_func(x)
    return np.array(x.cpu())
def get_speed_up_interval(speed_up_type,
                          cfg_base_dir='configs/_base_/post_processing/'):
    """Get the frame interval of a specific speed up type.

    Args:
        speed_up_type (str): Speed up type. One of 'deciwatch' or
            'deciwatch_interval{5,10}_q{1..5}'. 'deciwatch' is an alias
            for 'deciwatch_interval5_q3'.
        cfg_base_dir (str, optional): Config base dir,
            default configs/_base_/post_processing/

    Raises:
        AssertionError: unknown speed up type.
        TypeError: loaded config is neither a filename nor a Config.

    Returns:
        int: speed up interval read from the config file.
    """
    if speed_up_type == 'deciwatch':
        speed_up_type = 'deciwatch_interval5_q3'
    # all supported deciwatch variants: interval 5/10 x q1..q5
    valid_types = {
        f'deciwatch_interval{interval}_q{q}'
        for interval in (5, 10) for q in range(1, 6)
    }
    assert speed_up_type in valid_types
    # each speed up type maps to a config file named after it
    cfg = os.path.join(cfg_base_dir, speed_up_type + '.py')
    if isinstance(cfg, str):
        cfg = mmcv.Config.fromfile(cfg)
    elif not isinstance(cfg, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(cfg)}')
    return cfg['speed_up_cfg']['interval']
def speed_up_interpolate(selected_frames, speed_up_frames, smpl_poses,
                         smpl_betas, pred_cams, bboxes_xyxy):
    """Interpolate smpl_betas, pred_cams, and bboxes_xyxyx for speed up.

    Args:
        selected_frames (np.ndarray): Shape should be (selected frame number).
        speed_up_frames (int): Total speed up frame number
        smpl_poses (np.ndarray): selected frame smpl poses parameter.
            NOTE(review): returned unmodified — poses are presumably
            completed elsewhere (by the deciwatch network); confirm.
        smpl_betas (np.ndarray): selected frame smpl shape paeameter
        pred_cams (np.ndarray): selected frame camera parameter
        bboxes_xyxy (np.ndarray): selected frame bbox

    Returns:
        smpl_poses (np.ndarray): interpolated frame smpl poses parameter
        smpl_betas (np.ndarray): interpolated frame smpl shape paeameter
        pred_cams (np.ndarray): interpolated frame camera parameter
        bboxes_xyxy (np.ndarray): interpolated frame bbox
    """
    # keep only key frames inside the speed-up range
    selected_frames = selected_frames[selected_frames <= speed_up_frames]
    # NOTE(review): each interp1d result is evaluated on
    # np.arange(0, max(selected_frames)) and written into the first
    # speed_up_frames rows — this assumes max(selected_frames) equals
    # speed_up_frames; confirm with the caller's frame selection.
    pred_cams[:speed_up_frames, :] = interpolate.interp1d(
        selected_frames, pred_cams[selected_frames, :], kind='linear', axis=0)(
            np.arange(0, max(selected_frames)))
    bboxes_xyxy[:speed_up_frames, :] = interpolate.interp1d(
        selected_frames,
        bboxes_xyxy[selected_frames, :],
        kind='linear',
        axis=0)(
            np.arange(0, max(selected_frames)))
    smpl_betas[:speed_up_frames, :] = interpolate.interp1d(
        selected_frames, smpl_betas[selected_frames, :], kind='linear',
        axis=0)(
            np.arange(0, max(selected_frames)))
    return smpl_poses, smpl_betas, pred_cams, bboxes_xyxy
def extract_feature_sequence(extracted_results,
                             frame_idx,
                             causal,
                             seq_len,
                             step=1):
    """Build a fixed-length frame window around the target frame, repeating
    the first/last frames as padding at video boundaries.

    Args:
        extracted_results (List[List[Dict]]): Multi-frame feature extraction
            results stored in a nested list, one element per frame; each
            inner list holds per-person feature dicts.
        frame_idx (int): The index of the target frame in the original video.
        causal (bool): If True, the target frame is the first frame of the
            window. Otherwise it sits in the middle of the window.
        seq_len (int): The number of frames in the returned window.
        step (int): Stride used to sample frames from the video.

    Returns:
        List[List[Dict]]: Feature extraction results for exactly ``seq_len``
            frames.
    """
    # how many sampled frames we want before / after the target
    if causal:
        left, right = 0, seq_len - 1
    else:
        left = (seq_len - 1) // 2
        right = left
    total = len(extracted_results)
    # boundary repetitions needed on each side of the window
    pad_left = max(0, left - frame_idx // step)
    pad_right = max(0, right - (total - 1 - frame_idx) // step)
    # in-range slice boundaries, aligned so the stride hits frame_idx
    start = max(frame_idx % step, frame_idx - left * step)
    end = min(total - (total - 1 - frame_idx) % step,
              frame_idx + right * step + 1)
    window = extracted_results[start:end:step]
    return ([extracted_results[0]] * pad_left
            + window
            + [extracted_results[-1]] * pad_right)
def array_to_images(
    image_array: np.ndarray,
    output_folder: str,
    img_format: str = '%06d.png',
    resolution: Optional[Union[Tuple[int, int], Tuple[float, float]]] = None,
    disable_log: bool = False,
) -> None:
    """Convert an array to images directly by piping frames into ffmpeg.

    Args:
        image_array (np.ndarray): shape should be (f * h * w * 3),
            BGR channel order (matching OpenCV).
        output_folder (str): output folder for the images.
        img_format (str, optional): format of the images.
            Defaults to '%06d.png'.
        resolution (Optional[Union[Tuple[int, int], Tuple[float, float]]],
            optional): resolution(height, width) of output.
            Defaults to None.
            NOTE(review): this only changes the size passed to ffmpeg's
            rawvideo demuxer — if it differs from the actual frame shape
            the raw bytes will be misinterpreted; confirm intended usage.
        disable_log (bool, optional): whether close the ffmepg command info.
            Defaults to False.

    Raises:
        FileNotFoundError: check output folder.
        TypeError: check input array.
        BrokenPipeError: ffmpeg pipes could not be opened.

    Returns:
        None
    """
    prepare_output_path(
        output_folder,
        allowed_suffix=[],
        tag='output image folder',
        path_type='dir',
        overwrite=True)
    if not isinstance(image_array, np.ndarray):
        raise TypeError('Input should be np.ndarray.')
    assert image_array.ndim == 4
    assert image_array.shape[-1] == 3
    if resolution:
        height, width = resolution
    else:
        height, width = image_array.shape[1], image_array.shape[2]
    # ffmpeg reads raw BGR frames from stdin and writes numbered images
    command = [
        'ffmpeg',
        '-y',  # (optional) overwrite output file if it exists
        '-f',
        'rawvideo',
        '-s',
        f'{int(width)}x{int(height)}',  # size of one frame
        '-pix_fmt',
        'bgr24',  # bgr24 for matching OpenCV
        '-loglevel',
        'error',
        '-threads',
        '4',
        '-i',
        '-',  # The input comes from a pipe
        '-f',
        'image2',
        '-start_number',
        '0',
        os.path.join(output_folder, img_format),
    ]
    if not disable_log:
        print(f'Running \"{" ".join(command)}\"')
    process = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stderr=subprocess.PIPE,
        bufsize=10**8,
        close_fds=True)
    if process.stdin is None or process.stderr is None:
        raise BrokenPipeError('No buffer received.')
    # stream one frame at a time to bound memory use
    index = 0
    while True:
        if index >= image_array.shape[0]:
            break
        process.stdin.write(image_array[index].tobytes())
        index += 1
    process.stdin.close()
    process.stderr.close()
    process.wait()
def rotmat_to_aa(
    matrix: Union[torch.Tensor, numpy.ndarray]
) -> Union[torch.Tensor, numpy.ndarray]:
    """Convert rotation matrices to axis angles.

    Args:
        matrix (Union[torch.Tensor, numpy.ndarray]): input shape
            should be (..., 3, 3). ndim of input is unlimited.

    Returns:
        Union[torch.Tensor, numpy.ndarray]: axis-angle vectors of
            shape (..., 3).

    Raises:
        ValueError: if the trailing two dimensions are not (3, 3).
    """
    if matrix.shape[-1] != 3 or matrix.shape[-2] != 3:
        # Fixed: the message used to contain a stray literal 'f'
        # ("shape f(...)") from a misplaced f-string prefix.
        raise ValueError(f'Invalid rotation matrix shape {matrix.shape}.')
    # Convert via quaternions: rotmat -> quaternion -> axis-angle.
    t = Compose([matrix_to_quaternion, quaternion_to_axis_angle])
    return t(matrix)
The provided code snippet includes necessary dependencies for implementing the `multi_person_with_mmtracking` function. Write a Python function `def multi_person_with_mmtracking(args, frames_iter)` to solve the following problem:
Estimate SMPL parameters from multi-person images with mmtracking. Args: args (object): object of argparse.Namespace. frames_iter (np.ndarray): prepared frames
Here is the function:
def multi_person_with_mmtracking(args, frames_iter):
    """Estimate SMPL parameters from multi-person images with mmtracking.

    Runs per-frame (or per-sequence) mesh regression on tracked people,
    optionally skipping/interpolating frames for speed, smoothing the
    results, dumping a HumanData archive, and rendering a visualization.

    Args:
        args (object): object of argparse.Namespace.
        frames_iter (np.ndarray): prepared frames.
    """
    mesh_model, extractor = \
        init_model(args.mesh_reg_config, args.mesh_reg_checkpoint,
                   device=args.device.lower())
    max_track_id, max_instance, frame_id_list, result_list = \
        get_tracking_result(args, frames_iter, mesh_model, extractor)
    frame_num = len(frame_id_list)
    # Dense per-(frame, track_id) buffers; unmatched slots stay zero.
    verts = np.zeros([frame_num, max_track_id + 1, 6890, 3])
    pred_cams = np.zeros([frame_num, max_track_id + 1, 3])
    bboxes_xyxy = np.zeros([frame_num, max_track_id + 1, 5])
    smpl_poses = np.zeros([frame_num, max_track_id + 1, 24, 3, 3])
    smpl_betas = np.zeros([frame_num, max_track_id + 1, 10])
    # Speed up: only run inference every `speed_up_interval` frames and
    # interpolate the rest afterwards.
    if args.speed_up_type:
        speed_up_interval = get_speed_up_interval(args.speed_up_type)
        speed_up_frames = (frame_num -
                           1) // speed_up_interval * speed_up_interval
    track_ids_lists = []
    for i, result in enumerate(mmcv.track_iter_progress(result_list)):
        frame_id = frame_id_list[i]
        if mesh_model.cfg.model.type == 'VideoBodyModelEstimator':
            if args.speed_up_type:
                warnings.warn(
                    'Video based models do not support speed up. '
                    'By default we will inference with original speed.',
                    UserWarning)
            feature_results_seq = extract_feature_sequence(
                result_list, frame_idx=i, causal=True, seq_len=16, step=1)
            mesh_results = inference_video_based_model(
                mesh_model,
                extracted_results=feature_results_seq,
                with_track_id=True)
        elif mesh_model.cfg.model.type == 'ImageBodyModelEstimator':
            # Skipped frames get zero placeholders that are filled in later
            # by speed_up_interpolate.
            if args.speed_up_type and i % speed_up_interval != 0\
                    and i <= speed_up_frames:
                mesh_results = []
                for idx in range(len(result)):
                    mesh_result = result[idx].copy()
                    mesh_result['bbox'] = np.zeros((5))
                    mesh_result['camera'] = np.zeros((3))
                    mesh_result['smpl_pose'] = np.zeros((24, 3, 3))
                    mesh_result['smpl_beta'] = np.zeros((10))
                    mesh_result['vertices'] = np.zeros((6890, 3))
                    mesh_result['keypoints_3d'] = np.zeros((17, 3))
                    mesh_results.append(mesh_result)
            else:
                mesh_results = inference_image_based_model(
                    mesh_model,
                    frames_iter[frame_id],
                    result,
                    bbox_thr=args.bbox_thr,
                    format='xyxy')
        else:
            raise Exception(
                f'{mesh_model.cfg.model.type} is not supported yet')
        # Scatter this frame's detections into the dense buffers by track id.
        track_ids = []
        for mesh_result in mesh_results:
            instance_id = mesh_result['track_id']
            bboxes_xyxy[i, instance_id] = mesh_result['bbox']
            pred_cams[i, instance_id] = mesh_result['camera']
            verts[i, instance_id] = mesh_result['vertices']
            smpl_betas[i, instance_id] = mesh_result['smpl_beta']
            smpl_poses[i, instance_id] = mesh_result['smpl_pose']
            track_ids.append(instance_id)
        track_ids_lists.append(track_ids)
    # Release GPU memory before post-processing.
    del mesh_model
    del extractor
    torch.cuda.empty_cache()
    # Speed up: fill the skipped frames by interpolating between the
    # frames that were actually inferred.
    if args.speed_up_type:
        smpl_poses = speed_up_process(
            torch.tensor(smpl_poses).to(args.device.lower()),
            args.speed_up_type)
        selected_frames = np.arange(0, len(frames_iter), speed_up_interval)
        smpl_poses, smpl_betas, pred_cams, bboxes_xyxy = speed_up_interpolate(
            selected_frames, speed_up_frames, smpl_poses, smpl_betas,
            pred_cams, bboxes_xyxy)
    # Temporal smoothing of poses, vertices and cameras.
    if args.smooth_type is not None:
        smpl_poses = smooth_process(
            smpl_poses.reshape(frame_num, -1, 24, 9),
            smooth_type=args.smooth_type).reshape(frame_num, -1, 24, 3, 3)
        verts = smooth_process(verts, smooth_type=args.smooth_type)
        pred_cams = smooth_process(
            pred_cams[:, np.newaxis],
            smooth_type=args.smooth_type).reshape(frame_num, -1, 3)
    # Normalize pose representation to axis-angle (24, 3).
    if smpl_poses.shape[2:] == (24, 3, 3):
        smpl_poses = rotmat_to_aa(smpl_poses)
    elif smpl_poses.shape[2:] == (24, 3):
        smpl_poses = smpl_poses
    else:
        raise Exception(f'Wrong shape of `smpl_pose`: {smpl_poses.shape}')
    if args.output is not None:
        # Flatten (frame, person) results into parallel lists and dump them
        # as a HumanData .npz archive next to the extracted images.
        body_pose_, global_orient_, smpl_betas_, verts_, pred_cams_, \
            bboxes_xyxy_, image_path_, frame_id_, person_id_ = \
            [], [], [], [], [], [], [], [], []
        human_data = HumanData()
        frames_folder = osp.join(args.output, 'images')
        os.makedirs(frames_folder, exist_ok=True)
        array_to_images(
            np.array(frames_iter)[frame_id_list], output_folder=frames_folder)
        for i, img_i in enumerate(sorted(os.listdir(frames_folder))):
            for person_i in track_ids_lists[i]:
                body_pose_.append(smpl_poses[i][person_i][1:])
                global_orient_.append(smpl_poses[i][person_i][:1])
                smpl_betas_.append(smpl_betas[i][person_i])
                verts_.append(verts[i][person_i])
                pred_cams_.append(pred_cams[i][person_i])
                bboxes_xyxy_.append(bboxes_xyxy[i][person_i])
                image_path_.append(os.path.join('images', img_i))
                person_id_.append(person_i)
                frame_id_.append(frame_id_list[i])
        smpl = {}
        smpl['body_pose'] = np.array(body_pose_).reshape((-1, 23, 3))
        smpl['global_orient'] = np.array(global_orient_).reshape((-1, 3))
        smpl['betas'] = np.array(smpl_betas_).reshape((-1, 10))
        human_data['smpl'] = smpl
        human_data['verts'] = verts_
        human_data['pred_cams'] = pred_cams_
        human_data['bboxes_xyxy'] = bboxes_xyxy_
        human_data['image_path'] = image_path_
        human_data['person_id'] = person_id_
        human_data['frame_id'] = frame_id_
        human_data.dump(osp.join(args.output, 'inference_result.npz'))
    # Compress the dense (track-id indexed) arrays: pack each frame's
    # actually-present instances into the first `instance_num` slots.
    compressed_verts = np.zeros([frame_num, max_instance, 6890, 3])
    compressed_cams = np.zeros([frame_num, max_instance, 3])
    compressed_bboxs = np.zeros([frame_num, max_instance, 5])
    compressed_poses = np.zeros([frame_num, max_instance, 24, 3])
    compressed_betas = np.zeros([frame_num, max_instance, 10])
    for i, track_ids_list in enumerate(track_ids_lists):
        instance_num = len(track_ids_list)
        compressed_verts[i, :instance_num] = verts[i, track_ids_list]
        compressed_cams[i, :instance_num] = pred_cams[i, track_ids_list]
        compressed_bboxs[i, :instance_num] = bboxes_xyxy[i, track_ids_list]
        compressed_poses[i, :instance_num] = smpl_poses[i, track_ids_list]
        compressed_betas[i, :instance_num] = smpl_betas[i, track_ids_list]
    assert len(frame_id_list) > 0
    if args.show_path is not None:
        # Reuse the frames already dumped for --output; otherwise extract
        # them next to the visualization output.
        if args.output is not None:
            frames_folder = os.path.join(args.output, 'images')
        else:
            frames_folder = osp.join(Path(args.show_path).parent, 'images')
            os.makedirs(frames_folder, exist_ok=True)
            array_to_images(
                np.array(frames_iter)[frame_id_list],
                output_folder=frames_folder)
        body_model_config = dict(model_path=args.body_model_dir, type='smpl')
        visualize_smpl_hmr(
            poses=compressed_poses.reshape(-1, max_instance, 24 * 3),
            betas=compressed_betas,
            cam_transl=compressed_cams,
            bbox=compressed_bboxs,
            output_path=args.show_path,
            render_choice=args.render_choice,
            resolution=frames_iter[0].shape[:2],
            origin_frames=frames_folder,
            body_model_config=body_model_config,
            overwrite=True,
            palette=args.palette,
            read_frames_batch=True)
# Tuple form of ``__version__``, e.g. (1, 3, 0) or (2, 0, 0, 'rc1').
version_info = parse_version_info(__version__)
The provided code snippet includes necessary dependencies for implementing the `parse_version_info` function. Write a Python function `def parse_version_info(version_str)` to solve the following problem:
Parse a version string into a tuple. Args: version_str (str): The version string. Returns: tuple[int | str]: The version info, e.g., "1.3.0" is parsed into (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
Here is the function:
def parse_version_info(version_str):
    """Parse a version string into a tuple.

    Args:
        version_str (str): The version string.

    Returns:
        tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
            (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
    """
    parts = []
    for token in version_str.split('.'):
        if token.isdigit():
            # Plain numeric component.
            parts.append(int(token))
        elif 'rc' in token:
            # Release-candidate component like '0rc1' -> 0, 'rc1'.
            head, tail = token.split('rc')[:2]
            parts.append(int(head))
            parts.append(f'rc{tail}')
    return tuple(parts)
14,265 | import random
import warnings
import numpy as np
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (
DistSamplerSeedHook,
Fp16OptimizerHook,
OptimizerHook,
build_runner,
)
from mmhuman3d.core.distributed_wrapper import DistributedDataParallelWrapper
from mmhuman3d.core.evaluation import DistEvalHook, EvalHook
from mmhuman3d.core.optimizer import build_optimizers
from mmhuman3d.data.datasets import build_dataloader, build_dataset
from mmhuman3d.utils.logger import get_root_logger
The provided code snippet includes necessary dependencies for implementing the `set_random_seed` function. Write a Python function `def set_random_seed(seed, deterministic=False)` to solve the following problem:
Set random seed. Args: seed (int): Seed to be used. deterministic (bool): Whether to set the deterministic option for CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` to True and `torch.backends.cudnn.benchmark` to False. Default: False.
Here is the function:
def set_random_seed(seed, deterministic=False):
    """Seed Python, NumPy and PyTorch RNGs for reproducible training.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    # Seed every RNG family the training pipeline may draw from.
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed,
                    torch.cuda.manual_seed_all):
        seed_fn(seed)
    if deterministic:
        # Trade cuDNN autotuning speed for reproducible kernel selection.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
14,266 | import random
import warnings
import numpy as np
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (
DistSamplerSeedHook,
Fp16OptimizerHook,
OptimizerHook,
build_runner,
)
from mmhuman3d.core.distributed_wrapper import DistributedDataParallelWrapper
from mmhuman3d.core.evaluation import DistEvalHook, EvalHook
from mmhuman3d.core.optimizer import build_optimizers
from mmhuman3d.data.datasets import build_dataloader, build_dataset
from mmhuman3d.utils.logger import get_root_logger
class DistributedDataParallelWrapper(nn.Module):
    """A DistributedDataParallel wrapper for models in 3D mesh estimation task.

    In 3D mesh estimation task, there is a need to wrap different modules in
    the models with separate DistributedDataParallel. Otherwise, it will cause
    errors for GAN training. More specifically, the GAN model usually has two
    sub-modules: generator and discriminator. If we wrap both of them in one
    standard DistributedDataParallel, it will cause errors during training,
    because when we update the parameters of the generator (or discriminator),
    the parameters of the discriminator (or generator) are not updated, which
    is not allowed for DistributedDataParallel. So we design this wrapper to
    separately wrap DistributedDataParallel for generator and discriminator.

    In this wrapper, we perform two operations:

    1. Wrap the modules in the model with separate MMDistributedDataParallel.
       Note that only modules with parameters will be wrapped.
    2. Do scatter operation for 'forward', 'train_step' and 'val_step'.

    Note that the arguments of this wrapper are the same as those in
    `torch.nn.parallel.distributed.DistributedDataParallel`.

    Args:
        module (nn.Module): Module that needs to be wrapped.
        device_ids (list[int | `torch.device`]): Same as that in
            `torch.nn.parallel.distributed.DistributedDataParallel`.
        dim (int, optional): Same as that in the official scatter function in
            pytorch. Defaults to 0.
        broadcast_buffers (bool): Same as that in
            `torch.nn.parallel.distributed.DistributedDataParallel`.
            Defaults to False.
        find_unused_parameters (bool, optional): Same as that in
            `torch.nn.parallel.distributed.DistributedDataParallel`.
            Traverse the autograd graph of all tensors contained in returned
            value of the wrapped module's forward function. Defaults to False.
        kwargs (dict): Other arguments used in
            `torch.nn.parallel.distributed.DistributedDataParallel`.
    """

    def __init__(self,
                 module,
                 device_ids,
                 dim=0,
                 broadcast_buffers=False,
                 find_unused_parameters=False,
                 **kwargs):
        super().__init__()
        assert len(device_ids) == 1, (
            'Currently, DistributedDataParallelWrapper only supports one'
            'single CUDA device for each process.'
            f'The length of device_ids must be 1, but got {len(device_ids)}.')
        self.module = module
        self.dim = dim
        # Wrap each parameterized child module with its own DDP instance.
        self.to_ddp(
            device_ids=device_ids,
            dim=dim,
            broadcast_buffers=broadcast_buffers,
            find_unused_parameters=find_unused_parameters,
            **kwargs)
        self.output_device = _get_device_index(device_ids[0], True)

    def to_ddp(self, device_ids, dim, broadcast_buffers,
               find_unused_parameters, **kwargs):
        """Wrap models with separate MMDistributedDataParallel.

        It only wraps the modules with parameters.
        """
        for name, module in self.module._modules.items():
            if next(module.parameters(), None) is None:
                # Parameter-less child: nothing to synchronize, only move it.
                module = module.cuda()
            elif all(not p.requires_grad for p in module.parameters()):
                # Fully frozen child: no gradients to all-reduce either.
                module = module.cuda()
            else:
                module = MMDistributedDataParallel(
                    module.cuda(),
                    device_ids=device_ids,
                    dim=dim,
                    broadcast_buffers=broadcast_buffers,
                    find_unused_parameters=find_unused_parameters,
                    **kwargs)
            # Replace the child in-place so attribute access keeps working.
            self.module._modules[name] = module

    def scatter(self, inputs, kwargs, device_ids):
        """Scatter function.

        Args:
            inputs (Tensor): Input Tensor.
            kwargs (dict): Args for
                ``mmcv.parallel.scatter_gather.scatter_kwargs``.
            device_ids (int): Device id.
        """
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def forward(self, *inputs, **kwargs):
        """Forward function.

        Args:
            inputs (tuple): Input data.
            kwargs (dict): Args for
                ``mmcv.parallel.scatter_gather.scatter_kwargs``.
        """
        inputs, kwargs = self.scatter(inputs, kwargs,
                                      [torch.cuda.current_device()])
        return self.module(*inputs[0], **kwargs[0])

    def train_step(self, *inputs, **kwargs):
        """Train step function.

        Args:
            inputs (Tensor): Input Tensor.
            kwargs (dict): Args for
                ``mmcv.parallel.scatter_gather.scatter_kwargs``.
        """
        inputs, kwargs = self.scatter(inputs, kwargs,
                                      [torch.cuda.current_device()])
        output = self.module.train_step(*inputs[0], **kwargs[0])
        return output

    def val_step(self, *inputs, **kwargs):
        """Validation step function.

        Args:
            inputs (tuple): Input data.
            kwargs (dict): Args for ``scatter_kwargs``.
        """
        inputs, kwargs = self.scatter(inputs, kwargs,
                                      [torch.cuda.current_device()])
        output = self.module.val_step(*inputs[0], **kwargs[0])
        return output
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Return the project-wide 'mmhuman3d' logger.

    Args:
        log_file (str, optional): Path for an extra file handler attached to
            the logger. Defaults to None.
        log_level (int): Logging level. Defaults to ``logging.INFO``.

    Returns:
        logging.Logger: The root logger of the mmhuman3d package.
    """
    root_name = 'mmhuman3d'
    return get_logger(root_name, log_file, log_level)
The provided code snippet includes necessary dependencies for implementing the `train_model` function. Write a Python function `def train_model(model, dataset, cfg, distributed=False, validate=False, timestamp=None, device='cuda', meta=None)` to solve the following problem:
Main api for training model.
Here is the function:
def train_model(model,
                dataset,
                cfg,
                distributed=False,
                validate=False,
                timestamp=None,
                device='cuda',
                meta=None):
    """Main api for training model.

    Builds data loaders, wraps the model for (distributed) data parallelism,
    constructs the optimizer(s) and runner, registers training/eval hooks,
    optionally resumes/loads a checkpoint, and launches the training loop.

    Args:
        model (nn.Module): Model to train.
        dataset (Dataset | list[Dataset]): Training dataset(s); one loader
            is built per dataset.
        cfg (mmcv.Config): Full training config (data, optimizer, runner,
            hooks, ...).
        distributed (bool): Whether to run distributed training.
        validate (bool): Whether to register an evaluation hook.
        timestamp (str, optional): Timestamp used to align log filenames.
        device (str): 'cuda' or 'cpu'; only used when not distributed.
        meta (dict, optional): Meta info forwarded to the runner.
    """
    logger = get_root_logger(cfg.log_level)
    # Prepare data loaders (one per training dataset).
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.samples_per_gpu,
            cfg.data.workers_per_gpu,
            # cfg.gpus will be ignored if distributed
            num_gpus=len(cfg.gpu_ids),
            dist=distributed,
            round_up=True,
            seed=cfg.seed) for ds in dataset
    ]
    # Determine whether to use the adversarial training process.
    use_adverserial_train = cfg.get('use_adversarial_train', False)
    # Put the model on GPU(s).
    if distributed:
        # Sets the `find_unused_parameters` parameter in
        # torch.nn.parallel.DistributedDataParallel
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        if use_adverserial_train:
            # Use DistributedDataParallelWrapper for adversarial training:
            # generator/discriminator need separate DDP instances.
            model = DistributedDataParallelWrapper(
                model,
                device_ids=[torch.cuda.current_device()],
                broadcast_buffers=False,
                find_unused_parameters=find_unused_parameters)
        else:
            model = MMDistributedDataParallel(
                model.cuda(),
                device_ids=[torch.cuda.current_device()],
                broadcast_buffers=False,
                find_unused_parameters=find_unused_parameters)
    else:
        if device == 'cuda':
            model = MMDataParallel(
                model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
        elif device == 'cpu':
            model = model.cpu()
        else:
            raise ValueError(F'unsupported device name {device}.')
    # Build the optimizer(s) and the runner.
    optimizer = build_optimizers(model, cfg.optimizer)
    if cfg.get('runner') is None:
        # Legacy configs only define `total_epochs`; synthesize a runner.
        cfg.runner = {
            'type': 'EpochBasedRunner',
            'max_epochs': cfg.total_epochs
        }
        warnings.warn(
            'config is now expected to have a `runner` section, '
            'please set `runner` in your config.', UserWarning)
    runner = build_runner(
        cfg.runner,
        default_args=dict(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=cfg.work_dir,
            logger=logger,
            meta=meta))
    # An ugly workaround to make the .log and .log.json filenames the same.
    runner.timestamp = timestamp
    if use_adverserial_train:
        # The optimizer step process is included in the train_step function
        # of the model, so the runner should NOT include an optimizer hook.
        optimizer_config = None
    else:
        # fp16 setting
        fp16_cfg = cfg.get('fp16', None)
        if fp16_cfg is not None:
            optimizer_config = Fp16OptimizerHook(
                **cfg.optimizer_config, **fp16_cfg, distributed=distributed)
        elif distributed and 'type' not in cfg.optimizer_config:
            optimizer_config = OptimizerHook(**cfg.optimizer_config)
        else:
            optimizer_config = cfg.optimizer_config
    # Register training hooks.
    runner.register_training_hooks(
        cfg.lr_config,
        optimizer_config,
        cfg.checkpoint_config,
        cfg.log_config,
        cfg.get('momentum_config', None),
        custom_hooks_config=cfg.get('custom_hooks', None))
    if distributed:
        runner.register_hook(DistSamplerSeedHook())
    # Register eval hooks.
    if validate:
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(
            val_dataset,
            samples_per_gpu=cfg.data.samples_per_gpu,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False,
            round_up=True)
        eval_cfg = cfg.get('evaluation', {})
        eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
        eval_hook = DistEvalHook if distributed else EvalHook
        runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
    # Resuming takes precedence over plain checkpoint loading.
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow)
14,267 | from typing import Dict, Tuple, Union
import cv2
import mmcv
import numpy as np
import torch
from mmcv.parallel import collate
from mmcv.runner import load_checkpoint
from mmcv.utils import print_log
from mmhuman3d.data.datasets.pipelines import Compose
from mmhuman3d.models.architectures.builder import build_architecture
from mmhuman3d.models.backbones.builder import build_backbone
from mmhuman3d.utils.demo_utils import box2cs, xywh2xyxy, xyxy2xywh
def build_architecture(cfg):
    """Build a complete framework (architecture) from its registry config."""
    return ARCHITECTURES.build(cfg)
def build_backbone(cfg):
    """Build a backbone from its registry config; ``None`` yields ``None``."""
    return None if cfg is None else BACKBONES.build(cfg)
The provided code snippet includes necessary dependencies for implementing the `init_model` function. Write a Python function `def init_model(config, checkpoint=None, device='cuda:0')` to solve the following problem:
Initialize a model from config file. Args: config (str or :obj:`mmcv.Config`): Config file path or the config object. checkpoint (str, optional): Checkpoint path. If left as None, the model will not load any weights. Returns: nn.Module: The constructed model. (nn.Module, None): The constructed extractor model
Here is the function:
def init_model(config, checkpoint=None, device='cuda:0'):
    """Build a mesh estimator (plus optional feature extractor) from config.

    Args:
        config (str or :obj:`mmcv.Config`): Config file path or the config
            object.
        checkpoint (str, optional): Checkpoint path. If left as None, the
            model will not load any weights.
        device (str): Device the model is moved to. Default: 'cuda:0'.

    Returns:
        nn.Module: The constructed model.
        (nn.Module, None): The constructed extractor model
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif not isinstance(config, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')
    config.data.test.test_mode = True
    model = build_architecture(config.model)
    if checkpoint is not None:
        # Load pretrained weights from the checkpoint.
        load_checkpoint(model, checkpoint, map_location=device)
    else:
        # No checkpoint: fall back to the model's own weight init.
        try:
            model.init_weights()
        except Exception as e:
            print_log(f'init model weights failed, please check: {e}')
    # Save the config in the model for convenience.
    model.cfg = config
    model.to(device)
    model.eval()
    extractor = None
    # Video-based estimators additionally need a feature extractor backbone.
    if config.model.type == 'VideoBodyModelEstimator':
        extractor = build_backbone(config.extractor.backbone)
        if config.extractor.checkpoint is not None:
            # Load the extractor's checkpoint as well.
            load_checkpoint(extractor, config.extractor.checkpoint)
        extractor.cfg = config
        extractor.to(device)
        extractor.eval()
    return model, extractor
14,268 | from typing import Dict, Tuple, Union
import cv2
import mmcv
import numpy as np
import torch
from mmcv.parallel import collate
from mmcv.runner import load_checkpoint
from mmcv.utils import print_log
from mmhuman3d.data.datasets.pipelines import Compose
from mmhuman3d.models.architectures.builder import build_architecture
from mmhuman3d.models.backbones.builder import build_backbone
from mmhuman3d.utils.demo_utils import box2cs, xywh2xyxy, xyxy2xywh
class LoadImage:
    """A simple pipeline step that loads an image into the results dict."""

    def __init__(self, color_type='color', channel_order='bgr'):
        # Forwarded to ``mmcv.imread`` for path inputs; for ndarray inputs,
        # ('color', 'rgb') triggers a BGR->RGB conversion.
        self.color_type = color_type
        self.channel_order = channel_order

    def __call__(self, results):
        """Load the image referenced by ``results['image_path']``.

        Args:
            results (dict): A result dict containing the image_path.

        Returns:
            dict: ``results`` with the loaded image under ``'img'``.
        """
        source = results['image_path']
        if isinstance(source, str):
            results['image_file'] = source
            img = mmcv.imread(source, self.color_type, self.channel_order)
        elif isinstance(source, np.ndarray):
            results['image_file'] = ''
            wants_rgb = (self.color_type == 'color'
                         and self.channel_order == 'rgb')
            img = cv2.cvtColor(source, cv2.COLOR_BGR2RGB) if wants_rgb \
                else source
        else:
            raise TypeError('"image_path" must be a numpy array or a str or '
                            'a pathlib.Path object')
        results['img'] = img
        return results
def _indexing_sequence(input: Union[Sequence, Dict[str, Sequence]],
index: Union[int, Tuple[int, ...]]):
"""Get item of the specified index from input.
Args:
input (Union[Sequence, Dict[str, Sequence]]): The input sequence.
index (Union[int, Tuple[int, ...]]): The Specified index.
Returns:
Union[Sequence, Dict[str, Sequence]]: The item of specified index.
"""
if isinstance(input, dict):
result = {}
for key, value in input.items():
result[key] = _indexing_sequence(value, index)
return result
elif isinstance(input, (np.ndarray, torch.Tensor, list, tuple)):
return input[index]
else:
return input
def xyxy2xywh(bbox_xyxy):
    """Transform the bbox format from x1y1x2y2 to xywh.

    Args:
        bbox_xyxy (np.ndarray): Bounding boxes (with scores), shaped (n, 4)
            or (n, 5). (left, top, right, bottom, [score])

    Returns:
        np.ndarray: Bounding boxes (with scores),
            shaped (n, 4) or (n, 5). (left, top, width, height, [score])
    """
    if not isinstance(bbox_xyxy, np.ndarray):
        raise TypeError(
            f'Input type is {type(bbox_xyxy)}, which should be numpy.ndarray.')
    # NOTE(review): width = right - left here, while xywh2xyxy uses a -1
    # convention; the two are not exact inverses — confirm intent upstream.
    bbox_xywh = bbox_xyxy.copy()
    bbox_xywh[..., 2] -= bbox_xywh[..., 0]
    bbox_xywh[..., 3] -= bbox_xywh[..., 1]
    return bbox_xywh
def xywh2xyxy(bbox_xywh):
    """Transform the bbox format from xywh to x1y1x2y2.

    Args:
        bbox_xywh (np.ndarray): Bounding boxes (with scores), shaped
            (n, 4) or (n, 5). (left, top, width, height, [score])

    Returns:
        np.ndarray: Bounding boxes (with scores),
            shaped (n, 4) or (n, 5). (left, top, right, bottom, [score])
    """
    if not isinstance(bbox_xywh, np.ndarray):
        raise TypeError(
            f'Input type is {type(bbox_xywh)}, which should be numpy.ndarray.')
    bbox_xyxy = bbox_xywh.copy()
    # Inclusive-pixel convention: right/bottom = origin + extent - 1.
    bbox_xyxy[..., 2] += bbox_xyxy[..., 0] - 1
    bbox_xyxy[..., 3] += bbox_xyxy[..., 1] - 1
    return bbox_xyxy
def box2cs(bbox_xywh, aspect_ratio=1.0, bbox_scale_factor=1.25):
    """Convert xywh boxes to (center, scale) pairs.

    Args:
        bbox_xywh (numpy.ndarray): boxes in (left, top, width, height) form.
        aspect_ratio (float, optional): target width/height ratio the box is
            padded to. Defaults to 1.0.
        bbox_scale_factor (float, optional): extra enlargement applied to
            the scale. Defaults to 1.25.

    Returns:
        numpy.ndarray: center of the bbox
        numpy.ndarray: the scale of the bbox w & h
    """
    if not isinstance(bbox_xywh, np.ndarray):
        raise TypeError(
            f'Input type is {type(bbox_xywh)}, which should be numpy.ndarray.')
    boxes = bbox_xywh.copy()
    pixel_std = 1
    # Center is the box midpoint, taken before any aspect-ratio padding.
    center = np.stack([
        boxes[..., 0] + boxes[..., 2] * 0.5,
        boxes[..., 1] + boxes[..., 3] * 0.5
    ], -1)
    # Pad the shorter side so that width / height == aspect_ratio.
    too_wide = boxes[..., 2] > aspect_ratio * boxes[..., 3]
    too_tall = ~too_wide
    boxes[too_wide, 3] = boxes[too_wide, 2] / aspect_ratio
    boxes[too_tall, 2] = boxes[too_tall, 3] * aspect_ratio
    # Scale is the padded extent (in units of pixel_std), enlarged by the
    # requested factor.
    scale = np.stack([
        boxes[..., 2] * 1.0 / pixel_std,
        boxes[..., 3] * 1.0 / pixel_std
    ], -1) * bbox_scale_factor
    return center, scale
The provided code snippet includes necessary dependencies for implementing the `inference_image_based_model` function. Write a Python function `def inference_image_based_model( model, img_or_path, det_results, bbox_thr=None, format='xywh', )` to solve the following problem:
Inference a single image with a list of person bounding boxes. Args: model (nn.Module): The loaded pose model. img_or_path (Union[str, np.ndarray]): Image filename or loaded image. det_results (List(dict)): the item in the dict may contain 'bbox' and/or 'track_id'. 'bbox' (4, ) or (5, ): The person bounding box, which contains 4 box coordinates (and score). 'track_id' (int): The unique id for each human instance. bbox_thr (float, optional): Threshold for bounding boxes. Only bboxes with higher scores will be fed into the pose detector. If bbox_thr is None, ignore it. Defaults to None. format (str, optional): bbox format ('xyxy' | 'xywh'). Default: 'xywh'. 'xyxy' means (left, top, right, bottom), 'xywh' means (left, top, width, height). Returns: list[dict]: Each item in the list is a dictionary, containing the bbox: (left, top, right, bottom, [score]), SMPL parameters, vertices, kp3d, and camera.
Here is the function:
def inference_image_based_model(
    model,
    img_or_path,
    det_results,
    bbox_thr=None,
    format='xywh',
):
    """Inference a single image with a list of person bounding boxes.

    Args:
        model (nn.Module): The loaded pose model.
        img_or_path (Union[str, np.ndarray]): Image filename or loaded image.
        det_results (List(dict)): the item in the dict may contain
            'bbox' and/or 'track_id'.
            'bbox' (4, ) or (5, ): The person bounding box, which contains
            4 box coordinates (and score).
            'track_id' (int): The unique id for each human instance.
        bbox_thr (float, optional): Threshold for bounding boxes.
            Only bboxes with higher scores will be fed into the pose detector.
            If bbox_thr is None, ignore it. Defaults to None.
        format (str, optional): bbox format ('xyxy' | 'xywh'). Default:
            'xywh'. 'xyxy' means (left, top, right, bottom),
            'xywh' means (left, top, width, height).

    Returns:
        list[dict]: Each item in the list is a dictionary,
            containing the bbox: (left, top, right, bottom, [score]),
            SMPL parameters, vertices, kp3d, and camera.
    """
    # Only two kinds of bbox format are supported.
    assert format in ['xyxy', 'xywh']
    mesh_results = []
    if len(det_results) == 0:
        return []
    # Preprocess all bboxes at once instead of per-bbox loops.
    bboxes = np.array([box['bbox'] for box in det_results])
    # Select bboxes by score threshold (requires a 5th score column).
    if bbox_thr is not None:
        assert bboxes.shape[1] == 5
        valid_idx = np.where(bboxes[:, 4] > bbox_thr)[0]
        bboxes = bboxes[valid_idx]
        det_results = [det_results[i] for i in valid_idx]
    # Keep both representations: xyxy for output, xywh for center/scale.
    if format == 'xyxy':
        bboxes_xyxy = bboxes
        bboxes_xywh = xyxy2xywh(bboxes)
    else:
        # format is already 'xywh'
        bboxes_xywh = bboxes
        bboxes_xyxy = xywh2xyxy(bboxes)
    # The score threshold may have removed every bounding box.
    if len(bboxes_xywh) == 0:
        return []
    cfg = model.cfg
    device = next(model.parameters()).device
    # Build the data pipeline (image loading + configured transforms).
    inference_pipeline = [LoadImage()] + cfg.inference_pipeline
    inference_pipeline = Compose(inference_pipeline)
    assert len(bboxes[0]) in [4, 5]
    batch_data = []
    input_size = cfg['img_res']
    aspect_ratio = 1 if isinstance(input_size,
                                   int) else input_size[0] / input_size[1]
    for i, bbox in enumerate(bboxes_xywh):
        center, scale = box2cs(bbox, aspect_ratio, bbox_scale_factor=1.25)
        # Prepare one pipeline sample per bbox crop.
        data = {
            'image_path': img_or_path,
            'center': center,
            'scale': scale,
            'rotation': 0,
            'bbox_score': bbox[4] if len(bbox) == 5 else 1,
            'sample_idx': i,
        }
        data = inference_pipeline(data)
        batch_data.append(data)
    batch_data = collate(batch_data, samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        # scatter does not work here, so just move the image tensor to the
        # model's cuda device directly.
        batch_data['img'] = batch_data['img'].to(device)
    # Get all img_metas of each bounding box.
    batch_data['img_metas'] = [
        img_metas[0] for img_metas in batch_data['img_metas'].data
    ]
    # Forward the model without gradient tracking.
    with torch.no_grad():
        results = model(
            img=batch_data['img'],
            img_metas=batch_data['img_metas'],
            sample_idx=batch_data['sample_idx'],
        )
    # Merge per-instance model outputs back into the detection dicts.
    for idx in range(len(det_results)):
        mesh_result = det_results[idx].copy()
        mesh_result['bbox'] = bboxes_xyxy[idx]
        for key, value in results.items():
            mesh_result[key] = _indexing_sequence(value, index=idx)
        mesh_results.append(mesh_result)
    return mesh_results
14,269 | from typing import Dict, Tuple, Union
import cv2
import mmcv
import numpy as np
import torch
from mmcv.parallel import collate
from mmcv.runner import load_checkpoint
from mmcv.utils import print_log
from mmhuman3d.data.datasets.pipelines import Compose
from mmhuman3d.models.architectures.builder import build_architecture
from mmhuman3d.models.backbones.builder import build_backbone
from mmhuman3d.utils.demo_utils import box2cs, xywh2xyxy, xyxy2xywh
def _gather_input_features(extracted_results):
"""Gather input features.
Args:
extracted_results (List[List[Dict]]):
Multi-frame feature extraction results
Returns:
List[List[dict]]: Multi-frame feature extraction results
stored in a nested list. Each element of the outer list is the
feature extraction results of a single frame, and each element of
the inner list is the extracted results of one person,
which contains:
features (ndarray): extracted features
track_id (int): unique id of each person, required when
``with_track_id==True```
"""
sequence_inputs = []
for frame in extracted_results:
frame_inputs = []
for res in frame:
inputs = dict()
if 'features' in res:
inputs['features'] = res['features']
if 'track_id' in res:
inputs['track_id'] = res['track_id']
frame_inputs.append(inputs)
sequence_inputs.append(frame_inputs)
return sequence_inputs
def _collate_feature_sequence(extracted_features,
                              with_track_id=True,
                              target_frame=0):
    """Reorganize multi-frame feature extraction results into individual
    feature sequences.
    Args:
        extracted_features (List[List[Dict]]): Multi-frame feature extraction
            results stored in a nested list. Each element of the outer list
            is the feature extraction results of a single frame, and each
            element of the inner list is the extracted results of one person,
            which contains:
                features (ndarray): extracted features
                track_id (int): unique id of each person, required when
                    ``with_track_id==True``
        with_track_id (bool): If True, the element in pose_results is expected
            to contain "track_id", which will be used to gather the pose
            sequence of a person from multiple frames. Otherwise, the pose
            results in each frame are expected to have a consistent number and
            order of identities. Default is True.
        target_frame (int): The index of the target frame. Default: 0.
    Returns:
        list[dict]: One dict per identity present in the target frame. Each
            dict carries the identity's non-feature keys from the target
            frame plus ``features`` of shape (T, C), where T is the number
            of frames and C the per-frame feature dimension.
    """
    # T: number of frames in the sequence.
    T = len(extracted_features)
    assert T > 0
    target_frame = (T + target_frame) % T  # convert negative index to positive
    N = len(
        extracted_features[target_frame])  # use identities in the target frame
    if N == 0:
        return []
    # C: feature dimension, taken from the first person in the target frame.
    C = extracted_features[target_frame][0]['features'].shape[0]
    track_ids = None
    if with_track_id:
        track_ids = [
            res['track_id'] for res in extracted_features[target_frame]
        ]
    feature_sequences = []
    for idx in range(N):
        feature_seq = dict()
        # gather static (non-feature) information from the target frame
        for k, v in extracted_features[target_frame][idx].items():
            if k != 'features':
                feature_seq[k] = v
        # gather features across all frames
        if not with_track_id:
            # identities are assumed position-aligned across frames
            feature_seq['features'] = np.stack(
                [frame[idx]['features'] for frame in extracted_features])
        else:
            features = np.zeros((T, C), dtype=np.float32)
            features[target_frame] = extracted_features[target_frame][idx][
                'features']
            # fill frames left of the target: find this track id in each
            # frame, walking from the target frame outwards
            for frame_idx in range(target_frame - 1, -1, -1):
                contains_idx = False
                for res in extracted_features[frame_idx]:
                    if res['track_id'] == track_ids[idx]:
                        features[frame_idx] = res['features']
                        contains_idx = True
                        break
                if not contains_idx:
                    # track absent here: replicate the nearest filled frame
                    # on the right (already populated in a prior iteration)
                    features[frame_idx] = features[frame_idx + 1]
            # fill frames right of the target the same way
            for frame_idx in range(target_frame + 1, T):
                contains_idx = False
                for res in extracted_features[frame_idx]:
                    if res['track_id'] == track_ids[idx]:
                        features[frame_idx] = res['features']
                        contains_idx = True
                        break
                if not contains_idx:
                    # track absent here: replicate the nearest filled frame
                    # on the left
                    features[frame_idx] = features[frame_idx - 1]
            feature_seq['features'] = features
        feature_sequences.append(feature_seq)
    return feature_sequences
The provided code snippet includes necessary dependencies for implementing the `inference_video_based_model` function. Write a Python function `def inference_video_based_model(model, extracted_results, with_track_id=True, causal=True)` to solve the following problem:
Infer SMPL parameters from extracted features using a video-based model. Args: model (nn.Module): The loaded mesh estimation model. extracted_results (List[List[Dict]]): Multi-frame feature extraction results stored in a nested list. Each element of the outer list is the feature extraction results of a single frame, and each element of the inner list is the feature information of one person, which contains: features (ndarray): extracted features track_id (int): unique id of each person, required when ``with_track_id==True`` bbox ((4, ) or (5, )): left, right, top, bottom, [score] with_track_id: If True, the element in extracted_results is expected to contain "track_id", which will be used to gather the feature sequence of a person from multiple frames. Otherwise, the extracted results in each frame are expected to have a consistent number and order of identities. Default is True. causal (bool): If True, the target frame is the first frame in a sequence. Otherwise, the target frame is in the middle of a sequence. Returns: list[dict]: Each item in the list is a dictionary, which contains: SMPL parameters, vertices, kp3d, and camera.
Here is the function:
def inference_video_based_model(model,
                                extracted_results,
                                with_track_id=True,
                                causal=True):
    """Infer SMPL parameters from extracted features using a video-based
    model.
    Args:
        model (nn.Module): The loaded mesh estimation model.
        extracted_results (List[List[Dict]]): Multi-frame feature extraction
            results stored in a nested list. Each element of the outer list
            is the feature extraction results of a single frame, and each
            element of the inner list is the feature information of one person,
            which contains:
                features (ndarray): extracted features
                track_id (int): unique id of each person, required when
                    ``with_track_id==True``
                bbox ((4, ) or (5, )): left, right, top, bottom, [score]
        with_track_id: If True, the element in extracted_results is expected to
            contain "track_id", which will be used to gather the feature
            sequence of a person from multiple frames. Otherwise, the extracted
            results in each frame are expected to have a consistent number and
            order of identities. Default is True.
        causal (bool): If True, the target frame is the first frame in
            a sequence. Otherwise, the target frame is in the middle of a
            sequence.
    Returns:
        list[dict]: Each item in the list is a dictionary, which contains:
            SMPL parameters, vertices, kp3d, and camera.
    """
    cfg = model.cfg
    device = next(model.parameters()).device
    seq_len = cfg.data.test.seq_len
    mesh_results = []
    # build the data pipeline
    inference_pipeline = Compose(cfg.inference_pipeline)
    # the frame whose outputs are returned to the caller
    target_idx = 0 if causal else len(extracted_results) // 2
    input_features = _gather_input_features(extracted_results)
    feature_sequences = _collate_feature_sequence(input_features,
                                                  with_track_id, target_idx)
    if not feature_sequences:
        return mesh_results
    batch_data = []
    for i, seq in enumerate(feature_sequences):
        data = {
            'features': seq['features'],
            'sample_idx': i,
        }
        data = inference_pipeline(data)
        batch_data.append(data)
    batch_data = collate(batch_data, samples_per_gpu=len(batch_data))
    if next(model.parameters()).is_cuda:
        # scatter not work so just move image to cuda device
        batch_data['features'] = batch_data['features'].to(device)
    with torch.no_grad():
        results = model(
            features=batch_data['features'],
            img_metas=batch_data['img_metas'],
            sample_idx=batch_data['sample_idx'])
    # reshape flat outputs to (num_identities, seq_len, ...) so a single
    # target frame can be indexed out per identity below
    results['camera'] = results['camera'].reshape(-1, seq_len, 3)
    results['smpl_pose'] = results['smpl_pose'].reshape(-1, seq_len, 24, 3, 3)
    results['smpl_beta'] = results['smpl_beta'].reshape(-1, seq_len, 10)
    results['vertices'] = results['vertices'].reshape(-1, seq_len, 6890, 3)
    results['keypoints_3d'] = results['keypoints_3d'].reshape(
        -1, seq_len, 17, 3)
    for idx in range(len(feature_sequences)):
        mesh_result = dict()
        mesh_result['camera'] = results['camera'][idx, target_idx]
        mesh_result['smpl_pose'] = results['smpl_pose'][idx, target_idx]
        mesh_result['smpl_beta'] = results['smpl_beta'][idx, target_idx]
        mesh_result['vertices'] = results['vertices'][idx, target_idx]
        mesh_result['keypoints_3d'] = results['keypoints_3d'][idx, target_idx]
        mesh_result['bbox'] = extracted_results[target_idx][idx]['bbox']
        # 'track_id' is not included in results generated by mmdet
        if 'track_id' in extracted_results[target_idx][idx].keys():
            mesh_result['track_id'] = extracted_results[target_idx][idx][
                'track_id']
        mesh_results.append(mesh_result)
    return mesh_results
14,270 | from typing import Dict, Tuple, Union
import cv2
import mmcv
import numpy as np
import torch
from mmcv.parallel import collate
from mmcv.runner import load_checkpoint
from mmcv.utils import print_log
from mmhuman3d.data.datasets.pipelines import Compose
from mmhuman3d.models.architectures.builder import build_architecture
from mmhuman3d.models.backbones.builder import build_backbone
from mmhuman3d.utils.demo_utils import box2cs, xywh2xyxy, xyxy2xywh
class LoadImage:
    """Pipeline step that loads an image into ``results['img']``."""

    def __init__(self, color_type='color', channel_order='bgr'):
        # Decode mode passed to mmcv ('color'/'grayscale') and the channel
        # layout expected downstream ('bgr'/'rgb').
        self.color_type = color_type
        self.channel_order = channel_order

    def __call__(self, results):
        """Load the image referenced by ``results['image_path']``.

        Accepts either a filesystem path (read via mmcv) or an already
        decoded BGR ndarray (converted to RGB only when requested).

        Args:
            results (dict): A result dict containing ``image_path``.

        Returns:
            dict: ``results`` with ``img`` and ``image_file`` filled in.

        Raises:
            TypeError: If ``image_path`` is neither a str nor an ndarray.
        """
        src = results['image_path']
        if isinstance(src, str):
            results['image_file'] = src
            img = mmcv.imread(src, self.color_type, self.channel_order)
        elif isinstance(src, np.ndarray):
            results['image_file'] = ''
            wants_rgb = (self.color_type == 'color'
                         and self.channel_order == 'rgb')
            img = cv2.cvtColor(src, cv2.COLOR_BGR2RGB) if wants_rgb else src
        else:
            raise TypeError('"image_path" must be a numpy array or a str or '
                            'a pathlib.Path object')
        results['img'] = img
        return results
def xyxy2xywh(bbox_xyxy):
    """Transform the bbox format from x1y1x2y2 to xywh.

    Args:
        bbox_xyxy (np.ndarray): Bounding boxes (with scores), shaped (n, 4) or
            (n, 5). (left, top, right, bottom, [score])

    Returns:
        np.ndarray: Bounding boxes (with scores), shaped (n, 4) or (n, 5).
            (left, top, width, height, [score])
    """
    if not isinstance(bbox_xyxy, np.ndarray):
        raise TypeError(
            f'Input type is {type(bbox_xyxy)}, which should be numpy.ndarray.')
    bbox_xywh = bbox_xyxy.copy()
    # width = right - left, height = bottom - top; the input is untouched.
    bbox_xywh[..., 2] -= bbox_xywh[..., 0]
    bbox_xywh[..., 3] -= bbox_xywh[..., 1]
    return bbox_xywh
def xywh2xyxy(bbox_xywh):
    """Transform the bbox format from xywh to x1y1x2y2.

    Args:
        bbox_xywh (np.ndarray): Bounding boxes (with scores), shaped
            (n, 4) or (n, 5). (left, top, width, height, [score])

    Returns:
        np.ndarray: Bounding boxes (with scores), shaped (n, 4) or (n, 5).
            (left, top, right, bottom, [score])
    """
    if not isinstance(bbox_xywh, np.ndarray):
        raise TypeError(
            f'Input type is {type(bbox_xywh)}, which should be numpy.ndarray.')
    bbox_xyxy = bbox_xywh.copy()
    # right/bottom are inclusive pixel coordinates, hence the -1.
    bbox_xyxy[..., 2] += bbox_xyxy[..., 0] - 1
    bbox_xyxy[..., 3] += bbox_xyxy[..., 1] - 1
    return bbox_xyxy
def box2cs(bbox_xywh, aspect_ratio=1.0, bbox_scale_factor=1.25):
    """Convert xywh bounding boxes to center and scale.

    Args:
        bbox_xywh (numpy.ndarray): Boxes in (left, top, width, height) order.
        aspect_ratio (float, optional): Target width/height ratio the box is
            padded to. Defaults to 1.0.
        bbox_scale_factor (float, optional): Extra padding applied to the
            scale. Defaults to 1.25.

    Returns:
        numpy.ndarray: Center of each bbox.
        numpy.ndarray: Scale (w, h) of each bbox after aspect-ratio padding
            and scaling.
    """
    if not isinstance(bbox_xywh, np.ndarray):
        raise TypeError(
            f'Input type is {type(bbox_xywh)}, which should be numpy.ndarray.')
    box = bbox_xywh.copy()
    pixel_std = 1
    # Center is computed from the original (unpadded) box.
    center = np.stack([
        box[..., 0] + box[..., 2] * 0.5,
        box[..., 1] + box[..., 3] * 0.5
    ], -1)
    # Pad the shorter side so every box matches the target aspect ratio.
    wide = box[..., 2] > aspect_ratio * box[..., 3]
    tall = ~wide
    box[wide, 3] = box[wide, 2] / aspect_ratio
    box[tall, 2] = box[tall, 3] * aspect_ratio
    scale = np.stack([
        box[..., 2] * 1.0 / pixel_std,
        box[..., 3] * 1.0 / pixel_std
    ], -1)
    return center, scale * bbox_scale_factor
The provided code snippet includes necessary dependencies for implementing the `feature_extract` function. Write a Python function `def feature_extract( model, img_or_path, det_results, bbox_thr=None, format='xywh', )` to solve the following problem:
Extract image features with a list of person bounding boxes. Args: model (nn.Module): The loaded feature extraction model. img_or_path (Union[str, np.ndarray]): Image filename or loaded image. det_results (List(dict)): the item in the dict may contain 'bbox' and/or 'track_id'. 'bbox' (4, ) or (5, ): The person bounding box, which contains 4 box coordinates (and score). 'track_id' (int): The unique id for each human instance. bbox_thr (float, optional): Threshold for bounding boxes. If bbox_thr is None, ignore it. Defaults to None. format (str, optional): bbox format. Default: 'xywh'. 'xyxy' means (left, top, right, bottom), 'xywh' means (left, top, width, height). Returns: list[dict]: The bbox & pose info, containing the bbox: (left, top, right, bottom, [score]) and the features.
Here is the function:
def feature_extract(
    model,
    img_or_path,
    det_results,
    bbox_thr=None,
    format='xywh',
):
    """Extract image features with a list of person bounding boxes.
    Args:
        model (nn.Module): The loaded feature extraction model.
        img_or_path (Union[str, np.ndarray]): Image filename or loaded image.
        det_results (List(dict)): the item in the dict may contain
            'bbox' and/or 'track_id'.
            'bbox' (4, ) or (5, ): The person bounding box, which contains
            4 box coordinates (and score).
            'track_id' (int): The unique id for each human instance.
        bbox_thr (float, optional): Threshold for bounding boxes.
            If bbox_thr is None, ignore it. Defaults to None.
        format (str, optional): bbox format. Default: 'xywh'.
            'xyxy' means (left, top, right, bottom),
            'xywh' means (left, top, width, height).
    Returns:
        list[dict]: The bbox & feature info per kept detection,
            containing the bbox: (left, top, right, bottom, [score])
            and the features.
    """
    # only two bbox formats are supported
    assert format in ['xyxy', 'xywh']
    cfg = model.cfg
    device = next(model.parameters()).device
    feature_results = []
    if len(det_results) == 0:
        return feature_results
    # preprocess all bboxes at once instead of per-bbox loops
    bboxes = np.array([box['bbox'] for box in det_results])
    assert len(bboxes[0]) in [4, 5]
    # Select bboxes by score threshold (requires a 5th score column)
    if bbox_thr is not None:
        assert bboxes.shape[1] == 5
        valid_idx = np.where(bboxes[:, 4] > bbox_thr)[0]
        bboxes = bboxes[valid_idx]
        det_results = [det_results[i] for i in valid_idx]
    # bbox_thr may have removed every bounding box
    if len(bboxes) == 0:
        return feature_results
    # keep both conventions: xywh for cropping, xyxy for the returned bbox
    if format == 'xyxy':
        bboxes_xyxy = bboxes
        bboxes_xywh = xyxy2xywh(bboxes)
    else:
        # format is already 'xywh'
        bboxes_xywh = bboxes
        bboxes_xyxy = xywh2xyxy(bboxes)
    # build the data pipeline
    extractor_pipeline = [LoadImage()] + cfg.extractor_pipeline
    extractor_pipeline = Compose(extractor_pipeline)
    batch_data = []
    input_size = cfg['img_res']
    aspect_ratio = 1 if isinstance(input_size,
                                   int) else input_size[0] / input_size[1]
    for i, bbox in enumerate(bboxes_xywh):
        center, scale = box2cs(bbox, aspect_ratio, bbox_scale_factor=1.25)
        # prepare one pipeline sample per bbox
        data = {
            'image_path': img_or_path,
            'center': center,
            'scale': scale,
            'rotation': 0,
            'bbox_score': bbox[4] if len(bbox) == 5 else 1,
            'sample_idx': i,
        }
        data = extractor_pipeline(data)
        batch_data.append(data)
    batch_data = collate(batch_data, samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        # scatter not work so just move image to cuda device
        batch_data['img'] = batch_data['img'].to(device)
    # get all img_metas of each bounding box
    batch_data['img_metas'] = [
        img_metas[0] for img_metas in batch_data['img_metas'].data
    ]
    # forward the model
    with torch.no_grad():
        results = model(batch_data['img'])
    # multi-stage backbones return a list/tuple of feature maps; use the
    # last one, globally average-pooled over the spatial dims
    if isinstance(results, list) or isinstance(results, tuple):
        results = results[-1].mean(dim=-1).mean(dim=-1)
    for idx in range(len(det_results)):
        feature_result = det_results[idx].copy()
        feature_result['bbox'] = bboxes_xyxy[idx]
        feature_result['features'] = results[idx].cpu().numpy()
        feature_results.append(feature_result)
    return feature_results
14,271 | import json
import os
import numpy as np
from tqdm import tqdm
from mmhuman3d.core.conventions.keypoints_mapping import convert_kps
from mmhuman3d.data.data_structures.human_data import HumanData
from mmhuman3d.data.data_structures.multi_human_data import MultiHumanData
from .base_converter import BaseConverter
from .builder import DATA_CONVERTERS
def sort_json(json):
    """Sort key: the numeric image id of a COCO-style annotation dict."""
    image_id = json['image_id']
    return int(image_id)
14,272 | import json
import os
import os.path as osp
import pickle as pk
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import convert_kps
from mmhuman3d.data.data_converters.builder import DATA_CONVERTERS
from mmhuman3d.data.data_structures.human_data import HumanData
from .base_converter import BaseModeConverter
The provided code snippet includes necessary dependencies for implementing the `project_points` function. Write a Python function `def project_points(K, xyz)` to solve the following problem:
Project 3D keypoints to 2D keypoints on the image plane using the camera intrinsics K.
Here is the function:
def project_points(K, xyz):
    """Project 3D keypoints to 2D image coordinates using intrinsics ``K``.

    Args:
        K (np.ndarray): (3, 3) camera intrinsic matrix.
        xyz (np.ndarray): (N, 3) points in camera coordinates.

    Returns:
        np.ndarray: (N, 2) pixel coordinates (perspective divide applied).
    """
    uv_hom = np.matmul(K, xyz.T).T
    # Divide by the homogeneous coordinate to land on the image plane.
    return uv_hom[:, :2] / uv_hom[:, -1:]
14,273 | from mmcv.utils import Registry
DATA_CONVERTERS = Registry('data_converters')
The provided code snippet includes necessary dependencies for implementing the `build_data_converter` function. Write a Python function `def build_data_converter(cfg)` to solve the following problem:
Build data converter.
Here is the function:
def build_data_converter(cfg):
    """Build a data converter from a registry config.

    Args:
        cfg (dict): Config whose ``type`` key names a converter registered in
            ``DATA_CONVERTERS``; the remaining keys are constructor kwargs.

    Returns:
        The instantiated converter object.
    """
    return DATA_CONVERTERS.build(cfg)
14,274 | import copy
from typing import Optional, Union
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from skimage.util.shape import view_as_windows
from .builder import DATASETS
from .human_image_dataset import HumanImageDataset
def get_vid_name(image_path: str):
    """Return the directory part (video name) of ``image_path``.

    Everything before the last ``/`` is kept; a path with no ``/`` yields
    an empty string.
    """
    sep_idx = image_path.rfind('/')
    return image_path[:sep_idx] if sep_idx >= 0 else ''
The provided code snippet includes necessary dependencies for implementing the `split_into_chunks` function. Write a Python function `def split_into_chunks(data_infos: list, seq_len: int, stride: int, test_mode: bool, only_vid_name: bool)` to solve the following problem:
Split annotations into chunks. Adapted from https://github.com/mkocabas/VIBE Args: data_infos (list): parsed annotations. seq_len (int): the length of each chunk. stride (int): the interval between chunks. test_mode (bool): if test_mode is true, then an additional chunk will be added to cover all frames. Otherwise, last few frames will be dropped. only_vid_name (bool): if only_vid_name is true, image_path only contains the video name. Otherwise, image_path contains both video_name and frame index. Return: list: shape: [N, 4]. Each chunk contains four parameters: start_frame, end_frame, valid_start_frame, valid_end_frame. The last two parameters are used to suppress redundant frames.
Here is the function:
def split_into_chunks(data_infos: list, seq_len: int, stride: int,
                      test_mode: bool, only_vid_name: bool):
    """Split annotations into chunks.
    Adapted from https://github.com/mkocabas/VIBE
    Args:
        data_infos (list): parsed annotations.
        seq_len (int): the length of each chunk.
        stride (int): the interval between chunks.
        test_mode (bool): if test_mode is true, then an additional chunk
            will be added to cover all frames. Otherwise, last few frames
            will be dropped.
        only_vid_name (bool): if only_vid_name is true, image_path only
            contains the video name. Otherwise, image_path contains both
            video_name and frame index.
    Return:
        list:
            shape: [N, 4]. Each chunk contains four parameters: start_frame,
            end_frame, valid_start_frame, valid_end_frame. The last two
            parameters are used to suppress redundant frames.
    """
    # Derive a per-frame video id so frames can be grouped by video.
    vid_names = []
    for image_path in data_infos:
        if only_vid_name:
            vid_name = image_path
        else:
            vid_name = get_vid_name(image_path)
        vid_names.append(vid_name)
    vid_names = np.array(vid_names)
    video_start_end_indices = []
    # np.unique sorts alphabetically; re-sort by first occurrence so video
    # order follows the annotation order.
    video_names, group = np.unique(vid_names, return_index=True)
    perm = np.argsort(group)
    video_names, group = video_names[perm], group[perm]
    # Split the global frame indices into one contiguous run per video.
    indices = np.split(np.arange(0, vid_names.shape[0]), group[1:])
    for idx in range(len(video_names)):
        indexes = indices[idx]
        # videos shorter than one chunk are skipped entirely
        if indexes.shape[0] < seq_len:
            continue
        # sliding windows of length seq_len with the given stride
        chunks = view_as_windows(indexes, (seq_len, ), step=stride)
        # each chunk: [start, end, valid_start, valid_end]
        start_finish = chunks[:, (0, -1, 0, -1)].tolist()
        video_start_end_indices += start_finish
        if chunks[-1][-1] < indexes[-1] and test_mode:
            # in test mode, append a tail chunk ending at the last frame;
            # only frames after the previous chunk's end are marked valid,
            # so overlapping frames are not evaluated twice
            start_frame = indexes[-1] - seq_len + 1
            end_frame = indexes[-1]
            valid_start_frame = chunks[-1][-1] + 1
            valid_end_frame = indexes[-1]
            extra_start_finish = [[
                start_frame, end_frame, valid_start_frame, valid_end_frame
            ]]
            video_start_end_indices += extra_start_finish
    return video_start_end_indices
14,275 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from ..builder import PIPELINES
from .compose import Compose
The provided code snippet includes necessary dependencies for implementing the `get_warp_matrix` function. Write a Python function `def get_warp_matrix(theta, size_input, size_dst, size_target)` to solve the following problem:
Calculate the transformation matrix under the constraint of unbiased. Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020). Args: theta (float): Rotation angle in degrees. size_input (np.ndarray): Size of input image [w, h]. size_dst (np.ndarray): Size of output image [w, h]. size_target (np.ndarray): Size of ROI in input plane [w, h]. Returns: matrix (np.ndarray): A matrix for transformation.
Here is the function:
def get_warp_matrix(theta, size_input, size_dst, size_target):
    """Calculate the transformation matrix under the constraint of unbiased.

    Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased
    Data Processing for Human Pose Estimation (CVPR 2020).

    Args:
        theta (float): Rotation angle in degrees.
        size_input (np.ndarray): Size of input image [w, h].
        size_dst (np.ndarray): Size of output image [w, h].
        size_target (np.ndarray): Size of ROI in input plane [w, h].

    Returns:
        matrix (np.ndarray): A (2, 3) affine transformation matrix.
    """
    theta = np.deg2rad(theta)
    cos_t = math.cos(theta)
    sin_t = math.sin(theta)
    scale_x = size_dst[0] / size_target[0]
    scale_y = size_dst[1] / size_target[1]
    matrix = np.zeros((2, 3), dtype=np.float32)
    # Rotation about the input image center, then scaling into the
    # destination frame; the translation recenters onto the target ROI.
    matrix[0, 0] = cos_t * scale_x
    matrix[0, 1] = -sin_t * scale_x
    matrix[0, 2] = scale_x * (-0.5 * size_input[0] * cos_t +
                              0.5 * size_input[1] * sin_t +
                              0.5 * size_target[0])
    matrix[1, 0] = sin_t * scale_y
    matrix[1, 1] = cos_t * scale_y
    matrix[1, 2] = scale_y * (-0.5 * size_input[0] * sin_t -
                              0.5 * size_input[1] * cos_t +
                              0.5 * size_target[1])
    return matrix
14,276 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from ..builder import PIPELINES
from .compose import Compose
The provided code snippet includes necessary dependencies for implementing the `warp_affine_joints` function. Write a Python function `def warp_affine_joints(joints, mat)` to solve the following problem:
Apply affine transformation defined by the transform matrix on the joints. Args: joints (np.ndarray[..., 2]): Origin coordinate of joints. mat (np.ndarray[3, 2]): The affine matrix. Returns: matrix (np.ndarray[..., 2]): Result coordinate of joints.
Here is the function:
def warp_affine_joints(joints, mat):
    """Apply an affine transformation to joint coordinates.

    Args:
        joints (np.ndarray[..., 2]): Origin coordinates of joints.
        mat (np.ndarray[2, 3]): The affine matrix.

    Returns:
        np.ndarray[..., 2]: Transformed coordinates, same shape as input.
    """
    pts = np.array(joints)
    out_shape = pts.shape
    pts = pts.reshape(-1, 2)
    # Append a homogeneous coordinate of 1 so the translation column of
    # ``mat`` is applied by the matrix product.
    ones = np.ones_like(pts[:, :1])
    homo = np.concatenate((pts, ones), axis=1)
    return np.dot(homo, mat.T).reshape(out_shape)
14,277 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from ..builder import PIPELINES
from .compose import Compose
The provided code snippet includes necessary dependencies for implementing the `_flip_smpl_pose` function. Write a Python function `def _flip_smpl_pose(pose)` to solve the following problem:
Flip SMPL pose parameters horizontally. Args: pose (np.ndarray([72])): SMPL pose parameters Returns: pose_flipped
Here is the function:
def _flip_smpl_pose(pose):
"""Flip SMPL pose parameters horizontally.
Args:
pose (np.ndarray([72])): SMPL pose parameters
Returns:
pose_flipped
"""
flippedParts = [
0, 1, 2, 6, 7, 8, 3, 4, 5, 9, 10, 11, 15, 16, 17, 12, 13, 14, 18, 19,
20, 24, 25, 26, 21, 22, 23, 27, 28, 29, 33, 34, 35, 30, 31, 32, 36, 37,
38, 42, 43, 44, 39, 40, 41, 45, 46, 47, 51, 52, 53, 48, 49, 50, 57, 58,
59, 54, 55, 56, 63, 64, 65, 60, 61, 62, 69, 70, 71, 66, 67, 68
]
pose_flipped = pose[flippedParts]
# Negate the second and the third dimension of the axis-angle
pose_flipped[1::3] = -pose_flipped[1::3]
pose_flipped[2::3] = -pose_flipped[2::3]
return pose_flipped | Flip SMPL pose parameters horizontally. Args: pose (np.ndarray([72])): SMPL pose parameters Returns: pose_flipped |
14,278 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from ..builder import PIPELINES
from .compose import Compose
The provided code snippet includes necessary dependencies for implementing the `_flip_smplx_pose` function. Write a Python function `def _flip_smplx_pose(pose)` to solve the following problem:
Flip SMPLX pose parameters horizontally. Args: pose (np.ndarray([63])): SMPLX pose parameters Returns: pose_flipped (np.ndarray([21,3]))
Here is the function:
def _flip_smplx_pose(pose):
"""Flip SMPLX pose parameters horizontally.
Args:
pose (np.ndarray([63])): SMPLX pose parameters
Returns:
pose_flipped (np.ndarray([21,3]))
"""
flippedParts = np.array([
6, 7, 8, 3, 4, 5, 9, 10, 11, 15, 16, 17, 12, 13, 14, 18, 19, 20, 24,
25, 26, 21, 22, 23, 27, 28, 29, 33, 34, 35, 30, 31, 32, 36, 37, 38, 42,
43, 44, 39, 40, 41, 45, 46, 47, 51, 52, 53, 48, 49, 50, 57, 58, 59, 54,
55, 56, 63, 64, 65, 60, 61, 62
],
dtype=np.int32) - 3
dim_flip = np.array([1, -1, -1], dtype=pose.dtype)
pose = (pose[flippedParts].reshape(21, 3) * dim_flip).copy()
return pose | Flip SMPLX pose parameters horizontally. Args: pose (np.ndarray([63])): SMPLX pose parameters Returns: pose_flipped (np.ndarray([21,3])) |
14,279 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from ..builder import PIPELINES
from .compose import Compose
The provided code snippet includes necessary dependencies for implementing the `_flip_axis_angle` function. Write a Python function `def _flip_axis_angle(r)` to solve the following problem:
Flip axis_angle horizontally. Args: r (np.ndarray([3])) Returns: f_flipped
Here is the function:
def _flip_axis_angle(r):
"""Flip axis_angle horizontally.
Args:
r (np.ndarray([3]))
Returns:
f_flipped
"""
dim_flip = np.array([1, -1, -1], dtype=r.dtype)
r = r * dim_flip
return r | Flip axis_angle horizontally. Args: r (np.ndarray([3])) Returns: f_flipped |
14,280 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from ..builder import PIPELINES
from .compose import Compose
def _flip_hand_pose(r_pose, l_pose):
dim_flip = np.array([1, -1, -1], dtype=r_pose.dtype)
ret_l_pose = r_pose * dim_flip
ret_r_pose = l_pose * dim_flip
return ret_r_pose, ret_l_pose | null |
14,281 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from ..builder import PIPELINES
from .compose import Compose
The provided code snippet includes necessary dependencies for implementing the `_flip_keypoints` function. Write a Python function `def _flip_keypoints(keypoints, flip_pairs, img_width=None)` to solve the following problem:
Flip human joints horizontally. Note: num_keypoints: K num_dimension: D Args: keypoints (np.ndarray([K, D])): Coordinates of keypoints. flip_pairs (list[tuple()]): Pairs of keypoints which are mirrored (for example, left ear -- right ear). img_width (int | None, optional): The width of the original image. To flip 2D keypoints, image width is needed. To flip 3D keypoints, we simply negate the value of x-axis. Default: None. Returns: keypoints_flipped
Here is the function:
def _flip_keypoints(keypoints, flip_pairs, img_width=None):
"""Flip human joints horizontally.
Note:
num_keypoints: K
num_dimension: D
Args:
keypoints (np.ndarray([K, D])): Coordinates of keypoints.
flip_pairs (list[tuple()]): Pairs of keypoints which are mirrored
(for example, left ear -- right ear).
img_width (int | None, optional): The width of the original image.
To flip 2D keypoints, image width is needed. To flip 3D keypoints,
we simply negate the value of x-axis. Default: None.
Returns:
keypoints_flipped
"""
keypoints_flipped = keypoints.copy()
# Swap left-right parts
for left, right in flip_pairs:
keypoints_flipped[left, :] = keypoints[right, :]
keypoints_flipped[right, :] = keypoints[left, :]
# Flip horizontally
if img_width is None:
keypoints_flipped[:, 0] = -keypoints_flipped[:, 0]
else:
keypoints_flipped[:, 0] = img_width - 1 - keypoints_flipped[:, 0]
return keypoints_flipped | Flip human joints horizontally. Note: num_keypoints: K num_dimension: D Args: keypoints (np.ndarray([K, D])): Coordinates of keypoints. flip_pairs (list[tuple()]): Pairs of keypoints which are mirrored (for example, left ear -- right ear). img_width (int | None, optional): The width of the original image. To flip 2D keypoints, image width is needed. To flip 3D keypoints, we simply negate the value of x-axis. Default: None. Returns: keypoints_flipped |
14,282 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from ..builder import PIPELINES
from .compose import Compose
def _construct_rotation_matrix(rot, size=3):
"""Construct the in-plane rotation matrix.
Args:
rot (float): Rotation angle (degree).
size (int): The size of the rotation matrix.
Candidate Values: 2, 3. Defaults to 3.
Returns:
rot_mat (np.ndarray([size, size]): Rotation matrix.
"""
rot_mat = np.eye(size, dtype=np.float32)
if rot != 0:
rot_rad = np.deg2rad(rot)
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
rot_mat[0, :2] = [cs, -sn]
rot_mat[1, :2] = [sn, cs]
return rot_mat
The provided code snippet includes necessary dependencies for implementing the `_rotate_joints_3d` function. Write a Python function `def _rotate_joints_3d(joints_3d, rot)` to solve the following problem:
Rotate the 3D joints in the local coordinates. Notes: Joints number: K Args: joints_3d (np.ndarray([K, 3])): Coordinates of keypoints. rot (float): Rotation angle (degree). Returns: joints_3d_rotated
Here is the function:
def _rotate_joints_3d(joints_3d, rot):
    """Rotate 3D joints in-plane by ``rot`` degrees.

    Notes:
        Joints number: K

    Args:
        joints_3d (np.ndarray([K, 3])): Coordinates of keypoints.
        rot (float): Rotation angle (degree).

    Returns:
        np.ndarray([K, 3]): rotated joints, cast to float32.
    """
    # 3D joints rotate counterclockwise, so the angle sign is inverted.
    rot_mat = _construct_rotation_matrix(-rot, 3)
    # einsum('ij,kj->ki', R, J) is just J @ R.T: rotate every row vector.
    rotated = joints_3d @ rot_mat.T
    return rotated.astype('float32')
14,283 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from ..builder import PIPELINES
from .compose import Compose
def _construct_rotation_matrix(rot, size=3):
"""Construct the in-plane rotation matrix.
Args:
rot (float): Rotation angle (degree).
size (int): The size of the rotation matrix.
Candidate Values: 2, 3. Defaults to 3.
Returns:
rot_mat (np.ndarray([size, size]): Rotation matrix.
"""
rot_mat = np.eye(size, dtype=np.float32)
if rot != 0:
rot_rad = np.deg2rad(rot)
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
rot_mat[0, :2] = [cs, -sn]
rot_mat[1, :2] = [sn, cs]
return rot_mat
The provided code snippet includes necessary dependencies for implementing the `_rotate_smpl_pose` function. Write a Python function `def _rotate_smpl_pose(pose, rot)` to solve the following problem:
Rotate SMPL pose parameters. SMPL (https://smpl.is.tue.mpg.de/) is a 3D human model. Args: pose (np.ndarray([72])): SMPL pose parameters rot (float): Rotation angle (degree). Returns: pose_rotated
Here is the function:
def _rotate_smpl_pose(pose, rot):
    """Rotate SMPL pose parameters in-plane by ``rot`` degrees.

    SMPL (https://smpl.is.tue.mpg.de/) is a 3D human model; only the
    global orientation (first 3 values) is affected by the rotation.

    Args:
        pose (np.ndarray([72])): SMPL pose parameters.
        rot (float): Rotation angle (degree).

    Returns:
        np.ndarray([72]): pose with rotated global orientation.
    """
    rotated = pose.copy()
    if rot == 0:
        return rotated
    rot_mat = _construct_rotation_matrix(-rot)
    # Global orientation as a rotation matrix (camera frame).
    global_rot, _ = cv2.Rodrigues(pose[:3].astype(np.float32))
    # Compose the in-plane rotation with the global orientation, back to
    # axis-angle.
    new_aa, _ = cv2.Rodrigues(np.dot(rot_mat, global_rot))
    rotated[:3] = new_aa.T[0]
    return rotated
14,284 | import numpy as np
from PIL import Image
def transform(pt, center, scale, res, invert=0):
    """Map a pixel location between image and crop reference frames.

    Args:
        pt: (x, y) point, 1-indexed.
        center, scale, res: crop parameters passed to ``get_transform``.
        invert (int): if nonzero, apply the inverse mapping.

    Returns:
        np.ndarray([2]): transformed integer coordinates, 1-indexed.
    """
    mat = get_transform(center, scale, res)
    if invert:
        mat = np.linalg.inv(mat)
    # Homogeneous coordinates; -1/+1 convert between 1- and 0-indexing.
    homogeneous = np.array([pt[0] - 1, pt[1] - 1, 1.]).T
    mapped = mat @ homogeneous
    return mapped[:2].astype(int) + 1
The provided code snippet includes necessary dependencies for implementing the `crop` function. Write a Python function `def crop(img, center, scale, res)` to solve the following problem:
Crop image according to the supplied bounding box.
Here is the function:
def crop(img, center, scale, res):
    """Crop ``img`` to the box given by (center, scale), resized to ``res``.

    Args:
        img (np.ndarray): source image (H, W) or (H, W, C).
        center, scale: bounding-box parameters understood by ``transform``.
        res (tuple): output resolution (width, height).

    Returns:
        tuple: (resized crop as np.ndarray, un-resized zero-padded crop).
    """
    # Crop corners in original-image coordinates.
    upper_left = np.array(transform([1, 1], center, scale, res, invert=1)) - 1
    bottom_right = np.array(
        transform([res[0] + 1, res[1] + 1], center, scale, res, invert=1)) - 1

    patch_shape = [bottom_right[1] - upper_left[1],
                   bottom_right[0] - upper_left[0]]
    if img.ndim > 2:
        patch_shape.append(img.shape[2])
    patch = np.zeros(patch_shape)

    # Overlap between the crop window and the source image: destination
    # ranges inside the patch, source ranges inside the image.
    dst_x = max(0, -upper_left[0]), min(bottom_right[0],
                                        img.shape[1]) - upper_left[0]
    dst_y = max(0, -upper_left[1]), min(bottom_right[1],
                                        img.shape[0]) - upper_left[1]
    src_x = max(0, upper_left[0]), min(img.shape[1], bottom_right[0])
    src_y = max(0, upper_left[1]), min(img.shape[0], bottom_right[1])
    patch[dst_y[0]:dst_y[1], dst_x[0]:dst_x[1]] = \
        img[src_y[0]:src_y[1], src_x[0]:src_x[1]]

    resized = np.array(Image.fromarray(patch.astype(np.uint8)).resize(res))
    return resized, patch
14,285 | from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from PIL import Image
from ..builder import PIPELINES
The provided code snippet includes necessary dependencies for implementing the `to_tensor` function. Write a Python function `def to_tensor(data)` to solve the following problem:
Convert objects of various python types to :obj:`torch.Tensor`. Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, :class:`Sequence`, :class:`int` and :class:`float`.
Here is the function:
def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.

    Raises:
        TypeError: for any unsupported input type.
    """
    if isinstance(data, torch.Tensor):
        return data
    if isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    # Strings are Sequences too, so they must be excluded explicitly.
    if isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    if isinstance(data, int):
        return torch.LongTensor([data])
    if isinstance(data, float):
        return torch.FloatTensor([data])
    raise TypeError(
        f'Type {type(data)} cannot be converted to tensor.'
        'Supported types are: `numpy.ndarray`, `torch.Tensor`, '
        '`Sequence`, `int` and `float`')
14,286 | import os.path
import random
import cv2
import numpy as np
from ..builder import PIPELINES
The provided code snippet includes necessary dependencies for implementing the `load_pascal_occluders` function. Write a Python function `def load_pascal_occluders(occluders_file)` to solve the following problem:
load pascal occluders from the occluder file.
Here is the function:
def load_pascal_occluders(occluders_file):
    """Load the pre-computed Pascal VOC occluders from ``occluders_file``.

    Args:
        occluders_file (str): path to a ``.npy`` file of occluder patches.

    Returns:
        np.ndarray: the loaded occluders (object array, pickled payload).

    Raises:
        NotImplementedError: if the file does not exist (on-the-fly
        generation is not supported).
    """
    if not os.path.isfile(occluders_file):
        raise NotImplementedError()
    # allow_pickle is required because occluder patches have ragged shapes.
    return np.load(occluders_file, allow_pickle=True)
14,287 | import os.path
import random
import cv2
import numpy as np
from ..builder import PIPELINES
def paste_over(im_src, im_dst, center):
    """Alpha-blend ``im_src`` (RGBA) onto ``im_dst`` in place.

    Locations outside the bounds of ``im_dst`` are handled as expected
    (only a part or none of ``im_src`` becomes visible).

    Args:
        im_src: RGBA image to paste; arbitrary size. Its 4th channel is
            the per-pixel alpha (0-255).
        im_dst: target image, modified in place.
        center: coordinates in ``im_dst`` where the center of ``im_src``
            should be placed.
    """
    src_wh = np.asarray([im_src.shape[1], im_src.shape[0]])
    dst_wh = np.asarray([im_dst.shape[1], im_dst.shape[0]])

    center = np.round(center).astype(np.int32)
    want_start = center - src_wh // 2
    want_end = want_start + src_wh

    # Clamp the destination window to the target image bounds.
    dst_start = np.clip(want_start, 0, dst_wh)
    dst_end = np.clip(want_end, 0, dst_wh)
    dst_region = im_dst[dst_start[1]:dst_end[1], dst_start[0]:dst_end[0]]

    # Matching window inside the source patch.
    src_start = dst_start - want_start
    src_end = src_wh + (dst_end - want_end)
    src_region = im_src[src_start[1]:src_end[1], src_start[0]:src_end[0]]

    rgb = src_region[..., 0:3]
    alpha = src_region[..., 3:].astype(np.float32) / 255
    im_dst[dst_start[1]:dst_end[1], dst_start[0]:dst_end[0]] = (
        alpha * rgb + (1 - alpha) * dst_region)
def resize_by_factor(im, factor):
    """Return a copy of ``im`` rescaled by ``factor``.

    Uses bilinear interpolation when upscaling and area interpolation
    when downscaling.
    """
    target_size = tuple(
        np.round(np.array([im.shape[1], im.shape[0]]) * factor).astype(int))
    mode = cv2.INTER_LINEAR if factor > 1.0 else cv2.INTER_AREA
    return cv2.resize(im, target_size, fx=factor, fy=factor,
                      interpolation=mode)
The provided code snippet includes necessary dependencies for implementing the `occlude_with_pascal_objects` function. Write a Python function `def occlude_with_pascal_objects(im, occluders)` to solve the following problem:
Returns an augmented version of `im`, containing some occluders from the Pascal VOC dataset.
Here is the function:
def occlude_with_pascal_objects(im, occluders):
    """Return a copy of ``im`` with random Pascal VOC occluders pasted on.

    Args:
        im (np.ndarray): source image.
        occluders: sequence of RGBA occluder patches.

    Returns:
        np.ndarray: augmented copy; the input image is untouched.
    """
    augmented = im.copy()
    width_height = np.asarray([im.shape[1], im.shape[0]])
    # Occluder patches are normalised for a 256px image.
    base_scale = min(width_height) / 256

    # Paste between 1 and 7 randomly chosen, scaled occluders.
    for _ in range(np.random.randint(1, 8)):
        occluder = random.choice(occluders)
        center = np.random.uniform([0, 0], width_height)
        scale_factor = np.random.uniform(0.2, 1.0) * base_scale
        occluder = resize_by_factor(occluder, scale_factor)
        paste_over(im_src=occluder, im_dst=augmented, center=center)

    return augmented
14,288 | import os.path
import random
import cv2
import numpy as np
from ..builder import PIPELINES
The provided code snippet includes necessary dependencies for implementing the `list_filepaths` function. Write a Python function `def list_filepaths(dirpath)` to solve the following problem:
list the file paths.
Here is the function:
def list_filepaths(dirpath):
    """Return the sorted paths of all regular files directly in ``dirpath``.

    Sub-directories and other non-file entries are excluded.
    """
    entries = (os.path.join(dirpath, name) for name in os.listdir(dirpath))
    return sorted(filter(os.path.isfile, entries))
14,289 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from mmhuman3d.utils.demo_utils import box2cs, xyxy2xywh
from ..builder import PIPELINES
from .transforms import (
_rotate_smpl_pose,
affine_transform,
get_affine_transform,
)
def bbox_xywh_to_xyxy(xywh):
    """Convert bounding boxes from format (x, y, w, h) to (xmin, ymin, xmax,
    ymax).

    Args:
        xywh (list, tuple or numpy.ndarray): bbox in format (x, y, w, h).
            If numpy.ndarray is provided, we expect multiple bounding boxes
            with shape `(N, 4)`.

    Returns:
        xyxy (tuple or numpy.ndarray): Converted bboxes in format
        (xmin, ymin, xmax, ymax). Return numpy.ndarray if input is in the
        same format.

    Raises:
        IndexError: on a malformed box.
        TypeError: on an unsupported input type.
    """
    if isinstance(xywh, (tuple, list)):
        if len(xywh) != 4:
            raise IndexError(
                'Bounding boxes must have 4 elements, given {}'.format(
                    len(xywh)))
        # Clamp degenerate (zero/negative) extents to zero.
        w = np.maximum(xywh[2] - 1, 0)
        h = np.maximum(xywh[3] - 1, 0)
        return (xywh[0], xywh[1], xywh[0] + w, xywh[1] + h)
    if isinstance(xywh, np.ndarray):
        if xywh.size % 4 != 0:
            raise IndexError(
                'Bounding boxes must have n * 4 elements, given {}'.format(
                    xywh.shape))
        return np.hstack(
            (xywh[:, :2], xywh[:, :2] + np.maximum(0, xywh[:, 2:4] - 1)))
    raise TypeError(
        'Expect input xywh a list, tuple or numpy.ndarray, given {}'.format(
            type(xywh)))
def bbox_clip_xyxy(xyxy, width, height):
    """Clip bounding box with format (xmin, ymin, xmax, ymax) to `(0, 0,
    width, height)`.

    Args:
        xyxy (list, tuple or numpy.ndarray): bbox in format (xmin, ymin,
            xmax, ymax). If numpy.ndarray is provided, we expect multiple
            bounding boxes with shape `(N, 4)`.
        width (int or float): Boundary width.
        height (int or float): Boundary height.

    Returns:
        xyxy (tuple or numpy.ndarray): clipped bbox in format (xmin, ymin,
        xmax, ymax) and input type.

    Raises:
        IndexError: on a malformed box.
        TypeError: on an unsupported input type.
    """
    if isinstance(xyxy, (tuple, list)):
        if len(xyxy) != 4:
            raise IndexError(
                'Bounding boxes must have 4 elements, given {}'.format(
                    len(xyxy)))
        # np.clip(v, 0, bound) == np.minimum(bound, np.maximum(0, v)).
        x1 = np.clip(xyxy[0], 0, width - 1)
        y1 = np.clip(xyxy[1], 0, height - 1)
        x2 = np.clip(xyxy[2], 0, width - 1)
        y2 = np.clip(xyxy[3], 0, height - 1)
        return (x1, y1, x2, y2)
    if isinstance(xyxy, np.ndarray):
        if xyxy.size % 4 != 0:
            raise IndexError(
                'Bounding boxes must have n * 4 elements, given {}'.format(
                    xyxy.shape))
        x1 = np.clip(xyxy[:, 0], 0, width - 1)
        y1 = np.clip(xyxy[:, 1], 0, height - 1)
        x2 = np.clip(xyxy[:, 2], 0, width - 1)
        y2 = np.clip(xyxy[:, 3], 0, height - 1)
        return np.hstack((x1, y1, x2, y2))
    raise TypeError(
        'Expect input xywh a list, tuple or numpy.ndarray, given {}'.format(
            type(xyxy)))
The provided code snippet includes necessary dependencies for implementing the `get_bbox` function. Write a Python function `def get_bbox(bbox_xywh, w, h)` to solve the following problem:
Obtain bbox in xyxy format given bbox in xywh format and applying clipping to ensure bbox is within image bounds. Args: xywh (list): bbox in format (x, y, w, h). w (int): image width h (int): image height Returns: xyxy (numpy.ndarray): Converted bboxes in format (xmin, ymin, xmax, ymax).
Here is the function:
def get_bbox(bbox_xywh, w, h):
    """Obtain bbox in xyxy format given bbox in xywh format and applying
    clipping to ensure bbox is within image bounds.

    Args:
        bbox_xywh (np.ndarray): bbox in format (x, y, w, h).
        w (int): image width.
        h (int): image height.

    Returns:
        np.ndarray: converted bbox in format (xmin, ymin, xmax, ymax).
    """
    # Convert to (1, 4) for the array branch of the helpers, then clip.
    xyxy = bbox_xywh_to_xyxy(bbox_xywh.reshape(1, 4))
    xmin, ymin, xmax, ymax = bbox_clip_xyxy(xyxy, w, h)
    return np.array([xmin, ymin, xmax, ymax])
14,290 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from mmhuman3d.utils.demo_utils import box2cs, xyxy2xywh
from ..builder import PIPELINES
from .transforms import (
_rotate_smpl_pose,
affine_transform,
get_affine_transform,
)
def transform_preds(coords, center, scale, output_size):
    """Map heatmap-space coordinates back into original image space.

    Args:
        coords (np.ndarray): coordinates; only the first two entries
            (x, y) are transformed.
        center, scale: bounding-box parameters of the person crop.
        output_size: heatmap size (width, height).

    Returns:
        np.ndarray: coordinates in image space (same shape as ``coords``).
    """
    inv_trans = get_affine_transform(
        center, scale, 0, output_size, inv=1, pixel_std=1)
    mapped = np.zeros(coords.shape)
    mapped[0:2] = affine_transform(coords[0:2], inv_trans)
    return mapped
The provided code snippet includes necessary dependencies for implementing the `heatmap2coord` function. Write a Python function `def heatmap2coord(pred_jts, pred_scores, hm_shape, bbox, output_3d=False, mean_bbox_scale=None)` to solve the following problem:
Retrieve predicted keypoints and scores from heatmap.
Here is the function:
def heatmap2coord(pred_jts,
                  pred_scores,
                  hm_shape,
                  bbox,
                  output_3d=False,
                  mean_bbox_scale=None):
    """Retrieve predicted keypoints and scores from heatmap.

    Args:
        pred_jts (torch.Tensor): normalised joint coordinates, 2-D
            (joints, dims) or 3-D (batch, joints, dims); values are
            offset by -0.5 relative to the heatmap extent.
        pred_scores (torch.Tensor): per-joint confidence scores with the
            same leading dimensions as ``pred_jts``.
        hm_shape (tuple): heatmap size as (width, height).
        bbox (sequence): person box as (xmin, ymin, xmax, ymax).
        output_3d (bool): if True, also fill the z (depth) channel of the
            output.
        mean_bbox_scale (float | None): when given with ``output_3d``,
            depth is rescaled by bbox_width / mean_bbox_scale.

    Returns:
        tuple: (preds, pred_scores) as float numpy arrays; ``preds`` holds
        image-space coordinates.
    """
    hm_width, hm_height = hm_shape
    ndims = pred_jts.dim()
    assert ndims in [2, 3], 'Dimensions of input heatmap should be 2 or 3'
    if ndims == 2:
        # Promote to a batch of one so the loops below are uniform.
        pred_jts = pred_jts.unsqueeze(0)
        pred_scores = pred_scores.unsqueeze(0)
    coords = pred_jts.cpu().numpy()
    coords = coords.astype(float)
    pred_scores = pred_scores.cpu().numpy()
    pred_scores = pred_scores.astype(float)
    # Undo the -0.5 normalisation: map x/y into heatmap pixel units.
    coords[:, :, 0] = (coords[:, :, 0] + 0.5) * hm_width
    coords[:, :, 1] = (coords[:, :, 1] + 0.5) * hm_height
    preds = np.zeros_like(coords)
    # transform bbox to scale
    xmin, ymin, xmax, ymax = bbox
    w = xmax - xmin
    h = ymax - ymin
    center = np.array([xmin + w * 0.5, ymin + h * 0.5])
    scale = np.array([w, h])
    # Transform back
    for i in range(coords.shape[0]):
        for j in range(coords.shape[1]):
            preds[i, j, 0:2] = transform_preds(coords[i, j, 0:2], center,
                                               scale, [hm_width, hm_height])
            if output_3d:
                if mean_bbox_scale is not None:
                    # Depth scales with bbox width relative to the mean box.
                    zscale = scale[0] / mean_bbox_scale
                    preds[i, j, 2] = coords[i, j, 2] / zscale
                else:
                    preds[i, j, 2] = coords[i, j, 2]
    # maxvals = np.ones((*preds.shape[:2], 1), dtype=float)
    # score_mul = 1 if norm_name == 'sigmoid' else 5
    return preds, pred_scores
14,291 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from mmhuman3d.utils.demo_utils import box2cs, xyxy2xywh
from ..builder import PIPELINES
from .transforms import (
_rotate_smpl_pose,
affine_transform,
get_affine_transform,
)
The provided code snippet includes necessary dependencies for implementing the `cam2pixel` function. Write a Python function `def cam2pixel(cam_coord, f, c)` to solve the following problem:
Convert coordinates from camera to image frame given f and c Args: cam_coord (np.ndarray): Coordinates in camera frame f (list): focal length, fx, fy c (list): principal point offset, x0, y0 Returns: img_coord (np.ndarray): Coordinates in image frame
Here is the function:
def cam2pixel(cam_coord, f, c):
    """Convert coordinates from camera to image frame given f and c.

    Args:
        cam_coord (np.ndarray): Coordinates in camera frame, shape (N, 3).
        f (list): focal length, fx, fy.
        c (list): principal point offset, x0, y0.

    Returns:
        np.ndarray: Coordinates in image frame, shape (N, 3); z is kept
        unchanged.
    """
    # Pinhole projection; 1e-8 guards against division by zero depth.
    depth = cam_coord[:, 2]
    u = cam_coord[:, 0] / (depth + 1e-8) * f[0] + c[0]
    v = cam_coord[:, 1] / (depth + 1e-8) * f[1] + c[1]
    return np.stack((u, v, depth), axis=1)
14,292 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from mmhuman3d.utils.demo_utils import box2cs, xyxy2xywh
from ..builder import PIPELINES
from .transforms import (
_rotate_smpl_pose,
affine_transform,
get_affine_transform,
)
The provided code snippet includes necessary dependencies for implementing the `get_intrinsic_matrix` function. Write a Python function `def get_intrinsic_matrix(f, c, inv=False)` to solve the following problem:
Get intrinsic matrix (or its inverse) given f and c. Args: f (list): focal length, fx, fy c (list): principal point offset, x0, y0 inv (bool): Store True to get inverse. Default: False. Returns: intrinsic matrix (np.ndarray): 3x3 intrinsic matrix or its inverse
Here is the function:
def get_intrinsic_matrix(f, c, inv=False):
    """Get intrinsic matrix (or its inverse) given f and c.

    Args:
        f (list): focal length, fx, fy.
        c (list): principal point offset, x0, y0.
        inv (bool): Store True to get inverse. Default: False.

    Returns:
        np.ndarray: 3x3 float32 intrinsic matrix or its inverse.
    """
    K = np.array([[f[0], 0, c[0]],
                  [0, f[1], c[1]],
                  [0, 0, 1]], dtype=np.float32)
    if inv:
        K = np.linalg.inv(K).astype(np.float32)
    return K
14,293 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from mmhuman3d.utils.demo_utils import box2cs, xyxy2xywh
from ..builder import PIPELINES
from .transforms import (
_rotate_smpl_pose,
affine_transform,
get_affine_transform,
)
The provided code snippet includes necessary dependencies for implementing the `aa_to_quat_numpy` function. Write a Python function `def aa_to_quat_numpy(axis_angle)` to solve the following problem:
Convert rotations given as axis/angle to quaternions. Args: axis_angle: Rotations given as a vector in axis angle form, as a np.ndarray of shape (..., 3), where the magnitude is the angle turned anticlockwise in radians around the vector's direction. Returns: quaternions with real part first, as np.ndarray of shape (..., 4).
Here is the function:
def aa_to_quat_numpy(axis_angle):
    """Convert rotations given as axis/angle to quaternions.

    Args:
        axis_angle: Rotations given as a vector in axis angle form,
            as a np.ndarray of shape (..., 3), where the magnitude is
            the angle turned anticlockwise in radians around the
            vector's direction.

    Returns:
        quaternions with real part first, as np.ndarray of shape (..., 4).
    """
    angle = np.linalg.norm(axis_angle, ord=2, axis=-1, keepdims=True)
    half = 0.5 * angle
    factor = np.empty_like(angle)
    tiny = np.abs(angle) < 1e-6
    factor[~tiny] = np.sin(half[~tiny]) / angle[~tiny]
    # Near zero, sin(x/2)/x ~= 1/2 - x^2/48 (Taylor expansion) avoids 0/0.
    factor[tiny] = 0.5 - (angle[tiny] * angle[tiny]) / 48
    return np.concatenate([np.cos(half), axis_angle * factor], axis=-1)
14,294 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from mmhuman3d.utils.demo_utils import box2cs, xyxy2xywh
from ..builder import PIPELINES
from .transforms import (
_rotate_smpl_pose,
affine_transform,
get_affine_transform,
)
The provided code snippet includes necessary dependencies for implementing the `flip_thetas` function. Write a Python function `def flip_thetas(thetas, theta_pairs)` to solve the following problem:
Flip thetas. Args: thetas (np.ndarray): joints in shape (num_thetas, 3) theta_pairs (list): flip pairs for thetas Returns: thetas_flip (np.ndarray): flipped thetas with shape (num_thetas, 3)
Here is the function:
def flip_thetas(thetas, theta_pairs):
    """Flip thetas (axis-angle rotations) horizontally.

    Args:
        thetas (np.ndarray): joints in shape (num_thetas, 3).
        theta_pairs (list): flip pairs for thetas.

    Returns:
        np.ndarray: flipped thetas with shape (num_thetas, 3); the input
        is untouched.
    """
    flipped = thetas.copy()
    # Mirror each rotation: negate the y and z axis-angle components.
    flipped[:, 1:] *= -1
    # Exchange the left/right counterparts.
    for left, right in theta_pairs:
        flipped[[left, right], :] = flipped[[right, left], :]
    return flipped
14,295 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from mmhuman3d.utils.demo_utils import box2cs, xyxy2xywh
from ..builder import PIPELINES
from .transforms import (
_rotate_smpl_pose,
affine_transform,
get_affine_transform,
)
The provided code snippet includes necessary dependencies for implementing the `flip_joints_3d` function. Write a Python function `def flip_joints_3d(joints_3d, joints_3d_visible, width, flip_pairs)` to solve the following problem:
Flip 3d joints. Args: joints_3d (np.ndarray): joints in shape (N, 3, 2) width (int): Image width joint_pairs (list): flip pairs for joints Returns: joints_3d_flipped (np.ndarray): flipped joints with shape (N, 3, 2) joints_3d_visible_flipped (np.ndarray): visibility of (N, 3, 2)
Here is the function:
def flip_joints_3d(joints_3d, joints_3d_visible, width, flip_pairs):
    """Flip 3d joints.

    Args:
        joints_3d (np.ndarray): joints; first column is the x coordinate.
            NOTE: modified in place (x is mirrored) before copies are made,
            matching the original implementation's side effect.
        joints_3d_visible (np.ndarray): per-joint visibility, same length.
        width (int): Image width.
        flip_pairs (list): flip pairs for joints.

    Returns:
        tuple: (flipped joints masked by visibility, flipped visibility).
    """
    assert len(joints_3d) == len(joints_3d_visible)

    # Mirror x within the image, in place on the input array.
    joints_3d[:, 0] = width - joints_3d[:, 0] - 1

    flipped = joints_3d.copy()
    flipped_visible = joints_3d_visible.copy()
    # Exchange mirrored left/right joints and their visibility flags.
    for left, right in flip_pairs:
        flipped[left, :] = joints_3d[right, :]
        flipped[right, :] = joints_3d[left, :]
        flipped_visible[left, :] = joints_3d_visible[right, :]
        flipped_visible[right, :] = joints_3d_visible[left, :]

    # Zero out coordinates of invisible joints.
    return flipped * flipped_visible, flipped_visible
14,296 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from mmhuman3d.utils.demo_utils import box2cs, xyxy2xywh
from ..builder import PIPELINES
from .transforms import (
_rotate_smpl_pose,
affine_transform,
get_affine_transform,
)
The provided code snippet includes necessary dependencies for implementing the `flip_xyz_joints_3d` function. Write a Python function `def flip_xyz_joints_3d(joints_3d, flip_pairs)` to solve the following problem:
Flip 3d xyz joints. Args: joints_3d (np.ndarray): Joints in shape (N, 3) joint_pairs (list): flip pairs for joints Returns: joints_3d_flipped (np.ndarray): flipped joints with shape (N, 3)
Here is the function:
def flip_xyz_joints_3d(joints_3d, flip_pairs):
    """Flip 3d xyz joints.

    Args:
        joints_3d (np.ndarray): Joints in shape (N, 3).
            NOTE: modified in place (x is negated) before the copy is made,
            matching the original implementation's side effect.
        flip_pairs (list): flip pairs for joints.

    Returns:
        np.ndarray: flipped joints with shape (N, 3).
    """
    # Negate x in place on the input array.
    joints_3d[:, 0] *= -1
    flipped = joints_3d.copy()
    # Exchange mirrored left/right joints.
    for left, right in flip_pairs:
        flipped[left, :] = joints_3d[right, :]
        flipped[right, :] = joints_3d[left, :]
    return flipped
14,297 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from mmhuman3d.utils.demo_utils import box2cs, xyxy2xywh
from ..builder import PIPELINES
from .transforms import (
_rotate_smpl_pose,
affine_transform,
get_affine_transform,
)
The provided code snippet includes necessary dependencies for implementing the `flip_twist` function. Write a Python function `def flip_twist(twist_phi, twist_weight, twist_pairs)` to solve the following problem:
Flip twist and weight. Args: twist_phi (np.ndarray): twist in shape (num_twist, 2) twist_weight (np.ndarray): weight in shape (num_twist, 2) twist_pairs (list): flip pairs for twist Returns: twist_flip (np.ndarray): flipped twist with shape (num_twist, 2) weight_flip (np.ndarray): flipped weights with shape (num_twist, 2)
Here is the function:
def flip_twist(twist_phi, twist_weight, twist_pairs):
    """Flip twist angles and their weights.

    Args:
        twist_phi (np.ndarray): twist (cos, sin) in shape (num_twist, 2).
        twist_weight (np.ndarray): weight in shape (num_twist, 2).
        twist_pairs (list): flip pairs for twist, 1-indexed joint ids.

    Returns:
        tuple: (flipped twist, flipped weights), both (num_twist, 2); the
        inputs are untouched.
    """
    flipped_twist = np.zeros_like(twist_phi)
    flipped_twist[:, 0] = twist_phi[:, 0]   # cos is symmetric
    flipped_twist[:, 1] = -twist_phi[:, 1]  # sin changes sign on mirror
    flipped_weight = twist_weight.copy()
    # Pair indices are joint ids starting at 1; twist arrays skip the root,
    # hence the -1 offset.
    for left, right in twist_pairs:
        li, ri = left - 1, right - 1
        flipped_twist[[li, ri]] = flipped_twist[[ri, li]]
        flipped_weight[[li, ri]] = flipped_weight[[ri, li]]
    return flipped_twist, flipped_weight
14,298 | import math
import random
import cv2
import mmcv
import numpy as np
from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs
from mmhuman3d.utils.demo_utils import box2cs, xyxy2xywh
from ..builder import PIPELINES
from .transforms import (
_rotate_smpl_pose,
affine_transform,
get_affine_transform,
)
The provided code snippet includes necessary dependencies for implementing the `_center_scale_to_box` function. Write a Python function `def _center_scale_to_box(center, scale)` to solve the following problem:
Convert a (center, scale) box representation to xyxy format. Args: center (np.ndarray([2])): box center (x, y) scale (np.ndarray([2])): box width and height Returns: bbox (list): bounding box as [xmin, ymin, xmax, ymax]
Here is the function:
def _center_scale_to_box(center, scale):
"""Flip twist and weight.
Args:
joints_3d (np.ndarray): Joints in shape (N, 3)
joint_pairs (list): flip pairs for joints
Returns:
joints_3d_flipped (np.ndarray): flipped joints with shape (N, 3)
"""
pixel_std = 1.0
w = scale[0] * pixel_std
h = scale[1] * pixel_std
xmin = center[0] - w * 0.5
ymin = center[1] - h * 0.5
xmax = xmin + w
ymax = ymin + h
bbox = [xmin, ymin, xmax, ymax]
return bbox | Flip twist and weight. Args: joints_3d (np.ndarray): Joints in shape (N, 3) joint_pairs (list): flip pairs for joints Returns: joints_3d_flipped (np.ndarray): flipped joints with shape (N, 3) |
14,299 | import platform
import random
from functools import partial
from typing import Optional, Union
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from .samplers import DistributedSampler
# Global registry mapping dataset type names to dataset classes; configs
# are resolved against it via `build_from_cfg` in `build_dataset`.
DATASETS = Registry('dataset')
class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
    add `get_cat_ids` function.

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
    """

    # NOTE(review): `_ConcatDataset` is presumably
    # `torch.utils.data.dataset.ConcatDataset` imported under an alias
    # outside this view -- confirm against the module's imports.
    def __init__(self, datasets: list):
        super(ConcatDataset, self).__init__(datasets)
class RepeatDataset(object):
    """Wrap a dataset so that it appears ``times`` times longer.

    Useful when a dataset is small but per-epoch data-loading startup is
    expensive: repeating the dataset reduces the number of epoch
    boundaries without duplicating any data.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """

    def __init__(self, dataset: Dataset, times: int):
        self.dataset = dataset
        self.times = times
        # Mirror the wrapped dataset's class names for downstream code.
        self.CLASSES = dataset.CLASSES
        self._ori_len = len(self.dataset)

    def __getitem__(self, idx: int):
        # Wrap the index back into the underlying dataset's range.
        return self.dataset[idx % self._ori_len]

    def __len__(self):
        # `times` full copies of the original dataset.
        return self._ori_len * self.times
The provided code snippet includes necessary dependencies for implementing the `build_dataset` function. Write a Python function `def build_dataset(cfg: Union[dict, list, tuple], default_args: Optional[Union[dict, None]] = None)` to solve the following problem:
Build dataset by the given config.
Here is the function:
def build_dataset(cfg: Union[dict, list, tuple],
                  default_args: Optional[Union[dict, None]] = None):
    """Build a dataset (possibly nested) from a config.

    A list/tuple of configs yields a :class:`ConcatDataset` of the
    recursively built members; a config of type ``'RepeatDataset'`` wraps
    its inner dataset; anything else is built directly from the
    ``DATASETS`` registry.
    """
    # Imported lazily to avoid a circular import with dataset_wrappers.
    from .dataset_wrappers import (
        ConcatDataset,
        RepeatDataset,
    )

    if isinstance(cfg, (list, tuple)):
        members = [build_dataset(sub_cfg, default_args) for sub_cfg in cfg]
        return ConcatDataset(members)
    if cfg['type'] == 'RepeatDataset':
        inner = build_dataset(cfg['dataset'], default_args)
        return RepeatDataset(inner, cfg['times'])
    return build_from_cfg(cfg, DATASETS, default_args)
14,300 | import platform
import random
from functools import partial
from typing import Optional, Union
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from .samplers import DistributedSampler
def worker_init_fn(worker_id: int, num_workers: int, rank: int, seed: int):
    """Seed `random` and `numpy` deterministically for a dataloader worker.

    Each worker derives a distinct seed from the user seed, the process
    rank and its worker id, so runs are reproducible while workers still
    draw different random streams.
    """
    # seed + rank * num_workers + worker_id is unique per (rank, worker).
    worker_seed = seed + rank * num_workers + worker_id
    np.random.seed(worker_seed)
    random.seed(worker_seed)
The provided code snippet includes necessary dependencies for implementing the `build_dataloader` function. Write a Python function `def build_dataloader(dataset: Dataset, samples_per_gpu: int, workers_per_gpu: int, num_gpus: Optional[int] = 1, dist: Optional[bool] = True, shuffle: Optional[bool] = True, round_up: Optional[bool] = True, seed: Optional[Union[int, None]] = None, persistent_workers: Optional[bool] = True, **kwargs)` to solve the following problem:
Build PyTorch DataLoader. In distributed training, each GPU/process has a dataloader. In non-distributed training, there is only one dataloader for all GPUs. Args: dataset (:obj:`Dataset`): A PyTorch dataset. samples_per_gpu (int): Number of training samples on each GPU, i.e., batch size of each GPU. workers_per_gpu (int): How many subprocesses to use for data loading for each GPU. num_gpus (int, optional): Number of GPUs. Only used in non-distributed training. dist (bool, optional): Distributed training/test or not. Default: True. shuffle (bool, optional): Whether to shuffle the data at every epoch. Default: True. round_up (bool, optional): Whether to round up the length of dataset by adding extra samples to make it evenly divisible. Default: True. persistent_workers (bool): If True, the data loader will not shutdown the worker processes after a dataset has been consumed once. This allows to maintain the workers Dataset instances alive. The argument also has effect in PyTorch>=1.7.0. Default: True kwargs: any keyword argument to be used to initialize DataLoader Returns: DataLoader: A PyTorch dataloader.
Here is the function:
def build_dataloader(dataset: Dataset,
                     samples_per_gpu: int,
                     workers_per_gpu: int,
                     num_gpus: Optional[int] = 1,
                     dist: Optional[bool] = True,
                     shuffle: Optional[bool] = True,
                     round_up: Optional[bool] = True,
                     seed: Optional[Union[int, None]] = None,
                     persistent_workers: Optional[bool] = True,
                     **kwargs):
    """Build a PyTorch DataLoader.

    In distributed training each GPU/process gets its own dataloader
    driven by a :class:`DistributedSampler`; otherwise a single
    dataloader serves all GPUs.

    Args:
        dataset (:obj:`Dataset`): A PyTorch dataset.
        samples_per_gpu (int): Batch size of each GPU.
        workers_per_gpu (int): Subprocesses used for data loading per GPU.
        num_gpus (int, optional): Number of GPUs. Only used in
            non-distributed training. Default: 1.
        dist (bool, optional): Distributed training/test or not.
            Default: True.
        shuffle (bool, optional): Whether to shuffle the data at every
            epoch. Default: True.
        round_up (bool, optional): Whether to round up the length of
            dataset by adding extra samples to make it evenly divisible.
            Default: True.
        seed (int, optional): Base seed for per-worker seeding; if None,
            workers are not explicitly seeded. Default: None.
        persistent_workers (bool): Keep worker processes (and their
            Dataset instances) alive between epochs. Takes effect in
            PyTorch>=1.7.0. Default: True.
        kwargs: any keyword argument to be used to initialize DataLoader.

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    rank, world_size = get_dist_info()

    if dist:
        # The sampler shuffles and partitions the dataset across ranks,
        # so the DataLoader itself must not shuffle.
        sampler = DistributedSampler(
            dataset, world_size, rank, shuffle=shuffle, round_up=round_up)
        shuffle = False
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        # One loader feeds every GPU, so scale batch size and workers.
        sampler = None
        batch_size = samples_per_gpu * num_gpus
        num_workers = workers_per_gpu * num_gpus

    if seed is None:
        init_fn = None
    else:
        init_fn = partial(
            worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)

    return DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
        pin_memory=False,
        shuffle=shuffle,
        worker_init_fn=init_fn,
        persistent_workers=persistent_workers,
        **kwargs)
14,301 | import math
from typing import Iterable, List, Optional, Tuple, Union
import numpy as np
import torch
from pytorch3d.renderer import cameras
from pytorch3d.structures import Meshes
from pytorch3d.transforms import Transform3d
from mmhuman3d.core.conventions.cameras.convert_convention import (
convert_camera_matrix,
convert_ndc_to_screen,
convert_screen_to_ndc,
convert_world_view,
)
from mmhuman3d.utils.transforms import ee_to_rotmat
from .builder import CAMERAS
class MMCamerasBase(cameras.CamerasBase):
    """Inherited from Pytorch3D CamerasBase and provide some new functions."""

    def __init__(self, **kwargs) -> None:
        """Initialize your cameras with `build_cameras` following:

        1): provide `K`, `R`, `T`, `resolution`/`image_size`, `in_ndc`
            directly.
            `K` should be shape of (N, 3, 3) or (N, 4, 4).
            `R` should be shape of (N, 3, 3).
            `T` should be shape of (N, 3).
        2): if `K` is not provided, will use `get_default_projection_matrix`
            to generate K from camera intrinsic parameters.
            E.g., you can pass `focal_length`, `principal_point` for
            perspective cameras.
            If these args are not provided, will use default values.
        3): if `R` is not provided, will use Identity matrix as default.
        4): if `T` is not provided, will use zeros matrix as default.
        5): `convention` means your source parameter camera convention.
            This mainly depends on how you get the matrices. E.g., you get
            the `K`, `R`, `T` by calibration with opencv, you should set
            `convention = opencv`. To figure out your camera convention,
            please see the definition of its extrinsic and intrinsic
            matrices. For projection and rendering, the matrices will be
            converted to `pytorch3d` finally since the `transforms3d`
            called in rendering and projection are defined as `pytorch3d`
            convention.
        6): `image_size` equals `resolution`.
        7): `in_ndc` could be set for 'PerspectiveCameras' and
            'OrthographicCameras', other cameras are fixed for this arg.
            `in_ndc = True` means your projection matrix is defined as
            `camera space to NDC space`. Under this circumstance you need
            to set `image_size` or `resolution` (they are equal) when you
            need to do `transform_points_screen`. You can also override
            resolution in `transform_points_screen` function.
            `in_ndc = False` means your projection matrix is defined as
            `camera space to screen space`. Under this circumstance you do
            not need to set `image_size` or `resolution` (they are equal)
            when you need to do `transform_points_screen` since the
            projection matrix is defined as view space to screen space.
        """
        # Convert any numpy parameters to tensors so downstream pytorch3d
        # code only ever sees torch.Tensor.
        for k in kwargs:
            if isinstance(kwargs.get(k), np.ndarray):
                kwargs.update({k: torch.Tensor(kwargs[k])})
        convention = kwargs.pop('convention', 'pytorch3d').lower()
        # Accept both the public `in_ndc` kwarg and the internal `_in_ndc`.
        in_ndc = kwargs.pop('in_ndc', kwargs.get('_in_ndc'))
        kwargs.update(_in_ndc=in_ndc)
        is_perspective = kwargs.get('_is_perspective')
        kwargs.pop('is_perspective', None)
        # `image_size` and `resolution` are synonyms; normalize either one
        # to an (N, 2) tensor.
        image_size = kwargs.get('image_size', kwargs.get('resolution', None))
        if image_size is not None:
            if isinstance(image_size, (int, float)):
                image_size = (image_size, image_size)
            if isinstance(image_size, (tuple, list)):
                image_size = torch.Tensor(image_size)
            if isinstance(image_size, torch.Tensor):
                if image_size.numel() == 1:
                    image_size = image_size.repeat(2)
                image_size = image_size.view(-1, 2)
        if kwargs.get('K') is None:
            # No explicit K: normalize focal_length/principal_point to
            # (N, 2) tensors and build a default projection matrix.
            focal_length = kwargs.get('focal_length', None)
            if focal_length is not None:
                if not isinstance(focal_length, Iterable):
                    focal_length = [focal_length, focal_length]
                if not torch.is_tensor(focal_length):
                    focal_length = torch.FloatTensor(focal_length).view(-1, 2)
                elif focal_length.numel() == 1:
                    focal_length = focal_length.repeat(2).view(-1, 2)
                kwargs.update(focal_length=focal_length)
            principal_point = kwargs.get('principal_point', None)
            if principal_point is not None:
                if isinstance(principal_point, (tuple, list)):
                    principal_point = torch.FloatTensor(principal_point)
                principal_point = principal_point.view(-1, 2)
                kwargs.update(principal_point=principal_point)
            K = self.get_default_projection_matrix(**kwargs)
            # src and dst conventions are both 'pytorch3d' here: this call
            # only normalizes the default K's shape/space, not convention.
            K, _, _ = convert_camera_matrix(
                K=K,
                is_perspective=is_perspective,
                convention_src='pytorch3d',
                convention_dst='pytorch3d',
                in_ndc_src=in_ndc,
                in_ndc_dst=in_ndc,
                resolution_dst=image_size,
                resolution_src=image_size)
            kwargs.update(K=K)
        # Convert the (possibly user-supplied) matrices from the source
        # convention into pytorch3d convention for rendering/projection.
        K, R, T = convert_camera_matrix(
            K=kwargs.get('K'),
            R=kwargs.get('R', None),
            T=kwargs.get('T', None),
            convention_src=convention,
            convention_dst='pytorch3d',
            is_perspective=is_perspective,
            in_ndc_src=in_ndc,
            in_ndc_dst=in_ndc,
            resolution_src=image_size,
            resolution_dst=image_size)
        if image_size is not None:
            # Broadcast a single image size across the whole camera batch.
            if image_size.shape[0] == 1:
                image_size = image_size.repeat(K.shape[0], 1)
            kwargs.update(image_size=image_size)
            kwargs.update(resolution=image_size)
        kwargs.update(K=K, R=R, T=T)
        super().__init__(**kwargs)

    def get_camera_plane_normals(self, **kwargs) -> torch.Tensor:
        """Get the identity normal vector which stretches out of the camera
        plane.

        Could pass `R` to override the camera extrinsic rotation matrix.

        Returns:
            torch.Tensor: shape will be (N, 3)
        """
        # (0, 0, 1) in view space, mapped back to world space via the
        # inverse world-to-view transform.
        normals = torch.Tensor([0, 0, 1]).view(1, 3).to(self.device)
        w2v_trans = self.get_world_to_view_transform(**kwargs)
        normals = w2v_trans.inverse().transform_normals(normals)
        return normals.view(-1, 3)

    def compute_depth_of_points(self, points: torch.Tensor) -> torch.Tensor:
        """Compute depth of points to the camera plane.

        Args:
            points ([torch.Tensor]): shape should be (batch_size, ..., 3).

        Returns:
            torch.Tensor: the view-space z coordinate of each point,
                shape (batch_size, ..., 1).
        """
        world_to_view_transform = self.get_world_to_view_transform()
        world_to_view_points = world_to_view_transform.transform_points(
            points.to(self.device))
        # Depth is the z component in view space.
        return world_to_view_points[..., 2:3]

    def compute_normal_of_meshes(self, meshes: Meshes) -> torch.Tensor:
        """Compute vertex normals of meshes in the camera view.

        Args:
            meshes (Meshes): pytorch3d meshes whose padded vertex normals
                are transformed into view space.

        Returns:
            torch.Tensor: view-space vertex normals (padded layout).
        """
        world_to_view_transform = self.get_world_to_view_transform()
        world_to_view_normals = world_to_view_transform.transform_normals(
            meshes.verts_normals_padded().to(self.device))
        return world_to_view_normals

    def __repr__(self):
        """Rewrite __repr__

        Returns:
            str: print the information of cameras (N, in_ndc, device).
        """
        main_str = super().__repr__()
        # Keep only the class name from the parent repr, then append the
        # batch size, space and device.
        main_str = main_str.split(')')[0]
        main_str += f'N: {self.__len__()}, in_ndc: {self.in_ndc()}, '
        main_str += f'device: {self.device})'
        return main_str

    def get_image_size(self):
        """Returns the image size, if provided, expected in the form of
        (height, width) The image size is used for conversion of projected
        points to screen coordinates."""
        # NOTE(review): if neither `image_size` nor `resolution` exists as
        # an attribute, `image_size` is never bound and this raises
        # UnboundLocalError -- presumably __init__ always sets at least
        # one of them; confirm before relying on this path.
        if hasattr(self, 'image_size'):
            image_size = self.image_size
        if hasattr(self, 'resolution'):
            if self.resolution is not None:
                image_size = self.resolution
            else:
                image_size = None
        return image_size

    def __getitem__(
            self, index: Union[slice, int, torch.Tensor, List,
                               Tuple]) -> 'MMCamerasBase':
        """Slice the cameras by batch dim.

        Args:
            index (Union[slice, int, torch.Tensor, List, Tuple]):
                index for slicing.

        Returns:
            MMCamerasBase: sliced cameras.
        """
        # Normalize a scalar index to a list so the batch dim is kept.
        if isinstance(index, int):
            index = [index]
        return self.__class__(
            K=self.K[index],
            R=self.R[index],
            T=self.T[index],
            image_size=self.get_image_size()[index]
            if self.get_image_size() is not None else None,
            in_ndc=self.in_ndc(),
            convention='pytorch3d',
            device=self.device)

    def extend(self, N) -> 'MMCamerasBase':
        """Create new camera class which contains each input camera N times.

        Args:
            N: number of new copies of each camera.

        Returns:
            MMCamerasBase object.
        """
        return self.__class__(
            K=self.K.repeat(N, 1, 1),
            R=self.R.repeat(N, 1, 1),
            T=self.T.repeat(N, 1),
            image_size=self.get_image_size(),
            in_ndc=self.in_ndc(),
            convention='pytorch3d',
            device=self.device)

    def extend_(self, N):
        """Extend camera in place: repeat K/R/T N times along the batch
        dim and scale the cached batch size accordingly."""
        self.K = self.K.repeat(N, 1, 1)
        self.R = self.R.repeat(N, 1, 1)
        self.T = self.T.repeat(N, 1)
        self._N = self._N * N

    # NOTE(review): the first parameter is named `cls` but no
    # `@classmethod` decorator is visible -- it was likely stripped when
    # this snippet was extracted; callers elsewhere invoke it on the
    # class. Confirm against the original module.
    def get_default_projection_matrix(cls, ):
        """Class method. Calculate the projective transformation matrix by
        default parameters.

        Args:
            **kwargs: parameters for the projection can be passed in as
                keyword arguments to override the default values set in
                `__init__`.

        Return:
            a `torch.Tensor` which represents a batch of projection
            matrices K of shape (N, 4, 4)
        """
        # Abstract: concrete camera classes must implement this.
        raise NotImplementedError()

    def to_screen_(self, **kwargs) -> 'MMCamerasBase':
        """Convert to screen in place (no-op with a message if already in
        screen space)."""
        if self.in_ndc():
            if self.get_image_size() is None:
                self.image_size = kwargs.get('image_size')
            else:
                self.image_size = self.get_image_size()
            self.K = convert_ndc_to_screen(
                K=self.K,
                resolution=self.image_size,
                is_perspective=self._is_perspective)
            self._in_ndc = False
        else:
            print('Redundant operation, already in screen.')

    def to_ndc_(self, **kwargs) -> 'MMCamerasBase':
        """Convert to ndc in place (no-op with a message if already in
        ndc space)."""
        if self.in_ndc():
            print('Redundant operation, already in ndc.')
        else:
            if self.get_image_size() is None:
                self.image_size = kwargs.get('image_size')
            else:
                self.image_size = self.get_image_size()
            self.K = convert_screen_to_ndc(
                K=self.K,
                resolution=self.image_size,
                is_perspective=self._is_perspective)
            self._in_ndc = True

    def to_screen(self, **kwargs) -> 'MMCamerasBase':
        """Return a new screen-space camera.

        NOTE(review): when the camera is already in screen space this
        prints a message and implicitly returns None -- callers must not
        chain on the result in that case.
        """
        if self.in_ndc():
            if self.get_image_size() is None:
                self.image_size = kwargs.get('image_size')
            else:
                self.image_size = self.get_image_size()
            K = convert_ndc_to_screen(
                K=self.K,
                resolution=self.image_size,
                is_perspective=self._is_perspective)
            return self.__class__(
                K=K,
                R=self.R,
                T=self.T,
                in_ndc=False,
                resolution=self.image_size)
        else:
            print('Redundant operation, already in screen.')

    def to_ndc(self, **kwargs) -> 'MMCamerasBase':
        """Return a new ndc-space camera.

        NOTE(review): when the camera is already in ndc space this prints
        a message and implicitly returns None -- callers must not chain on
        the result in that case.
        """
        if self.in_ndc():
            print('Redundant operation, already in ndc.')
        else:
            if self.get_image_size() is None:
                self.image_size = kwargs.get('image_size')
            else:
                self.image_size = self.get_image_size()
            K = convert_screen_to_ndc(
                K=self.K,
                resolution=self.image_size,
                is_perspective=self._is_perspective)
            return self.__class__(
                K=K,
                R=self.R,
                T=self.T,
                in_ndc=True,
                resolution=self.image_size)

    def detach(self) -> 'MMCamerasBase':
        """Return a copy of the camera with K/R/T (and image size)
        detached from the autograd graph."""
        image_size = self.image_size.detach(
        ) if self.image_size is not None else None
        return self.__class__(
            K=self.K.detach(),
            R=self.R.detach(),
            T=self.T.detach(),
            in_ndc=self.in_ndc(),
            device=self.device,
            resolution=image_size)

    def concat(self, others) -> 'MMCamerasBase':
        """Concatenate this camera with another camera of the same type.

        NOTE(review): despite the plural name, `others` must be a single
        camera instance -- a list raises TypeError here.
        """
        if isinstance(others, type(self)):
            others = [others]
        else:
            raise TypeError('Could only concat with same type cameras.')
        return concat_cameras([self] + others)
name=('WeakPerspectiveCameras', 'WeakPerspective', 'weakperspective'))
The provided code snippet includes necessary dependencies for implementing the `concat_cameras` function. Write a Python function `def concat_cameras(cameras_list: List[MMCamerasBase]) -> MMCamerasBase` to solve the following problem:
Concat a list of cameras of the same type. Args: cameras_list (List[cameras.CamerasBase]): a list of cameras. Returns: MMCamerasBase: the returned cameras concated following the batch dim.
Here is the function:
def concat_cameras(cameras_list: List[MMCamerasBase]) -> MMCamerasBase:
    """Concat a list of cameras of the same type.

    All cameras must share the same class, NDC/screen space, projection
    type and device; the image size of the first camera is used for the
    result.

    Args:
        cameras_list (List[cameras.CamerasBase]): a list of cameras.

    Returns:
        MMCamerasBase: the returned cameras concated following the batch
            dim.
    """
    K = []
    R = []
    T = []
    is_perspective = cameras_list[0].is_perspective()
    in_ndc = cameras_list[0].in_ndc()
    cam_cls = type(cameras_list[0])
    image_size = cameras_list[0].get_image_size()
    device = cameras_list[0].device
    for cam in cameras_list:
        assert type(cam) is cam_cls
        assert cam.in_ndc() is in_ndc
        assert cam.is_perspective() is is_perspective
        # Compare devices by value: two separately constructed
        # `torch.device` objects for the same device compare equal but are
        # not identical, so the previous `is` check rejected valid input.
        assert cam.device == device
        K.append(cam.K)
        R.append(cam.R)
        T.append(cam.T)
    K = torch.cat(K)
    R = torch.cat(R)
    T = torch.cat(T)
    concated_cameras = cam_cls(
        K=K,
        R=R,
        T=T,
        device=device,
        is_perspective=is_perspective,
        in_ndc=in_ndc,
        image_size=image_size)
    return concated_cameras
14,302 | import math
from typing import Iterable, List, Optional, Tuple, Union
import numpy as np
import torch
from pytorch3d.renderer import cameras
from pytorch3d.structures import Meshes
from pytorch3d.transforms import Transform3d
from mmhuman3d.core.conventions.cameras.convert_convention import (
convert_camera_matrix,
convert_ndc_to_screen,
convert_screen_to_ndc,
convert_world_view,
)
from mmhuman3d.utils.transforms import ee_to_rotmat
from .builder import CAMERAS
class FoVPerspectiveCameras(cameras.FoVPerspectiveCameras, MMCamerasBase):
    """Inherited from Pytorch3D `FoVPerspectiveCameras`."""

    def __init__(
        self,
        znear=1.0,
        zfar=100.0,
        aspect_ratio=1.0,
        fov=60.0,
        degrees: bool = True,
        R: Optional[torch.Tensor] = None,
        T: Optional[torch.Tensor] = None,
        K: Optional[torch.Tensor] = None,
        device: Union[torch.device, str] = 'cpu',
        convention: str = 'pytorch3d',
        **kwargs,
    ) -> None:
        """Initialize a camera.

        Args:
            znear (float, optional): Defaults to 1.0.
            zfar (float, optional): Defaults to 100.0.
            aspect_ratio (float, optional): Defaults to 1.0.
            fov (float, optional): Defaults to 60.0.
            degrees (bool, optional): whether `fov` is in degrees.
                Defaults to True.
            R (Optional[torch.Tensor], optional): Defaults to None.
            T (Optional[torch.Tensor], optional): Defaults to None.
            K (Optional[torch.Tensor], optional): Defaults to None.
            device (Union[torch.device, str], optional): Defaults to 'cpu'.
            convention (str, optional): Defaults to 'pytorch3d'.
        """
        # FoV cameras are always perspective and defined in NDC space;
        # user-supplied `in_ndc`/`is_perspective` are discarded.
        kwargs.update(
            _in_ndc=True,
            _is_perspective=True,
        )
        kwargs.pop('in_ndc', None)
        kwargs.pop('is_perspective', None)
        # super(cameras.FoVPerspectiveCameras, self) skips pytorch3d's
        # FoVPerspectiveCameras.__init__ in the MRO and dispatches to
        # MMCamerasBase.__init__, which handles convention conversion.
        super(cameras.FoVPerspectiveCameras, self).__init__(
            device=device,
            znear=znear,
            zfar=zfar,
            aspect_ratio=aspect_ratio,
            fov=fov,
            R=R,
            T=T,
            K=K,
            convention=convention,
            **kwargs,
        )
        self.degrees = degrees

    def __getitem__(self, index: Union[slice, int, torch.Tensor, List,
                                       Tuple]):
        """Slice the cameras by batch dim.

        Args:
            index (Union[slice, int, torch.Tensor, List, Tuple]):
                index for slicing.

        Returns:
            MMCamerasBase: sliced cameras.
        """
        # Bypass pytorch3d's __getitem__ and use MMCamerasBase's slicing.
        return super(cameras.FoVPerspectiveCameras, self).__getitem__(index)

    def get_ndc_camera_transform(self, **kwargs) -> Transform3d:
        # Drop the `cameras` kwarg that some pytorch3d renderers pass in.
        kwargs.pop('cameras', None)
        return super().get_ndc_camera_transform(**kwargs)

    def transform_points_screen(self,
                                points,
                                eps: Optional[float] = None,
                                **kwargs) -> torch.Tensor:
        # Drop the `cameras` kwarg that some pytorch3d renderers pass in.
        kwargs.pop('cameras', None)
        return super().transform_points_screen(points, eps, **kwargs)

    # NOTE(review): the first parameter is named `cls` but no
    # `@classmethod` decorator is visible -- it was likely stripped when
    # this snippet was extracted; `compute_orbit_cameras` calls it on the
    # class with keyword args only. Confirm against the original module.
    def get_default_projection_matrix(cls, **args) -> torch.Tensor:
        """Class method. Calculate the projective transformation matrix by
        default parameters.

        Args:
            **kwargs: parameters for the projection can be passed in as
                keyword arguments to override the default values set in
                `__init__`.

        Return:
            a `torch.Tensor` which represents a batch of projection
            matrices K of shape (N, 4, 4)
        """
        znear = args.get('znear', 1.0)
        zfar = args.get('zfar', 100.0)
        aspect_ratio = args.get('aspect_ratio', 1.0)
        fov = args.get('fov', 60.0)
        degrees = args.get('degrees', True)
        batch_size = args.get('batch_size', 1)
        K = torch.zeros((1, 4, 4), dtype=torch.float32)
        if degrees:
            fov = (math.pi / 180) * fov
        if not torch.is_tensor(fov):
            fov = torch.tensor(fov)
        # Frustum extents at the near plane, from the vertical FoV and
        # the aspect ratio.
        tanHalfFov = torch.tan((fov / 2))
        max_y = tanHalfFov * znear
        min_y = -max_y
        max_x = max_y * aspect_ratio
        min_x = -max_x
        z_sign = 1.0
        K[:, 0, 0] = 2.0 * znear / (max_x - min_x)
        K[:, 1, 1] = 2.0 * znear / (max_y - min_y)
        K[:, 0, 2] = (max_x + min_x) / (max_x - min_x)
        K[:, 1, 2] = (max_y + min_y) / (max_y - min_y)
        K[:, 3, 2] = z_sign
        # Map view-space z in [znear, zfar] to NDC depth in [0, 1].
        K[:, 2, 2] = z_sign * zfar / (zfar - znear)
        K[:, 2, 3] = -(zfar * znear) / (zfar - znear)
        K = K.repeat(batch_size, 1, 1)
        return K

    # FoV cameras are fixed in NDC space, so ndc/screen conversions are
    # deliberately unsupported.
    def to_ndc_(self, **kwargs):
        """Not implemented."""
        raise NotImplementedError()

    def to_screen_(self, **kwargs):
        """Not implemented."""
        raise NotImplementedError()

    def to_ndc(self, **kwargs):
        """Not implemented."""
        raise NotImplementedError()

    def to_screen(self, **kwargs):
        """Not implemented."""
        raise NotImplementedError()
name=('OrthographicCameras', 'Orthographic', 'orthographic'))
def ee_to_rotmat(euler_angle: Union[torch.Tensor, numpy.ndarray],
                 convention='xyz') -> Union[torch.Tensor, numpy.ndarray]:
    """Convert euler angles to rotation matrices.

    Args:
        euler_angle (Union[torch.Tensor, numpy.ndarray]): input shape
            should be (..., 3). ndim of input is unlimited.
        convention (str, optional): Convention string of three letters
            from {"x", "y", and "z"}. Defaults to 'xyz'.

    Returns:
        Union[torch.Tensor, numpy.ndarray]: shape would be (..., 3, 3).

    Raises:
        ValueError: if the last dimension of `euler_angle` is not 3.
    """
    if euler_angle.shape[-1] != 3:
        # Fixed: a stray literal 'f' from a misplaced f-string prefix used
        # to leak into the message ("shape f{...}").
        raise ValueError(
            f'Invalid input euler angles shape {euler_angle.shape}.')
    # `Compose` presumably wraps the torch-only `euler_angles_to_matrix`
    # so numpy input round-trips too -- confirm in utils.transforms. The
    # underlying pytorch3d function expects upper-case convention letters.
    t = Compose([euler_angles_to_matrix])
    return t(euler_angle, convention.upper())
The provided code snippet includes necessary dependencies for implementing the `compute_orbit_cameras` function. Write a Python function `def compute_orbit_cameras( K: Union[torch.Tensor, np.ndarray, None] = None, elev: float = 0, azim: float = 0, dist: float = 2.7, at: Union[torch.Tensor, List, Tuple] = (0, 0, 0), batch_size: int = 1, orbit_speed: Union[float, Tuple[float, float]] = 0, dist_speed: Optional[float] = 0, convention: str = 'pytorch3d', ) -> Union[torch.Tensor, torch.Tensor, torch.Tensor]` to solve the following problem:
Generate a sequence of moving cameras following an orbit. Args: K (Union[torch.Tensor, np.ndarray, None], optional): Intrinsic matrix. Will generate a default K if None. Defaults to None. elev (float, optional): This is the angle between the vector from the object to the camera, and the horizontal plane y = 0 (xz-plane). Defaults to 0. azim (float, optional): angle in degrees or radians. The vector from the object to the camera is projected onto a horizontal plane y = 0. azim is the angle between the projected vector and a reference vector at (0, 0, 1) on the reference plane (the horizontal plane). Defaults to 0. dist (float, optional): distance of the camera from the object. Defaults to 2.7. at (Union[torch.Tensor, List, Tuple], optional): the position of the object(s) in world coordinates. Defaults to (0, 0, 0). batch_size (int, optional): number of frames. Defaults to 1. orbit_speed (Union[float, Tuple[float, float]], optional): degree speed of camera moving along the orbit. Could be one or two number. One number for only elev speed, two number for both. Defaults to 0. dist_speed (Optional[float], optional): speed of camera moving along the center line. Defaults to 0. convention (str, optional): Camera convention. Defaults to 'pytorch3d'. Returns: Union[torch.Tensor, torch.Tensor, torch.Tensor]: computed K, R, T.
Here is the function:
def compute_orbit_cameras(
    K: Union[torch.Tensor, np.ndarray, None] = None,
    elev: float = 0,
    azim: float = 0,
    dist: float = 2.7,
    at: Union[torch.Tensor, List, Tuple] = (0, 0, 0),
    batch_size: int = 1,
    orbit_speed: Union[float, Tuple[float, float]] = 0,
    dist_speed: Optional[float] = 0,
    convention: str = 'pytorch3d',
) -> Union[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Generate a sequence of moving cameras following an orbit.

    Args:
        K (Union[torch.Tensor, np.ndarray, None], optional):
            Intrinsic matrix. Will generate a default K if None.
            Defaults to None.
        elev (float, optional): This is the angle between the
            vector from the object to the camera, and the horizontal
            plane y = 0 (xz-plane).
            Defaults to 0.
        azim (float, optional): angle in degrees or radians. The vector
            from the object to the camera is projected onto a horizontal
            plane y = 0. azim is the angle between the projected vector and
            a reference vector at (0, 0, 1) on the reference plane (the
            horizontal plane).
            Defaults to 0.
        dist (float, optional): distance of the camera from the object.
            Defaults to 2.7.
        at (Union[torch.Tensor, List, Tuple], optional):
            the position of the object(s) in world coordinates.
            Defaults to (0, 0, 0).
        batch_size (int, optional): number of frames. Defaults to 1.
        orbit_speed (Union[float, Tuple[float, float]], optional):
            degree speed of camera moving along the orbit.
            Could be one or two number. One number for only azim speed,
            two number for both azim and elev.
            Defaults to 0.
        dist_speed (Optional[float], optional):
            speed of camera moving along the center line.
            Defaults to 0.
        convention (str, optional): Camera convention. Defaults to
            'pytorch3d'.

    Returns:
        Union[torch.Tensor, torch.Tensor, torch.Tensor]: computed K, R, T.
    """
    # A scalar orbit_speed means azimuth-only animation (elev speed 0).
    if not isinstance(orbit_speed, Iterable):
        orbit_speed = (orbit_speed, 0.0)
    if not isinstance(at, torch.Tensor):
        at = torch.Tensor(at)
    at = at.view(1, 3)
    # Animate azim/elev/dist as linear ramps over the batch of frames.
    if batch_size > 1 and orbit_speed[0] != 0:
        azim = torch.linspace(azim, azim + batch_size * orbit_speed[0],
                              batch_size)
    if batch_size > 1 and orbit_speed[1] != 0:
        elev = torch.linspace(elev, elev + batch_size * orbit_speed[1],
                              batch_size)
    if batch_size > 1 and dist_speed != 0:
        dist = torch.linspace(dist, dist + batch_size * dist_speed,
                              batch_size)
    if convention == 'opencv':
        # Rotate `at` by 180 degrees about the x axis (transposed, i.e.
        # inverse rotation) so the look-at computation -- which is done in
        # pytorch3d axes -- matches the opencv world frame.
        rotation_compensate = ee_to_rotmat(
            torch.Tensor([math.pi, 0, 0]).view(1, 3))
        at = rotation_compensate.permute(0, 2, 1) @ at.view(-1, 3, 1)
        at = at.view(1, 3)
    R, T = cameras.look_at_view_transform(
        dist=dist, elev=elev, azim=azim, at=at)
    if K is None:
        K = FoVPerspectiveCameras.get_default_projection_matrix(
            batch_size=batch_size)
    if convention == 'opencv':
        # Apply the same 180-degree x-axis compensation to the extrinsic
        # rotation so the returned R is in the requested convention.
        rotation_compensate = ee_to_rotmat(
            torch.Tensor([math.pi, 0, 0]).view(1, 3))
        R = rotation_compensate.permute(0, 2, 1) @ R
    return K, R, T
14,303 | import math
from typing import Iterable, List, Optional, Tuple, Union
import numpy as np
import torch
from pytorch3d.renderer import cameras
from pytorch3d.structures import Meshes
from pytorch3d.transforms import Transform3d
from mmhuman3d.core.conventions.cameras.convert_convention import (
convert_camera_matrix,
convert_ndc_to_screen,
convert_screen_to_ndc,
convert_world_view,
)
from mmhuman3d.utils.transforms import ee_to_rotmat
from .builder import CAMERAS
class FoVPerspectiveCameras(cameras.FoVPerspectiveCameras, MMCamerasBase):
    """Inherited from Pytorch3D `FoVPerspectiveCameras`.

    FoV perspective cameras are always perspective and always defined in
    NDC space; construction is routed past
    ``cameras.FoVPerspectiveCameras.__init__`` so that ``MMCamerasBase``
    can handle the camera ``convention``.
    """

    def __init__(
        self,
        znear=1.0,
        zfar=100.0,
        aspect_ratio=1.0,
        fov=60.0,
        degrees: bool = True,
        R: Optional[torch.Tensor] = None,
        T: Optional[torch.Tensor] = None,
        K: Optional[torch.Tensor] = None,
        device: Union[torch.device, str] = 'cpu',
        convention: str = 'pytorch3d',
        **kwargs,
    ) -> None:
        """Initialize a camera.

        Args:
            znear (float, optional): near clipping plane. Defaults to 1.0.
            zfar (float, optional): far clipping plane. Defaults to 100.0.
            aspect_ratio (float, optional): Defaults to 1.0.
            fov (float, optional): field-of-view angle. Defaults to 60.0.
            degrees (bool, optional): whether `fov` is given in degrees
                (radians otherwise). Defaults to True.
            R (Optional[torch.Tensor], optional): rotation matrix.
                Defaults to None.
            T (Optional[torch.Tensor], optional): translation vector.
                Defaults to None.
            K (Optional[torch.Tensor], optional): intrinsic matrix.
                Defaults to None.
            device (Union[torch.device, str], optional): Defaults to 'cpu'.
            convention (str, optional): camera convention.
                Defaults to 'pytorch3d'.
        """
        # FoV cameras are perspective and NDC-defined by construction;
        # overwrite any caller-supplied flags so they cannot disagree.
        kwargs.update(
            _in_ndc=True,
            _is_perspective=True,
        )
        kwargs.pop('in_ndc', None)
        kwargs.pop('is_perspective', None)
        # Skip cameras.FoVPerspectiveCameras.__init__ so that the next
        # class in the MRO (MMCamerasBase) consumes `convention`.
        super(cameras.FoVPerspectiveCameras, self).__init__(
            device=device,
            znear=znear,
            zfar=zfar,
            aspect_ratio=aspect_ratio,
            fov=fov,
            R=R,
            T=T,
            K=K,
            convention=convention,
            **kwargs,
        )
        self.degrees = degrees

    def __getitem__(self, index: Union[slice, int, torch.Tensor, List, Tuple]):
        """Slice the cameras by batch dim.

        Args:
            index (Union[slice, int, torch.Tensor, List, Tuple]):
                index for slicing.

        Returns:
            MMCamerasBase: sliced cameras.
        """
        # Bypass pytorch3d's __getitem__ in favor of MMCamerasBase's.
        return super(cameras.FoVPerspectiveCameras, self).__getitem__(index)

    def get_ndc_camera_transform(self, **kwargs) -> Transform3d:
        """Return the NDC transform, dropping any `cameras` kwarg."""
        kwargs.pop('cameras', None)
        return super().get_ndc_camera_transform(**kwargs)

    def transform_points_screen(self,
                                points,
                                eps: Optional[float] = None,
                                **kwargs) -> torch.Tensor:
        """Transform points to screen space, dropping any `cameras` kwarg."""
        kwargs.pop('cameras', None)
        return super().transform_points_screen(points, eps, **kwargs)

    # FIX: the decorator was missing although the docstring says
    # "Class method" and callers invoke it on the class itself
    # (e.g. FoVPerspectiveCameras.get_default_projection_matrix(...)),
    # which raised TypeError for the unbound `cls` argument.
    @classmethod
    def get_default_projection_matrix(cls, **args) -> torch.Tensor:
        """Class method. Calculate the projective transformation matrix by
        default parameters.

        Args:
            **args: parameters for the projection can be passed in as keyword
                arguments to override the default values set in `__init__`.

        Return:
            a `torch.Tensor` which represents a batch of projection matrices K
            of shape (N, 4, 4)
        """
        znear = args.get('znear', 1.0)
        zfar = args.get('zfar', 100.0)
        aspect_ratio = args.get('aspect_ratio', 1.0)
        fov = args.get('fov', 60.0)
        degrees = args.get('degrees', True)
        batch_size = args.get('batch_size', 1)
        K = torch.zeros((1, 4, 4), dtype=torch.float32)
        if degrees:
            fov = (math.pi / 180) * fov
        if not torch.is_tensor(fov):
            fov = torch.tensor(fov)
        # Frustum extents at the near plane.
        tanHalfFov = torch.tan((fov / 2))
        max_y = tanHalfFov * znear
        min_y = -max_y
        max_x = max_y * aspect_ratio
        min_x = -max_x
        z_sign = 1.0
        K[:, 0, 0] = 2.0 * znear / (max_x - min_x)
        K[:, 1, 1] = 2.0 * znear / (max_y - min_y)
        K[:, 0, 2] = (max_x + min_x) / (max_x - min_x)
        K[:, 1, 2] = (max_y + min_y) / (max_y - min_y)
        K[:, 3, 2] = z_sign
        # Map depth to [0, 1] between the near and far planes.
        K[:, 2, 2] = z_sign * zfar / (zfar - znear)
        K[:, 2, 3] = -(zfar * znear) / (zfar - znear)
        K = K.repeat(batch_size, 1, 1)
        return K

    def to_ndc_(self, **kwargs):
        """Not implemented: FoV cameras are already defined in NDC."""
        raise NotImplementedError()

    def to_screen_(self, **kwargs):
        """Not implemented."""
        raise NotImplementedError()

    def to_ndc(self, **kwargs):
        """Not implemented: FoV cameras are already defined in NDC."""
        raise NotImplementedError()

    def to_screen(self, **kwargs):
        """Not implemented."""
        raise NotImplementedError()
name=('OrthographicCameras', 'Orthographic', 'orthographic'))
def convert_camera_matrix(
    K: Optional[Union[torch.Tensor, np.ndarray]] = None,
    R: Optional[Union[torch.Tensor, np.ndarray]] = None,
    T: Optional[Union[torch.Tensor, np.ndarray]] = None,
    is_perspective: bool = True,
    convention_src: str = 'opencv',
    convention_dst: str = 'pytorch3d',
    in_ndc_src: bool = True,
    in_ndc_dst: bool = True,
    resolution_src: Optional[Union[int, Tuple[int, int], torch.Tensor,
                                   np.ndarray]] = None,
    resolution_dst: Optional[Union[int, Tuple[int, int], torch.Tensor,
                                   np.ndarray]] = None,
    camera_conventions: dict = CAMERA_CONVENTIONS,
) -> Tuple[Union[torch.Tensor, np.ndarray], Union[torch.Tensor, np.ndarray],
           Union[torch.Tensor, np.ndarray]]:
    """Convert the intrinsic matrix K and extrinsic matrix [R|T] from source
    convention to destination convention.

    Args:
        K (Union[torch.Tensor, np.ndarray]): Intrinsic matrix,
            shape should be (batch_size, 4, 4) or (batch_size, 3, 3).
            Will be ignored if None.
        R (Optional[Union[torch.Tensor, np.ndarray]], optional):
            Extrinsic rotation matrix. Shape should be (batch_size, 3, 3).
            Will be identity if None.
            Defaults to None.
        T (Optional[Union[torch.Tensor, np.ndarray]], optional):
            Extrinsic translation matrix. Shape should be (batch_size, 3).
            Will be zeros if None.
            Defaults to None.
        is_perspective (bool, optional): whether is perspective projection.
            Defaults to True.
        convention_src (str, optional): convention of source camera,
        convention_dst (str, optional): convention of destination camera,

            We define the convention of cameras by the order of right, front
            and up.
            E.g., the first one is pyrender and its convention should be
            '+x+z+y'. '+' could be ignored.
            The second one is opencv and its convention should be '+x-z-y'.
            The third one is pytorch3d and its convention should be '-xzy'.
                opengl(pyrender)     opencv            pytorch3d
                y                   z                     y
                |                  /                      |
                |                 /                       |
                |_______x        /________x     x________ |
                /                |                        /
               /                 |                       /
            z /                y |                    z /
        in_ndc_src (bool, optional): Whether is the source camera defined
            in ndc.
            Defaults to True.
        in_ndc_dst (bool, optional): Whether is the destination camera defined
            in ndc.
            Defaults to True.

            In camera_conventions, we define these args as:
            1). `left_mm_ex` means extrinsic matrix [`R`|`T`] is left
                matrix multiplication defined.
            2). `left_mm_in` means intrinsic matrix `K` is left matrix
                multiplication defined.
            3). `view_to_world` means extrinsic matrix [`R`|`T`] is defined
                as view to world.
        resolution_src (Optional[Union[int, Tuple[int, int], torch.Tensor,
            np.ndarray]], optional):
            Source camera image size of (height, width).
            Required if defined in screen.
            Will be square if int.
            Shape should be (2,) if `array` or `tensor`.
            Defaults to None.
        resolution_dst (Optional[Union[int, Tuple[int, int], torch.Tensor,
            np.ndarray]], optional):
            Destination camera image size of (height, width).
            Required if defined in screen.
            Will be square if int.
            Shape should be (2,) if `array` or `tensor`.
            Defaults to None.
        camera_conventions: (dict, optional): `dict` containing
            pre-defined camera convention information.
            Defaults to CAMERA_CONVENTIONS.

    Raises:
        TypeError: K, R, T should all be `torch.Tensor` or `np.ndarray`.

    Returns:
        Tuple[Union[torch.Tensor, None], Union[torch.Tensor, None],
            Union[torch.Tensor, None]]:
            Converted K, R, T matrix of `tensor`.
    """
    convention_dst = convention_dst.lower()
    convention_src = convention_src.lower()
    # FIX: look conventions up in the `camera_conventions` argument instead
    # of always using the module-level CAMERA_CONVENTIONS, so that a
    # caller-supplied convention dict is actually honored (behavior is
    # unchanged for the default argument).
    assert convention_dst in camera_conventions
    assert convention_src in camera_conventions
    left_mm_ex_src = camera_conventions[convention_src].get(
        'left_mm_extrinsic', True)
    view_to_world_src = camera_conventions[convention_src].get(
        'view_to_world', False)
    left_mm_in_src = camera_conventions[convention_src].get(
        'left_mm_intrinsic', False)
    left_mm_ex_dst = camera_conventions[convention_dst].get(
        'left_mm_extrinsic', True)
    view_to_world_dst = camera_conventions[convention_dst].get(
        'view_to_world', False)
    left_mm_in_dst = camera_conventions[convention_dst].get(
        'left_mm_intrinsic', False)
    sign_src, axis_src = enc_camera_convention(convention_src,
                                               camera_conventions)
    sign_dst, axis_dst = enc_camera_convention(convention_dst,
                                               camera_conventions)
    # Per-axis sign flip needed to move from src to dst axes.
    sign = torch.Tensor(sign_dst) / torch.Tensor(sign_src)

    type_ = []
    for x in [K, R, T]:
        if x is not None:
            type_.append(type(x))
    if len(type_) > 0:
        if not all(x == type_[0] for x in type_):
            raise TypeError('Input type should be the same.')
    use_numpy = False
    if np.ndarray in type_:
        use_numpy = True
    # convert raw matrix to tensor
    if isinstance(K, np.ndarray):
        new_K = torch.Tensor(K)
    elif K is None:
        new_K = None
    elif isinstance(K, torch.Tensor):
        new_K = K.clone()
    else:
        raise TypeError(
            f'K should be `torch.Tensor` or `numpy.ndarray`, type(K): '
            f'{type(K)}')

    if isinstance(R, np.ndarray):
        new_R = torch.Tensor(R).view(-1, 3, 3)
    elif R is None:
        new_R = torch.eye(3, 3)[None]
    elif isinstance(R, torch.Tensor):
        new_R = R.clone().view(-1, 3, 3)
    else:
        raise TypeError(
            f'R should be `torch.Tensor` or `numpy.ndarray`, type(R): '
            f'{type(R)}')

    if isinstance(T, np.ndarray):
        new_T = torch.Tensor(T).view(-1, 3)
    elif T is None:
        new_T = torch.zeros(1, 3)
    elif isinstance(T, torch.Tensor):
        new_T = T.clone().view(-1, 3)
    else:
        raise TypeError(
            f'T should be `torch.Tensor` or `numpy.ndarray`, type(T): '
            f'{type(T)}')

    if axis_dst != axis_src:
        new_R = ee_to_rotmat(
            rotmat_to_ee(new_R, convention=axis_src), convention=axis_dst)
    # convert extrinsic to world_to_view
    if view_to_world_src is True:
        new_R, new_T = convert_world_view(new_R, new_T)
    # right mm to left mm
    if (not left_mm_ex_src) and left_mm_ex_dst:
        new_R *= sign.to(new_R.device)
        new_R = new_R.permute(0, 2, 1)
    # left mm to right mm
    elif left_mm_ex_src and (not left_mm_ex_dst):
        new_R = new_R.permute(0, 2, 1)
        new_R *= sign.to(new_R.device)
    # right_mm to right mm
    elif (not left_mm_ex_dst) and (not left_mm_ex_src):
        new_R *= sign.to(new_R.device)
    # left mm to left mm
    elif left_mm_ex_src and left_mm_ex_dst:
        # sign flips act on rows here, hence the (3, 1) view.
        new_R *= sign.view(3, 1).to(new_R.device)
    new_T *= sign.to(new_T.device)
    # convert extrinsic to as definition
    if view_to_world_dst is True:
        new_R, new_T = convert_world_view(new_R, new_T)

    # in ndc or in screen
    if in_ndc_dst is False and in_ndc_src is True:
        assert resolution_dst is not None, \
            'dst in screen, should specify resolution_dst.'
    if in_ndc_src is False and in_ndc_dst is True:
        # FIX: the message previously named resolution_dst.
        assert resolution_src is not None, \
            'src in screen, should specify resolution_src.'
    if resolution_dst is None:
        resolution_dst = 2.0
    if resolution_src is None:
        resolution_src = 2.0

    if new_K is not None:
        if left_mm_in_src is False and left_mm_in_dst is True:
            new_K = new_K.permute(0, 2, 1)
        if new_K.shape[-2:] == (3, 3):
            new_K = convert_K_3x3_to_4x4(new_K, is_perspective)
        # src in ndc, dst in screen
        if in_ndc_src is True and (in_ndc_dst is False):
            new_K = convert_ndc_to_screen(
                K=new_K,
                is_perspective=is_perspective,
                sign=sign.to(new_K.device),
                resolution=resolution_dst)
        # src in screen, dst in ndc
        elif in_ndc_src is False and in_ndc_dst is True:
            new_K = convert_screen_to_ndc(
                K=new_K,
                is_perspective=is_perspective,
                sign=sign.to(new_K.device),
                resolution=resolution_src)
        # src in ndc, dst in ndc
        elif in_ndc_src is True and in_ndc_dst is True:
            if is_perspective:
                new_K[:, 0, 2] *= sign[0].to(new_K.device)
                new_K[:, 1, 2] *= sign[1].to(new_K.device)
            else:
                new_K[:, 0, 3] *= sign[0].to(new_K.device)
                new_K[:, 1, 3] *= sign[1].to(new_K.device)
        # src in screen, dst in screen
        else:
            pass
        if left_mm_in_src is True and left_mm_in_dst is False:
            new_K = new_K.permute(0, 2, 1)

    # Broadcast singleton batch dims so all outputs share one batch size.
    # FIX: guard against K being None (documented as allowed); previously
    # this crashed on `new_K.shape`.
    num_batch = max(new_R.shape[0], new_T.shape[0])
    if new_K is not None:
        num_batch = max(num_batch, new_K.shape[0])
        if new_K.shape[0] == 1:
            new_K = new_K.repeat(num_batch, 1, 1)
    if new_R.shape[0] == 1:
        new_R = new_R.repeat(num_batch, 1, 1)
    if new_T.shape[0] == 1:
        new_T = new_T.repeat(num_batch, 1)

    if use_numpy:
        if isinstance(new_K, torch.Tensor):
            new_K = new_K.cpu().numpy()
        if isinstance(new_R, torch.Tensor):
            new_R = new_R.cpu().numpy()
        if isinstance(new_T, torch.Tensor):
            new_T = new_T.cpu().numpy()
    return new_K, new_R, new_T
def convert_world_view(
    R: Union[torch.Tensor, np.ndarray], T: Union[torch.Tensor, np.ndarray]
) -> Tuple[Union[torch.Tensor, np.ndarray], Union[torch.Tensor, np.ndarray]]:
    """Convert between view_to_world and world_to_view defined extrinsic
    matrix.

    The conversion is an involution: applying it twice returns the original
    (R, T). Inputs are not modified in place.

    Args:
        R (Union[torch.Tensor, np.ndarray]): extrinsic rotation matrix.
            Shape should be (batch, 3, 3).
        T (Union[torch.Tensor, np.ndarray]): extrinsic translation matrix.
            Shape should be (batch, 3).

    Raises:
        TypeError: R and T should be of the same type.

    Returns:
        Tuple[Union[torch.Tensor, np.ndarray], Union[torch.Tensor,
            np.ndarray]]: output R, T.
    """
    if not (type(R) is type(T)):
        raise TypeError(
            f'R: {type(R)}, T: {type(T)} should have the same type.')
    if isinstance(R, torch.Tensor):
        R = R.clone()
        T = T.clone()
        # For a rotation matrix the inverse is the transpose:
        # R' = R^T, T' = -R' @ T.
        R = R.permute(0, 2, 1)
        T = -(R @ T.view(-1, 3, 1)).view(-1, 3)
    elif isinstance(R, np.ndarray):
        R = R.copy()
        T = T.copy()
        R = R.transpose(0, 2, 1)
        T = -(R @ T.reshape(-1, 3, 1)).reshape(-1, 3)
    else:
        raise TypeError(f'R: {type(R)}, T: {type(T)} should be torch.Tensor '
                        f'or numpy.ndarray.')
    return R, T
The provided code snippet includes necessary dependencies for implementing the `compute_direction_cameras` function. Write a Python function `def compute_direction_cameras( K: Union[torch.Tensor, np.ndarray, None] = None, at: Union[torch.Tensor, List, Tuple, None] = None, eye: Union[torch.Tensor, List, Tuple, None] = None, plane: Union[Iterable[torch.Tensor], None] = None, dist: float = 1.0, batch_size: int = 1, dist_speed: float = 0.0, z_vec: Union[torch.Tensor, List, Tuple, None] = None, y_vec: Union[torch.Tensor, List, Tuple] = (0, 1, 0), convention: str = 'pytorch3d', ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]` to solve the following problem:
Generate a sequence of moving cameras along a direction. We need a `z_vec`, `y_vec` to generate `x_vec` so as to get the `R` matrix. And we need `eye` as `T` matrix. `K` matrix could be set or use default. We recommend `y_vec` as default (0, 1, 0), and it will be orthogonal decomposed. The `x_vec` will be generated by cross production from `y_vec` and `x_vec`. You can set `z_vec` by: 1. set `at`, `dist`, `dist_speed`, `plane`, `batch_size` to get `eye`, then get `z_vec`. 2. set `at`, `eye` directly and get `z_vec`. 3. set `z_vec` directly and: 1). set `eye` and `dist`. 2). set `at`, `dist`, `dist_speed`, `batch_size` then get `eye`. When we have `eye`, `z_vec`, `y_vec`, we will have `R` and `T`. Args: K (Union[torch.Tensor, np.ndarray, None], optional): Intrinsic matrix. Will generate a default K if None. Defaults to None. at (Union[torch.Tensor, List, Tuple], optional): the position of the object(s) in world coordinates. Required. Defaults to None. eye (Union[torch.Tensor, List, Tuple], optional): the position of the camera(s) in world coordinates. If eye is not None, it will override the camera position derived from plane, dist, dist_speed. Defaults to None. plane (Optional[Iterable[torch.Tensor, List, Tuple]], optional): The plane of your z direction normal. Should be a tuple or list containing two vectors of shape (N, 3). Defaults to None. dist (float, optional): distance to at. Defaults to 1.0. dist_speed (float, optional): distance moving speed. Defaults to 1.0. batch_size (int, optional): number of frames. Defaults to 1. z_vec (Union[torch.Tensor, List, Tuple], optional): z direction of shape (-1, 3). If z_vec is not None, it will override plane, dist, dist_speed. Defaults to None. y_vec (Union[torch.Tensor, List, Tuple], optional): Will only be used when z_vec is used. Defaults to (0, 1, 0). convention (str, optional): Camera convention. Defaults to 'pytorch3d'. Returns: Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: computed K, R, T.
Here is the function:
def compute_direction_cameras(
    K: Union[torch.Tensor, np.ndarray, None] = None,
    at: Union[torch.Tensor, List, Tuple, None] = None,
    eye: Union[torch.Tensor, List, Tuple, None] = None,
    plane: Union[Iterable[torch.Tensor], None] = None,
    dist: float = 1.0,
    batch_size: int = 1,
    dist_speed: float = 0.0,
    z_vec: Union[torch.Tensor, List, Tuple, None] = None,
    y_vec: Union[torch.Tensor, List, Tuple] = (0, 1, 0),
    convention: str = 'pytorch3d',
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Generate a sequence of moving cameras along a direction.

    We need a `z_vec`, `y_vec` to generate `x_vec` so as to get the `R`
    matrix, and we need `eye` as the `T` matrix. The `K` matrix could be set
    or use the default FoV perspective one.
    We recommend `y_vec` as default (0, 1, 0); it will be orthogonally
    decomposed against `z_vec`. The `x_vec` will be generated as the cross
    product of `y_vec` and `z_vec`.
    You can set `z_vec` by: 1. set `at`, `dist`, `dist_speed`, `plane`,
                               `batch_size` to get `eye`, then get `z_vec`.
                            2. set `at`, `eye` directly and get `z_vec`.
                            3. set `z_vec` directly and:
                                1). set `eye` and `dist`.
                                2). set `at`, `dist`, `dist_speed`,
                                    `batch_size` then get `eye`.
    When we have `eye`, `z_vec`, `y_vec`, we will have `R` and `T`.

    Args:
        K (Union[torch.Tensor, np.ndarray, None], optional):
            Intrinsic matrix. Will generate a default K if None.
            Defaults to None.
        at (Union[torch.Tensor, List, Tuple], optional):
            the position of the object(s) in world coordinates.
            Required unless both `z_vec` and `eye` are given.
            Defaults to None.
        eye (Union[torch.Tensor, List, Tuple], optional):
            the position of the camera(s) in world coordinates.
            If eye is not None, it overrides the camera position derived
            from plane, dist, dist_speed.
            Defaults to None.
        plane (Optional[Iterable[torch.Tensor, List, Tuple]], optional):
            The plane of your z direction normal.
            Should be a tuple or list containing two vectors of shape (N, 3).
            Defaults to None.
        dist (float, optional): distance to at.
            Defaults to 1.0.
        dist_speed (float, optional): distance moving speed per frame.
            Defaults to 0.0.
        batch_size (int, optional): number of frames.
            Defaults to 1.
        z_vec (Union[torch.Tensor, List, Tuple], optional):
            z direction of shape (-1, 3). If z_vec is not None, it
            overrides plane.
            Defaults to None.
        y_vec (Union[torch.Tensor, List, Tuple], optional):
            approximate up direction; orthogonalized against z_vec.
            Defaults to (0, 1, 0).
        convention (str, optional): Camera convention.
            Defaults to 'pytorch3d'.

    Returns:
        Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: computed K, R, T.
    """
    # NOTE(review): normalizes by the norm of the WHOLE tensor, not per
    # row — correct for a single vector, but check for batched input.
    def norm_vec(vec):
        return vec / torch.sqrt((vec * vec).sum())
    if z_vec is None:
        assert at is not None
        at = torch.Tensor(at).view(-1, 3)
        if eye is None:
            # Derive the view direction as the normal of the given plane.
            assert plane is not None
            # NOTE(review): endpoint is dist + batch_size * dist_speed, so
            # the per-frame step is slightly larger than dist_speed —
            # confirm (batch_size - 1) was not intended.
            dist = torch.linspace(dist, dist + batch_size * dist_speed,
                                  batch_size)
            vec1 = torch.Tensor(plane[0]).view(-1, 3)
            norm_vec1 = norm_vec(vec1)
            vec2 = torch.Tensor(plane[1]).view(-1, 3)
            norm_vec2 = norm_vec(vec2)
            # Plane normal = cross product of the two spanning vectors.
            norm = torch.cross(norm_vec1, norm_vec2)
            normed_norm = norm_vec(norm)
            # NOTE(review): normed_norm is (N, 3) and dist is (batch_size,);
            # broadcasting only works when the shapes are compatible —
            # verify for batch_size != 1.
            eye = at + normed_norm * dist
        else:
            eye = torch.Tensor(eye).view(-1, 3)
        # Camera looks from eye toward at; z points opposite the eye->at
        # offset normal.
        norm = eye - at
        normed_norm = norm_vec(norm)
        z_vec = -normed_norm
    else:
        z_vec = torch.Tensor(z_vec).view(-1, 3)
        z_vec = norm_vec(z_vec)
        if eye is None:
            # Walk backwards from `at` along z to place the camera.
            assert at is not None
            at = torch.Tensor(at).view(-1, 3)
            dist = torch.linspace(dist, dist + batch_size * dist_speed,
                                  batch_size)
            eye = -z_vec * dist + at
    eye = torch.Tensor(eye).view(-1, 3)
    assert eye is not None
    z_vec = norm_vec(z_vec)
    normed_norm = -z_vec
    z_vec = z_vec.view(-1, 3)
    y_vec = torch.Tensor(y_vec).view(-1, 3)
    # Gram-Schmidt: remove the z component of y, then renormalize.
    y_vec = y_vec - torch.bmm(y_vec.view(-1, 1, 3), z_vec.view(-1, 3, 1)).view(
        -1, 1) * z_vec
    y_vec = norm_vec(y_vec)
    x_vec = torch.cross(y_vec, z_vec)
    # Stack the basis vectors into a rotation matrix.
    R = torch.cat(
        [x_vec.view(-1, 3, 1),
         y_vec.view(-1, 3, 1),
         z_vec.view(-1, 3, 1)], 1).view(-1, 3, 3)
    T = eye
    R = R.permute(0, 2, 1)
    # eye/R are view-to-world; convert T to the world-to-view convention.
    _, T = convert_world_view(R=R, T=T)
    if K is None:
        K = FoVPerspectiveCameras.get_default_projection_matrix(
            batch_size=batch_size)
    K, R, T = convert_camera_matrix(
        K=K,
        R=R,
        T=T,
        is_perspective=True,
        convention_src='pytorch3d',
        convention_dst=convention)
    return K, R, T
14,304 | import json
import warnings
from enum import Enum
from typing import Any, List, Tuple, Union
import numpy as np
import torch
from mmhuman3d.core.cameras.cameras import PerspectiveCameras
from mmhuman3d.core.conventions.cameras.convert_convention import (
convert_camera_matrix,
convert_K_3x3_to_4x4,
convert_K_4x4_to_3x3,
)
from .builder import build_cameras
The provided code snippet includes necessary dependencies for implementing the `__parse_chessboard_param__` function. Write a Python function `def __parse_chessboard_param__(chessboard_camera_param, name, inverse=True)` to solve the following problem:
Parse a dict loaded from chessboard file into another dict needed by CameraParameter. Args: chessboard_camera_param (dict): A dict loaded from json.load(chessboard_file). name (str): Name of this camera. inverse (bool, optional): Whether to inverse rotation and translation mat. Defaults to True. Returns: dict: A dict of parameters in CameraParameter.to_dict() format.
Here is the function:
def __parse_chessboard_param__(chessboard_camera_param, name, inverse=True):
"""Parse a dict loaded from chessboard file into another dict needed by
CameraParameter.
Args:
chessboard_camera_param (dict):
A dict loaded from json.load(chessboard_file).
name (str):
Name of this camera.
inverse (bool, optional):
Whether to inverse rotation and translation mat.
Defaults to True.
Returns:
dict:
A dict of parameters in CameraParameter.to_dict() format.
"""
camera_param_dict = {}
camera_param_dict['H'] = chessboard_camera_param['imgSize'][1]
camera_param_dict['W'] = chessboard_camera_param['imgSize'][0]
camera_param_dict['in_mat'] = chessboard_camera_param['K']
camera_param_dict['k1'] = 0
camera_param_dict['k2'] = 0
camera_param_dict['k3'] = 0
camera_param_dict['k4'] = 0
camera_param_dict['k5'] = 0
camera_param_dict['p1'] = 0
camera_param_dict['p2'] = 0
camera_param_dict['name'] = name
camera_param_dict['rotation'] = chessboard_camera_param['R']
camera_param_dict['translation'] = chessboard_camera_param['T']
if inverse:
rmatrix = np.linalg.inv(
np.array(camera_param_dict['rotation']).reshape(3, 3))
camera_param_dict['rotation'] = rmatrix.tolist()
tmatrix = np.array(camera_param_dict['translation']).reshape((3, 1))
tvec = -np.dot(rmatrix, tmatrix)
camera_param_dict['translation'] = tvec.reshape((3)).tolist()
return camera_param_dict | Parse a dict loaded from chessboard file into another dict needed by CameraParameter. Args: chessboard_camera_param (dict): A dict loaded from json.load(chessboard_file). name (str): Name of this camera. inverse (bool, optional): Whether to inverse rotation and translation mat. Defaults to True. Returns: dict: A dict of parameters in CameraParameter.to_dict() format. |
14,305 | import json
import warnings
from enum import Enum
from typing import Any, List, Tuple, Union
import numpy as np
import torch
from mmhuman3d.core.cameras.cameras import PerspectiveCameras
from mmhuman3d.core.conventions.cameras.convert_convention import (
convert_camera_matrix,
convert_K_3x3_to_4x4,
convert_K_4x4_to_3x3,
)
from .builder import build_cameras
The provided code snippet includes necessary dependencies for implementing the `__zero_mat_list__` function. Write a Python function `def __zero_mat_list__(n=3)` to solve the following problem:
Return a zero mat in list format. Args: n (int, optional): Length of the edge. Defaults to 3. Returns: list: List[List[int]]
Here is the function:
def __zero_mat_list__(n=3):
"""Return a zero mat in list format.
Args:
n (int, optional):
Length of the edge.
Defaults to 3.
Returns:
list:
List[List[int]]
"""
ret_list = [[0] * n for _ in range(n)]
return ret_list | Return a zero mat in list format. Args: n (int, optional): Length of the edge. Defaults to 3. Returns: list: List[List[int]] |
14,306 | from mmcv.runner import build_optimizer
from mmcv.utils import Registry
The provided code snippet includes necessary dependencies for implementing the `build_optimizers` function. Write a Python function `def build_optimizers(model, cfgs)` to solve the following problem:
Build multiple optimizers from configs. If `cfgs` contains several dicts for optimizers, then a dict for each constructed optimizers will be returned. If `cfgs` only contains one optimizer config, the constructed optimizer itself will be returned. For example, 1) Multiple optimizer configs: .. code-block:: python optimizer_cfg = dict( model1=dict(type='SGD', lr=lr), model2=dict(type='SGD', lr=lr)) The return dict is ``dict('model1': torch.optim.Optimizer, 'model2': torch.optim.Optimizer)`` 2) Single optimizer config: .. code-block:: python optimizer_cfg = dict(type='SGD', lr=lr) The return is ``torch.optim.Optimizer``. Args: model (:obj:`nn.Module`): The model with parameters to be optimized. cfgs (dict): The config dict of the optimizer. Returns: dict[:obj:`torch.optim.Optimizer`] | :obj:`torch.optim.Optimizer`: The initialized optimizers.
Here is the function:
def build_optimizers(model, cfgs):
    """Build multiple optimizers from configs. If `cfgs` contains several
    dicts for optimizers, then a dict for each constructed optimizers will
    be returned. If `cfgs` only contains one optimizer config, the
    constructed optimizer itself will be returned. For example,

    1) Multiple optimizer configs:

    .. code-block:: python

        optimizer_cfg = dict(
            model1=dict(type='SGD', lr=lr),
            model2=dict(type='SGD', lr=lr))

    The return dict is
    ``dict('model1': torch.optim.Optimizer, 'model2': torch.optim.Optimizer)``

    2) Single optimizer config:

    .. code-block:: python

        optimizer_cfg = dict(type='SGD', lr=lr)

    The return is ``torch.optim.Optimizer``.

    Args:
        model (:obj:`nn.Module`): The model with parameters to be optimized.
        cfgs (dict): The config dict of the optimizer.

    Returns:
        dict[:obj:`torch.optim.Optimizer`] | :obj:`torch.optim.Optimizer`:
            The initialized optimizers.
    """
    # Unwrap wrapped models (e.g. (Distributed)DataParallel).
    if hasattr(model, 'module'):
        model = model.module
    # Several optimizer configs: every value is itself a config dict and
    # each key names the submodule it optimizes.
    if all(isinstance(sub_cfg, dict) for sub_cfg in cfgs.values()):
        return {
            module_name: build_optimizer(
                getattr(model, module_name), sub_cfg.copy())
            for module_name, sub_cfg in cfgs.items()
        }
    # Single optimizer config applied to the whole model.
    return build_optimizer(model, cfgs)
14,307 | import warnings
from typing import Iterable, List, Optional, Tuple, Union
import numpy as np
import mmhuman3d.core.conventions.keypoints_mapping as keypoints_mapping
from mmhuman3d.core.renderer.matplotlib3d_renderer import Axes3dJointsRenderer
from mmhuman3d.utils.demo_utils import get_different_colors
from mmhuman3d.utils.keypoint_utils import search_limbs
from mmhuman3d.utils.path_utils import prepare_output_path
def _norm_pose(pose_numpy: np.ndarray, min_value: Union[float, int],
max_value: Union[float, int], mask: Union[np.ndarray, list]):
"""Normalize the poses and make the center close to axis center."""
assert max_value > min_value
pose_np_normed = pose_numpy.copy()
if not mask:
mask = list(range(pose_numpy.shape[-2]))
axis_num = 3
axis_stat = np.zeros(shape=[axis_num, 4])
for axis_index in range(axis_num):
axis_data = pose_np_normed[..., mask, axis_index]
axis_min = np.min(axis_data)
axis_max = np.max(axis_data)
axis_mid = (axis_min + axis_max) / 2.0
axis_span = axis_max - axis_min
axis_stat[axis_index] = np.asarray(
(axis_min, axis_max, axis_mid, axis_span))
target_mid = (max_value + min_value) / 2.0
max_span = np.max(axis_stat[:, 3])
target_span = max_value - min_value
for axis_index in range(axis_num):
pose_np_normed[..., axis_index] = \
pose_np_normed[..., axis_index] - \
axis_stat[axis_index, 2]
pose_np_normed = pose_np_normed / max_span * target_span
pose_np_normed = pose_np_normed + target_mid
return pose_np_normed
class Axes3dJointsRenderer(Axes3dBaseRenderer):
"""Render of joints."""
    def __init__(self):
        """Initialize an empty renderer.

        Cameras, limb connections and frames must be configured via the
        setter methods before rendering.
        """
        self.if_camera_init = False
        # Per-frame camera angle pairs; filled lazily before export.
        self.cam_vector_list = None
        self.if_connection_setup = False
        self.if_frame_updated = False
        # Folder holding per-frame images before video encoding.
        self.temp_path = ''
    def set_connections(self, limbs_connection, limbs_palette):
        """Set body limb connections and their drawing colors.

        Args:
            limbs_connection (dict): maps part name (e.g. 'body') to a list
                of (start_index, end_index) keypoint pairs.
            limbs_palette (Union[np.ndarray, dict]): RGB limb colors,
                either one array or a per-part-name dict.
        """
        self.limbs_connection = limbs_connection
        self.limbs_palette = limbs_palette
        self.if_connection_setup = True
    def render_kp3d_to_video(
        self,
        keypoints_np: np.ndarray,
        output_path: Optional[str] = None,
        convention='opencv',
        fps: Union[float, int] = 30,
        resolution: Iterable[int] = (720, 720),
        visual_range: Iterable[int] = (-100, 100),
        frame_names: Optional[List[str]] = None,
        disable_limbs: bool = False,
        return_array: bool = False,
    ) -> None:
        """Render 3d keypoints to a video or frame folder.

        Requires cameras and limb connections to be set up beforehand.

        Args:
            keypoints_np (np.ndarray): shape of input array should be
                (f * n * J * 3): frames, persons, joints, xyz.
            output_path (Optional[str]): output video path ('.mp4'/'.gif')
                or a frame folder; no files are written if None.
            convention (str, optional): keypoint camera convention used to
                re-orient the poses. Defaults to 'opencv'.
            fps (Union[float, int], optional): fps.
                Defaults to 30.
            resolution (Iterable[int], optional): (width, height) of
                output video.
                Defaults to (720, 720).
            visual_range (Iterable[int], optional): range of axis value;
                computed from the data when None.
                Defaults to (-100, 100).
            frame_names (Optional[List[str]], optional): List of string
                for frame title, no title if None. Defaults to None.
            disable_limbs (bool, optional): whether need to disable drawing
                limbs.
                Defaults to False.
            return_array (bool, optional): passed through to the frame
                exporter to collect rendered frames as arrays.
                Defaults to False.

        Returns:
            The value returned by `_export_frames` (rendered frames); the
            `-> None` annotation understates this.
        """
        assert self.if_camera_init is True
        assert self.if_connection_setup is True
        sign, axis = enc_camera_convention(convention)
        if output_path is not None:
            if check_path_suffix(output_path, ['.mp4', '.gif']):
                # Video output: render frames into a sibling temp folder.
                self.temp_path = os.path.join(
                    Path(output_path).parent,
                    Path(output_path).name + '_output_temp')
                mmcv.mkdir_or_exist(self.temp_path)
                print('make dir', self.temp_path)
                self.remove_temp = True
            else:
                # Folder output: keep the frames where they are written.
                self.temp_path = output_path
                self.remove_temp = False
        else:
            self.temp_path = None
        # Re-orient poses to the requested convention.
        keypoints_np = _set_new_pose(keypoints_np, sign, axis)
        # NOTE(review): if frames were already exported
        # (`if_frame_updated` is True), `image_array` is never assigned and
        # the final return raises UnboundLocalError — confirm intended
        # single-use lifecycle.
        if not self.if_frame_updated:
            if self.cam_vector_list is None:
                self._get_camera_vector_list(
                    frame_number=keypoints_np.shape[0])
            assert len(self.cam_vector_list) == keypoints_np.shape[0]
            if visual_range is None:
                visual_range = self._get_visual_range(keypoints_np)
            else:
                visual_range = np.asarray(visual_range)
                if len(visual_range.shape) == 1:
                    # Broadcast one (min, max) pair to all three axes.
                    one_dim_visual_range = np.expand_dims(visual_range, 0)
                    visual_range = one_dim_visual_range.repeat(3, axis=0)
            image_array = self._export_frames(keypoints_np, resolution,
                                              visual_range, frame_names,
                                              disable_limbs, return_array)
            self.if_frame_updated = True
        if output_path is not None:
            # NOTE(review): only '.mp4' is encoded here; a '.gif'
            # output_path leaves frames in the temp folder — verify.
            if check_path_suffix(output_path, '.mp4'):
                images_to_video(
                    self.temp_path,
                    output_path,
                    img_format='frame_%06d.png',
                    fps=fps)
        return image_array
    def _export_frames(self, keypoints_np, resolution, visual_range,
                       frame_names, disable_limbs, return_array):
        """Render each frame of 3D keypoints with matplotlib and export it.

        For every frame, draws the limbs and joint scatter of each person on
        a 3D axes (camera angles taken from ``self.cam_vector_list``),
        rasterizes the figure, optionally writes it to ``self.temp_path``
        and/or collects it into an in-memory array.

        Args:
            keypoints_np (np.ndarray): keypoints, assumed shape
                (n_frame, n_person, n_joint, 3) — confirmed by the indexing
                below.
            resolution (Iterable[int]): (width, height) each rendered frame
                is resized to.
            visual_range (np.ndarray): per-axis plot value range forwarded
                to ``self._draw_scene``.
            frame_names (Optional[List[str]]): per-frame title strings drawn
                onto the image; no text if None.
            disable_limbs (bool): skip drawing limb lines when True.
            return_array (bool): collect frames and return them stacked.

        Returns:
            Union[np.ndarray, None]: stacked frames (n_frame, h, w, 3) if
                ``return_array`` else None.
        """
        image_array = []
        for frame_index in range(keypoints_np.shape[0]):
            keypoints_frame = keypoints_np[frame_index]
            cam_ele, cam_hor = self.cam_vector_list[frame_index]
            fig, ax = \
                self._draw_scene(visual_range=visual_range, axis_len=0.5,
                                 cam_elev_angle=cam_ele,
                                 cam_hori_angle=cam_hor)
            # draw limbs
            num_person = keypoints_frame.shape[0]
            for person_index, keypoints_person in enumerate(keypoints_frame):
                # multi-person: one distinct color per person overrides the
                # per-part palette
                if num_person >= 2:
                    self.limbs_palette = get_different_colors(
                        num_person)[person_index].reshape(-1, 3)
                if not disable_limbs:
                    for part_name, limbs in self.limbs_connection.items():
                        # body limbs drawn thicker than face/hand limbs
                        if part_name == 'body':
                            linewidth = 2
                        else:
                            linewidth = 1
                        if isinstance(self.limbs_palette, np.ndarray):
                            color = self.limbs_palette.astype(
                                np.int32).reshape(-1, 3)
                        elif isinstance(self.limbs_palette, dict):
                            color = np.array(
                                self.limbs_palette[part_name]).astype(np.int32)
                        for limb_index, limb in enumerate(limbs):
                            # clamp so a short palette still covers all limbs
                            limb_index = min(limb_index, len(color) - 1)
                            ax = _plot_line_on_fig(
                                ax,
                                keypoints_person[limb[0]],
                                keypoints_person[limb[1]],
                                color=np.array(color[limb_index]) / 255.0,
                                linewidth=linewidth)
                # scatter only the joints referenced by body limbs, in black
                scatter_points_index = list(
                    set(
                        np.array(self.limbs_connection['body']).reshape(
                            -1).tolist()))
                ax.scatter(
                    keypoints_person[scatter_points_index, 0],
                    keypoints_person[scatter_points_index, 1],
                    keypoints_person[scatter_points_index, 2],
                    c=np.array([0, 0, 0]).reshape(1, -1),
                    s=10,
                    marker='o')
            if num_person >= 2:
                # hide tick labels and add a per-person color legend instead
                ax.xaxis.set_ticklabels([])
                ax.yaxis.set_ticklabels([])
                ax.zaxis.set_ticklabels([])
                labels = []
                custom_lines = []
                for person_index in range(num_person):
                    color = get_different_colors(
                        num_person)[person_index].reshape(1, 3) / 255.0
                    custom_lines.append(
                        Line2D([0], [0],
                               linestyle='-',
                               color=color[0],
                               lw=2,
                               marker='',
                               markeredgecolor='k',
                               markeredgewidth=.1,
                               markersize=20))
                    labels.append(f'person_{person_index + 1}')
                ax.legend(
                    handles=custom_lines,
                    labels=labels,
                    loc='upper left',
                )
            # close figures each iteration to keep matplotlib memory bounded
            plt.close('all')
            rgb_mat = _get_cv2mat_from_buf(fig)
            resized_mat = cv2.resize(rgb_mat, resolution)
            if frame_names is not None:
                # font scale is proportional to output width
                cv2.putText(
                    resized_mat, str(frame_names[frame_index]),
                    (resolution[0] // 10, resolution[1] // 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5 * resolution[0] / 500,
                    np.array([255, 255, 255]).astype(np.int32).tolist(), 2)
            if self.temp_path is not None:
                frame_path = os.path.join(self.temp_path,
                                          'frame_%06d.png' % frame_index)
                cv2.imwrite(frame_path, resized_mat)
            if return_array:
                image_array.append(resized_mat[None])
        if return_array:
            image_array = np.concatenate(image_array)
            return image_array
        else:
            return None
def __del__(self):
"""remove temp images."""
self.remove_temp_frames()
def remove_temp_frames(self):
"""remove temp images."""
if self.temp_path is not None:
if Path(self.temp_path).is_dir() and self.remove_temp:
shutil.rmtree(self.temp_path)
def get_different_colors(number_of_colors,
                         flag=0,
                         alpha: float = 1.0,
                         mode: str = 'bgr',
                         int_dtype: bool = True):
    """Get a deterministic array of visually distinct colors, shape (N, C).

    Hues are evenly spaced over the HLS wheel and converted to RGB; a small
    seeded jitter is applied to lightness/saturation. The global numpy
    random state is saved and restored, so callers' RNG is not perturbed.

    Args:
        number_of_colors (int): number N of colors to generate.
        flag (int, optional): seed for the lightness/saturation jitter.
            Defaults to 0.
        alpha (float, optional): value for the alpha channel when 'a' is in
            ``mode``. Defaults to 1.0.
        mode (str, optional): output channel order, any combination of
            'r', 'g', 'b', 'a' (e.g. 'bgr', 'rgba'). Defaults to 'bgr'.
        int_dtype (bool, optional): uint8 values in [0, 255] if True, else
            float32 in [0, 1]. Defaults to True.

    Returns:
        np.ndarray: colors of shape (N, len(mode)).
    """
    mode = mode.lower()
    assert set(mode).issubset({'r', 'g', 'b', 'a'})
    nst0 = np.random.get_state()
    np.random.seed(flag)
    colors = []
    for i in np.arange(0., 360., 360. / number_of_colors):
        hue = i / 360.
        lightness = (50 + np.random.rand() * 10) / 100.
        saturation = (90 + np.random.rand() * 10) / 100.
        colors.append(colorsys.hls_to_rgb(hue, lightness, saturation))
    colors_np = np.asarray(colors)
    if int_dtype:
        colors_bgr = (255 * colors_np).astype(np.uint8)
    else:
        colors_bgr = colors_np.astype(np.float32)
    # recover the random state
    np.random.set_state(nst0)
    color_dict = {}
    if 'a' in mode:
        # Bug fix: the alpha channel must be a single column (N, 1);
        # the previous (N, 3) made e.g. mode='rgba' return 6 columns.
        color_dict['a'] = np.ones((colors_bgr.shape[0], 1)) * alpha
    # NOTE(review): colorsys returns (r, g, b); mapping column 0 to 'b'
    # assumes the array is BGR-ordered — confirm against callers before
    # changing.
    color_dict['b'] = colors_bgr[:, 0:1]
    color_dict['g'] = colors_bgr[:, 1:2]
    color_dict['r'] = colors_bgr[:, 2:3]
    colors_final = []
    for channel in mode:
        colors_final.append(color_dict[channel])
    colors_final = np.concatenate(colors_final, -1)
    return colors_final
def search_limbs(
        data_source: str,
        mask: Optional[Union[np.ndarray, tuple, list]] = None,
        keypoints_factory: dict = KEYPOINTS_FACTORY) -> Tuple[dict, dict]:
    """Search the corresponding limbs following the basis human_data limbs.

    The mask could mask out the incorrect keypoints.

    Args:
        data_source (str): data source type.
        mask (Optional[Union[np.ndarray, tuple, list]], optional):
            refer to keypoints_mapping. Defaults to None.
        keypoints_factory (dict, optional): Dict of all the conventions.
            Defaults to KEYPOINTS_FACTORY.
    Returns:
        Tuple[dict, dict]: (limbs_target, limbs_palette).
    """
    limbs_source = HUMAN_DATA_LIMBS_INDEX
    # Bug fix: copy the module-level palette instead of aliasing it, so the
    # shared HUMAN_DATA_PALETTE constant is no longer mutated on every call.
    limbs_palette = dict(HUMAN_DATA_PALETTE)
    keypoints_source = keypoints_factory['human_data']
    keypoints_target = keypoints_factory[data_source]
    limbs_target = {}
    for k, part_limbs in limbs_source.items():
        limbs_target[k] = []
        for limb in part_limbs:
            # keep a limb only if both endpoints exist in the target
            # convention and neither is masked out
            flag = False
            if (keypoints_source[limb[0]]
                    in keypoints_target) and (keypoints_source[limb[1]]
                                              in keypoints_target):
                if mask is not None:
                    if mask[keypoints_target.index(keypoints_source[
                            limb[0]])] != 0 and mask[keypoints_target.index(
                                keypoints_source[limb[1]])] != 0:
                        flag = True
                else:
                    flag = True
            if flag:
                limbs_target[k].append([
                    keypoints_target.index(keypoints_source[limb[0]]),
                    keypoints_target.index(keypoints_source[limb[1]])
                ])
        if k == 'body':
            # Deterministic colors for body limbs. Bug fix: use a local
            # RandomState(0) (same sequence as np.random.seed(0)) so the
            # global numpy RNG state is not clobbered as a side effect.
            rng = np.random.RandomState(0)
            limbs_palette[k] = rng.randint(
                0, high=255, size=(len(limbs_target[k]), 3))
        else:
            limbs_palette[k] = np.array(limbs_palette[k])
    return limbs_target, limbs_palette
def prepare_output_path(output_path: str,
                        allowed_suffix: Optional[List[str]] = None,
                        tag: str = 'output file',
                        path_type: Literal['file', 'dir', 'auto'] = 'auto',
                        overwrite: bool = True) -> None:
    """Check output folder or file.

    Args:
        output_path (str): could be folder or file.
        allowed_suffix (Optional[List[str]], optional):
            Check the suffix of `output_path`. If folder, should be [] or [''].
            If could both be folder or file, should be [suffixs..., ''].
            Defaults to None, which is treated as [].
        tag (str, optional): The `string` tag to specify the output type.
            Defaults to 'output file'.
        path_type (Literal['file', 'dir', 'auto'], optional):
            Choose `file` for file and `dir` for folder.
            Choose `auto` if allowed to be both.
            Defaults to 'auto'.
        overwrite (bool, optional):
            Whether overwrite the existing file or folder.
            Defaults to True.
    Raises:
        FileNotFoundError: suffix does not match.
        FileExistsError: file or folder already exists and `overwrite` is
            False.
    Returns:
        None
    """
    # Bug fix: avoid a shared mutable default argument in the signature;
    # normalize None to a fresh list here instead.
    if allowed_suffix is None:
        allowed_suffix = []
    if path_type.lower() == 'dir':
        # suffix check is meaningless for a directory
        allowed_suffix = []
    exist_result = check_path_existence(output_path, path_type=path_type)
    if exist_result == Existence.MissingParent:
        warnings.warn(
            f'The parent folder of {tag} does not exist: {output_path},' +
            f' will make dir {Path(output_path).parent.absolute().__str__()}')
        os.makedirs(
            Path(output_path).parent.absolute().__str__(), exist_ok=True)
    elif exist_result == Existence.DirectoryNotExist:
        os.mkdir(output_path)
        print(f'Making directory {output_path} for saving results.')
    elif exist_result == Existence.FileNotExist:
        suffix_matched = \
            check_path_suffix(output_path, allowed_suffix=allowed_suffix)
        if not suffix_matched:
            raise FileNotFoundError(
                f'The {tag} should be {", ".join(allowed_suffix)}: '
                f'{output_path}.')
    elif exist_result == Existence.FileExist:
        if not overwrite:
            raise FileExistsError(
                f'{output_path} exists (set overwrite = True to overwrite).')
        else:
            print(f'Overwriting {output_path}.')
    elif exist_result == Existence.DirectoryExistEmpty:
        pass
    elif exist_result == Existence.DirectoryExistNotEmpty:
        if not overwrite:
            raise FileExistsError(
                f'{output_path} is not empty (set overwrite = '
                'True to overwrite the files).')
        else:
            print(f'Overwriting {output_path} and its files.')
    else:
        raise FileNotFoundError(f'No Existence type for {output_path}.')
The provided code snippet includes necessary dependencies for implementing the `visualize_kp3d` function. Write a Python function `def visualize_kp3d( kp3d: np.ndarray, output_path: Optional[str] = None, limbs: Optional[Union[np.ndarray, List[int]]] = None, palette: Optional[Iterable[int]] = None, data_source: str = 'coco', mask: Optional[Union[list, tuple, np.ndarray]] = None, start: int = 0, end: Optional[int] = None, resolution: Union[list, Tuple[int, int]] = (1024, 1024), fps: Union[float, int] = 30, frame_names: Optional[Union[List[str], str]] = None, orbit_speed: Union[float, int] = 0.5, value_range: Union[Tuple[int, int], list] = (-100, 100), pop_parts: Iterable[str] = (), disable_limbs: bool = False, return_array: Optional[bool] = None, convention: str = 'opencv', keypoints_factory: dict = keypoints_mapping.KEYPOINTS_FACTORY, ) -> Union[None, np.ndarray]` to solve the following problem:
Visualize 3d keypoints to a video with matplotlib. Support multi person and specified limb connections. Args: kp3d (np.ndarray): shape could be (f * J * 4/3/2) or (f * num_person * J * 4/3/2) output_path (str): output video path image folder. limbs (Optional[Union[np.ndarray, List[int]]], optional): if not specified, the limbs will be searched by search_limbs, this option is for free skeletons like BVH file. Defaults to None. palette (Iterable, optional): specified palette, three int represents (B, G, R). Should be tuple or list. Defaults to None. data_source (str, optional): data source type. Defaults to 'coco'. choose in ['coco', 'smplx', 'smpl', 'coco_wholebody', 'mpi_inf_3dhp', 'mpi_inf_3dhp_test', 'h36m', 'pw3d', 'mpii'] mask (Optional[Union[list, tuple, np.ndarray]], optional): mask to mask out the incorrect points. Defaults to None. start (int, optional): start frame index. Defaults to 0. end (int, optional): end frame index. Could be positive int or negative int or None. None represents include all the frames. Defaults to None. resolution (Union[list, Tuple[int, int]], optional): (width, height) of the output video will be the same size as the original images if not specified. Defaults to None. fps (Union[float, int], optional): fps. Defaults to 30. frame_names (Optional[Union[List[str], str]], optional): List(should be the same as frame numbers) or single string or string format (like 'frame%06d')for frame title, no title if None. Defaults to None. orbit_speed (Union[float, int], optional): orbit speed of camera. Defaults to 0.5. value_range (Union[Tuple[int, int], list], optional): range of axis value. Defaults to (-100, 100). pop_parts (Iterable[str], optional): The body part names you do not want to visualize. Choose in ['left_eye','right_eye', 'nose', 'mouth', 'face', 'left_hand', 'right_hand']Defaults to []. disable_limbs (bool, optional): whether need to disable drawing limbs. Defaults to False. 
return_array (bool, optional): Whether to return images as opencv array .If None, an array will be returned when frame number is below 100. Defaults to None. keypoints_factory (dict, optional): Dict of all the conventions. Defaults to KEYPOINTS_FACTORY. Raises: TypeError: check the type of input keypoints. FileNotFoundError: check the output video path. Returns: Union[None, np.ndarray].
Here is the function:
def visualize_kp3d(
    kp3d: np.ndarray,
    output_path: Optional[str] = None,
    limbs: Optional[Union[np.ndarray, List[int]]] = None,
    palette: Optional[Iterable[int]] = None,
    data_source: str = 'coco',
    mask: Optional[Union[list, tuple, np.ndarray]] = None,
    start: int = 0,
    end: Optional[int] = None,
    resolution: Union[list, Tuple[int, int]] = (1024, 1024),
    fps: Union[float, int] = 30,
    frame_names: Optional[Union[List[str], str]] = None,
    orbit_speed: Union[float, int] = 0.5,
    value_range: Union[Tuple[int, int], list] = (-100, 100),
    pop_parts: Iterable[str] = (),
    disable_limbs: bool = False,
    return_array: Optional[bool] = None,
    convention: str = 'opencv',
    keypoints_factory: dict = keypoints_mapping.KEYPOINTS_FACTORY,
) -> Union[None, np.ndarray]:
    """Visualize 3d keypoints to a video with matplotlib. Support multi person
    and specified limb connections.
    Args:
        kp3d (np.ndarray): shape could be (f * J * 4/3/2) or
            (f * num_person * J * 4/3/2)
        output_path (str): output video path image folder.
        limbs (Optional[Union[np.ndarray, List[int]]], optional):
            if not specified, the limbs will be searched by search_limbs,
            this option is for free skeletons like BVH file.
            Defaults to None.
        palette (Iterable, optional): specified palette, three int represents
            (B, G, R). Should be tuple or list.
            Defaults to None.
        data_source (str, optional): data source type. Defaults to 'coco'.
            choose in ['coco', 'smplx', 'smpl', 'coco_wholebody',
            'mpi_inf_3dhp', 'mpi_inf_3dhp_test', 'h36m', 'pw3d', 'mpii']
        mask (Optional[Union[list, tuple, np.ndarray]], optional):
            mask to mask out the incorrect points. Defaults to None.
        start (int, optional): start frame index. Defaults to 0.
        end (int, optional): end frame index.
            Could be positive int or negative int or None.
            None represents include all the frames.
            Defaults to None.
        resolution (Union[list, Tuple[int, int]], optional):
            (width, height) of the output video
            will be the same size as the original images if not specified.
            Defaults to None.
        fps (Union[float, int], optional): fps. Defaults to 30.
        frame_names (Optional[Union[List[str], str]], optional): List(should be
            the same as frame numbers) or single string or string format
            (like 'frame%06d')for frame title, no title if None.
            Defaults to None.
        orbit_speed (Union[float, int], optional): orbit speed of camera.
            Defaults to 0.5.
        value_range (Union[Tuple[int, int], list], optional):
            range of axis value. Defaults to (-100, 100).
        pop_parts (Iterable[str], optional): The body part names you do not
            want to visualize. Choose in ['left_eye','right_eye', 'nose',
            'mouth', 'face', 'left_hand', 'right_hand']Defaults to [].
        disable_limbs (bool, optional): whether need to disable drawing limbs.
            Defaults to False.
        return_array (bool, optional): Whether to return images as opencv array
            .If None, an array will be returned when frame number is below 100.
            Defaults to None.
        convention (str, optional): axis convention passed to the renderer
            for the output view, e.g. 'opencv'. Defaults to 'opencv'.
        keypoints_factory (dict, optional): Dict of all the conventions.
            Defaults to KEYPOINTS_FACTORY.
    Raises:
        TypeError: check the type of input keypoints.
        FileNotFoundError: check the output video path.
    Returns:
        Union[None, np.ndarray].
    """
    # check input shape
    if not isinstance(kp3d, np.ndarray):
        raise TypeError(
            f'Input type is {type(kp3d)}, which should be numpy.ndarray.')
    kp3d = kp3d.copy()
    # normalize the last axis to exactly 3 coordinates
    if kp3d.shape[-1] == 2:
        kp3d = np.concatenate([kp3d, np.zeros_like(kp3d)[..., 0:1]], axis=-1)
        warnings.warn(
            'The input array is 2-Dimensional coordinates, will concatenate ' +
            f'zeros to the last axis. The new array shape: {kp3d.shape}')
    elif kp3d.shape[-1] >= 4:
        kp3d = kp3d[..., :3]
        warnings.warn(
            'The input array has more than 3-Dimensional coordinates, will ' +
            'keep only the first 3-Dimensions of the last axis. The new ' +
            f'array shape: {kp3d.shape}')
    # single-person input: insert a person axis -> (f, 1, J, 3)
    if kp3d.ndim == 3:
        kp3d = np.expand_dims(kp3d, 1)
    num_frames = kp3d.shape[0]
    assert kp3d.ndim == 4
    assert kp3d.shape[-1] == 3
    # by default only keep frames in memory for short sequences
    if return_array is None:
        if num_frames > 100:
            return_array = False
        else:
            return_array = True
    # check data_source & mask
    if data_source not in keypoints_factory:
        raise ValueError('Wrong data_source. Should choose in' +
                         f'{list(keypoints_factory.keys())}')
    if mask is not None:
        if not isinstance(mask, np.ndarray):
            mask = np.array(mask).reshape(-1)
        assert mask.shape == (
            len(keypoints_factory[data_source]),
        ), f'mask length should fit with keypoints number \
            {len(keypoints_factory[data_source])}'
    # check the output path
    if output_path is not None:
        prepare_output_path(
            output_path,
            path_type='auto',
            tag='output video',
            allowed_suffix=['.mp4', '.gif', ''])
    # slice the frames
    end = num_frames if end is None else end
    kp3d = kp3d[start:end]
    # norm the coordinates
    if value_range is not None:
        # norm pose location to value_range (70% value range)
        mask_index = np.where(np.array(mask) > 0) if mask is not None else None
        margin_width = abs(value_range[1] - value_range[0]) * 0.15
        pose_np_normed = _norm_pose(kp3d, value_range[0] + margin_width,
                                    value_range[1] - margin_width, mask_index)
        input_pose_np = pose_np_normed
    else:
        input_pose_np = kp3d
    # determine the limb connections and palettes
    if limbs is not None:
        limbs_target, limbs_palette = {
            'body': limbs.tolist() if isinstance(limbs, np.ndarray) else limbs
        }, get_different_colors(len(limbs))
    else:
        limbs_target, limbs_palette = search_limbs(
            data_source=data_source, mask=mask)
    # a user-specified palette overrides the searched/generated one
    if palette is not None:
        limbs_palette = np.array(palette, dtype=np.uint8)[None]
    # check and pop the pop_parts
    assert set(pop_parts).issubset(
        keypoints_mapping.human_data.HUMAN_DATA_PALETTE.keys(
        )), f'wrong part_names in pop_parts, could only \
    choose in{set(keypoints_mapping.human_data.HUMAN_DATA_PALETTE.keys())}'
    for part_name in pop_parts:
        if part_name in limbs_target:
            limbs_target.pop(part_name)
    # initialize renderer and start render
    renderer = Axes3dJointsRenderer()
    renderer.init_camera(cam_hori_speed=orbit_speed, cam_elev_speed=0.2)
    renderer.set_connections(limbs_target, limbs_palette)
    # expand a single title string (optionally a '%'-format) to one per frame
    if isinstance(frame_names, str):
        if '%' in frame_names:
            frame_names = [
                frame_names % index for index in range(input_pose_np.shape[0])
            ]
        else:
            frame_names = [frame_names] * input_pose_np.shape[0]
    image_array = renderer.render_kp3d_to_video(
        input_pose_np,
        output_path,
        convention,
        fps=fps,
        resolution=resolution,
        visual_range=value_range,
        frame_names=frame_names,
        disable_limbs=disable_limbs,
        return_array=return_array)
    return image_array
return_array (bool, optional): Whether to return images as opencv array .If None, an array will be returned when frame number is below 100. Defaults to None. keypoints_factory (dict, optional): Dict of all the conventions. Defaults to KEYPOINTS_FACTORY. Raises: TypeError: check the type of input keypoints. FileNotFoundError: check the output video path. Returns: Union[None, np.ndarray]. |
import copy
import glob
import os
import os.path as osp
import shutil
import warnings
from functools import partial
from pathlib import Path
from typing import List, Optional, Tuple, Union
import mmcv
import numpy as np
import torch
import torch.nn as nn
from colormap import Color
from mmhuman3d.core.cameras import (
WeakPerspectiveCameras,
compute_orbit_cameras,
)
from mmhuman3d.core.cameras.builder import build_cameras
from mmhuman3d.core.conventions.cameras.convert_convention import \
convert_camera_matrix
from mmhuman3d.core.conventions.segmentation import body_segmentation
from mmhuman3d.core.renderer.torch3d_renderer import render_runner
from mmhuman3d.core.renderer.torch3d_renderer.meshes import \
ParametricMeshes
from mmhuman3d.core.renderer.torch3d_renderer.render_smpl_config import (
RENDER_CONFIGS,
)
from mmhuman3d.core.renderer.torch3d_renderer.smpl_renderer import SMPLRenderer
from mmhuman3d.core.renderer.torch3d_renderer.utils import \
align_input_to_padded
from mmhuman3d.models.body_models.builder import build_body_model
from mmhuman3d.utils.demo_utils import (
convert_bbox_to_intrinsic,
convert_crop_cam_to_orig_img,
convert_kp2d_to_bbox,
get_default_hmr_intrinsic,
get_different_colors,
)
from mmhuman3d.utils.ffmpeg_utils import (
check_input_path,
images_to_array,
prepare_output_path,
vid_info_reader,
video_to_array,
video_to_images,
)
from mmhuman3d.utils.mesh_utils import save_meshes_as_objs, save_meshes_as_plys
from mmhuman3d.utils.path_utils import check_path_suffix
def render_smpl(
# smpl parameters
poses: Optional[Union[torch.Tensor, np.ndarray, dict]] = None,
betas: Optional[Union[torch.Tensor, np.ndarray]] = None,
transl: Optional[Union[torch.Tensor, np.ndarray]] = None,
verts: Optional[Union[torch.Tensor, np.ndarray]] = None,
body_model: Optional[nn.Module] = None,
body_model_config: Optional[dict] = None,
# camera parameters
R: Optional[Union[torch.Tensor, np.ndarray]] = None,
T: Optional[Union[torch.Tensor, np.ndarray]] = None,
K: Optional[Union[torch.Tensor, np.ndarray]] = None,
orig_cam: Optional[Union[torch.Tensor, np.ndarray]] = None,
Ks: Optional[Union[torch.Tensor, np.ndarray]] = None,
in_ndc: bool = True,
convention: str = 'pytorch3d',
projection: Literal['weakperspective', 'perspective', 'fovperspective',
'orthographics',
'fovorthographics'] = 'perspective',
orbit_speed: Union[float, Tuple[float, float]] = 0.0,
# render choice parameters
render_choice: Literal['lq', 'mq', 'hq', 'silhouette', 'depth',
'normal', 'pointcloud',
'part_silhouette'] = 'hq',
palette: Union[List[str], str, np.ndarray, torch.Tensor] = 'white',
texture_image: Union[torch.Tensor, np.ndarray] = None,
resolution: Optional[Union[List[int], Tuple[int, int]]] = None,
start: int = 0,
end: Optional[int] = None,
alpha: float = 1.0,
no_grad: bool = True,
batch_size: int = 10,
device: Union[torch.device, str] = 'cuda',
# file io parameters
return_tensor: bool = False,
output_path: str = None,
origin_frames: Optional[str] = None,
frame_list: Optional[List[str]] = None,
image_array: Optional[Union[np.ndarray, torch.Tensor]] = None,
img_format: str = '%06d.png',
overwrite: bool = False,
mesh_file_path: Optional[str] = None,
read_frames_batch: bool = False,
# visualize keypoints
plot_kps: bool = False,
kp3d: Optional[Union[np.ndarray, torch.Tensor]] = None,
mask: Optional[Union[np.ndarray, List[int]]] = None,
vis_kp_index: bool = False,
verbose: bool = False) -> Union[None, torch.Tensor]:
"""Render SMPL, SMPL-X or STAR mesh or silhouette into differentiable
tensors, and export video or images.
Args:
# smpl parameters:
poses (Union[torch.Tensor, np.ndarray, dict]):
1). `tensor` or `array` and ndim is 2, shape should be
(frame, 72).
2). `tensor` or `array` and ndim is 3, shape should be
(frame, num_person, 72/165). num_person equals 1 means
single-person.
Rendering predicted multi-person should feed together with
multi-person weakperspective cameras. meshes would be computed
and use an identity intrinsic matrix.
3). `dict`, standard dict format defined in smplx.body_models.
will be treated as single-person.
Lower priority than `verts`.
Defaults to None.
betas (Optional[Union[torch.Tensor, np.ndarray]], optional):
1). ndim is 2, shape should be (frame, 10).
2). ndim is 3, shape should be (frame, num_person, 10). num_person
equals 1 means single-person. If poses are multi-person, betas
should be set to the same person number.
None will use default betas.
Defaults to None.
transl (Optional[Union[torch.Tensor, np.ndarray]], optional):
translations of smpl(x).
1). ndim is 2, shape should be (frame, 3).
2). ndim is 3, shape should be (frame, num_person, 3). num_person
equals 1 means single-person. If poses are multi-person,
transl should be set to the same person number.
Defaults to None.
verts (Optional[Union[torch.Tensor, np.ndarray]], optional):
1). ndim is 3, shape should be (frame, num_verts, 3).
2). ndim is 4, shape should be (frame, num_person, num_verts, 3).
num_person equals 1 means single-person.
Higher priority over `poses` & `betas` & `transl`.
Defaults to None.
body_model (nn.Module, optional): body_model created from smplx.create.
Higher priority than `body_model_config`. If `body_model` is not
None, it will override `body_model_config`.
Should not both be None.
Defaults to None.
body_model_config (dict, optional): body_model_config for build_model.
Lower priority than `body_model`. Should not both be None.
Defaults to None.
# camera parameters:
K (Optional[Union[torch.Tensor, np.ndarray]], optional):
shape should be (frame, 4, 4) or (frame, 3, 3), frame could be 1.
if (4, 4) or (3, 3), dim 0 will be added automatically.
Will be default `FovPerspectiveCameras` intrinsic if None.
Lower priority than `orig_cam`.
R (Optional[Union[torch.Tensor, np.ndarray]], optional):
shape should be (frame, 3, 3), If f equals 1, camera will have
identical rotation.
If `K` and `orig_cam` is None, will be generated by `look_at_view`.
If have `K` or `orig_cam` and `R` is None, will be generated by
`convert_camera_matrix`.
Defaults to None.
T (Optional[Union[torch.Tensor, np.ndarray]], optional):
shape should be (frame, 3). If f equals 1, camera will have
identical translation.
If `K` and `orig_cam` is None, will be generated by `look_at_view`.
If have `K` or `orig_cam` and `T` is None, will be generated by
`convert_camera_matrix`.
Defaults to None.
orig_cam (Optional[Union[torch.Tensor, np.ndarray]], optional):
shape should be (frame, 4) or (frame, num_person, 4). If f equals
1, will be repeated to num_frames. num_person should be 1 if single
person. Usually for HMR, VIBE predicted cameras.
Higher priority than `K` & `R` & `T`.
Defaults to None.
Ks (Optional[Union[torch.Tensor, np.ndarray]], optional):
shape should be (frame, 4, 4).
This is for HMR or SPIN multi-person demo.
in_ndc (bool, optional): . Defaults to True.
convention (str, optional): If want to use an existing convention,
choose in ['opengl', 'opencv', 'pytorch3d', 'pyrender', 'open3d',
'maya', 'blender', 'unity'].
If want to use a new convention, define your convention in
(CAMERA_CONVENTION_FACTORY)[mmhuman3d/core/conventions/cameras/
__init__.py] by the order of right, front and up.
Defaults to 'pytorch3d'.
projection (Literal[, optional): projection mode of camers. Choose in
['orthographics, fovperspective', 'perspective', 'weakperspective',
'fovorthographics']
Defaults to 'perspective'.
orbit_speed (float, optional): orbit speed for viewing when no `K`
provided. `float` for only azim speed and Tuple for `azim` and
`elev`.
# render choice parameters:
render_choice (Literal[, optional):
choose in ['lq', 'mq', 'hq', 'silhouette', 'depth', 'normal',
'pointcloud', 'part_silhouette'] .
`lq`, `mq`, `hq` would output (frame, h, w, 4) FloatTensor.
`lq` means low quality, `mq` means medium quality,
h`q means high quality.
`silhouette` would output (frame, h, w) soft binary FloatTensor.
`part_silhouette` would output (frame, h, w, 1) LongTensor.
Every pixel stores a class index.
`depth` will output a depth map of (frame, h, w, 1) FloatTensor
and 'normal' will output a normal map of (frame, h, w, 1).
`pointcloud` will output a (frame, h, w, 4) FloatTensor.
Defaults to 'mq'.
palette (Union[List[str], str, np.ndarray], optional):
color theme str or list of color str or `array`.
1). If use str to represent the color,
should choose in ['segmentation', 'random'] or color from
Colormap https://en.wikipedia.org/wiki/X11_color_names.
If choose 'segmentation', will get a color for each part.
2). If you have multi-person, better give a list of str or all
will be in the same color.
3). If you want to define your specific color, use an `array`
of shape (3,) for single person and (N, 3) for multiple persons.
If (3,) for multiple persons, all will be in the same color.
Your `array` should be in range [0, 255] for 8 bit color.
Defaults to 'white'.
texture_image (Union[torch.Tensor, np.ndarray], optional):
Texture image to be wrapped on the smpl mesh. If not None,
the `palette` will be ignored, and the `body_model` is required
to have `uv_param_path`.
Should pass list or tensor of shape (num_person, H, W, 3).
The color channel should be `RGB`.
Defaults to None.
resolution (Union[Iterable[int], int], optional):
1). If iterable, should be (height, width) of output images.
2). If int, would be taken as (resolution, resolution).
Defaults to (1024, 1024).
This will influence the overlay results when render with
backgrounds. The output video will be rendered following the
size of background images and finally resized to resolution.
start (int, optional): start frame index. Defaults to 0.
end (int, optional): end frame index. Exclusive.
Could be positive int or negative int or None.
None represents include all the frames.
Defaults to None.
alpha (float, optional): Transparency of the mesh.
Range in [0.0, 1.0]
Defaults to 1.0.
no_grad (bool, optional): Set to True if do not need differentiable
render.
Defaults to False.
batch_size (int, optional): Batch size for render.
Related to your gpu memory.
Defaults to 10.
# file io parameters:
return_tensor (bool, optional): Whether return the result tensors.
Defaults to False, will return None.
output_path (str, optional): output video or gif or image folder.
Defaults to None, pass export procedure.
# background frames, priority: image_array > frame_list > origin_frames
origin_frames (Optional[str], optional): origin background frame path,
could be `.mp4`, `.gif`(will be sliced into a folder) or an image
folder.
Defaults to None.
frame_list (Optional[List[str]], optional): list of origin background
frame paths, element in list each should be a image path like
`*.jpg` or `*.png`.
Use this when your file names is hard to sort or you only want to
render a small number frames.
Defaults to None.
image_array: (Optional[Union[np.ndarray, torch.Tensor]], optional):
origin background frame `tensor` or `array`, use this when you
want your frames in memory as array or tensor.
overwrite (bool, optional): whether overwriting the existing files.
Defaults to False.
mesh_file_path (bool, optional): the directory path to store the `.ply`
or '.ply' files. Will be named like 'frame_idx_person_idx.ply'.
Defaults to None.
read_frames_batch (bool, optional): Whether read frames by batch.
Set it as True if your video is large in size.
Defaults to False.
# visualize keypoints
plot_kps (bool, optional): whether plot keypoints on the output video.
Defaults to False.
kp3d (Optional[Union[np.ndarray, torch.Tensor]], optional):
the keypoints of any convention, should pass `mask` if have any
none-sense points. Shape should be (frame, )
Defaults to None.
mask (Optional[Union[np.ndarray, List[int]]], optional):
Mask of keypoints existence.
Defaults to None.
vis_kp_index (bool, optional):
Whether plot keypoint index number on human mesh.
Defaults to False.
# visualize render progress
verbose (bool, optional):
Whether print the progress bar for rendering.
Returns:
Union[None, torch.Tensor]: return the rendered image tensors or None.
"""
# initialize the device
device = torch.device(device) if isinstance(device, str) else device
if isinstance(resolution, int):
resolution = (resolution, resolution)
elif isinstance(resolution, list):
resolution = tuple(resolution)
verts, poses, betas, transl = _prepare_input_pose(verts, poses, betas,
transl)
body_model = _prepare_body_model(body_model, body_model_config)
model_type = body_model.name().replace('-', '').lower()
assert model_type in ['smpl', 'smplx', 'star']
if model_type in ['smpl', 'smplx']:
vertices, joints, num_frames, num_person = _prepare_mesh(
poses, betas, transl, verts, start, end, body_model)
elif model_type == 'star':
model_output = body_model(body_pose=poses, betas=betas, transl=transl)
vertices = model_output['vertices']
num_frames = poses.shape[0]
num_person = 1 # star temporarily only support single person
end = num_frames if end is None else end
vertices = vertices.view(num_frames, num_person, -1, 3)
num_verts = vertices.shape[-2]
if not plot_kps:
joints = None
if kp3d is not None:
warnings.warn('`plot_kps` is False, `kp3d` will be set as None.')
kp3d = None
image_array, remove_folder, frames_folder = _prepare_background(
image_array, frame_list, origin_frames, output_path, start, end,
img_format, overwrite, num_frames, read_frames_batch)
render_resolution = None
if image_array is not None:
render_resolution = (image_array.shape[1], image_array.shape[2])
elif frames_folder is not None:
frame_path_list = glob.glob(osp.join(
frames_folder, '*.jpg')) + glob.glob(
osp.join(frames_folder, '*.png')) + glob.glob(
osp.join(frames_folder, '*.jpeg'))
vid_info = vid_info_reader(frame_path_list[0])
render_resolution = (int(vid_info['height']), int(vid_info['width']))
if resolution is not None:
if render_resolution is not None:
if render_resolution != resolution:
warnings.warn(
f'Size of background: {render_resolution} !='
f' resolution: {resolution}, the output video will be '
f'resized as {resolution}')
final_resolution = resolution
elif render_resolution is None:
render_resolution = final_resolution = resolution
elif resolution is None:
if render_resolution is None:
render_resolution = final_resolution = (1024, 1024)
elif render_resolution is not None:
final_resolution = render_resolution
if isinstance(kp3d, np.ndarray):
kp3d = torch.Tensor(kp3d)
if kp3d is not None:
if mask is not None:
map_index = np.where(np.array(mask) != 0)[0]
kp3d = kp3d[map_index.tolist()]
kp3d = kp3d[start:end]
kp3d = kp3d.view(num_frames, -1, 3)
# prepare render_param_dict
render_param_dict = copy.deepcopy(RENDER_CONFIGS[render_choice.lower()])
if model_type == 'smpl':
render_param_dict.update(num_class=24)
elif model_type == 'smplx':
render_param_dict.update(num_class=27)
if render_choice not in [
'hq', 'mq', 'lq', 'silhouette', 'part_silhouette', 'depth',
'pointcloud', 'normal'
]:
raise ValueError('Please choose the right render_choice.')
# body part colorful visualization should use flat shader to be sharper.
if texture_image is None:
if isinstance(palette, str):
palette = [palette] * num_person
elif isinstance(palette, np.ndarray):
palette = torch.Tensor(palette)
palette = palette.view(-1, 3)
if palette.shape[0] != num_person:
_times = num_person // palette.shape[0]
palette = palette.repeat(_times, 1)[:num_person]
if palette.shape[0] == 1:
print(f'Same color for all the {num_person} people')
else:
print('Repeat palette for multi-person.')
else:
raise ValueError('Wrong input palette type. '
'Palette should be tensor, array or list of strs')
colors_all = _prepare_colors(palette, render_choice, num_person,
num_verts, model_type)
colors_all = colors_all.view(-1, num_person * num_verts, 3)
# verts of ParametricMeshes should be in (N, V, 3)
vertices = vertices.view(num_frames, -1, 3)
meshes = ParametricMeshes(
body_model=body_model,
verts=vertices,
N_individual_overdide=num_person,
model_type=model_type,
texture_image=texture_image,
use_nearest=bool(render_choice == 'part_silhouette'),
vertex_color=colors_all)
# write .ply or .obj files
if mesh_file_path is not None:
mmcv.mkdir_or_exist(mesh_file_path)
for person_idx in range(meshes.shape[1]):
mesh_person = meshes[:, person_idx]
if texture_image is None:
ply_paths = [
f'{mesh_file_path}/frame{frame_idx}_'
f'person{person_idx}.ply'
for frame_idx in range(num_frames)
]
save_meshes_as_plys(meshes=mesh_person, files=ply_paths)
else:
obj_paths = [
f'{mesh_file_path}/frame{frame_idx}_'
f'person{person_idx}.obj'
for frame_idx in range(num_frames)
]
save_meshes_as_objs(meshes=mesh_person, files=obj_paths)
vertices = meshes.verts_padded().view(num_frames, num_person, -1, 3)
# prepare camera matrixs
if Ks is not None:
projection = 'perspective'
orig_cam = None
if isinstance(Ks, np.ndarray):
Ks = torch.Tensor(Ks)
Ks = Ks.view(-1, num_person, 3, 3)
Ks = Ks[start:end]
Ks = Ks.view(-1, 3, 3)
K = K.repeat(num_frames * num_person, 1, 1)
Ks = K.inverse() @ Ks @ K
vertices = vertices.view(num_frames * num_person, -1, 3)
if T is None:
T = torch.zeros(num_frames, num_person, 1, 3)
elif isinstance(T, np.ndarray):
T = torch.Tensor(T)
T = T[start:end]
T = T.view(num_frames * num_person, 1, 3)
vertices = torch.einsum('blc,bvc->bvl', Ks, vertices + T)
R = None
T = None
vertices = vertices.view(num_frames, num_person, -1, 3)
if orig_cam is not None:
if isinstance(orig_cam, np.ndarray):
orig_cam = torch.Tensor(orig_cam)
projection = 'weakperspective'
r = render_resolution[1] / render_resolution[0]
orig_cam = orig_cam[start:end]
orig_cam = orig_cam.view(num_frames, num_person, 4)
# if num_person > 1:
sx, sy, tx, ty = torch.unbind(orig_cam, -1)
vertices[..., 0] += tx.view(num_frames, num_person, 1)
vertices[..., 1] += ty.view(num_frames, num_person, 1)
vertices[..., 0] *= sx.view(num_frames, num_person, 1)
vertices[..., 1] *= sy.view(num_frames, num_person, 1)
orig_cam = torch.tensor([1.0, 1.0, 0.0,
0.0]).view(1, 4).repeat(num_frames, 1)
K, R, T = WeakPerspectiveCameras.convert_orig_cam_to_matrix(
orig_cam=orig_cam,
znear=torch.min(vertices[..., 2] - 1),
aspect_ratio=r)
if num_person > 1:
vertices = vertices.reshape(num_frames, -1, 3)
else:
vertices = vertices.view(num_frames, -1, 3)
meshes = meshes.update_padded(new_verts_padded=vertices)
# orig_cam and K are None, use look_at_view
if K is None:
projection = 'fovperspective'
K, R, T = compute_orbit_cameras(
at=(torch.mean(vertices.view(-1, 3), 0)).detach().cpu(),
orbit_speed=orbit_speed,
batch_size=num_frames,
convention=convention)
convention = 'pytorch3d'
if isinstance(R, np.ndarray):
R = torch.Tensor(R).view(-1, 3, 3)
elif isinstance(R, torch.Tensor):
R = R.view(-1, 3, 3)
elif isinstance(R, list):
R = torch.Tensor(R).view(-1, 3, 3)
elif R is None:
pass
else:
raise ValueError(f'Wrong type of R: {type(R)}!')
if R is not None:
if len(R) > num_frames:
R = R[start:end]
if isinstance(T, np.ndarray):
T = torch.Tensor(T).view(-1, 3)
elif isinstance(T, torch.Tensor):
T = T.view(-1, 3)
elif isinstance(T, list):
T = torch.Tensor(T).view(-1, 3)
elif T is None:
pass
else:
raise ValueError(f'Wrong type of T: {type(T)}!')
if T is not None:
if len(T) > num_frames:
T = T[start:end]
if isinstance(K, np.ndarray):
K = torch.Tensor(K).view(-1, K.shape[-2], K.shape[-1])
elif isinstance(K, torch.Tensor):
K = K.view(-1, K.shape[-2], K.shape[-1])
elif isinstance(K, list):
K = torch.Tensor(K)
K = K.view(-1, K.shape[-2], K.shape[-1])
else:
raise ValueError(f'Wrong type of K: {type(K)}!')
if K is not None:
if len(K) > num_frames:
K = K[start:end]
assert projection in [
'perspective', 'weakperspective', 'orthographics', 'fovorthographics',
'fovperspective'
], f'Wrong camera projection: {projection}'
if projection in ['fovperspective', 'perspective']:
is_perspective = True
elif projection in [
'fovorthographics', 'weakperspective', 'orthographics'
]:
is_perspective = False
if projection in ['fovperspective', 'fovorthographics', 'weakperspective']:
assert in_ndc
K, R, T = convert_camera_matrix(
convention_dst='pytorch3d',
K=K,
R=R,
T=T,
is_perspective=is_perspective,
convention_src=convention,
resolution_src=render_resolution,
in_ndc_src=in_ndc,
in_ndc_dst=in_ndc)
# initialize the renderer.
renderer = SMPLRenderer(
resolution=render_resolution,
device=device,
output_path=output_path,
return_tensor=return_tensor,
alpha=alpha,
read_img_format=img_format,
render_choice=render_choice,
frames_folder=frames_folder,
plot_kps=plot_kps,
vis_kp_index=vis_kp_index,
final_resolution=final_resolution,
**render_param_dict)
cameras = build_cameras(
dict(
type=projection,
in_ndc=in_ndc,
device=device,
K=K,
R=R,
T=T,
resolution=render_resolution))
if image_array is not None:
image_array = torch.Tensor(image_array)
image_array = align_input_to_padded(
image_array, ndim=4, batch_size=num_frames, padding_mode='ones')
# prepare the render data.
render_data = dict(
images=image_array,
meshes=meshes,
cameras=cameras,
joints=joints,
joints_gt=kp3d,
)
results = render_runner.render(
renderer=renderer,
device=device,
batch_size=batch_size,
output_path=output_path,
return_tensor=return_tensor,
no_grad=no_grad,
verbose=verbose,
**render_data)
if remove_folder:
if Path(frames_folder).is_dir():
shutil.rmtree(frames_folder)
if return_tensor:
return results
else:
return None
The provided code snippet includes the necessary dependencies for implementing the `visualize_smpl_calibration` function. Write a Python function `def visualize_smpl_calibration( K, R, T, resolution, **kwargs, ) -> None` to solve the following problem:
Visualize an SMPL mesh using an OpenCV calibration matrix defined in screen space.
Here is the function:
def visualize_smpl_calibration(
    K,
    R,
    T,
    resolution,
    **kwargs,
) -> Union[None, torch.Tensor]:
    """Visualize a smpl mesh which has opencv calibration matrix defined in
    screen space.

    Args:
        K: camera intrinsic matrix, shape (frame, 3, 3) or (frame, 4, 4).
            Required.
        R: camera rotation matrix, shape (frame, 3, 3). May be None.
        T: camera translation, shape (frame, 3). May be None.
        resolution: (height, width) of the screen space. Required.
        **kwargs: forwarded to `render_smpl`. The keywords fixed by this
            wrapper (`projection`, `convention`, `orig_cam`, `in_ndc`) are
            removed from kwargs so callers cannot accidentally override them.

    Returns:
        Union[None, torch.Tensor]: the rendered image tensors when
        `return_tensor=True` is passed through kwargs, otherwise None.
    """
    assert K is not None, '`K` is required.'
    assert resolution is not None, '`resolution`(h, w) is required.'
    # Calibration is expressed in opencv screen space, so fix the projection
    # and convention accordingly.
    func = partial(
        render_smpl,
        projection='perspective',
        convention='opencv',
        orig_cam=None,
        in_ndc=False)
    # Drop user-passed duplicates of the fixed keywords to avoid
    # `TypeError: got multiple values for keyword argument`.
    for k in func.keywords:
        kwargs.pop(k, None)
    return func(K=K, R=R, T=T, resolution=resolution, **kwargs)
14,309 | import copy
import glob
import os
import os.path as osp
import shutil
import warnings
from functools import partial
from pathlib import Path
from typing import List, Optional, Tuple, Union
import mmcv
import numpy as np
import torch
import torch.nn as nn
from colormap import Color
from mmhuman3d.core.cameras import (
WeakPerspectiveCameras,
compute_orbit_cameras,
)
from mmhuman3d.core.cameras.builder import build_cameras
from mmhuman3d.core.conventions.cameras.convert_convention import \
convert_camera_matrix
from mmhuman3d.core.conventions.segmentation import body_segmentation
from mmhuman3d.core.renderer.torch3d_renderer import render_runner
from mmhuman3d.core.renderer.torch3d_renderer.meshes import \
ParametricMeshes
from mmhuman3d.core.renderer.torch3d_renderer.render_smpl_config import (
RENDER_CONFIGS,
)
from mmhuman3d.core.renderer.torch3d_renderer.smpl_renderer import SMPLRenderer
from mmhuman3d.core.renderer.torch3d_renderer.utils import \
align_input_to_padded
from mmhuman3d.models.body_models.builder import build_body_model
from mmhuman3d.utils.demo_utils import (
convert_bbox_to_intrinsic,
convert_crop_cam_to_orig_img,
convert_kp2d_to_bbox,
get_default_hmr_intrinsic,
get_different_colors,
)
from mmhuman3d.utils.ffmpeg_utils import (
check_input_path,
images_to_array,
prepare_output_path,
vid_info_reader,
video_to_array,
video_to_images,
)
from mmhuman3d.utils.mesh_utils import save_meshes_as_objs, save_meshes_as_plys
from mmhuman3d.utils.path_utils import check_path_suffix
def render_smpl(
    # smpl parameters
    poses: Optional[Union[torch.Tensor, np.ndarray, dict]] = None,
    betas: Optional[Union[torch.Tensor, np.ndarray]] = None,
    transl: Optional[Union[torch.Tensor, np.ndarray]] = None,
    verts: Optional[Union[torch.Tensor, np.ndarray]] = None,
    body_model: Optional[nn.Module] = None,
    body_model_config: Optional[dict] = None,
    # camera parameters
    R: Optional[Union[torch.Tensor, np.ndarray]] = None,
    T: Optional[Union[torch.Tensor, np.ndarray]] = None,
    K: Optional[Union[torch.Tensor, np.ndarray]] = None,
    orig_cam: Optional[Union[torch.Tensor, np.ndarray]] = None,
    Ks: Optional[Union[torch.Tensor, np.ndarray]] = None,
    in_ndc: bool = True,
    convention: str = 'pytorch3d',
    projection: Literal['weakperspective', 'perspective', 'fovperspective',
                        'orthographics',
                        'fovorthographics'] = 'perspective',
    orbit_speed: Union[float, Tuple[float, float]] = 0.0,
    # render choice parameters
    render_choice: Literal['lq', 'mq', 'hq', 'silhouette', 'depth',
                           'normal', 'pointcloud',
                           'part_silhouette'] = 'hq',
    palette: Union[List[str], str, np.ndarray, torch.Tensor] = 'white',
    texture_image: Union[torch.Tensor, np.ndarray] = None,
    resolution: Optional[Union[List[int], Tuple[int, int]]] = None,
    start: int = 0,
    end: Optional[int] = None,
    alpha: float = 1.0,
    no_grad: bool = True,
    batch_size: int = 10,
    device: Union[torch.device, str] = 'cuda',
    # file io parameters
    return_tensor: bool = False,
    output_path: str = None,
    origin_frames: Optional[str] = None,
    frame_list: Optional[List[str]] = None,
    image_array: Optional[Union[np.ndarray, torch.Tensor]] = None,
    img_format: str = '%06d.png',
    overwrite: bool = False,
    mesh_file_path: Optional[str] = None,
    read_frames_batch: bool = False,
    # visualize keypoints
    plot_kps: bool = False,
    kp3d: Optional[Union[np.ndarray, torch.Tensor]] = None,
    mask: Optional[Union[np.ndarray, List[int]]] = None,
    vis_kp_index: bool = False,
    verbose: bool = False) -> Union[None, torch.Tensor]:
    """Render SMPL, SMPL-X or STAR mesh or silhouette into differentiable
    tensors, and export video or images.

    Args:
        # smpl parameters:
        poses (Union[torch.Tensor, np.ndarray, dict]):

            1). `tensor` or `array` and ndim is 2, shape should be
            (frame, 72).

            2). `tensor` or `array` and ndim is 3, shape should be
            (frame, num_person, 72/165). num_person equals 1 means
            single-person.
            Rendering predicted multi-person should feed together with
            multi-person weakperspective cameras. meshes would be computed
            and use an identity intrinsic matrix.

            3). `dict`, standard dict format defined in smplx.body_models.
            will be treated as single-person.

            Lower priority than `verts`.

            Defaults to None.
        betas (Optional[Union[torch.Tensor, np.ndarray]], optional):
            1). ndim is 2, shape should be (frame, 10).

            2). ndim is 3, shape should be (frame, num_person, 10). num_person
            equals 1 means single-person. If poses are multi-person, betas
            should be set to the same person number.

            None will use default betas.

            Defaults to None.
        transl (Optional[Union[torch.Tensor, np.ndarray]], optional):
            translations of smpl(x).

            1). ndim is 2, shape should be (frame, 3).

            2). ndim is 3, shape should be (frame, num_person, 3). num_person
            equals 1 means single-person. If poses are multi-person,
            transl should be set to the same person number.

            Defaults to None.
        verts (Optional[Union[torch.Tensor, np.ndarray]], optional):
            1). ndim is 3, shape should be (frame, num_verts, 3).

            2). ndim is 4, shape should be (frame, num_person, num_verts, 3).
            num_person equals 1 means single-person.

            Higher priority over `poses` & `betas` & `transl`.

            Defaults to None.
        body_model (nn.Module, optional): body_model created from smplx.create.
            Higher priority than `body_model_config`. If `body_model` is not
            None, it will override `body_model_config`.
            Should not both be None.

            Defaults to None.
        body_model_config (dict, optional): body_model_config for build_model.
            Lower priority than `body_model`. Should not both be None.
            Defaults to None.
        # camera parameters:
        K (Optional[Union[torch.Tensor, np.ndarray]], optional):
            shape should be (frame, 4, 4) or (frame, 3, 3), frame could be 1.
            if (4, 4) or (3, 3), dim 0 will be added automatically.
            Will be default `FovPerspectiveCameras` intrinsic if None.
            Lower priority than `orig_cam`.
        R (Optional[Union[torch.Tensor, np.ndarray]], optional):
            shape should be (frame, 3, 3), If f equals 1, camera will have
            identical rotation.
            If `K` and `orig_cam` is None, will be generated by `look_at_view`.
            If have `K` or `orig_cam` and `R` is None, will be generated by
            `convert_camera_matrix`.

            Defaults to None.
        T (Optional[Union[torch.Tensor, np.ndarray]], optional):
            shape should be (frame, 3). If f equals 1, camera will have
            identical translation.
            If `K` and `orig_cam` is None, will be generated by `look_at_view`.
            If have `K` or `orig_cam` and `T` is None, will be generated by
            `convert_camera_matrix`.

            Defaults to None.
        orig_cam (Optional[Union[torch.Tensor, np.ndarray]], optional):
            shape should be (frame, 4) or (frame, num_person, 4). If f equals
            1, will be repeated to num_frames. num_person should be 1 if single
            person. Usually for HMR, VIBE predicted cameras.
            Higher priority than `K` & `R` & `T`.

            Defaults to None.
        Ks (Optional[Union[torch.Tensor, np.ndarray]], optional):
            shape should be (frame, 4, 4).
            This is for HMR or SPIN multi-person demo.
        in_ndc (bool, optional): . Defaults to True.
        convention (str, optional): If want to use an existing convention,
            choose in ['opengl', 'opencv', 'pytorch3d', 'pyrender', 'open3d',
            'maya', 'blender', 'unity'].
            If want to use a new convention, define your convention in
            (CAMERA_CONVENTION_FACTORY)[mmhuman3d/core/conventions/cameras/
            __init__.py] by the order of right, front and up.

            Defaults to 'pytorch3d'.
        projection (Literal[, optional): projection mode of cameras. Choose in
            ['orthographics, fovperspective', 'perspective', 'weakperspective',
            'fovorthographics']
            Defaults to 'perspective'.
        orbit_speed (float, optional): orbit speed for viewing when no `K`
            provided. `float` for only azim speed and Tuple for `azim` and
            `elev`.
        # render choice parameters:
        render_choice (Literal[, optional):
            choose in ['lq', 'mq', 'hq', 'silhouette', 'depth', 'normal',
            'pointcloud', 'part_silhouette'] .

            `lq`, `mq`, `hq` would output (frame, h, w, 4) FloatTensor.
            `lq` means low quality, `mq` means medium quality,
            `hq` means high quality.

            `silhouette` would output (frame, h, w) soft binary FloatTensor.

            `part_silhouette` would output (frame, h, w, 1) LongTensor.
            Every pixel stores a class index.

            `depth` will output a depth map of (frame, h, w, 1) FloatTensor
            and 'normal' will output a normal map of (frame, h, w, 1).

            `pointcloud` will output a (frame, h, w, 4) FloatTensor.

            Defaults to 'hq'.
        palette (Union[List[str], str, np.ndarray], optional):
            color theme str or list of color str or `array`.

            1). If use str to represent the color,
            should choose in ['segmentation', 'random'] or color from
            Colormap https://en.wikipedia.org/wiki/X11_color_names.
            If choose 'segmentation', will get a color for each part.

            2). If you have multi-person, better give a list of str or all
            will be in the same color.

            3). If you want to define your specific color, use an `array`
            of shape (3,) for single person and (N, 3) for multiple persons.
            If (3,) for multiple persons, all will be in the same color.
            Your `array` should be in range [0, 255] for 8 bit color.

            Defaults to 'white'.
        texture_image (Union[torch.Tensor, np.ndarray], optional):
            Texture image to be wrapped on the smpl mesh. If not None,
            the `palette` will be ignored, and the `body_model` is required
            to have `uv_param_path`.
            Should pass list or tensor of shape (num_person, H, W, 3).
            The color channel should be `RGB`.

            Defaults to None.
        resolution (Union[Iterable[int], int], optional):
            1). If iterable, should be (height, width) of output images.

            2). If int, would be taken as (resolution, resolution).

            Defaults to None (falls back to (1024, 1024) when no background
            resolution is available).
            This will influence the overlay results when render with
            backgrounds. The output video will be rendered following the
            size of background images and finally resized to resolution.
        start (int, optional): start frame index. Defaults to 0.
        end (int, optional): end frame index. Exclusive.
            Could be positive int or negative int or None.
            None represents include all the frames.

            Defaults to None.
        alpha (float, optional): Transparency of the mesh.
            Range in [0.0, 1.0]

            Defaults to 1.0.
        no_grad (bool, optional): Set to True if do not need differentiable
            render.

            Defaults to True.
        batch_size (int, optional): Batch size for render.
            Related to your gpu memory.

            Defaults to 10.
        # file io parameters:
        return_tensor (bool, optional): Whether return the result tensors.

            Defaults to False, will return None.
        output_path (str, optional): output video or gif or image folder.

            Defaults to None, pass export procedure.
        # background frames, priority: image_array > frame_list > origin_frames
        origin_frames (Optional[str], optional): origin background frame path,
            could be `.mp4`, `.gif`(will be sliced into a folder) or an image
            folder.

            Defaults to None.
        frame_list (Optional[List[str]], optional): list of origin background
            frame paths, element in list each should be a image path like
            `*.jpg` or `*.png`.
            Use this when your file names is hard to sort or you only want to
            render a small number frames.

            Defaults to None.
        image_array: (Optional[Union[np.ndarray, torch.Tensor]], optional):
            origin background frame `tensor` or `array`, use this when you
            want your frames in memory as array or tensor.
        overwrite (bool, optional): whether overwriting the existing files.

            Defaults to False.
        mesh_file_path (str, optional): the directory path to store the `.ply`
            or `.obj` files. Will be named like 'frame_idx_person_idx.ply'.

            Defaults to None.
        read_frames_batch (bool, optional): Whether read frames by batch.
            Set it as True if your video is large in size.

            Defaults to False.
        # visualize keypoints
        plot_kps (bool, optional): whether plot keypoints on the output video.

            Defaults to False.
        kp3d (Optional[Union[np.ndarray, torch.Tensor]], optional):
            the keypoints of any convention, should pass `mask` if have any
            none-sense points. Shape should be (frame, num_points, 3).

            Defaults to None.
        mask (Optional[Union[np.ndarray, List[int]]], optional):
            Mask of keypoints existence.

            Defaults to None.
        vis_kp_index (bool, optional):
            Whether plot keypoint index number on human mesh.

            Defaults to False.
        # visualize render progress
        verbose (bool, optional):
            Whether print the progress bar for rendering.

    Returns:
        Union[None, torch.Tensor]: return the rendered image tensors or None.
    """
    # initialize the device
    device = torch.device(device) if isinstance(device, str) else device
    # normalize `resolution` to a (height, width) tuple
    if isinstance(resolution, int):
        resolution = (resolution, resolution)
    elif isinstance(resolution, list):
        resolution = tuple(resolution)
    verts, poses, betas, transl = _prepare_input_pose(verts, poses, betas,
                                                      transl)
    body_model = _prepare_body_model(body_model, body_model_config)
    model_type = body_model.name().replace('-', '').lower()
    assert model_type in ['smpl', 'smplx', 'star']
    # compute mesh vertices (and joints for smpl/smplx) from the body model
    if model_type in ['smpl', 'smplx']:
        vertices, joints, num_frames, num_person = _prepare_mesh(
            poses, betas, transl, verts, start, end, body_model)
    elif model_type == 'star':
        model_output = body_model(body_pose=poses, betas=betas, transl=transl)
        vertices = model_output['vertices']
        num_frames = poses.shape[0]
        num_person = 1  # star temporarily only support single person
    end = num_frames if end is None else end
    vertices = vertices.view(num_frames, num_person, -1, 3)
    num_verts = vertices.shape[-2]
    if not plot_kps:
        joints = None
        if kp3d is not None:
            warnings.warn('`plot_kps` is False, `kp3d` will be set as None.')
            kp3d = None
    image_array, remove_folder, frames_folder = _prepare_background(
        image_array, frame_list, origin_frames, output_path, start, end,
        img_format, overwrite, num_frames, read_frames_batch)
    # derive the render resolution from the background (if any); `resolution`
    # only controls the final resize of the output
    render_resolution = None
    if image_array is not None:
        render_resolution = (image_array.shape[1], image_array.shape[2])
    elif frames_folder is not None:
        frame_path_list = glob.glob(osp.join(
            frames_folder, '*.jpg')) + glob.glob(
                osp.join(frames_folder, '*.png')) + glob.glob(
                    osp.join(frames_folder, '*.jpeg'))
        vid_info = vid_info_reader(frame_path_list[0])
        render_resolution = (int(vid_info['height']), int(vid_info['width']))
    if resolution is not None:
        if render_resolution is not None:
            if render_resolution != resolution:
                warnings.warn(
                    f'Size of background: {render_resolution} !='
                    f' resolution: {resolution}, the output video will be '
                    f'resized as {resolution}')
            final_resolution = resolution
        elif render_resolution is None:
            render_resolution = final_resolution = resolution
    elif resolution is None:
        if render_resolution is None:
            render_resolution = final_resolution = (1024, 1024)
        elif render_resolution is not None:
            final_resolution = render_resolution
    if isinstance(kp3d, np.ndarray):
        kp3d = torch.Tensor(kp3d)
    if kp3d is not None:
        # drop masked-out keypoints, then slice to the rendered frame range
        if mask is not None:
            map_index = np.where(np.array(mask) != 0)[0]
            kp3d = kp3d[map_index.tolist()]
        kp3d = kp3d[start:end]
        kp3d = kp3d.view(num_frames, -1, 3)
    # prepare render_param_dict
    render_param_dict = copy.deepcopy(RENDER_CONFIGS[render_choice.lower()])
    if model_type == 'smpl':
        render_param_dict.update(num_class=24)
    elif model_type == 'smplx':
        render_param_dict.update(num_class=27)
    if render_choice not in [
            'hq', 'mq', 'lq', 'silhouette', 'part_silhouette', 'depth',
            'pointcloud', 'normal'
    ]:
        raise ValueError('Please choose the right render_choice.')
    # body part colorful visualization should use flat shader to be sharper.
    if texture_image is None:
        # normalize `palette` to one color per person
        if isinstance(palette, str):
            palette = [palette] * num_person
        elif isinstance(palette, np.ndarray):
            palette = torch.Tensor(palette)
            palette = palette.view(-1, 3)
            if palette.shape[0] != num_person:
                _times = num_person // palette.shape[0]
                palette = palette.repeat(_times, 1)[:num_person]
                if palette.shape[0] == 1:
                    print(f'Same color for all the {num_person} people')
                else:
                    print('Repeat palette for multi-person.')
        else:
            raise ValueError('Wrong input palette type. '
                             'Palette should be tensor, array or list of strs')
        colors_all = _prepare_colors(palette, render_choice, num_person,
                                     num_verts, model_type)
        colors_all = colors_all.view(-1, num_person * num_verts, 3)
    # NOTE(review): `colors_all` is only assigned when `texture_image` is
    # None but is passed below unconditionally — verify against upstream.
    # verts of ParametricMeshes should be in (N, V, 3)
    vertices = vertices.view(num_frames, -1, 3)
    meshes = ParametricMeshes(
        body_model=body_model,
        verts=vertices,
        N_individual_overdide=num_person,
        model_type=model_type,
        texture_image=texture_image,
        use_nearest=bool(render_choice == 'part_silhouette'),
        vertex_color=colors_all)
    # write .ply or .obj files
    if mesh_file_path is not None:
        mmcv.mkdir_or_exist(mesh_file_path)
        for person_idx in range(meshes.shape[1]):
            mesh_person = meshes[:, person_idx]
            if texture_image is None:
                # vertex-colored meshes are exported as .ply
                ply_paths = [
                    f'{mesh_file_path}/frame{frame_idx}_'
                    f'person{person_idx}.ply'
                    for frame_idx in range(num_frames)
                ]
                save_meshes_as_plys(meshes=mesh_person, files=ply_paths)
            else:
                # textured meshes are exported as .obj
                obj_paths = [
                    f'{mesh_file_path}/frame{frame_idx}_'
                    f'person{person_idx}.obj'
                    for frame_idx in range(num_frames)
                ]
                save_meshes_as_objs(meshes=mesh_person, files=obj_paths)
    vertices = meshes.verts_padded().view(num_frames, num_person, -1, 3)
    # prepare camera matrices
    if Ks is not None:
        # multi-person per-frame intrinsics (HMR/SPIN demo): bake the
        # per-person correction K^-1 @ Ks @ K into the vertices, then render
        # with the shared K only. `Ks` overrides `orig_cam`.
        projection = 'perspective'
        orig_cam = None
        if isinstance(Ks, np.ndarray):
            Ks = torch.Tensor(Ks)
        Ks = Ks.view(-1, num_person, 3, 3)
        Ks = Ks[start:end]
        Ks = Ks.view(-1, 3, 3)
        K = K.repeat(num_frames * num_person, 1, 1)
        Ks = K.inverse() @ Ks @ K
        vertices = vertices.view(num_frames * num_person, -1, 3)
        if T is None:
            T = torch.zeros(num_frames, num_person, 1, 3)
        elif isinstance(T, np.ndarray):
            T = torch.Tensor(T)
        T = T[start:end]
        T = T.view(num_frames * num_person, 1, 3)
        # apply the correction matrix to the translated vertices
        vertices = torch.einsum('blc,bvc->bvl', Ks, vertices + T)
        R = None
        T = None
        vertices = vertices.view(num_frames, num_person, -1, 3)
    if orig_cam is not None:
        # weak-perspective cameras (e.g. VIBE/HMR predictions): bake the
        # per-person scale/translation into the vertices and render with an
        # identity weak-perspective camera.
        if isinstance(orig_cam, np.ndarray):
            orig_cam = torch.Tensor(orig_cam)
        projection = 'weakperspective'
        r = render_resolution[1] / render_resolution[0]
        orig_cam = orig_cam[start:end]
        orig_cam = orig_cam.view(num_frames, num_person, 4)
        # if num_person > 1:
        sx, sy, tx, ty = torch.unbind(orig_cam, -1)
        vertices[..., 0] += tx.view(num_frames, num_person, 1)
        vertices[..., 1] += ty.view(num_frames, num_person, 1)
        vertices[..., 0] *= sx.view(num_frames, num_person, 1)
        vertices[..., 1] *= sy.view(num_frames, num_person, 1)
        orig_cam = torch.tensor([1.0, 1.0, 0.0,
                                 0.0]).view(1, 4).repeat(num_frames, 1)
        K, R, T = WeakPerspectiveCameras.convert_orig_cam_to_matrix(
            orig_cam=orig_cam,
            znear=torch.min(vertices[..., 2] - 1),
            aspect_ratio=r)
        if num_person > 1:
            vertices = vertices.reshape(num_frames, -1, 3)
        else:
            vertices = vertices.view(num_frames, -1, 3)
        meshes = meshes.update_padded(new_verts_padded=vertices)
    # orig_cam and K are None, use look_at_view
    if K is None:
        projection = 'fovperspective'
        K, R, T = compute_orbit_cameras(
            at=(torch.mean(vertices.view(-1, 3), 0)).detach().cpu(),
            orbit_speed=orbit_speed,
            batch_size=num_frames,
            convention=convention)
        convention = 'pytorch3d'
    # normalize R / T / K types and shapes, then slice to the frame range
    if isinstance(R, np.ndarray):
        R = torch.Tensor(R).view(-1, 3, 3)
    elif isinstance(R, torch.Tensor):
        R = R.view(-1, 3, 3)
    elif isinstance(R, list):
        R = torch.Tensor(R).view(-1, 3, 3)
    elif R is None:
        pass
    else:
        raise ValueError(f'Wrong type of R: {type(R)}!')
    if R is not None:
        if len(R) > num_frames:
            R = R[start:end]
    if isinstance(T, np.ndarray):
        T = torch.Tensor(T).view(-1, 3)
    elif isinstance(T, torch.Tensor):
        T = T.view(-1, 3)
    elif isinstance(T, list):
        T = torch.Tensor(T).view(-1, 3)
    elif T is None:
        pass
    else:
        raise ValueError(f'Wrong type of T: {type(T)}!')
    if T is not None:
        if len(T) > num_frames:
            T = T[start:end]
    if isinstance(K, np.ndarray):
        K = torch.Tensor(K).view(-1, K.shape[-2], K.shape[-1])
    elif isinstance(K, torch.Tensor):
        K = K.view(-1, K.shape[-2], K.shape[-1])
    elif isinstance(K, list):
        K = torch.Tensor(K)
        K = K.view(-1, K.shape[-2], K.shape[-1])
    else:
        raise ValueError(f'Wrong type of K: {type(K)}!')
    if K is not None:
        if len(K) > num_frames:
            K = K[start:end]
    assert projection in [
        'perspective', 'weakperspective', 'orthographics', 'fovorthographics',
        'fovperspective'
    ], f'Wrong camera projection: {projection}'
    if projection in ['fovperspective', 'perspective']:
        is_perspective = True
    elif projection in [
            'fovorthographics', 'weakperspective', 'orthographics'
    ]:
        is_perspective = False
    if projection in ['fovperspective', 'fovorthographics', 'weakperspective']:
        assert in_ndc
    # convert the camera matrices to the pytorch3d convention
    K, R, T = convert_camera_matrix(
        convention_dst='pytorch3d',
        K=K,
        R=R,
        T=T,
        is_perspective=is_perspective,
        convention_src=convention,
        resolution_src=render_resolution,
        in_ndc_src=in_ndc,
        in_ndc_dst=in_ndc)
    # initialize the renderer.
    renderer = SMPLRenderer(
        resolution=render_resolution,
        device=device,
        output_path=output_path,
        return_tensor=return_tensor,
        alpha=alpha,
        read_img_format=img_format,
        render_choice=render_choice,
        frames_folder=frames_folder,
        plot_kps=plot_kps,
        vis_kp_index=vis_kp_index,
        final_resolution=final_resolution,
        **render_param_dict)
    cameras = build_cameras(
        dict(
            type=projection,
            in_ndc=in_ndc,
            device=device,
            K=K,
            R=R,
            T=T,
            resolution=render_resolution))
    if image_array is not None:
        image_array = torch.Tensor(image_array)
        image_array = align_input_to_padded(
            image_array, ndim=4, batch_size=num_frames, padding_mode='ones')
    # prepare the render data.
    render_data = dict(
        images=image_array,
        meshes=meshes,
        cameras=cameras,
        joints=joints,
        joints_gt=kp3d,
    )
    results = render_runner.render(
        renderer=renderer,
        device=device,
        batch_size=batch_size,
        output_path=output_path,
        return_tensor=return_tensor,
        no_grad=no_grad,
        verbose=verbose,
        **render_data)
    # clean up temporary sliced frames (created when origin_frames was a video)
    if remove_folder:
        if Path(frames_folder).is_dir():
            shutil.rmtree(frames_folder)
    if return_tensor:
        return results
    else:
        return None
def convert_crop_cam_to_orig_img(cam: np.ndarray,
                                 bbox: np.ndarray,
                                 img_width: int,
                                 img_height: int,
                                 aspect_ratio: float = 1.0,
                                 bbox_scale_factor: float = 1.25,
                                 bbox_format: Literal['xyxy', 'xywh',
                                                      'cs'] = 'xyxy'):
    """Convert weak-perspective cameras from crop space to original image
    space.

    This function is modified from [VIBE](https://github.com/
    mkocabas/VIBE/blob/master/lib/utils/demo_utils.py#L242-L259). Original
    license please see docs/additional_licenses.md.

    Args:
        cam (np.ndarray): weak perspective camera in cropped img coordinates,
            shape (frame, 3) or (frame, num_person, 3).
        bbox (np.ndarray): bbox coordinates in `bbox_format`.
        img_width (int): original image width.
        img_height (int): original image height.
        aspect_ratio (float, optional): Defaults to 1.0.
        bbox_scale_factor (float, optional): Defaults to 1.25.
        bbox_format (Literal['xyxy', 'xywh', 'cs']): Defaults to 'xyxy'.
            'xyxy' is the left-up point and the right-bottom point;
            'xywh' is the left-up point plus width and height;
            'cs' is the bbox center (x, y) plus the scale of w & h.

    Returns:
        np.ndarray: orig_cam of shape (frame, 4) or (frame, num_person, 4).
    """
    if not isinstance(bbox, np.ndarray):
        raise TypeError(
            f'Input type is {type(bbox)}, which should be numpy.ndarray.')
    # work on a copy so the caller's bbox is never mutated
    bbox = bbox.copy()

    # normalize the bbox into center-scale ('cs') representation
    if bbox_format == 'cs':
        bbox_cs = bbox
    elif bbox_format in ('xyxy', 'xywh'):
        bbox_xywh = xyxy2xywh(bbox) if bbox_format == 'xyxy' else bbox
        center, scale = box2cs(bbox_xywh, aspect_ratio, bbox_scale_factor)
        bbox_cs = np.concatenate([center, scale], axis=-1)
    else:
        raise ValueError('Only supports the format of `xyxy`, `cs` and `xywh`')

    cx = bbox_cs[..., 0]
    cy = bbox_cs[..., 1]
    h = bbox_cs[..., 2] + 1e-6  # epsilon guards against zero-sized boxes
    half_w = img_width / 2.
    half_h = img_height / 2.

    # rescale the crop-space scale to image space, then shift the
    # translation by the (normalized) bbox center offset
    sx = cam[..., 0] * (1. / (img_width / h))
    sy = cam[..., 0] * (1. / (img_height / h))
    tx = ((cx - half_w) / half_w / (sx + 1e-6)) + cam[..., 1]
    ty = ((cy - half_h) / half_h / (sy + 1e-6)) + cam[..., 2]
    return np.stack([sx, sy, tx, ty], axis=-1)
The provided code snippet includes the necessary dependencies for implementing the `visualize_smpl_vibe` function. Write a Python function `def visualize_smpl_vibe(orig_cam=None, pred_cam=None, bbox=None, output_path='sample.mp4', resolution=None, aspect_ratio=1.0, bbox_scale_factor=1.25, bbox_format='xyxy', **kwargs) -> None` to solve the following problem:
The simplest way to visualize predicted SMPL meshes over the original frames with predicted cameras.
Here is the function:
def visualize_smpl_vibe(orig_cam=None,
                        pred_cam=None,
                        bbox=None,
                        output_path='sample.mp4',
                        resolution=None,
                        aspect_ratio=1.0,
                        bbox_scale_factor=1.25,
                        bbox_format='xyxy',
                        **kwargs) -> None:
    """Simplest way to visualize pred smpl with origin frames and predicted
    cameras."""
    assert resolution is not None
    # When cropped-image cameras and bboxes are given, convert them to
    # full-image weak-perspective cameras first.
    if pred_cam is not None and bbox is not None:
        img_height, img_width = resolution[0], resolution[1]
        orig_cam = torch.Tensor(
            convert_crop_cam_to_orig_img(pred_cam, bbox, img_width,
                                         img_height, aspect_ratio,
                                         bbox_scale_factor, bbox_format))
    assert orig_cam is not None, '`orig_cam` is required.'
    # These render settings are fixed for VIBE-style cameras; strip any
    # caller-supplied duplicates so they cannot override the fixed values.
    render = partial(
        render_smpl,
        projection='weakperspective',
        convention='opencv',
        in_ndc=True,
    )
    for key in list(render.keywords):
        kwargs.pop(key, None)
    return render(
        orig_cam=orig_cam,
        output_path=output_path,
        resolution=resolution,
        **kwargs)
import copy
import glob
import os
import os.path as osp
import shutil
import warnings
from functools import partial
from pathlib import Path
from typing import List, Optional, Tuple, Union
import mmcv
import numpy as np
import torch
import torch.nn as nn
from colormap import Color
from mmhuman3d.core.cameras import (
WeakPerspectiveCameras,
compute_orbit_cameras,
)
from mmhuman3d.core.cameras.builder import build_cameras
from mmhuman3d.core.conventions.cameras.convert_convention import \
convert_camera_matrix
from mmhuman3d.core.conventions.segmentation import body_segmentation
from mmhuman3d.core.renderer.torch3d_renderer import render_runner
from mmhuman3d.core.renderer.torch3d_renderer.meshes import \
ParametricMeshes
from mmhuman3d.core.renderer.torch3d_renderer.render_smpl_config import (
RENDER_CONFIGS,
)
from mmhuman3d.core.renderer.torch3d_renderer.smpl_renderer import SMPLRenderer
from mmhuman3d.core.renderer.torch3d_renderer.utils import \
align_input_to_padded
from mmhuman3d.models.body_models.builder import build_body_model
from mmhuman3d.utils.demo_utils import (
convert_bbox_to_intrinsic,
convert_crop_cam_to_orig_img,
convert_kp2d_to_bbox,
get_default_hmr_intrinsic,
get_different_colors,
)
from mmhuman3d.utils.ffmpeg_utils import (
check_input_path,
images_to_array,
prepare_output_path,
vid_info_reader,
video_to_array,
video_to_images,
)
from mmhuman3d.utils.mesh_utils import save_meshes_as_objs, save_meshes_as_plys
from mmhuman3d.utils.path_utils import check_path_suffix
def render_smpl(
    # smpl parameters
    poses: Optional[Union[torch.Tensor, np.ndarray, dict]] = None,
    betas: Optional[Union[torch.Tensor, np.ndarray]] = None,
    transl: Optional[Union[torch.Tensor, np.ndarray]] = None,
    verts: Optional[Union[torch.Tensor, np.ndarray]] = None,
    body_model: Optional[nn.Module] = None,
    body_model_config: Optional[dict] = None,
    # camera parameters
    R: Optional[Union[torch.Tensor, np.ndarray]] = None,
    T: Optional[Union[torch.Tensor, np.ndarray]] = None,
    K: Optional[Union[torch.Tensor, np.ndarray]] = None,
    orig_cam: Optional[Union[torch.Tensor, np.ndarray]] = None,
    Ks: Optional[Union[torch.Tensor, np.ndarray]] = None,
    in_ndc: bool = True,
    convention: str = 'pytorch3d',
    projection: Literal['weakperspective', 'perspective', 'fovperspective',
                        'orthographics',
                        'fovorthographics'] = 'perspective',
    orbit_speed: Union[float, Tuple[float, float]] = 0.0,
    # render choice parameters
    render_choice: Literal['lq', 'mq', 'hq', 'silhouette', 'depth',
                           'normal', 'pointcloud',
                           'part_silhouette'] = 'hq',
    palette: Union[List[str], str, np.ndarray, torch.Tensor] = 'white',
    texture_image: Union[torch.Tensor, np.ndarray] = None,
    resolution: Optional[Union[List[int], Tuple[int, int]]] = None,
    start: int = 0,
    end: Optional[int] = None,
    alpha: float = 1.0,
    no_grad: bool = True,
    batch_size: int = 10,
    device: Union[torch.device, str] = 'cuda',
    # file io parameters
    return_tensor: bool = False,
    output_path: str = None,
    origin_frames: Optional[str] = None,
    frame_list: Optional[List[str]] = None,
    image_array: Optional[Union[np.ndarray, torch.Tensor]] = None,
    img_format: str = '%06d.png',
    overwrite: bool = False,
    mesh_file_path: Optional[str] = None,
    read_frames_batch: bool = False,
    # visualize keypoints
    plot_kps: bool = False,
    kp3d: Optional[Union[np.ndarray, torch.Tensor]] = None,
    mask: Optional[Union[np.ndarray, List[int]]] = None,
    vis_kp_index: bool = False,
    verbose: bool = False) -> Union[None, torch.Tensor]:
    """Render SMPL, SMPL-X or STAR mesh or silhouette into differentiable
    tensors, and export video or images.

    Args:
        # smpl parameters:
        poses (Union[torch.Tensor, np.ndarray, dict]):
            1). `tensor` or `array` and ndim is 2, shape should be
            (frame, 72).
            2). `tensor` or `array` and ndim is 3, shape should be
            (frame, num_person, 72/165). num_person equals 1 means
            single-person.
            Rendering predicted multi-person should feed together with
            multi-person weakperspective cameras. meshes would be computed
            and use an identity intrinsic matrix.
            3). `dict`, standard dict format defined in smplx.body_models.
            will be treated as single-person.
            Lower priority than `verts`.
            Defaults to None.
        betas (Optional[Union[torch.Tensor, np.ndarray]], optional):
            1). ndim is 2, shape should be (frame, 10).
            2). ndim is 3, shape should be (frame, num_person, 10). num_person
            equals 1 means single-person. If poses are multi-person, betas
            should be set to the same person number.
            None will use default betas.
            Defaults to None.
        transl (Optional[Union[torch.Tensor, np.ndarray]], optional):
            translations of smpl(x).
            1). ndim is 2, shape should be (frame, 3).
            2). ndim is 3, shape should be (frame, num_person, 3). num_person
            equals 1 means single-person. If poses are multi-person,
            transl should be set to the same person number.
            Defaults to None.
        verts (Optional[Union[torch.Tensor, np.ndarray]], optional):
            1). ndim is 3, shape should be (frame, num_verts, 3).
            2). ndim is 4, shape should be (frame, num_person, num_verts, 3).
            num_person equals 1 means single-person.
            Higher priority over `poses` & `betas` & `transl`.
            Defaults to None.
        body_model (nn.Module, optional): body_model created from smplx.create.
            Higher priority than `body_model_config`. If `body_model` is not
            None, it will override `body_model_config`.
            Should not both be None.
            Defaults to None.
        body_model_config (dict, optional): body_model_config for build_model.
            Lower priority than `body_model`. Should not both be None.
            Defaults to None.

        # camera parameters:
        K (Optional[Union[torch.Tensor, np.ndarray]], optional):
            shape should be (frame, 4, 4) or (frame, 3, 3), frame could be 1.
            if (4, 4) or (3, 3), dim 0 will be added automatically.
            Will be default `FovPerspectiveCameras` intrinsic if None.
            Lower priority than `orig_cam`.
        R (Optional[Union[torch.Tensor, np.ndarray]], optional):
            shape should be (frame, 3, 3), If f equals 1, camera will have
            identical rotation.
            If `K` and `orig_cam` is None, will be generated by `look_at_view`.
            If have `K` or `orig_cam` and `R` is None, will be generated by
            `convert_camera_matrix`.
            Defaults to None.
        T (Optional[Union[torch.Tensor, np.ndarray]], optional):
            shape should be (frame, 3). If f equals 1, camera will have
            identical translation.
            If `K` and `orig_cam` is None, will be generated by `look_at_view`.
            If have `K` or `orig_cam` and `T` is None, will be generated by
            `convert_camera_matrix`.
            Defaults to None.
        orig_cam (Optional[Union[torch.Tensor, np.ndarray]], optional):
            shape should be (frame, 4) or (frame, num_person, 4). If f equals
            1, will be repeated to num_frames. num_person should be 1 if single
            person. Usually for HMR, VIBE predicted cameras.
            Higher priority than `K` & `R` & `T`.
            Defaults to None.
        Ks (Optional[Union[torch.Tensor, np.ndarray]], optional):
            shape should be (frame, 4, 4).
            This is for HMR or SPIN multi-person demo.
        in_ndc (bool, optional): . Defaults to True.
        convention (str, optional): If want to use an existing convention,
            choose in ['opengl', 'opencv', 'pytorch3d', 'pyrender', 'open3d',
            'maya', 'blender', 'unity'].
            If want to use a new convention, define your convention in
            (CAMERA_CONVENTION_FACTORY)[mmhuman3d/core/conventions/cameras/
            __init__.py] by the order of right, front and up.
            Defaults to 'pytorch3d'.
        projection (Literal[, optional): projection mode of camers. Choose in
            ['orthographics, fovperspective', 'perspective', 'weakperspective',
            'fovorthographics']
            Defaults to 'perspective'.
        orbit_speed (float, optional): orbit speed for viewing when no `K`
            provided. `float` for only azim speed and Tuple for `azim` and
            `elev`.

        # render choice parameters:
        render_choice (Literal[, optional):
            choose in ['lq', 'mq', 'hq', 'silhouette', 'depth', 'normal',
            'pointcloud', 'part_silhouette'] .
            `lq`, `mq`, `hq` would output (frame, h, w, 4) FloatTensor.
            `lq` means low quality, `mq` means medium quality,
            h`q means high quality.
            `silhouette` would output (frame, h, w) soft binary FloatTensor.
            `part_silhouette` would output (frame, h, w, 1) LongTensor.
            Every pixel stores a class index.
            `depth` will output a depth map of (frame, h, w, 1) FloatTensor
            and 'normal' will output a normal map of (frame, h, w, 1).
            `pointcloud` will output a (frame, h, w, 4) FloatTensor.
            Defaults to 'mq'.
        palette (Union[List[str], str, np.ndarray], optional):
            color theme str or list of color str or `array`.
            1). If use str to represent the color,
            should choose in ['segmentation', 'random'] or color from
            Colormap https://en.wikipedia.org/wiki/X11_color_names.
            If choose 'segmentation', will get a color for each part.
            2). If you have multi-person, better give a list of str or all
            will be in the same color.
            3). If you want to define your specific color, use an `array`
            of shape (3,) for single person and (N, 3) for multiple persons.
            If (3,) for multiple persons, all will be in the same color.
            Your `array` should be in range [0, 255] for 8 bit color.
            Defaults to 'white'.
        texture_image (Union[torch.Tensor, np.ndarray], optional):
            Texture image to be wrapped on the smpl mesh. If not None,
            the `palette` will be ignored, and the `body_model` is required
            to have `uv_param_path`.
            Should pass list or tensor of shape (num_person, H, W, 3).
            The color channel should be `RGB`.
            Defaults to None.
        resolution (Union[Iterable[int], int], optional):
            1). If iterable, should be (height, width) of output images.
            2). If int, would be taken as (resolution, resolution).
            Defaults to (1024, 1024).
            This will influence the overlay results when render with
            backgrounds. The output video will be rendered following the
            size of background images and finally resized to resolution.
        start (int, optional): start frame index. Defaults to 0.
        end (int, optional): end frame index. Exclusive.
            Could be positive int or negative int or None.
            None represents include all the frames.
            Defaults to None.
        alpha (float, optional): Transparency of the mesh.
            Range in [0.0, 1.0]
            Defaults to 1.0.
        no_grad (bool, optional): Set to True if do not need differentiable
            render.
            Defaults to False.
        batch_size (int, optional): Batch size for render.
            Related to your gpu memory.
            Defaults to 10.

        # file io parameters:
        return_tensor (bool, optional): Whether return the result tensors.
            Defaults to False, will return None.
        output_path (str, optional): output video or gif or image folder.
            Defaults to None, pass export procedure.

        # background frames, priority: image_array > frame_list > origin_frames
        origin_frames (Optional[str], optional): origin background frame path,
            could be `.mp4`, `.gif`(will be sliced into a folder) or an image
            folder.
            Defaults to None.
        frame_list (Optional[List[str]], optional): list of origin background
            frame paths, element in list each should be a image path like
            `*.jpg` or `*.png`.
            Use this when your file names is hard to sort or you only want to
            render a small number frames.
            Defaults to None.
        image_array: (Optional[Union[np.ndarray, torch.Tensor]], optional):
            origin background frame `tensor` or `array`, use this when you
            want your frames in memory as array or tensor.
        overwrite (bool, optional): whether overwriting the existing files.
            Defaults to False.
        mesh_file_path (bool, optional): the directory path to store the `.ply`
            or '.ply' files. Will be named like 'frame_idx_person_idx.ply'.
            Defaults to None.
        read_frames_batch (bool, optional): Whether read frames by batch.
            Set it as True if your video is large in size.
            Defaults to False.

        # visualize keypoints
        plot_kps (bool, optional): whether plot keypoints on the output video.
            Defaults to False.
        kp3d (Optional[Union[np.ndarray, torch.Tensor]], optional):
            the keypoints of any convention, should pass `mask` if have any
            none-sense points. Shape should be (frame, )
            Defaults to None.
        mask (Optional[Union[np.ndarray, List[int]]], optional):
            Mask of keypoints existence.
            Defaults to None.
        vis_kp_index (bool, optional):
            Whether plot keypoint index number on human mesh.
            Defaults to False.

        # visualize render progress
        verbose (bool, optional):
            Whether print the progress bar for rendering.

    Returns:
        Union[None, torch.Tensor]: return the rendered image tensors or None.
    """
    # initialize the device
    device = torch.device(device) if isinstance(device, str) else device
    # Normalize `resolution` to a (height, width) tuple; an int means square.
    if isinstance(resolution, int):
        resolution = (resolution, resolution)
    elif isinstance(resolution, list):
        resolution = tuple(resolution)
    # Canonicalize pose/shape inputs and resolve the body model. The
    # `_prepare_*` helpers are defined elsewhere in this module; presumably
    # they coerce arrays/dicts to tensors of uniform shape — confirm there.
    verts, poses, betas, transl = _prepare_input_pose(verts, poses, betas,
                                                      transl)
    body_model = _prepare_body_model(body_model, body_model_config)
    model_type = body_model.name().replace('-', '').lower()
    assert model_type in ['smpl', 'smplx', 'star']
    if model_type in ['smpl', 'smplx']:
        vertices, joints, num_frames, num_person = _prepare_mesh(
            poses, betas, transl, verts, start, end, body_model)
    elif model_type == 'star':
        # STAR path: run the body model directly to obtain vertices only.
        model_output = body_model(body_pose=poses, betas=betas, transl=transl)
        vertices = model_output['vertices']
        num_frames = poses.shape[0]
        num_person = 1  # star temporarily only support single person
        end = num_frames if end is None else end
    vertices = vertices.view(num_frames, num_person, -1, 3)
    num_verts = vertices.shape[-2]
    if not plot_kps:
        # Keypoints are only needed for plotting; drop them otherwise.
        joints = None
        if kp3d is not None:
            warnings.warn('`plot_kps` is False, `kp3d` will be set as None.')
            kp3d = None
    # Resolve background frames (priority per docstring:
    # image_array > frame_list > origin_frames).
    image_array, remove_folder, frames_folder = _prepare_background(
        image_array, frame_list, origin_frames, output_path, start, end,
        img_format, overwrite, num_frames, read_frames_batch)
    # Rendering happens at the background's native size (`render_resolution`);
    # the final output is resized to `final_resolution`.
    render_resolution = None
    if image_array is not None:
        render_resolution = (image_array.shape[1], image_array.shape[2])
    elif frames_folder is not None:
        frame_path_list = glob.glob(osp.join(
            frames_folder, '*.jpg')) + glob.glob(
                osp.join(frames_folder, '*.png')) + glob.glob(
                    osp.join(frames_folder, '*.jpeg'))
        vid_info = vid_info_reader(frame_path_list[0])
        render_resolution = (int(vid_info['height']), int(vid_info['width']))
    if resolution is not None:
        if render_resolution is not None:
            if render_resolution != resolution:
                warnings.warn(
                    f'Size of background: {render_resolution} !='
                    f' resolution: {resolution}, the output video will be '
                    f'resized as {resolution}')
            final_resolution = resolution
        elif render_resolution is None:
            render_resolution = final_resolution = resolution
    elif resolution is None:
        if render_resolution is None:
            render_resolution = final_resolution = (1024, 1024)
        elif render_resolution is not None:
            final_resolution = render_resolution
    # Filter keypoints by the existence mask and slice to [start:end].
    if isinstance(kp3d, np.ndarray):
        kp3d = torch.Tensor(kp3d)
    if kp3d is not None:
        if mask is not None:
            map_index = np.where(np.array(mask) != 0)[0]
            kp3d = kp3d[map_index.tolist()]
        kp3d = kp3d[start:end]
        kp3d = kp3d.view(num_frames, -1, 3)
    # prepare render_param_dict
    render_param_dict = copy.deepcopy(RENDER_CONFIGS[render_choice.lower()])
    if model_type == 'smpl':
        render_param_dict.update(num_class=24)
    elif model_type == 'smplx':
        render_param_dict.update(num_class=27)
    if render_choice not in [
            'hq', 'mq', 'lq', 'silhouette', 'part_silhouette', 'depth',
            'pointcloud', 'normal'
    ]:
        raise ValueError('Please choose the right render_choice.')
    # body part colorful visualization should use flat shader to be sharper.
    if texture_image is None:
        if isinstance(palette, str):
            palette = [palette] * num_person
        elif isinstance(palette, np.ndarray):
            palette = torch.Tensor(palette)
            palette = palette.view(-1, 3)
            if palette.shape[0] != num_person:
                _times = num_person // palette.shape[0]
                palette = palette.repeat(_times, 1)[:num_person]
                if palette.shape[0] == 1:
                    print(f'Same color for all the {num_person} people')
                else:
                    print('Repeat palette for multi-person.')
        else:
            # NOTE(review): a list-of-str palette falls through to this error
            # even though the signature allows List[str] — verify upstream.
            raise ValueError('Wrong input palette type. '
                             'Palette should be tensor, array or list of strs')
    colors_all = _prepare_colors(palette, render_choice, num_person,
                                 num_verts, model_type)
    colors_all = colors_all.view(-1, num_person * num_verts, 3)
    # verts of ParametricMeshes should be in (N, V, 3)
    vertices = vertices.view(num_frames, -1, 3)
    meshes = ParametricMeshes(
        body_model=body_model,
        verts=vertices,
        N_individual_overdide=num_person,
        model_type=model_type,
        texture_image=texture_image,
        use_nearest=bool(render_choice == 'part_silhouette'),
        vertex_color=colors_all)
    # write .ply or .obj files
    if mesh_file_path is not None:
        mmcv.mkdir_or_exist(mesh_file_path)
        for person_idx in range(meshes.shape[1]):
            mesh_person = meshes[:, person_idx]
            if texture_image is None:
                # Vertex-colored meshes export as .ply.
                ply_paths = [
                    f'{mesh_file_path}/frame{frame_idx}_'
                    f'person{person_idx}.ply'
                    for frame_idx in range(num_frames)
                ]
                save_meshes_as_plys(meshes=mesh_person, files=ply_paths)
            else:
                # Textured meshes export as .obj.
                obj_paths = [
                    f'{mesh_file_path}/frame{frame_idx}_'
                    f'person{person_idx}.obj'
                    for frame_idx in range(num_frames)
                ]
                save_meshes_as_objs(meshes=mesh_person, files=obj_paths)
    vertices = meshes.verts_padded().view(num_frames, num_person, -1, 3)
    # prepare camera matrixs
    if Ks is not None:
        # Multi-person perspective path (per docstring: HMR/SPIN demo):
        # pre-transform vertices by K^-1 @ Ks @ K so one shared camera works.
        # NOTE(review): this branch dereferences `K` (K.repeat), so `K` must
        # be provided together with `Ks` — confirm with callers.
        projection = 'perspective'
        orig_cam = None
        if isinstance(Ks, np.ndarray):
            Ks = torch.Tensor(Ks)
        Ks = Ks.view(-1, num_person, 3, 3)
        Ks = Ks[start:end]
        Ks = Ks.view(-1, 3, 3)
        K = K.repeat(num_frames * num_person, 1, 1)
        Ks = K.inverse() @ Ks @ K
        vertices = vertices.view(num_frames * num_person, -1, 3)
        if T is None:
            T = torch.zeros(num_frames, num_person, 1, 3)
        elif isinstance(T, np.ndarray):
            T = torch.Tensor(T)
        T = T[start:end]
        T = T.view(num_frames * num_person, 1, 3)
        vertices = torch.einsum('blc,bvc->bvl', Ks, vertices + T)
        # Extrinsics are already baked into the vertices; reset R/T.
        R = None
        T = None
        vertices = vertices.view(num_frames, num_person, -1, 3)
    if orig_cam is not None:
        # Weak-perspective path: bake each person's scale/translation into
        # the vertices, then use one identity orig_cam for all frames.
        if isinstance(orig_cam, np.ndarray):
            orig_cam = torch.Tensor(orig_cam)
        projection = 'weakperspective'
        r = render_resolution[1] / render_resolution[0]
        orig_cam = orig_cam[start:end]
        orig_cam = orig_cam.view(num_frames, num_person, 4)
        # if num_person > 1:
        sx, sy, tx, ty = torch.unbind(orig_cam, -1)
        vertices[..., 0] += tx.view(num_frames, num_person, 1)
        vertices[..., 1] += ty.view(num_frames, num_person, 1)
        vertices[..., 0] *= sx.view(num_frames, num_person, 1)
        vertices[..., 1] *= sy.view(num_frames, num_person, 1)
        orig_cam = torch.tensor([1.0, 1.0, 0.0,
                                 0.0]).view(1, 4).repeat(num_frames, 1)
        K, R, T = WeakPerspectiveCameras.convert_orig_cam_to_matrix(
            orig_cam=orig_cam,
            znear=torch.min(vertices[..., 2] - 1),
            aspect_ratio=r)
    if num_person > 1:
        vertices = vertices.reshape(num_frames, -1, 3)
    else:
        vertices = vertices.view(num_frames, -1, 3)
    meshes = meshes.update_padded(new_verts_padded=vertices)
    # orig_cam and K are None, use look_at_view
    if K is None:
        projection = 'fovperspective'
        K, R, T = compute_orbit_cameras(
            at=(torch.mean(vertices.view(-1, 3), 0)).detach().cpu(),
            orbit_speed=orbit_speed,
            batch_size=num_frames,
            convention=convention)
        convention = 'pytorch3d'
    # Coerce R / T / K to tensors with canonical shapes, and slice to the
    # frame range when more entries than num_frames were supplied.
    if isinstance(R, np.ndarray):
        R = torch.Tensor(R).view(-1, 3, 3)
    elif isinstance(R, torch.Tensor):
        R = R.view(-1, 3, 3)
    elif isinstance(R, list):
        R = torch.Tensor(R).view(-1, 3, 3)
    elif R is None:
        pass
    else:
        raise ValueError(f'Wrong type of R: {type(R)}!')
    if R is not None:
        if len(R) > num_frames:
            R = R[start:end]
    if isinstance(T, np.ndarray):
        T = torch.Tensor(T).view(-1, 3)
    elif isinstance(T, torch.Tensor):
        T = T.view(-1, 3)
    elif isinstance(T, list):
        T = torch.Tensor(T).view(-1, 3)
    elif T is None:
        pass
    else:
        raise ValueError(f'Wrong type of T: {type(T)}!')
    if T is not None:
        if len(T) > num_frames:
            T = T[start:end]
    if isinstance(K, np.ndarray):
        K = torch.Tensor(K).view(-1, K.shape[-2], K.shape[-1])
    elif isinstance(K, torch.Tensor):
        K = K.view(-1, K.shape[-2], K.shape[-1])
    elif isinstance(K, list):
        K = torch.Tensor(K)
        K = K.view(-1, K.shape[-2], K.shape[-1])
    else:
        raise ValueError(f'Wrong type of K: {type(K)}!')
    if K is not None:
        if len(K) > num_frames:
            K = K[start:end]
    assert projection in [
        'perspective', 'weakperspective', 'orthographics', 'fovorthographics',
        'fovperspective'
    ], f'Wrong camera projection: {projection}'
    if projection in ['fovperspective', 'perspective']:
        is_perspective = True
    elif projection in [
            'fovorthographics', 'weakperspective', 'orthographics'
    ]:
        is_perspective = False
    # FoV-style and weak-perspective cameras are only defined in NDC space.
    if projection in ['fovperspective', 'fovorthographics', 'weakperspective']:
        assert in_ndc
    K, R, T = convert_camera_matrix(
        convention_dst='pytorch3d',
        K=K,
        R=R,
        T=T,
        is_perspective=is_perspective,
        convention_src=convention,
        resolution_src=render_resolution,
        in_ndc_src=in_ndc,
        in_ndc_dst=in_ndc)
    # initialize the renderer.
    renderer = SMPLRenderer(
        resolution=render_resolution,
        device=device,
        output_path=output_path,
        return_tensor=return_tensor,
        alpha=alpha,
        read_img_format=img_format,
        render_choice=render_choice,
        frames_folder=frames_folder,
        plot_kps=plot_kps,
        vis_kp_index=vis_kp_index,
        final_resolution=final_resolution,
        **render_param_dict)
    cameras = build_cameras(
        dict(
            type=projection,
            in_ndc=in_ndc,
            device=device,
            K=K,
            R=R,
            T=T,
            resolution=render_resolution))
    if image_array is not None:
        image_array = torch.Tensor(image_array)
        image_array = align_input_to_padded(
            image_array, ndim=4, batch_size=num_frames, padding_mode='ones')
    # prepare the render data.
    render_data = dict(
        images=image_array,
        meshes=meshes,
        cameras=cameras,
        joints=joints,
        joints_gt=kp3d,
    )
    results = render_runner.render(
        renderer=renderer,
        device=device,
        batch_size=batch_size,
        output_path=output_path,
        return_tensor=return_tensor,
        no_grad=no_grad,
        verbose=verbose,
        **render_data)
    # Clean up the temporary frames folder — presumably created by
    # _prepare_background when slicing a video/gif; confirm there.
    if remove_folder:
        if Path(frames_folder).is_dir():
            shutil.rmtree(frames_folder)
    if return_tensor:
        return results
    else:
        return None
The provided code snippet includes necessary dependencies for implementing the `visualize_T_pose` function. Write a Python function `def visualize_T_pose(num_frames, body_model_config=None, body_model=None, orbit_speed=1.0, **kwargs) -> None` to solve the following problem:
Simplest way to visualize a sequence of T pose.
Here is the function:
def visualize_T_pose(num_frames,
                     body_model_config=None,
                     body_model=None,
                     orbit_speed=1.0,
                     **kwargs) -> None:
    """Simplest way to visualize a sequence of T pose."""
    assert num_frames > 0, '`num_frames` is required.'
    assert body_model_config is not None or body_model is not None
    # Resolve the model type either from the config dict or from the
    # instantiated body model's name (e.g. 'SMPL-X' -> 'smplx').
    if body_model_config is not None:
        model_type = body_model_config['type']
    else:
        model_type = body_model.name().replace('-', '').lower()
    # SMPL takes 72 pose parameters; other model types take 165.
    pose_dim = 72 if model_type == 'smpl' else 165
    poses = torch.zeros(num_frames, pose_dim)
    # Fix the camera-free orbit-view render settings; strip any
    # caller-supplied duplicates so they cannot override them.
    render = partial(
        render_smpl,
        betas=None,
        transl=None,
        verts=None,
        convention='pytorch3d',
        projection='fovperspective',
        K=None,
        R=None,
        T=None,
        origin_frames=None)
    for key in list(render.keywords):
        kwargs.pop(key, None)
    return render(
        poses=poses,
        body_model_config=body_model_config,
        body_model=body_model,
        orbit_speed=orbit_speed,
        **kwargs)
import copy
import glob
import os
import os.path as osp
import shutil
import warnings
from functools import partial
from pathlib import Path
from typing import List, Optional, Tuple, Union
import mmcv
import numpy as np
import torch
import torch.nn as nn
from colormap import Color
from mmhuman3d.core.cameras import (
WeakPerspectiveCameras,
compute_orbit_cameras,
)
from mmhuman3d.core.cameras.builder import build_cameras
from mmhuman3d.core.conventions.cameras.convert_convention import \
convert_camera_matrix
from mmhuman3d.core.conventions.segmentation import body_segmentation
from mmhuman3d.core.renderer.torch3d_renderer import render_runner
from mmhuman3d.core.renderer.torch3d_renderer.meshes import \
ParametricMeshes
from mmhuman3d.core.renderer.torch3d_renderer.render_smpl_config import (
RENDER_CONFIGS,
)
from mmhuman3d.core.renderer.torch3d_renderer.smpl_renderer import SMPLRenderer
from mmhuman3d.core.renderer.torch3d_renderer.utils import \
align_input_to_padded
from mmhuman3d.models.body_models.builder import build_body_model
from mmhuman3d.utils.demo_utils import (
convert_bbox_to_intrinsic,
convert_crop_cam_to_orig_img,
convert_kp2d_to_bbox,
get_default_hmr_intrinsic,
get_different_colors,
)
from mmhuman3d.utils.ffmpeg_utils import (
check_input_path,
images_to_array,
prepare_output_path,
vid_info_reader,
video_to_array,
video_to_images,
)
from mmhuman3d.utils.mesh_utils import save_meshes_as_objs, save_meshes_as_plys
from mmhuman3d.utils.path_utils import check_path_suffix
def render_smpl(
        # smpl parameters
        poses: Optional[Union[torch.Tensor, np.ndarray, dict]] = None,
        betas: Optional[Union[torch.Tensor, np.ndarray]] = None,
        transl: Optional[Union[torch.Tensor, np.ndarray]] = None,
        verts: Optional[Union[torch.Tensor, np.ndarray]] = None,
        body_model: Optional[nn.Module] = None,
        body_model_config: Optional[dict] = None,
        # camera parameters
        R: Optional[Union[torch.Tensor, np.ndarray]] = None,
        T: Optional[Union[torch.Tensor, np.ndarray]] = None,
        K: Optional[Union[torch.Tensor, np.ndarray]] = None,
        orig_cam: Optional[Union[torch.Tensor, np.ndarray]] = None,
        Ks: Optional[Union[torch.Tensor, np.ndarray]] = None,
        in_ndc: bool = True,
        convention: str = 'pytorch3d',
        projection: Literal['weakperspective', 'perspective', 'fovperspective',
                            'orthographics',
                            'fovorthographics'] = 'perspective',
        orbit_speed: Union[float, Tuple[float, float]] = 0.0,
        # render choice parameters
        render_choice: Literal['lq', 'mq', 'hq', 'silhouette', 'depth',
                               'normal', 'pointcloud',
                               'part_silhouette'] = 'hq',
        palette: Union[List[str], str, np.ndarray, torch.Tensor] = 'white',
        texture_image: Union[torch.Tensor, np.ndarray] = None,
        resolution: Optional[Union[List[int], Tuple[int, int]]] = None,
        start: int = 0,
        end: Optional[int] = None,
        alpha: float = 1.0,
        no_grad: bool = True,
        batch_size: int = 10,
        device: Union[torch.device, str] = 'cuda',
        # file io parameters
        return_tensor: bool = False,
        output_path: str = None,
        origin_frames: Optional[str] = None,
        frame_list: Optional[List[str]] = None,
        image_array: Optional[Union[np.ndarray, torch.Tensor]] = None,
        img_format: str = '%06d.png',
        overwrite: bool = False,
        mesh_file_path: Optional[str] = None,
        read_frames_batch: bool = False,
        # visualize keypoints
        plot_kps: bool = False,
        kp3d: Optional[Union[np.ndarray, torch.Tensor]] = None,
        mask: Optional[Union[np.ndarray, List[int]]] = None,
        vis_kp_index: bool = False,
        verbose: bool = False) -> Union[None, torch.Tensor]:
    """Render SMPL, SMPL-X or STAR mesh or silhouette into differentiable
    tensors, and export video or images.

    Args:
        # smpl parameters:
        poses (Union[torch.Tensor, np.ndarray, dict]):
            1). `tensor` or `array` and ndim is 2, shape should be
            (frame, 72).
            2). `tensor` or `array` and ndim is 3, shape should be
            (frame, num_person, 72/165). num_person equals 1 means
            single-person.
            Rendering predicted multi-person should feed together with
            multi-person weakperspective cameras. meshes would be computed
            and use an identity intrinsic matrix.
            3). `dict`, standard dict format defined in smplx.body_models.
            will be treated as single-person.
            Lower priority than `verts`.
            Defaults to None.
        betas (Optional[Union[torch.Tensor, np.ndarray]], optional):
            1). ndim is 2, shape should be (frame, 10).
            2). ndim is 3, shape should be (frame, num_person, 10). num_person
            equals 1 means single-person. If poses are multi-person, betas
            should be set to the same person number.
            None will use default betas.
            Defaults to None.
        transl (Optional[Union[torch.Tensor, np.ndarray]], optional):
            translations of smpl(x).
            1). ndim is 2, shape should be (frame, 3).
            2). ndim is 3, shape should be (frame, num_person, 3). num_person
            equals 1 means single-person. If poses are multi-person,
            transl should be set to the same person number.
            Defaults to None.
        verts (Optional[Union[torch.Tensor, np.ndarray]], optional):
            1). ndim is 3, shape should be (frame, num_verts, 3).
            2). ndim is 4, shape should be (frame, num_person, num_verts, 3).
            num_person equals 1 means single-person.
            Higher priority over `poses` & `betas` & `transl`.
            Defaults to None.
        body_model (nn.Module, optional): body_model created from smplx.create.
            Higher priority than `body_model_config`. If `body_model` is not
            None, it will override `body_model_config`.
            Should not both be None.
            Defaults to None.
        body_model_config (dict, optional): body_model_config for build_model.
            Lower priority than `body_model`. Should not both be None.
            Defaults to None.

        # camera parameters:
        K (Optional[Union[torch.Tensor, np.ndarray]], optional):
            shape should be (frame, 4, 4) or (frame, 3, 3), frame could be 1.
            if (4, 4) or (3, 3), dim 0 will be added automatically.
            Will be default `FovPerspectiveCameras` intrinsic if None.
            Lower priority than `orig_cam`.
        R (Optional[Union[torch.Tensor, np.ndarray]], optional):
            shape should be (frame, 3, 3), If f equals 1, camera will have
            identical rotation.
            If `K` and `orig_cam` is None, will be generated by `look_at_view`.
            If have `K` or `orig_cam` and `R` is None, will be generated by
            `convert_camera_matrix`.
            Defaults to None.
        T (Optional[Union[torch.Tensor, np.ndarray]], optional):
            shape should be (frame, 3). If f equals 1, camera will have
            identical translation.
            If `K` and `orig_cam` is None, will be generated by `look_at_view`.
            If have `K` or `orig_cam` and `T` is None, will be generated by
            `convert_camera_matrix`.
            Defaults to None.
        orig_cam (Optional[Union[torch.Tensor, np.ndarray]], optional):
            shape should be (frame, 4) or (frame, num_person, 4). If f equals
            1, will be repeated to num_frames. num_person should be 1 if single
            person. Usually for HMR, VIBE predicted cameras.
            Higher priority than `K` & `R` & `T`.
            Defaults to None.
        Ks (Optional[Union[torch.Tensor, np.ndarray]], optional):
            shape should be (frame, 4, 4).
            This is for HMR or SPIN multi-person demo.
        in_ndc (bool, optional): . Defaults to True.
        convention (str, optional): If want to use an existing convention,
            choose in ['opengl', 'opencv', 'pytorch3d', 'pyrender', 'open3d',
            'maya', 'blender', 'unity'].
            If want to use a new convention, define your convention in
            (CAMERA_CONVENTION_FACTORY)[mmhuman3d/core/conventions/cameras/
            __init__.py] by the order of right, front and up.
            Defaults to 'pytorch3d'.
        projection (Literal[, optional): projection mode of cameras. Choose in
            ['orthographics, fovperspective', 'perspective', 'weakperspective',
            'fovorthographics']
            Defaults to 'perspective'.
        orbit_speed (float, optional): orbit speed for viewing when no `K`
            provided. `float` for only azim speed and Tuple for `azim` and
            `elev`.

        # render choice parameters:
        render_choice (Literal[, optional):
            choose in ['lq', 'mq', 'hq', 'silhouette', 'depth', 'normal',
            'pointcloud', 'part_silhouette'] .
            `lq`, `mq`, `hq` would output (frame, h, w, 4) FloatTensor.
            `lq` means low quality, `mq` means medium quality,
            `hq` means high quality.
            `silhouette` would output (frame, h, w) soft binary FloatTensor.
            `part_silhouette` would output (frame, h, w, 1) LongTensor.
            Every pixel stores a class index.
            `depth` will output a depth map of (frame, h, w, 1) FloatTensor
            and 'normal' will output a normal map of (frame, h, w, 1).
            `pointcloud` will output a (frame, h, w, 4) FloatTensor.
            Defaults to 'hq'.
        palette (Union[List[str], str, np.ndarray], optional):
            color theme str or list of color str or `array`.
            1). If use str to represent the color,
            should choose in ['segmentation', 'random'] or color from
            Colormap https://en.wikipedia.org/wiki/X11_color_names.
            If choose 'segmentation', will get a color for each part.
            2). If you have multi-person, better give a list of str or all
            will be in the same color.
            3). If you want to define your specific color, use an `array`
            of shape (3,) for single person and (N, 3) for multiple persons.
            If (3,) for multiple persons, all will be in the same color.
            Your `array` should be in range [0, 255] for 8 bit color.
            Defaults to 'white'.
        texture_image (Union[torch.Tensor, np.ndarray], optional):
            Texture image to be wrapped on the smpl mesh. If not None,
            the `palette` will be ignored, and the `body_model` is required
            to have `uv_param_path`.
            Should pass list or tensor of shape (num_person, H, W, 3).
            The color channel should be `RGB`.
            Defaults to None.
        resolution (Union[Iterable[int], int], optional):
            1). If iterable, should be (height, width) of output images.
            2). If int, would be taken as (resolution, resolution).
            Defaults to None, which falls back to the background size, or
            (1024, 1024) when there is no background.
            This will influence the overlay results when render with
            backgrounds. The output video will be rendered following the
            size of background images and finally resized to resolution.
        start (int, optional): start frame index. Defaults to 0.
        end (int, optional): end frame index. Exclusive.
            Could be positive int or negative int or None.
            None represents include all the frames.
            Defaults to None.
        alpha (float, optional): Transparency of the mesh.
            Range in [0.0, 1.0]
            Defaults to 1.0.
        no_grad (bool, optional): Set to True if do not need differentiable
            render.
            Defaults to True.
        batch_size (int, optional): Batch size for render.
            Related to your gpu memory.
            Defaults to 10.

        # file io parameters:
        return_tensor (bool, optional): Whether return the result tensors.
            Defaults to False, will return None.
        output_path (str, optional): output video or gif or image folder.
            Defaults to None, pass export procedure.

        # background frames, priority: image_array > frame_list > origin_frames
        origin_frames (Optional[str], optional): origin background frame path,
            could be `.mp4`, `.gif`(will be sliced into a folder) or an image
            folder.
            Defaults to None.
        frame_list (Optional[List[str]], optional): list of origin background
            frame paths, element in list each should be a image path like
            `*.jpg` or `*.png`.
            Use this when your file names is hard to sort or you only want to
            render a small number of frames.
            Defaults to None.
        image_array: (Optional[Union[np.ndarray, torch.Tensor]], optional):
            origin background frame `tensor` or `array`, use this when you
            want your frames in memory as array or tensor.
        overwrite (bool, optional): whether overwriting the existing files.
            Defaults to False.
        mesh_file_path (bool, optional): the directory path to store the `.ply`
            or `.obj` files. Will be named like 'frame_idx_person_idx.ply'.
            Defaults to None.
        read_frames_batch (bool, optional): Whether read frames by batch.
            Set it as True if your video is large in size.
            Defaults to False.

        # visualize keypoints
        plot_kps (bool, optional): whether plot keypoints on the output video.
            Defaults to False.
        kp3d (Optional[Union[np.ndarray, torch.Tensor]], optional):
            the keypoints of any convention, should pass `mask` if have any
            none-sense points. Shape should be (frame, )
            Defaults to None.
        mask (Optional[Union[np.ndarray, List[int]]], optional):
            Mask of keypoints existence.
            Defaults to None.
        vis_kp_index (bool, optional):
            Whether plot keypoint index number on human mesh.
            Defaults to False.

        # visualize render progress
        verbose (bool, optional):
            Whether print the progress bar for rendering.

    Returns:
        Union[None, torch.Tensor]: return the rendered image tensors or None.
    """
    # initialize the device
    device = torch.device(device) if isinstance(device, str) else device
    # normalize `resolution` to a (height, width) tuple
    if isinstance(resolution, int):
        resolution = (resolution, resolution)
    elif isinstance(resolution, list):
        resolution = tuple(resolution)
    verts, poses, betas, transl = _prepare_input_pose(verts, poses, betas,
                                                      transl)
    # resolve the model type ('smpl' | 'smplx' | 'star') from the body model
    body_model = _prepare_body_model(body_model, body_model_config)
    model_type = body_model.name().replace('-', '').lower()
    assert model_type in ['smpl', 'smplx', 'star']
    if model_type in ['smpl', 'smplx']:
        vertices, joints, num_frames, num_person = _prepare_mesh(
            poses, betas, transl, verts, start, end, body_model)
    elif model_type == 'star':
        model_output = body_model(body_pose=poses, betas=betas, transl=transl)
        vertices = model_output['vertices']
        num_frames = poses.shape[0]
        # NOTE(review): this branch never assigns `joints`; calling with
        # model_type='star' and plot_kps=True would hit an undefined name.
        # Confirm whether a caller-side guard exists.
        num_person = 1  # star temporarily only supports a single person
        end = num_frames if end is None else end
    # (frame, num_person, num_verts, 3)
    vertices = vertices.view(num_frames, num_person, -1, 3)
    num_verts = vertices.shape[-2]
    if not plot_kps:
        joints = None
        if kp3d is not None:
            warnings.warn('`plot_kps` is False, `kp3d` will be set as None.')
            kp3d = None
    image_array, remove_folder, frames_folder = _prepare_background(
        image_array, frame_list, origin_frames, output_path, start, end,
        img_format, overwrite, num_frames, read_frames_batch)
    # infer render resolution from the background frames, if any
    render_resolution = None
    if image_array is not None:
        render_resolution = (image_array.shape[1], image_array.shape[2])
    elif frames_folder is not None:
        frame_path_list = glob.glob(osp.join(
            frames_folder, '*.jpg')) + glob.glob(
                osp.join(frames_folder, '*.png')) + glob.glob(
                    osp.join(frames_folder, '*.jpeg'))
        vid_info = vid_info_reader(frame_path_list[0])
        render_resolution = (int(vid_info['height']), int(vid_info['width']))
    # reconcile requested `resolution` with the background size;
    # render at background size, resize the export to `final_resolution`
    if resolution is not None:
        if render_resolution is not None:
            if render_resolution != resolution:
                warnings.warn(
                    f'Size of background: {render_resolution} !='
                    f' resolution: {resolution}, the output video will be '
                    f'resized as {resolution}')
            final_resolution = resolution
        elif render_resolution is None:
            render_resolution = final_resolution = resolution
    elif resolution is None:
        if render_resolution is None:
            render_resolution = final_resolution = (1024, 1024)
        elif render_resolution is not None:
            final_resolution = render_resolution
    if isinstance(kp3d, np.ndarray):
        kp3d = torch.Tensor(kp3d)
    if kp3d is not None:
        # drop keypoints flagged as absent, then slice the frame range
        if mask is not None:
            map_index = np.where(np.array(mask) != 0)[0]
            kp3d = kp3d[map_index.tolist()]
        kp3d = kp3d[start:end]
        kp3d = kp3d.view(num_frames, -1, 3)
    # prepare render_param_dict
    render_param_dict = copy.deepcopy(RENDER_CONFIGS[render_choice.lower()])
    if model_type == 'smpl':
        render_param_dict.update(num_class=24)
    elif model_type == 'smplx':
        render_param_dict.update(num_class=27)
    if render_choice not in [
            'hq', 'mq', 'lq', 'silhouette', 'part_silhouette', 'depth',
            'pointcloud', 'normal'
    ]:
        raise ValueError('Please choose the right render_choice.')
    # body part colorful visualization should use flat shader to be sharper.
    if texture_image is None:
        if isinstance(palette, str):
            palette = [palette] * num_person
        elif isinstance(palette, np.ndarray):
            palette = torch.Tensor(palette)
            palette = palette.view(-1, 3)
            if palette.shape[0] != num_person:
                _times = num_person // palette.shape[0]
                palette = palette.repeat(_times, 1)[:num_person]
                if palette.shape[0] == 1:
                    print(f'Same color for all the {num_person} people')
                else:
                    print('Repeat palette for multi-person.')
        else:
            raise ValueError('Wrong input palette type. '
                             'Palette should be tensor, array or list of strs')
        colors_all = _prepare_colors(palette, render_choice, num_person,
                                     num_verts, model_type)
        colors_all = colors_all.view(-1, num_person * num_verts, 3)
    # NOTE(review): when texture_image is not None, `colors_all` is never
    # assigned in this function before being passed below — looks like a
    # latent NameError on the textured path; confirm against upstream.
    # verts of ParametricMeshes should be in (N, V, 3)
    vertices = vertices.view(num_frames, -1, 3)
    meshes = ParametricMeshes(
        body_model=body_model,
        verts=vertices,
        N_individual_overdide=num_person,
        model_type=model_type,
        texture_image=texture_image,
        use_nearest=bool(render_choice == 'part_silhouette'),
        vertex_color=colors_all)
    # write .ply or .obj files
    if mesh_file_path is not None:
        mmcv.mkdir_or_exist(mesh_file_path)
        for person_idx in range(meshes.shape[1]):
            mesh_person = meshes[:, person_idx]
            if texture_image is None:
                # vertex-colored meshes are exported as .ply
                ply_paths = [
                    f'{mesh_file_path}/frame{frame_idx}_'
                    f'person{person_idx}.ply'
                    for frame_idx in range(num_frames)
                ]
                save_meshes_as_plys(meshes=mesh_person, files=ply_paths)
            else:
                # textured meshes are exported as .obj
                obj_paths = [
                    f'{mesh_file_path}/frame{frame_idx}_'
                    f'person{person_idx}.obj'
                    for frame_idx in range(num_frames)
                ]
                save_meshes_as_objs(meshes=mesh_person, files=obj_paths)
    vertices = meshes.verts_padded().view(num_frames, num_person, -1, 3)
    # prepare camera matrices
    if Ks is not None:
        # per-person intrinsics (HMR/SPIN multi-person): bake the per-person
        # intrinsic difference into the vertices, keep a single shared K
        projection = 'perspective'
        orig_cam = None
        if isinstance(Ks, np.ndarray):
            Ks = torch.Tensor(Ks)
        Ks = Ks.view(-1, num_person, 3, 3)
        Ks = Ks[start:end]
        Ks = Ks.view(-1, 3, 3)
        K = K.repeat(num_frames * num_person, 1, 1)
        Ks = K.inverse() @ Ks @ K
        vertices = vertices.view(num_frames * num_person, -1, 3)
        if T is None:
            T = torch.zeros(num_frames, num_person, 1, 3)
        elif isinstance(T, np.ndarray):
            T = torch.Tensor(T)
        T = T[start:end]
        T = T.view(num_frames * num_person, 1, 3)
        vertices = torch.einsum('blc,bvc->bvl', Ks, vertices + T)
        R = None
        T = None
        vertices = vertices.view(num_frames, num_person, -1, 3)
    if orig_cam is not None:
        # weak-perspective (VIBE/HMR style): apply scale/translation to the
        # vertices directly and switch to an identity orig_cam
        if isinstance(orig_cam, np.ndarray):
            orig_cam = torch.Tensor(orig_cam)
        projection = 'weakperspective'
        r = render_resolution[1] / render_resolution[0]
        orig_cam = orig_cam[start:end]
        orig_cam = orig_cam.view(num_frames, num_person, 4)
        # if num_person > 1:
        sx, sy, tx, ty = torch.unbind(orig_cam, -1)
        vertices[..., 0] += tx.view(num_frames, num_person, 1)
        vertices[..., 1] += ty.view(num_frames, num_person, 1)
        vertices[..., 0] *= sx.view(num_frames, num_person, 1)
        vertices[..., 1] *= sy.view(num_frames, num_person, 1)
        orig_cam = torch.tensor([1.0, 1.0, 0.0,
                                 0.0]).view(1, 4).repeat(num_frames, 1)
        K, R, T = WeakPerspectiveCameras.convert_orig_cam_to_matrix(
            orig_cam=orig_cam,
            znear=torch.min(vertices[..., 2] - 1),
            aspect_ratio=r)
    # flatten all persons into a single vertex batch and push the adjusted
    # vertices back into the meshes
    if num_person > 1:
        vertices = vertices.reshape(num_frames, -1, 3)
    else:
        vertices = vertices.view(num_frames, -1, 3)
    meshes = meshes.update_padded(new_verts_padded=vertices)
    # orig_cam and K are None, use look_at_view
    if K is None:
        projection = 'fovperspective'
        K, R, T = compute_orbit_cameras(
            at=(torch.mean(vertices.view(-1, 3), 0)).detach().cpu(),
            orbit_speed=orbit_speed,
            batch_size=num_frames,
            convention=convention)
        convention = 'pytorch3d'
    # normalize R to (frame, 3, 3)
    if isinstance(R, np.ndarray):
        R = torch.Tensor(R).view(-1, 3, 3)
    elif isinstance(R, torch.Tensor):
        R = R.view(-1, 3, 3)
    elif isinstance(R, list):
        R = torch.Tensor(R).view(-1, 3, 3)
    elif R is None:
        pass
    else:
        raise ValueError(f'Wrong type of R: {type(R)}!')
    if R is not None:
        if len(R) > num_frames:
            R = R[start:end]
    # normalize T to (frame, 3)
    if isinstance(T, np.ndarray):
        T = torch.Tensor(T).view(-1, 3)
    elif isinstance(T, torch.Tensor):
        T = T.view(-1, 3)
    elif isinstance(T, list):
        T = torch.Tensor(T).view(-1, 3)
    elif T is None:
        pass
    else:
        raise ValueError(f'Wrong type of T: {type(T)}!')
    if T is not None:
        if len(T) > num_frames:
            T = T[start:end]
    # normalize K to (frame, 3, 3) or (frame, 4, 4); K is guaranteed
    # non-None here (generated above when missing)
    if isinstance(K, np.ndarray):
        K = torch.Tensor(K).view(-1, K.shape[-2], K.shape[-1])
    elif isinstance(K, torch.Tensor):
        K = K.view(-1, K.shape[-2], K.shape[-1])
    elif isinstance(K, list):
        K = torch.Tensor(K)
        K = K.view(-1, K.shape[-2], K.shape[-1])
    else:
        raise ValueError(f'Wrong type of K: {type(K)}!')
    if K is not None:
        if len(K) > num_frames:
            K = K[start:end]
    assert projection in [
        'perspective', 'weakperspective', 'orthographics', 'fovorthographics',
        'fovperspective'
    ], f'Wrong camera projection: {projection}'
    if projection in ['fovperspective', 'perspective']:
        is_perspective = True
    elif projection in [
            'fovorthographics', 'weakperspective', 'orthographics'
    ]:
        is_perspective = False
    if projection in ['fovperspective', 'fovorthographics', 'weakperspective']:
        assert in_ndc
    # convert everything into the pytorch3d camera convention
    K, R, T = convert_camera_matrix(
        convention_dst='pytorch3d',
        K=K,
        R=R,
        T=T,
        is_perspective=is_perspective,
        convention_src=convention,
        resolution_src=render_resolution,
        in_ndc_src=in_ndc,
        in_ndc_dst=in_ndc)
    # initialize the renderer.
    renderer = SMPLRenderer(
        resolution=render_resolution,
        device=device,
        output_path=output_path,
        return_tensor=return_tensor,
        alpha=alpha,
        read_img_format=img_format,
        render_choice=render_choice,
        frames_folder=frames_folder,
        plot_kps=plot_kps,
        vis_kp_index=vis_kp_index,
        final_resolution=final_resolution,
        **render_param_dict)
    cameras = build_cameras(
        dict(
            type=projection,
            in_ndc=in_ndc,
            device=device,
            K=K,
            R=R,
            T=T,
            resolution=render_resolution))
    if image_array is not None:
        # pad the background batch to num_frames with white frames
        image_array = torch.Tensor(image_array)
        image_array = align_input_to_padded(
            image_array, ndim=4, batch_size=num_frames, padding_mode='ones')
    # prepare the render data.
    render_data = dict(
        images=image_array,
        meshes=meshes,
        cameras=cameras,
        joints=joints,
        joints_gt=kp3d,
    )
    results = render_runner.render(
        renderer=renderer,
        device=device,
        batch_size=batch_size,
        output_path=output_path,
        return_tensor=return_tensor,
        no_grad=no_grad,
        verbose=verbose,
        **render_data)
    # clean up the temporary folder that background slicing created
    if remove_folder:
        if Path(frames_folder).is_dir():
            shutil.rmtree(frames_folder)
    if return_tensor:
        return results
    else:
        return None
The provided code snippet includes the necessary dependencies for implementing the `visualize_smpl_pose` function. Write a Python function `def visualize_smpl_pose(poses=None, verts=None, **kwargs) -> None` to solve the following problem:
Simplest way to visualize a sequence of SMPL poses. Cameras will focus on the center of the SMPL mesh. Setting `orbit_speed` is recommended.
Here is the function:
def visualize_smpl_pose(poses=None, verts=None, **kwargs) -> None:
    """Simplest way to visualize a sequence of smpl pose.

    Cameras will focus on the center of smpl mesh. `orbit speed` is
    recommended.
    """
    assert poses is not None or verts is not None, \
        'Pass either `poses` or `verts`.'
    # These keyword values are fixed for this helper; any caller-supplied
    # duplicates are discarded so they cannot override them.
    pinned_kwargs = dict(
        convention='opencv',
        projection='fovperspective',
        K=None,
        R=None,
        T=None,
        in_ndc=True,
        origin_frames=None,
        frame_list=None,
        image_array=None)
    for pinned_key in pinned_kwargs:
        kwargs.pop(pinned_key, None)
    return render_smpl(poses=poses, verts=verts, **pinned_kwargs, **kwargs)
14,312 | import json
import os
from mmhuman3d.core.cameras.camera_parameters import CameraParameter
from mmhuman3d.core.renderer.vedo_render import VedoRenderer
from mmhuman3d.utils.path_utils import check_path_suffix
class CameraParameter:
logger = None
SUPPORTED_KEYS = _CAMERA_PARAMETER_SUPPORTED_KEYS_
def __init__(self,
name: str = 'default',
H: int = 1080,
W: int = 1920) -> None:
"""
Args:
name (str, optional):
Name of this camera. Defaults to "default".
H (int, optional):
Height of a frame, in pixel. Defaults to 1080.
W (int, optional):
Width of a frame, in pixel. Defaults to 1920.
"""
self.name = name
self.parameters_dict = {}
in_mat = __zero_mat_list__(3)
self.parameters_dict['in_mat'] = in_mat
for distort_name in __distort_coefficient_names__:
self.parameters_dict[distort_name] = 0.0
_, H = self.validate_item('H', H)
self.parameters_dict['H'] = H
_, W = self.validate_item('W', W)
self.parameters_dict['W'] = W
r_mat = __zero_mat_list__(3)
self.parameters_dict['rotation_mat'] = r_mat
t_list = [0.0, 0.0, 0.0]
self.parameters_dict['translation'] = t_list
def reset_distort(self) -> None:
"""Reset all distort coefficients to zero."""
for distort_name in __distort_coefficient_names__:
self.parameters_dict[distort_name] = 0.0
def get_opencv_distort_mat(self) -> np.ndarray:
"""Get a numpy array of 8 distort coefficients, which is the distCoeffs
arg of cv2.undistort.
Returns:
ndarray:
(k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6) of 8 elements.
"""
dist_coeffs = [
self.get_value('k1'),
self.get_value('k2'),
self.get_value('p1'),
self.get_value('p2'),
self.get_value('k3'),
self.get_value('k4'),
self.get_value('k5'),
self.get_value('k6'),
]
dist_coeffs = np.array(dist_coeffs)
return dist_coeffs
def set_KRT(self,
K_mat: np.ndarray,
R_mat: np.ndarray,
T_vec: np.ndarray,
inverse_extrinsic: bool = False) -> None:
"""Set intrinsic and extrinsic of a camera.
Args:
K_mat (np.ndarray):
In shape [3, 3].
R_mat (np.ndarray):
Rotation from world to view in default.
In shape [3, 3].
T_vec (np.ndarray):
Translation from world to view in default.
In shape [3,].
inverse_extrinsic (bool, optional):
If true, R_mat and T_vec transform a point
from view to world. Defaults to False.
"""
k_shape = K_mat.shape
assert k_shape[0] == k_shape[1] == 3
r_shape = R_mat.shape
assert r_shape[0] == r_shape[1] == 3
assert T_vec.ndim == 1 and T_vec.shape[0] == 3
self.set_mat_np('in_mat', K_mat)
if inverse_extrinsic:
R_mat = np.linalg.inv(R_mat)
T_vec = -np.dot(R_mat, T_vec).reshape((3))
self.set_mat_np('rotation_mat', R_mat)
self.set_value('translation', T_vec.tolist())
def get_KRT(self, k_dim=3) -> List[np.ndarray]:
"""Get intrinsic and extrinsic of a camera.
Args:
k_dim (int, optional):
Dimension of the returned mat K.
Defaults to 3.
Raises:
ValueError: k_dim is neither 3 nor 4.
Returns:
List[np.ndarray]:
K_mat (np.ndarray):
In shape [3, 3].
R_mat (np.ndarray):
Rotation from world to view in default.
In shape [3, 3].
T_vec (np.ndarray):
Translation from world to view in default.
In shape [3,].
"""
K_3x3 = self.get_mat_np('in_mat')
R_mat = self.get_mat_np('rotation_mat')
T_vec = np.asarray(self.get_value('translation'))
if k_dim == 3:
return [K_3x3, R_mat, T_vec]
elif k_dim == 4:
K_3x3 = np.expand_dims(K_3x3, 0) # shape (1, 3, 3)
K_4x4 = convert_K_3x3_to_4x4(
K=K_3x3, is_perspective=True) # shape (1, 4, 4)
K_4x4 = K_4x4[0, :, :]
return [K_4x4, R_mat, T_vec]
else:
raise ValueError(f'K mat cannot be converted to {k_dim}x{k_dim}')
def set_mat_np(self, mat_key: str, mat_numpy: np.ndarray) -> None:
"""Set a matrix-type parameter to mat_numpy.
Args:
mat_key (str):
Key of the target matrix. in_mat or rotation_mat.
mat_numpy (ndarray):
Matrix in numpy format.
Raises:
TypeError:
mat_numpy is not an np.ndarray.
"""
if not isinstance(mat_numpy, np.ndarray):
raise TypeError
self.set_mat_list(mat_key, mat_numpy.tolist())
def set_mat_list(self, mat_key: str, mat_list: List[list]) -> None:
"""Set a matrix-type parameter to mat_list.
Args:
mat_key (str):
Key of the target matrix. in_mat or rotation_mat.
mat_list (List[list]):
Matrix in list format.
"""
_, mat_list = self.validate_item(mat_key, mat_list)
self.parameters_dict[mat_key] = mat_list
def set_value(self, key: str, value: Any) -> None:
"""Set a parameter to value.
Args:
key (str):
Name of the parameter.
value (object):
New value of the parameter.
"""
_, value = self.validate_item(key, value)
self.parameters_dict[key] = value
def get_value(self, key: str) -> Any:
"""Get a parameter by key.
Args:
key (str):
Name of the parameter.
Raises:
KeyError: key not in self.parameters_dict
Returns:
object:
Value of the parameter.
"""
if key not in self.parameters_dict:
raise KeyError(key)
else:
return self.parameters_dict[key]
def get_mat_np(self, key: str) -> np.ndarray:
"""Get a a matrix-type parameter by key.
Args:
key (str):
Name of the parameter.
Raises:
KeyError: key not in self.parameters_dict
Returns:
ndarray:
Value of the parameter.
"""
if key not in self.parameters_dict:
raise KeyError(key)
else:
mat_list = self.parameters_dict[key]
mat_np = np.array(mat_list).reshape((3, 3))
return mat_np
def to_string(self) -> str:
"""Convert self.to_dict() to a string.
Returns:
str:
A dict in json string format.
"""
dump_dict = self.to_dict()
ret_str = json.dumps(dump_dict)
return ret_str
def to_dict(self) -> dict:
"""Dump camera name and parameters to dict.
Returns:
dict:
Put self.name and self.parameters_dict
in one dict.
"""
dump_dict = self.parameters_dict.copy()
dump_dict['name'] = self.name
return dump_dict
def dump(self, json_path: str) -> None:
"""Dump camera name and parameters to a file.
Returns:
dict:
Put self.name and self.parameters_dict
in one dict, and dump them to a json file.
"""
dump_dict = self.to_dict()
with open(json_path, 'w') as f_write:
json.dump(dump_dict, f_write)
def load(self, json_path: str) -> None:
"""Load camera name and parameters from a file."""
with open(json_path, 'r') as f_read:
dumped_dict = json.load(f_read)
self.load_from_dict(dumped_dict)
def load_from_dict(self, json_dict: dict) -> None:
"""Load name and parameters from a dict.
Args:
json_dict (dict):
A dict comes from self.to_dict().
"""
for key in json_dict.keys():
if key == 'name':
self.name = json_dict[key]
elif key == 'rotation':
self.parameters_dict['rotation_mat'] = np.array(
json_dict[key]).reshape(3, 3).tolist()
elif key == 'translation':
self.parameters_dict[key] = np.array(json_dict[key]).reshape(
(3)).tolist()
else:
self.parameters_dict[key] = json_dict[key]
if '_mat' in key:
self.parameters_dict[key] = np.array(
self.parameters_dict[key]).reshape(3, 3).tolist()
    def load_from_chessboard(self,
                             chessboard_dict: dict,
                             name: str,
                             inverse: bool = True) -> None:
        """Load name and parameters from a chessboard calibration dict.

        Args:
            chessboard_dict (dict):
                A dict loaded from json.load(chessboard_file).
            name (str):
                Name of this camera.
            inverse (bool, optional):
                Whether to inverse rotation and translation mat.
                Defaults to True.
        """
        camera_param_dict = \
            __parse_chessboard_param__(chessboard_dict, name, inverse=inverse)
        self.load_from_dict(camera_param_dict)
def load_kinect_from_smc(self, smc_reader, kinect_id: int) -> None:
"""Load name and parameters of a kinect from an SmcReader instance.
Args:
smc_reader (mmhuman3d.data.data_structures.smc_reader.SMCReader):
An SmcReader instance containing kinect camera parameters.
kinect_id (int):
Id of the target kinect.
"""
name = kinect_id
extrinsics_dict = \
smc_reader.get_kinect_color_extrinsics(
kinect_id, homogeneous=False
)
rot_np = extrinsics_dict['R']
trans_np = extrinsics_dict['T']
intrinsics_np = \
smc_reader.get_kinect_color_intrinsics(
kinect_id
)
resolution = \
smc_reader.get_kinect_color_resolution(
kinect_id
)
rmatrix = np.linalg.inv(rot_np).reshape(3, 3)
tvec = -np.dot(rmatrix, trans_np)
self.name = name
self.set_mat_np('in_mat', intrinsics_np)
self.set_mat_np('rotation_mat', rmatrix)
self.set_value('translation', tvec.tolist())
self.set_value('H', resolution[1])
self.set_value('W', resolution[0])
def load_iphone_from_smc(self,
smc_reader,
iphone_id: int = 0,
frame_id: int = 0) -> None:
"""Load name and parameters of an iPhone from an SmcReader instance.
Args:
smc_reader (mmhuman3d.data.data_structures.smc_reader.SMCReader):
An SmcReader instance containing kinect camera parameters.
iphone_id (int):
Id of the target iphone.
Defaults to 0.
frame_id (int):
Frame ID of one selected frame.
It only influences the intrinsics.
Defaults to 0.
"""
name = f'iPhone_{iphone_id}'
extrinsics_mat = \
smc_reader.get_iphone_extrinsics(
iphone_id, homogeneous=True
)
rot_np = extrinsics_mat[:3, :3]
trans_np = extrinsics_mat[:3, 3]
intrinsics_np = \
smc_reader.get_iphone_intrinsics(
iphone_id, frame_id
)
resolution = \
smc_reader.get_iphone_color_resolution(
iphone_id
)
rmatrix = np.linalg.inv(rot_np).reshape(3, 3)
tvec = -np.dot(rmatrix, trans_np)
self.name = name
self.set_mat_np('in_mat', intrinsics_np)
self.set_mat_np('rotation_mat', rmatrix)
self.set_value('translation', tvec.tolist())
self.set_value('H', resolution[1])
self.set_value('W', resolution[0])
    def load_from_perspective_cameras(cls,
                                      cam,
                                      name: str,
                                      resolution: Union[List, Tuple] = None):
        """Load parameters from a PerspectiveCameras and return a
        CameraParameter.

        NOTE(review): the first parameter is named `cls` but no @classmethod
        decorator is visible here — confirm whether the decorator exists at
        the definition site.

        Args:
            cam (mmhuman3d.core.cameras.cameras.PerspectiveCameras):
                An instance.
            name (str):
                Name of this camera.
            resolution (Union[List, Tuple], optional):
                (height, width) override; falls back to cam.resolution[0]
                when None.

        Returns:
            CameraParameter: a new instance filled from `cam`.
        """
        assert isinstance(cam, PerspectiveCameras
                          ), 'Wrong input, support PerspectiveCameras only!'
        if len(cam) > 1:
            warnings.warn('Will only use the first camera in the batch.')
        cam = cam[0]
        resolution = resolution if resolution is not None else cam.resolution[
            0].tolist()
        height, width = int(resolution[0]), int(resolution[1])
        cam_param = CameraParameter()
        cam_param.__init__(H=height, W=width, name=name)
        k_4x4 = cam.K  # shape (1, 4, 4)
        r_3x3 = cam.R  # shape (1, 3, 3)
        t_3 = cam.T  # shape (1, 3)
        is_perspective = cam.is_perspective()
        in_ndc = cam.in_ndc()
        # convert from pytorch3d NDC space to opencv screen space
        # NOTE(review): is_perspective is queried above but a hard-coded
        # False is passed here — confirm this is intentional.
        k_4x4, r_3x3, t_3 = convert_camera_matrix(
            K=k_4x4,
            R=r_3x3,
            T=t_3,
            is_perspective=False,
            in_ndc_dst=False,
            in_ndc_src=in_ndc,
            convention_src='pytorch3d',
            convention_dst='opencv',
            resolution_src=(height, width),
            resolution_dst=(height, width))
        k_3x3 = \
            convert_K_4x4_to_3x3(k_4x4, is_perspective=is_perspective)
        # strip the batch dim before storing
        k_3x3 = k_3x3.numpy()[0]
        r_3x3 = r_3x3.numpy()[0]
        t_3 = t_3.numpy()[0]
        cam_param.name = name
        cam_param.set_mat_np('in_mat', k_3x3)
        cam_param.set_mat_np('rotation_mat', r_3x3)
        cam_param.set_value('translation', t_3.tolist())
        cam_param.parameters_dict.update(H=height)
        cam_param.parameters_dict.update(W=width)
        return cam_param
    def export_to_perspective_cameras(self) -> PerspectiveCameras:
        """Export to an OpenCV-convention, screen-space PerspectiveCameras.

        Returns:
            PerspectiveCameras: same-defined cameras of batch_size 1.
        """
        height = self.parameters_dict['H']
        width = self.parameters_dict['W']
        k_4x4, rotation, translation = self.get_KRT(k_dim=4)
        # add a leading batch dimension for build_cameras
        k_4x4 = np.expand_dims(k_4x4, 0)  # shape (1, 4, 4)
        rotation = np.expand_dims(rotation, 0)  # shape (1, 3, 3)
        translation = np.expand_dims(translation, 0)  # shape (1, 3)
        new_K = torch.from_numpy(k_4x4)
        new_R = torch.from_numpy(rotation)
        new_T = torch.from_numpy(translation)
        cam = build_cameras(
            dict(
                type='PerspectiveCameras',
                K=new_K.float(),
                R=new_R.float(),
                T=new_T.float(),
                convention='opencv',
                in_ndc=False,
                resolution=(height, width)))
        return cam
def validate_item(self, key: Any, val: Any) -> List:
"""Check whether the key and its value matches definition in
CameraParameter.SUPPORTED_KEYS.
Args:
key (Any):
Key in CameraParameter.
val (Any):
Value to the key.
Raises:
KeyError:
key cannot be found in
CameraParameter.SUPPORTED_KEYS.
TypeError:
Value's type doesn't match definition.
Returns:
key (Any): The input key.
val (Any): The value casted into correct format.
"""
self.__check_key__(key)
formatted_val = self.__validate_value_type__(key, val)
return key, formatted_val
def __check_key__(self, key: Any) -> None:
"""Check whether the key matches definition in
CameraParameter.SUPPORTED_KEYS.
Args:
key (Any):
Key in CameraParameter.
Raises:
KeyError:
key cannot be found in
CameraParameter.SUPPORTED_KEYS.
"""
if key not in self.__class__.SUPPORTED_KEYS:
err_msg = 'Key check failed in CameraParameter:\n'
err_msg += f'key={str(key)}\n'
raise KeyError(err_msg)
    def __validate_value_type__(self, key: Any, val: Any) -> Any:
        """Check whether the type of value matches definition in
        CameraParameter.SUPPORTED_KEYS.

        Args:
            key (Any):
                Key in CameraParameter.
            val (Any):
                Value to the key.

        Raises:
            TypeError:
                Value is supported but doesn't match definition.

        Returns:
            val (Any): The value casted into correct format.
        """
        # Map python scalar types to numpy abstract super-types so numpy
        # scalars (e.g. np.float32) are accepted for int/float keys.
        np_type_mapping = {int: np.integer, float: np.floating}
        supported_keys = self.__class__.SUPPORTED_KEYS
        validation_result = _TypeValidation.FAIL
        ret_val = None
        if supported_keys[key]['type'] == int or\
                supported_keys[key]['type'] == float:
            # Scalar keys: accept python scalars, numpy scalars and
            # zero-dim torch tensors; reject arrays and slices.
            type_str = str(type(val))
            class_name = type_str.split('\'')[1]
            if type(val) == self.__class__.SUPPORTED_KEYS[key]['type']:
                validation_result = _TypeValidation.MATCH
                ret_val = val
            elif class_name.startswith('numpy'):
                # a value is required, not array
                if np.issubdtype(
                        type(val),
                        np_type_mapping[supported_keys[key]['type']]):
                    validation_result = _TypeValidation.MATCH
                    ret_val = val.astype(supported_keys[key]['type'])
                elif np.issubdtype(type(val), np.ndarray):
                    # NOTE(review): np.issubdtype is meant for dtypes;
                    # isinstance(val, np.ndarray) is likely the intent here.
                    validation_result = _TypeValidation.ARRAY
            elif class_name.startswith('torch'):
                # only one element tensors
                # can be converted to Python scalars
                if len(val.size()) == 0:
                    val_item = val.item()
                    if type(val_item) == supported_keys[key]['type']:
                        validation_result = _TypeValidation.MATCH
                        ret_val = val_item
                else:
                    validation_result = _TypeValidation.ARRAY
        else:
            # Non-scalar keys (e.g. nested lists): require an exact match.
            if type(val) == self.__class__.SUPPORTED_KEYS[key]['type']:
                validation_result = _TypeValidation.MATCH
                ret_val = val
        if validation_result != _TypeValidation.MATCH:
            err_msg = 'Type check failed in CameraParameter:\n'
            err_msg += f'key={str(key)}\n'
            err_msg += f'type(val)={type(val)}\n'
            if validation_result == _TypeValidation.ARRAY:
                err_msg += 'A single value is expected, ' +\
                    'neither an array nor a slice.\n'
            raise TypeError(err_msg)
        return ret_val
class VedoRenderer(object):
    """An interactive renderer for camera visualization.

    Cameras are accumulated with :meth:`add_camera` and displayed,
    optionally together with world-axis arrows, by :meth:`show`.
    """
    def __init__(self, scale=0.03):
        """Visualize cameras in an interactive scene supported by vedo.

        Args:
            scale (float, optional):
                Scale factor. Defaults to 0.03.
        """
        self.scale = scale
        # Pre-built world-axis arrows (r-x, g-y, b-z).
        self.axis_list = self.__init_axis()
        # Vedo items (arrows and labels) of every added camera.
        self.camera_list = []
        self.frames_dir_path = ''
        # When True, all geometry is rotated 180 degrees around z;
        # see set_y_reverse().
        self.y_reverse = False
    def __init_axis(self, axis_len=80):
        """Prepare arrows for axis.

        Args:
            axis_len (int, optional):
                Length of each axis.
                Defaults to 80.

        Returns:
            List[Arrows]:
                A list of three arrows.
        """
        # Rows of the identity give the three unit directions.
        arrow_end_np = np.eye(3) * axis_len * self.scale
        colors = ['r', 'g', 'b']  # r-x, g-y, b-z
        ret_list = []
        for axis_index in range(3):
            ret_list.append(
                vedo.Arrows([[0, 0, 0]],
                            [arrow_end_np[axis_index]]).c(colors[axis_index]))
        return ret_list
    def set_y_reverse(self):
        """Set y reverse before add_camera if it is needed.

        Vedo defines y+ as up direction. When visualizing kinect cameras, y- is
        up, call set_y_reverse in this situation to make text in correct
        direction.
        """
        self.y_reverse = True
        # A 180-degree rotation around z flips both x and y.
        self.y_reverse_rotation = \
            scipy_Rotation.from_euler('z', 180, degrees=True)
    def add_camera(self, camera_parameter, arrow_len=30):
        """Add a camera to the scene.

        Args:
            camera_parameter (CameraParameter):
                An instance of class CameraParameter which stores
                rotation, translation and name of a camera.
            arrow_len (int, optional):
                Length of the arrow. Defaults to 30.

        Returns:
            list:
                A list of vedo items related to the input camera.
        """
        rot_mat = np.asarray(camera_parameter.get_value('rotation_mat'))
        translation = np.asarray(camera_parameter.get_value('translation'))
        # Camera center in world coordinates: C = -R^-1 @ T.
        cam_center = -np.linalg.inv(rot_mat).dot(translation)
        arrow_end_origin = np.eye(3) * arrow_len * self.scale
        colors = ['r', 'g', 'b']  # r-x, g-y, b-z
        # Rotate the per-camera axis arrows into world space.
        arrow_end_camera = \
            np.einsum('ij,kj->ki', np.linalg.inv(rot_mat), arrow_end_origin)
        if self.y_reverse:
            cam_center = self.y_reverse_rotation.apply(cam_center)
            for axis_index in range(3):
                arrow_end_camera[axis_index, :] = \
                    self.y_reverse_rotation.apply(
                        arrow_end_camera[axis_index, :]
                    )
        vedo_list = []
        for i in range(3):
            vedo_list.append(
                vedo.Arrows([cam_center],
                            [cam_center + arrow_end_camera[i]]).c(colors[i]))
        # Label the camera with its name at its center.
        vedo_list.append(
            vedo.Text3D(camera_parameter.name, cam_center, s=self.scale * 10))
        self.camera_list += vedo_list
        return vedo_list
    def show(self, with_axis=True, interactive=True):
        """Show cameras as well as axis arrow by vedo.show()

        Args:
            with_axis (bool, optional):
                Whether to show the axis arrow. Defaults to True.
            interactive (bool, optional):
                Pause and interact with window (True) or
                continue execution (False).
                Defaults to True.
        """
        list_to_show = []
        list_to_show += self.camera_list
        if with_axis:
            list_to_show += self.axis_list
        # Render everything at once, then clear the scene for reuse.
        vedo.show(*list_to_show, interactive=interactive, axes=1)
        vedo.clear()
The provided code snippet includes necessary dependencies for implementing the `visualize_chessboard_kinects_rgb` function. Write a Python function `def visualize_chessboard_kinects_rgb(chessboard_path: str, interactive: bool = True, show: bool = True)` to solve the following problem:
Visualize all the RGB cameras in a chessboard file. Args: chessboard_path (str): path to the chessboard file. interactive (bool, optional): pause and interact with the window (True) or continue execution (False); defaults to True. show (bool, optional): whether to show in a window; defaults to True.
Here is the function:
def visualize_chessboard_kinects_rgb(chessboard_path: str,
                                     interactive: bool = True,
                                     show: bool = True):
    """Visualize all the RGB cameras in a chessboard file.

    Args:
        chessboard_path (str):
            Path to the chessboard file.
        interactive (bool, optional):
            Pause and interact with window (True) or
            continue execution (False).
            Defaults to True.
        show (bool, optional):
            Whether to show in a window.
            Defaults to True.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original json.load(open(...)) leaked the handle).
    with open(chessboard_path) as chessboard_file:
        camera_para_json_dict = json.load(chessboard_file)
    camera_para_dict = {}
    for camera_id in camera_para_json_dict.keys():
        # Keys that are not integers are not cameras; among integer ids,
        # the ones divisible by 2 are the RGB cameras.
        try:
            if int(camera_id) % 2 != 0:
                continue
        except ValueError:
            continue
        temp_camera_parameter = CameraParameter(name=camera_id)
        temp_camera_parameter.load_from_chessboard(
            camera_para_json_dict[camera_id], camera_id)
        camera_para_dict[camera_id] = temp_camera_parameter
    camera_vedo_renderer = VedoRenderer()
    # Chessboard/kinect data uses y- as up; see VedoRenderer.set_y_reverse.
    camera_vedo_renderer.set_y_reverse()
    for camera_parameter in camera_para_dict.values():
        camera_vedo_renderer.add_camera(camera_parameter)
    if show:
        camera_vedo_renderer.show(with_axis=False, interactive=interactive)
14,313 | import json
import os
from mmhuman3d.core.cameras.camera_parameters import CameraParameter
from mmhuman3d.core.renderer.vedo_render import VedoRenderer
from mmhuman3d.utils.path_utils import check_path_suffix
class CameraParameter:
logger = None
SUPPORTED_KEYS = _CAMERA_PARAMETER_SUPPORTED_KEYS_
    def __init__(self,
                 name: str = 'default',
                 H: int = 1080,
                 W: int = 1920) -> None:
        """
        Args:
            name (str, optional):
                Name of this camera. Defaults to "default".
            H (int, optional):
                Height of a frame, in pixel. Defaults to 1080.
            W (int, optional):
                Width of a frame, in pixel. Defaults to 1920.
        """
        self.name = name
        self.parameters_dict = {}
        # Intrinsic matrix, stored as a nested 3x3 list of zeros.
        in_mat = __zero_mat_list__(3)
        self.parameters_dict['in_mat'] = in_mat
        # All distortion coefficients start at zero (no distortion).
        for distort_name in __distort_coefficient_names__:
            self.parameters_dict[distort_name] = 0.0
        # validate_item checks type and casts before storing.
        _, H = self.validate_item('H', H)
        self.parameters_dict['H'] = H
        _, W = self.validate_item('W', W)
        self.parameters_dict['W'] = W
        # Extrinsics: 3x3 rotation matrix and 3-element translation.
        r_mat = __zero_mat_list__(3)
        self.parameters_dict['rotation_mat'] = r_mat
        t_list = [0.0, 0.0, 0.0]
        self.parameters_dict['translation'] = t_list
def reset_distort(self) -> None:
"""Reset all distort coefficients to zero."""
for distort_name in __distort_coefficient_names__:
self.parameters_dict[distort_name] = 0.0
def get_opencv_distort_mat(self) -> np.ndarray:
"""Get a numpy array of 8 distort coefficients, which is the distCoeffs
arg of cv2.undistort.
Returns:
ndarray:
(k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6) of 8 elements.
"""
dist_coeffs = [
self.get_value('k1'),
self.get_value('k2'),
self.get_value('p1'),
self.get_value('p2'),
self.get_value('k3'),
self.get_value('k4'),
self.get_value('k5'),
self.get_value('k6'),
]
dist_coeffs = np.array(dist_coeffs)
return dist_coeffs
    def set_KRT(self,
                K_mat: np.ndarray,
                R_mat: np.ndarray,
                T_vec: np.ndarray,
                inverse_extrinsic: bool = False) -> None:
        """Set intrinsic and extrinsic of a camera.

        Args:
            K_mat (np.ndarray):
                In shape [3, 3].
            R_mat (np.ndarray):
                Rotation from world to view in default.
                In shape [3, 3].
            T_vec (np.ndarray):
                Translation from world to view in default.
                In shape [3,].
            inverse_extrinsic (bool, optional):
                If true, R_mat and T_vec transform a point
                from view to world. Defaults to False.
        """
        # Shape checks: K and R must be 3x3, T must be a 3-vector.
        k_shape = K_mat.shape
        assert k_shape[0] == k_shape[1] == 3
        r_shape = R_mat.shape
        assert r_shape[0] == r_shape[1] == 3
        assert T_vec.ndim == 1 and T_vec.shape[0] == 3
        self.set_mat_np('in_mat', K_mat)
        if inverse_extrinsic:
            # Convert view-to-world into world-to-view:
            # R' = R^-1, T' = -R' @ T.
            R_mat = np.linalg.inv(R_mat)
            T_vec = -np.dot(R_mat, T_vec).reshape((3))
        self.set_mat_np('rotation_mat', R_mat)
        self.set_value('translation', T_vec.tolist())
    def get_KRT(self, k_dim=3) -> List[np.ndarray]:
        """Get intrinsic and extrinsic of a camera.

        Args:
            k_dim (int, optional):
                Dimension of the returned mat K.
                Defaults to 3.

        Raises:
            ValueError: k_dim is neither 3 nor 4.

        Returns:
            List[np.ndarray]:
                K_mat (np.ndarray):
                    In shape [3, 3] (or [4, 4] when k_dim is 4).
                R_mat (np.ndarray):
                    Rotation from world to view in default.
                    In shape [3, 3].
                T_vec (np.ndarray):
                    Translation from world to view in default.
                    In shape [3,].
        """
        K_3x3 = self.get_mat_np('in_mat')
        R_mat = self.get_mat_np('rotation_mat')
        T_vec = np.asarray(self.get_value('translation'))
        if k_dim == 3:
            return [K_3x3, R_mat, T_vec]
        elif k_dim == 4:
            # Promote the 3x3 intrinsic matrix into a homogeneous 4x4 one;
            # the helper expects/returns a leading batch dimension.
            K_3x3 = np.expand_dims(K_3x3, 0)  # shape (1, 3, 3)
            K_4x4 = convert_K_3x3_to_4x4(
                K=K_3x3, is_perspective=True)  # shape (1, 4, 4)
            K_4x4 = K_4x4[0, :, :]
            return [K_4x4, R_mat, T_vec]
        else:
            raise ValueError(f'K mat cannot be converted to {k_dim}x{k_dim}')
    def set_mat_np(self, mat_key: str, mat_numpy: np.ndarray) -> None:
        """Set a matrix-type parameter to mat_numpy.

        Args:
            mat_key (str):
                Key of the target matrix. in_mat or rotation_mat.
            mat_numpy (ndarray):
                Matrix in numpy format.

        Raises:
            TypeError:
                mat_numpy is not an np.ndarray.
        """
        if not isinstance(mat_numpy, np.ndarray):
            raise TypeError
        # Matrices are stored as nested lists; delegate validation/storage.
        self.set_mat_list(mat_key, mat_numpy.tolist())
def set_mat_list(self, mat_key: str, mat_list: List[list]) -> None:
"""Set a matrix-type parameter to mat_list.
Args:
mat_key (str):
Key of the target matrix. in_mat or rotation_mat.
mat_list (List[list]):
Matrix in list format.
"""
_, mat_list = self.validate_item(mat_key, mat_list)
self.parameters_dict[mat_key] = mat_list
def set_value(self, key: str, value: Any) -> None:
"""Set a parameter to value.
Args:
key (str):
Name of the parameter.
value (object):
New value of the parameter.
"""
_, value = self.validate_item(key, value)
self.parameters_dict[key] = value
def get_value(self, key: str) -> Any:
"""Get a parameter by key.
Args:
key (str):
Name of the parameter.
Raises:
KeyError: key not in self.parameters_dict
Returns:
object:
Value of the parameter.
"""
if key not in self.parameters_dict:
raise KeyError(key)
else:
return self.parameters_dict[key]
def get_mat_np(self, key: str) -> np.ndarray:
"""Get a a matrix-type parameter by key.
Args:
key (str):
Name of the parameter.
Raises:
KeyError: key not in self.parameters_dict
Returns:
ndarray:
Value of the parameter.
"""
if key not in self.parameters_dict:
raise KeyError(key)
else:
mat_list = self.parameters_dict[key]
mat_np = np.array(mat_list).reshape((3, 3))
return mat_np
def to_string(self) -> str:
"""Convert self.to_dict() to a string.
Returns:
str:
A dict in json string format.
"""
dump_dict = self.to_dict()
ret_str = json.dumps(dump_dict)
return ret_str
def to_dict(self) -> dict:
"""Dump camera name and parameters to dict.
Returns:
dict:
Put self.name and self.parameters_dict
in one dict.
"""
dump_dict = self.parameters_dict.copy()
dump_dict['name'] = self.name
return dump_dict
def dump(self, json_path: str) -> None:
"""Dump camera name and parameters to a file.
Returns:
dict:
Put self.name and self.parameters_dict
in one dict, and dump them to a json file.
"""
dump_dict = self.to_dict()
with open(json_path, 'w') as f_write:
json.dump(dump_dict, f_write)
def load(self, json_path: str) -> None:
"""Load camera name and parameters from a file."""
with open(json_path, 'r') as f_read:
dumped_dict = json.load(f_read)
self.load_from_dict(dumped_dict)
    def load_from_dict(self, json_dict: dict) -> None:
        """Load name and parameters from a dict.

        Args:
            json_dict (dict):
                A dict comes from self.to_dict().
        """
        for key in json_dict.keys():
            if key == 'name':
                self.name = json_dict[key]
            elif key == 'rotation':
                # Legacy key: a flat 'rotation' is stored as 'rotation_mat'.
                self.parameters_dict['rotation_mat'] = np.array(
                    json_dict[key]).reshape(3, 3).tolist()
            elif key == 'translation':
                # Normalize any shape into a flat 3-element list.
                self.parameters_dict[key] = np.array(json_dict[key]).reshape(
                    (3)).tolist()
            else:
                self.parameters_dict[key] = json_dict[key]
                if '_mat' in key:
                    # Normalize every matrix entry into a nested 3x3 list.
                    self.parameters_dict[key] = np.array(
                        self.parameters_dict[key]).reshape(3, 3).tolist()
    def load_from_chessboard(self,
                             chessboard_dict: dict,
                             name: str,
                             inverse: bool = True) -> None:
        """Load name and parameters from a chessboard dict.

        Args:
            chessboard_dict (dict):
                A dict loaded from json.load(chessboard_file).
            name (str):
                Name of this camera.
            inverse (bool, optional):
                Whether to inverse rotation and translation mat.
                Defaults to True.
        """
        camera_param_dict = \
            __parse_chessboard_param__(chessboard_dict, name, inverse=inverse)
        self.load_from_dict(camera_param_dict)
    def load_kinect_from_smc(self, smc_reader, kinect_id: int) -> None:
        """Load name and parameters of a kinect from an SmcReader instance.

        Args:
            smc_reader (mmhuman3d.data.data_structures.smc_reader.SMCReader):
                An SmcReader instance containing kinect camera parameters.
            kinect_id (int):
                Id of the target kinect.
        """
        name = kinect_id
        extrinsics_dict = \
            smc_reader.get_kinect_color_extrinsics(
                kinect_id, homogeneous=False
            )
        rot_np = extrinsics_dict['R']
        trans_np = extrinsics_dict['T']
        intrinsics_np = \
            smc_reader.get_kinect_color_intrinsics(
                kinect_id
            )
        resolution = \
            smc_reader.get_kinect_color_resolution(
                kinect_id
            )
        # Invert the stored extrinsics: R' = R^-1, T' = -R' @ T.
        # NOTE(review): presumably SMC stores camera-to-world and this
        # yields world-to-camera — confirm against SMCReader.
        rmatrix = np.linalg.inv(rot_np).reshape(3, 3)
        tvec = -np.dot(rmatrix, trans_np)
        self.name = name
        self.set_mat_np('in_mat', intrinsics_np)
        self.set_mat_np('rotation_mat', rmatrix)
        self.set_value('translation', tvec.tolist())
        # The indexing implies resolution is (W, H).
        self.set_value('H', resolution[1])
        self.set_value('W', resolution[0])
    def load_iphone_from_smc(self,
                             smc_reader,
                             iphone_id: int = 0,
                             frame_id: int = 0) -> None:
        """Load name and parameters of an iPhone from an SmcReader instance.

        Args:
            smc_reader (mmhuman3d.data.data_structures.smc_reader.SMCReader):
                An SmcReader instance containing kinect camera parameters.
            iphone_id (int):
                Id of the target iphone.
                Defaults to 0.
            frame_id (int):
                Frame ID of one selected frame.
                It only influences the intrinsics.
                Defaults to 0.
        """
        name = f'iPhone_{iphone_id}'
        extrinsics_mat = \
            smc_reader.get_iphone_extrinsics(
                iphone_id, homogeneous=True
            )
        # Split the homogeneous 4x4 extrinsics into R and T.
        rot_np = extrinsics_mat[:3, :3]
        trans_np = extrinsics_mat[:3, 3]
        intrinsics_np = \
            smc_reader.get_iphone_intrinsics(
                iphone_id, frame_id
            )
        resolution = \
            smc_reader.get_iphone_color_resolution(
                iphone_id
            )
        # Invert the stored extrinsics: R' = R^-1, T' = -R' @ T.
        # NOTE(review): presumably SMC stores camera-to-world and this
        # yields world-to-camera — confirm against SMCReader.
        rmatrix = np.linalg.inv(rot_np).reshape(3, 3)
        tvec = -np.dot(rmatrix, trans_np)
        self.name = name
        self.set_mat_np('in_mat', intrinsics_np)
        self.set_mat_np('rotation_mat', rmatrix)
        self.set_value('translation', tvec.tolist())
        # The indexing implies resolution is (W, H).
        self.set_value('H', resolution[1])
        self.set_value('W', resolution[0])
    def load_from_perspective_cameras(cls,
                                      cam,
                                      name: str,
                                      resolution: Union[List, Tuple] = None):
        """Load parameters from a PerspectiveCameras and return a
        CameraParameter.

        NOTE(review): the first parameter is named ``cls`` but there is no
        ``@classmethod`` decorator, and the body instantiates
        ``CameraParameter()`` directly — confirm how callers invoke this.

        Args:
            cam (mmhuman3d.core.cameras.cameras.PerspectiveCameras):
                An instance.
            name (str):
                Name of this camera.
            resolution (Union[List, Tuple], optional):
                (height, width); when None, taken from ``cam.resolution``.
        """
        assert isinstance(cam, PerspectiveCameras
                          ), 'Wrong input, support PerspectiveCameras only!'
        if len(cam) > 1:
            warnings.warn('Will only use the first camera in the batch.')
            cam = cam[0]
        resolution = resolution if resolution is not None else cam.resolution[
            0].tolist()
        height, width = int(resolution[0]), int(resolution[1])
        cam_param = CameraParameter()
        # NOTE(review): redundant second __init__ call; passing the
        # arguments to the constructor above would suffice.
        cam_param.__init__(H=height, W=width, name=name)
        k_4x4 = cam.K  # shape (1, 4, 4)
        r_3x3 = cam.R  # shape (1, 3, 3)
        t_3 = cam.T  # shape (1, 3)
        is_perspective = cam.is_perspective()
        in_ndc = cam.in_ndc()
        # Convert pytorch3d NDC convention to opencv screen convention.
        # NOTE(review): is_perspective is hard-coded False here although
        # cam.is_perspective() is used below for the K conversion — verify.
        k_4x4, r_3x3, t_3 = convert_camera_matrix(
            K=k_4x4,
            R=r_3x3,
            T=t_3,
            is_perspective=False,
            in_ndc_dst=False,
            in_ndc_src=in_ndc,
            convention_src='pytorch3d',
            convention_dst='opencv',
            resolution_src=(height, width),
            resolution_dst=(height, width))
        k_3x3 = \
            convert_K_4x4_to_3x3(k_4x4, is_perspective=is_perspective)
        # Drop the batch dimension and move to numpy for storage.
        k_3x3 = k_3x3.numpy()[0]
        r_3x3 = r_3x3.numpy()[0]
        t_3 = t_3.numpy()[0]
        cam_param.name = name
        cam_param.set_mat_np('in_mat', k_3x3)
        cam_param.set_mat_np('rotation_mat', r_3x3)
        cam_param.set_value('translation', t_3.tolist())
        cam_param.parameters_dict.update(H=height)
        cam_param.parameters_dict.update(W=width)
        return cam_param
    def export_to_perspective_cameras(self) -> PerspectiveCameras:
        """Export to an OpenCV-defined screen-space PerspectiveCameras.

        Returns:
            PerspectiveCameras:
                A screen-space (``in_ndc=False``) camera of batch_size 1
                built from this instance's K, R, T and resolution.
        """
        height = self.parameters_dict['H']
        width = self.parameters_dict['W']
        k_4x4, rotation, translation = self.get_KRT(k_dim=4)
        # Add a leading batch dimension of size 1 for build_cameras.
        k_4x4 = np.expand_dims(k_4x4, 0)  # shape (1, 4, 4)
        rotation = np.expand_dims(rotation, 0)  # shape (1, 3, 3)
        translation = np.expand_dims(translation, 0)  # shape (1, 3)
        new_K = torch.from_numpy(k_4x4)
        new_R = torch.from_numpy(rotation)
        new_T = torch.from_numpy(translation)
        cam = build_cameras(
            dict(
                type='PerspectiveCameras',
                K=new_K.float(),
                R=new_R.float(),
                T=new_T.float(),
                convention='opencv',
                in_ndc=False,
                resolution=(height, width)))
        return cam
def validate_item(self, key: Any, val: Any) -> List:
"""Check whether the key and its value matches definition in
CameraParameter.SUPPORTED_KEYS.
Args:
key (Any):
Key in CameraParameter.
val (Any):
Value to the key.
Raises:
KeyError:
key cannot be found in
CameraParameter.SUPPORTED_KEYS.
TypeError:
Value's type doesn't match definition.
Returns:
key (Any): The input key.
val (Any): The value casted into correct format.
"""
self.__check_key__(key)
formatted_val = self.__validate_value_type__(key, val)
return key, formatted_val
def __check_key__(self, key: Any) -> None:
"""Check whether the key matches definition in
CameraParameter.SUPPORTED_KEYS.
Args:
key (Any):
Key in CameraParameter.
Raises:
KeyError:
key cannot be found in
CameraParameter.SUPPORTED_KEYS.
"""
if key not in self.__class__.SUPPORTED_KEYS:
err_msg = 'Key check failed in CameraParameter:\n'
err_msg += f'key={str(key)}\n'
raise KeyError(err_msg)
    def __validate_value_type__(self, key: Any, val: Any) -> Any:
        """Check whether the type of value matches definition in
        CameraParameter.SUPPORTED_KEYS.

        Args:
            key (Any):
                Key in CameraParameter.
            val (Any):
                Value to the key.

        Raises:
            TypeError:
                Value is supported but doesn't match definition.

        Returns:
            val (Any): The value casted into correct format.
        """
        # Map python scalar types to numpy abstract super-types so numpy
        # scalars (e.g. np.float32) are accepted for int/float keys.
        np_type_mapping = {int: np.integer, float: np.floating}
        supported_keys = self.__class__.SUPPORTED_KEYS
        validation_result = _TypeValidation.FAIL
        ret_val = None
        if supported_keys[key]['type'] == int or\
                supported_keys[key]['type'] == float:
            # Scalar keys: accept python scalars, numpy scalars and
            # zero-dim torch tensors; reject arrays and slices.
            type_str = str(type(val))
            class_name = type_str.split('\'')[1]
            if type(val) == self.__class__.SUPPORTED_KEYS[key]['type']:
                validation_result = _TypeValidation.MATCH
                ret_val = val
            elif class_name.startswith('numpy'):
                # a value is required, not array
                if np.issubdtype(
                        type(val),
                        np_type_mapping[supported_keys[key]['type']]):
                    validation_result = _TypeValidation.MATCH
                    ret_val = val.astype(supported_keys[key]['type'])
                elif np.issubdtype(type(val), np.ndarray):
                    # NOTE(review): np.issubdtype is meant for dtypes;
                    # isinstance(val, np.ndarray) is likely the intent here.
                    validation_result = _TypeValidation.ARRAY
            elif class_name.startswith('torch'):
                # only one element tensors
                # can be converted to Python scalars
                if len(val.size()) == 0:
                    val_item = val.item()
                    if type(val_item) == supported_keys[key]['type']:
                        validation_result = _TypeValidation.MATCH
                        ret_val = val_item
                else:
                    validation_result = _TypeValidation.ARRAY
        else:
            # Non-scalar keys (e.g. nested lists): require an exact match.
            if type(val) == self.__class__.SUPPORTED_KEYS[key]['type']:
                validation_result = _TypeValidation.MATCH
                ret_val = val
        if validation_result != _TypeValidation.MATCH:
            err_msg = 'Type check failed in CameraParameter:\n'
            err_msg += f'key={str(key)}\n'
            err_msg += f'type(val)={type(val)}\n'
            if validation_result == _TypeValidation.ARRAY:
                err_msg += 'A single value is expected, ' +\
                    'neither an array nor a slice.\n'
            raise TypeError(err_msg)
        return ret_val
class VedoRenderer(object):
    """An interactive renderer for camera visualization.

    Cameras are accumulated with :meth:`add_camera` and displayed,
    optionally together with world-axis arrows, by :meth:`show`.
    """
    def __init__(self, scale=0.03):
        """Visualize cameras in an interactive scene supported by vedo.

        Args:
            scale (float, optional):
                Scale factor. Defaults to 0.03.
        """
        self.scale = scale
        # Pre-built world-axis arrows (r-x, g-y, b-z).
        self.axis_list = self.__init_axis()
        # Vedo items (arrows and labels) of every added camera.
        self.camera_list = []
        self.frames_dir_path = ''
        # When True, all geometry is rotated 180 degrees around z;
        # see set_y_reverse().
        self.y_reverse = False
    def __init_axis(self, axis_len=80):
        """Prepare arrows for axis.

        Args:
            axis_len (int, optional):
                Length of each axis.
                Defaults to 80.

        Returns:
            List[Arrows]:
                A list of three arrows.
        """
        # Rows of the identity give the three unit directions.
        arrow_end_np = np.eye(3) * axis_len * self.scale
        colors = ['r', 'g', 'b']  # r-x, g-y, b-z
        ret_list = []
        for axis_index in range(3):
            ret_list.append(
                vedo.Arrows([[0, 0, 0]],
                            [arrow_end_np[axis_index]]).c(colors[axis_index]))
        return ret_list
    def set_y_reverse(self):
        """Set y reverse before add_camera if it is needed.

        Vedo defines y+ as up direction. When visualizing kinect cameras, y- is
        up, call set_y_reverse in this situation to make text in correct
        direction.
        """
        self.y_reverse = True
        # A 180-degree rotation around z flips both x and y.
        self.y_reverse_rotation = \
            scipy_Rotation.from_euler('z', 180, degrees=True)
    def add_camera(self, camera_parameter, arrow_len=30):
        """Add a camera to the scene.

        Args:
            camera_parameter (CameraParameter):
                An instance of class CameraParameter which stores
                rotation, translation and name of a camera.
            arrow_len (int, optional):
                Length of the arrow. Defaults to 30.

        Returns:
            list:
                A list of vedo items related to the input camera.
        """
        rot_mat = np.asarray(camera_parameter.get_value('rotation_mat'))
        translation = np.asarray(camera_parameter.get_value('translation'))
        # Camera center in world coordinates: C = -R^-1 @ T.
        cam_center = -np.linalg.inv(rot_mat).dot(translation)
        arrow_end_origin = np.eye(3) * arrow_len * self.scale
        colors = ['r', 'g', 'b']  # r-x, g-y, b-z
        # Rotate the per-camera axis arrows into world space.
        arrow_end_camera = \
            np.einsum('ij,kj->ki', np.linalg.inv(rot_mat), arrow_end_origin)
        if self.y_reverse:
            cam_center = self.y_reverse_rotation.apply(cam_center)
            for axis_index in range(3):
                arrow_end_camera[axis_index, :] = \
                    self.y_reverse_rotation.apply(
                        arrow_end_camera[axis_index, :]
                    )
        vedo_list = []
        for i in range(3):
            vedo_list.append(
                vedo.Arrows([cam_center],
                            [cam_center + arrow_end_camera[i]]).c(colors[i]))
        # Label the camera with its name at its center.
        vedo_list.append(
            vedo.Text3D(camera_parameter.name, cam_center, s=self.scale * 10))
        self.camera_list += vedo_list
        return vedo_list
    def show(self, with_axis=True, interactive=True):
        """Show cameras as well as axis arrow by vedo.show()

        Args:
            with_axis (bool, optional):
                Whether to show the axis arrow. Defaults to True.
            interactive (bool, optional):
                Pause and interact with window (True) or
                continue execution (False).
                Defaults to True.
        """
        list_to_show = []
        list_to_show += self.camera_list
        if with_axis:
            list_to_show += self.axis_list
        # Render everything at once, then clear the scene for reuse.
        vedo.show(*list_to_show, interactive=interactive, axes=1)
        vedo.clear()
def check_path_suffix(path_str: str,
                      allowed_suffix: Union[str, List[str]] = '') -> bool:
    """Check whether the suffix of the path is allowed.

    Args:
        path_str (str):
            Path to check.
        allowed_suffix (List[str], optional):
            What extension names are allowed.
            Offer a list like ['.jpg', ',jpeg'].
            When it's [], all will be received.
            Use [''] then directory is allowed.
            Defaults to ''.

    Returns:
        bool:
            True: suffix test passed
            False: suffix test failed
    """
    if isinstance(allowed_suffix, str):
        allowed_suffix = [allowed_suffix]
    # An empty white-list accepts everything.
    if len(allowed_suffix) == 0:
        return True
    pathinfo = Path(path_str)
    if pathinfo.is_dir():
        # Directories pass only when '' is explicitly allowed.
        return '' in allowed_suffix
    # Normalize into a NEW list ('.'-prefixed, lower-case) instead of
    # mutating the caller's argument in place (the original rewrote
    # allowed_suffix element by element).
    normalized = [
        suffix.lower() if suffix.startswith('.') else '.' + suffix.lower()
        for suffix in allowed_suffix
    ]
    return pathinfo.suffix.lower() in normalized
The provided code snippet includes necessary dependencies for implementing the `visualize_dumped_camera_parameter` function. Write a Python function `def visualize_dumped_camera_parameter(dumped_dir: str, interactive: bool = True, show: bool = True)` to solve the following problem:
Visualize all cameras dumped in a directory. Args: dumped_dir (str): path to the directory. interactive (bool, optional): pause and interact with the window (True) or continue execution (False); defaults to True. show (bool, optional): whether to show in a window; defaults to True.
Here is the function:
def visualize_dumped_camera_parameter(dumped_dir: str,
                                      interactive: bool = True,
                                      show: bool = True):
    """Visualize all cameras dumped in a directory.

    Args:
        dumped_dir (str):
            Path to the directory.
        interactive (bool, optional):
            Pause and interact with window (True) or
            continue execution (False).
            Defaults to True.
        show (bool, optional):
            Whether to show in a window.
            Defaults to True.
    """
    camera_para_list = []
    # Sort for a deterministic loading order; os.listdir order is
    # filesystem dependent.
    for file_name in sorted(os.listdir(dumped_dir)):
        file_path = os.path.join(dumped_dir, file_name)
        # Only json dumps (as produced by CameraParameter.dump) are loaded.
        if not check_path_suffix(file_path, ['.json']):
            continue
        cam_para = CameraParameter()
        cam_para.load(file_path)
        camera_para_list.append(cam_para)
    camera_vedo_renderer = VedoRenderer()
    camera_vedo_renderer.set_y_reverse()
    for camera_para in camera_para_list:
        camera_vedo_renderer.add_camera(camera_para)
    if show:
        camera_vedo_renderer.show(with_axis=False, interactive=interactive)
14,314 | import glob
import os
import os.path as osp
import shutil
import warnings
from pathlib import Path
from typing import Iterable, List, Optional, Tuple, Union
import cv2
import numpy as np
from tqdm import tqdm
from mmhuman3d.core.conventions.keypoints_mapping import KEYPOINTS_FACTORY
from mmhuman3d.core.conventions.keypoints_mapping.human_data import (
HUMAN_DATA_LIMBS_INDEX,
HUMAN_DATA_PALETTE,
)
from mmhuman3d.utils.demo_utils import get_different_colors
from mmhuman3d.utils.ffmpeg_utils import images_to_video, video_to_images
from mmhuman3d.utils.keypoint_utils import search_limbs
from mmhuman3d.utils.path_utils import (
Existence,
check_input_path,
check_path_existence,
check_path_suffix,
prepare_output_path,
)
def check_path_suffix(path_str: str,
                      allowed_suffix: Union[str, List[str]] = '') -> bool:
    """Check whether the suffix of the path is allowed.

    Args:
        path_str (str):
            Path to check.
        allowed_suffix (List[str], optional):
            What extension names are allowed.
            Offer a list like ['.jpg', ',jpeg'].
            When it's [], all will be received.
            Use [''] then directory is allowed.
            Defaults to ''.

    Returns:
        bool:
            True: suffix test passed
            False: suffix test failed
    """
    if isinstance(allowed_suffix, str):
        allowed_suffix = [allowed_suffix]
    # An empty white-list accepts everything.
    if len(allowed_suffix) == 0:
        return True
    pathinfo = Path(path_str)
    if pathinfo.is_dir():
        # Directories pass only when '' is explicitly allowed.
        return '' in allowed_suffix
    # Normalize into a NEW list ('.'-prefixed, lower-case) instead of
    # mutating the caller's argument in place (the original rewrote
    # allowed_suffix element by element).
    normalized = [
        suffix.lower() if suffix.startswith('.') else '.' + suffix.lower()
        for suffix in allowed_suffix
    ]
    return pathinfo.suffix.lower() in normalized
class Existence(Enum):
    """State of file existence."""
    FileExist = 0  # a file exists at the path
    DirectoryExistEmpty = 1  # an empty directory exists at the path
    DirectoryExistNotEmpty = 2  # a non-empty directory exists at the path
    MissingParent = 3  # the parent directory does not exist
    DirectoryNotExist = 4  # a directory was expected but not found
    FileNotExist = 5  # a file was expected but not found
def check_path_existence(
    path_str: str,
    path_type: Literal['file', 'dir', 'auto'] = 'auto',
) -> Existence:
    """Check whether a file or a directory exists at the expected path.

    Args:
        path_str (str):
            Path to check.
        path_type (Literal['file', 'dir', 'auto'], optional):
            What kind of file do we expect at the path.
            Choose among `file`, `dir`, `auto`.
            Defaults to 'auto'.

    Raises:
        AssertionError: if `path_type` is not one of the allowed values.

    Returns:
        Existence:
            0. FileExist: file at path_str exists.
            1. DirectoryExistEmpty: folder at path_str exists and is empty.
            2. DirectoryExistNotEmpty: folder at path_str exists and not empty.
            3. MissingParent: its parent doesn't exist.
            4. DirectoryNotExist: expect a folder at path_str, but not found.
            5. FileNotExist: expect a file at path_str, but not found.
    """
    path_type = path_type.lower()
    assert path_type in {'file', 'dir', 'auto'}
    pathinfo = Path(path_str)
    # If even the parent is missing, nothing can exist at path_str.
    if not pathinfo.parent.is_dir():
        return Existence.MissingParent
    suffix = pathinfo.suffix.lower()
    # 'auto' infers the expectation from the suffix: no extension -> dir.
    if path_type == 'dir' or\
            path_type == 'auto' and suffix == '':
        if pathinfo.is_dir():
            if len(os.listdir(path_str)) == 0:
                return Existence.DirectoryExistEmpty
            else:
                return Existence.DirectoryExistNotEmpty
        else:
            return Existence.DirectoryNotExist
    elif path_type == 'file' or\
            path_type == 'auto' and suffix != '':
        if pathinfo.is_file():
            return Existence.FileExist
        elif pathinfo.is_dir():
            # A directory sits where a file was expected; report what is
            # actually there.
            if len(os.listdir(path_str)) == 0:
                return Existence.DirectoryExistEmpty
            else:
                return Existence.DirectoryExistNotEmpty
        # A trailing slash signals the caller meant a directory.
        if path_str.endswith('/'):
            return Existence.DirectoryNotExist
        else:
            return Existence.FileNotExist
The provided code snippet includes necessary dependencies for implementing the `_check_frame_path` function. Write a Python function `def _check_frame_path(frame_list)` to solve the following problem:
Check frame path.
Here is the function:
def _check_frame_path(frame_list):
    """Ensure every frame path is an existing .png or .jp(e)g file."""
    allowed = ['.png', '.jpg', '.jpeg']
    for frame_path in frame_list:
        is_file = \
            check_path_existence(frame_path, 'file') == Existence.FileExist
        if not (is_file and check_path_suffix(frame_path, allowed)):
            raise FileNotFoundError(
                f'The frame should be .png or .jp(e)g: {frame_path}')
14,315 | import glob
import os
import os.path as osp
import shutil
import warnings
from pathlib import Path
from typing import Iterable, List, Optional, Tuple, Union
import cv2
import numpy as np
from tqdm import tqdm
from mmhuman3d.core.conventions.keypoints_mapping import KEYPOINTS_FACTORY
from mmhuman3d.core.conventions.keypoints_mapping.human_data import (
HUMAN_DATA_LIMBS_INDEX,
HUMAN_DATA_PALETTE,
)
from mmhuman3d.utils.demo_utils import get_different_colors
from mmhuman3d.utils.ffmpeg_utils import images_to_video, video_to_images
from mmhuman3d.utils.keypoint_utils import search_limbs
from mmhuman3d.utils.path_utils import (
Existence,
check_input_path,
check_path_existence,
check_path_suffix,
prepare_output_path,
)
def _plot_kp2d_frame(kp2d_person: np.ndarray,
                     canvas: np.ndarray,
                     limbs: Union[list, dict,
                                  np.ndarray] = HUMAN_DATA_LIMBS_INDEX,
                     palette: Optional[Union[dict, np.ndarray]] = None,
                     draw_bbox: bool = False,
                     with_number: bool = False,
                     font_size: Union[float, int] = 0.5,
                     disable_limbs: bool = False) -> np.ndarray:
    """Plot a single frame(array) with keypoints, limbs, bbox, index.

    Args:
        kp2d_person (np.ndarray): `np.ndarray` shape of (J * 2).
        canvas (np.ndarray): cv2 image, (H * W * 3) array.
        limbs (Union[list, dict, np.ndarray], optional): limbs in form of
            `dict` or 2-dimensional `list` or `np.ndarray` of shape
            (num_limb, 2).
            `dict` is used mainly for function `visualize_kp2d`, you can also
            get the limbs by function `search_limbs`.
            Defaults to `HUMAN_DATA_LIMBS_INDEX`.
        palette (Optional[Union[dict, np.ndarray, list]], optional):
            Pass an (1, 3) `np.ndarray` or `list` [B, G, R] if want the whole
            limbs and keypoints will be in same color.
            Pass `None` to use our colorful palette.
            Pass an (num_limb, 3) `np.ndarray` to get each limb your specific
            color.
            `dict` is used mainly for function `visualize_kp2d`, you can also
            get the palette by function `search_limbs`.
            Defaults to None.
        draw_bbox (bool, optional): whether need to draw bounding boxes.
            Defaults to False.
        with_number (bool, optional): whether need to draw index numbers.
            Defaults to False.
        font_size (Union[float, int], optional): the font size of the index.
            Defaults to 0.5.
        disable_limbs (bool, optional): whether need to disable drawing limbs.
            Defaults to False.
    Returns:
        np.ndarray: opencv image of shape (H * W * 3).
    """
    # slice the kp2d array: keep only the (x, y) columns
    kp2d_person = kp2d_person.copy()
    if kp2d_person.shape[-1] >= 3:
        # Keep exactly the first two columns. The previous `[..., :-1]`
        # only dropped the last column, so a 4-column input (x, y, conf,
        # extra) kept 3 columns and tripped the shape assert below,
        # contradicting the warning text.
        kp2d_person = kp2d_person[..., :2]
        warnings.warn(
            'The input array has more than 2-Dimensional coordinates, will'
            'keep only the first 2-Dimensions of the last axis. The new'
            f'array shape: {kp2d_person.shape}')
    if kp2d_person.ndim == 3 and kp2d_person.shape[0] == 1:
        # squeeze a leading singleton person axis
        kp2d_person = kp2d_person[0]
    assert kp2d_person.ndim == 2 and kp2d_person.shape[
        -1] == 2, f'Wrong input array shape {kp2d_person.shape}, \
            should be (num_kp, 2)'
    if draw_bbox:
        # NOTE(review): assumes _get_bbox returns (x_min, x_max, y_min,
        # y_max) — see the cv2.rectangle call at the bottom; confirm.
        bbox = _get_bbox(kp2d_person, canvas, expand=True)
    else:
        bbox = None
    # determine the limb connections and palette
    if not disable_limbs:
        if isinstance(limbs, list):
            limbs = {'body': limbs}
        elif isinstance(limbs, np.ndarray):
            limbs = {'body': limbs.reshape(-1, 2).astype(np.int32).tolist()}
        else:
            assert set(limbs.keys()).issubset(HUMAN_DATA_LIMBS_INDEX)
        if palette is None:
            palette = {'body': None}
        elif isinstance(palette, dict):
            assert set(palette.keys()) == set(limbs.keys())
    else:
        limbs = {'body': None}
    # draw by part to specify the thickness and color
    for part_name, part_limbs in limbs.items():
        # scatter_points_index means the limb end points
        if not disable_limbs:
            scatter_points_index = list(
                set(np.array([part_limbs]).reshape(-1).tolist()))
        else:
            scatter_points_index = list(range(len(kp2d_person)))
        if isinstance(palette, dict) and part_name == 'body':
            thickness = 2
            radius = 3
            color = get_different_colors(len(scatter_points_index))
        elif disable_limbs and palette is None:
            # limbs disabled: `thickness` is unused since no lines are drawn
            radius = 2
            color = get_different_colors(len(scatter_points_index))
        else:
            thickness = 2
            radius = 2
            # NOTE(review): when palette came in as None ({'body': None})
            # and part_name != 'body', palette[part_name] raises KeyError;
            # callers appear to always pass a full dict here — confirm.
            if isinstance(palette, np.ndarray):
                color = palette.astype(np.int32)
            elif isinstance(palette, dict):
                color = np.array(palette[part_name]).astype(np.int32)
            elif isinstance(palette, list):
                color = np.array(palette).reshape(-1, 3).astype(np.int32)
        if not disable_limbs:
            for limb_index, limb in enumerate(part_limbs):
                # clamp so a short palette is reused for trailing limbs
                limb_index = min(limb_index, len(color) - 1)
                cv2.line(
                    canvas,
                    tuple(kp2d_person[limb[0]].astype(np.int32)),
                    tuple(kp2d_person[limb[1]].astype(np.int32)),
                    color=tuple(color[limb_index].tolist()),
                    thickness=thickness)
        # draw the points inside the image region
        for index in scatter_points_index:
            x, y = kp2d_person[index, :2]
            if np.isnan(x) or np.isnan(y):
                continue
            if 0 <= x < canvas.shape[1] and 0 <= y < canvas.shape[0]:
                if disable_limbs:
                    point_color = color[index].tolist()
                else:
                    point_color = color[min(color.shape[0] - 1,
                                            len(scatter_points_index) -
                                            1)].tolist()
                cv2.circle(
                    canvas, (int(x), int(y)),
                    radius,
                    point_color,
                    thickness=-1)
                if with_number:
                    cv2.putText(
                        canvas, str(index), (int(x), int(y)),
                        cv2.FONT_HERSHEY_SIMPLEX, font_size,
                        np.array([255, 255, 255]).astype(np.int32).tolist(), 2)
    # draw the bboxes
    if bbox is not None:
        bbox = bbox.astype(np.int32)
        cv2.rectangle(canvas, (bbox[0], bbox[2]), (bbox[1], bbox[3]),
                      (0, 255, 255), 1)
    return canvas
def _prepare_limb_palette(limbs,
                          palette,
                          pop_parts,
                          data_source,
                          mask,
                          search_limbs_func=search_limbs):
    """Prepare limbs and their palette for plotting.

    Args:
        limbs (Union[np.ndarray, List[int]]):
            The preset limbs. This option is for free skeletons like BVH file.
            In most cases, it's set to None,
            this function will search a result for limbs automatically.
        palette (Iterable):
            The preset palette for limbs. Specified palette,
            three int represents (B, G, R). Should be tuple or list.
            In most cases, it's set to None,
            a palette will be generated with the result of search_limbs.
        pop_parts (Iterable[str]):
            The body part names you do not
            want to visualize.
            When it's none, nothing will be removed.
        data_source (str):
            Data source type.
        mask (Union[list, np.ndarray):
            A mask to mask out the incorrect points.

    Returns:
        Tuple[dict, dict]: (limbs_target, limbs_palette).
    """
    if limbs is not None:
        # explicit limbs: one 'body' part with an auto-generated palette
        limbs_target, limbs_palette = {
            'body': limbs.tolist() if isinstance(limbs, np.ndarray) else limbs
        }, get_different_colors(len(limbs))
    else:
        limbs_target, limbs_palette = search_limbs_func(
            data_source=data_source, mask=mask)
    if palette:
        # a single explicit color overrides the searched palette
        limbs_palette = np.array(palette, dtype=np.uint8)[None]
    # check and pop the pop_parts
    assert set(pop_parts).issubset(
        HUMAN_DATA_PALETTE
    ), f'wrong part_names in pop_parts, supported parts are\
        {set(HUMAN_DATA_PALETTE.keys())}'
    for part_name in pop_parts:
        if part_name in limbs_target:
            limbs_target.pop(part_name)
            # limbs_palette is a per-part dict only when produced by
            # search_limbs; with explicit `limbs`/`palette` it is a plain
            # ndarray, and ndarray.pop(...) would raise AttributeError.
            if isinstance(limbs_palette, dict):
                limbs_palette.pop(part_name)
    return limbs_target, limbs_palette
def _prepare_output_path(output_path, overwrite):
    """Validate output_path and return the folder frames are written into."""
    prepare_output_path(
        output_path,
        allowed_suffix=['.mp4', ''],
        tag='output video',
        path_type='auto',
        overwrite=overwrite)
    if check_path_suffix(output_path, ['']):
        # output_path is a directory: write frames directly into it
        temp_folder = output_path
    else:
        # output_path is a video file: stage frames in a sibling temp folder
        temp_folder = output_path + '_temp_images'
        existence = check_path_existence(temp_folder, 'dir')
        if existence in (Existence.DirectoryExistNotEmpty,
                         Existence.DirectoryExistEmpty):
            shutil.rmtree(temp_folder)
    os.makedirs(temp_folder, exist_ok=True)
    return temp_folder
def _check_temp_path(temp_folder, frame_list, overwrite):
"""Check temp frame folder path."""
if not overwrite and frame_list is not None and len(frame_list) > 0:
if Path(temp_folder).absolute() == \
Path(frame_list[0]).parent.absolute():
raise FileExistsError(
f'{temp_folder} exists (set --overwrite to overwrite).')
class _CavasProducer:
    """Produce per-frame background canvases for keypoint plotting.

    The background is taken, in priority order, from ``image_array``,
    from image files in ``frame_list``, or a pure-white canvas.
    """

    def __init__(self,
                 frame_list,
                 resolution,
                 kp2d=None,
                 image_array=None,
                 default_scale=1.5):
        """Initialize a canvas writer.

        Args:
            frame_list: list of background image paths, or None.
            resolution: (height, width) to resize canvases to, or None to
                keep/infer the native size.
            kp2d: keypoint array; its first axis defines len(self), and its
                max coordinate sizes the fallback white canvas.
            image_array: optional array of background frames
                (frame, H, W, 3); takes priority over frame_list.
            default_scale: multiplier applied to max(kp2d) when the canvas
                size must be inferred from the keypoints alone.
        """
        # check the origin background frames
        if frame_list is not None:
            _check_frame_path(frame_list)
            self.frame_list = frame_list
        else:
            self.frame_list = []
        self.resolution = resolution
        self.kp2d = kp2d
        # with numpy array frames
        self.image_array = image_array
        if self.resolution is None:
            # infer an automatic resolution from the best available source
            if self.image_array is not None:
                self.auto_resolution = self.image_array.shape[1:3]
            # NOTE(review): `> 1` skips resolution inference when exactly
            # one frame is given — looks like it should be `>= 1`; confirm.
            elif len(self.frame_list) > 1 and \
                check_path_existence(
                    self.frame_list[0], 'file') == Existence.FileExist:
                tmp_image_array = cv2.imread(self.frame_list[0])
                self.auto_resolution = tmp_image_array.shape[:2]
            else:
                # no background at all: size a square canvas from keypoints
                self.auto_resolution = [
                    int(np.max(kp2d) * default_scale),
                    int(np.max(kp2d) * default_scale)
                ]
        # number of frames to produce is driven by the keypoint array
        self.len = kp2d.shape[0]
        if self.image_array is None:
            self.len_frame = len(self.frame_list)
        else:
            self.len_frame = self.image_array.shape[0]

    def __getitem__(self, frame_index):
        """Return (canvas, kp2d_frame) for one frame.

        Keypoints are rescaled whenever the canvas itself is resized, so
        the two always stay in the same coordinate space.
        """
        # frame file exists, resolution not set
        if frame_index < self.len_frame and self.resolution is None:
            if self.image_array is not None:
                canvas = self.image_array[frame_index]
            else:
                canvas = cv2.imread(self.frame_list[frame_index])
            if self.kp2d is None:
                kp2d_frame = None
            else:
                kp2d_frame = self.kp2d[frame_index]
        # no frame file, resolution has been set
        elif frame_index >= self.len_frame and self.resolution is not None:
            canvas = np.ones((self.resolution[0], self.resolution[1], 3),
                             dtype=np.uint8) * 255
            if self.kp2d is None:
                kp2d_frame = None
            else:
                kp2d_frame = self.kp2d[frame_index]
        # frame file exists, resolution has been set
        elif frame_index < self.len_frame and self.resolution is not None:
            if self.image_array is not None:
                canvas = self.image_array[frame_index]
            else:
                canvas = cv2.imread(self.frame_list[frame_index])
            # scale factors used to remap the keypoints after resizing
            w_scale = self.resolution[1] / canvas.shape[1]
            h_scale = self.resolution[0] / canvas.shape[0]
            canvas = cv2.resize(canvas,
                                (self.resolution[1], self.resolution[0]),
                                cv2.INTER_CUBIC)
            if self.kp2d is None:
                kp2d_frame = None
            else:
                kp2d_frame = np.array([[w_scale, h_scale]
                                       ]) * self.kp2d[frame_index]
        # no frame file, no resolution
        else:
            # fall back to the auto-sized pure-white canvas
            canvas = np.ones(
                (self.auto_resolution[0], self.auto_resolution[1], 3),
                dtype=np.uint8) * 255
            if self.kp2d is None:
                kp2d_frame = None
            else:
                kp2d_frame = self.kp2d[frame_index]
        return canvas, kp2d_frame

    def __len__(self):
        """Number of frames, equal to kp2d.shape[0]."""
        return self.len
def update_frame_list(frame_list, origin_frames, img_format, start, end):
    """Resolve the background frame list from either input source.

    ``frame_list`` has priority; otherwise frames are extracted from
    ``origin_frames`` (a video/gif or an image folder). Returns
    (frame_list, input_temp_folder) where the temp folder is non-None only
    when a video was sliced into images.
    """
    input_temp_folder = None
    # choose in frame_list or origin_frames
    if frame_list is None and origin_frames is None:
        print('No background provided, will use pure white background.')
    elif frame_list is not None and origin_frames is not None:
        warnings.warn('Redundant input, will only use frame_list.')
        origin_frames = None
    if origin_frames is not None:
        check_input_path(
            input_path=origin_frames,
            allowed_suffix=['.mp4', '.gif', ''],
            tag='origin frames',
            path_type='auto')
        if Path(origin_frames).is_file():
            # a video/gif: slice it into a temporary image folder first
            input_temp_folder = origin_frames + '_temp_images/'
            video_to_images(
                origin_frames, input_temp_folder, start=start, end=end)
            frame_list = sorted(
                glob.glob(osp.join(input_temp_folder, '*.png')))
        elif img_format is None:
            # an image folder without a name template: take every image
            frame_list = sorted(
                osp.join(origin_frames, name)
                for name in os.listdir(origin_frames)
                if Path(name).suffix.lower() in ['.png', '.jpg', '.jpeg'])
        else:
            # an image folder with a name template: take the indexed range
            candidates = (osp.join(origin_frames, img_format % index)
                          for index in range(start, end))
            frame_list = sorted(
                path for path in candidates if osp.exists(path))
    return frame_list, input_temp_folder
# Maps each keypoint-convention name to its ordered keypoint-name list.
# Values come from the per-dataset convention modules imported elsewhere
# in this package; 'human_data' is the superset convention.
KEYPOINTS_FACTORY = {
    'human_data': human_data.HUMAN_DATA,
    'agora': agora.AGORA_KEYPOINTS,
    'coco': coco.COCO_KEYPOINTS,
    'coco_wholebody': coco_wholebody.COCO_WHOLEBODY_KEYPOINTS,
    'crowdpose': crowdpose.CROWDPOSE_KEYPOINTS,
    'smplx': smplx.SMPLX_KEYPOINTS,
    'smpl': smpl.SMPL_KEYPOINTS,
    'smpl_45': smpl.SMPL_45_KEYPOINTS,
    'smpl_54': smpl.SMPL_54_KEYPOINTS,
    'smpl_49': smpl.SMPL_49_KEYPOINTS,
    'smpl_24': smpl.SMPL_24_KEYPOINTS,
    'star': star.STAR_KEYPOINTS,
    'mpi_inf_3dhp': mpi_inf_3dhp.MPI_INF_3DHP_KEYPOINTS,
    'mpi_inf_3dhp_test': mpi_inf_3dhp.MPI_INF_3DHP_TEST_KEYPOINTS,
    'penn_action': penn_action.PENN_ACTION_KEYPOINTS,
    'h36m': h36m.H36M_KEYPOINTS,
    'h36m_mmpose': h36m.H36M_KEYPOINTS_MMPOSE,
    'h36m_smplx': h36m.H36M_KEYPOINTS_SMPLX,
    'pw3d': pw3d.PW3D_KEYPOINTS,
    'mpii': mpii.MPII_KEYPOINTS,
    'lsp': lsp.LSP_KEYPOINTS,
    'posetrack': posetrack.POSETRACK_KEYPOINTS,
    'instavariety': instavariety.INSTAVARIETY_KEYPOINTS,
    'openpose_25': openpose.OPENPOSE_25_KEYPOINTS,
    'openpose_118': openpose.OPENPOSE_118_KEYPOINTS,
    'openpose_135': openpose.OPENPOSE_135_KEYPOINTS,
    'openpose_137': openpose.OPENPOSE_137_KEYPOINTS,
    'hybrik_29': hybrik.HYBRIK_29_KEYPOINTS,
    'hybrik_hp3d': mpi_inf_3dhp.HYBRIK_MPI_INF_3DHP_KEYPOINTS,
    # pymafx_49 is a composite convention: openpose body + pymafx extras
    'pymafx_49':
    openpose.OPENPOSE_25_KEYPOINTS + pymafx_smplx.PYMAFX_SMPLX_KEYPOINTS,
    'gta': gta.GTA_KEYPOINTS,
    'flame': flame.FLAME_73_KEYPOINTS,
    'face3d': face3d.FACE3D_IND,
    'spin_smplx': spin_smplx.SPIN_SMPLX_KEYPOINTS,
    'mano': mano.MANO_KEYPOINTS,
    'mano_left': mano.MANO_LEFT_KEYPOINTS,
    'mano_right': mano.MANO_RIGHT_KEYPOINTS,
    'mano_hands': mano.MANO_HANDS_KEYPOINTS,
    'mano_left_reorder': mano.MANO_LEFT_REORDER_KEYPOINTS,
    'mano_right_reorder': mano.MANO_RIGHT_REORDER_KEYPOINTS,
    'mano_hands_reorder': mano.MANO_HANDS_REORDER_KEYPOINTS,
    'mediapipe_whole_body': mediapipe.MP_WHOLE_BODY_KEYPOINTS,
    'mediapipe_body': mediapipe.MP_BODY_KEYPOINTS,
}
def get_different_colors(number_of_colors,
                         flag=0,
                         alpha: float = 1.0,
                         mode: str = 'bgr',
                         int_dtype: bool = True):
    """Get a numpy array of distinct colors of shape (N, len(mode)).

    Args:
        number_of_colors (int): N, how many colors to generate. Hues are
            spread evenly over [0, 360) degrees.
        flag (int, optional): random seed for the lightness/saturation
            jitter, so output is reproducible. Defaults to 0.
        alpha (float, optional): value filled into the alpha channel when
            'a' is in ``mode``. Defaults to 1.0.
        mode (str, optional): channel order, a string over
            {'b', 'g', 'r', 'a'}, e.g. 'bgr', 'rgb', 'bgra'.
            Defaults to 'bgr'.
        int_dtype (bool, optional): if True, color channels are uint8 in
            [0, 255]; otherwise float32 in [0, 1]. Defaults to True.

    Returns:
        np.ndarray: colors of shape (N, len(mode)).
    """
    mode = mode.lower()
    assert set(mode).issubset({'r', 'g', 'b', 'a'})
    # save and restore the global random state so seeding here does not
    # disturb callers
    nst0 = np.random.get_state()
    np.random.seed(flag)
    colors = []
    for i in np.arange(0., 360., 360. / number_of_colors):
        hue = i / 360.
        lightness = (50 + np.random.rand() * 10) / 100.
        saturation = (90 + np.random.rand() * 10) / 100.
        colors.append(colorsys.hls_to_rgb(hue, lightness, saturation))
    colors_np = np.asarray(colors)
    if int_dtype:
        colors_bgr = (255 * colors_np).astype(np.uint8)
    else:
        colors_bgr = colors_np.astype(np.float32)
    # recover the random state
    np.random.set_state(nst0)
    color_dict = {}
    if 'a' in mode:
        # a single alpha column of shape (N, 1); the previous (N, 3) shape
        # made e.g. 'bgra' concatenate to (N, 6) instead of (N, 4).
        # NOTE(review): alpha stays in [0, 1] even when int_dtype=True —
        # confirm whether it should be scaled to 255 for integer output.
        color_dict['a'] = np.ones((colors_bgr.shape[0], 1)) * alpha
    # NOTE(review): colorsys.hls_to_rgb returns (r, g, b), yet column 0 is
    # labelled 'b' here — confirm the intended channel order.
    color_dict['b'] = colors_bgr[:, 0:1]
    color_dict['g'] = colors_bgr[:, 1:2]
    color_dict['r'] = colors_bgr[:, 2:3]
    colors_final = []
    for channel in mode:
        colors_final.append(color_dict[channel])
    colors_final = np.concatenate(colors_final, -1)
    return colors_final
def images_to_video(input_folder: str,
                    output_path: str,
                    remove_raw_file: bool = False,
                    img_format: str = '%06d.png',
                    fps: Union[int, float] = 30,
                    resolution: Optional[Union[Tuple[int, int],
                                               Tuple[float, float]]] = None,
                    start: int = 0,
                    end: Optional[int] = None,
                    disable_log: bool = False) -> None:
    """Convert a folder of images to a video.

    Args:
        input_folder (str): input image folder
        output_path (str): output video file path
        remove_raw_file (bool, optional): whether remove raw images.
            Defaults to False.
        img_format (str, optional): format to name the images].
            Defaults to '%06d.png'.
        fps (Union[int, float], optional): output video fps. Defaults to 30.
        resolution (Optional[Union[Tuple[int, int], Tuple[float, float]]],
            optional): (height, width) of output.
            defaults to None.
        start (int, optional): start frame index. Inclusive.
            If < 0, will be converted to frame_index range in [0, frame_num].
            Defaults to 0.
        end (int, optional): end frame index. Exclusive.
            Could be positive int or negative int or None.
            If None, all frames from start till the last frame are included.
            Defaults to None.
        disable_log (bool, optional): whether close the ffmepg command info.
            Defaults to False.
    Raises:
        FileNotFoundError: check the input path.
        FileNotFoundError: check the output path.
    Returns:
        None
    """
    check_input_path(
        input_folder,
        allowed_suffix=[],
        tag='input image folder',
        path_type='dir')
    prepare_output_path(
        output_path,
        allowed_suffix=['.mp4'],
        tag='output video',
        path_type='file',
        overwrite=True)
    input_folderinfo = Path(input_folder)
    # wrap negative / overflowing indices into [0, num_frames)
    num_frames = len(os.listdir(input_folder))
    start = (min(start, num_frames - 1) + num_frames) % num_frames
    end = (min(end, num_frames - 1) +
           num_frames) % num_frames if end is not None else num_frames
    temp_input_folder = None
    if img_format is None:
        # unknown naming scheme: renumber frames into a sibling temp folder
        # so ffmpeg can consume them sequentially
        # (images_to_sorted_images is defined elsewhere in this package)
        temp_input_folder = os.path.join(input_folderinfo.parent,
                                        input_folderinfo.name + '_temp')
        img_format = images_to_sorted_images(input_folder, temp_input_folder)
    # build the ffmpeg command; baseline H.264 + yuv420p for compatibility
    command = [
        'ffmpeg',
        '-y',
        '-threads',
        '4',
        '-start_number',
        f'{start}',
        '-r',
        f'{fps}',
        '-i',
        f'{input_folder}/{img_format}'
        if temp_input_folder is None else f'{temp_input_folder}/{img_format}',
        '-frames:v',
        f'{end - start}',
        '-profile:v',
        'baseline',
        '-level',
        '3.0',
        '-c:v',
        'libx264',
        '-pix_fmt',
        'yuv420p',
        '-an',
        '-v',
        'error',
        '-loglevel',
        'error',
        output_path,
    ]
    if resolution:
        height, width = resolution
        # round up to even dimensions as required by yuv420p/libx264
        width += width % 2
        height += height % 2
        command.insert(1, '-s')
        command.insert(2, '%dx%d' % (width, height))
    if not disable_log:
        print(f'Running \"{" ".join(command)}\"')
    subprocess.call(command)
    if remove_raw_file:
        if Path(input_folder).is_dir():
            shutil.rmtree(input_folder)
    # always clean the renumbered temp folder, whatever remove_raw_file is
    if temp_input_folder is not None:
        if Path(temp_input_folder).is_dir():
            shutil.rmtree(temp_input_folder)
def check_path_suffix(path_str: str,
                      allowed_suffix: Union[str, List[str]] = '') -> bool:
    """Check whether the suffix of the path is allowed.

    Args:
        path_str (str):
            Path to check.
        allowed_suffix (Union[str, List[str]], optional):
            What extension names are allowed.
            Offer a list like ['.jpg', '.jpeg'] (a missing leading dot is
            added automatically; matching is case-insensitive).
            When it's [], all will be received.
            Use [''] then directory is allowed.
            Defaults to ''.

    Returns:
        bool:
            True: suffix test passed
            False: suffix test failed
    """
    if isinstance(allowed_suffix, str):
        allowed_suffix = [allowed_suffix]
    if len(allowed_suffix) == 0:
        # empty whitelist accepts everything
        return True
    pathinfo = Path(path_str)
    if pathinfo.is_dir():
        # an existing directory only matches the '' pseudo-suffix
        return '' in allowed_suffix
    # normalize into a fresh list so the caller's list is never mutated
    # (the previous implementation rewrote allowed_suffix entries in place)
    normalized = [
        suffix.lower() if suffix.startswith('.') else '.' + suffix.lower()
        for suffix in allowed_suffix
    ]
    return pathinfo.suffix.lower() in normalized
The provided code snippet includes necessary dependencies for implementing the `visualize_kp2d` function. Write a Python function `def visualize_kp2d( kp2d: np.ndarray, output_path: Optional[str] = None, frame_list: Optional[List[str]] = None, origin_frames: Optional[str] = None, image_array: Optional[np.ndarray] = None, limbs: Optional[Union[np.ndarray, List[int]]] = None, palette: Optional[Iterable[int]] = None, data_source: str = 'coco', mask: Optional[Union[list, np.ndarray]] = None, img_format: str = '%06d.png', start: int = 0, end: int = -1, overwrite: bool = False, with_file_name: bool = True, resolution: Optional[Union[Tuple[int, int], list]] = None, fps: Union[float, int] = 30, draw_bbox: bool = False, with_number: bool = False, pop_parts: Iterable[str] = None, disable_tqdm: bool = False, disable_limbs: bool = False, return_array: Optional[bool] = False, keypoints_factory: dict = KEYPOINTS_FACTORY, remove_raw_file: bool = True, ) -> Union[None, np.ndarray]` to solve the following problem:
Visualize 2d keypoints to a video or into a folder of frames. Args: kp2d (np.ndarray): should be array of shape (f * J * 2) or (f * n * J * 2)] output_path (str): output video path or image folder. frame_list (Optional[List[str]], optional): list of origin background frame paths, element in list each should be a image path like `*.jpg` or `*.png`. Higher priority than `origin_frames`. Use this when your file names is hard to sort or you only want to render a small number frames. Defaults to None. origin_frames (Optional[str], optional): origin background frame path, could be `.mp4`, `.gif`(will be sliced into a folder) or an image folder. Lower priority than `frame_list`. Defaults to None. limbs (Optional[Union[np.ndarray, List[int]]], optional): if not specified, the limbs will be searched by search_limbs, this option is for free skeletons like BVH file. Defaults to None. palette (Iterable, optional): specified palette, three int represents (B, G, R). Should be tuple or list. Defaults to None. data_source (str, optional): data source type. Defaults to 'coco'. mask (Optional[Union[list, np.ndarray]], optional): mask to mask out the incorrect point. Pass a `np.ndarray` of shape (J,) or `list` of length J. Defaults to None. img_format (str, optional): input image format. Default to '%06d.png', start (int, optional): start frame index. Defaults to 0. end (int, optional): end frame index. Defaults to -1. overwrite (bool, optional): whether replace the origin frames. Defaults to False. with_file_name (bool, optional): whether write origin frame name on the images. Defaults to True. resolution (Optional[Union[Tuple[int, int], list]], optional): (height, width) of the output video will be the same size as the original images if not specified. Defaults to None. fps (Union[float, int], optional): fps. Defaults to 30. draw_bbox (bool, optional): whether need to draw bounding boxes. Defaults to False. with_number (bool, optional): whether draw index number. Defaults to False. 
pop_parts (Iterable[str], optional): The body part names you do not want to visualize. Supported parts are ['left_eye','right_eye' ,'nose', 'mouth', 'face', 'left_hand', 'right_hand']. Defaults to [].frame_list disable_tqdm (bool, optional): Whether to disable the entire progressbar wrapper. Defaults to False. disable_limbs (bool, optional): whether need to disable drawing limbs. Defaults to False. return_array (bool, optional): Whether to return images as a opencv array. Defaults to None. keypoints_factory (dict, optional): Dict of all the conventions. Defaults to KEYPOINTS_FACTORY. Raises: FileNotFoundError: check output video path. FileNotFoundError: check input frame paths. Returns: Union[None, np.ndarray].
Here is the function:
def visualize_kp2d(
    kp2d: np.ndarray,
    output_path: Optional[str] = None,
    frame_list: Optional[List[str]] = None,
    origin_frames: Optional[str] = None,
    image_array: Optional[np.ndarray] = None,
    limbs: Optional[Union[np.ndarray, List[int]]] = None,
    palette: Optional[Iterable[int]] = None,
    data_source: str = 'coco',
    mask: Optional[Union[list, np.ndarray]] = None,
    img_format: str = '%06d.png',
    start: int = 0,
    end: int = -1,
    overwrite: bool = False,
    with_file_name: bool = True,
    resolution: Optional[Union[Tuple[int, int], list]] = None,
    fps: Union[float, int] = 30,
    draw_bbox: bool = False,
    with_number: bool = False,
    pop_parts: Iterable[str] = None,
    disable_tqdm: bool = False,
    disable_limbs: bool = False,
    return_array: Optional[bool] = False,
    keypoints_factory: dict = KEYPOINTS_FACTORY,
    remove_raw_file: bool = True,
) -> Union[None, np.ndarray]:
    """Visualize 2d keypoints to a video or into a folder of frames.

    Args:
        kp2d (np.ndarray): should be array of shape (f * J * 2)
            or (f * n * J * 2)]
        output_path (str): output video path or image folder.
        frame_list (Optional[List[str]], optional): list of origin background
            frame paths, element in list each should be a image path like
            `*.jpg` or `*.png`. Higher priority than `origin_frames`.
            Use this when your file names is hard to sort or you only want to
            render a small number frames.
            Defaults to None.
        origin_frames (Optional[str], optional): origin background frame path,
            could be `.mp4`, `.gif`(will be sliced into a folder) or an image
            folder. Lower priority than `frame_list`.
            Defaults to None.
        image_array (Optional[np.ndarray], optional): array of background
            frames (f * H * W * 3); overrides both frame sources and forces
            return_array. Defaults to None.
        limbs (Optional[Union[np.ndarray, List[int]]], optional):
            if not specified, the limbs will be searched by search_limbs,
            this option is for free skeletons like BVH file.
            Defaults to None.
        palette (Iterable, optional): specified palette, three int represents
            (B, G, R). Should be tuple or list.
            Defaults to None.
        data_source (str, optional): data source type. Defaults to 'coco'.
        mask (Optional[Union[list, np.ndarray]], optional):
            mask to mask out the incorrect point.
            Pass a `np.ndarray` of shape (J,) or `list` of length J.
            Defaults to None.
        img_format (str, optional): input image format.
            Defaults to '%06d.png'.
        start (int, optional): start frame index. Defaults to 0.
        end (int, optional): end frame index. Defaults to -1.
        overwrite (bool, optional): whether replace the origin frames.
            Defaults to False.
        with_file_name (bool, optional): whether write origin frame name on
            the images. Defaults to True.
        resolution (Optional[Union[Tuple[int, int], list]], optional):
            (height, width) of the output video
            will be the same size as the original images if not specified.
            Defaults to None.
        fps (Union[float, int], optional): fps. Defaults to 30.
        draw_bbox (bool, optional): whether need to draw bounding boxes.
            Defaults to False.
        with_number (bool, optional): whether draw index number.
            Defaults to False.
        pop_parts (Iterable[str], optional): The body part names you do not
            want to visualize. Supported parts are ['left_eye','right_eye'
            ,'nose', 'mouth', 'face', 'left_hand', 'right_hand'].
            Defaults to None (treated as an empty list).
        disable_tqdm (bool, optional):
            Whether to disable the entire progressbar wrapper.
            Defaults to False.
        disable_limbs (bool, optional): whether need to disable drawing limbs.
            Defaults to False.
        return_array (bool, optional): Whether to return images as a opencv
            array. Defaults to False.
        keypoints_factory (dict, optional): Dict of all the conventions.
            Defaults to KEYPOINTS_FACTORY.
        remove_raw_file (bool, optional): whether to delete the temp frame
            folder after encoding the video. Defaults to True.
    Raises:
        FileNotFoundError: check output video path.
        FileNotFoundError: check input frame paths.
    Returns:
        Union[None, np.ndarray].
    """
    # check the input array shape, reshape to (num_frames, num_person, J, 2)
    kp2d = kp2d[..., :2].copy()
    if kp2d.ndim == 3:
        # single-person input: insert a person axis
        kp2d = kp2d[:, np.newaxis]
    assert kp2d.ndim == 4
    num_frames, num_person = kp2d.shape[0], kp2d.shape[1]
    # slice the input array temporally; negative `end` wraps around
    end = (min(num_frames - 1, end) + num_frames) % num_frames
    kp2d = kp2d[start:end + 1]
    if image_array is not None:
        # an explicit image array overrides any frame files on disk
        origin_frames = None
        frame_list = None
        return_array = True
        input_temp_folder = None
    else:
        frame_list, input_temp_folder = update_frame_list(
            frame_list, origin_frames, img_format, start, end)
    # NOTE(review): kp2d was already sliced above and num_frames still holds
    # the pre-slice count, so this looks like a no-op — confirm intent
    kp2d = kp2d[:num_frames]
    # check output path
    if output_path is not None:
        output_temp_folder = _prepare_output_path(output_path, overwrite)
        # check whether temp_folder will overwrite frame_list by accident
        _check_temp_path(output_temp_folder, frame_list, overwrite)
    else:
        output_temp_folder = None
    # check data_source & mask
    if data_source not in keypoints_factory:
        raise ValueError('Wrong data_source. Should choose in'
                         f'{list(keypoints_factory.keys())}')
    if mask is not None:
        if isinstance(mask, list):
            mask = np.array(mask).reshape(-1)
        assert mask.shape == (
            len(keypoints_factory[data_source]),
        ), f'mask length should fit with keypoints number \
            {len(keypoints_factory[data_source])}'
    # search the limb connections and palettes from superset smplx
    # check and pop the pop_parts
    if pop_parts is None:
        pop_parts = []
    if disable_limbs:
        limbs_target, limbs_palette = None, None
    else:
        # search (or adopt) the limb connections and their palette
        limbs_target, limbs_palette = _prepare_limb_palette(
            limbs, palette, pop_parts, data_source, mask)
    canvas_producer = _CavasProducer(frame_list, resolution, kp2d, image_array)
    out_image_array = []
    # start plotting by frame
    for frame_index in tqdm(range(kp2d.shape[0]), disable=disable_tqdm):
        canvas, kp2d_frame = canvas_producer[frame_index]
        # start plotting by person
        for person_index in range(num_person):
            if num_person >= 2 and not disable_limbs:
                # multi-person: give each person one distinct color
                limbs_palette = get_different_colors(
                    num_person)[person_index].reshape(1, 3)
            canvas = _plot_kp2d_frame(
                kp2d_person=kp2d_frame[person_index],
                canvas=canvas,
                limbs=limbs_target,
                palette=limbs_palette,
                draw_bbox=draw_bbox,
                with_number=with_number,
                font_size=0.5,
                disable_limbs=disable_limbs)
        if with_file_name and frame_list is not None:
            # stamp the source file name onto the canvas
            h, w, _ = canvas.shape
            if frame_index <= len(frame_list) - 1:
                cv2.putText(
                    canvas, str(Path(frame_list[frame_index]).name),
                    (w // 2, h // 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5 * h / 500,
                    np.array([255, 255, 255]).astype(np.int32).tolist(), 2)
        if output_path is not None:
            # write the frame with opencv
            if frame_list is not None and check_path_suffix(
                    output_path,
                    '') and len(frame_list) >= len(canvas_producer):
                # folder output with enough source frames: keep original names
                frame_path = os.path.join(output_temp_folder,
                                          Path(frame_list[frame_index]).name)
                img_format = None
            else:
                frame_path = \
                    os.path.join(output_temp_folder, f'{frame_index:06d}.png')
                img_format = '%06d.png'
            cv2.imwrite(frame_path, canvas)
        if return_array:
            out_image_array.append(canvas[None])
    if input_temp_folder is not None:
        shutil.rmtree(input_temp_folder)
    # convert frames to video
    if output_path is not None:
        if check_path_suffix(output_path, ['.mp4']):
            images_to_video(
                input_folder=output_temp_folder,
                output_path=output_path,
                remove_raw_file=remove_raw_file,
                img_format=img_format,
                fps=fps)
    if return_array:
        out_image_array = np.concatenate(out_image_array)
        return out_image_array
with_number (bool, optional): whether draw index number. Defaults to False. pop_parts (Iterable[str], optional): The body part names you do not want to visualize. Supported parts are ['left_eye','right_eye' ,'nose', 'mouth', 'face', 'left_hand', 'right_hand']. Defaults to [].frame_list disable_tqdm (bool, optional): Whether to disable the entire progressbar wrapper. Defaults to False. disable_limbs (bool, optional): whether need to disable drawing limbs. Defaults to False. return_array (bool, optional): Whether to return images as a opencv array. Defaults to None. keypoints_factory (dict, optional): Dict of all the conventions. Defaults to KEYPOINTS_FACTORY. Raises: FileNotFoundError: check output video path. FileNotFoundError: check input frame paths. Returns: Union[None, np.ndarray]. |
14,316 | from mmcv.utils import Registry
from pytorch3d.renderer import TexturesAtlas, TexturesUV, TexturesVertex
from .textures import TexturesNearest
# Registry of texture implementations usable by the renderer. Each class is
# registered under several aliases so configs may name it in any style
# (e.g. 'TexturesUV', 'textures_uv', or 'uv').
TEXTURES = Registry('textures')

TEXTURES.register_module(
    name=['TexturesAtlas', 'textures_atlas', 'atlas', 'Atlas'],
    module=TexturesAtlas)
TEXTURES.register_module(
    name=['TexturesNearest', 'textures_nearest', 'nearest', 'Nearest'],
    module=TexturesNearest)
TEXTURES.register_module(
    name=['TexturesUV', 'textures_uv', 'uv'], module=TexturesUV)
TEXTURES.register_module(
    name=['TexturesVertex', 'textures_vertex', 'vertex', 'vc'],
    module=TexturesVertex)
The provided code snippet includes necessary dependencies for implementing the `build_textures` function. Write a Python function `def build_textures(cfg)` to solve the following problem:
Build a pytorch3d textures object from a registry config dict.
Here is the function:
def build_textures(cfg):
    """Build a textures object from a registry config.

    Args:
        cfg: mmcv-style config whose ``type`` field names one of the
            registered texture classes (any registered alias works).

    Returns:
        The instantiated textures object.
    """
    return TEXTURES.build(cfg)
14,317 | from typing import List, Union
import numpy as np
import torch
from pytorch3d.structures import list_to_padded
def normalize(value,
              origin_value_range=None,
              out_value_range=(0, 1),
              dtype=None,
              clip=False) -> Union[torch.Tensor, np.ndarray]:
    """Linearly map `value` into `out_value_range` and optionally cast dtype.

    Args:
        value (Union[torch.Tensor, np.ndarray]): input data.
        origin_value_range (tuple, optional): (min, max) of the input scale.
            When None, the data's own min/max is used. Defaults to None.
        out_value_range (tuple, optional): target (min, max).
            Defaults to (0, 1).
        dtype (optional): output dtype (a torch dtype for tensors, a numpy
            dtype for arrays). Defaults to None (keep the current dtype).
        clip (bool, optional): clamp the result into `out_value_range`.
            Defaults to False.

    Returns:
        Union[torch.Tensor, np.ndarray]: the normalized data. Inputs of any
        other type fall through and return None (kept for backward
        compatibility with the original implementation).
    """
    if origin_value_range is not None:
        value = (value - origin_value_range[0]) / (
            origin_value_range[1] - origin_value_range[0] + 1e-9)
    else:
        # Use the same epsilon as the branch above so a constant-valued
        # input does not divide by zero.
        value = (value - value.min()) / (
            value.max() - value.min() + 1e-9)

    value = value * (out_value_range[1] -
                     out_value_range[0]) + out_value_range[0]
    if clip:
        # Bug fix: the original called `torch.clip` unconditionally, which
        # raises TypeError when `value` is a numpy array.
        if isinstance(value, torch.Tensor):
            value = torch.clip(
                value, min=out_value_range[0], max=out_value_range[1])
        else:
            value = np.clip(value, out_value_range[0], out_value_range[1])
    if isinstance(value, torch.Tensor):
        if dtype is not None:
            return value.type(dtype)
        else:
            return value
    elif isinstance(value, np.ndarray):
        if dtype is not None:
            return value.astype(dtype)
        else:
            return value
The provided code snippet includes necessary dependencies for implementing the `tensor2array` function. Write a Python function `def tensor2array(image: torch.Tensor) -> np.ndarray` to solve the following problem:
Convert image tensor to array.
Here is the function:
def tensor2array(image: torch.Tensor) -> np.ndarray:
    """Convert a float image tensor in [0, 1] to a uint8 numpy array."""
    host_image = image.detach().cpu().numpy()
    return normalize(
        host_image,
        origin_value_range=(0, 1),
        out_value_range=(0, 255),
        dtype=np.uint8)
14,318 | from typing import List, Union
import numpy as np
import torch
from pytorch3d.structures import list_to_padded
def normalize(value,
              origin_value_range=None,
              out_value_range=(0, 1),
              dtype=None,
              clip=False) -> Union[torch.Tensor, np.ndarray]:
    """Linearly map `value` into `out_value_range` and optionally cast dtype.

    Args:
        value (Union[torch.Tensor, np.ndarray]): input data.
        origin_value_range (tuple, optional): (min, max) of the input scale.
            When None, the data's own min/max is used. Defaults to None.
        out_value_range (tuple, optional): target (min, max).
            Defaults to (0, 1).
        dtype (optional): output dtype (a torch dtype for tensors, a numpy
            dtype for arrays). Defaults to None (keep the current dtype).
        clip (bool, optional): clamp the result into `out_value_range`.
            Defaults to False.

    Returns:
        Union[torch.Tensor, np.ndarray]: the normalized data. Inputs of any
        other type fall through and return None (kept for backward
        compatibility with the original implementation).
    """
    if origin_value_range is not None:
        value = (value - origin_value_range[0]) / (
            origin_value_range[1] - origin_value_range[0] + 1e-9)
    else:
        # Use the same epsilon as the branch above so a constant-valued
        # input does not divide by zero.
        value = (value - value.min()) / (
            value.max() - value.min() + 1e-9)

    value = value * (out_value_range[1] -
                     out_value_range[0]) + out_value_range[0]
    if clip:
        # Bug fix: the original called `torch.clip` unconditionally, which
        # raises TypeError when `value` is a numpy array.
        if isinstance(value, torch.Tensor):
            value = torch.clip(
                value, min=out_value_range[0], max=out_value_range[1])
        else:
            value = np.clip(value, out_value_range[0], out_value_range[1])
    if isinstance(value, torch.Tensor):
        if dtype is not None:
            return value.type(dtype)
        else:
            return value
    elif isinstance(value, np.ndarray):
        if dtype is not None:
            return value.astype(dtype)
        else:
            return value
The provided code snippet includes necessary dependencies for implementing the `array2tensor` function. Write a Python function `def array2tensor(image: np.ndarray) -> torch.Tensor` to solve the following problem:
Convert image array to tensor.
Here is the function:
def array2tensor(image: np.ndarray) -> torch.Tensor:
    """Convert a uint8 image array in [0, 255] to a float32 tensor."""
    tensor_image = torch.Tensor(image)
    return normalize(
        tensor_image,
        origin_value_range=(0, 255),
        out_value_range=(0, 1),
        dtype=torch.float32)
14,319 | from typing import List, Union
import numpy as np
import torch
from pytorch3d.structures import list_to_padded
The provided code snippet includes necessary dependencies for implementing the `rgb2bgr` function. Write a Python function `def rgb2bgr(rgbs) -> Union[torch.Tensor, np.ndarray]` to solve the following problem:
Convert color channels.
Here is the function:
def rgb2bgr(rgbs) -> Union[torch.Tensor, np.ndarray]:
    """Swap the RGB/BGR channel order along the last axis."""
    channels = [rgbs[..., index, None] for index in (2, 1, 0)]
    if isinstance(rgbs, torch.Tensor):
        return torch.cat(channels, -1)
    if isinstance(rgbs, np.ndarray):
        return np.concatenate(channels, -1)
    # Preserve the original fallback: unknown types return the channel list.
    return channels
14,320 | from mmcv.utils import Registry
from .lights import AmbientLights, DirectionalLights, PointLights
LIGHTS = Registry('lights')
LIGHTS.register_module(
name=['directional', 'directional_lights', 'DirectionalLights'],
module=DirectionalLights)
LIGHTS.register_module(
name=['point', 'point_lights', 'PointLights'], module=PointLights)
LIGHTS.register_module(
name=['ambient', 'ambient_lights', 'AmbientLights'], module=AmbientLights)
The provided code snippet includes necessary dependencies for implementing the `build_lights` function. Write a Python function `def build_lights(cfg)` to solve the following problem:
Build lights.
Here is the function:
def build_lights(cfg):
    """Build a pytorch3d lights object from a registry config dict."""
    lights = LIGHTS.build(cfg)
    return lights
14,321 | from mmcv.utils import Registry
from pytorch3d.renderer import (
HardFlatShader,
HardGouraudShader,
HardPhongShader,
SoftGouraudShader,
SoftPhongShader,
)
from .shader import (
DepthShader,
NoLightShader,
NormalShader,
SegmentationShader,
SilhouetteShader,
)
SHADER = Registry('shader')
SHADER.register_module(
name=[
'flat', 'hard_flat_shader', 'hard_flat', 'HardFlat', 'HardFlatShader'
],
module=HardFlatShader)
SHADER.register_module(
name=['hard_phong', 'HardPhong', 'HardPhongShader'],
module=HardPhongShader)
SHADER.register_module(
name=['hard_gouraud', 'HardGouraud', 'HardGouraudShader'],
module=HardGouraudShader)
SHADER.register_module(
name=['soft_gouraud', 'SoftGouraud', 'SoftGouraudShader'],
module=SoftGouraudShader)
SHADER.register_module(
name=['soft_phong', 'SoftPhong', 'SoftPhongShader'],
module=SoftPhongShader)
SHADER.register_module(
name=['silhouette', 'Silhouette', 'SilhouetteShader'],
module=SilhouetteShader)
SHADER.register_module(
name=['nolight', 'nolight_shader', 'NoLight', 'NoLightShader'],
module=NoLightShader)
SHADER.register_module(
name=['normal', 'normal_shader', 'Normal', 'NormalShader'],
module=NormalShader)
SHADER.register_module(
name=['depth', 'depth_shader', 'Depth', 'DepthShader'], module=DepthShader)
SHADER.register_module(
name=[
'segmentation', 'segmentation_shader', 'Segmentation',
'SegmentationShader'
],
module=SegmentationShader)
The provided code snippet includes necessary dependencies for implementing the `build_shader` function. Write a Python function `def build_shader(cfg)` to solve the following problem:
Build shader.
Here is the function:
def build_shader(cfg):
    """Build a shader object from a registry config dict."""
    shader = SHADER.build(cfg)
    return shader
14,322 | from typing import Iterable, List, Union
import numpy as np
import torch
import torch.nn as nn
from pytorch3d.renderer import TexturesUV, TexturesVertex
from pytorch3d.renderer.mesh.textures import TexturesBase
from pytorch3d.structures import Meshes, list_to_padded, padded_to_list
from mmhuman3d.models.body_models.builder import SMPL, SMPLX, STAR
from mmhuman3d.utils.mesh_utils import \
join_meshes_as_batch as _join_meshes_as_batch
from .builder import build_renderer
from .textures.textures import TexturesNearest
from .utils import align_input_to_padded
class ParametricMeshes(Meshes):
    """Mesh structure for parametric body models, E.g., smpl, smplx, mano,
    flame.

    There are 3 ways to initialize the verts:
        1): Pass the verts directly as verts_padded (N, V, 3) or verts_list
            (list of (N, 3)).
        2): Pass body_model and pose_params.
        3): Pass meshes. Could be Meshes or ParametricMeshes.
            Will use the verts from the meshes.

    There are 3 ways to initialize the faces:
        1): Pass the faces directly as faces_padded (N, F, 3) or faces_list
            (list of (F, 3)).
        2): Pass body_model and will use body_model.faces_tensor.
        3): Pass meshes. Could be Meshes or ParametricMeshes.
            Will use the faces from the meshes.

    There are 4 ways to initialize the textures.
        1): Pass the textures directly.
        2): Pass the texture_images of shape (H, W, 3) for single person or
            (_N_individual, H, W, 3) for multi-person. `body_model` should be
            passed and should has `uv_renderer`.
        3): Pass the vertex_color of shape (3) or (V, 3) or (N, V, 3).
        4): Pass meshes. Could be Meshes or ParametricMeshes.
            Will use the textures directly from the meshes.
    """
    # TODO: More model class to be added (FLAME, MANO)
    # Maps model_type string -> body model class; the class supplies the
    # per-person NUM_VERTS / NUM_FACES constants used throughout.
    MODEL_CLASSES = {'smpl': SMPL, 'smplx': SMPLX, 'star': STAR}

    def __init__(self,
                 verts: Union[List[torch.Tensor], torch.Tensor] = None,
                 faces: Union[List[torch.Tensor], torch.Tensor] = None,
                 textures: TexturesBase = None,
                 meshes: Meshes = None,
                 body_model: Union[nn.Module, dict] = None,
                 uv_renderer: Union[nn.Module, dict] = None,
                 vertex_color: Union[Iterable[float], torch.Tensor,
                                     np.ndarray] = ((1, 1, 1), ),
                 use_nearest: bool = False,
                 texture_images: Union[torch.Tensor, List[torch.Tensor],
                                       None] = None,
                 model_type: str = 'smpl',
                 N_individual_override: int = None,
                 *,
                 verts_normals: torch.Tensor = None,
                 **pose_params) -> None:
        # An existing Meshes overrides verts/faces/textures wholesale.
        if isinstance(meshes, Meshes):
            verts = meshes.verts_padded()
            faces = meshes.faces_padded()
            textures = meshes.textures
        self.model_type = body_model._get_name().lower(
        ) if body_model is not None else model_type
        self.model_class = self.MODEL_CLASSES[self.model_type]

        use_list = False
        # format verts as verts_padded: (N, V, 3)
        if verts is None:
            assert body_model is not None
            verts = body_model(**pose_params)['vertices']
        elif isinstance(verts, list):
            verts = list_to_padded(verts)
            # remember to convert back to list form before calling super()
            use_list = True
        # specify number of individuals
        if N_individual_override is not None:
            verts = verts.view(
                -1, self.model_class.NUM_VERTS * N_individual_override, 3)

        # the information of _N_individual should be revealed in verts's shape
        self._N_individual = int(verts.shape[-2] // self.model_class.NUM_VERTS)

        assert verts.shape[1] % self.model_class.NUM_VERTS == 0
        verts = verts.view(-1, self.model_class.NUM_VERTS * self._N_individual,
                           3)
        device = verts.device
        N, V, _ = verts.shape

        # format faces as faces_padded: (N, F, 3)
        # `face_individual` caches the single-person face template used by
        # `get_faces_padded` to rebuild multi-person faces with offsets.
        if isinstance(faces, list):
            faces = list_to_padded(faces)
            self.face_individual = faces[0][:self.model_class.NUM_FACES].to(
                device)
        elif faces is None:
            assert body_model is not None
            self.face_individual = body_model.faces_tensor[None].to(device)
            faces = self.get_faces_padded(N, self._N_individual)
        elif isinstance(faces, torch.Tensor):
            faces = align_input_to_padded(faces, ndim=3, batch_size=N)
            self.face_individual = faces[:1, :self.model_class.NUM_FACES].to(
                device)
        else:
            raise ValueError(f'Wrong type of faces: {type(faces)}.')

        assert faces.shape == (N,
                               self.model_class.NUM_FACES * self._N_individual,
                               3)
        F = faces.shape[1]
        if textures is None:
            if texture_images is None:
                # input vertex_color should be
                #   (3), (1, 3), (1, 1, 3). all the same color
                #   (V, 3), (1, V, 3), each vertex has a single color
                #   (N, V, 3), each batch each vertex has a single color
                if isinstance(vertex_color, (tuple, list)):
                    vertex_color = torch.Tensor(vertex_color)
                elif isinstance(vertex_color, np.ndarray):
                    vertex_color = torch.from_numpy(vertex_color)
                if vertex_color.numel() == 3:
                    vertex_color = vertex_color.view(1, 3).repeat(V, 1)
                vertex_color = align_input_to_padded(
                    vertex_color, ndim=3, batch_size=N)
                assert vertex_color.shape == verts.shape
                if use_nearest:
                    textures = TexturesNearest(
                        verts_features=vertex_color).to(device)
                else:
                    textures = TexturesVertex(
                        verts_features=vertex_color).to(device)
            else:
                # UV-mapped textures: wrap the images with the uv_renderer.
                texture_images = align_input_to_padded(
                    texture_images, ndim=4, batch_size=N).to(device)
                assert uv_renderer is not None
                if isinstance(uv_renderer, dict):
                    uv_renderer = build_renderer(uv_renderer)
                uv_renderer = uv_renderer.to(device)
                textures = uv_renderer.wrap_texture(texture_images).to(device)
                if self._N_individual > 1:
                    textures = textures.join_scene()
                textures = textures.extend(N)

        num_verts_per_mesh = [V for _ in range(N)]
        num_faces_per_mesh = [F for _ in range(N)]
        if use_list:
            verts = padded_to_list(verts, num_verts_per_mesh)
            faces = padded_to_list(faces, num_faces_per_mesh)
        super().__init__(
            verts=verts,
            faces=faces,
            textures=textures,
            verts_normals=verts_normals,
        )

    def get_faces_padded(self, N_batch, N_individual):
        """Tile the single-person face template (N_batch, N_individual) times
        and offset vertex indices so each individual addresses its own vertex
        block."""
        faces = self.face_individual.repeat(N_batch, N_individual, 1)
        faces_offset = torch.arange(N_individual).view(N_individual, 1).repeat(
            1, self.model_class.NUM_FACES).view(1, -1, 1).to(faces.device)
        faces = faces + faces_offset * self.model_class.NUM_VERTS
        return faces

    def _compute_list(self):
        """Populate the cached list representations of faces and verts."""
        self._faces_list = self.faces_list()
        self._verts_list = self.verts_list()

    def extend(self, N_batch: int, N_scene: int = 1):
        """Repeat the meshes N_batch times along the batch dim and N_scene
        times along the scene (individual) dim."""
        if N_batch == 1:
            meshes_batch = self
        else:
            meshes_batch = join_meshes_as_batch([self for _ in range(N_batch)])

        if N_scene == 1:
            meshes = meshes_batch
        else:
            meshes = join_batch_meshes_as_scene(
                [meshes_batch for _ in range(N_scene)])
        return meshes

    def clone(self):
        """Modified from pytorch3d and add `model_type` in
        __class__.__init__."""
        verts_list = self.verts_list()
        faces_list = self.faces_list()
        new_verts_list = [v.clone() for v in verts_list]
        new_faces_list = [f.clone() for f in faces_list]
        other = self.__class__(
            verts=new_verts_list,
            faces=new_faces_list,
            model_type=self.model_type)
        for k in self._INTERNAL_TENSORS:
            v = getattr(self, k)
            if torch.is_tensor(v):
                setattr(other, k, v.clone())

        # Textures is not a tensor but has a clone method
        if self.textures is not None:
            other.textures = self.textures.clone()
        return other

    def detach(self):
        """Modified from pytorch3d and add `model_type` in
        __class__.__init__."""
        verts_list = self.verts_list()
        faces_list = self.faces_list()
        new_verts_list = [v.detach() for v in verts_list]
        new_faces_list = [f.detach() for f in faces_list]
        other = self.__class__(
            verts=new_verts_list,
            faces=new_faces_list,
            model_type=self.model_type)

        for k in self._INTERNAL_TENSORS:
            v = getattr(self, k)
            if torch.is_tensor(v):
                setattr(other, k, v.detach())

        # Textures is not a tensor but has a detach method
        if self.textures is not None:
            other.textures = self.textures.detach()
        return other

    def update_padded(self, new_verts_padded: torch.Tensor):
        """Modified from pytorch3d and add `model_type` in
        __class__.__init__."""

        def check_shapes(x, size):
            if x.shape[0] != size[0]:
                raise ValueError('new values must have the same batch size.')
            if x.shape[1] != size[1]:
                raise ValueError(
                    'new values must have the same number of points.')
            if x.shape[2] != size[2]:
                raise ValueError('new values must have the same dimension.')

        check_shapes(new_verts_padded, [self._N, self._V, 3])

        new = self.__class__(
            verts=new_verts_padded,
            faces=self.faces_padded(),
            model_type=self.model_type)

        if new._N != self._N or new._V != self._V or new._F != self._F:
            raise ValueError('Inconsistent sizes after construction.')

        # overwrite the equisized flag
        new.equisized = self.equisized

        # overwrite textures if any
        new.textures = self.textures

        # copy auxiliary tensors
        copy_tensors = ['_num_verts_per_mesh', '_num_faces_per_mesh', 'valid']

        for k in copy_tensors:
            v = getattr(self, k)
            if torch.is_tensor(v):
                setattr(new, k, v)  # shallow copy

        # shallow copy of faces_list if any, st new.faces_list()
        # does not re-compute from _faces_padded
        new._faces_list = self._faces_list

        # update verts/faces packed if they are computed in self
        if self._verts_packed is not None:
            copy_tensors = [
                '_faces_packed',
                '_verts_packed_to_mesh_idx',
                '_faces_packed_to_mesh_idx',
                '_mesh_to_verts_packed_first_idx',
                '_mesh_to_faces_packed_first_idx',
            ]
            for k in copy_tensors:
                v = getattr(self, k)
                assert torch.is_tensor(v)
                setattr(new, k, v)  # shallow copy
            # update verts_packed
            pad_to_packed = self.verts_padded_to_packed_idx()
            new_verts_packed = new_verts_padded.reshape(-1,
                                                        3)[pad_to_packed, :]
            new._verts_packed = new_verts_packed
            new._verts_padded_to_packed_idx = pad_to_packed

        # update edges packed if they are computed in self
        if self._edges_packed is not None:
            copy_tensors = [
                '_edges_packed',
                '_edges_packed_to_mesh_idx',
                '_mesh_to_edges_packed_first_idx',
                '_faces_packed_to_edges_packed',
                '_num_edges_per_mesh',
            ]
            for k in copy_tensors:
                v = getattr(self, k)
                assert torch.is_tensor(v)
                setattr(new, k, v)  # shallow copy

        # update laplacian if it is compute in self
        if self._laplacian_packed is not None:
            new._laplacian_packed = self._laplacian_packed

        assert new._verts_list is None
        assert new._verts_normals_packed is None
        assert new._faces_normals_packed is None
        assert new._faces_areas_packed is None

        return new

    def __getitem__(self, index: Union[tuple, int, list, slice, torch.Tensor]):
        """Slice the meshes by the batch dim like pytorch3d Meshes. And slice
        by scene dim due to the topology of the parametric meshes.

        Args:
            index (Union[tuple, int, list, slice, torch.Tensor]): indexes, if
                pass only one augment, will ignore the scene dim.
        """
        if isinstance(index, tuple):
            batch_index, individual_index = index
        else:
            batch_index, individual_index = index, None

        # Normalize batch_index to a long tensor on the right device.
        if isinstance(batch_index, int):
            batch_index = [batch_index]
        elif isinstance(batch_index, (tuple, list, slice)):
            batch_index = torch.arange(self._N)[batch_index]
        batch_index = torch.tensor(batch_index) if not isinstance(
            batch_index, torch.Tensor) else batch_index
        batch_index = batch_index.to(self.device, dtype=torch.long)

        if (batch_index >= self._N).any():
            raise IndexError('list index out of range')

        if individual_index is None:
            return self.__class__(
                verts=self.verts_padded()[batch_index],
                faces=self.faces_padded()[batch_index],
                textures=self.textures[batch_index]
                if self.textures is not None else None,
                model_type=self.model_type)

        # Normalize individual_index the same way.
        if isinstance(individual_index, int):
            individual_index = [individual_index]
        elif isinstance(individual_index, (tuple, list, slice)):
            individual_index = torch.arange(
                self._N_individual)[individual_index]
        individual_index = torch.tensor(individual_index) if not isinstance(
            individual_index, torch.Tensor) else individual_index
        if (individual_index > self._N_individual).any():
            raise IndexError('list index out of range')
        # Gather the vertex ids belonging to the selected individuals.
        vertex_index = [
            torch.arange(self.model_class.NUM_VERTS) +
            idx * self.model_class.NUM_VERTS for idx in individual_index
        ]
        vertex_index = torch.cat(vertex_index).to(self.device).long()
        new_face_num = self.model_class.NUM_FACES * len(individual_index)

        verts_padded = self.verts_padded()[batch_index][:, vertex_index]
        faces_padded = self.get_faces_padded(
            len(verts_padded), len(individual_index))

        textures_batch = self.textures[batch_index]

        # NOTE(review): if `textures_batch` is neither TexturesUV nor a
        # vertex-feature texture, `textures` below is never assigned and a
        # NameError would be raised -- confirm upstream only passes these
        # texture types here.
        if isinstance(textures_batch, TexturesUV):
            # TODO: there is still some problem with `TexturesUV`
            # slice and need to fix the function `join_meshes_as_scene`.
            # It is recommended that we re-inplement the `TexturesUV`
            # as `ParametricTexturesUV`, mainly for the `__getitem__`
            # and `join_scene` functions.
            # textures_batch.get('unique_map_index ')
            # This version only consider the maps tensor as different id.
            maps = textures_batch.maps_padded()
            width_individual = maps.shape[-2] // self._N_individual
            maps_index = [
                torch.arange(width_individual * idx,
                             width_individual * (idx + 1))
                for idx in individual_index
            ]
            maps_index = torch.cat(maps_index).to(self.device)
            verts_uvs_padded = textures_batch.verts_uvs_padded(
            )[:, :len(vertex_index)] * torch.Tensor([
                self._N_individual / len(individual_index), 1
            ]).view(1, 1, 2).to(self.device)
            faces_uvs_padded = textures_batch.faces_uvs_padded(
            )[:, :new_face_num]
            maps_padded = maps[:, :, maps_index]
            textures = TexturesUV(
                faces_uvs=faces_uvs_padded,
                verts_uvs=verts_uvs_padded,
                maps=maps_padded)
        elif isinstance(textures_batch, (TexturesVertex, TexturesNearest)):
            verts_features_padded = textures_batch.verts_features_padded(
            )[:, vertex_index]
            textures = textures_batch.__class__(verts_features_padded)
        meshes = self.__class__(
            verts=verts_padded,
            faces=faces_padded,
            textures=textures,
            model_type=self.model_type)
        return meshes

    # NOTE(review): `join_batch_meshes_as_scene` reads `mesh.shape[1]`, which
    # only works if this is a property -- the `@property` decorator was
    # likely lost during extraction; confirm against upstream.
    def shape(self, ):
        """Return (batch size, number of individuals per scene)."""
        return (len(self), self._N_individual)
def join_meshes_as_batch(meshes: List[ParametricMeshes],
                         include_textures: bool = True) -> ParametricMeshes:
    """Concatenate a list of ParametricMeshes along the batch dim.

    Args:
        meshes (List[ParametricMeshes]): a list of ParametricMeshes (or
            Meshes) objects to be concatenated; passing a single
            ParametricMeshes is rejected.
        include_textures (bool, optional): whether to try to join the
            textures. Defaults to True.

    Returns:
        ParametricMeshes: the joined ParametricMeshes.
    """
    if isinstance(meshes, ParametricMeshes):
        raise ValueError('Wrong first argument to join_meshes_as_batch.')
    reference = meshes[0]
    # All members must share one parametric topology.
    same_type = all(
        mesh.model_type == reference.model_type for mesh in meshes)
    assert same_type, 'model_type should all be the same.'
    joined = _join_meshes_as_batch(meshes, include_textures=include_textures)
    return ParametricMeshes(model_type=reference.model_type, meshes=joined)
def join_meshes_as_scene(meshes: Union[ParametricMeshes,
                                       List[ParametricMeshes]],
                         include_textures: bool = True) -> ParametricMeshes:
    """Join the meshes along the scene dim.

    Args:
        meshes (Union[ParametricMeshes, List[ParametricMeshes]]):
            ParametricMeshes object that contains a batch of meshes,
            or a list of ParametricMeshes objects.
        include_textures (bool, optional): whether to try to join the textures.
            Defaults to True.

    Returns:
        ParametricMeshes: the joined ParametricMeshes.
    """
    first = meshes[0]
    assert all(mesh.model_type == first.model_type
               for mesh in meshes), 'model_type should all be the same.'
    # A list is first flattened into one batch, then packed into one scene.
    if isinstance(meshes, List):
        meshes = join_meshes_as_batch(
            meshes, include_textures=include_textures)

    # A single-element batch is already a "scene"; return it unchanged.
    if len(meshes) == 1:
        return meshes
    verts = meshes.verts_packed()  # (sum(V_n), 3)
    # Offset automatically done by faces_packed
    faces = meshes.faces_packed()  # (sum(F_n), 3)

    textures = None
    if include_textures and meshes.textures is not None:
        textures = meshes.textures.join_scene()
    mesh = ParametricMeshes(
        verts=verts.unsqueeze(0),
        faces=faces.unsqueeze(0),
        textures=textures,
        model_type=first.model_type)
    return mesh
The provided code snippet includes necessary dependencies for implementing the `join_batch_meshes_as_scene` function. Write a Python function `def join_batch_meshes_as_scene( meshes: List[ParametricMeshes], include_textures: bool = True) -> ParametricMeshes` to solve the following problem:
Join `meshes` as a scene each batch. For ParametricMeshes. The Meshes must share the same batch size, and topology could be different. They must all be on the same device. If `include_textures` is true, the textures should be the same type, all be None is not accepted. If `include_textures` is False, textures are ignored. The return meshes will have no textures. Args: meshes (List[ParametricMeshes]): Meshes object that contains a list of Meshes objects. include_textures (bool, optional): whether to try to join the textures. Defaults to True. Returns: New Meshes which has join different Meshes by each batch.
Here is the function:
def join_batch_meshes_as_scene(
        meshes: List[ParametricMeshes],
        include_textures: bool = True) -> ParametricMeshes:
    """Join `meshes` as a scene each batch. For ParametricMeshes. The Meshes
    must share the same batch size, and topology could be different. They must
    all be on the same device. If `include_textures` is true, the textures
    should be the same type, all be None is not accepted. If `include_textures`
    is False, textures are ignored. The return meshes will have no textures.

    Args:
        meshes (List[ParametricMeshes]): Meshes object that contains a list of
            Meshes objects.
        include_textures (bool, optional): whether to try to join the textures.
            Defaults to True.

    Returns:
        New Meshes which has join different Meshes by each batch.
    """
    first = meshes[0]
    assert all(mesh.model_type == first.model_type
               for mesh in meshes), 'model_type should all be the same.'
    assert all(len(mesh) == len(first) for mesh in meshes)
    # If scenes hold different numbers of individuals, split every input into
    # single-individual meshes so each scene element is homogeneous.
    # NOTE(review): `mesh.shape[1]` requires `ParametricMeshes.shape` to be a
    # property -- confirm the `@property` decorator exists upstream.
    if not all(mesh.shape[1] == first.shape[1] for mesh in meshes):
        meshes_temp = []
        for mesh_scene in meshes:
            meshes_temp.extend([
                mesh_scene[:, individual_index]
                for individual_index in range(mesh_scene._N_individual)
            ])
        meshes = meshes_temp
    # NOTE(review): this writes the private cache `_verts_list` on the INPUT
    # meshes (a visible side effect on the caller's objects) -- presumably to
    # force per-mesh indexing below to use list form; confirm intended.
    for mesh in meshes:
        mesh._verts_list = padded_to_list(mesh.verts_padded(),
                                          mesh.num_verts_per_mesh().tolist())
    num_scene_size = len(meshes)
    num_batch_size = len(meshes[0])
    # For each batch element, gather one mesh from every scene member and
    # fuse them into a single scene; then stack the scenes back as a batch.
    meshes_all = []
    for j in range(num_batch_size):
        meshes_batch = []
        for i in range(num_scene_size):
            meshes_batch.append(meshes[i][j])
        meshes_all.append(join_meshes_as_scene(meshes_batch, include_textures))
    meshes_final = join_meshes_as_batch(meshes_all, include_textures)
    return meshes_final
14,323 | import io
import os
import shutil
from pathlib import Path
from typing import Iterable, List, Optional, Union
import cv2
import mmcv
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
from mpl_toolkits.mplot3d import Axes3D
from mmhuman3d.core.conventions.cameras.convert_convention import \
enc_camera_convention
from mmhuman3d.utils.demo_utils import get_different_colors
from mmhuman3d.utils.ffmpeg_utils import images_to_video
from mmhuman3d.utils.path_utils import check_path_suffix
The provided code snippet includes necessary dependencies for implementing the `_set_new_pose` function. Write a Python function `def _set_new_pose(pose_np, sign, axis)` to solve the following problem:
set new pose with axis convention.
Here is the function:
def _set_new_pose(pose_np, sign, axis):
"""set new pose with axis convention."""
target_sign = [-1, 1, -1]
target_axis = ['x', 'z', 'y']
pose_rearrange_axis_result = pose_np.copy()
for axis_index, axis_name in enumerate(target_axis):
src_axis_index = axis.index(axis_name)
pose_rearrange_axis_result[..., axis_index] = \
pose_np[..., src_axis_index]
for dim_index in range(pose_rearrange_axis_result.shape[-1]):
pose_rearrange_axis_result[
..., dim_index] = sign[dim_index] / target_sign[
dim_index] * pose_rearrange_axis_result[..., dim_index]
return pose_rearrange_axis_result | set new pose with axis convention. |
14,324 | import io
import os
import shutil
from pathlib import Path
from typing import Iterable, List, Optional, Union
import cv2
import mmcv
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
from mpl_toolkits.mplot3d import Axes3D
from mmhuman3d.core.conventions.cameras.convert_convention import \
enc_camera_convention
from mmhuman3d.utils.demo_utils import get_different_colors
from mmhuman3d.utils.ffmpeg_utils import images_to_video
from mmhuman3d.utils.path_utils import check_path_suffix
The provided code snippet includes necessary dependencies for implementing the `_plot_line_on_fig` function. Write a Python function `def _plot_line_on_fig(ax, point1_location, point2_location, color, linewidth=1)` to solve the following problem:
Draw line on fig with matplotlib.
Here is the function:
def _plot_line_on_fig(ax,
point1_location,
point2_location,
color,
linewidth=1):
"""Draw line on fig with matplotlib."""
ax.plot([point1_location[0], point2_location[0]],
[point1_location[1], point2_location[1]],
[point1_location[2], point2_location[2]],
color=color,
linewidth=linewidth)
return ax | Draw line on fig with matplotlib. |
14,325 | import io
import os
import shutil
from pathlib import Path
from typing import Iterable, List, Optional, Union
import cv2
import mmcv
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
from mpl_toolkits.mplot3d import Axes3D
from mmhuman3d.core.conventions.cameras.convert_convention import \
enc_camera_convention
from mmhuman3d.utils.demo_utils import get_different_colors
from mmhuman3d.utils.ffmpeg_utils import images_to_video
from mmhuman3d.utils.path_utils import check_path_suffix
The provided code snippet includes necessary dependencies for implementing the `_get_cv2mat_from_buf` function. Write a Python function `def _get_cv2mat_from_buf(fig, dpi=180)` to solve the following problem:
Get numpy image from IO.
Here is the function:
def _get_cv2mat_from_buf(fig, dpi=180):
    """Render a matplotlib figure into an RGB numpy image via a PNG buffer."""
    with io.BytesIO() as buf:
        fig.savefig(buf, format='png', dpi=dpi)
        raw = np.frombuffer(buf.getvalue(), dtype=np.uint8)
    # cv2 decodes to BGR; convert to RGB before returning.
    img = cv2.imdecode(raw, 1)
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
14,326 | import torch
def vis_z_buffer(z, percentile=1, vis_pad=0.2):
    # Visualize a depth buffer as a uint8 grayscale image.
    # NOTE(review): assumes z is (H, W, 1); z > 1e-5 marks rasterized
    # pixels -- TODO confirm shape/convention against callers.
    z = z[:, :, 0]
    mask = z > 1e-5
    if torch.sum(mask) == 0:
        # Nothing was rendered: return an all-black image.
        # NOTE(review): this writes through a view and mutates the caller's
        # tensor (as does `z[mask] = ...` below) -- confirm intended.
        z[...] = 0
    else:
        # Robust value range from percentiles, padded by `vis_pad` on
        # both sides to avoid saturating at the extremes.
        vmin = torch.quantile(z[mask], percentile / 100)
        vmax = torch.quantile(z[mask], 1 - percentile / 100)
        pad = (vmax - vmin) * vis_pad
        vmin_padded = vmin - pad
        vmax_padded = vmax + pad
        # Invert depth so nearer surfaces appear brighter.
        z[mask] = vmin + vmax - z[mask]
        z = (z - vmin_padded) / (vmax_padded - vmin_padded)
        z = torch.clip(torch.round(z * 255), 0, 255)
    z_cpu = z.to(dtype=torch.uint8).detach().cpu().numpy()
    return z_cpu
14,327 | import torch
def vis_normals(coords, normals, vis_pad=0.2):
    """Shade per-pixel normals against the view direction as a uint8 image.

    Pixels with non-positive camera-space depth (coords[..., 2] <= 0) stay
    black; the rest get clamp(dot(view_dir, normal), 0, 1) remapped into
    [vis_pad, 1 - vis_pad].
    """
    visible = coords[:, :, 2] > 0
    view_dirs = -coords[visible]
    visible_normals = normals[visible]

    lengths = torch.sqrt(torch.sum(view_dirs**2, dim=1))
    shading = torch.sum(view_dirs * visible_normals, dim=1) / lengths

    height, width = normals.shape[:2]
    image = torch.zeros((height, width),
                        dtype=coords.dtype,
                        device=coords.device)
    image[visible] = torch.clamp(shading, 0, 1) * (1 - 2 * vis_pad) + vis_pad
    return (image * 255).to(dtype=torch.uint8)
14,328 | import torch
The provided code snippet includes necessary dependencies for implementing the `estimate_normals` function. Write a Python function `def estimate_normals(vertices, faces, pinhole, vertices_filter=None)` to solve the following problem:
Estimate the vertices normals with the specified faces and camera. Args: vertices (torch.tensor): Shape should be (num_verts, 3). faces (torch.tensor): The faces of the vertices. pinhole (object): The object of the camera. Returns: coords (torch.tensor): The estimated coordinates. normals (torch.tensor): The estimated normals.
Here is the function:
def estimate_normals(vertices, faces, pinhole, vertices_filter=None):
    """Estimate the vertices normals with the specified faces and camera.

    Args:
        vertices (torch.Tensor): CUDA tensor of shape (num_verts, 3).
        faces (torch.Tensor): The faces of the vertices.
        pinhole (object): The camera object; must provide `project_ndc`,
            `h` and `w`.
        vertices_filter (torch.Tensor, optional): uint8 mask of shape
            (num_verts,) selecting vertices; defaults to all ones.

    Returns:
        coords (torch.Tensor): The estimated coordinates.
        normals (torch.Tensor): The estimated normals.
    """
    # Bug fix: validation previously ran only when `vertices_filter` was
    # None, silently skipping the checks for explicit filters.
    assert torch.is_tensor(vertices)
    assert vertices.is_cuda
    assert len(vertices.shape) == 2
    if vertices_filter is None:
        # Default: keep every vertex.
        n = vertices.shape[0]
        vertices_filter = torch.ones((n),
                                     dtype=torch.uint8,
                                     device=vertices.device)
    vertices = vertices.contiguous()
    vertices_ndc = pinhole.project_ndc(vertices)
    coords, normals = estimate_normals_cuda(vertices_ndc, faces, vertices,
                                            vertices_filter, pinhole.h,
                                            pinhole.w)
    return coords, normals
14,329 | import torch
The provided code snippet includes necessary dependencies for implementing the `project_mesh` function. Write a Python function `def project_mesh(vertices, faces, vertice_values, pinhole, vertices_filter=None)` to solve the following problem:
Project mesh to the image plane with the specified faces and camera. Args: vertices (torch.tensor): Shape should be (num_verts, 3). faces (torch.tensor): The faces of the vertices. vertice_values (torch.tensor): The depth of the each vertex. pinhole (object): The object of the camera. Returns: torch.tensor: The projected mesh.
Here is the function:
def project_mesh(vertices,
                 faces,
                 vertice_values,
                 pinhole,
                 vertices_filter=None):
    """Project mesh to the image plane with the specified faces and camera.

    Args:
        vertices (torch.Tensor): CUDA tensor of shape (num_verts, 3).
        faces (torch.Tensor): The faces of the vertices.
        vertice_values (torch.Tensor): The depth of the each vertex.
        pinhole (object): The camera object; must provide `project_ndc`,
            `h` and `w`.
        vertices_filter (torch.Tensor, optional): uint8 mask of shape
            (num_verts,) selecting vertices; defaults to all ones.

    Returns:
        torch.Tensor: The projected mesh.
    """
    # Bug fix: validation previously ran only when `vertices_filter` was
    # None, silently skipping the checks for explicit filters (kept
    # consistent with `estimate_normals`).
    assert torch.is_tensor(vertices)
    assert vertices.is_cuda
    assert len(vertices.shape) == 2
    if vertices_filter is None:
        # Default: keep every vertex.
        n = vertices.shape[0]
        vertices_filter = torch.ones((n),
                                     dtype=torch.uint8,
                                     device=vertices.device)
    vertices = vertices.contiguous()
    vertices_ndc = pinhole.project_ndc(vertices)
    return project_mesh_cuda(vertices_ndc, faces, vertice_values,
                             vertices_filter, pinhole.h, pinhole.w)
14,330 | import numpy as np
import trimesh
from trimesh.proximity import closest_point
from .mesh_eval import compute_similarity_transform
def compute_similarity_transform(source_points,
                                 target_points,
                                 return_tform=False):
    """Align ``source_points`` to ``target_points`` with a similarity
    transform (orthogonal Procrustes with scale).

    Solves for scale ``s``, rotation ``R`` (3x3, det(R)=1) and translation
    ``t`` (3x1) minimizing the squared distance between
    ``s * R @ source + t`` and ``target``, then returns the transformed
    source points.

    Notes:
        Points number: N

    Args:
        source_points (np.ndarray([N, 3])): Source point set.
        target_points (np.ndarray([N, 3])): Target point set.
        return_tform (bool): Whether to also return the transform.

    Returns:
        source_points_hat (np.ndarray([N, 3])): Transformed source points.
        transform (dict): Only returned if ``return_tform`` is True; holds
            the keys 'rotation', 'scale' and 'translation'.
    """
    assert target_points.shape[0] == source_points.shape[0]
    assert target_points.shape[1] == 3 and source_points.shape[1] == 3

    # Work in 3xN column-vector layout.
    src = source_points.T
    tgt = target_points.T

    # Center both point sets.
    src_mean = src.mean(axis=1, keepdims=True)
    tgt_mean = tgt.mean(axis=1, keepdims=True)
    src_centered = src - src_mean
    tgt_centered = tgt - tgt_mean

    # Total variance of the centered source, used to recover the scale.
    src_var = np.sum(src_centered**2)

    # Cross-covariance; its SVD yields the optimal rotation.
    cov = src_centered @ tgt_centered.T
    U, _, Vh = np.linalg.svd(cov)
    V = Vh.T

    # Reflection fix: force det(R) = +1.
    D = np.eye(U.shape[0])
    D[-1, -1] *= np.sign(np.linalg.det(U @ V.T))
    rotation = V @ (D @ U.T)

    # Recover scale and translation from the rotation.
    scale = np.trace(rotation @ cov) / src_var
    translation = tgt_mean - scale * (rotation @ src_mean)

    # Apply the recovered similarity transform to the source points.
    source_points_hat = (scale * (rotation @ src) + translation).T

    if not return_tform:
        return source_points_hat
    return source_points_hat, {
        'rotation': rotation,
        'scale': scale,
        'translation': translation
    }
The provided code snippet includes necessary dependencies for implementing the `keypoint_mpjpe` function. Write a Python function `def keypoint_mpjpe(pred, gt, mask, alignment='none')` to solve the following problem:
Calculate the mean per-joint position error (MPJPE) and the error after rigid alignment with the ground truth (PA-MPJPE). batch_size: N num_keypoints: K keypoint_dims: C Args: pred (np.ndarray[N, K, C]): Predicted keypoint location. gt (np.ndarray[N, K, C]): Groundtruth keypoint location. mask (np.ndarray[N, K]): Visibility of the target. False for invisible joints, and True for visible. Invisible joints will be ignored for accuracy calculation. alignment (str, optional): method to align the prediction with the groundtruth. Supported options are: - ``'none'``: no alignment will be applied - ``'scale'``: align in the least-square sense in scale - ``'procrustes'``: align in the least-square sense in scale, rotation and translation. Returns: tuple: A tuple containing joint position errors - mpjpe (float|np.ndarray[N]): mean per-joint position error. - pa-mpjpe (float|np.ndarray[N]): mpjpe after rigid alignment with the ground truth
Here is the function:
def keypoint_mpjpe(pred, gt, mask, alignment='none'):
    """Calculate the mean per-joint position error (MPJPE), optionally
    after aligning the prediction to the ground truth first.

    batch_size: N
    num_keypoints: K
    keypoint_dims: C

    Args:
        pred (np.ndarray[N, K, C]): Predicted keypoint location.
        gt (np.ndarray[N, K, C]): Groundtruth keypoint location.
        mask (np.ndarray[N, K]): Visibility of the target. False for
            invisible joints, True for visible. Invisible joints are
            excluded from the error.
        alignment (str, optional): Method to align the prediction with
            the groundtruth. Supported options are:
            - ``'none'``: no alignment will be applied
            - ``'scale'``: align in the least-square sense in scale
            - ``'procrustes'``: align in the least-square sense in scale,
              rotation and translation (i.e. PA-MPJPE).

    Returns:
        float: Mean per-joint position error over visible joints.

    Raises:
        ValueError: If ``alignment`` is not one of the supported options.
    """
    # At least one joint must be visible, otherwise the mean is undefined.
    assert mask.any()

    if alignment == 'scale':
        # Per-sample least-squares scale: <pred, gt> / <pred, pred>.
        pred_dot_pred = np.einsum('nkc,nkc->n', pred, pred)
        pred_dot_gt = np.einsum('nkc,nkc->n', pred, gt)
        scale_factor = pred_dot_gt / pred_dot_pred
        pred = pred * scale_factor[:, None, None]
    elif alignment == 'procrustes':
        # Rigid (similarity) alignment per sample before the error.
        pred = np.stack([
            compute_similarity_transform(pred_i, gt_i)
            for pred_i, gt_i in zip(pred, gt)
        ])
    elif alignment != 'none':
        raise ValueError(f'Invalid value for alignment: {alignment}')

    # Euclidean distance per joint, averaged over visible joints only.
    error = np.linalg.norm(pred - gt, ord=2, axis=-1)[mask].mean()
    return error
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.