id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
159,136 | import mimetypes
import os
import time
from argparse import ArgumentParser
import cv2
import json_tricks as json
import mmcv
import mmengine
import numpy as np
from mmpose.apis import inference_bottomup, init_model
from mmpose.registry import VISUALIZERS
from mmpose.structures import split_instances
def parse_args():
    """Parse command-line arguments for the bottom-up pose demo."""
    parser = ArgumentParser()
    # Model files (positional).
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    # Input / output options.
    parser.add_argument(
        '--input', type=str, default='', help='Image/Video file')
    parser.add_argument(
        '--show', action='store_true', default=False,
        help='whether to show img')
    parser.add_argument(
        '--output-root', type=str, default='',
        help='root of the output img file. '
        'Default not saving the visualization images.')
    parser.add_argument(
        '--save-predictions', action='store_true', default=False,
        help='whether to save predicted results')
    # Runtime options.
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--draw-heatmap', action='store_true',
        help='Visualize the predicted heatmap')
    parser.add_argument(
        '--show-kpt-idx', action='store_true', default=False,
        help='Whether to show the index of keypoints')
    parser.add_argument(
        '--kpt-thr', type=float, default=0.3, help='Keypoint score threshold')
    # Visualization style.
    parser.add_argument(
        '--radius', type=int, default=3,
        help='Keypoint radius for visualization')
    parser.add_argument(
        '--thickness', type=int, default=1,
        help='Link thickness for visualization')
    parser.add_argument(
        '--show-interval', type=int, default=0, help='Sleep seconds per frame')
    return parser.parse_args()
159,137 | from argparse import ArgumentParser
from typing import Dict
from mmpose.apis.inferencers import MMPoseInferencer, get_model_aliases
def parse_args():
    """Parse CLI arguments and split them for the MMPose inferencer.

    Returns:
        tuple: ``(init_args, call_args, display_alias)`` where ``init_args``
        configure the inferencer constructor, ``call_args`` are forwarded
        to each inference call, and ``display_alias`` is True when the user
        only wants the available model aliases printed.
    """
    parser = ArgumentParser()
    parser.add_argument(
        'inputs', type=str, nargs='?',
        help='Input image/video path or folder path.')
    # 2D / 3D pose models and their checkpoints.
    parser.add_argument(
        '--pose2d', type=str, default=None,
        help='Pretrained 2D pose estimation algorithm. It\'s the path to the '
        'config file or the model name defined in metafile.')
    parser.add_argument(
        '--pose2d-weights', type=str, default=None,
        help='Path to the custom checkpoint file of the selected pose model. '
        'If it is not specified and "pose2d" is a model name of metafile, '
        'the weights will be loaded from metafile.')
    parser.add_argument(
        '--pose3d', type=str, default=None,
        help='Pretrained 3D pose estimation algorithm. It\'s the path to the '
        'config file or the model name defined in metafile.')
    parser.add_argument(
        '--pose3d-weights', type=str, default=None,
        help='Path to the custom checkpoint file of the selected pose model. '
        'If it is not specified and "pose3d" is a model name of metafile, '
        'the weights will be loaded from metafile.')
    # Auxiliary detection model.
    parser.add_argument(
        '--det-model', type=str, default=None,
        help='Config path or alias of detection model.')
    parser.add_argument(
        '--det-weights', type=str, default=None,
        help='Path to the checkpoints of detection model.')
    parser.add_argument(
        '--det-cat-ids', type=int, nargs='+', default=0,
        help='Category id for detection model.')
    # Runtime options.
    parser.add_argument(
        '--scope', type=str, default='mmpose',
        help='Scope where modules are defined.')
    parser.add_argument(
        '--device', type=str, default=None,
        help='Device used for inference. '
        'If not specified, the available device will be automatically used.')
    # Visualization toggles and thresholds.
    parser.add_argument(
        '--show', action='store_true',
        help='Display the image/video in a popup window.')
    parser.add_argument(
        '--draw-bbox', action='store_true',
        help='Whether to draw the bounding boxes.')
    parser.add_argument(
        '--draw-heatmap', action='store_true', default=False,
        help='Whether to draw the predicted heatmaps.')
    parser.add_argument(
        '--bbox-thr', type=float, default=0.3,
        help='Bounding box score threshold')
    parser.add_argument(
        '--nms-thr', type=float, default=0.3,
        help='IoU threshold for bounding box NMS')
    parser.add_argument(
        '--kpt-thr', type=float, default=0.3, help='Keypoint score threshold')
    parser.add_argument(
        '--tracking-thr', type=float, default=0.3, help='Tracking threshold')
    parser.add_argument(
        '--use-oks-tracking', action='store_true',
        help='Whether to use OKS as similarity in tracking')
    parser.add_argument(
        '--norm-pose-2d', action='store_true',
        help='Scale the bbox (along with the 2D pose) to the average bbox '
        'scale of the dataset, and move the bbox (along with the 2D pose) to '
        'the average bbox center of the dataset. This is useful when bbox '
        'is small, especially in multi-person scenarios.')
    parser.add_argument(
        '--rebase-keypoint-height', action='store_true',
        help='Rebase the predicted 3D pose so its lowest keypoint has a '
        'height of 0 (landing on the ground). This is useful for '
        'visualization when the model do not predict the global position '
        'of the 3D pose.')
    parser.add_argument(
        '--radius', type=int, default=3,
        help='Keypoint radius for visualization.')
    parser.add_argument(
        '--thickness', type=int, default=1,
        help='Link thickness for visualization.')
    parser.add_argument(
        '--skeleton-style', default='mmpose', type=str,
        choices=['mmpose', 'openpose'], help='Skeleton style selection')
    parser.add_argument(
        '--black-background', action='store_true',
        help='Plot predictions on a black image')
    # Output destinations.
    parser.add_argument(
        '--vis-out-dir', type=str, default='',
        help='Directory for saving visualized results.')
    parser.add_argument(
        '--pred-out-dir', type=str, default='',
        help='Directory for saving inference results.')
    parser.add_argument(
        '--show-alias', action='store_true',
        help='Display all the available model aliases.')

    call_args = vars(parser.parse_args())

    # Split off the arguments consumed by the inferencer constructor.
    init_kws = [
        'pose2d', 'pose2d_weights', 'scope', 'device', 'det_model',
        'det_weights', 'det_cat_ids', 'pose3d', 'pose3d_weights'
    ]
    init_args = {kw: call_args.pop(kw) for kw in init_kws}

    display_alias = call_args.pop('show_alias')

    return init_args, call_args, display_alias
159,138 | from argparse import ArgumentParser
from typing import Dict
from mmpose.apis.inferencers import MMPoseInferencer, get_model_aliases
The provided code snippet includes necessary dependencies for implementing the `display_model_aliases` function. Write a Python function `def display_model_aliases(model_aliases: Dict[str, str]) -> None` to solve the following problem:
Display the available model aliases and their corresponding model names.
Here is the function:
def display_model_aliases(model_aliases: Dict[str, str]) -> None:
    """Print a two-column table of model aliases and their model names.

    Args:
        model_aliases (Dict[str, str]): Mapping from alias to model name.
    """
    # Column width: longest alias plus two spaces of separation.
    width = max(len(alias) for alias in model_aliases) + 2
    print('ALIAS'.ljust(width) + 'MODEL_NAME')
    for alias in sorted(model_aliases):
        print(alias.ljust(width) + model_aliases[alias])
159,139 | import mimetypes
import os
import time
from argparse import ArgumentParser
import cv2
import json_tricks as json
import mmcv
import mmengine
import numpy as np
from mmpose.apis import inference_topdown
from mmpose.apis import init_model as init_pose_estimator
from mmpose.evaluation.functional import nms
from mmpose.registry import VISUALIZERS
from mmpose.structures import merge_data_samples, split_instances
from mmpose.utils import adapt_mmdet_pipeline
The provided code snippet includes necessary dependencies for implementing the `process_one_image` function. Write a Python function `def process_one_image(args, img, detector, pose_estimator, visualizer=None, show_interval=0)` to solve the following problem:
Visualize predicted keypoints (and heatmaps) of one image.
Here is the function:
def process_one_image(args,
                      img,
                      detector,
                      pose_estimator,
                      visualizer=None,
                      show_interval=0):
    """Visualize predicted keypoints (and heatmaps) of one image.

    NOTE(review): relies on ``inference_detector`` (mmdet) being provided by
    the surrounding module's imports — TODO confirm it is imported there.
    """
    # Stage 1: detect person bounding boxes.
    detections = inference_detector(detector, img).pred_instances.cpu().numpy()
    bboxes = np.concatenate(
        (detections.bboxes, detections.scores[:, None]), axis=1)
    keep = np.logical_and(detections.labels == args.det_cat_id,
                          detections.scores > args.bbox_thr)
    bboxes = bboxes[keep]
    # Suppress overlapping boxes, then drop the score column.
    bboxes = bboxes[nms(bboxes, args.nms_thr), :4]

    # Stage 2: top-down keypoint estimation on each kept box.
    pose_results = inference_topdown(pose_estimator, img, bboxes)
    data_samples = merge_data_samples(pose_results)

    # Load from disk / convert to RGB for visualization.
    if isinstance(img, str):
        img = mmcv.imread(img, channel_order='rgb')
    elif isinstance(img, np.ndarray):
        img = mmcv.bgr2rgb(img)

    if visualizer is not None:
        visualizer.add_datasample(
            'result',
            img,
            data_sample=data_samples,
            draw_gt=False,
            draw_heatmap=args.draw_heatmap,
            draw_bbox=args.draw_bbox,
            show_kpt_idx=args.show_kpt_idx,
            skeleton_style=args.skeleton_style,
            show=args.show,
            wait_time=show_interval,
            kpt_thr=args.kpt_thr)

    # None when no instance was detected.
    return data_samples.get('pred_instances', None)
159,140 | import mimetypes
import os
import time
from argparse import ArgumentParser
from functools import partial
import cv2
import json_tricks as json
import mmcv
import mmengine
import numpy as np
from mmengine.structures import InstanceData
from mmpose.apis import (_track_by_iou, _track_by_oks, collect_multi_frames,
convert_keypoint_definition, extract_pose_sequence,
inference_pose_lifter_model, inference_topdown,
init_model)
from mmpose.models.pose_estimators import PoseLifter
from mmpose.models.pose_estimators.topdown import TopdownPoseEstimator
from mmpose.registry import VISUALIZERS
from mmpose.structures import (PoseDataSample, merge_data_samples,
split_instances)
from mmpose.utils import adapt_mmdet_pipeline
def parse_args():
    """Parse command-line arguments for the 3D pose (lifter) video demo.

    Returns:
        argparse.Namespace: The parsed arguments.
    """
    parser = ArgumentParser()
    # Positional model files: detector, 2D estimator, pose lifter.
    parser.add_argument('det_config', help='Config file for detection')
    parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
    parser.add_argument(
        'pose_estimator_config',
        type=str,
        default=None,
        help='Config file for the 1st stage 2D pose estimator')
    parser.add_argument(
        'pose_estimator_checkpoint',
        type=str,
        default=None,
        help='Checkpoint file for the 1st stage 2D pose estimator')
    parser.add_argument(
        'pose_lifter_config',
        help='Config file for the 2nd stage pose lifter model')
    parser.add_argument(
        'pose_lifter_checkpoint',
        help='Checkpoint file for the 2nd stage pose lifter model')
    parser.add_argument('--input', type=str, default='', help='Video path')
    parser.add_argument(
        '--show',
        action='store_true',
        default=False,
        help='Whether to show visualizations')
    parser.add_argument(
        '--rebase-keypoint-height',
        action='store_true',
        help='Rebase the predicted 3D pose so its lowest keypoint has a '
        'height of 0 (landing on the ground). This is useful for '
        'visualization when the model do not predict the global position '
        'of the 3D pose.')
    parser.add_argument(
        '--norm-pose-2d',
        action='store_true',
        help='Scale the bbox (along with the 2D pose) to the average bbox '
        'scale of the dataset, and move the bbox (along with the 2D pose) to '
        'the average bbox center of the dataset. This is useful when bbox '
        'is small, especially in multi-person scenarios.')
    parser.add_argument(
        '--num-instances',
        type=int,
        default=-1,
        help='The number of 3D poses to be visualized in every frame. If '
        'less than 0, it will be set to the number of pose results in the '
        'first frame.')
    parser.add_argument(
        '--output-root',
        type=str,
        default='',
        help='Root of the output video file. '
        'Default not saving the visualization video.')
    parser.add_argument(
        '--save-predictions',
        action='store_true',
        default=False,
        help='whether to save predicted results')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--det-cat-id',
        type=int,
        default=0,
        help='Category id for bounding box detection model')
    parser.add_argument(
        '--bbox-thr',
        type=float,
        default=0.9,
        help='Bounding box score threshold')
    parser.add_argument('--kpt-thr', type=float, default=0.3)
    parser.add_argument(
        '--use-oks-tracking', action='store_true', help='Using OKS tracking')
    parser.add_argument(
        '--tracking-thr', type=float, default=0.3, help='Tracking threshold')
    parser.add_argument(
        '--show-interval', type=int, default=0, help='Sleep seconds per frame')
    parser.add_argument(
        '--thickness',
        type=int,
        default=1,
        help='Link thickness for visualization')
    parser.add_argument(
        '--radius',
        type=int,
        default=3,
        help='Keypoint radius for visualization')
    parser.add_argument(
        '--use-multi-frames',
        action='store_true',
        default=False,
        # BUGFIX: the original implicit string concatenation was missing a
        # space, rendering as "2D posedetection stage" in --help output.
        help='whether to use multi frames for inference in the 2D pose '
        'detection stage. Default: False.')
    args = parser.parse_args()
    return args
159,141 | import mimetypes
import os
import time
from argparse import ArgumentParser
from functools import partial
import cv2
import json_tricks as json
import mmcv
import mmengine
import numpy as np
from mmengine.structures import InstanceData
from mmpose.apis import (_track_by_iou, _track_by_oks, collect_multi_frames,
convert_keypoint_definition, extract_pose_sequence,
inference_pose_lifter_model, inference_topdown,
init_model)
from mmpose.models.pose_estimators import PoseLifter
from mmpose.models.pose_estimators.topdown import TopdownPoseEstimator
from mmpose.registry import VISUALIZERS
from mmpose.structures import (PoseDataSample, merge_data_samples,
split_instances)
from mmpose.utils import adapt_mmdet_pipeline
def get_area(results):
    """Attach an ``areas`` field to each data sample's ``pred_instances``.

    When bounding boxes are present, the area is the bbox area
    (width * height). Otherwise a bbox is derived from the keypoint extents
    and both ``areas`` and ``bboxes`` are written back.

    Note: ``results`` is modified in place and also returned.
    """
    for i, data_sample in enumerate(results):
        pred_instance = data_sample.pred_instances.cpu().numpy()
        if 'bboxes' in pred_instance:
            bboxes = pred_instance.bboxes
            # Area of each (x1, y1, x2, y2) box: (x2 - x1) * (y2 - y1).
            results[i].pred_instances.set_field(
                np.array([(bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
                          for bbox in bboxes]), 'areas')
        else:
            keypoints = pred_instance.keypoints
            areas, bboxes = [], []
            for keypoint in keypoints:
                # Min is taken over strictly positive coordinates only;
                # ``initial=1e10`` guards the case where no coordinate is
                # positive (the filtered array would otherwise be empty).
                xmin = np.min(keypoint[:, 0][keypoint[:, 0] > 0], initial=1e10)
                xmax = np.max(keypoint[:, 0])
                ymin = np.min(keypoint[:, 1][keypoint[:, 1] > 0], initial=1e10)
                ymax = np.max(keypoint[:, 1])
                areas.append((xmax - xmin) * (ymax - ymin))
                bboxes.append([xmin, ymin, xmax, ymax])
            results[i].pred_instances.areas = np.array(areas)
            results[i].pred_instances.bboxes = np.array(bboxes)
    return results
def get_pose_est_results(args, pose_estimator, frame, bboxes,
                         pose_est_results_last, next_id, pose_lift_dataset):
    """Run 2D pose estimation on one frame, track instances, and convert
    keypoints to the pose-lifter dataset convention.

    Returns:
        tuple: ``(pose_est_results, pose_est_results_converted, next_id)``.
    """
    pose_det_dataset = pose_estimator.cfg.test_dataloader.dataset

    # 2D top-down pose estimation on the current frame.
    pose_est_results = inference_topdown(pose_estimator, frame, bboxes)
    pose_est_results = get_area(pose_est_results)

    # Choose the tracking similarity: OKS or IoU.
    if args.use_oks_tracking:
        _track = partial(_track_by_oks)
    else:
        _track = _track_by_iou

    for i, result in enumerate(pose_est_results):
        track_id, pose_est_results_last, match_result = _track(
            result, pose_est_results_last, args.tracking_thr)
        if track_id == -1:
            pred_instances = result.pred_instances.cpu().numpy()
            keypoints = pred_instances.keypoints
            if np.count_nonzero(keypoints[:, :, 1]) >= 3:
                # Unmatched but plausible instance: start a new track.
                pose_est_results[i].set_field(next_id, 'track_id')
                next_id += 1
            else:
                # Too few detected keypoints: invalidate this instance.
                keypoints[:, :, 1] = -10
                pose_est_results[i].pred_instances.set_field(
                    keypoints, 'keypoints')
                zeroed_bboxes = pred_instances.bboxes * 0
                pose_est_results[i].pred_instances.set_field(
                    zeroed_bboxes, 'bboxes')
                pose_est_results[i].set_field(-1, 'track_id')
                pose_est_results[i].set_field(pred_instances, 'pred_instances')
        else:
            pose_est_results[i].set_field(track_id, 'track_id')
        del match_result

    # Re-pack each sample and convert keypoint definitions from the 2D
    # detector's dataset to the pose-lifter's dataset.
    pose_est_results_converted = []
    for pose_est_result in pose_est_results:
        converted = PoseDataSample()
        gt_instances = InstanceData()
        pred_instances = InstanceData()
        for k in pose_est_result.gt_instances.keys():
            gt_instances.set_field(pose_est_result.gt_instances[k], k)
        for k in pose_est_result.pred_instances.keys():
            pred_instances.set_field(pose_est_result.pred_instances[k], k)
        converted.gt_instances = gt_instances
        converted.pred_instances = pred_instances
        converted.track_id = pose_est_result.track_id

        keypoints = convert_keypoint_definition(pred_instances.keypoints,
                                                pose_det_dataset['type'],
                                                pose_lift_dataset['type'])
        converted.pred_instances.keypoints = keypoints
        pose_est_results_converted.append(converted)

    return pose_est_results, pose_est_results_converted, next_id
159,142 | import mimetypes
import os
import time
from argparse import ArgumentParser
from functools import partial
import cv2
import json_tricks as json
import mmcv
import mmengine
import numpy as np
from mmengine.structures import InstanceData
from mmpose.apis import (_track_by_iou, _track_by_oks, collect_multi_frames,
convert_keypoint_definition, extract_pose_sequence,
inference_pose_lifter_model, inference_topdown,
init_model)
from mmpose.models.pose_estimators import PoseLifter
from mmpose.models.pose_estimators.topdown import TopdownPoseEstimator
from mmpose.registry import VISUALIZERS
from mmpose.structures import (PoseDataSample, merge_data_samples,
split_instances)
from mmpose.utils import adapt_mmdet_pipeline
def get_pose_lift_results(args, visualizer, pose_lifter, pose_est_results_list,
                          frame, frame_idx, pose_est_results):
    """Lift tracked 2D poses to 3D and (optionally) visualize them.

    Args:
        args: Parsed CLI args (reads ``norm_pose_2d``,
            ``rebase_keypoint_height``, ``num_instances``, ``show``,
            ``kpt_thr``, ``show_interval``; may write ``num_instances``).
        visualizer: 3D pose visualizer, or None to skip visualization.
        pose_lifter: The 2D-to-3D pose lifting model.
        pose_est_results_list: Per-frame 2D pose results accumulated so far.
        frame (np.ndarray): Current video frame, (height, width, channels).
        frame_idx (int): Index of the current frame.
        pose_est_results: 2D pose results of the current frame.

    Returns:
        The merged 3D ``pred_instances``, or None if nothing was predicted.
    """
    pose_lift_dataset = pose_lifter.cfg.test_dataloader.dataset

    # extract and pad input pose2d sequence
    pose_seq_2d = extract_pose_sequence(
        pose_est_results_list,
        frame_idx=frame_idx,
        causal=pose_lift_dataset.get('causal', False),
        seq_len=pose_lift_dataset.get('seq_len', 1),
        step=pose_lift_dataset.get('seq_step', 1))

    # 2D-to-3D pose lifting.
    # BUGFIX: ndarray images are (height, width, channels); the original
    # unpacked shape[:2] as ``width, height``, swapping the two for
    # non-square frames. ``image_size`` expects (w, h).
    height, width = frame.shape[:2]
    pose_lift_results = inference_pose_lifter_model(
        pose_lifter,
        pose_seq_2d,
        image_size=(width, height),
        norm_pose_2d=args.norm_pose_2d)

    # Pose processing
    for idx, pose_lift_res in enumerate(pose_lift_results):
        # Instances without a track id sort last (1e4 sentinel).
        pose_lift_res.track_id = pose_est_results[idx].get('track_id', 1e4)

        pred_instances = pose_lift_res.pred_instances
        keypoints = pred_instances.keypoints
        keypoint_scores = pred_instances.keypoint_scores
        if keypoint_scores.ndim == 3:
            # Drop the singleton sequence axis.
            keypoint_scores = np.squeeze(keypoint_scores, axis=1)
            pose_lift_results[
                idx].pred_instances.keypoint_scores = keypoint_scores
        if keypoints.ndim == 4:
            keypoints = np.squeeze(keypoints, axis=1)

        # Reorder the last axis to (x, z, y) and negate two components to
        # match the visualizer's coordinate convention.
        keypoints = keypoints[..., [0, 2, 1]]
        keypoints[..., 0] = -keypoints[..., 0]
        keypoints[..., 2] = -keypoints[..., 2]

        # rebase height (z-axis) so the lowest keypoint sits at z = 0
        if args.rebase_keypoint_height:
            keypoints[..., 2] -= np.min(
                keypoints[..., 2], axis=-1, keepdims=True)

        pose_lift_results[idx].pred_instances.keypoints = keypoints

    pose_lift_results = sorted(
        pose_lift_results, key=lambda x: x.get('track_id', 1e4))

    pred_3d_data_samples = merge_data_samples(pose_lift_results)
    det_data_sample = merge_data_samples(pose_est_results)

    # NOTE: mutates args.num_instances on the first call when negative.
    if args.num_instances < 0:
        args.num_instances = len(pose_lift_results)

    # Visualization
    if visualizer is not None:
        visualizer.add_datasample(
            'result',
            frame,
            data_sample=pred_3d_data_samples,
            det_data_sample=det_data_sample,
            draw_gt=False,
            show=args.show,
            draw_bbox=True,
            kpt_thr=args.kpt_thr,
            num_instances=args.num_instances,
            wait_time=args.show_interval)

    # if there is no instance predicted, return None
    return pred_3d_data_samples.get('pred_instances', None)
159,143 | import mimetypes
import os
import time
from argparse import ArgumentParser
from functools import partial
import cv2
import json_tricks as json
import mmcv
import mmengine
import numpy as np
from mmengine.structures import InstanceData
from mmpose.apis import (_track_by_iou, _track_by_oks, collect_multi_frames,
convert_keypoint_definition, extract_pose_sequence,
inference_pose_lifter_model, inference_topdown,
init_model)
from mmpose.models.pose_estimators import PoseLifter
from mmpose.models.pose_estimators.topdown import TopdownPoseEstimator
from mmpose.registry import VISUALIZERS
from mmpose.structures import (PoseDataSample, merge_data_samples,
split_instances)
from mmpose.utils import adapt_mmdet_pipeline
def get_bbox(args, detector, frame):
    """Detect bounding boxes in ``frame`` for the configured category.

    NOTE(review): relies on ``inference_detector`` (mmdet) being provided by
    the surrounding module's imports — TODO confirm it is imported there.
    """
    pred_instance = inference_detector(detector,
                                       frame).pred_instances.cpu().numpy()
    # Keep boxes of the requested category above the score threshold.
    keep = np.logical_and(pred_instance.labels == args.det_cat_id,
                          pred_instance.scores > args.bbox_thr)
    return pred_instance.bboxes[keep]
159,144 | version_info = parse_version_info(__version__)
The provided code snippet includes necessary dependencies for implementing the `parse_version_info` function. Write a Python function `def parse_version_info(version_str)` to solve the following problem:
Parse a version string into a tuple. Args: version_str (str): The version string. Returns: tuple[int | str]: The version info, e.g., "1.3.0" is parsed into (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
Here is the function:
def parse_version_info(version_str):
    """Parse a version string into a tuple.

    Args:
        version_str (str): The version string.

    Returns:
        tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
        (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
    """
    parts = []
    for token in version_str.split('.'):
        if token.isdigit():
            parts.append(int(token))
        elif 'rc' in token:
            # e.g. "0rc1" -> 0, 'rc1'
            numeric, _, tail = token.partition('rc')
            parts.append(int(numeric))
            parts.append(f'rc{tail}')
        elif 'b' in token:
            # e.g. "0b2" -> 0, 'b2'
            numeric, _, tail = token.partition('b')
            parts.append(int(numeric))
            parts.append(f'b{tail}')
    return tuple(parts)
159,145 | import math
from typing import Tuple
import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `bbox_xyxy2xywh` function. Write a Python function `def bbox_xyxy2xywh(bbox_xyxy: np.ndarray) -> np.ndarray` to solve the following problem:
Transform the bbox format from x1y1x2y2 to xywh. Args: bbox_xyxy (np.ndarray): Bounding boxes (with scores), shaped (n, 4) or (n, 5). (left, top, right, bottom, [score]) Returns: np.ndarray: Bounding boxes (with scores), shaped (n, 4) or (n, 5). (left, top, width, height, [score])
Here is the function:
def bbox_xyxy2xywh(bbox_xyxy: np.ndarray) -> np.ndarray:
    """Convert bounding boxes from (x1, y1, x2, y2) to (x, y, w, h).

    Args:
        bbox_xyxy (np.ndarray): Boxes (with scores), shaped (n, 4) or
            (n, 5), as (left, top, right, bottom, [score]).

    Returns:
        np.ndarray: Boxes (with scores), shaped (n, 4) or (n, 5), as
        (left, top, width, height, [score]). The input is not modified.
    """
    converted = bbox_xyxy.copy()
    # width = right - left, height = bottom - top; any score column is kept.
    converted[:, 2] -= converted[:, 0]
    converted[:, 3] -= converted[:, 1]
    return converted
159,146 | import math
from typing import Tuple
import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `bbox_xywh2xyxy` function. Write a Python function `def bbox_xywh2xyxy(bbox_xywh: np.ndarray) -> np.ndarray` to solve the following problem:
Transform the bbox format from xywh to x1y1x2y2. Args: bbox_xywh (ndarray): Bounding boxes (with scores), shaped (n, 4) or (n, 5). (left, top, width, height, [score]) Returns: np.ndarray: Bounding boxes (with scores), shaped (n, 4) or (n, 5). (left, top, right, bottom, [score])
Here is the function:
def bbox_xywh2xyxy(bbox_xywh: np.ndarray) -> np.ndarray:
    """Convert bounding boxes from (x, y, w, h) to (x1, y1, x2, y2).

    Args:
        bbox_xywh (ndarray): Boxes (with scores), shaped (n, 4) or (n, 5),
            as (left, top, width, height, [score]).

    Returns:
        np.ndarray: Boxes (with scores), shaped (n, 4) or (n, 5), as
        (left, top, right, bottom, [score]). The input is not modified.
    """
    converted = bbox_xywh.copy()
    # right = left + width, bottom = top + height; any score column is kept.
    converted[:, 2] += converted[:, 0]
    converted[:, 3] += converted[:, 1]
    return converted
159,147 | import math
from typing import Tuple
import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `bbox_xyxy2cs` function. Write a Python function `def bbox_xyxy2cs(bbox: np.ndarray, padding: float = 1.) -> Tuple[np.ndarray, np.ndarray]` to solve the following problem:
Transform the bbox format from (x1,y1,x2,y2) into (center, scale) Args: bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted as (left, top, right, bottom) padding (float): BBox padding factor that will be multiplied to scale. Default: 1.0 Returns: tuple: A tuple containing center and scale. - np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or (n, 2) - np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or (n, 2)
Here is the function:
def bbox_xyxy2cs(bbox: np.ndarray,
                 padding: float = 1.) -> Tuple[np.ndarray, np.ndarray]:
    """Convert (x1, y1, x2, y2) boxes to (center, scale).

    Args:
        bbox (ndarray): Box(es) in shape (4,) or (n, 4), formatted as
            (left, top, right, bottom).
        padding (float): Padding factor multiplied onto the scale.
            Default: 1.0

    Returns:
        tuple: A tuple containing center and scale.

        - np.ndarray[float32]: Center (x, y) in shape (2,) or (n, 2)
        - np.ndarray[float32]: Scale (w, h) in shape (2,) or (n, 2)
    """
    # Promote a single box from (4,) to (1, 4) so the math is uniform.
    single = bbox.ndim == 1
    if single:
        bbox = bbox[None, :]

    x1, y1, x2, y2 = np.hsplit(bbox, [1, 2, 3])
    center = np.hstack([x1 + x2, y1 + y2]) * 0.5
    scale = np.hstack([x2 - x1, y2 - y1]) * padding

    if single:
        return center[0], scale[0]
    return center, scale
159,148 | import math
from typing import Tuple
import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `bbox_xywh2cs` function. Write a Python function `def bbox_xywh2cs(bbox: np.ndarray, padding: float = 1.) -> Tuple[np.ndarray, np.ndarray]` to solve the following problem:
Transform the bbox format from (x,y,w,h) into (center, scale) Args: bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted as (x, y, w, h) padding (float): BBox padding factor that will be multiplied to scale. Default: 1.0 Returns: tuple: A tuple containing center and scale. - np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or (n, 2) - np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or (n, 2)
Here is the function:
def bbox_xywh2cs(bbox: np.ndarray,
                 padding: float = 1.) -> Tuple[np.ndarray, np.ndarray]:
    """Transform the bbox format from (x,y,w,h) into (center, scale).

    Args:
        bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted
            as (x, y, w, h).
        padding (float): BBox padding factor that will be multiplied to
            scale. Default: 1.0

    Returns:
        tuple: A tuple containing center and scale.

        - np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or
            (n, 2)
        - np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or
            (n, 2)
    """
    # Convert single bbox from (4,) to (1, 4) so the math below is uniform.
    dim = bbox.ndim
    if dim == 1:
        bbox = bbox[None, :]

    x, y, w, h = np.hsplit(bbox, [1, 2, 3])
    center = np.hstack([x + w * 0.5, y + h * 0.5])
    scale = np.hstack([w, h]) * padding

    if dim == 1:
        center = center[0]
        scale = scale[0]

    return center, scale
159,149 | import math
from typing import Tuple
import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `bbox_cs2xyxy` function. Write a Python function `def bbox_cs2xyxy(center: np.ndarray, scale: np.ndarray, padding: float = 1.) -> np.ndarray` to solve the following problem:
Transform the bbox format from (center, scale) to (x1,y1,x2,y2). Args: center (ndarray): BBox center (x, y) in shape (2,) or (n, 2) scale (ndarray): BBox scale (w, h) in shape (2,) or (n, 2) padding (float): BBox padding factor that will be multiplied to scale. Default: 1.0 Returns: ndarray[float32]: BBox (x1, y1, x2, y2) in shape (4, ) or (n, 4)
Here is the function:
def bbox_cs2xyxy(center: np.ndarray,
                 scale: np.ndarray,
                 padding: float = 1.) -> np.ndarray:
    """Convert (center, scale) boxes back to (x1, y1, x2, y2).

    Args:
        center (ndarray): BBox center (x, y) in shape (2,) or (n, 2)
        scale (ndarray): BBox scale (w, h) in shape (2,) or (n, 2)
        padding (float): Padding factor that was multiplied onto the scale.
            Default: 1.0

    Returns:
        ndarray[float32]: BBox (x1, y1, x2, y2) in shape (4,) or (n, 4)
    """
    single = center.ndim == 1
    assert scale.ndim == center.ndim
    if single:
        center = center[None, :]
        scale = scale[None, :]

    wh = scale / padding          # undo the padding factor
    xy = center - 0.5 * wh        # top-left corner
    result = np.hstack((xy, xy + wh))

    return result[0] if single else result
159,150 | import math
from typing import Tuple
import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `bbox_cs2xywh` function. Write a Python function `def bbox_cs2xywh(center: np.ndarray, scale: np.ndarray, padding: float = 1.) -> np.ndarray` to solve the following problem:
Transform the bbox format from (center, scale) to (x,y,w,h). Args: center (ndarray): BBox center (x, y) in shape (2,) or (n, 2) scale (ndarray): BBox scale (w, h) in shape (2,) or (n, 2) padding (float): BBox padding factor that will be multiplied to scale. Default: 1.0 Returns: ndarray[float32]: BBox (x, y, w, h) in shape (4, ) or (n, 4)
Here is the function:
def bbox_cs2xywh(center: np.ndarray,
                 scale: np.ndarray,
                 padding: float = 1.) -> np.ndarray:
    """Convert (center, scale) boxes back to (x, y, w, h).

    Args:
        center (ndarray): BBox center (x, y) in shape (2,) or (n, 2)
        scale (ndarray): BBox scale (w, h) in shape (2,) or (n, 2)
        padding (float): Padding factor that was multiplied onto the scale.
            Default: 1.0

    Returns:
        ndarray[float32]: BBox (x, y, w, h) in shape (4,) or (n, 4)
    """
    single = center.ndim == 1
    assert scale.ndim == center.ndim
    if single:
        center = center[None, :]
        scale = scale[None, :]

    wh = scale / padding          # undo the padding factor
    xy = center - 0.5 * wh        # top-left corner
    result = np.hstack((xy, wh))

    return result[0] if single else result
159,151 | import math
from typing import Tuple
import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `flip_bbox` function. Write a Python function `def flip_bbox(bbox: np.ndarray, image_size: Tuple[int, int], bbox_format: str = 'xywh', direction: str = 'horizontal') -> np.ndarray` to solve the following problem:
Flip the bbox in the given direction. Args: bbox (np.ndarray): The bounding boxes. The shape should be (..., 4) if ``bbox_format`` is ``'xyxy'`` or ``'xywh'``, and (..., 2) if ``bbox_format`` is ``'center'`` image_size (tuple): The image shape in [w, h] bbox_format (str): The bbox format. Options are ``'xywh'``, ``'xyxy'`` and ``'center'``. direction (str): The flip direction. Options are ``'horizontal'``, ``'vertical'`` and ``'diagonal'``. Defaults to ``'horizontal'`` Returns: np.ndarray: The flipped bounding boxes.
Here is the function:
def flip_bbox(bbox: np.ndarray,
              image_size: Tuple[int, int],
              bbox_format: str = 'xywh',
              direction: str = 'horizontal') -> np.ndarray:
    """Flip the bbox in the given direction.

    Args:
        bbox (np.ndarray): The bounding boxes. The shape should be (..., 4)
            if ``bbox_format`` is ``'xyxy'`` or ``'xywh'``, and (..., 2) if
            ``bbox_format`` is ``'center'``
        image_size (tuple): The image shape in [w, h]
        bbox_format (str): The bbox format. Options are ``'xywh'``, ``'xyxy'``
            and ``'center'``.
        direction (str): The flip direction. Options are ``'horizontal'``,
            ``'vertical'`` and ``'diagonal'``. Defaults to ``'horizontal'``

    Returns:
        np.ndarray: The flipped bounding boxes.
    """
    direction_options = {'horizontal', 'vertical', 'diagonal'}
    assert direction in direction_options, (
        f'Invalid flipping direction "{direction}". '
        f'Options are {direction_options}')

    format_options = {'xywh', 'xyxy', 'center'}
    assert bbox_format in format_options, (
        f'Invalid bbox format "{bbox_format}". '
        f'Options are {format_options}')

    flipped = bbox.copy()
    w, h = image_size
    # 'xywh' and 'center' carry a single reference point; only that point
    # moves under a flip (box extents are unchanged).
    point_like = bbox_format in ('xywh', 'center')

    # TODO: consider using "integer corner" coordinate system
    if direction == 'horizontal':
        if point_like:
            flipped[..., 0] = w - bbox[..., 0] - 1
        else:  # 'xyxy': mirror both x coordinates
            flipped[..., ::2] = w - bbox[..., ::2] - 1
    elif direction == 'vertical':
        if point_like:
            flipped[..., 1] = h - bbox[..., 1] - 1
        else:  # 'xyxy': mirror both y coordinates
            flipped[..., 1::2] = h - bbox[..., 1::2] - 1
    else:  # 'diagonal': mirror both axes
        if point_like:
            flipped[..., :2] = [w, h] - bbox[..., :2] - 1
        else:  # 'xyxy'
            flipped[...] = [w, h, w, h] - bbox - 1

    return flipped
159,152 | import math
from typing import Tuple
import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `get_udp_warp_matrix` function. Write a Python function `def get_udp_warp_matrix( center: np.ndarray, scale: np.ndarray, rot: float, output_size: Tuple[int, int], ) -> np.ndarray` to solve the following problem:
Calculate the affine transformation matrix under the unbiased constraint. See `UDP (CVPR 2020)`_ for details. Note: - The bbox number: N Args: center (np.ndarray[2, ]): Center of the bounding box (x, y). scale (np.ndarray[2, ]): Scale of the bounding box wrt [width, height]. rot (float): Rotation angle (degree). output_size (tuple): Size ([w, h]) of the output image Returns: np.ndarray: A 2x3 transformation matrix .. _`UDP (CVPR 2020)`: https://arxiv.org/abs/1911.07524
Here is the function:
def get_udp_warp_matrix(
    center: np.ndarray,
    scale: np.ndarray,
    rot: float,
    output_size: Tuple[int, int],
) -> np.ndarray:
    """Calculate the affine transformation matrix under the unbiased
    constraint. See `UDP (CVPR 2020)`_ for details.

    Note:

        - The bbox number: N

    Args:
        center (np.ndarray[2, ]): Center of the bounding box (x, y).
        scale (np.ndarray[2, ]): Scale of the bounding box
            wrt [width, height].
        rot (float): Rotation angle (degree).
        output_size (tuple): Size ([w, h]) of the output image

    Returns:
        np.ndarray: A 2x3 transformation matrix

    .. _`UDP (CVPR 2020)`: https://arxiv.org/abs/1911.07524
    """
    assert len(center) == 2
    assert len(scale) == 2
    assert len(output_size) == 2

    # In the UDP formulation the source extent is derived from the bbox
    # center rather than read from the image itself.
    input_size = center * 2
    rot_rad = np.deg2rad(rot)
    cos_r = math.cos(rot_rad)
    sin_r = math.sin(rot_rad)

    # "Unbiased" scaling maps the (output_size - 1) pixel span onto the bbox.
    scale_x = (output_size[0] - 1) / scale[0]
    scale_y = (output_size[1] - 1) / scale[1]

    # Translation terms recenter the rotated bbox onto the output canvas.
    tx = scale_x * (-0.5 * input_size[0] * cos_r +
                    0.5 * input_size[1] * sin_r +
                    0.5 * scale[0])
    ty = scale_y * (-0.5 * input_size[0] * sin_r -
                    0.5 * input_size[1] * cos_r +
                    0.5 * scale[1])

    warp_mat = np.array(
        [[cos_r * scale_x, -sin_r * scale_x, tx],
         [sin_r * scale_y, cos_r * scale_y, ty]],
        dtype=np.float32)
    return warp_mat
159,153 | from typing import List, Optional, Tuple
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `flip_keypoints` function. Write a Python function `def flip_keypoints(keypoints: np.ndarray, keypoints_visible: Optional[np.ndarray], image_size: Tuple[int, int], flip_indices: List[int], direction: str = 'horizontal' ) -> Tuple[np.ndarray, Optional[np.ndarray]]` to solve the following problem:
Flip keypoints in the given direction. Note: - keypoint number: K - keypoint dimension: D Args: keypoints (np.ndarray): Keypoints in shape (..., K, D) keypoints_visible (np.ndarray, optional): The visibility of keypoints in shape (..., K, 1). Set ``None`` if the keypoint visibility is unavailable image_size (tuple): The image shape in [w, h] flip_indices (List[int]): The indices of each keypoint's symmetric keypoint direction (str): The flip direction. Options are ``'horizontal'``, ``'vertical'`` and ``'diagonal'``. Defaults to ``'horizontal'`` Returns: tuple: - keypoints_flipped (np.ndarray): Flipped keypoints in shape (..., K, D) - keypoints_visible_flipped (np.ndarray, optional): Flipped keypoints' visibility in shape (..., K, 1). Return ``None`` if the input ``keypoints_visible`` is ``None``
Here is the function:
def flip_keypoints(keypoints: np.ndarray,
                   keypoints_visible: Optional[np.ndarray],
                   image_size: Tuple[int, int],
                   flip_indices: List[int],
                   direction: str = 'horizontal'
                   ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
    """Flip keypoints in the given direction.

    Note:
        - keypoint number: K
        - keypoint dimension: D

    Args:
        keypoints (np.ndarray): Keypoints in shape (..., K, D)
        keypoints_visible (np.ndarray, optional): The visibility of keypoints
            in shape (..., K, 1). Set ``None`` if the keypoint visibility is
            unavailable
        image_size (tuple): The image shape in [w, h]
        flip_indices (List[int]): The indices of each keypoint's symmetric
            keypoint
        direction (str): The flip direction. Options are ``'horizontal'``,
            ``'vertical'`` and ``'diagonal'``. Defaults to ``'horizontal'``

    Returns:
        tuple:
        - keypoints_flipped (np.ndarray): Flipped keypoints in shape
            (..., K, D)
        - keypoints_visible_flipped (np.ndarray, optional): Flipped keypoints'
            visibility in shape (..., K, 1). Return ``None`` if the input
            ``keypoints_visible`` is ``None``
    """
    # Bugfix: the shape check used to run unconditionally, which raised
    # AttributeError when ``keypoints_visible`` is None even though the
    # documented contract explicitly allows None.
    if keypoints_visible is not None:
        assert keypoints.shape[:-1] == keypoints_visible.shape, (
            f'Mismatched shapes of keypoints {keypoints.shape} and '
            f'keypoints_visible {keypoints_visible.shape}')

    direction_options = {'horizontal', 'vertical', 'diagonal'}
    assert direction in direction_options, (
        f'Invalid flipping direction "{direction}". '
        f'Options are {direction_options}')

    # swap the symmetric keypoint pairs
    if direction == 'horizontal' or direction == 'vertical':
        keypoints = keypoints[..., flip_indices, :]
        if keypoints_visible is not None:
            keypoints_visible = keypoints_visible[..., flip_indices]

    # flip the keypoints
    w, h = image_size
    if direction == 'horizontal':
        keypoints[..., 0] = w - 1 - keypoints[..., 0]
    elif direction == 'vertical':
        keypoints[..., 1] = h - 1 - keypoints[..., 1]
    else:
        # 'diagonal' flips both axes; symmetric pairs are intentionally not
        # swapped in this branch (matches the pre-existing behavior).
        keypoints = [w, h] - keypoints - 1

    return keypoints, keypoints_visible
159,154 | from typing import List, Optional, Tuple
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `flip_keypoints_custom_center` function. Write a Python function `def flip_keypoints_custom_center(keypoints: np.ndarray, keypoints_visible: np.ndarray, flip_indices: List[int], center_mode: str = 'static', center_x: float = 0.5, center_index: int = 0)` to solve the following problem:
Flip human joints horizontally. Note: - num_keypoint: K - dimension: D Args: keypoints (np.ndarray([..., K, D])): Coordinates of keypoints. keypoints_visible (np.ndarray([..., K])): Visibility item of keypoints. flip_indices (list[int]): The indices to flip the keypoints. center_mode (str): The mode to set the center location on the x-axis to flip around. Options are: - static: use a static x value (see center_x also) - root: use a root joint (see center_index also) Defaults: ``'static'``. center_x (float): Set the x-axis location of the flip center. Only used when ``center_mode`` is ``'static'``. Defaults: 0.5. center_index (int): Set the index of the root joint, whose x location will be used as the flip center. Only used when ``center_mode`` is ``'root'``. Defaults: 0. Returns: np.ndarray([..., K, C]): Flipped joints.
Here is the function:
def flip_keypoints_custom_center(keypoints: np.ndarray,
                                 keypoints_visible: np.ndarray,
                                 flip_indices: List[int],
                                 center_mode: str = 'static',
                                 center_x: float = 0.5,
                                 center_index: int = 0):
    """Flip human joints horizontally.

    Note:
        - num_keypoint: K
        - dimension: D

    Args:
        keypoints (np.ndarray([..., K, D])): Coordinates of keypoints.
        keypoints_visible (np.ndarray([..., K])): Visibility item of keypoints.
        flip_indices (list[int]): The indices to flip the keypoints.
        center_mode (str): The mode to set the center location on the x-axis
            to flip around. Options are:

            - static: use a static x value (see center_x also)
            - root: use a root joint (see center_index also)

            Defaults: ``'static'``.
        center_x (float): Set the x-axis location of the flip center. Only used
            when ``center_mode`` is ``'static'``. Defaults: 0.5.
        center_index (int): Set the index of the root joint, whose x location
            will be used as the flip center. Only used when ``center_mode`` is
            ``'root'``. Defaults: 0.

    Returns:
        np.ndarray([..., K, C]): Flipped joints.
    """
    assert keypoints.ndim >= 2, f'Invalid pose shape {keypoints.shape}'

    allowed_center_mode = {'static', 'root'}
    assert center_mode in allowed_center_mode, 'Get invalid center_mode ' \
        f'{center_mode}, allowed choices are {allowed_center_mode}'

    # Resolve the x coordinate of the vertical flip axis.
    if center_mode == 'static':
        flip_axis_x = center_x
    else:  # 'root': read it from the (un-flipped) root joint
        assert keypoints.shape[-2] > center_index
        flip_axis_x = keypoints[..., center_index, 0]

    flipped = keypoints.copy()
    flipped_visible = keypoints_visible.copy()

    # Swap left-right symmetric parts in one vectorized assignment
    # (equivalent to assigning flip_indices[i] -> i one index at a time).
    num_mapped = len(flip_indices)
    flipped[..., :num_mapped, :] = keypoints[..., flip_indices, :]
    flipped_visible[..., :num_mapped] = keypoints_visible[..., flip_indices]

    # Mirror the x coordinates about the flip axis.
    flipped[..., 0] = flip_axis_x * 2 - flipped[..., 0]

    return flipped, flipped_visible
159,155 | import warnings
from typing import List
import cv2
import numpy as np
import torch
from mmengine.structures import InstanceData, PixelData
from mmengine.utils import is_list_of
from .bbox.transforms import get_warp_matrix
from .pose_data_sample import PoseDataSample
def revert_heatmap(heatmap, bbox_center, bbox_scale, img_shape):
    """Revert predicted heatmap on the original image.

    Args:
        heatmap (np.ndarray or torch.tensor): predicted heatmap.
        bbox_center (np.ndarray): bounding box center coordinate.
        bbox_scale (np.ndarray): bounding box scale.
        img_shape (tuple or list): size of original image.
    """
    if torch.is_tensor(heatmap):
        heatmap = heatmap.cpu().detach().numpy()

    # cv2.warpAffine expects channel-last layout: [K, H, W] -> [H, W, K]
    channel_first = heatmap.ndim == 3
    if channel_first:
        heatmap = heatmap.transpose(1, 2, 0)

    hm_h, hm_w = heatmap.shape[:2]
    img_h, img_w = img_shape
    # Inverse warp maps heatmap coordinates back to original-image space.
    warp_mat = get_warp_matrix(
        bbox_center.reshape((2, )),
        bbox_scale.reshape((2, )),
        rot=0,
        output_size=(hm_w, hm_h),
        inv=True)

    heatmap = cv2.warpAffine(
        heatmap, warp_mat, (img_w, img_h), flags=cv2.INTER_LINEAR)

    # Restore channel-first layout: [H, W, K] -> [K, H, W]
    if channel_first:
        heatmap = heatmap.transpose(2, 0, 1)

    return heatmap
class PoseDataSample(BaseDataElement):
    """The base data structure of MMPose that is used as the interface between
    modules.

    The attributes of ``PoseDataSample`` includes:

        - ``gt_instances``(InstanceData): Ground truth of instances with
            keypoint annotations
        - ``pred_instances``(InstanceData): Instances with keypoint
            predictions
        - ``gt_fields``(PixelData): Ground truth of spatial distribution
            annotations like keypoint heatmaps and part affine fields (PAF)
        - ``pred_fields``(PixelData): Predictions of spatial distributions

    Examples:
        >>> import torch
        >>> from mmengine.structures import InstanceData, PixelData
        >>> from mmpose.structures import PoseDataSample

        >>> pose_meta = dict(img_shape=(800, 1216),
        ...                  crop_size=(256, 192),
        ...                  heatmap_size=(64, 48))
        >>> gt_instances = InstanceData()
        >>> gt_instances.bboxes = torch.rand((1, 4))
        >>> gt_instances.keypoints = torch.rand((1, 17, 2))
        >>> gt_instances.keypoints_visible = torch.rand((1, 17, 1))
        >>> gt_fields = PixelData()
        >>> gt_fields.heatmaps = torch.rand((17, 64, 48))
        >>> data_sample = PoseDataSample(gt_instances=gt_instances,
        ...                              gt_fields=gt_fields,
        ...                              metainfo=pose_meta)
        >>> assert 'img_shape' in data_sample
        >>> len(data_sample.gt_instances)
        1
    """
    # NOTE(review): every attribute below appears as three same-named defs
    # (getter / setter / deleter). The ``@property`` / ``@<name>.setter`` /
    # ``@<name>.deleter`` decorators look like they were stripped during
    # extraction -- as written, each later ``def`` merely shadows the earlier
    # one. Confirm against the upstream mmpose source before relying on this.
    # NOTE(review): ``BaseDataElement``, ``Union`` and ``MultilevelPixelData``
    # are referenced but not imported in the visible header -- presumably lost
    # in the same extraction; verify the original import block.

    # --- gt_instances: per-instance ground-truth keypoint annotations ---
    def gt_instances(self) -> InstanceData:
        return self._gt_instances

    def gt_instances(self, value: InstanceData):
        self.set_field(value, '_gt_instances', dtype=InstanceData)

    def gt_instances(self):
        del self._gt_instances

    # --- gt_instance_labels: per-instance ground-truth labels ---
    def gt_instance_labels(self) -> InstanceData:
        return self._gt_instance_labels

    def gt_instance_labels(self, value: InstanceData):
        self.set_field(value, '_gt_instance_labels', dtype=InstanceData)

    def gt_instance_labels(self):
        del self._gt_instance_labels

    # --- pred_instances: per-instance keypoint predictions ---
    def pred_instances(self) -> InstanceData:
        return self._pred_instances

    def pred_instances(self, value: InstanceData):
        self.set_field(value, '_pred_instances', dtype=InstanceData)

    def pred_instances(self):
        del self._pred_instances

    # --- gt_fields: ground-truth dense fields (heatmaps etc.); the setter
    #     stores with dtype=type(value) so both PixelData and
    #     MultilevelPixelData are accepted ---
    def gt_fields(self) -> Union[PixelData, MultilevelPixelData]:
        return self._gt_fields

    def gt_fields(self, value: Union[PixelData, MultilevelPixelData]):
        self.set_field(value, '_gt_fields', dtype=type(value))

    def gt_fields(self):
        del self._gt_fields

    # --- pred_fields: predicted dense fields; note the backing attribute is
    #     named ``_pred_heatmaps``, unlike the other fields ---
    def pred_fields(self) -> PixelData:
        return self._pred_heatmaps

    def pred_fields(self, value: PixelData):
        self.set_field(value, '_pred_heatmaps', dtype=PixelData)

    def pred_fields(self):
        del self._pred_heatmaps
The provided code snippet includes necessary dependencies for implementing the `merge_data_samples` function. Write a Python function `def merge_data_samples(data_samples: List[PoseDataSample]) -> PoseDataSample` to solve the following problem:
Merge the given data samples into a single data sample. This function can be used to merge the top-down predictions with bboxes from the same image. The merged data sample will contain all instances from the input data samples, and the identical metainfo with the first input data sample. Args: data_samples (List[:obj:`PoseDataSample`]): The data samples to merge Returns: PoseDataSample: The merged data sample.
Here is the function:
def _revert_and_merge_heatmaps(data_samples, field_key):
    """Revert each sample's heatmaps to original-image space and take the
    element-wise maximum across samples.

    ``field_key`` is ``'pred_fields'`` or ``'gt_fields'``.
    """
    reverted_heatmaps = [
        revert_heatmap(
            getattr(data_sample, field_key).heatmaps,
            data_sample.gt_instances.bbox_centers,
            data_sample.gt_instances.bbox_scales,
            data_sample.ori_shape) for data_sample in data_samples
    ]
    return np.max(reverted_heatmaps, axis=0)


def merge_data_samples(data_samples: List[PoseDataSample]) -> PoseDataSample:
    """Merge the given data samples into a single data sample.

    This function can be used to merge the top-down predictions with
    bboxes from the same image. The merged data sample will contain all
    instances from the input data samples, and the identical metainfo with
    the first input data sample.

    Args:
        data_samples (List[:obj:`PoseDataSample`]): The data samples to
            merge

    Returns:
        PoseDataSample: The merged data sample.

    Raises:
        ValueError: If ``data_samples`` is not a list of
            :obj:`PoseDataSample`.
    """
    if not is_list_of(data_samples, PoseDataSample):
        raise ValueError('Invalid input type, should be a list of '
                         ':obj:`PoseDataSample`')

    if len(data_samples) == 0:
        warnings.warn('Try to merge an empty list of data samples.')
        return PoseDataSample()

    # Metainfo is taken from the first sample only.
    merged = PoseDataSample(metainfo=data_samples[0].metainfo)

    if 'gt_instances' in data_samples[0]:
        merged.gt_instances = InstanceData.cat(
            [d.gt_instances for d in data_samples])

    if 'pred_instances' in data_samples[0]:
        merged.pred_instances = InstanceData.cat(
            [d.pred_instances for d in data_samples])

    # Heatmap fields from different bboxes are warped back to the original
    # image and fused by element-wise maximum (shared helper removes the
    # previous copy-pasted logic for pred/gt fields).
    if 'pred_fields' in data_samples[0] and 'heatmaps' in data_samples[
            0].pred_fields:
        pred_fields = PixelData()
        pred_fields.set_data(
            dict(heatmaps=_revert_and_merge_heatmaps(data_samples,
                                                     'pred_fields')))
        merged.pred_fields = pred_fields

    if 'gt_fields' in data_samples[0] and 'heatmaps' in data_samples[
            0].gt_fields:
        gt_fields = PixelData()
        gt_fields.set_data(
            dict(heatmaps=_revert_and_merge_heatmaps(data_samples,
                                                     'gt_fields')))
        merged.gt_fields = gt_fields

    return merged
159,156 | import warnings
from typing import List
import cv2
import numpy as np
import torch
from mmengine.structures import InstanceData, PixelData
from mmengine.utils import is_list_of
from .bbox.transforms import get_warp_matrix
from .pose_data_sample import PoseDataSample
The provided code snippet includes necessary dependencies for implementing the `split_instances` function. Write a Python function `def split_instances(instances: InstanceData) -> List[InstanceData]` to solve the following problem:
Convert instances into a list where each element is a dict that contains information about one instance.
Here is the function:
def split_instances(instances: InstanceData) -> List[dict]:
    """Convert instances into a list where each element is a dict that
    contains information about one instance.

    Args:
        instances (InstanceData): Batched instance results, or ``None`` when
            the model detected nothing.

    Returns:
        List[dict]: One dict per instance with JSON-serializable values:
        ``keypoints``, ``keypoint_scores`` and, when present on the input,
        ``bbox`` and ``bbox_score``. (The return annotation previously said
        ``List[InstanceData]``, which did not match the actual dicts.)
    """
    results = []

    # return an empty list if there is no instance detected by the model
    if instances is None:
        return results

    for i in range(len(instances.keypoints)):
        result = dict(
            keypoints=instances.keypoints[i].tolist(),
            keypoint_scores=instances.keypoint_scores[i].tolist(),
        )
        if 'bboxes' in instances:
            # Bugfix: a stray trailing comma used to wrap the list in a
            # one-element tuple, serializing as ``([x1, y1, x2, y2],)``.
            result['bbox'] = instances.bboxes[i].tolist()
        if 'bbox_scores' in instances:
            result['bbox_score'] = instances.bbox_scores[i]
        results.append(result)

    return results
159,157 | import numpy as np
import torch
from mmengine.dataset import Compose, pseudo_collate
from mmengine.registry import init_default_scope
from mmengine.structures import InstanceData
from mmpose.structures import PoseDataSample
The provided code snippet includes necessary dependencies for implementing the `convert_keypoint_definition` function. Write a Python function `def convert_keypoint_definition(keypoints, pose_det_dataset, pose_lift_dataset)` to solve the following problem:
Convert pose det dataset keypoints definition to pose lifter dataset keypoints definition, so that they are compatible with the definitions required for 3D pose lifting. Args: keypoints (ndarray[N, K, 2 or 3]): 2D keypoints to be transformed. pose_det_dataset (str): Name of the dataset for 2D pose detector. pose_lift_dataset (str): Name of the dataset for pose lifter model. Returns: ndarray[N, K, 2 or 3]: the transformed 2D keypoints.
Here is the function:
def convert_keypoint_definition(keypoints, pose_det_dataset,
                                pose_lift_dataset):
    """Convert pose det dataset keypoints definition to pose lifter dataset
    keypoints definition, so that they are compatible with the definitions
    required for 3D pose lifting.

    Args:
        keypoints (ndarray[N, K, 2 or 3]): 2D keypoints to be transformed.
        pose_det_dataset (str): Name of the dataset for 2D pose detector.
        pose_lift_dataset (str): Name of the dataset for pose lifter model.

    Returns:
        ndarray[N, 17, 2 or 3]: the transformed 2D keypoints.
    """
    assert pose_lift_dataset in [
        'Human36mDataset'], '`pose_lift_dataset` should be ' \
        f'`Human36mDataset`, but got {pose_lift_dataset}.'

    # Detector input already matches the H36M layout: nothing to convert.
    if pose_det_dataset in ['Human36mDataset']:
        return keypoints

    coco_style_datasets = [
        'CocoDataset', 'PoseTrack18VideoDataset', 'PoseTrack18Dataset'
    ]
    out = np.zeros((keypoints.shape[0], 17, keypoints.shape[2]),
                   dtype=keypoints.dtype)

    if pose_det_dataset in coco_style_datasets:
        # pelvis (root) is the midpoint of l_hip and r_hip
        out[:, 0] = (keypoints[:, 11] + keypoints[:, 12]) / 2
        # thorax is the midpoint of l_shoulder and r_shoulder
        out[:, 8] = (keypoints[:, 5] + keypoints[:, 6]) / 2
        # spine is the midpoint of thorax and pelvis
        out[:, 7] = (out[:, 0] + out[:, 8]) / 2
        # in COCO, head is the midpoint of l_eye and r_eye;
        # in PoseTrack18, head is the midpoint of head_bottom and head_top
        out[:, 10] = (keypoints[:, 1] + keypoints[:, 2]) / 2
        # rearrange other keypoints
        out[:, [1, 2, 3, 4, 5, 6, 9, 11, 12, 13, 14, 15, 16]] = \
            keypoints[:, [12, 14, 16, 11, 13, 15, 0, 5, 7, 9, 6, 8, 10]]
    elif pose_det_dataset in ['AicDataset']:
        # pelvis (root) is the midpoint of l_hip and r_hip
        out[:, 0] = (keypoints[:, 9] + keypoints[:, 6]) / 2
        # thorax is the midpoint of l_shoulder and r_shoulder
        out[:, 8] = (keypoints[:, 3] + keypoints[:, 0]) / 2
        # spine is the midpoint of thorax and pelvis
        out[:, 7] = (out[:, 0] + out[:, 8]) / 2
        # neck base (top end of neck) is 1/4 the way from
        # neck (bottom end of neck) to head top
        out[:, 9] = (3 * keypoints[:, 13] + keypoints[:, 12]) / 4
        # head (spherical centre of head) is 7/12 the way from
        # neck (bottom end of neck) to head top
        out[:, 10] = (5 * keypoints[:, 13] + 7 * keypoints[:, 12]) / 12
        # rearrange other keypoints
        out[:, [1, 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16]] = \
            keypoints[:, [6, 7, 8, 9, 10, 11, 3, 4, 5, 0, 1, 2]]
    elif pose_det_dataset in ['CrowdPoseDataset']:
        # pelvis (root) is the midpoint of l_hip and r_hip
        out[:, 0] = (keypoints[:, 6] + keypoints[:, 7]) / 2
        # thorax is the midpoint of l_shoulder and r_shoulder
        out[:, 8] = (keypoints[:, 0] + keypoints[:, 1]) / 2
        # spine is the midpoint of thorax and pelvis
        out[:, 7] = (out[:, 0] + out[:, 8]) / 2
        # neck base (top end of neck) is 1/4 the way from
        # neck (bottom end of neck) to head top
        out[:, 9] = (3 * keypoints[:, 13] + keypoints[:, 12]) / 4
        # head (spherical centre of head) is 7/12 the way from
        # neck (bottom end of neck) to head top
        out[:, 10] = (5 * keypoints[:, 13] + 7 * keypoints[:, 12]) / 12
        # rearrange other keypoints
        out[:, [1, 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16]] = \
            keypoints[:, [7, 9, 11, 6, 8, 10, 0, 2, 4, 1, 3, 5]]
    else:
        raise NotImplementedError(
            f'unsupported conversion between {pose_lift_dataset} and '
            f'{pose_det_dataset}')

    return out
159,158 | import numpy as np
import torch
from mmengine.dataset import Compose, pseudo_collate
from mmengine.registry import init_default_scope
from mmengine.structures import InstanceData
from mmpose.structures import PoseDataSample
The provided code snippet includes necessary dependencies for implementing the `extract_pose_sequence` function. Write a Python function `def extract_pose_sequence(pose_results, frame_idx, causal, seq_len, step=1)` to solve the following problem:
Extract the target frame from 2D pose results, and pad the sequence to a fixed length. Args: pose_results (List[List[:obj:`PoseDataSample`]]): Multi-frame pose detection results stored in a list. frame_idx (int): The index of the frame in the original video. causal (bool): If True, the target frame is the last frame in a sequence. Otherwise, the target frame is in the middle of a sequence. seq_len (int): The number of frames in the input sequence. step (int): Step size to extract frames from the video. Returns: List[List[:obj:`PoseDataSample`]]: Multi-frame pose detection results stored in a nested list with a length of seq_len.
Here is the function:
def extract_pose_sequence(pose_results, frame_idx, causal, seq_len, step=1):
    """Extract the target frame from 2D pose results, and pad the sequence to a
    fixed length.

    Args:
        pose_results (List[List[:obj:`PoseDataSample`]]): Multi-frame pose
            detection results stored in a list.
        frame_idx (int): The index of the frame in the original video.
        causal (bool): If True, the target frame is the last frame in
            a sequence. Otherwise, the target frame is in the middle of
            a sequence.
        seq_len (int): The number of frames in the input sequence.
        step (int): Step size to extract frames from the video.

    Returns:
        List[List[:obj:`PoseDataSample`]]: Multi-frame pose detection results
        stored in a nested list with a length of seq_len.
    """
    # Number of context frames wanted on each side of the target frame.
    if causal:
        left_ctx, right_ctx = seq_len - 1, 0
    else:
        left_ctx = (seq_len - 1) // 2
        right_ctx = left_ctx

    total = len(pose_results)

    # Frames missing on either side are padded by replicating the edge frame.
    pad_left = max(0, left_ctx - frame_idx // step)
    pad_right = max(0, right_ctx - (total - 1 - frame_idx) // step)

    # Slice bounds that keep the stride phase aligned with frame_idx.
    begin = max(frame_idx % step, frame_idx - left_ctx * step)
    stop = min(total - (total - 1 - frame_idx) % step,
               frame_idx + right_ctx * step + 1)

    return ([pose_results[0]] * pad_left +
            pose_results[begin:stop:step] +
            [pose_results[-1]] * pad_right)
159,159 | import numpy as np
import torch
from mmengine.dataset import Compose, pseudo_collate
from mmengine.registry import init_default_scope
from mmengine.structures import InstanceData
from mmpose.structures import PoseDataSample
def collate_pose_sequence(pose_results_2d,
                          with_track_id=True,
                          target_frame=-1):
    """Reorganize multi-frame pose detection results into individual pose
    sequences.

    Note:
        - The temporal length of the pose detection results: T
        - The number of the person instances: N
        - The number of the keypoints: K
        - The channel number of each keypoint: C

    Args:
        pose_results_2d (List[List[:obj:`PoseDataSample`]]): Multi-frame pose
            detection results stored in a nested list. Each element of the
            outer list is the pose detection results of a single frame, and
            each element of the inner list is the pose information of one
            person, which contains:

                - keypoints (ndarray[K, 2 or 3]): x, y, [score]
                - track_id (int): unique id of each person, required when
                    ``with_track_id==True``

        with_track_id (bool): If True, the element in pose_results is expected
            to contain "track_id", which will be used to gather the pose
            sequence of a person from multiple frames. Otherwise, the pose
            results in each frame are expected to have a consistent number and
            order of identities. Default is True.
        target_frame (int): The index of the target frame. Default: -1.

    Returns:
        List[:obj:`PoseDataSample`]: Individual pose sequences, one per
        person in the target frame (length N).
    """
    T = len(pose_results_2d)
    assert T > 0
    target_frame = (T + target_frame) % T  # convert negative index to positive
    N = len(
        pose_results_2d[target_frame])  # use identities in the target frame
    if N == 0:
        return []
    # Shape of one person's keypoint array in the target frame. B is the
    # leading instance dimension of that array (presumably 1 -- verify
    # against the upstream caller).
    B, K, C = pose_results_2d[target_frame][0].pred_instances.keypoints.shape
    track_ids = None
    if with_track_id:
        # NOTE(review): ``track_id`` is read directly off the data sample, so
        # an upstream tracking step must have attached it.
        track_ids = [res.track_id for res in pose_results_2d[target_frame]]
    pose_sequences = []
    for idx in range(N):
        pose_seq = PoseDataSample()
        gt_instances = InstanceData()
        pred_instances = InstanceData()
        # Copy the target frame's fields; ``keypoints`` is excluded because
        # it is rebuilt below as a temporal stack over all frames.
        for k in pose_results_2d[target_frame][idx].gt_instances.keys():
            gt_instances.set_field(
                pose_results_2d[target_frame][idx].gt_instances[k], k)
        for k in pose_results_2d[target_frame][idx].pred_instances.keys():
            if k != 'keypoints':
                pred_instances.set_field(
                    pose_results_2d[target_frame][idx].pred_instances[k], k)
        pose_seq.pred_instances = pred_instances
        pose_seq.gt_instances = gt_instances
        if not with_track_id:
            # Identities are assumed aligned across frames: stack the idx-th
            # person of every frame along a new temporal axis.
            pose_seq.pred_instances.keypoints = np.stack([
                frame[idx].pred_instances.keypoints
                for frame in pose_results_2d
            ],
                                                         axis=1)
        else:
            # Gather this track across frames; frames where the track is
            # absent are filled by replicating the nearest present frame.
            keypoints = np.zeros((B, T, K, C), dtype=np.float32)
            keypoints[:, target_frame] = pose_results_2d[target_frame][
                idx].pred_instances.keypoints
            # find the left most frame containing track_ids[idx]
            for frame_idx in range(target_frame - 1, -1, -1):
                contains_idx = False
                for res in pose_results_2d[frame_idx]:
                    if res.track_id == track_ids[idx]:
                        keypoints[:, frame_idx] = res.pred_instances.keypoints
                        contains_idx = True
                        break
                if not contains_idx:
                    # replicate the left most frame
                    keypoints[:, :frame_idx + 1] = keypoints[:, frame_idx + 1]
                    break
            # find the right most frame containing track_idx[idx]
            for frame_idx in range(target_frame + 1, T):
                contains_idx = False
                for res in pose_results_2d[frame_idx]:
                    if res.track_id == track_ids[idx]:
                        keypoints[:, frame_idx] = res.pred_instances.keypoints
                        contains_idx = True
                        break
                if not contains_idx:
                    # replicate the right most frame
                    keypoints[:, frame_idx + 1:] = keypoints[:, frame_idx]
                    break
            pose_seq.pred_instances.keypoints = keypoints
        pose_sequences.append(pose_seq)
    return pose_sequences
The provided code snippet includes necessary dependencies for implementing the `inference_pose_lifter_model` function. Write a Python function `def inference_pose_lifter_model(model, pose_results_2d, with_track_id=True, image_size=None, norm_pose_2d=False)` to solve the following problem:
Inference 3D pose from 2D pose sequences using a pose lifter model. Args: model (nn.Module): The loaded pose lifter model pose_results_2d (List[List[:obj:`PoseDataSample`]]): The 2D pose sequences stored in a nested list. with_track_id: If True, the element in pose_results_2d is expected to contain "track_id", which will be used to gather the pose sequence of a person from multiple frames. Otherwise, the pose results in each frame are expected to have a consistent number and order of identities. Default is True. image_size (tuple|list): image width, image height. If None, image size will not be contained in dict ``data``. norm_pose_2d (bool): If True, scale the bbox (along with the 2D pose) to the average bbox scale of the dataset, and move the bbox (along with the 2D pose) to the average bbox center of the dataset. Returns: List[:obj:`PoseDataSample`]: 3D pose inference results. Specifically, the predicted keypoints and scores are saved at ``data_sample.pred_instances.keypoints_3d``.
Here is the function:
def inference_pose_lifter_model(model,
                                pose_results_2d,
                                with_track_id=True,
                                image_size=None,
                                norm_pose_2d=False):
    """Inference 3D pose from 2D pose sequences using a pose lifter model.

    Args:
        model (nn.Module): The loaded pose lifter model.
        pose_results_2d (List[List[:obj:`PoseDataSample`]]): The 2D pose
            sequences stored in a nested list. NOTE: the 2D keypoints of
            these samples are overwritten in place during pre-processing.
        with_track_id (bool): If True, the elements in ``pose_results_2d``
            are expected to contain "track_id", which will be used to gather
            the pose sequence of a person from multiple frames. Otherwise,
            the pose results in each frame are expected to have a consistent
            number and order of identities. Defaults to True.
        image_size (tuple | list, optional): (width, height) of the image.
            If None, no camera parameter will be contained in the data.
        norm_pose_2d (bool): If True, scale the bbox (along with the 2D
            pose) to the average bbox scale of the dataset, and move the bbox
            (along with the 2D pose) to the average bbox center of the
            dataset. Requires ``stats_info`` in ``model.dataset_meta``.

    Returns:
        List[:obj:`PoseDataSample`]: 3D pose inference results. Specifically,
        the predicted keypoints and scores are saved at
        ``data_sample.pred_instances.keypoints_3d``.
    """
    init_default_scope(model.cfg.get('default_scope', 'mmpose'))
    pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline)

    # For a causal model only past frames may be used, so the target is the
    # last frame; otherwise the center frame of the sequence is lifted.
    causal = model.cfg.test_dataloader.dataset.get('causal', False)
    target_idx = -1 if causal else len(pose_results_2d) // 2

    # Fix: initialize the normalization statistics unconditionally.
    # Previously, when ``model.dataset_meta`` was None, ``bbox_center`` and
    # ``bbox_scale`` were never assigned, and ``norm_pose_2d=True`` raised a
    # NameError instead of a meaningful failure.
    bbox_center = None
    bbox_scale = None
    dataset_info = model.dataset_meta
    if dataset_info is not None and 'stats_info' in dataset_info:
        bbox_center = dataset_info['stats_info']['bbox_center']
        bbox_scale = dataset_info['stats_info']['bbox_scale']

    for i, pose_res in enumerate(pose_results_2d):
        for j, data_sample in enumerate(pose_res):
            kpts = data_sample.pred_instances.keypoints
            bboxes = data_sample.pred_instances.bboxes
            keypoints = []
            for k in range(len(kpts)):
                kpt = kpts[k]
                if norm_pose_2d:
                    # Re-normalize the 2D pose to the dataset's average bbox
                    # scale and center.
                    bbox = bboxes[k]
                    center = np.array([[(bbox[0] + bbox[2]) / 2,
                                        (bbox[1] + bbox[3]) / 2]])
                    scale = max(bbox[2] - bbox[0], bbox[3] - bbox[1])
                    keypoints.append((kpt[:, :2] - center) / scale *
                                     bbox_scale + bbox_center)
                else:
                    keypoints.append(kpt[:, :2])
            pose_results_2d[i][j].pred_instances.keypoints = np.array(
                keypoints)

    pose_sequences_2d = collate_pose_sequence(pose_results_2d, with_track_id,
                                              target_idx)

    if not pose_sequences_2d:
        return []

    data_list = []
    for i, pose_seq in enumerate(pose_sequences_2d):
        data_info = dict()

        keypoints_2d = pose_seq.pred_instances.keypoints
        keypoints_2d = np.squeeze(
            keypoints_2d, axis=0) if keypoints_2d.ndim == 4 else keypoints_2d

        T, K, C = keypoints_2d.shape

        data_info['keypoints'] = keypoints_2d
        data_info['keypoints_visible'] = np.ones((
            T,
            K,
        ), dtype=np.float32)
        # Placeholder lifting targets required by the test pipeline.
        data_info['lifting_target'] = np.zeros((K, 3), dtype=np.float32)
        data_info['lifting_target_visible'] = np.ones((K, 1),
                                                      dtype=np.float32)

        if image_size is not None:
            assert len(image_size) == 2
            data_info['camera_param'] = dict(w=image_size[0], h=image_size[1])

        data_info.update(model.dataset_meta)
        data_list.append(pipeline(data_info))

    if data_list:
        # collate data list into a batch, which is a dict with following keys:
        # batch['inputs']: a list of input images
        # batch['data_samples']: a list of :obj:`PoseDataSample`
        batch = pseudo_collate(data_list)
        with torch.no_grad():
            results = model.test_step(batch)
    else:
        results = []

    return results
import warnings
from pathlib import Path
from typing import List, Optional, Union
import numpy as np
import torch
import torch.nn as nn
from mmengine.config import Config
from mmengine.dataset import Compose, pseudo_collate
from mmengine.model.utils import revert_sync_batchnorm
from mmengine.registry import init_default_scope
from mmengine.runner import load_checkpoint
from PIL import Image
from mmpose.datasets.datasets.utils import parse_pose_metainfo
from mmpose.models.builder import build_pose_estimator
from mmpose.structures import PoseDataSample
from mmpose.structures.bbox import bbox_xywh2xyxy
def dataset_meta_from_config(config: Config,
                             dataset_mode: str = 'train') -> Optional[dict]:
    """Get dataset metainfo from the model config.

    Args:
        config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file
            path, :obj:`Path`, or the config object.
        dataset_mode (str): Which dataset's metainfo to fetch. Options are
            ``'train'``, ``'val'`` and ``'test'``. Defaults to ``'train'``.

    Returns:
        dict, optional: The dataset metainfo, or ``None`` when it cannot be
        resolved from the config. See
        ``mmpose.datasets.datasets.utils.parse_pose_metainfo`` for details.
    """
    try:
        if dataset_mode not in ('train', 'val', 'test'):
            raise ValueError(
                f'Invalid dataset {dataset_mode} to get metainfo. '
                'Should be one of "train", "val", or "test".')

        # e.g. ``config.train_dataloader.dataset`` for dataset_mode='train'.
        dataset_cfg = getattr(config, f'{dataset_mode}_dataloader').dataset

        if 'metainfo' in dataset_cfg:
            metainfo = dataset_cfg.metainfo
        else:
            # Fall back to the METAINFO declared on the dataset class.
            import mmpose.datasets.datasets  # noqa: F401, F403
            from mmpose.registry import DATASETS

            metainfo = DATASETS.get(dataset_cfg.type).METAINFO

        metainfo = parse_pose_metainfo(metainfo)
    except AttributeError:
        # The config does not describe the requested dataloader.
        metainfo = None

    return metainfo
The provided code snippet includes necessary dependencies for implementing the `init_model` function. Write a Python function `def init_model(config: Union[str, Path, Config], checkpoint: Optional[str] = None, device: str = 'cuda:0', cfg_options: Optional[dict] = None) -> nn.Module` to solve the following problem:
Initialize a pose estimator from a config file. Args: config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path, :obj:`Path`, or the config object. checkpoint (str, optional): Checkpoint path. If left as None, the model will not load any weights. Defaults to ``None`` device (str): The device where the anchors will be put on. Defaults to ``'cuda:0'``. cfg_options (dict, optional): Options to override some settings in the used config. Defaults to ``None`` Returns: nn.Module: The constructed pose estimator.
Here is the function:
def init_model(config: Union[str, Path, Config],
               checkpoint: Optional[str] = None,
               device: str = 'cuda:0',
               cfg_options: Optional[dict] = None) -> nn.Module:
    """Initialize a pose estimator from a config file.

    Args:
        config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path,
            :obj:`Path`, or the config object.
        checkpoint (str, optional): Checkpoint path. If left as None, the model
            will not load any weights. Defaults to ``None``
        device (str): The device where the anchors will be put on.
            Defaults to ``'cuda:0'``.
        cfg_options (dict, optional): Options to override some settings in
            the used config. Defaults to ``None``

    Returns:
        nn.Module: The constructed pose estimator.

    Raises:
        TypeError: If ``config`` is neither a path nor a ``Config`` object.
    """
    if isinstance(config, (str, Path)):
        config = Config.fromfile(config)
    elif not isinstance(config, Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')
    if cfg_options is not None:
        config.merge_from_dict(cfg_options)
    # NOTE(review): the backbone's ``init_cfg`` is cleared only when no
    # ``cfg_options`` were passed (note the elif) — confirm this asymmetry
    # is intended.
    elif 'init_cfg' in config.model.backbone:
        config.model.backbone.init_cfg = None
    # Inference only: drop the training config.
    config.model.train_cfg = None
    # register all modules in mmpose into the registries
    scope = config.get('default_scope', 'mmpose')
    if scope is not None:
        init_default_scope(scope)
    model = build_pose_estimator(config.model)
    # SyncBN cannot run on a single device; replace it with plain BN.
    model = revert_sync_batchnorm(model)
    # get dataset_meta in this priority: checkpoint > config > default (COCO)
    dataset_meta = None
    if checkpoint is not None:
        ckpt = load_checkpoint(model, checkpoint, map_location='cpu')
        if 'dataset_meta' in ckpt.get('meta', {}):
            # checkpoint from mmpose 1.x
            dataset_meta = ckpt['meta']['dataset_meta']
    if dataset_meta is None:
        dataset_meta = dataset_meta_from_config(config, dataset_mode='train')
    if dataset_meta is None:
        warnings.simplefilter('once')
        warnings.warn('Can not load dataset_meta from the checkpoint or the '
                      'model config. Use COCO metainfo by default.')
        dataset_meta = parse_pose_metainfo(
            dict(from_file='configs/_base_/datasets/coco.py'))
    model.dataset_meta = dataset_meta
    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()
    return model
import warnings
from pathlib import Path
from typing import List, Optional, Union
import numpy as np
import torch
import torch.nn as nn
from mmengine.config import Config
from mmengine.dataset import Compose, pseudo_collate
from mmengine.model.utils import revert_sync_batchnorm
from mmengine.registry import init_default_scope
from mmengine.runner import load_checkpoint
from PIL import Image
from mmpose.datasets.datasets.utils import parse_pose_metainfo
from mmpose.models.builder import build_pose_estimator
from mmpose.structures import PoseDataSample
from mmpose.structures.bbox import bbox_xywh2xyxy
The provided code snippet includes necessary dependencies for implementing the `inference_topdown` function. Write a Python function `def inference_topdown(model: nn.Module, img: Union[np.ndarray, str], bboxes: Optional[Union[List, np.ndarray]] = None, bbox_format: str = 'xyxy') -> List[PoseDataSample]` to solve the following problem:
Inference image with a top-down pose estimator. Args: model (nn.Module): The top-down pose estimator img (np.ndarray | str): The loaded image or image file to inference bboxes (np.ndarray, optional): The bboxes in shape (N, 4), each row represents a bbox. If not given, the entire image will be regarded as a single bbox area. Defaults to ``None`` bbox_format (str): The bbox format indicator. Options are ``'xywh'`` and ``'xyxy'``. Defaults to ``'xyxy'`` Returns: List[:obj:`PoseDataSample`]: The inference results. Specifically, the predicted keypoints and scores are saved at ``data_sample.pred_instances.keypoints`` and ``data_sample.pred_instances.keypoint_scores``.
Here is the function:
def inference_topdown(model: nn.Module,
                      img: Union[np.ndarray, str],
                      bboxes: Optional[Union[List, np.ndarray]] = None,
                      bbox_format: str = 'xyxy') -> List[PoseDataSample]:
    """Inference image with a top-down pose estimator.

    Args:
        model (nn.Module): The top-down pose estimator.
        img (np.ndarray | str): The loaded image or image file to inference.
        bboxes (np.ndarray, optional): The bboxes in shape (N, 4), each row
            representing one bbox. If not given, the entire image is treated
            as a single bbox area. Defaults to ``None``.
        bbox_format (str): The bbox format indicator. Options are ``'xywh'``
            and ``'xyxy'``. Defaults to ``'xyxy'``.

    Returns:
        List[:obj:`PoseDataSample`]: The inference results. Specifically, the
        predicted keypoints and scores are saved at
        ``data_sample.pred_instances.keypoints`` and
        ``data_sample.pred_instances.keypoint_scores``.
    """
    scope = model.cfg.get('default_scope', 'mmpose')
    if scope is not None:
        init_default_scope(scope)
    pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline)

    if bboxes is None or len(bboxes) == 0:
        # No detection supplied: use the full image extent as one bbox.
        if isinstance(img, str):
            w, h = Image.open(img).size
        else:
            h, w = img.shape[:2]
        bboxes = np.array([[0, 0, w, h]], dtype=np.float32)
    else:
        if isinstance(bboxes, list):
            bboxes = np.array(bboxes)

        assert bbox_format in {'xyxy', 'xywh'}, \
            f'Invalid bbox_format "{bbox_format}".'

        if bbox_format == 'xywh':
            bboxes = bbox_xywh2xyxy(bboxes)

    # Build one data sample per bbox and feed it through the test pipeline.
    data_list = []
    for bbox in bboxes:
        if isinstance(img, str):
            data_info = dict(img_path=img)
        else:
            data_info = dict(img=img)
        data_info['bbox'] = bbox[None]  # shape (1, 4)
        data_info['bbox_score'] = np.ones(1, dtype=np.float32)  # shape (1,)
        data_info.update(model.dataset_meta)
        data_list.append(pipeline(data_info))

    if not data_list:
        return []

    # Collate the data list into a batch dict with the following keys:
    #   batch['inputs']: a list of input images
    #   batch['data_samples']: a list of :obj:`PoseDataSample`
    batch = pseudo_collate(data_list)
    with torch.no_grad():
        results = model.test_step(batch)
    return results
import warnings
from pathlib import Path
from typing import List, Optional, Union
import numpy as np
import torch
import torch.nn as nn
from mmengine.config import Config
from mmengine.dataset import Compose, pseudo_collate
from mmengine.model.utils import revert_sync_batchnorm
from mmengine.registry import init_default_scope
from mmengine.runner import load_checkpoint
from PIL import Image
from mmpose.datasets.datasets.utils import parse_pose_metainfo
from mmpose.models.builder import build_pose_estimator
from mmpose.structures import PoseDataSample
from mmpose.structures.bbox import bbox_xywh2xyxy
The provided code snippet includes necessary dependencies for implementing the `inference_bottomup` function. Write a Python function `def inference_bottomup(model: nn.Module, img: Union[np.ndarray, str])` to solve the following problem:
Inference image with a bottom-up pose estimator. Args: model (nn.Module): The bottom-up pose estimator img (np.ndarray | str): The loaded image or image file to inference Returns: List[:obj:`PoseDataSample`]: The inference results. Specifically, the predicted keypoints and scores are saved at ``data_sample.pred_instances.keypoints`` and ``data_sample.pred_instances.keypoint_scores``.
Here is the function:
def inference_bottomup(model: nn.Module, img: Union[np.ndarray, str]):
    """Inference image with a bottom-up pose estimator.

    Args:
        model (nn.Module): The bottom-up pose estimator.
        img (np.ndarray | str): The loaded image or image file to inference.

    Returns:
        List[:obj:`PoseDataSample`]: The inference results. Specifically, the
        predicted keypoints and scores are saved at
        ``data_sample.pred_instances.keypoints`` and
        ``data_sample.pred_instances.keypoint_scores``.
    """
    pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline)

    # A path is loaded by the pipeline itself; an array is passed through.
    if isinstance(img, str):
        data_info = dict(img_path=img)
    else:
        data_info = dict(img=img)
    data_info.update(model.dataset_meta)

    # Single-image batch for the model's test step.
    batch = pseudo_collate([pipeline(data_info)])
    with torch.no_grad():
        results = model.test_step(batch)
    return results
import warnings
from pathlib import Path
from typing import List, Optional, Union
import numpy as np
import torch
import torch.nn as nn
from mmengine.config import Config
from mmengine.dataset import Compose, pseudo_collate
from mmengine.model.utils import revert_sync_batchnorm
from mmengine.registry import init_default_scope
from mmengine.runner import load_checkpoint
from PIL import Image
from mmpose.datasets.datasets.utils import parse_pose_metainfo
from mmpose.models.builder import build_pose_estimator
from mmpose.structures import PoseDataSample
from mmpose.structures.bbox import bbox_xywh2xyxy
The provided code snippet includes necessary dependencies for implementing the `collect_multi_frames` function. Write a Python function `def collect_multi_frames(video, frame_id, indices, online=False)` to solve the following problem:
Collect multi frames from the video. Args: video (mmcv.VideoReader): A VideoReader of the input video file. frame_id (int): index of the current frame indices (list(int)): index offsets of the frames to collect online (bool): inference mode, if set to True, can not use future frame information. Returns: list(ndarray): multi frames collected from the input video file.
Here is the function:
def collect_multi_frames(video, frame_id, indices, online=False):
    """Collect multiple frames around ``frame_id`` from a video.

    Args:
        video (mmcv.VideoReader): A VideoReader of the input video file.
        frame_id (int): Index of the current frame.
        indices (list(int)): Index offsets of the frames to collect.
        online (bool): Inference mode. If set to True, future frame
            information must not be used, so offsets are clamped to
            ``frame_id``.

    Returns:
        list(ndarray): The collected frames, with the current frame first.
    """
    num_frames = len(video)
    # Upper clamp: the current frame in online mode, otherwise the video end.
    upper = frame_id if online else num_frames - 1

    # The current frame always leads the collected list.
    frames = [video[frame_id]]
    for offset in indices:
        if offset == 0:
            # The current frame is already included.
            continue
        support_idx = np.clip(frame_id + offset, 0, upper)
        frames.append(video[support_idx])
    return frames
from typing import Dict
from mmengine.infer import BaseInferencer
The provided code snippet includes necessary dependencies for implementing the `get_model_aliases` function. Write a Python function `def get_model_aliases(scope: str = 'mmpose') -> Dict[str, str]` to solve the following problem:
Retrieve model aliases and their corresponding configuration names. Args: scope (str, optional): The scope for the model aliases. Defaults to 'mmpose'. Returns: Dict[str, str]: A dictionary containing model aliases as keys and their corresponding configuration names as values.
Here is the function:
def get_model_aliases(scope: str = 'mmpose') -> Dict[str, str]:
    """Retrieve model aliases and their corresponding configuration names.

    Args:
        scope (str, optional): The scope for the model aliases. Defaults
            to 'mmpose'.

    Returns:
        Dict[str, str]: A mapping from model alias to its configuration name.
    """
    # Load every model configuration recorded in the scope's metafile.
    repo_or_mim_dir = BaseInferencer._get_repo_or_mim_dir(scope)
    model_cfgs = BaseInferencer._get_models_from_metafile(repo_or_mim_dir)

    aliases: Dict[str, str] = {}
    for cfg in model_cfgs:
        if 'Alias' not in cfg:
            continue
        alias_field = cfg['Alias']
        if isinstance(alias_field, str):
            aliases[alias_field] = cfg['Name']
        elif isinstance(alias_field, list):
            for alias in alias_field:
                aliases[alias] = cfg['Name']
        else:
            raise ValueError(
                'encounter an unexpected alias type. Please raise an '
                'issue at https://github.com/open-mmlab/mmpose/issues '
                'to announce us')
    return aliases
import warnings
import numpy as np
from mmpose.evaluation.functional.nms import oks_iou
def _compute_iou(bboxA, bboxB):
"""Compute the Intersection over Union (IoU) between two boxes .
Args:
bboxA (list): The first bbox info (left, top, right, bottom, score).
bboxB (list): The second bbox info (left, top, right, bottom, score).
Returns:
float: The IoU value.
"""
x1 = max(bboxA[0], bboxB[0])
y1 = max(bboxA[1], bboxB[1])
x2 = min(bboxA[2], bboxB[2])
y2 = min(bboxA[3], bboxB[3])
inter_area = max(0, x2 - x1) * max(0, y2 - y1)
bboxA_area = (bboxA[2] - bboxA[0]) * (bboxA[3] - bboxA[1])
bboxB_area = (bboxB[2] - bboxB[0]) * (bboxB[3] - bboxB[1])
union_area = float(bboxA_area + bboxB_area - inter_area)
if union_area == 0:
union_area = 1e-5
warnings.warn('union_area=0 is unexpected')
iou = inter_area / union_area
return iou
The provided code snippet includes necessary dependencies for implementing the `_track_by_iou` function. Write a Python function `def _track_by_iou(res, results_last, thr)` to solve the following problem:
Get track id using IoU tracking greedily.
Here is the function:
def _track_by_iou(res, results_last, thr):
    """Get track id using IoU tracking greedily."""
    bbox = list(np.squeeze(res.pred_instances.bboxes, axis=0))

    # Find the previous instance whose bbox overlaps the current one most.
    best_iou = -1
    best_idx = -1
    match_result = {}
    for idx, prev in enumerate(results_last):
        prev_bbox = list(np.squeeze(prev.pred_instances.bboxes, axis=0))
        iou = _compute_iou(bbox, prev_bbox)
        if iou > best_iou:
            best_iou = iou
            best_idx = idx

    if best_iou > thr:
        # Inherit the track id and consume the matched previous result.
        track_id = results_last[best_idx].track_id
        match_result = results_last[best_idx]
        del results_last[best_idx]
    else:
        # No sufficiently overlapping match: start a new track later.
        track_id = -1

    return track_id, results_last, match_result
import warnings
import numpy as np
from mmpose.evaluation.functional.nms import oks_iou
The provided code snippet includes necessary dependencies for implementing the `_track_by_oks` function. Write a Python function `def _track_by_oks(res, results_last, thr, sigmas=None)` to solve the following problem:
Get track id using OKS tracking greedily.
Here is the function:
def _track_by_oks(res, results_last, thr, sigmas=None):
    """Get track id using OKS tracking greedily."""
    # Flatten the current instance to shape (K*3, ): (x, y, score) per kpt.
    keypoint = np.concatenate(
        (res.pred_instances.keypoints,
         res.pred_instances.keypoint_scores[:, :, None]),
        axis=2)
    keypoint = np.squeeze(keypoint, axis=0).reshape((-1))
    area = np.squeeze(res.pred_instances.areas, axis=0)

    match_result = {}
    if len(results_last) == 0:
        return -1, results_last, match_result

    # Gather the flattened keypoints and areas of all previous instances.
    keypoints_last = np.array([
        np.squeeze(
            np.concatenate(
                (prev.pred_instances.keypoints,
                 prev.pred_instances.keypoint_scores[:, :, None]),
                axis=2),
            axis=0).reshape((-1)) for prev in results_last
    ])
    area_last = np.array([
        np.squeeze(prev.pred_instances.areas, axis=0)
        for prev in results_last
    ])

    oks_score = oks_iou(
        keypoint, keypoints_last, area, area_last, sigmas=sigmas)
    best = np.argmax(oks_score)

    if oks_score[best] > thr:
        # Inherit the track id and consume the matched previous result.
        track_id = results_last[best].track_id
        match_result = results_last[best]
        del results_last[best]
    else:
        track_id = -1

    return track_id, results_last, match_result
import math
from typing import Dict, List, Optional, Tuple, Union
import cv2
import mmcv
import numpy as np
import torch
from mmengine.dist import master_only
from mmengine.structures import InstanceData, PixelData
from mmpose.datasets.datasets.utils import parse_pose_metainfo
from mmpose.registry import VISUALIZERS
from mmpose.structures import PoseDataSample
from .opencv_backend_visualizer import OpencvBackendVisualizer
from .simcc_vis import SimCCVisualizer
The provided code snippet includes necessary dependencies for implementing the `_get_adaptive_scales` function. Write a Python function `def _get_adaptive_scales(areas: np.ndarray, min_area: int = 800, max_area: int = 30000) -> np.ndarray` to solve the following problem:
Get adaptive scales according to areas. The scale range is [0.5, 1.0]. When the area is less than ``min_area``, the scale is 0.5, and when the area is larger than ``max_area``, the scale is 1.0. Args: areas (ndarray): The areas of bboxes or masks with the shape of (n, ). min_area (int): Lower bound areas for adaptive scales. Defaults to 800. max_area (int): Upper bound areas for adaptive scales. Defaults to 30000. Returns: ndarray: The adaptive scales with the shape of (n, ).
Here is the function:
def _get_adaptive_scales(areas: np.ndarray,
min_area: int = 800,
max_area: int = 30000) -> np.ndarray:
"""Get adaptive scales according to areas.
The scale range is [0.5, 1.0]. When the area is less than
``min_area``, the scale is 0.5 while the area is larger than
``max_area``, the scale is 1.0.
Args:
areas (ndarray): The areas of bboxes or masks with the
shape of (n, ).
min_area (int): Lower bound areas for adaptive scales.
Defaults to 800.
max_area (int): Upper bound areas for adaptive scales.
Defaults to 30000.
Returns:
ndarray: The adaotive scales with the shape of (n, ).
"""
scales = 0.5 + (areas - min_area) / (max_area - min_area)
scales = np.clip(scales, 0.5, 1.0)
return scales | Get adaptive scales according to areas. The scale range is [0.5, 1.0]. When the area is less than ``min_area``, the scale is 0.5 while the area is larger than ``max_area``, the scale is 1.0. Args: areas (ndarray): The areas of bboxes or masks with the shape of (n, ). min_area (int): Lower bound areas for adaptive scales. Defaults to 800. max_area (int): Upper bound areas for adaptive scales. Defaults to 30000. Returns: ndarray: The adaotive scales with the shape of (n, ). |
from typing import List, Optional
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `nms` function. Write a Python function `def nms(dets: np.ndarray, thr: float) -> List[int]` to solve the following problem:
Greedily select boxes with high confidence and overlap <= thr. Args: dets (np.ndarray): [[x1, y1, x2, y2, score]]. thr (float): Retain overlap < thr. Returns: list: Indexes to keep.
Here is the function:
def nms(dets: np.ndarray, thr: float) -> List[int]:
    """Greedily select boxes with high confidence and overlap <= thr.

    Args:
        dets (np.ndarray): [[x1, y1, x2, y2, score]].
        thr (float): Retain overlap < thr.

    Returns:
        list: Indexes to keep.
    """
    if len(dets) == 0:
        return []

    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]

    # Pixel-inclusive area convention (+1 on each side length).
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)

    # Candidate indices, highest score first.
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        best = order[0]
        keep.append(best)
        rest = order[1:]

        # Intersection of the best box with all remaining candidates.
        ix1 = np.maximum(x1[best], x1[rest])
        iy1 = np.maximum(y1[best], y1[rest])
        ix2 = np.minimum(x2[best], x2[rest])
        iy2 = np.minimum(y2[best], y2[rest])
        inter = (np.maximum(0.0, ix2 - ix1 + 1) *
                 np.maximum(0.0, iy2 - iy1 + 1))
        ovr = inter / (areas[best] + areas[rest] - inter)

        # Drop candidates that overlap the kept box too much.
        order = rest[np.where(ovr <= thr)[0]]

    return keep
from typing import List, Optional
import numpy as np
def oks_iou(g: np.ndarray,
            d: np.ndarray,
            a_g: float,
            a_d: np.ndarray,
            sigmas: Optional[np.ndarray] = None,
            vis_thr: Optional[float] = None) -> np.ndarray:
    """Calculate OKS (object keypoint similarity) IoUs.

    Note:
        - number of keypoints: K
        - number of instances: N

    Args:
        g (np.ndarray): The instance to calculate OKS IOU with other
            instances, as flattened keypoint coordinates. Shape: (K*3, )
        d (np.ndarray): The rest instances, as flattened keypoint
            coordinates. Shape: (N, K*3)
        a_g (float): Area of the ground truth object.
        a_d (np.ndarray): Areas of the detected objects. Shape: (N, )
        sigmas (np.ndarray, optional): Keypoint labelling uncertainty.
            Please refer to `COCO keypoint evaluation
            <https://cocodataset.org/#keypoints-eval>`__ for more details.
            If not given, use the sigmas on COCO dataset.
            If specified, shape: (K, ). Defaults to ``None``
        vis_thr (float, optional): Threshold of the keypoint visibility.
            If specified, only keypoints whose visibility exceeds the
            threshold in both instances contribute to the OKS.
            Defaults to ``None``

    Returns:
        np.ndarray: The oks ious. Shape: (N, )
    """
    if sigmas is None:
        # Default per-keypoint sigmas of the COCO dataset.
        sigmas = np.array([
            .26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07,
            1.07, .87, .87, .89, .89
        ]) / 10.0
    vars = (sigmas * 2)**2

    # Split the flattened ground-truth instance into x, y and visibility.
    xg = g[0::3]
    yg = g[1::3]
    vg = g[2::3]

    ious = np.zeros(len(d), dtype=np.float32)
    for n_d in range(len(d)):
        xd = d[n_d, 0::3]
        yd = d[n_d, 1::3]
        vd = d[n_d, 2::3]
        # Normalized squared distance per keypoint (the OKS exponent).
        e = ((xd - xg)**2 + (yd - yg)**2) / vars / (
            (a_g + a_d[n_d]) / 2 + np.spacing(1)) / 2
        if vis_thr is not None:
            # Keep only keypoints visible in both instances.
            visible = list((vg > vis_thr) & (vd > vis_thr))
            e = e[visible]
        ious[n_d] = np.sum(np.exp(-e)) / len(e) if len(e) != 0 else 0.0
    return ious
The provided code snippet includes necessary dependencies for implementing the `oks_nms` function. Write a Python function `def oks_nms(kpts_db: List[dict], thr: float, sigmas: Optional[np.ndarray] = None, vis_thr: Optional[float] = None, score_per_joint: bool = False)` to solve the following problem:
OKS NMS implementations. Args: kpts_db (List[dict]): The keypoints results of the same image. thr (float): The threshold of NMS. Will retain oks overlap < thr. sigmas (np.ndarray, optional): Keypoint labelling uncertainty. Please refer to `COCO keypoint evaluation <https://cocodataset.org/#keypoints-eval>`__ for more details. If not given, use the sigmas on COCO dataset. Defaults to ``None`` vis_thr(float, optional): Threshold of the keypoint visibility. If specified, will calculate OKS based on those keypoints whose visibility higher than vis_thr. If not given, calculate the OKS based on all keypoints. Defaults to ``None`` score_per_joint(bool): Whether the input scores (in kpts_db) are per-joint scores. Defaults to ``False`` Returns: np.ndarray: indexes to keep.
Here is the function:
def oks_nms(kpts_db: List[dict],
            thr: float,
            sigmas: Optional[np.ndarray] = None,
            vis_thr: Optional[float] = None,
            score_per_joint: bool = False):
    """OKS NMS implementations.

    Args:
        kpts_db (List[dict]): The keypoints results of the same image.
        thr (float): The threshold of NMS. Will retain oks overlap < thr.
        sigmas (np.ndarray, optional): Keypoint labelling uncertainty.
            Please refer to `COCO keypoint evaluation
            <https://cocodataset.org/#keypoints-eval>`__ for more details.
            If not given, use the sigmas on COCO dataset. Defaults to ``None``
        vis_thr (float, optional): Threshold of the keypoint visibility.
            If specified, will calculate OKS based on those keypoints whose
            visibility higher than vis_thr. If not given, calculate the OKS
            based on all keypoints. Defaults to ``None``
        score_per_joint (bool): Whether the input scores (in kpts_db) are
            per-joint scores. Defaults to ``False``

    Returns:
        np.ndarray: indexes to keep.
    """
    if len(kpts_db) == 0:
        return []

    # Per-joint scores are reduced to one score per instance by averaging.
    if score_per_joint:
        scores = np.array([k['score'].mean() for k in kpts_db])
    else:
        scores = np.array([k['score'] for k in kpts_db])

    kpts = np.array([k['keypoints'].flatten() for k in kpts_db])
    areas = np.array([k['area'] for k in kpts_db])

    # Candidate indices, highest score first.
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        best = order[0]
        keep.append(best)
        rest = order[1:]

        # Suppress candidates whose OKS with the kept instance exceeds thr.
        oks_ovr = oks_iou(kpts[best], kpts[rest], areas[best], areas[rest],
                          sigmas, vis_thr)
        order = rest[np.where(oks_ovr <= thr)[0]]

    return np.array(keep)
from typing import List, Optional
import numpy as np
def oks_iou(g: np.ndarray,
            d: np.ndarray,
            a_g: float,
            a_d: np.ndarray,
            sigmas: Optional[np.ndarray] = None,
            vis_thr: Optional[float] = None) -> np.ndarray:
    """Compute the OKS (Object Keypoint Similarity) between one instance
    and a batch of other instances.

    Note:
        - number of keypoints: K
        - number of instances: N

    Args:
        g (np.ndarray): The reference instance, as flattened keypoint
            triplets ``(x, y, visibility)``. Shape: (K*3, )
        d (np.ndarray): The instances to compare against, in the same
            flattened layout. Shape: (N, K*3)
        a_g (float): Area of the ground truth object.
        a_d (np.ndarray): Area of the detected objects. Shape: (N, )
        sigmas (np.ndarray, optional): Keypoint labelling uncertainty.
            Please refer to `COCO keypoint evaluation
            <https://cocodataset.org/#keypoints-eval>`__ for more details.
            If not given, use the sigmas of the 17 COCO body keypoints.
            If specified, shape: (K, ). Defaults to ``None``
        vis_thr (float, optional): Threshold of the keypoint visibility.
            If specified, only keypoints visible in both instances
            (visibility > vis_thr) contribute to the OKS. Defaults to
            ``None``, meaning all keypoints are used.

    Returns:
        np.ndarray: The oks ious. Shape: (N, )
    """
    if sigmas is None:
        # Default per-keypoint sigmas of the 17 COCO body keypoints.
        sigmas = np.array([
            .26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07,
            1.07, .87, .87, .89, .89
        ]) / 10.0
    variances = (sigmas * 2)**2

    xg, yg, vg = g[0::3], g[1::3], g[2::3]
    ious = np.zeros(len(d), dtype=np.float32)
    for idx, inst in enumerate(d):
        xd, yd, vd = inst[0::3], inst[1::3], inst[2::3]
        squared_dist = (xd - xg)**2 + (yd - yg)**2
        # Normalized squared distance; np.spacing(1) avoids division by 0.
        e = squared_dist / variances / (
            (a_g + a_d[idx]) / 2 + np.spacing(1)) / 2
        if vis_thr is not None:
            visible = (vg > vis_thr) & (vd > vis_thr)
            e = e[visible]
        ious[idx] = np.sum(np.exp(-e)) / len(e) if len(e) != 0 else 0.0
    return ious
def _rescore(overlap: np.ndarray,
scores: np.ndarray,
thr: float,
type: str = 'gaussian'):
"""Rescoring mechanism gaussian or linear.
Args:
overlap (np.ndarray): The calculated oks ious.
scores (np.ndarray): target scores.
thr (float): retain oks overlap < thr.
type (str): The rescoring type. Could be 'gaussian' or 'linear'.
Defaults to ``'gaussian'``
Returns:
np.ndarray: indexes to keep
"""
assert len(overlap) == len(scores)
assert type in ['gaussian', 'linear']
if type == 'linear':
inds = np.where(overlap >= thr)[0]
scores[inds] = scores[inds] * (1 - overlap[inds])
else:
scores = scores * np.exp(-overlap**2 / thr)
return scores
The provided code snippet includes necessary dependencies for implementing the `soft_oks_nms` function. Write a Python function `def soft_oks_nms(kpts_db: List[dict], thr: float, max_dets: int = 20, sigmas: Optional[np.ndarray] = None, vis_thr: Optional[float] = None, score_per_joint: bool = False)` to solve the following problem:
Soft OKS NMS implementations. Args: kpts_db (List[dict]): The keypoints results of the same image. thr (float): The threshold of NMS. Will retain oks overlap < thr. max_dets (int): Maximum number of detections to keep. Defaults to 20 sigmas (np.ndarray, optional): Keypoint labelling uncertainty. Please refer to `COCO keypoint evaluation <https://cocodataset.org/#keypoints-eval>`__ for more details. If not given, use the sigmas on COCO dataset. Defaults to ``None`` vis_thr(float, optional): Threshold of the keypoint visibility. If specified, will calculate OKS based on those keypoints whose visibility higher than vis_thr. If not given, calculate the OKS based on all keypoints. Defaults to ``None`` score_per_joint(bool): Whether the input scores (in kpts_db) are per-joint scores. Defaults to ``False`` Returns: np.ndarray: indexes to keep.
Here is the function:
def soft_oks_nms(kpts_db: List[dict],
                 thr: float,
                 max_dets: int = 20,
                 sigmas: Optional[np.ndarray] = None,
                 vis_thr: Optional[float] = None,
                 score_per_joint: bool = False):
    """Soft OKS NMS implementations.

    Unlike hard OKS NMS, overlapping instances are not discarded outright:
    their scores are decayed via ``_rescore`` and they may still be kept if
    they remain among the highest-scoring candidates.

    Args:
        kpts_db (List[dict]): The keypoints results of the same image.
            Each dict must provide ``'score'``, ``'keypoints'`` and
            ``'area'``.
        thr (float): Rescoring parameter forwarded to ``_rescore``. With
            the default gaussian rescoring it acts as a decay temperature
            rather than a hard overlap cutoff.
        max_dets (int): Maximum number of detections to keep. Defaults to 20
        sigmas (np.ndarray, optional): Keypoint labelling uncertainty.
            Please refer to `COCO keypoint evaluation
            <https://cocodataset.org/#keypoints-eval>`__ for more details.
            If not given, use the sigmas on COCO dataset. Defaults to ``None``
        vis_thr(float, optional): Threshold of the keypoint visibility.
            If specified, will calculate OKS based on those keypoints whose
            visibility higher than vis_thr. If not given, calculate the OKS
            based on all keypoints. Defaults to ``None``
        score_per_joint(bool): Whether the input scores (in kpts_db) are
            per-joint scores. Defaults to ``False``

    Returns:
        np.ndarray: indexes (into ``kpts_db``) to keep.
    """
    if len(kpts_db) == 0:
        return []
    # Per-joint scores are reduced to one instance score by averaging.
    if score_per_joint:
        scores = np.array([k['score'].mean() for k in kpts_db])
    else:
        scores = np.array([k['score'] for k in kpts_db])
    kpts = np.array([k['keypoints'].flatten() for k in kpts_db])
    areas = np.array([k['area'] for k in kpts_db])
    # `order` holds the remaining candidate indexes sorted by (rescored)
    # score; `scores` is kept aligned with `order` throughout the loop.
    order = scores.argsort()[::-1]
    scores = scores[order]
    keep = np.zeros(max_dets, dtype=np.intp)
    keep_cnt = 0
    while len(order) > 0 and keep_cnt < max_dets:
        # Accept the current best candidate.
        i = order[0]
        oks_ovr = oks_iou(kpts[i], kpts[order[1:]], areas[i], areas[order[1:]],
                          sigmas, vis_thr)
        order = order[1:]
        # Decay the remaining candidates' scores by their overlap with the
        # accepted instance, then re-sort both aligned arrays.
        scores = _rescore(oks_ovr, scores[1:], thr)
        tmp = scores.argsort()[::-1]
        order = order[tmp]
        scores = scores[tmp]
        keep[keep_cnt] = i
        keep_cnt += 1
    # Trim the preallocated buffer to the number of instances actually kept.
    keep = keep[:keep_cnt]
    return keep
159,171 | from typing import List, Optional
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `nearby_joints_nms` function. Write a Python function `def nearby_joints_nms( kpts_db: List[dict], dist_thr: float, num_nearby_joints_thr: Optional[int] = None, score_per_joint: bool = False, max_dets: int = 30, )` to solve the following problem:
Nearby joints NMS implementations. Instances with non-maximum scores will be suppressed if they have too much closed joints with other instances. This function is modified from project `DEKR<https://github.com/HRNet/DEKR/blob/main/lib/core/nms.py>`. Args: kpts_db (list[dict]): keypoints and scores. dist_thr (float): threshold for judging whether two joints are close. num_nearby_joints_thr (int): threshold for judging whether two instances are close. max_dets (int): max number of detections to keep. score_per_joint (bool): the input scores (in kpts_db) are per joint scores. Returns: np.ndarray: indexes to keep.
Here is the function:
def nearby_joints_nms(
    kpts_db: List[dict],
    dist_thr: float,
    num_nearby_joints_thr: Optional[int] = None,
    score_per_joint: bool = False,
    max_dets: int = 30,
):
    """Nearby-joints NMS.

    Instances with non-maximum scores are suppressed when too many of
    their joints lie close to the joints of another instance. This
    function is modified from project
    `DEKR<https://github.com/HRNet/DEKR/blob/main/lib/core/nms.py>`.

    Args:
        kpts_db (list[dict]): keypoints and scores. Each dict must provide
            ``'score'`` and ``'keypoints'`` (array of shape (K, D)).
        dist_thr (float): relative threshold for judging whether two joints
            are close; it is scaled by each instance's pose size.
        num_nearby_joints_thr (int, optional): two instances are considered
            duplicates when more than this many of their joints are close.
            Defaults to half the number of joints.
        score_per_joint (bool): the input scores (in kpts_db) are per joint
            scores.
        max_dets (int): max number of detections to keep. A non-positive
            value disables the limit.

    Returns:
        list[int]: indexes to keep.
    """
    assert dist_thr > 0, '`dist_thr` must be greater than 0.'
    if len(kpts_db) == 0:
        return []
    # Per-joint scores are reduced to one instance score by averaging.
    if score_per_joint:
        scores = np.array([k['score'].mean() for k in kpts_db])
    else:
        scores = np.array([k['score'] for k in kpts_db])
    kpts = np.array([k['keypoints'] for k in kpts_db])
    num_people, num_joints, _ = kpts.shape
    if num_nearby_joints_thr is None:
        num_nearby_joints_thr = num_joints // 2
    assert num_nearby_joints_thr < num_joints, '`num_nearby_joints_thr` must '\
        'be less than the number of joints.'
    # compute distance threshold
    # Pose size = diagonal of the tight bounding box of each instance's
    # keypoints. np.tile expands (P, 1, 1) to (P, P, K), so the closeness
    # threshold for the pair (i, j) scales with instance i's size.
    pose_area = kpts.max(axis=1) - kpts.min(axis=1)
    pose_area = np.sqrt(np.power(pose_area, 2).sum(axis=1))
    pose_area = pose_area.reshape(num_people, 1, 1)
    pose_area = np.tile(pose_area, (num_people, num_joints))
    close_dist_thr = pose_area * dist_thr
    # count nearby joints between instances
    # instance_dist[i, j, k] = distance between joint k of instances i and j.
    instance_dist = kpts[:, None] - kpts
    instance_dist = np.sqrt(np.power(instance_dist, 2).sum(axis=3))
    close_instance_num = (instance_dist < close_dist_thr).sum(2)
    close_instance = close_instance_num > num_nearby_joints_thr
    # apply nms
    # For each candidate (descending score), keep only the best-scoring
    # member of its group of mutually-close instances; the rest of the
    # group is ignored in later iterations.
    ignored_pose_inds, keep_pose_inds = set(), list()
    indexes = np.argsort(scores)[::-1]
    for i in indexes:
        if i in ignored_pose_inds:
            continue
        keep_inds = close_instance[i].nonzero()[0]
        keep_ind = keep_inds[np.argmax(scores[keep_inds])]
        if keep_ind not in ignored_pose_inds:
            keep_pose_inds.append(keep_ind)
        ignored_pose_inds = ignored_pose_inds.union(set(keep_inds))
    # limit the number of output instances
    if max_dets > 0 and len(keep_pose_inds) > max_dets:
        sub_inds = np.argsort(scores[keep_pose_inds])[-1:-max_dets - 1:-1]
        keep_pose_inds = [keep_pose_inds[i] for i in sub_inds]
    return keep_pose_inds
159,172 | from typing import Optional, Tuple
import numpy as np
from mmpose.codecs.utils import get_heatmap_maximum, get_simcc_maximum
from .mesh_eval import compute_similarity_transform
def keypoint_pck_accuracy(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray,
                          thr: float, norm_factor: np.ndarray) -> tuple:
    """Calculate the pose accuracy of PCK for each individual keypoint and
    the averaged accuracy across all keypoints for coordinates.

    Note:
        PCK metric measures accuracy of the localization of the body joints.
        The distances between predicted positions and the ground-truth ones
        are typically normalized by the bounding box size.
        The threshold (thr) of the normalized distance is commonly set
        as 0.05, 0.1 or 0.2 etc.

        - instance number: N
        - keypoint number: K

    Args:
        pred (np.ndarray[N, K, 2]): Predicted keypoint location.
        gt (np.ndarray[N, K, 2]): Groundtruth keypoint location.
        mask (np.ndarray[N, K]): Visibility of the target. False for
            invisible joints, which are ignored for accuracy calculation.
        thr (float): Threshold of PCK calculation.
        norm_factor (np.ndarray[N, 2]): Normalization factor for H&W.

    Returns:
        tuple: A tuple containing keypoint accuracy.

        - acc (np.ndarray[K]): Accuracy of each keypoint.
        - avg_acc (float): Averaged accuracy across all keypoints.
        - cnt (int): Number of valid keypoints.
    """
    distances = _calc_distances(pred, gt, mask, norm_factor)
    # An accuracy of -1 marks a keypoint with no valid measurement; such
    # entries are excluded from the average and from the valid count.
    acc = np.array([_distance_acc(d, thr) for d in distances])
    valid_acc = acc[acc >= 0]
    cnt = len(valid_acc)
    avg_acc = valid_acc.mean() if cnt > 0 else 0.0
    return acc, avg_acc, cnt
The provided code snippet includes necessary dependencies for implementing the `keypoint_auc` function. Write a Python function `def keypoint_auc(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray, norm_factor: np.ndarray, num_thrs: int = 20) -> float` to solve the following problem:
Calculate the Area under curve (AUC) of keypoint PCK accuracy. Note: - instance number: N - keypoint number: K Args: pred (np.ndarray[N, K, 2]): Predicted keypoint location. gt (np.ndarray[N, K, 2]): Groundtruth keypoint location. mask (np.ndarray[N, K]): Visibility of the target. False for invisible joints, and True for visible. Invisible joints will be ignored for accuracy calculation. norm_factor (float): Normalization factor. num_thrs (int): number of thresholds to calculate auc. Returns: float: Area under curve (AUC) of keypoint PCK accuracy.
Here is the function:
def keypoint_auc(pred: np.ndarray,
                 gt: np.ndarray,
                 mask: np.ndarray,
                 norm_factor: float,
                 num_thrs: int = 20) -> float:
    """Calculate the Area under curve (AUC) of keypoint PCK accuracy.

    The PCK accuracy is evaluated at ``num_thrs`` evenly spaced thresholds
    in ``[0, 1)`` and averaged.

    Note:
        - instance number: N
        - keypoint number: K

    Args:
        pred (np.ndarray[N, K, 2]): Predicted keypoint location.
        gt (np.ndarray[N, K, 2]): Groundtruth keypoint location.
        mask (np.ndarray[N, K]): Visibility of the target. False for
            invisible joints, which are ignored for accuracy calculation.
        norm_factor (float): Normalization factor, applied to both axes.
        num_thrs (int): number of thresholds to calculate auc.

    Returns:
        float: Area under curve (AUC) of keypoint PCK accuracy.
    """
    # Expand the scalar factor to the (N, 2) shape expected by
    # `keypoint_pck_accuracy`.
    nor = np.tile(np.array([[norm_factor, norm_factor]]), (pred.shape[0], 1))
    auc = 0.0
    for i in range(num_thrs):
        thr = 1.0 * i / num_thrs
        _, avg_acc, _ = keypoint_pck_accuracy(pred, gt, mask, thr, nor)
        # Riemann-sum approximation of the accuracy-vs-threshold curve.
        auc += 1.0 / num_thrs * avg_acc
    return auc
159,173 | from typing import Optional, Tuple
import numpy as np
from mmpose.codecs.utils import get_heatmap_maximum, get_simcc_maximum
from .mesh_eval import compute_similarity_transform
def _calc_distances(preds: np.ndarray, gts: np.ndarray, mask: np.ndarray,
norm_factor: np.ndarray) -> np.ndarray:
"""Calculate the normalized distances between preds and target.
Note:
- instance number: N
- keypoint number: K
- keypoint dimension: D (normally, D=2 or D=3)
Args:
preds (np.ndarray[N, K, D]): Predicted keypoint location.
gts (np.ndarray[N, K, D]): Groundtruth keypoint location.
mask (np.ndarray[N, K]): Visibility of the target. False for invisible
joints, and True for visible. Invisible joints will be ignored for
accuracy calculation.
norm_factor (np.ndarray[N, D]): Normalization factor.
Typical value is heatmap_size.
Returns:
np.ndarray[K, N]: The normalized distances. \
If target keypoints are missing, the distance is -1.
"""
N, K, _ = preds.shape
# set mask=0 when norm_factor==0
_mask = mask.copy()
_mask[np.where((norm_factor == 0).sum(1))[0], :] = False
distances = np.full((N, K), -1, dtype=np.float32)
# handle invalid values
norm_factor[np.where(norm_factor <= 0)] = 1e6
distances[_mask] = np.linalg.norm(
((preds - gts) / norm_factor[:, None, :])[_mask], axis=-1)
return distances.T
The provided code snippet includes necessary dependencies for implementing the `keypoint_nme` function. Write a Python function `def keypoint_nme(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray, normalize_factor: np.ndarray) -> float` to solve the following problem:
Calculate the normalized mean error (NME). Note: - instance number: N - keypoint number: K Args: pred (np.ndarray[N, K, 2]): Predicted keypoint location. gt (np.ndarray[N, K, 2]): Groundtruth keypoint location. mask (np.ndarray[N, K]): Visibility of the target. False for invisible joints, and True for visible. Invisible joints will be ignored for accuracy calculation. normalize_factor (np.ndarray[N, 2]): Normalization factor. Returns: float: normalized mean error
Here is the function:
def keypoint_nme(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray,
                 normalize_factor: np.ndarray) -> float:
    """Calculate the normalized mean error (NME).

    Note:
        - instance number: N
        - keypoint number: K

    Args:
        pred (np.ndarray[N, K, 2]): Predicted keypoint location.
        gt (np.ndarray[N, K, 2]): Groundtruth keypoint location.
        mask (np.ndarray[N, K]): Visibility of the target. False for
            invisible joints, which are ignored for accuracy calculation.
        normalize_factor (np.ndarray[N, 2]): Normalization factor.

    Returns:
        float: normalized mean error
    """
    dists = _calc_distances(pred, gt, mask, normalize_factor)
    # Entries of -1 mark missing/masked keypoints and are dropped.
    valid = dists[dists != -1]
    # `max(1, ...)` guards the division when no keypoint is valid.
    return valid.sum() / max(1, len(valid))
159,174 | from typing import Optional, Tuple
import numpy as np
from mmpose.codecs.utils import get_heatmap_maximum, get_simcc_maximum
from .mesh_eval import compute_similarity_transform
def _calc_distances(preds: np.ndarray, gts: np.ndarray, mask: np.ndarray,
norm_factor: np.ndarray) -> np.ndarray:
"""Calculate the normalized distances between preds and target.
Note:
- instance number: N
- keypoint number: K
- keypoint dimension: D (normally, D=2 or D=3)
Args:
preds (np.ndarray[N, K, D]): Predicted keypoint location.
gts (np.ndarray[N, K, D]): Groundtruth keypoint location.
mask (np.ndarray[N, K]): Visibility of the target. False for invisible
joints, and True for visible. Invisible joints will be ignored for
accuracy calculation.
norm_factor (np.ndarray[N, D]): Normalization factor.
Typical value is heatmap_size.
Returns:
np.ndarray[K, N]: The normalized distances. \
If target keypoints are missing, the distance is -1.
"""
N, K, _ = preds.shape
# set mask=0 when norm_factor==0
_mask = mask.copy()
_mask[np.where((norm_factor == 0).sum(1))[0], :] = False
distances = np.full((N, K), -1, dtype=np.float32)
# handle invalid values
norm_factor[np.where(norm_factor <= 0)] = 1e6
distances[_mask] = np.linalg.norm(
((preds - gts) / norm_factor[:, None, :])[_mask], axis=-1)
return distances.T
The provided code snippet includes necessary dependencies for implementing the `keypoint_epe` function. Write a Python function `def keypoint_epe(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray) -> float` to solve the following problem:
Calculate the end-point error. Note: - instance number: N - keypoint number: K Args: pred (np.ndarray[N, K, 2]): Predicted keypoint location. gt (np.ndarray[N, K, 2]): Groundtruth keypoint location. mask (np.ndarray[N, K]): Visibility of the target. False for invisible joints, and True for visible. Invisible joints will be ignored for accuracy calculation. Returns: float: Average end-point error.
Here is the function:
def keypoint_epe(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray) -> float:
    """Calculate the end-point error (EPE).

    Note:
        - instance number: N
        - keypoint number: K

    Args:
        pred (np.ndarray[N, K, 2]): Predicted keypoint location.
        gt (np.ndarray[N, K, 2]): Groundtruth keypoint location.
        mask (np.ndarray[N, K]): Visibility of the target. False for
            invisible joints, which are ignored for accuracy calculation.

    Returns:
        float: Average end-point error.
    """
    # Unit normalization factors: the EPE is the raw pixel distance.
    unit_norm = np.ones((pred.shape[0], pred.shape[2]), dtype=np.float32)
    dists = _calc_distances(pred, gt, mask, unit_norm)
    # Entries of -1 mark missing/masked keypoints and are dropped.
    valid = dists[dists != -1]
    # `max(1, ...)` guards the division when no keypoint is valid.
    return valid.sum() / max(1, len(valid))
159,175 | from typing import Optional, Tuple
import numpy as np
from mmpose.codecs.utils import get_heatmap_maximum, get_simcc_maximum
from .mesh_eval import compute_similarity_transform
def keypoint_pck_accuracy(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray,
                          thr: float, norm_factor: np.ndarray) -> tuple:
    """Calculate the pose accuracy of PCK for each individual keypoint and
    the averaged accuracy across all keypoints for coordinates.

    Note:
        PCK metric measures accuracy of the localization of the body joints.
        The distances between predicted positions and the ground-truth ones
        are typically normalized by the bounding box size.
        The threshold (thr) of the normalized distance is commonly set
        as 0.05, 0.1 or 0.2 etc.

        - instance number: N
        - keypoint number: K

    Args:
        pred (np.ndarray[N, K, 2]): Predicted keypoint location.
        gt (np.ndarray[N, K, 2]): Groundtruth keypoint location.
        mask (np.ndarray[N, K]): Visibility of the target. False for
            invisible joints, which are ignored for accuracy calculation.
        thr (float): Threshold of PCK calculation.
        norm_factor (np.ndarray[N, 2]): Normalization factor for H&W.

    Returns:
        tuple: A tuple containing keypoint accuracy.

        - acc (np.ndarray[K]): Accuracy of each keypoint.
        - avg_acc (float): Averaged accuracy across all keypoints.
        - cnt (int): Number of valid keypoints.
    """
    distances = _calc_distances(pred, gt, mask, norm_factor)
    # An accuracy of -1 marks a keypoint with no valid measurement; such
    # entries are excluded from the average and from the valid count.
    acc = np.array([_distance_acc(d, thr) for d in distances])
    valid_acc = acc[acc >= 0]
    cnt = len(valid_acc)
    avg_acc = valid_acc.mean() if cnt > 0 else 0.0
    return acc, avg_acc, cnt
The provided code snippet includes necessary dependencies for implementing the `pose_pck_accuracy` function. Write a Python function `def pose_pck_accuracy(output: np.ndarray, target: np.ndarray, mask: np.ndarray, thr: float = 0.05, normalize: Optional[np.ndarray] = None) -> tuple` to solve the following problem:
Calculate the pose accuracy of PCK for each individual keypoint and the averaged accuracy across all keypoints from heatmaps. Note: PCK metric measures accuracy of the localization of the body joints. The distances between predicted positions and the ground-truth ones are typically normalized by the bounding box size. The threshold (thr) of the normalized distance is commonly set as 0.05, 0.1 or 0.2 etc. - batch_size: N - num_keypoints: K - heatmap height: H - heatmap width: W Args: output (np.ndarray[N, K, H, W]): Model output heatmaps. target (np.ndarray[N, K, H, W]): Groundtruth heatmaps. mask (np.ndarray[N, K]): Visibility of the target. False for invisible joints, and True for visible. Invisible joints will be ignored for accuracy calculation. thr (float): Threshold of PCK calculation. Default 0.05. normalize (np.ndarray[N, 2]): Normalization factor for H&W. Returns: tuple: A tuple containing keypoint accuracy. - np.ndarray[K]: Accuracy of each keypoint. - float: Averaged accuracy across all keypoints. - int: Number of valid keypoints.
Here is the function:
def pose_pck_accuracy(output: np.ndarray,
                      target: np.ndarray,
                      mask: np.ndarray,
                      thr: float = 0.05,
                      normalize: Optional[np.ndarray] = None) -> tuple:
    """Calculate PCK accuracy from heatmaps.

    Decodes the argmax location of each predicted and ground-truth heatmap,
    then delegates to :func:`keypoint_pck_accuracy`.

    Note:
        - batch_size: N
        - num_keypoints: K
        - heatmap height: H
        - heatmap width: W

    Args:
        output (np.ndarray[N, K, H, W]): Model output heatmaps.
        target (np.ndarray[N, K, H, W]): Groundtruth heatmaps.
        mask (np.ndarray[N, K]): Visibility of the target. False for
            invisible joints, which are ignored for accuracy calculation.
        thr (float): Threshold of PCK calculation. Default 0.05.
        normalize (np.ndarray[N, 2]): Normalization factor for H&W. If not
            given, the heatmap size is used.

    Returns:
        tuple: A tuple containing keypoint accuracy.

        - np.ndarray[K]: Accuracy of each keypoint.
        - float: Averaged accuracy across all keypoints.
        - int: Number of valid keypoints.
    """
    batch, num_kpts, height, width = output.shape
    if num_kpts == 0:
        # Nothing to evaluate.
        return None, 0, 0
    if normalize is None:
        # Fall back to normalizing distances by the heatmap size.
        normalize = np.tile(np.array([[height, width]]), (batch, 1))

    pred_coords, _ = get_heatmap_maximum(output)
    gt_coords, _ = get_heatmap_maximum(target)
    return keypoint_pck_accuracy(pred_coords, gt_coords, mask, thr, normalize)
159,176 | from typing import Optional, Tuple
import numpy as np
from mmpose.codecs.utils import get_heatmap_maximum, get_simcc_maximum
from .mesh_eval import compute_similarity_transform
def keypoint_pck_accuracy(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray,
                          thr: float, norm_factor: np.ndarray) -> tuple:
    """Calculate the pose accuracy of PCK for each individual keypoint and
    the averaged accuracy across all keypoints for coordinates.

    Note:
        PCK metric measures accuracy of the localization of the body joints.
        The distances between predicted positions and the ground-truth ones
        are typically normalized by the bounding box size.
        The threshold (thr) of the normalized distance is commonly set
        as 0.05, 0.1 or 0.2 etc.

        - instance number: N
        - keypoint number: K

    Args:
        pred (np.ndarray[N, K, 2]): Predicted keypoint location.
        gt (np.ndarray[N, K, 2]): Groundtruth keypoint location.
        mask (np.ndarray[N, K]): Visibility of the target. False for
            invisible joints, which are ignored for accuracy calculation.
        thr (float): Threshold of PCK calculation.
        norm_factor (np.ndarray[N, 2]): Normalization factor for H&W.

    Returns:
        tuple: A tuple containing keypoint accuracy.

        - acc (np.ndarray[K]): Accuracy of each keypoint.
        - avg_acc (float): Averaged accuracy across all keypoints.
        - cnt (int): Number of valid keypoints.
    """
    distances = _calc_distances(pred, gt, mask, norm_factor)
    # An accuracy of -1 marks a keypoint with no valid measurement; such
    # entries are excluded from the average and from the valid count.
    acc = np.array([_distance_acc(d, thr) for d in distances])
    valid_acc = acc[acc >= 0]
    cnt = len(valid_acc)
    avg_acc = valid_acc.mean() if cnt > 0 else 0.0
    return acc, avg_acc, cnt
The provided code snippet includes necessary dependencies for implementing the `simcc_pck_accuracy` function. Write a Python function `def simcc_pck_accuracy(output: Tuple[np.ndarray, np.ndarray], target: Tuple[np.ndarray, np.ndarray], simcc_split_ratio: float, mask: np.ndarray, thr: float = 0.05, normalize: Optional[np.ndarray] = None) -> tuple` to solve the following problem:
Calculate the pose accuracy of PCK for each individual keypoint and the averaged accuracy across all keypoints from SimCC. Note: PCK metric measures accuracy of the localization of the body joints. The distances between predicted positions and the ground-truth ones are typically normalized by the bounding box size. The threshold (thr) of the normalized distance is commonly set as 0.05, 0.1 or 0.2 etc. - instance number: N - keypoint number: K Args: output (Tuple[np.ndarray, np.ndarray]): Model predicted SimCC. target (Tuple[np.ndarray, np.ndarray]): Groundtruth SimCC. mask (np.ndarray[N, K]): Visibility of the target. False for invisible joints, and True for visible. Invisible joints will be ignored for accuracy calculation. thr (float): Threshold of PCK calculation. Default 0.05. normalize (np.ndarray[N, 2]): Normalization factor for H&W. Returns: tuple: A tuple containing keypoint accuracy. - np.ndarray[K]: Accuracy of each keypoint. - float: Averaged accuracy across all keypoints. - int: Number of valid keypoints.
Here is the function:
def simcc_pck_accuracy(output: Tuple[np.ndarray, np.ndarray],
                       target: Tuple[np.ndarray, np.ndarray],
                       simcc_split_ratio: float,
                       mask: np.ndarray,
                       thr: float = 0.05,
                       normalize: Optional[np.ndarray] = None) -> tuple:
    """Calculate PCK accuracy from SimCC representations.

    The x/y SimCC vectors are decoded to coordinates via their maxima,
    mapped back to the input scale with ``simcc_split_ratio``, and then
    evaluated with :func:`keypoint_pck_accuracy`.

    Note:
        - instance number: N
        - keypoint number: K

    Args:
        output (Tuple[np.ndarray, np.ndarray]): Model predicted SimCC
            (x and y representations).
        target (Tuple[np.ndarray, np.ndarray]): Groundtruth SimCC.
        simcc_split_ratio (float): SimCC split ratio used to recover the
            input-scale coordinates.
        mask (np.ndarray[N, K]): Visibility of the target. False for
            invisible joints, which are ignored for accuracy calculation.
        thr (float): Threshold of PCK calculation. Default 0.05.
        normalize (np.ndarray[N, 2]): Normalization factor for H&W. If not
            given, the recovered input size is used.

    Returns:
        tuple: A tuple containing keypoint accuracy.

        - np.ndarray[K]: Accuracy of each keypoint.
        - float: Averaged accuracy across all keypoints.
        - int: Number of valid keypoints.
    """
    pred_x, pred_y = output
    gt_x, gt_y = target

    num_insts = pred_x.shape[0]
    # Recover the input-space width/height from the SimCC vector lengths.
    width = int(pred_x.shape[2] / simcc_split_ratio)
    height = int(pred_y.shape[2] / simcc_split_ratio)
    if normalize is None:
        normalize = np.tile(np.array([[height, width]]), (num_insts, 1))

    pred_coords, _ = get_simcc_maximum(pred_x, pred_y)
    pred_coords = pred_coords / simcc_split_ratio
    gt_coords, _ = get_simcc_maximum(gt_x, gt_y)
    gt_coords = gt_coords / simcc_split_ratio

    return keypoint_pck_accuracy(pred_coords, gt_coords, mask, thr, normalize)
159,177 | from typing import Optional, Tuple
import numpy as np
from mmpose.codecs.utils import get_heatmap_maximum, get_simcc_maximum
from .mesh_eval import compute_similarity_transform
The provided code snippet includes necessary dependencies for implementing the `multilabel_classification_accuracy` function. Write a Python function `def multilabel_classification_accuracy(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray, thr: float = 0.5) -> float` to solve the following problem:
Get multi-label classification accuracy. Note: - batch size: N - label number: L Args: pred (np.ndarray[N, L, 2]): model predicted labels. gt (np.ndarray[N, L, 2]): ground-truth labels. mask (np.ndarray[N, 1] or np.ndarray[N, L] ): reliability of ground-truth labels. thr (float): Threshold for calculating accuracy. Returns: float: multi-label classification accuracy.
Here is the function:
def multilabel_classification_accuracy(pred: np.ndarray,
                                       gt: np.ndarray,
                                       mask: np.ndarray,
                                       thr: float = 0.5) -> float:
    """Compute multi-label classification accuracy.

    Note:
        - batch size: N
        - label number: L

    Args:
        pred (np.ndarray[N, L, 2]): model predicted labels.
        gt (np.ndarray[N, L, 2]): ground-truth labels.
        mask (np.ndarray[N, 1] or np.ndarray[N, L]): reliability of
            ground-truth labels.
        thr (float): Threshold for calculating accuracy.

    Returns:
        float: multi-label classification accuracy.
    """
    # Keep only samples whose ground truth is reliable for every label.
    if mask.ndim == 2:
        valid = (mask > 0).min(axis=1)
    else:
        valid = mask > 0
    pred_valid = pred[valid]
    gt_valid = gt[valid]

    if pred_valid.shape[0] == 0:
        # No sample carries full ground-truth labels.
        return 0.0

    # A sample counts as correct only when prediction and ground truth fall
    # on the same side of the threshold for every label.
    same_side = ((pred_valid - thr) * (gt_valid - thr)) > 0
    return same_side.all(axis=1).mean()
159,178 | from typing import Optional, Tuple
import numpy as np
from mmpose.codecs.utils import get_heatmap_maximum, get_simcc_maximum
from .mesh_eval import compute_similarity_transform
def compute_similarity_transform(source_points, target_points):
    """Align ``source_points`` to ``target_points`` with a similarity
    transform (orthogonal Procrustes).

    Finds the scale s, rotation R (3x3) and translation t (3x1) that bring
    the source point set closest to the target point set in the
    least-squares sense, and returns the transformed source points.

    Note:
        Points number: N

    Args:
        source_points (np.ndarray): Source point set with shape [N, 3].
        target_points (np.ndarray): Target point set with shape [N, 3].

    Returns:
        np.ndarray: Transformed source point set with shape [N, 3].
    """
    assert target_points.shape[0] == source_points.shape[0]
    assert target_points.shape[1] == 3 and source_points.shape[1] == 3

    # Work in (3, N) layout.
    src = source_points.T
    tgt = target_points.T

    # Center both point clouds.
    src_mean = src.mean(axis=1, keepdims=True)
    tgt_mean = tgt.mean(axis=1, keepdims=True)
    src_centered = src - src_mean
    tgt_centered = tgt - tgt_mean

    # Variance of the centered source, used to recover the scale.
    src_var = np.sum(src_centered**2)

    # Cross-covariance between the two centered clouds.
    cov = src_centered.dot(tgt_centered.T)

    # Optimal rotation from the SVD of the covariance; Z enforces
    # det(R) = 1 (a proper rotation, not a reflection).
    U, _, Vh = np.linalg.svd(cov)
    V = Vh.T
    Z = np.eye(U.shape[0])
    Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))
    R = V.dot(Z.dot(U.T))

    # Scale and translation follow in closed form.
    scale = np.trace(R.dot(cov)) / src_var
    t = tgt_mean - scale * R.dot(src_mean)

    # Apply the transform and restore the (N, 3) layout.
    return (scale * R.dot(src) + t).T
The provided code snippet includes necessary dependencies for implementing the `keypoint_mpjpe` function. Write a Python function `def keypoint_mpjpe(pred: np.ndarray, gt: np.ndarray, mask: np.ndarray, alignment: str = 'none')` to solve the following problem:
Calculate the mean per-joint position error (MPJPE) and the error after rigid alignment with the ground truth (P-MPJPE). Note: - batch_size: N - num_keypoints: K - keypoint_dims: C Args: pred (np.ndarray): Predicted keypoint location with shape [N, K, C]. gt (np.ndarray): Groundtruth keypoint location with shape [N, K, C]. mask (np.ndarray): Visibility of the target with shape [N, K]. False for invisible joints, and True for visible. Invisible joints will be ignored for accuracy calculation. alignment (str, optional): method to align the prediction with the groundtruth. Supported options are: - ``'none'``: no alignment will be applied - ``'scale'``: align in the least-square sense in scale - ``'procrustes'``: align in the least-square sense in scale, rotation and translation. Returns: tuple: A tuple containing joint position errors - (float | np.ndarray): mean per-joint position error (mpjpe). - (float | np.ndarray): mpjpe after rigid alignment with the ground truth (p-mpjpe).
Here is the function:
def keypoint_mpjpe(pred: np.ndarray,
                   gt: np.ndarray,
                   mask: np.ndarray,
                   alignment: str = 'none'):
    """Calculate the mean per-joint position error (MPJPE), optionally after
    aligning the prediction to the ground truth.

    Note:
        - batch_size: N
        - num_keypoints: K
        - keypoint_dims: C

    Args:
        pred (np.ndarray): Predicted keypoint location with shape [N, K, C].
        gt (np.ndarray): Groundtruth keypoint location with shape [N, K, C].
        mask (np.ndarray): Visibility of the target with shape [N, K].
            False for invisible joints, and True for visible.
            Invisible joints will be ignored for accuracy calculation.
        alignment (str, optional): method to align the prediction with the
            groundtruth. Supported options are:

            - ``'none'``: no alignment will be applied
            - ``'scale'``: align in the least-square sense in scale
            - ``'procrustes'``: align in the least-square sense in
              scale, rotation and translation (yields P-MPJPE)

    Returns:
        float: the mean per-joint position error over all visible joints,
        computed after the requested alignment. Note that a single scalar
        is returned, not a (mpjpe, p-mpjpe) tuple.

    Raises:
        ValueError: if ``alignment`` is not one of the supported options.
    """
    # At least one joint must be visible; otherwise the mean is undefined.
    assert mask.any()
    if alignment == 'none':
        pass
    elif alignment == 'procrustes':
        # Per-sample similarity (Procrustes) alignment of pred onto gt.
        pred = np.stack([
            compute_similarity_transform(pred_i, gt_i)
            for pred_i, gt_i in zip(pred, gt)
        ])
    elif alignment == 'scale':
        # Closed-form least-squares scale: argmin_s ||s * pred - gt||^2.
        pred_dot_pred = np.einsum('nkc,nkc->n', pred, pred)
        pred_dot_gt = np.einsum('nkc,nkc->n', pred, gt)
        scale_factor = pred_dot_gt / pred_dot_pred
        pred = pred * scale_factor[:, None, None]
    else:
        raise ValueError(f'Invalid value for alignment: {alignment}')
    # Euclidean distance per joint, averaged over visible joints only.
    error = np.linalg.norm(pred - gt, ord=2, axis=-1)[mask].mean()
    return error
159,179 | from mmengine.dist.utils import get_dist_info
from mmengine.optim import DefaultOptimWrapperConstructor
from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS
def get_num_layer_for_vit(var_name, num_max_layer):
    """Map a ViT parameter name to its layer index for layer-wise decay.

    Args:
        var_name (str): Full dotted parameter name (e.g.
            ``'backbone.layers.3.attn.qkv.weight'``).
        num_max_layer (int): Total number of decay layers.

    Returns:
        int: 0 for embeddings / patch embedding, ``block_idx + 1`` for
        transformer blocks, ``num_max_layer - 1`` for everything else.
    """
    # Token and positional embeddings belong to the stem (layer 0).
    stem_params = ('backbone.cls_token', 'backbone.mask_token',
                   'backbone.pos_embed')
    if var_name in stem_params or var_name.startswith(
            'backbone.patch_embed'):
        return 0
    # Transformer blocks: 'backbone.layers.<idx>....' maps to idx + 1.
    if var_name.startswith('backbone.layers'):
        return int(var_name.split('.')[2]) + 1
    # Everything else (e.g. the head) is assigned to the last layer.
    return num_max_layer - 1
159,180 | from collections import namedtuple
from itertools import product
from typing import Any, List, Optional, Tuple
import numpy as np
import torch
from munkres import Munkres
from torch import Tensor
from mmpose.registry import KEYPOINT_CODECS
from mmpose.utils.tensor_utils import to_numpy
from .base import BaseKeypointCodec
from .utils import (batch_heatmap_nms, generate_gaussian_heatmaps,
generate_udp_gaussian_heatmaps, refine_keypoints,
refine_keypoints_dark_udp)
The provided code snippet includes necessary dependencies for implementing the `_group_keypoints_by_tags` function. Write a Python function `def _group_keypoints_by_tags(vals: np.ndarray, tags: np.ndarray, locs: np.ndarray, keypoint_order: List[int], val_thr: float, tag_thr: float = 1.0, max_groups: Optional[int] = None) -> np.ndarray` to solve the following problem:
Group the keypoints by tags using Munkres algorithm. Note: - keypoint number: K - candidate number: M - tag dimension: L - coordinate dimension: D - group number: G Args: vals (np.ndarray): The heatmap response values of keypoints in shape (K, M) tags (np.ndarray): The tags of the keypoint candidates in shape (K, M, L) locs (np.ndarray): The locations of the keypoint candidates in shape (K, M, D) keypoint_order (List[int]): The grouping order of the keypoints. The grouping usually starts from keypoints around the head and torso, and gradually moves out to the limbs val_thr (float): The threshold of the keypoint response value tag_thr (float): The maximum allowed tag distance when matching a keypoint to a group. A keypoint with larger tag distance to any of the existing groups will initialize a new group max_groups (int, optional): The maximum group number. ``None`` means no limitation. Defaults to ``None`` Returns: np.ndarray: grouped keypoints in shape (G, K, D+1), where the last dimension is the concatenated keypoint coordinates and scores.
Here is the function:
def _group_keypoints_by_tags(vals: np.ndarray,
                             tags: np.ndarray,
                             locs: np.ndarray,
                             keypoint_order: List[int],
                             val_thr: float,
                             tag_thr: float = 1.0,
                             max_groups: Optional[int] = None) -> np.ndarray:
    """Group keypoint candidates into instances by their associative
    embedding tags, using the Munkres (Hungarian) assignment algorithm.

    Note:
        - keypoint number: K
        - candidate number: M
        - tag dimension: L
        - coordinate dimension: D
        - group number: G

    Args:
        vals (np.ndarray): The heatmap response values of keypoints in shape
            (K, M)
        tags (np.ndarray): The tags of the keypoint candidates in shape
            (K, M, L)
        locs (np.ndarray): The locations of the keypoint candidates in shape
            (K, M, D)
        keypoint_order (List[int]): The grouping order of the keypoints.
            The grouping usually starts from keypoints around the head and
            torso, and gradually moves out to the limbs
        val_thr (float): The threshold of the keypoint response value
        tag_thr (float): The maximum allowed tag distance when matching a
            keypoint to a group. A keypoint with larger tag distance to any
            of the existing groups will initialize a new group
        max_groups (int, optional): The maximum group number. ``None`` means
            no limitation. Defaults to ``None``

    Returns:
        np.ndarray: grouped keypoints in shape (G, K, D+1), where the last
        dimension is the concatenated keypoint coordinates and scores.
    """
    K, M, D = locs.shape
    assert vals.shape == tags.shape[:2] == (K, M)
    assert len(keypoint_order) == K
    # Build Munkres instance for optimal keypoint-to-group assignment
    munkres = Munkres()
    # Build a group pool; each group collects the keypoints of one instance
    groups = []
    Group = namedtuple('Group', field_names=['kpts', 'scores', 'tag_list'])
    def _init_group():
        """Initialize an empty group composed of per-keypoint coordinates,
        scores and the tags of the keypoints assigned so far."""
        _group = Group(
            kpts=np.zeros((K, D), dtype=np.float32),
            scores=np.zeros(K, dtype=np.float32),
            tag_list=[])
        return _group
    for i in keypoint_order:
        # Get all valid candidates of the i-th keypoint
        valid = vals[i] > val_thr
        if not valid.any():
            continue
        tags_i = tags[i, valid]  # (M', L)
        vals_i = vals[i, valid]  # (M',)
        locs_i = locs[i, valid]  # (M', D)
        if len(groups) == 0:  # Initialize the group pool
            for tag, val, loc in zip(tags_i, vals_i, locs_i):
                group = _init_group()
                group.kpts[i] = loc
                group.scores[i] = val
                group.tag_list.append(tag)
                groups.append(group)
        else:  # Match keypoints to existing groups
            groups = groups[:max_groups]
            # Each group is represented by the mean tag of its members
            group_tags = [np.mean(g.tag_list, axis=0) for g in groups]
            # Calculate distance matrix between group tags and tag candidates
            # of the i-th keypoint
            # Shape: (M', 1, L) , (1, G, L) -> (M', G, L)
            diff = tags_i[:, None] - np.array(group_tags)[None]
            dists = np.linalg.norm(diff, ord=2, axis=2)
            num_kpts, num_groups = dists.shape[:2]
            # Experimental cost function for keypoint-group matching:
            # rounded tag distance dominates; the response value breaks ties
            costs = np.round(dists) * 100 - vals_i[..., None]
            if num_kpts > num_groups:
                # Pad with huge costs so surplus keypoints get assigned to
                # virtual columns and start new groups in the loop below
                padding = np.full((num_kpts, num_kpts - num_groups),
                                  1e10,
                                  dtype=np.float32)
                costs = np.concatenate((costs, padding), axis=1)
            # Match keypoints and groups by Munkres algorithm
            matches = munkres.compute(costs)
            for kpt_idx, group_idx in matches:
                if group_idx < num_groups and dists[kpt_idx,
                                                    group_idx] < tag_thr:
                    # Add the keypoint to the matched group
                    group = groups[group_idx]
                else:
                    # Initialize a new group with unmatched keypoint
                    group = _init_group()
                    groups.append(group)
                group.kpts[i] = locs_i[kpt_idx]
                group.scores[i] = vals_i[kpt_idx]
                group.tag_list.append(tags_i[kpt_idx])
    groups = groups[:max_groups]
    if groups:
        # Concatenate coordinates and scores along the last axis -> (K, D+1)
        grouped_keypoints = np.stack(
            [np.r_['1', g.kpts, g.scores[:, None]] for g in groups])
    else:
        grouped_keypoints = np.empty((0, K, D + 1))
    return grouped_keypoints
159,181 | from itertools import product
from typing import Tuple, Union
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `generate_gaussian_heatmaps` function. Write a Python function `def generate_gaussian_heatmaps( heatmap_size: Tuple[int, int], keypoints: np.ndarray, keypoints_visible: np.ndarray, sigma: Union[float, Tuple[float], np.ndarray], ) -> Tuple[np.ndarray, np.ndarray]` to solve the following problem:
Generate gaussian heatmaps of keypoints. Args: heatmap_size (Tuple[int, int]): Heatmap size in [W, H] keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) keypoints_visible (np.ndarray): Keypoint visibilities in shape (N, K) sigma (float or List[float]): A list of sigma values of the Gaussian heatmap for each instance. If sigma is given as a single float value, it will be expanded into a tuple Returns: tuple: - heatmaps (np.ndarray): The generated heatmap in shape (K, H, W) where [W, H] is the `heatmap_size` - keypoint_weights (np.ndarray): The target weights in shape (N, K)
Here is the function:
def generate_gaussian_heatmaps(
    heatmap_size: Tuple[int, int],
    keypoints: np.ndarray,
    keypoints_visible: np.ndarray,
    sigma: Union[float, Tuple[float], np.ndarray],
) -> Tuple[np.ndarray, np.ndarray]:
    """Generate gaussian heatmaps of keypoints.

    Args:
        heatmap_size (Tuple[int, int]): Heatmap size in [W, H]
        keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D)
        keypoints_visible (np.ndarray): Keypoint visibilities in shape
            (N, K)
        sigma (float or List[float]): A list of sigma values of the Gaussian
            heatmap for each instance. If sigma is given as a single float
            value, it will be expanded into a tuple

    Returns:
        tuple:
        - heatmaps (np.ndarray): The generated heatmap in shape
            (K, H, W) where [W, H] is the `heatmap_size`
        - keypoint_weights (np.ndarray): The target weights in shape
            (N, K)
    """
    num_instances, num_keypoints, _ = keypoints.shape
    W, H = heatmap_size

    heatmaps = np.zeros((num_keypoints, H, W), dtype=np.float32)
    keypoint_weights = keypoints_visible.copy()

    # A scalar sigma is shared by all instances.
    if isinstance(sigma, (int, float)):
        sigma = (sigma, ) * num_instances

    for n in range(num_instances):
        # Truncate the gaussian patch at 3 sigma.
        radius = sigma[n] * 3
        diameter = 2 * radius + 1
        xs = np.arange(0, diameter, 1, dtype=np.float32)
        ys = xs[:, None]
        cx = cy = diameter // 2

        for k in range(num_keypoints):
            # Skip unlabeled keypoints.
            if keypoints_visible[n, k] < 0.5:
                continue

            # Integer center of the gaussian patch.
            mu = (keypoints[n, k] + 0.5).astype(np.int64)

            # Discard keypoints whose patch lies entirely off the map.
            left, top = (mu - radius).astype(np.int64)
            right, bottom = (mu + radius + 1).astype(np.int64)
            if left >= W or top >= H or right < 0 or bottom < 0:
                keypoint_weights[n, k] = 0
                continue

            # Unnormalized gaussian whose peak value equals 1.
            patch = np.exp(-((xs - cx)**2 + (ys - cy)**2) /
                           (2 * sigma[n]**2))

            # Clip the patch to its overlap with the heatmap.
            px1, px2 = max(0, -left), min(W, right) - left
            py1, py2 = max(0, -top), min(H, bottom) - top
            hx1, hx2 = max(0, left), min(W, right)
            hy1, hy2 = max(0, top), min(H, bottom)

            # Combine overlapping gaussians by taking the maximum.
            dst = heatmaps[k, hy1:hy2, hx1:hx2]
            np.maximum(dst, patch[py1:py2, px1:px2], out=dst)

    return heatmaps, keypoint_weights
159,182 | from itertools import product
from typing import Tuple, Union
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `generate_unbiased_gaussian_heatmaps` function. Write a Python function `def generate_unbiased_gaussian_heatmaps( heatmap_size: Tuple[int, int], keypoints: np.ndarray, keypoints_visible: np.ndarray, sigma: float, ) -> Tuple[np.ndarray, np.ndarray]` to solve the following problem:
Generate gaussian heatmaps of keypoints using `Dark Pose`_. Args: heatmap_size (Tuple[int, int]): Heatmap size in [W, H] keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) keypoints_visible (np.ndarray): Keypoint visibilities in shape (N, K) Returns: tuple: - heatmaps (np.ndarray): The generated heatmap in shape (K, H, W) where [W, H] is the `heatmap_size` - keypoint_weights (np.ndarray): The target weights in shape (N, K) .. _`Dark Pose`: https://arxiv.org/abs/1910.06278
Here is the function:
def generate_unbiased_gaussian_heatmaps(
    heatmap_size: Tuple[int, int],
    keypoints: np.ndarray,
    keypoints_visible: np.ndarray,
    sigma: float,
) -> Tuple[np.ndarray, np.ndarray]:
    """Generate gaussian heatmaps of keypoints using `Dark Pose`_.

    Args:
        heatmap_size (Tuple[int, int]): Heatmap size in [W, H]
        keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D)
        keypoints_visible (np.ndarray): Keypoint visibilities in shape
            (N, K)
        sigma (float): The sigma value of the Gaussian heatmap

    Returns:
        tuple:
        - heatmaps (np.ndarray): The generated heatmap in shape
            (K, H, W) where [W, H] is the `heatmap_size`
        - keypoint_weights (np.ndarray): The target weights in shape
            (N, K)

    .. _`Dark Pose`: https://arxiv.org/abs/1910.06278
    """
    num_instances, num_keypoints, _ = keypoints.shape
    W, H = heatmap_size

    heatmaps = np.zeros((num_keypoints, H, W), dtype=np.float32)
    keypoint_weights = keypoints_visible.copy()

    # 3-sigma truncation radius; the grid spans the full heatmap so the
    # gaussian center can sit at a sub-pixel location (the "unbiased"
    # encoding from Dark Pose).
    radius = sigma * 3
    xs = np.arange(0, W, 1, dtype=np.float32)
    ys = np.arange(0, H, 1, dtype=np.float32)[:, None]

    for n, k in product(range(num_instances), range(num_keypoints)):
        # Skip unlabeled keypoints.
        if keypoints_visible[n, k] < 0.5:
            continue

        mu = keypoints[n, k]

        # Discard keypoints whose gaussian lies entirely off the map.
        left, top = mu - radius
        right, bottom = mu + radius + 1
        if left >= W or top >= H or right < 0 or bottom < 0:
            keypoint_weights[n, k] = 0
            continue

        # Combine overlapping gaussians by taking the maximum.
        patch = np.exp(-((xs - mu[0])**2 + (ys - mu[1])**2) /
                       (2 * sigma**2))
        np.maximum(patch, heatmaps[k], out=heatmaps[k])

    return heatmaps, keypoint_weights
159,183 | from itertools import product
from typing import Tuple, Union
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `generate_udp_gaussian_heatmaps` function. Write a Python function `def generate_udp_gaussian_heatmaps( heatmap_size: Tuple[int, int], keypoints: np.ndarray, keypoints_visible: np.ndarray, sigma: float, ) -> Tuple[np.ndarray, np.ndarray]` to solve the following problem:
Generate gaussian heatmaps of keypoints using `UDP`_. Args: heatmap_size (Tuple[int, int]): Heatmap size in [W, H] keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) keypoints_visible (np.ndarray): Keypoint visibilities in shape (N, K) sigma (float): The sigma value of the Gaussian heatmap Returns: tuple: - heatmaps (np.ndarray): The generated heatmap in shape (K, H, W) where [W, H] is the `heatmap_size` - keypoint_weights (np.ndarray): The target weights in shape (N, K) .. _`UDP`: https://arxiv.org/abs/1911.07524
Here is the function:
def generate_udp_gaussian_heatmaps(
    heatmap_size: Tuple[int, int],
    keypoints: np.ndarray,
    keypoints_visible: np.ndarray,
    sigma: float,
) -> Tuple[np.ndarray, np.ndarray]:
    """Generate gaussian heatmaps of keypoints using `UDP`_.

    Args:
        heatmap_size (Tuple[int, int]): Heatmap size in [W, H]
        keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D)
        keypoints_visible (np.ndarray): Keypoint visibilities in shape
            (N, K)
        sigma (float): The sigma value of the Gaussian heatmap

    Returns:
        tuple:
        - heatmaps (np.ndarray): The generated heatmap in shape
            (K, H, W) where [W, H] is the `heatmap_size`
        - keypoint_weights (np.ndarray): The target weights in shape
            (N, K)

    .. _`UDP`: https://arxiv.org/abs/1911.07524
    """
    num_instances, num_keypoints, _ = keypoints.shape
    W, H = heatmap_size

    heatmaps = np.zeros((num_keypoints, H, W), dtype=np.float32)
    keypoint_weights = keypoints_visible.copy()

    # Truncate the gaussian patch at 3 sigma.
    radius = sigma * 3
    diameter = 2 * radius + 1
    xs = np.arange(0, diameter, 1, dtype=np.float32)
    ys = xs[:, None]

    for n, k in product(range(num_instances), range(num_keypoints)):
        # Skip unlabeled keypoints.
        if keypoints_visible[n, k] < 0.5:
            continue

        # Integer center of the gaussian patch.
        mu = (keypoints[n, k] + 0.5).astype(np.int64)

        # Discard keypoints whose patch lies entirely off the map.
        left, top = (mu - radius).astype(np.int64)
        right, bottom = (mu + radius + 1).astype(np.int64)
        if left >= W or top >= H or right < 0 or bottom < 0:
            keypoint_weights[n, k] = 0
            continue

        # UDP keeps the sub-pixel offset between the exact keypoint
        # position and the rounded patch center.
        mu_exact = keypoints[n, k]
        cx = diameter // 2 + (mu_exact[0] - mu[0])
        cy = diameter // 2 + (mu_exact[1] - mu[1])

        patch = np.exp(-((xs - cx)**2 + (ys - cy)**2) / (2 * sigma**2))

        # Clip the patch to its overlap with the heatmap.
        px1, px2 = max(0, -left), min(W, right) - left
        py1, py2 = max(0, -top), min(H, bottom) - top
        hx1, hx2 = max(0, left), min(W, right)
        hy1, hy2 = max(0, top), min(H, bottom)

        # Combine overlapping gaussians by taking the maximum.
        dst = heatmaps[k, hy1:hy2, hx1:hx2]
        np.maximum(dst, patch[py1:py2, px1:px2], out=dst)

    return heatmaps, keypoint_weights
159,184 | from typing import Optional
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `get_instance_root` function. Write a Python function `def get_instance_root(keypoints: np.ndarray, keypoints_visible: Optional[np.ndarray] = None, root_type: str = 'kpt_center') -> np.ndarray` to solve the following problem:
Calculate the coordinates and visibility of instance roots. Args: keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) keypoints_visible (np.ndarray): Keypoint visibilities in shape (N, K) root_type (str): Calculation of instance roots which should be one of the following options: - ``'kpt_center'``: The roots' coordinates are the mean coordinates of visible keypoints - ``'bbox_center'``: The roots' are the center of bounding boxes outlined by visible keypoints Defaults to ``'kpt_center'`` Returns: tuple - roots_coordinate(np.ndarray): Coordinates of instance roots in shape [N, D] - roots_visible(np.ndarray): Visibility of instance roots in shape [N]
Here is the function:
def get_instance_root(keypoints: np.ndarray,
                      keypoints_visible: Optional[np.ndarray] = None,
                      root_type: str = 'kpt_center') -> np.ndarray:
    """Calculate the coordinates and visibility of instance roots.

    Args:
        keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D)
        keypoints_visible (np.ndarray): Keypoint visibilities in shape
            (N, K)
        root_type (str): Calculation of instance roots which should
            be one of the following options:

            - ``'kpt_center'``: The roots' coordinates are the mean
                coordinates of visible keypoints
            - ``'bbox_center'``: The roots' are the center of bounding
                boxes outlined by visible keypoints

            Defaults to ``'kpt_center'``

    Returns:
        tuple
        - roots_coordinate(np.ndarray): Coordinates of instance roots in
            shape [N, D]
        - roots_visible(np.ndarray): Visibility of instance roots in
            shape [N]
    """
    num_instances = keypoints.shape[0]
    roots_coordinate = np.zeros((num_instances, 2), dtype=np.float32)
    # NOTE(review): roots start at 2 (not 1) before being set; preserved
    # from the original implementation.
    roots_visible = np.ones((num_instances), dtype=np.float32) * 2

    for idx in range(num_instances):
        # Collect this instance's visible keypoints.
        if keypoints_visible is not None:
            visible_kpts = keypoints[idx][keypoints_visible[idx] > 0]
        else:
            visible_kpts = keypoints[idx]

        if visible_kpts.size == 0:
            # No visible keypoint: the root is marked invisible.
            roots_visible[idx] = 0
            continue

        # Compute the instance root from the visible keypoints.
        if root_type == 'kpt_center':
            roots_coordinate[idx] = visible_kpts.mean(axis=0)
            roots_visible[idx] = 1
        elif root_type == 'bbox_center':
            roots_coordinate[idx] = (visible_kpts.max(axis=0) +
                                     visible_kpts.min(axis=0)) / 2.0
            roots_visible[idx] = 1
        else:
            raise ValueError(
                f'the value of `root_type` must be \'kpt_center\' or '
                f'\'bbox_center\', but got \'{root_type}\'')

    return roots_coordinate, roots_visible
159,185 | from typing import Optional
import numpy as np
def get_instance_bbox(keypoints: np.ndarray,
                      keypoints_visible: Optional[np.ndarray] = None
                      ) -> np.ndarray:
    """Calculate the pseudo instance bounding box from visible keypoints. The
    bounding boxes are in the xyxy format.

    Args:
        keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D)
        keypoints_visible (np.ndarray): Keypoint visibilities in shape
            (N, K)

    Returns:
        np.ndarray: bounding boxes in [N, 4]
    """
    num_instances = keypoints.shape[0]
    bbox = np.zeros((num_instances, 4), dtype=np.float32)
    for idx in range(num_instances):
        # Keep only the visible keypoints of this instance.
        visible_kpts = (keypoints[idx] if keypoints_visible is None else
                        keypoints[idx][keypoints_visible[idx] > 0])
        if visible_kpts.size == 0:
            # No visible keypoint: keep the all-zero placeholder box.
            continue
        bbox[idx, :2] = visible_kpts.min(axis=0)
        bbox[idx, 2:] = visible_kpts.max(axis=0)
    return bbox
The provided code snippet includes necessary dependencies for implementing the `get_diagonal_lengths` function. Write a Python function `def get_diagonal_lengths(keypoints: np.ndarray, keypoints_visible: Optional[np.ndarray] = None ) -> np.ndarray` to solve the following problem:
Calculate the diagonal length of instance bounding box from visible keypoints. Args: keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) keypoints_visible (np.ndarray): Keypoint visibilities in shape (N, K) Returns: np.ndarray: bounding box diagonal length in [N]
Here is the function:
def get_diagonal_lengths(keypoints: np.ndarray,
                         keypoints_visible: Optional[np.ndarray] = None
                         ) -> np.ndarray:
    """Calculate the diagonal length of instance bounding box from visible
    keypoints.

    Args:
        keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D)
        keypoints_visible (np.ndarray): Keypoint visibilities in shape
            (N, K)

    Returns:
        np.ndarray: bounding box diagonal length in [N]
    """
    # Corners of each instance's pseudo bbox, viewed as (N, 2, 2):
    # [[x1, y1], [x2, y2]].
    corners = get_instance_bbox(keypoints,
                                keypoints_visible).reshape(-1, 2, 2)
    # Width/height spans, then the Euclidean diagonal per instance.
    spans = corners[:, 1] - corners[:, 0]
    return np.sqrt(np.power(spans, 2).sum(axis=1))
159,186 | from itertools import product
import numpy as np
from .post_processing import gaussian_blur, gaussian_blur1d
The provided code snippet includes necessary dependencies for implementing the `refine_keypoints` function. Write a Python function `def refine_keypoints(keypoints: np.ndarray, heatmaps: np.ndarray) -> np.ndarray` to solve the following problem:
Refine keypoint predictions by moving from the maximum towards the second maximum by 0.25 pixel. The operation is in-place. Note: - instance number: N - keypoint number: K - keypoint dimension: D - heatmap size: [W, H] Args: keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D) heatmaps (np.ndarray): The heatmaps in shape (K, H, W) Returns: np.ndarray: Refine keypoint coordinates in shape (N, K, D)
Here is the function:
def refine_keypoints(keypoints: np.ndarray,
                     heatmaps: np.ndarray) -> np.ndarray:
    """Refine keypoint predictions by nudging each keypoint 0.25 pixel
    towards the neighboring pixel with the higher heatmap response. The
    operation is performed in-place on ``keypoints``.

    Note:
        - instance number: N
        - keypoint number: K
        - keypoint dimension: D
        - heatmap size: [W, H]

    Args:
        keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D)
        heatmaps (np.ndarray): The heatmaps in shape (K, H, W)

    Returns:
        np.ndarray: Refined keypoint coordinates in shape (N, K, D)
    """
    num_instances, num_keypoints = keypoints.shape[:2]
    H, W = heatmaps.shape[1:]

    for n in range(num_instances):
        for k in range(num_keypoints):
            x, y = keypoints[n, k, :2].astype(int)

            # Gradients are only evaluated when the sampled neighbors are
            # guaranteed to lie inside the heatmap; otherwise they are 0
            dx = (heatmaps[k, y, x + 1] - heatmaps[k, y, x - 1]
                  if 1 < x < W - 1 and 0 < y < H else 0.)
            dy = (heatmaps[k, y + 1, x] - heatmaps[k, y - 1, x]
                  if 1 < y < H - 1 and 0 < x < W else 0.)

            # Shift a quarter pixel in the direction of increasing response
            keypoints[n, k] += np.sign([dx, dy], dtype=np.float32) * 0.25

    return keypoints
159,187 | from itertools import product
import numpy as np
from .post_processing import gaussian_blur, gaussian_blur1d
def gaussian_blur(heatmaps: np.ndarray, kernel: int = 11) -> np.ndarray:
    """Modulate heatmap distribution with Gaussian.

    Each keypoint channel is blurred in-place and then rescaled so its
    peak keeps the original maximum value.

    Note:
        - num_keypoints: K
        - heatmap height: H
        - heatmap width: W

    Args:
        heatmaps (np.ndarray[K, H, W]): model predicted heatmaps.
        kernel (int): Gaussian kernel size (K) for modulation, which should
            match the heatmap gaussian sigma when training.
            K=17 for sigma=3 and k=11 for sigma=2.

    Returns:
        np.ndarray ([K, H, W]): Modulated heatmap distribution.
    """
    assert kernel % 2 == 1

    pad = (kernel - 1) // 2
    num_kpts, height, width = heatmaps.shape

    for idx in range(num_kpts):
        peak = np.max(heatmaps[idx])
        # Blur a zero-padded copy so the border is handled explicitly
        padded = np.zeros((height + 2 * pad, width + 2 * pad),
                          dtype=np.float32)
        padded[pad:-pad, pad:-pad] = heatmaps[idx].copy()
        padded = cv2.GaussianBlur(padded, (kernel, kernel), 0)
        heatmaps[idx] = padded[pad:-pad, pad:-pad].copy()
        # Restore the original peak magnitude
        heatmaps[idx] *= peak / np.max(heatmaps[idx])

    return heatmaps
The provided code snippet includes necessary dependencies for implementing the `refine_keypoints_dark` function. Write a Python function `def refine_keypoints_dark(keypoints: np.ndarray, heatmaps: np.ndarray, blur_kernel_size: int) -> np.ndarray` to solve the following problem:
Refine keypoint predictions using distribution aware coordinate decoding. See `Dark Pose`_ for details. The operation is in-place. Note: - instance number: N - keypoint number: K - keypoint dimension: D - heatmap size: [W, H] Args: keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D) heatmaps (np.ndarray): The heatmaps in shape (K, H, W) blur_kernel_size (int): The Gaussian blur kernel size of the heatmap modulation Returns: np.ndarray: Refine keypoint coordinates in shape (N, K, D) .. _`Dark Pose`: https://arxiv.org/abs/1910.06278
Here is the function:
def refine_keypoints_dark(keypoints: np.ndarray, heatmaps: np.ndarray,
                          blur_kernel_size: int) -> np.ndarray:
    """Refine keypoint predictions using distribution aware coordinate
    decoding. See `Dark Pose`_ for details. The operation is in-place.

    Note:
        - instance number: N
        - keypoint number: K
        - keypoint dimension: D
        - heatmap size: [W, H]

    Args:
        keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D)
        heatmaps (np.ndarray): The heatmaps in shape (K, H, W)
        blur_kernel_size (int): The Gaussian blur kernel size of the heatmap
            modulation

    Returns:
        np.ndarray: Refined keypoint coordinates in shape (N, K, D)

    .. _`Dark Pose`: https://arxiv.org/abs/1910.06278
    """
    N, K = keypoints.shape[:2]
    H, W = heatmaps.shape[1:]

    # modulate heatmaps with a Gaussian, then move to log space so a
    # Gaussian-shaped peak becomes quadratic (clamp first to avoid log(0))
    heatmaps = gaussian_blur(heatmaps, blur_kernel_size)
    np.maximum(heatmaps, 1e-10, heatmaps)
    np.log(heatmaps, heatmaps)

    for n, k in product(range(N), range(K)):
        x, y = keypoints[n, k, :2].astype(int)
        # refine only when the 2nd-order neighborhood is fully inside the map
        if 1 < x < W - 2 and 1 < y < H - 2:
            # first-order derivatives (central differences)
            dx = 0.5 * (heatmaps[k, y, x + 1] - heatmaps[k, y, x - 1])
            dy = 0.5 * (heatmaps[k, y + 1, x] - heatmaps[k, y - 1, x])
            # second-order derivatives for the 2x2 Hessian
            dxx = 0.25 * (
                heatmaps[k, y, x + 2] - 2 * heatmaps[k, y, x] +
                heatmaps[k, y, x - 2])
            dxy = 0.25 * (
                heatmaps[k, y + 1, x + 1] - heatmaps[k, y - 1, x + 1] -
                heatmaps[k, y + 1, x - 1] + heatmaps[k, y - 1, x - 1])
            dyy = 0.25 * (
                heatmaps[k, y + 2, x] - 2 * heatmaps[k, y, x] +
                heatmaps[k, y - 2, x])
            derivative = np.array([[dx], [dy]])
            hessian = np.array([[dxx, dxy], [dxy, dyy]])
            # one Newton step: offset = -H^-1 @ g; skip if Hessian singular
            if dxx * dyy - dxy**2 != 0:
                hessianinv = np.linalg.inv(hessian)
                offset = -hessianinv @ derivative
                offset = np.squeeze(np.array(offset.T), axis=0)
                keypoints[n, k, :2] += offset
    return keypoints
159,188 | from itertools import product
import numpy as np
from .post_processing import gaussian_blur, gaussian_blur1d
def gaussian_blur(heatmaps: np.ndarray, kernel: int = 11) -> np.ndarray:
    """Modulate heatmap distribution with Gaussian.

    Each keypoint channel is blurred in-place and then rescaled so its
    peak keeps the original maximum value.

    Note:
        - num_keypoints: K
        - heatmap height: H
        - heatmap width: W

    Args:
        heatmaps (np.ndarray[K, H, W]): model predicted heatmaps.
        kernel (int): Gaussian kernel size (K) for modulation, which should
            match the heatmap gaussian sigma when training.
            K=17 for sigma=3 and k=11 for sigma=2.

    Returns:
        np.ndarray ([K, H, W]): Modulated heatmap distribution.
    """
    assert kernel % 2 == 1

    pad = (kernel - 1) // 2
    num_kpts, height, width = heatmaps.shape

    for idx in range(num_kpts):
        peak = np.max(heatmaps[idx])
        # Blur a zero-padded copy so the border is handled explicitly
        padded = np.zeros((height + 2 * pad, width + 2 * pad),
                          dtype=np.float32)
        padded[pad:-pad, pad:-pad] = heatmaps[idx].copy()
        padded = cv2.GaussianBlur(padded, (kernel, kernel), 0)
        heatmaps[idx] = padded[pad:-pad, pad:-pad].copy()
        # Restore the original peak magnitude
        heatmaps[idx] *= peak / np.max(heatmaps[idx])

    return heatmaps
The provided code snippet includes necessary dependencies for implementing the `refine_keypoints_dark_udp` function. Write a Python function `def refine_keypoints_dark_udp(keypoints: np.ndarray, heatmaps: np.ndarray, blur_kernel_size: int) -> np.ndarray` to solve the following problem:
Refine keypoint predictions using distribution aware coordinate decoding for UDP. See `UDP`_ for details. The operation is in-place. Note: - instance number: N - keypoint number: K - keypoint dimension: D - heatmap size: [W, H] Args: keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D) heatmaps (np.ndarray): The heatmaps in shape (K, H, W) blur_kernel_size (int): The Gaussian blur kernel size of the heatmap modulation Returns: np.ndarray: Refine keypoint coordinates in shape (N, K, D) .. _`UDP`: https://arxiv.org/abs/1911.07524
Here is the function:
def refine_keypoints_dark_udp(keypoints: np.ndarray, heatmaps: np.ndarray,
                              blur_kernel_size: int) -> np.ndarray:
    """Refine keypoint predictions using distribution aware coordinate decoding
    for UDP. See `UDP`_ for details. The operation is in-place.

    Note:
        - instance number: N
        - keypoint number: K
        - keypoint dimension: D
        - heatmap size: [W, H]

    Args:
        keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D)
        heatmaps (np.ndarray): The heatmaps in shape (K, H, W)
        blur_kernel_size (int): The Gaussian blur kernel size of the heatmap
            modulation

    Returns:
        np.ndarray: Refined keypoint coordinates in shape (N, K, D)

    .. _`UDP`: https://arxiv.org/abs/1911.07524
    """
    N, K = keypoints.shape[:2]
    H, W = heatmaps.shape[1:]

    # modulate heatmaps with a Gaussian, then move to log space so a
    # Gaussian-shaped peak becomes quadratic (clip first to avoid log(0))
    heatmaps = gaussian_blur(heatmaps, blur_kernel_size)
    np.clip(heatmaps, 1e-3, 50., heatmaps)
    np.log(heatmaps, heatmaps)

    # edge-pad by 1 pixel and flatten, so the neighbor lookups below can
    # use flat indices without falling off the map border
    heatmaps_pad = np.pad(
        heatmaps, ((0, 0), (1, 1), (1, 1)), mode='edge').flatten()

    for n in range(N):
        # flat index of each keypoint in its padded (H+2, W+2) heatmap;
        # the +1 offsets compensate for the padding, row stride is W + 2
        index = keypoints[n, :, 0] + 1 + (keypoints[n, :, 1] + 1) * (W + 2)
        index += (W + 2) * (H + 2) * np.arange(0, K)
        index = index.astype(int).reshape(-1, 1)
        # samples at the center and its neighbors (suffix ``1`` = +1 step,
        # trailing underscore = -1 step along the corresponding axis)
        i_ = heatmaps_pad[index]
        ix1 = heatmaps_pad[index + 1]
        iy1 = heatmaps_pad[index + W + 2]
        ix1y1 = heatmaps_pad[index + W + 3]
        ix1_y1_ = heatmaps_pad[index - W - 3]
        ix1_ = heatmaps_pad[index - 1]
        iy1_ = heatmaps_pad[index - 2 - W]
        # first-order derivatives (central differences)
        dx = 0.5 * (ix1 - ix1_)
        dy = 0.5 * (iy1 - iy1_)
        derivative = np.concatenate([dx, dy], axis=1)
        derivative = derivative.reshape(K, 2, 1)
        # second-order derivatives assembled into per-keypoint 2x2 Hessians
        dxx = ix1 - 2 * i_ + ix1_
        dyy = iy1 - 2 * i_ + iy1_
        dxy = 0.5 * (ix1y1 - ix1 - iy1 + i_ + i_ - ix1_ - iy1_ + ix1_y1_)
        hessian = np.concatenate([dxx, dxy, dxy, dyy], axis=1)
        hessian = hessian.reshape(K, 2, 2)
        # regularize with eps so the inversion is well-defined, then take
        # one Newton step: x <- x - H^-1 @ g
        hessian = np.linalg.inv(hessian + np.finfo(np.float32).eps * np.eye(2))
        keypoints[n] -= np.einsum('imn,ink->imk', hessian,
                                  derivative).squeeze()
    return keypoints
159,189 | from itertools import product
import numpy as np
from .post_processing import gaussian_blur, gaussian_blur1d
def gaussian_blur1d(simcc: np.ndarray, kernel: int = 11) -> np.ndarray:
    """Modulate simcc distribution with Gaussian.

    Each 1-D keypoint distribution is blurred in-place and rescaled so
    its peak keeps the original maximum value.

    Note:
        - instance number: N
        - num_keypoints: K
        - simcc length: Wx

    Args:
        simcc (np.ndarray[N, K, Wx]): model predicted simcc.
        kernel (int): Gaussian kernel size (K) for modulation, which should
            match the simcc gaussian sigma when training.
            K=17 for sigma=3 and k=11 for sigma=2.

    Returns:
        np.ndarray ([N, K, Wx]): Modulated simcc distribution.
    """
    assert kernel % 2 == 1

    pad = (kernel - 1) // 2
    num_instances, num_kpts, length = simcc.shape

    for n in range(num_instances):
        for k in range(num_kpts):
            peak = np.max(simcc[n, k])
            # Blur a zero-padded row so the border is handled explicitly
            row = np.zeros((1, length + 2 * pad), dtype=np.float32)
            row[0, pad:-pad] = simcc[n, k].copy()
            row = cv2.GaussianBlur(row, (kernel, 1), 0)
            simcc[n, k] = row[0, pad:-pad].copy()
            # Restore the original peak magnitude
            simcc[n, k] *= peak / np.max(simcc[n, k])

    return simcc
The provided code snippet includes necessary dependencies for implementing the `refine_simcc_dark` function. Write a Python function `def refine_simcc_dark(keypoints: np.ndarray, simcc: np.ndarray, blur_kernel_size: int) -> np.ndarray` to solve the following problem:
SimCC version. Refine keypoint predictions using distribution aware coordinate decoding for UDP. See `UDP`_ for details. The operation is in- place. Note: - instance number: N - keypoint number: K - keypoint dimension: D Args: keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D) simcc (np.ndarray): The heatmaps in shape (N, K, Wx) blur_kernel_size (int): The Gaussian blur kernel size of the heatmap modulation Returns: np.ndarray: Refine keypoint coordinates in shape (N, K, D) .. _`UDP`: https://arxiv.org/abs/1911.07524
Here is the function:
def refine_simcc_dark(keypoints: np.ndarray, simcc: np.ndarray,
                      blur_kernel_size: int) -> np.ndarray:
    """SimCC version. Refine keypoint predictions using distribution aware
    coordinate decoding for UDP. See `UDP`_ for details. The operation is
    in-place.

    Note:
        - instance number: N
        - keypoint number: K
        - keypoint dimension: D

    Args:
        keypoints (np.ndarray): The keypoint coordinates in shape (N, K, D)
        simcc (np.ndarray): The heatmaps in shape (N, K, Wx)
        blur_kernel_size (int): The Gaussian blur kernel size of the heatmap
            modulation

    Returns:
        np.ndarray: Refined keypoint coordinates in shape (N, K, D)

    .. _`UDP`: https://arxiv.org/abs/1911.07524
    """
    num_instances = simcc.shape[0]

    # modulate simcc, then move to log space (clip first to avoid log(0))
    simcc = gaussian_blur1d(simcc, blur_kernel_size)
    np.clip(simcc, 1e-3, 50., simcc)
    np.log(simcc, simcc)

    # edge-pad by 2 bins so 2nd-order neighbors of border bins stay in range
    simcc = np.pad(simcc, ((0, 0), (0, 0), (2, 2)), 'edge')

    for n in range(num_instances):
        # nearest bin index, shifted by the padding (+2) and rounded (+0.5)
        px = (keypoints[n] + 2.5).astype(np.int64).reshape(-1, 1)  # K, 1

        center = np.take_along_axis(simcc[n], px, axis=1)  # K, 1
        right = np.take_along_axis(simcc[n], px + 1, axis=1)
        left = np.take_along_axis(simcc[n], px - 1, axis=1)
        right2 = np.take_along_axis(simcc[n], px + 2, axis=1)
        left2 = np.take_along_axis(simcc[n], px - 2, axis=1)

        # 1-D Newton step: first / second derivative (central differences);
        # the small constant keeps the division well-defined
        grad = 0.5 * (right - left)
        curv = 1e-9 + 0.25 * (right2 - 2 * center + left2)
        keypoints[n] -= (grad / curv).reshape(-1)

    return keypoints
159,190 | from itertools import product
from typing import Tuple
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `generate_offset_heatmap` function. Write a Python function `def generate_offset_heatmap( heatmap_size: Tuple[int, int], keypoints: np.ndarray, keypoints_visible: np.ndarray, radius_factor: float, ) -> Tuple[np.ndarray, np.ndarray]` to solve the following problem:
Generate offset heatmaps of keypoints, where each keypoint is represented by 3 maps: one pixel-level class label map (1 for keypoint and 0 for non-keypoint) and 2 pixel-level offset maps for x and y directions respectively. Args: heatmap_size (Tuple[int, int]): Heatmap size in [W, H] keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) keypoints_visible (np.ndarray): Keypoint visibilities in shape (N, K) radius_factor (float): The radius factor of the binary label map. The positive region is defined as the neighbor of the keypoint with the radius :math:`r=radius_factor*max(W, H)` Returns: tuple: - heatmap (np.ndarray): The generated heatmap in shape (K*3, H, W) where [W, H] is the `heatmap_size` - keypoint_weights (np.ndarray): The target weights in shape (K,)
Here is the function:
def generate_offset_heatmap(
    heatmap_size: Tuple[int, int],
    keypoints: np.ndarray,
    keypoints_visible: np.ndarray,
    radius_factor: float,
) -> Tuple[np.ndarray, np.ndarray]:
    """Generate offset heatmaps of keypoints, where each keypoint is
    represented by 3 maps: one pixel-level class label map (1 for keypoint
    and 0 for non-keypoint) and 2 pixel-level offset maps for x and y
    directions respectively.

    Note: when several instances are given, later instances overwrite the
    maps of earlier ones for each keypoint channel.

    Args:
        heatmap_size (Tuple[int, int]): Heatmap size in [W, H]
        keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D)
        keypoints_visible (np.ndarray): Keypoint visibilities in shape
            (N, K)
        radius_factor (float): The radius factor of the binary label
            map. The positive region is defined as the neighbor of the
            keypoint with the radius :math:`r=radius_factor*max(W, H)`

    Returns:
        tuple:
        - heatmap (np.ndarray): The generated heatmap in shape
            (K*3, H, W) where [W, H] is the `heatmap_size`
        - keypoint_weights (np.ndarray): The target weights, a copy of
            ``keypoints_visible`` in shape (N, K)
    """
    num_instances, num_kpts, _ = keypoints.shape
    map_w, map_h = heatmap_size

    heatmaps = np.zeros((num_kpts, 3, map_h, map_w), dtype=np.float32)
    keypoint_weights = keypoints_visible.copy()

    # coordinate grids: xs broadcasts along rows, ys along columns
    xs = np.arange(0, map_w, 1)
    ys = np.arange(0, map_h, 1)[:, None]

    # positive area radius in the classification map
    radius = radius_factor * max(map_w, map_h)

    for n in range(num_instances):
        for k in range(num_kpts):
            if keypoints_visible[n, k] < 0.5:
                continue

            mu = keypoints[n, k]

            x_offset = (mu[0] - xs) / radius
            y_offset = (mu[1] - ys) / radius

            # class label: 1 inside the disc of radius ``radius``, else 0
            heatmaps[k, 0] = np.where(x_offset**2 + y_offset**2 <= 1, 1., 0.)
            heatmaps[k, 1] = x_offset
            heatmaps[k, 2] = y_offset

    return heatmaps.reshape(num_kpts * 3, map_h, map_w), keypoint_weights
159,191 | from itertools import product
from typing import Tuple
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `generate_displacement_heatmap` function. Write a Python function `def generate_displacement_heatmap( heatmap_size: Tuple[int, int], keypoints: np.ndarray, keypoints_visible: np.ndarray, roots: np.ndarray, roots_visible: np.ndarray, diagonal_lengths: np.ndarray, radius: float, )` to solve the following problem:
Generate displacement heatmaps of keypoints, where each keypoint is represented by 3 maps: one pixel-level class label map (1 for keypoint and 0 for non-keypoint) and 2 pixel-level offset maps for x and y directions respectively. Args: heatmap_size (Tuple[int, int]): Heatmap size in [W, H] keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D) keypoints_visible (np.ndarray): Keypoint visibilities in shape (N, K) roots (np.ndarray): Coordinates of instance centers in shape (N, D). The displacement fields of each instance will locate around its center. roots_visible (np.ndarray): Roots visibilities in shape (N,) diagonal_lengths (np.ndarray): Diagonal length of the bounding boxes of each instance in shape (N,) radius (float): The radius factor of the binary label map. The positive region is defined as the neighbor of the keypoint with the radius :math:`r=radius_factor*max(W, H)` Returns: tuple: - displacements (np.ndarray): The generated displacement map in shape (K*2, H, W) where [W, H] is the `heatmap_size` - displacement_weights (np.ndarray): The target weights in shape (K*2, H, W)
Here is the function:
def generate_displacement_heatmap(
    heatmap_size: Tuple[int, int],
    keypoints: np.ndarray,
    keypoints_visible: np.ndarray,
    roots: np.ndarray,
    roots_visible: np.ndarray,
    diagonal_lengths: np.ndarray,
    radius: float,
):
    """Generate displacement maps of keypoints: for every instance, each
    pixel inside a square neighborhood of the instance root stores the 2-D
    offset from that pixel to each keypoint (2 channels per keypoint).

    When the neighborhoods of several instances overlap, a pixel keeps the
    displacement of the instance with the smaller diagonal length.

    Args:
        heatmap_size (Tuple[int, int]): Heatmap size in [W, H]
        keypoints (np.ndarray): Keypoint coordinates in shape (N, K, D)
        keypoints_visible (np.ndarray): Keypoint visibilities in shape
            (N, K)
        roots (np.ndarray): Coordinates of instance centers in shape (N, D).
            The displacement fields of each instance will locate around its
            center.
        roots_visible (np.ndarray): Roots visibilities in shape (N,)
        diagonal_lengths (np.ndarray): Diagonal length of the bounding boxes
            of each instance in shape (N,)
        radius (float): Half-size (in pixels) of the square region around
            each root that receives displacement values

    Returns:
        tuple:
        - displacements (np.ndarray): The generated displacement map in
            shape (K*2, H, W) where [W, H] is the `heatmap_size`
        - displacement_weights (np.ndarray): The target weights in shape
            (K*2, H, W), set to ``1 / diagonal_length`` of the instance
            owning each pixel
    """
    N, K, _ = keypoints.shape
    W, H = heatmap_size

    displacements = np.zeros((K * 2, H, W), dtype=np.float32)
    displacement_weights = np.zeros((K * 2, H, W), dtype=np.float32)
    # Tracks, per pixel, the diagonal length of the instance whose
    # displacement currently occupies it (used to resolve overlaps)
    instance_size_map = np.zeros((H, W), dtype=np.float32)
    for n in range(N):
        # Skip instances whose root is invisible or outside the map
        if (roots_visible[n] < 1 or (roots[n, 0] < 0 or roots[n, 1] < 0)
                or (roots[n, 0] >= W or roots[n, 1] >= H)):
            continue

        diagonal_length = diagonal_lengths[n]

        for k in range(K):
            # Skip keypoints that are invisible or outside the map
            if keypoints_visible[n, k] < 1 or keypoints[n, k, 0] < 0 \
                    or keypoints[n, k, 1] < 0 or keypoints[n, k, 0] >= W \
                    or keypoints[n, k, 1] >= H:
                continue

            # Square neighborhood around the root, clipped to the map
            start_x = max(int(roots[n, 0] - radius), 0)
            start_y = max(int(roots[n, 1] - radius), 0)
            end_x = min(int(roots[n, 0] + radius), W)
            end_y = min(int(roots[n, 1] + radius), H)

            for x in range(start_x, end_x):
                for y in range(start_y, end_y):
                    if displacements[2 * k, y,
                                     x] != 0 or displacements[2 * k + 1, y,
                                                              x] != 0:
                        if diagonal_length > instance_size_map[y, x]:
                            # keep the gt displacement of smaller instance
                            continue

                    displacement_weights[2 * k:2 * k + 2, y,
                                         x] = 1 / diagonal_length
                    displacements[2 * k:2 * k + 2, y,
                                  x] = keypoints[n, k] - [x, y]
                    instance_size_map[y, x] = diagonal_length
    return displacements, displacement_weights
159,192 | from itertools import product
from typing import Tuple
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
try:
import torch
except ImportError:
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'is_str', 'iter_cast',
'list_cast', 'tuple_cast', 'is_seq_of', 'is_list_of', 'is_tuple_of',
'slice_list', 'concat_list', 'check_prerequisites', 'requires_package',
'requires_executable', 'is_filepath', 'fopen', 'check_file_exist',
'mkdir_or_exist', 'symlink', 'scandir', 'ProgressBar',
'track_progress', 'track_iter_progress', 'track_parallel_progress',
'Timer', 'TimerError', 'check_time', 'deprecated_api_warning',
'digit_version', 'get_git_hash', 'import_modules_from_strings',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'check_python_script',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method'
]
else:
from .env import collect_env
from .logging import get_logger, print_log
from .parrots_jit import jit, skip_no_elena
from .parrots_wrapper import (
TORCH_VERSION, BuildExtension, CppExtension, CUDAExtension, DataLoader,
PoolDataLoader, SyncBatchNorm, _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd,
_AvgPoolNd, _BatchNorm, _ConvNd, _ConvTransposeMixin, _InstanceNorm,
_MaxPoolNd, get_build_config, is_rocm_pytorch, _get_cuda_home)
from .registry import Registry, build_from_cfg
from .trace import is_jit_tracing
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'collect_env', 'get_logger',
'print_log', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast',
'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist',
'symlink', 'scandir', 'ProgressBar', 'track_progress',
'track_iter_progress', 'track_parallel_progress', 'Registry',
'build_from_cfg', 'Timer', 'TimerError', 'check_time', 'SyncBatchNorm',
'_AdaptiveAvgPoolNd', '_AdaptiveMaxPoolNd', '_AvgPoolNd', '_BatchNorm',
'_ConvNd', '_ConvTransposeMixin', '_InstanceNorm', '_MaxPoolNd',
'get_build_config', 'BuildExtension', 'CppExtension', 'CUDAExtension',
'DataLoader', 'PoolDataLoader', 'TORCH_VERSION',
'deprecated_api_warning', 'digit_version', 'get_git_hash',
'import_modules_from_strings', 'jit', 'skip_no_elena',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'assert_is_norm_layer',
'assert_params_all_zeros', 'check_python_script',
'is_method_overridden', 'is_jit_tracing', 'is_rocm_pytorch',
'_get_cuda_home', 'has_method'
]
The provided code snippet includes necessary dependencies for implementing the `get_simcc_normalized` function. Write a Python function `def get_simcc_normalized(batch_pred_simcc, sigma=None)` to solve the following problem:
Normalize the predicted SimCC. Args: batch_pred_simcc (torch.Tensor): The predicted SimCC. sigma (float): The sigma of the Gaussian distribution. Returns: torch.Tensor: The normalized SimCC.
Here is the function:
def get_simcc_normalized(batch_pred_simcc, sigma=None):
    """Normalize the predicted SimCC.

    Negative responses are clamped to zero, and any keypoint whose peak
    exceeds 1 is rescaled so its peak becomes exactly 1.

    Args:
        batch_pred_simcc (torch.Tensor): The predicted SimCC in shape
            (B, K, Wx).
        sigma (float, optional): The sigma of the Gaussian distribution.
            When given, responses are first divided by the Gaussian
            normalization constant ``sigma * sqrt(2 * pi)``.

    Returns:
        torch.Tensor: The normalized SimCC.
    """
    B, K, _ = batch_pred_simcc.shape

    # Scale by the Gaussian normalization constant if sigma is provided
    if sigma is not None:
        batch_pred_simcc = batch_pred_simcc / (sigma * np.sqrt(np.pi * 2))

    # Clamp negative responses to zero
    clamped = batch_pred_simcc.clamp(min=0)

    # Per-keypoint maxima; only rows whose peak exceeds 1 are rescaled
    peaks = clamped.amax(dim=-1).reshape(B, K, 1)
    needs_rescale = peaks > 1
    rescaled = clamped / peaks

    return torch.where(needs_rescale, rescaled, clamped)
159,193 | from itertools import product
from typing import Tuple
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `get_simcc_maximum` function. Write a Python function `def get_simcc_maximum(simcc_x: np.ndarray, simcc_y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]` to solve the following problem:
Get maximum response location and value from simcc representations. Note: instance number: N num_keypoints: K heatmap height: H heatmap width: W Args: simcc_x (np.ndarray): x-axis SimCC in shape (K, Wx) or (N, K, Wx) simcc_y (np.ndarray): y-axis SimCC in shape (K, Wy) or (N, K, Wy) Returns: tuple: - locs (np.ndarray): locations of maximum heatmap responses in shape (K, 2) or (N, K, 2) - vals (np.ndarray): values of maximum heatmap responses in shape (K,) or (N, K)
Here is the function:
def get_simcc_maximum(simcc_x: np.ndarray,
                      simcc_y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Get maximum response location and value from simcc representations.

    Note:
        instance number: N
        num_keypoints: K
        heatmap height: H
        heatmap width: W

    Args:
        simcc_x (np.ndarray): x-axis SimCC in shape (K, Wx) or (N, K, Wx)
        simcc_y (np.ndarray): y-axis SimCC in shape (K, Wy) or (N, K, Wy)

    Returns:
        tuple:
        - locs (np.ndarray): locations of maximum heatmap responses in shape
            (K, 2) or (N, K, 2)
        - vals (np.ndarray): values of maximum heatmap responses in shape
            (K,) or (N, K); the smaller of the two per-axis maxima
    """
    assert isinstance(simcc_x, np.ndarray), ('simcc_x should be numpy.ndarray')
    assert isinstance(simcc_y, np.ndarray), ('simcc_y should be numpy.ndarray')
    assert simcc_x.ndim == 2 or simcc_x.ndim == 3, (
        f'Invalid shape {simcc_x.shape}')
    assert simcc_y.ndim == 2 or simcc_y.ndim == 3, (
        f'Invalid shape {simcc_y.shape}')
    assert simcc_x.ndim == simcc_y.ndim, (
        f'{simcc_x.shape} != {simcc_y.shape}')

    # Flatten a batched input to (N*K, Wx) so both cases share one path
    if simcc_x.ndim == 3:
        batch_size, num_kpts = simcc_x.shape[:2]
        simcc_x = simcc_x.reshape(batch_size * num_kpts, -1)
        simcc_y = simcc_y.reshape(batch_size * num_kpts, -1)
    else:
        batch_size = None

    locs = np.stack(
        (np.argmax(simcc_x, axis=1), np.argmax(simcc_y, axis=1)),
        axis=-1).astype(np.float32)

    # Confidence is the smaller of the two per-axis maxima
    vals = np.minimum(np.amax(simcc_x, axis=1), np.amax(simcc_y, axis=1))
    # Invalidate keypoints with non-positive confidence
    locs[vals <= 0.] = -1

    if batch_size:
        locs = locs.reshape(batch_size, num_kpts, 2)
        vals = vals.reshape(batch_size, num_kpts)

    return locs, vals
159,194 | from itertools import product
from typing import Tuple
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `get_heatmap_maximum` function. Write a Python function `def get_heatmap_maximum(heatmaps: np.ndarray) -> Tuple[np.ndarray, np.ndarray]` to solve the following problem:
Get maximum response location and value from heatmaps. Note: batch_size: B num_keypoints: K heatmap height: H heatmap width: W Args: heatmaps (np.ndarray): Heatmaps in shape (K, H, W) or (B, K, H, W) Returns: tuple: - locs (np.ndarray): locations of maximum heatmap responses in shape (K, 2) or (B, K, 2) - vals (np.ndarray): values of maximum heatmap responses in shape (K,) or (B, K)
Here is the function:
def get_heatmap_maximum(heatmaps: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Get maximum response location and value from heatmaps.

    Note:
        batch_size: B
        num_keypoints: K
        heatmap height: H
        heatmap width: W

    Args:
        heatmaps (np.ndarray): Heatmaps in shape (K, H, W) or (B, K, H, W)

    Returns:
        tuple:
        - locs (np.ndarray): locations of maximum heatmap responses in shape
            (K, 2) or (B, K, 2)
        - vals (np.ndarray): values of maximum heatmap responses in shape
            (K,) or (B, K)
    """
    assert isinstance(heatmaps,
                      np.ndarray), ('heatmaps should be numpy.ndarray')
    assert heatmaps.ndim == 3 or heatmaps.ndim == 4, (
        f'Invalid shape {heatmaps.shape}')

    # Flatten each map so the argmax is a single index per keypoint
    if heatmaps.ndim == 3:
        num_kpts, height, width = heatmaps.shape
        batch_size = None
        flat = heatmaps.reshape(num_kpts, -1)
    else:
        batch_size, num_kpts, height, width = heatmaps.shape
        flat = heatmaps.reshape(batch_size * num_kpts, -1)

    # Convert flat argmax indices back to (x, y) coordinates
    y_locs, x_locs = np.unravel_index(
        np.argmax(flat, axis=1), shape=(height, width))
    locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32)
    vals = np.amax(flat, axis=1)
    # Invalidate keypoints with non-positive peak response
    locs[vals <= 0.] = -1

    if batch_size:
        locs = locs.reshape(batch_size, num_kpts, 2)
        vals = vals.reshape(batch_size, num_kpts)

    return locs, vals
159,195 | from itertools import product
from typing import Tuple
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
try:
import torch
except ImportError:
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'is_str', 'iter_cast',
'list_cast', 'tuple_cast', 'is_seq_of', 'is_list_of', 'is_tuple_of',
'slice_list', 'concat_list', 'check_prerequisites', 'requires_package',
'requires_executable', 'is_filepath', 'fopen', 'check_file_exist',
'mkdir_or_exist', 'symlink', 'scandir', 'ProgressBar',
'track_progress', 'track_iter_progress', 'track_parallel_progress',
'Timer', 'TimerError', 'check_time', 'deprecated_api_warning',
'digit_version', 'get_git_hash', 'import_modules_from_strings',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'check_python_script',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method'
]
else:
from .env import collect_env
from .logging import get_logger, print_log
from .parrots_jit import jit, skip_no_elena
from .parrots_wrapper import (
TORCH_VERSION, BuildExtension, CppExtension, CUDAExtension, DataLoader,
PoolDataLoader, SyncBatchNorm, _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd,
_AvgPoolNd, _BatchNorm, _ConvNd, _ConvTransposeMixin, _InstanceNorm,
_MaxPoolNd, get_build_config, is_rocm_pytorch, _get_cuda_home)
from .registry import Registry, build_from_cfg
from .trace import is_jit_tracing
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'collect_env', 'get_logger',
'print_log', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast',
'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist',
'symlink', 'scandir', 'ProgressBar', 'track_progress',
'track_iter_progress', 'track_parallel_progress', 'Registry',
'build_from_cfg', 'Timer', 'TimerError', 'check_time', 'SyncBatchNorm',
'_AdaptiveAvgPoolNd', '_AdaptiveMaxPoolNd', '_AvgPoolNd', '_BatchNorm',
'_ConvNd', '_ConvTransposeMixin', '_InstanceNorm', '_MaxPoolNd',
'get_build_config', 'BuildExtension', 'CppExtension', 'CUDAExtension',
'DataLoader', 'PoolDataLoader', 'TORCH_VERSION',
'deprecated_api_warning', 'digit_version', 'get_git_hash',
'import_modules_from_strings', 'jit', 'skip_no_elena',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'assert_is_norm_layer',
'assert_params_all_zeros', 'check_python_script',
'is_method_overridden', 'is_jit_tracing', 'is_rocm_pytorch',
'_get_cuda_home', 'has_method'
]
The provided code snippet includes necessary dependencies for implementing the `batch_heatmap_nms` function. Write a Python function `def batch_heatmap_nms(batch_heatmaps: Tensor, kernel_size: int = 5)` to solve the following problem:
Apply NMS on a batch of heatmaps. Args: batch_heatmaps (Tensor): batch heatmaps in shape (B, K, H, W) kernel_size (int): The kernel size of the NMS which should be an odd integer. Defaults to 5 Returns: Tensor: The batch heatmaps after NMS.
Here is the function:
def batch_heatmap_nms(batch_heatmaps: Tensor, kernel_size: int = 5):
    """Apply non-maximum suppression to a batch of heatmaps.

    A pixel is kept only if it equals the maximum value within the
    ``kernel_size`` x ``kernel_size`` window centered on it; every other
    pixel is zeroed out.

    Args:
        batch_heatmaps (Tensor): batch heatmaps in shape (B, K, H, W)
        kernel_size (int): The kernel size of the NMS which should be
            an odd integer. Defaults to 5

    Returns:
        Tensor: The batch heatmaps after NMS.
    """
    assert isinstance(kernel_size, int) and kernel_size % 2 == 1, \
        f'The kernel_size should be an odd integer, got {kernel_size}'

    # Same-size max pooling: this padding preserves the spatial resolution.
    pad = (kernel_size - 1) // 2
    pooled = F.max_pool2d(batch_heatmaps, kernel_size, stride=1, padding=pad)

    # Keep only the local maxima (pixels equal to their window maximum).
    keep_mask = (batch_heatmaps == pooled).float()
    return batch_heatmaps * keep_mask
159,196 | import copy
import platform
import random
import numpy as np
import torch
from mmengine import build_from_cfg, is_seq_of
from mmengine.dataset import ConcatDataset, RepeatDataset
from mmpose.registry import DATASETS
The provided code snippet includes necessary dependencies for implementing the `worker_init_fn` function. Write a Python function `def worker_init_fn(worker_id, num_workers, rank, seed)` to solve the following problem:
Init the random seed for various workers.
Here is the function:
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Seed the RNGs of a dataloader worker deterministically."""
    # Derive a distinct seed per worker from the user seed, the process
    # rank and the worker index: num_worker * rank + worker_id + user_seed.
    worker_seed = seed + rank * num_workers + worker_id
    for seeder in (np.random.seed, random.seed, torch.manual_seed):
        seeder(worker_seed)
159,197 | from typing import Sequence, Union
import numpy as np
import torch
from mmcv.transforms import BaseTransform
from mmengine.structures import InstanceData, PixelData
from mmengine.utils import is_seq_of
from mmpose.registry import TRANSFORMS
from mmpose.structures import MultilevelPixelData, PoseDataSample
The provided code snippet includes necessary dependencies for implementing the `image_to_tensor` function. Write a Python function `def image_to_tensor(img: Union[np.ndarray, Sequence[np.ndarray]]) -> torch.torch.Tensor` to solve the following problem:
Translate image or sequence of images to tensor. Multiple image tensors will be stacked. Args: value (np.ndarray | Sequence[np.ndarray]): The original image or image sequence Returns: torch.Tensor: The output tensor.
Here is the function:
def image_to_tensor(img: Union[np.ndarray,
                               Sequence[np.ndarray]]) -> torch.torch.Tensor:
    """Translate image or sequence of images to tensor. Multiple image
    tensors will be stacked.

    Args:
        img (np.ndarray | Sequence[np.ndarray]): The original image or
            image sequence

    Returns:
        torch.Tensor: The output tensor in (C, H, W) layout, or
        (N, C, H, W) for a sequence of N images.
    """
    # Sequence input: convert each image recursively and stack the results.
    if not isinstance(img, np.ndarray):
        assert is_seq_of(img, np.ndarray)
        return torch.stack([image_to_tensor(each) for each in img])

    # Grayscale images get a trailing singleton channel dimension so the
    # HWC -> CHW permutation below is uniform.
    if img.ndim < 3:
        img = np.expand_dims(img, -1)
    contiguous = np.ascontiguousarray(img)
    return torch.from_numpy(contiguous).permute(2, 0, 1).contiguous()
159,198 | from typing import Sequence, Union
import numpy as np
import torch
from mmcv.transforms import BaseTransform
from mmengine.structures import InstanceData, PixelData
from mmengine.utils import is_seq_of
from mmpose.registry import TRANSFORMS
from mmpose.structures import MultilevelPixelData, PoseDataSample
The provided code snippet includes necessary dependencies for implementing the `keypoints_to_tensor` function. Write a Python function `def keypoints_to_tensor(keypoints: Union[np.ndarray, Sequence[np.ndarray]] ) -> torch.torch.Tensor` to solve the following problem:
Translate keypoints or sequence of keypoints to tensor. Multiple keypoints tensors will be stacked. Args: keypoints (np.ndarray | Sequence[np.ndarray]): The keypoints or keypoints sequence. Returns: torch.Tensor: The output tensor.
Here is the function:
def keypoints_to_tensor(keypoints: Union[np.ndarray, Sequence[np.ndarray]]
                        ) -> torch.torch.Tensor:
    """Translate keypoints or sequence of keypoints to tensor. Multiple
    keypoints tensors will be stacked.

    Args:
        keypoints (np.ndarray | Sequence[np.ndarray]): The keypoints or
            keypoints sequence.

    Returns:
        torch.Tensor: The output tensor.
    """
    # Sequence input: convert each array recursively and stack the results.
    if not isinstance(keypoints, np.ndarray):
        assert is_seq_of(keypoints, np.ndarray)
        return torch.stack(
            [keypoints_to_tensor(item) for item in keypoints])

    contiguous = np.ascontiguousarray(keypoints)
    num_instances = contiguous.shape[0]
    # (N, K, C) -> (K, C, N) -> (K*C, N): instances become the last axis.
    flattened = contiguous.transpose(1, 2, 0).reshape(-1, num_instances)
    return torch.from_numpy(flattened).contiguous()
159,199 | import os.path as osp
import warnings
import numpy as np
from mmengine import Config
The provided code snippet includes necessary dependencies for implementing the `parse_pose_metainfo` function. Write a Python function `def parse_pose_metainfo(metainfo: dict)` to solve the following problem:
Load meta information of pose dataset and check its integrity. Args: metainfo (dict): Raw data of pose meta information, which should contain following contents: - "dataset_name" (str): The name of the dataset - "keypoint_info" (dict): The keypoint-related meta information, e.g., name, upper/lower body, and symmetry - "skeleton_info" (dict): The skeleton-related meta information, e.g., start/end keypoint of limbs - "joint_weights" (list[float]): The loss weights of keypoints - "sigmas" (list[float]): The keypoint distribution parameters to calculate OKS score. See `COCO keypoint evaluation <https://cocodataset.org/#keypoints-eval>`__. An example of metainfo is shown as follows. .. code-block:: none { "dataset_name": "coco", "keypoint_info": { 0: { "name": "nose", "type": "upper", "swap": "", "color": [51, 153, 255], }, 1: { "name": "right_eye", "type": "upper", "swap": "left_eye", "color": [51, 153, 255], }, ... }, "skeleton_info": { 0: { "link": ("left_ankle", "left_knee"), "color": [0, 255, 0], }, ... }, "joint_weights": [1., 1., ...], "sigmas": [0.026, 0.025, ...], } A special case is that `metainfo` can have the key "from_file", which should be the path of a config file. In this case, the actual metainfo will be loaded by: .. 
code-block:: python metainfo = mmengine.Config.fromfile(metainfo['from_file']) Returns: Dict: pose meta information that contains following contents: - "dataset_name" (str): Same as ``"dataset_name"`` in the input - "num_keypoints" (int): Number of keypoints - "keypoint_id2name" (dict): Mapping from keypoint id to name - "keypoint_name2id" (dict): Mapping from keypoint name to id - "upper_body_ids" (list): Ids of upper-body keypoint - "lower_body_ids" (list): Ids of lower-body keypoint - "flip_indices" (list): The Id of each keypoint's symmetric keypoint - "flip_pairs" (list): The Ids of symmetric keypoint pairs - "keypoint_colors" (numpy.ndarray): The keypoint color matrix of shape [K, 3], where each row is the color of one keypint in bgr - "num_skeleton_links" (int): The number of links - "skeleton_links" (list): The links represented by Id pairs of start and end points - "skeleton_link_colors" (numpy.ndarray): The link color matrix - "dataset_keypoint_weights" (numpy.ndarray): Same as the ``"joint_weights"`` in the input - "sigmas" (numpy.ndarray): Same as the ``"sigmas"`` in the input
Here is the function:
def parse_pose_metainfo(metainfo: dict):
    """Load meta information of pose dataset and check its integrity.

    Args:
        metainfo (dict): Raw data of pose meta information, which should
            contain following contents:

            - "dataset_name" (str): The name of the dataset
            - "keypoint_info" (dict): The keypoint-related meta information,
              e.g., name, upper/lower body, and symmetry
            - "skeleton_info" (dict): The skeleton-related meta information,
              e.g., start/end keypoint of limbs
            - "joint_weights" (list[float]): The loss weights of keypoints
            - "sigmas" (list[float]): The keypoint distribution parameters
              to calculate OKS score. See `COCO keypoint evaluation
              <https://cocodataset.org/#keypoints-eval>`__.

            An example of metainfo is shown as follows.

            .. code-block:: none

                {
                    "dataset_name": "coco",
                    "keypoint_info":
                    {
                        0:
                        {
                            "name": "nose",
                            "type": "upper",
                            "swap": "",
                            "color": [51, 153, 255],
                        },
                        1:
                        {
                            "name": "right_eye",
                            "type": "upper",
                            "swap": "left_eye",
                            "color": [51, 153, 255],
                        },
                        ...
                    },
                    "skeleton_info":
                    {
                        0:
                        {
                            "link": ("left_ankle", "left_knee"),
                            "color": [0, 255, 0],
                        },
                        ...
                    },
                    "joint_weights": [1., 1., ...],
                    "sigmas": [0.026, 0.025, ...],
                }

            A special case is that `metainfo` can have the key "from_file",
            which should be the path of a config file. In this case, the
            actual metainfo will be loaded by:

            .. code-block:: python

                metainfo = mmengine.Config.fromfile(metainfo['from_file'])

    Returns:
        Dict: pose meta information that contains following contents:

        - "dataset_name" (str): Same as ``"dataset_name"`` in the input
        - "num_keypoints" (int): Number of keypoints
        - "keypoint_id2name" (dict): Mapping from keypoint id to name
        - "keypoint_name2id" (dict): Mapping from keypoint name to id
        - "upper_body_ids" (list): Ids of upper-body keypoint
        - "lower_body_ids" (list): Ids of lower-body keypoint
        - "flip_indices" (list): The Id of each keypoint's symmetric keypoint
        - "flip_pairs" (list): The Ids of symmetric keypoint pairs
        - "keypoint_colors" (numpy.ndarray): The keypoint color matrix of
          shape [K, 3], where each row is the color of one keypoint in bgr
        - "num_skeleton_links" (int): The number of links
        - "skeleton_links" (list): The links represented by Id pairs of start
          and end points
        - "skeleton_link_colors" (numpy.ndarray): The link color matrix
        - "dataset_keypoint_weights" (numpy.ndarray): Same as the
          ``"joint_weights"`` in the input
        - "sigmas" (numpy.ndarray): Same as the ``"sigmas"`` in the input
    """
    if 'from_file' in metainfo:
        cfg_file = metainfo['from_file']
        if not osp.isfile(cfg_file):
            # Search configs in 'mmpose/.mim/configs/' in case that mmpose
            # is installed in non-editable mode.
            import mmpose
            mmpose_path = osp.dirname(mmpose.__file__)
            _cfg_file = osp.join(mmpose_path, '.mim', 'configs', '_base_',
                                 'datasets', osp.basename(cfg_file))
            if osp.isfile(_cfg_file):
                warnings.warn(
                    f'The metainfo config file "{cfg_file}" does not exist. '
                    f'A matched config file "{_cfg_file}" will be used '
                    'instead.')
                cfg_file = _cfg_file
            else:
                raise FileNotFoundError(
                    f'The metainfo config file "{cfg_file}" does not exist.')

        # TODO: remove the nested structure of dataset_info
        # metainfo = Config.fromfile(metainfo['from_file'])
        metainfo = Config.fromfile(cfg_file).dataset_info

    # check data integrity
    assert 'dataset_name' in metainfo
    assert 'keypoint_info' in metainfo
    assert 'skeleton_info' in metainfo
    assert 'joint_weights' in metainfo
    assert 'sigmas' in metainfo

    # parse metainfo
    parsed = dict(
        dataset_name=None,
        num_keypoints=None,
        keypoint_id2name={},
        keypoint_name2id={},
        upper_body_ids=[],
        lower_body_ids=[],
        flip_indices=[],
        flip_pairs=[],
        keypoint_colors=[],
        num_skeleton_links=None,
        skeleton_links=[],
        skeleton_link_colors=[],
        dataset_keypoint_weights=None,
        sigmas=None,
    )

    parsed['dataset_name'] = metainfo['dataset_name']

    # parse keypoint information
    parsed['num_keypoints'] = len(metainfo['keypoint_info'])

    for kpt_id, kpt in metainfo['keypoint_info'].items():
        kpt_name = kpt['name']
        parsed['keypoint_id2name'][kpt_id] = kpt_name
        parsed['keypoint_name2id'][kpt_name] = kpt_id
        parsed['keypoint_colors'].append(kpt.get('color', [255, 128, 0]))

        kpt_type = kpt.get('type', '')
        if kpt_type == 'upper':
            parsed['upper_body_ids'].append(kpt_id)
        elif kpt_type == 'lower':
            parsed['lower_body_ids'].append(kpt_id)

        swap_kpt = kpt.get('swap', '')
        if swap_kpt == kpt_name or swap_kpt == '':
            parsed['flip_indices'].append(kpt_name)
        else:
            parsed['flip_indices'].append(swap_kpt)
            pair = (swap_kpt, kpt_name)
            # Record each symmetric pair only once. Without checking the
            # reversed order, both (a, b) and (b, a) would be appended and
            # downstream flip augmentation would swap the pair twice,
            # canceling itself out.
            if pair not in parsed['flip_pairs'] \
                    and pair[::-1] not in parsed['flip_pairs']:
                parsed['flip_pairs'].append(pair)

    # parse skeleton information
    parsed['num_skeleton_links'] = len(metainfo['skeleton_info'])
    for _, sk in metainfo['skeleton_info'].items():
        parsed['skeleton_links'].append(sk['link'])
        parsed['skeleton_link_colors'].append(sk.get('color', [96, 96, 255]))

    # parse extra information
    parsed['dataset_keypoint_weights'] = np.array(
        metainfo['joint_weights'], dtype=np.float32)
    parsed['sigmas'] = np.array(metainfo['sigmas'], dtype=np.float32)

    if 'stats_info' in metainfo:
        parsed['stats_info'] = {}
        for name, val in metainfo['stats_info'].items():
            parsed['stats_info'][name] = np.array(val, dtype=np.float32)

    # formatting: map keypoint names to ids in pairs/indices/links
    def _map(src, mapping: dict):
        if isinstance(src, (list, tuple)):
            cls = type(src)
            return cls(_map(s, mapping) for s in src)
        else:
            return mapping[src]

    parsed['flip_pairs'] = _map(
        parsed['flip_pairs'], mapping=parsed['keypoint_name2id'])
    parsed['flip_indices'] = _map(
        parsed['flip_indices'], mapping=parsed['keypoint_name2id'])
    parsed['skeleton_links'] = _map(
        parsed['skeleton_links'], mapping=parsed['keypoint_name2id'])

    parsed['keypoint_colors'] = np.array(
        parsed['keypoint_colors'], dtype=np.uint8)
    parsed['skeleton_link_colors'] = np.array(
        parsed['skeleton_link_colors'], dtype=np.uint8)

    return parsed
code-block:: python metainfo = mmengine.Config.fromfile(metainfo['from_file']) Returns: Dict: pose meta information that contains following contents: - "dataset_name" (str): Same as ``"dataset_name"`` in the input - "num_keypoints" (int): Number of keypoints - "keypoint_id2name" (dict): Mapping from keypoint id to name - "keypoint_name2id" (dict): Mapping from keypoint name to id - "upper_body_ids" (list): Ids of upper-body keypoint - "lower_body_ids" (list): Ids of lower-body keypoint - "flip_indices" (list): The Id of each keypoint's symmetric keypoint - "flip_pairs" (list): The Ids of symmetric keypoint pairs - "keypoint_colors" (numpy.ndarray): The keypoint color matrix of shape [K, 3], where each row is the color of one keypint in bgr - "num_skeleton_links" (int): The number of links - "skeleton_links" (list): The links represented by Id pairs of start and end points - "skeleton_link_colors" (numpy.ndarray): The link color matrix - "dataset_keypoint_weights" (numpy.ndarray): Same as the ``"joint_weights"`` in the input - "sigmas" (numpy.ndarray): Same as the ``"sigmas"`` in the input |
159,200 | import math
import torch
import torch.nn as nn
from mmcv.cnn import build_activation_layer, build_conv_layer, build_norm_layer
from mmcv.cnn.bricks.transformer import build_dropout
from mmengine.model import BaseModule, trunc_normal_init
from torch.nn.functional import pad
from mmpose.registry import MODELS
from .hrnet import Bottleneck, HRModule, HRNet
The provided code snippet includes necessary dependencies for implementing the `nlc_to_nchw` function. Write a Python function `def nlc_to_nchw(x, hw_shape)` to solve the following problem:
Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor. Args: x (Tensor): The input tensor of shape [N, L, C] before conversion. hw_shape (Sequence[int]): The height and width of output feature map. Returns: Tensor: The output tensor of shape [N, C, H, W] after conversion.
Here is the function:
def nlc_to_nchw(x, hw_shape):
    """Reshape a flattened [N, L, C] tensor back to [N, C, H, W].

    Args:
        x (Tensor): The input tensor of shape [N, L, C] before conversion.
        hw_shape (Sequence[int]): The height and width of output feature map.

    Returns:
        Tensor: The output tensor of shape [N, C, H, W] after conversion.
    """
    height, width = hw_shape
    assert x.ndim == 3
    batch, seq_len, channels = x.shape
    assert seq_len == height * width, 'The seq_len doesn\'t match H, W'
    # Move channels before the sequence axis, then unfold L into (H, W).
    return x.transpose(1, 2).reshape(batch, channels, height, width)
159,201 | import math
import torch
import torch.nn as nn
from mmcv.cnn import build_activation_layer, build_conv_layer, build_norm_layer
from mmcv.cnn.bricks.transformer import build_dropout
from mmengine.model import BaseModule, trunc_normal_init
from torch.nn.functional import pad
from mmpose.registry import MODELS
from .hrnet import Bottleneck, HRModule, HRNet
The provided code snippet includes necessary dependencies for implementing the `nchw_to_nlc` function. Write a Python function `def nchw_to_nlc(x)` to solve the following problem:
Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor. Args: x (Tensor): The input tensor of shape [N, C, H, W] before conversion. Returns: Tensor: The output tensor of shape [N, L, C] after conversion.
Here is the function:
def nchw_to_nlc(x):
    """Flatten a [N, C, H, W] tensor into [N, L, C] with L = H * W.

    Args:
        x (Tensor): The input tensor of shape [N, C, H, W] before conversion.

    Returns:
        Tensor: The output tensor of shape [N, L, C] after conversion.
    """
    assert x.ndim == 4
    batch, channels = x.shape[:2]
    # Collapse the spatial axes, then swap the channel and sequence axes.
    return x.reshape(batch, channels, -1).transpose(1, 2).contiguous()
159,202 | import math
import torch
import torch.nn as nn
from mmcv.cnn import build_activation_layer, build_conv_layer, build_norm_layer
from mmcv.cnn.bricks.transformer import build_dropout
from mmengine.model import BaseModule, trunc_normal_init
from torch.nn.functional import pad
from mmpose.registry import MODELS
from .hrnet import Bottleneck, HRModule, HRNet
The provided code snippet includes necessary dependencies for implementing the `build_drop_path` function. Write a Python function `def build_drop_path(drop_path_rate)` to solve the following problem:
Build drop path layer.
Here is the function:
def build_drop_path(drop_path_rate):
    """Create a DropPath (stochastic depth) layer with the given rate."""
    drop_cfg = dict(type='DropPath', drop_prob=drop_path_rate)
    return build_dropout(drop_cfg)
159,203 | import copy
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule, build_conv_layer, build_norm_layer
from mmcv.cnn.bricks import ContextBlock
from mmengine.model import BaseModule, Sequential
from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm
from mmpose.registry import MODELS
from .base_backbone import BaseBackbone
class ViPNAS_Bottleneck(BaseModule):
    """Bottleneck block for ViPNAS_ResNet.

    Args:
        in_channels (int): Input channels of this block.
        out_channels (int): Output channels of this block.
        expansion (int): The ratio of ``out_channels/mid_channels`` where
            ``mid_channels`` is the input/output channels of conv2. Default: 4.
        stride (int): stride of the block. Default: 1
        dilation (int): dilation of convolution. Default: 1
        downsample (nn.Module): downsample operation on identity branch.
            Default: None.
        style (str): ``"pytorch"`` or ``"caffe"``. If set to "pytorch", the
            stride-two layer is the 3x3 conv layer, otherwise the stride-two
            layer is the first 1x1 conv layer. Default: "pytorch".
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        kernel_size (int): kernel size of conv2 searched in ViPNAS.
        groups (int): group number of conv2 searched in ViPNAS.
        attention (bool): whether to use attention module in the end of
            the block.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 expansion=4,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 kernel_size=3,
                 groups=1,
                 attention=False,
                 init_cfg=None):
        # Protect mutable default arguments
        norm_cfg = copy.deepcopy(norm_cfg)
        super().__init__(init_cfg=init_cfg)
        assert style in ['pytorch', 'caffe']

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.expansion = expansion
        assert out_channels % expansion == 0
        self.mid_channels = out_channels // expansion
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg

        # "pytorch" style strides in the 3x3 conv; "caffe" strides in the
        # first 1x1 conv.
        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1

        self.norm1_name, norm1 = build_norm_layer(
            norm_cfg, self.mid_channels, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(
            norm_cfg, self.mid_channels, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, out_channels, postfix=3)

        self.conv1 = build_conv_layer(
            conv_cfg,
            in_channels,
            self.mid_channels,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            conv_cfg,
            self.mid_channels,
            self.mid_channels,
            kernel_size=kernel_size,
            stride=self.conv2_stride,
            padding=kernel_size // 2,
            groups=groups,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg,
            self.mid_channels,
            out_channels,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        if attention:
            self.attention = ContextBlock(out_channels,
                                          max(1.0 / 16, 16.0 / out_channels))
        else:
            self.attention = None

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    # The ``@property`` decorators are required: ``forward`` accesses these
    # as attributes (``self.norm1(out)``); without ``@property`` that call
    # would pass ``out`` to a zero-argument bound method and raise TypeError.
    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: the normalization layer named "norm2" """
        return getattr(self, self.norm2_name)

    @property
    def norm3(self):
        """nn.Module: the normalization layer named "norm3" """
        return getattr(self, self.norm3_name)

    def forward(self, x):
        """Forward function."""

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.attention is not None:
                out = self.attention(out)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        # Gradient checkpointing trades compute for memory during training.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.relu(out)

        return out
The provided code snippet includes necessary dependencies for implementing the `get_expansion` function. Write a Python function `def get_expansion(block, expansion=None)` to solve the following problem:
Get the expansion of a residual block. The block expansion will be obtained by the following order: 1. If ``expansion`` is given, just return it. 2. If ``block`` has the attribute ``expansion``, then return ``block.expansion``. 3. Return the default value according to the block type: 1 for ``ViPNAS_Bottleneck``. Args: block (class): The block class. expansion (int | None): The given expansion ratio. Returns: int: The expansion of the block.
Here is the function:
def get_expansion(block, expansion=None):
    """Get the expansion of a residual block.

    The block expansion will be obtained by the following order:

    1. If ``expansion`` is given, just return it.
    2. If ``block`` has the attribute ``expansion``, then return
       ``block.expansion``.
    3. Return the default value according to the block type:
       1 for ``ViPNAS_Bottleneck``.

    Args:
        block (class): The block class.
        expansion (int | None): The given expansion ratio.

    Returns:
        int: The expansion of the block.

    Raises:
        TypeError: If ``expansion`` is neither a positive int nor None, or
            if it is None and cannot be inferred from ``block``.
    """
    if isinstance(expansion, int):
        assert expansion > 0
    elif expansion is None:
        if hasattr(block, 'expansion'):
            expansion = block.expansion
        elif issubclass(block, ViPNAS_Bottleneck):
            expansion = 1
        else:
            raise TypeError(f'expansion is not specified for {block.__name__}')
    else:
        raise TypeError('expansion must be an integer or None')

    return expansion
159,204 | import copy
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule, build_conv_layer, build_norm_layer
from mmengine.model import BaseModule, constant_init
from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm
from mmpose.registry import MODELS
from .base_backbone import BaseBackbone
class BasicBlock(BaseModule):
    """BasicBlock for ResNet.

    Args:
        in_channels (int): Input channels of this block.
        out_channels (int): Output channels of this block.
        expansion (int): The ratio of ``out_channels/mid_channels`` where
            ``mid_channels`` is the output channels of conv1. This is a
            reserved argument in BasicBlock and should always be 1. Default: 1.
        stride (int): stride of the block. Default: 1
        dilation (int): dilation of convolution. Default: 1
        downsample (nn.Module): downsample operation on identity branch.
            Default: None.
        style (str): `pytorch` or `caffe`. It is unused and reserved for
            unified API with Bottleneck.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 expansion=1,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 init_cfg=None):
        # Protect mutable default arguments
        norm_cfg = copy.deepcopy(norm_cfg)
        super().__init__(init_cfg=init_cfg)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.expansion = expansion
        assert self.expansion == 1
        assert out_channels % expansion == 0
        self.mid_channels = out_channels // expansion
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg

        self.norm1_name, norm1 = build_norm_layer(
            norm_cfg, self.mid_channels, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(
            norm_cfg, out_channels, postfix=2)

        self.conv1 = build_conv_layer(
            conv_cfg,
            in_channels,
            self.mid_channels,
            3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            conv_cfg,
            self.mid_channels,
            out_channels,
            3,
            padding=1,
            bias=False)
        self.add_module(self.norm2_name, norm2)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    # The ``@property`` decorators are required: ``forward`` accesses these
    # as attributes (``self.norm1(out)``); without ``@property`` that call
    # would pass ``out`` to a zero-argument bound method and raise TypeError.
    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: the normalization layer named "norm2" """
        return getattr(self, self.norm2_name)

    def forward(self, x):
        """Forward function."""

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            out = self.conv2(out)
            out = self.norm2(out)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        # Gradient checkpointing trades compute for memory during training.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.relu(out)

        return out
class Bottleneck(BaseModule):
    """Bottleneck block for ResNet.

    Args:
        in_channels (int): Input channels of this block.
        out_channels (int): Output channels of this block.
        expansion (int): The ratio of ``out_channels/mid_channels`` where
            ``mid_channels`` is the input/output channels of conv2. Default: 4.
        stride (int): stride of the block. Default: 1
        dilation (int): dilation of convolution. Default: 1
        downsample (nn.Module): downsample operation on identity branch.
            Default: None.
        style (str): ``"pytorch"`` or ``"caffe"``. If set to "pytorch", the
            stride-two layer is the 3x3 conv layer, otherwise the stride-two
            layer is the first 1x1 conv layer. Default: "pytorch".
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 expansion=4,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 init_cfg=None):
        # Protect mutable default arguments
        norm_cfg = copy.deepcopy(norm_cfg)
        super().__init__(init_cfg=init_cfg)
        assert style in ['pytorch', 'caffe']

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.expansion = expansion
        assert out_channels % expansion == 0
        self.mid_channels = out_channels // expansion
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg

        # "pytorch" style puts the stride on the 3x3 conv (conv2);
        # "caffe" style puts it on the first 1x1 conv (conv1).
        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1

        self.norm1_name, norm1 = build_norm_layer(
            norm_cfg, self.mid_channels, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(
            norm_cfg, self.mid_channels, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, out_channels, postfix=3)

        self.conv1 = build_conv_layer(
            conv_cfg,
            in_channels,
            self.mid_channels,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            conv_cfg,
            self.mid_channels,
            self.mid_channels,
            kernel_size=3,
            stride=self.conv2_stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg,
            self.mid_channels,
            out_channels,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    # Fix: norm1/norm2/norm3 must be properties. Without @property,
    # ``self.norm1`` resolves to the bound method (class attributes shadow
    # nn.Module submodules), so ``self.norm1(out)`` in forward() would pass
    # ``out`` to a zero-argument method and raise a TypeError.
    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: the normalization layer named "norm2" """
        return getattr(self, self.norm2_name)

    @property
    def norm3(self):
        """nn.Module: the normalization layer named "norm3" """
        return getattr(self, self.norm3_name)

    def forward(self, x):
        """Forward function."""

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)

            out = self.conv3(out)
            out = self.norm3(out)

            # Residual connection; downsample matches the identity branch's
            # shape when stride/channels change.
            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        # Optionally trade compute for memory via gradient checkpointing.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.relu(out)

        return out
The provided code snippet includes necessary dependencies for implementing the `get_expansion` function. Write a Python function `def get_expansion(block, expansion=None)` to solve the following problem:
Get the expansion of a residual block. The block expansion will be obtained by the following order: 1. If ``expansion`` is given, just return it. 2. If ``block`` has the attribute ``expansion``, then return ``block.expansion``. 3. Return the default value according to the block type: 1 for ``BasicBlock`` and 4 for ``Bottleneck``. Args: block (class): The block class. expansion (int | None): The given expansion ratio. Returns: int: The expansion of the block.
Here is the function:
def get_expansion(block, expansion=None):
    """Get the expansion of a residual block.

    The expansion is resolved in the following order:

    1. If ``expansion`` is given, just return it.
    2. If ``block`` has the attribute ``expansion``, return
       ``block.expansion``.
    3. Fall back to the default for the block type: 1 for ``BasicBlock``
       and 4 for ``Bottleneck``.

    Args:
        block (class): The block class.
        expansion (int | None): The given expansion ratio.

    Returns:
        int: The expansion of the block.
    """
    if expansion is not None:
        if not isinstance(expansion, int):
            raise TypeError('expansion must be an integer or None')
        assert expansion > 0
        return expansion

    # expansion is None: infer it from the block class itself.
    if hasattr(block, 'expansion'):
        return block.expansion
    if issubclass(block, BasicBlock):
        return 1
    if issubclass(block, Bottleneck):
        return 4
    raise TypeError(f'expansion is not specified for {block.__name__}')
159,205 | import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm
from mmpose.registry import MODELS
from .base_backbone import BaseBackbone
def make_vgg_layer(in_channels,
                   out_channels,
                   num_blocks,
                   conv_cfg=None,
                   norm_cfg=None,
                   act_cfg=dict(type='ReLU'),
                   dilation=1,
                   with_norm=False,
                   ceil_mode=False):
    """Build one VGG stage: ``num_blocks`` 3x3 conv modules followed by a
    2x2 max-pooling layer.

    Args:
        in_channels (int): Input channels of the first conv in the stage.
        out_channels (int): Output channels of every conv in the stage.
        num_blocks (int): Number of conv modules in the stage.
        conv_cfg (dict, optional): Config dict for the conv layers.
        norm_cfg (dict, optional): Config dict for the norm layers.
        act_cfg (dict): Config dict for the activation layers.
        dilation (int): Dilation (and matching padding) of the convs.
        with_norm (bool): Unused in this function body; kept for API
            compatibility with callers.
        ceil_mode (bool): ``ceil_mode`` of the trailing max-pool layer.

    Returns:
        list: The layers composing the stage.
    """
    stage = []
    current_in = in_channels
    for _ in range(num_blocks):
        stage.append(
            ConvModule(
                in_channels=current_in,
                out_channels=out_channels,
                kernel_size=3,
                dilation=dilation,
                padding=dilation,
                bias=True,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg))
        # All convs after the first operate on ``out_channels`` features.
        current_in = out_channels
    stage.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode))

    return stage
159,206 | from collections import OrderedDict
from mmengine.runner import CheckpointLoader, load_state_dict
def load_checkpoint(model,
                    filename,
                    map_location='cpu',
                    strict=False,
                    logger=None):
    """Load checkpoint from a file or URI.

    Args:
        model (Module): Module to load checkpoint.
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``.
        map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to allow different params for the model and
            checkpoint.
        logger (:mod:`logging.Logger` or None): The logger for error message.

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        RuntimeError: If the loaded checkpoint is not a dict.
    """
    checkpoint = CheckpointLoader.load_checkpoint(filename, map_location)
    # OrderedDict is a subclass of dict
    if not isinstance(checkpoint, dict):
        # Fix: report the offending file; the f-string previously contained
        # a literal placeholder instead of the filename.
        raise RuntimeError(
            f'No state_dict found in checkpoint file {filename}')
    # get state_dict from checkpoint; some formats nest the weights under
    # 'state_dict' or 'model'.
    if 'state_dict' in checkpoint:
        state_dict_tmp = checkpoint['state_dict']
    elif 'model' in checkpoint:
        state_dict_tmp = checkpoint['model']
    else:
        state_dict_tmp = checkpoint

    state_dict = OrderedDict()
    # Strip 'module.' / 'backbone.' key prefixes so the keys match the bare
    # model's parameter names. 'module.backbone.' must be checked first
    # because it would otherwise match the plain 'module.' branch.
    for k, v in state_dict_tmp.items():
        if k.startswith('module.backbone.'):
            state_dict[k[16:]] = v
        elif k.startswith('module.'):
            state_dict[k[7:]] = v
        elif k.startswith('backbone.'):
            state_dict[k[9:]] = v
        else:
            state_dict[k] = v

    # load state_dict
    load_state_dict(model, state_dict, strict, logger)
    return checkpoint
The provided code snippet includes necessary dependencies for implementing the `get_state_dict` function. Write a Python function `def get_state_dict(filename, map_location='cpu')` to solve the following problem:
Get state_dict from a file or URI. Args: filename (str): Accept local filepath, URL, ``torchvision://xxx``, ``open-mmlab://xxx``. map_location (str): Same as :func:`torch.load`. Returns: OrderedDict: The state_dict.
Here is the function:
def get_state_dict(filename, map_location='cpu'):
    """Get state_dict from a file or URI.

    Args:
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``.
        map_location (str): Same as :func:`torch.load`.

    Returns:
        OrderedDict: The state_dict.

    Raises:
        RuntimeError: If the loaded checkpoint is not a dict.
    """
    checkpoint = CheckpointLoader.load_checkpoint(filename, map_location)
    # OrderedDict is a subclass of dict
    if not isinstance(checkpoint, dict):
        # Fix: report the offending file; the f-string previously contained
        # a literal placeholder instead of the filename.
        raise RuntimeError(
            f'No state_dict found in checkpoint file {filename}')
    # get state_dict from checkpoint. Consistent with ``load_checkpoint``:
    # also accept checkpoints that nest the weights under a 'model' key.
    if 'state_dict' in checkpoint:
        state_dict_tmp = checkpoint['state_dict']
    elif 'model' in checkpoint:
        state_dict_tmp = checkpoint['model']
    else:
        state_dict_tmp = checkpoint

    state_dict = OrderedDict()
    # Strip 'module.' / 'backbone.' key prefixes so the keys match the bare
    # model's parameter names. 'module.backbone.' must be checked first
    # because it would otherwise match the plain 'module.' branch.
    for k, v in state_dict_tmp.items():
        if k.startswith('module.backbone.'):
            state_dict[k[16:]] = v
        elif k.startswith('module.'):
            state_dict[k[7:]] = v
        elif k.startswith('backbone.'):
            state_dict[k[9:]] = v
        else:
            state_dict[k] = v

    return state_dict
159,208 | import torch
try:
import torch
except ImportError:
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'is_str', 'iter_cast',
'list_cast', 'tuple_cast', 'is_seq_of', 'is_list_of', 'is_tuple_of',
'slice_list', 'concat_list', 'check_prerequisites', 'requires_package',
'requires_executable', 'is_filepath', 'fopen', 'check_file_exist',
'mkdir_or_exist', 'symlink', 'scandir', 'ProgressBar',
'track_progress', 'track_iter_progress', 'track_parallel_progress',
'Timer', 'TimerError', 'check_time', 'deprecated_api_warning',
'digit_version', 'get_git_hash', 'import_modules_from_strings',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'check_python_script',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method'
]
else:
from .env import collect_env
from .logging import get_logger, print_log
from .parrots_jit import jit, skip_no_elena
from .parrots_wrapper import (
TORCH_VERSION, BuildExtension, CppExtension, CUDAExtension, DataLoader,
PoolDataLoader, SyncBatchNorm, _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd,
_AvgPoolNd, _BatchNorm, _ConvNd, _ConvTransposeMixin, _InstanceNorm,
_MaxPoolNd, get_build_config, is_rocm_pytorch, _get_cuda_home)
from .registry import Registry, build_from_cfg
from .trace import is_jit_tracing
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'collect_env', 'get_logger',
'print_log', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast',
'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist',
'symlink', 'scandir', 'ProgressBar', 'track_progress',
'track_iter_progress', 'track_parallel_progress', 'Registry',
'build_from_cfg', 'Timer', 'TimerError', 'check_time', 'SyncBatchNorm',
'_AdaptiveAvgPoolNd', '_AdaptiveMaxPoolNd', '_AvgPoolNd', '_BatchNorm',
'_ConvNd', '_ConvTransposeMixin', '_InstanceNorm', '_MaxPoolNd',
'get_build_config', 'BuildExtension', 'CppExtension', 'CUDAExtension',
'DataLoader', 'PoolDataLoader', 'TORCH_VERSION',
'deprecated_api_warning', 'digit_version', 'get_git_hash',
'import_modules_from_strings', 'jit', 'skip_no_elena',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'assert_is_norm_layer',
'assert_params_all_zeros', 'check_python_script',
'is_method_overridden', 'is_jit_tracing', 'is_rocm_pytorch',
'_get_cuda_home', 'has_method'
]
The provided code snippet includes necessary dependencies for implementing the `channel_shuffle` function. Write a Python function `def channel_shuffle(x, groups)` to solve the following problem:
Channel Shuffle operation. This function enables cross-group information flow for multiple groups convolution layers. Args: x (Tensor): The input tensor. groups (int): The number of groups to divide the input tensor in the channel dimension. Returns: Tensor: The output tensor after channel shuffle operation.
Here is the function:
def channel_shuffle(x, groups):
    """Channel Shuffle operation.

    This function enables cross-group information flow for multiple groups
    convolution layers.

    Args:
        x (Tensor): The input tensor.
        groups (int): The number of groups to divide the input tensor
            in the channel dimension.

    Returns:
        Tensor: The output tensor after channel shuffle operation.
    """
    batch, channels, height, width = x.size()
    assert (channels % groups == 0), ('num_channels should be '
                                      'divisible by groups')
    group_size = channels // groups

    # Split channels into (groups, group_size), swap the two axes, then
    # flatten back: channel k of group g moves to position k*groups + g.
    shuffled = x.view(batch, groups, group_size, height, width)
    shuffled = shuffled.transpose(1, 2).contiguous()
    return shuffled.view(batch, channels, height, width)
159,210 | import warnings
from mmpose.registry import MODELS
BACKBONES = MODELS
The provided code snippet includes necessary dependencies for implementing the `build_backbone` function. Write a Python function `def build_backbone(cfg)` to solve the following problem:
Build backbone.
Here is the function:
def build_backbone(cfg):
    """Construct a backbone module from its registry config dict."""
    return BACKBONES.build(cfg)
159,211 | import warnings
from mmpose.registry import MODELS
NECKS = MODELS
The provided code snippet includes necessary dependencies for implementing the `build_neck` function. Write a Python function `def build_neck(cfg)` to solve the following problem:
Build neck.
Here is the function:
def build_neck(cfg):
    """Construct a neck module from its registry config dict."""
    return NECKS.build(cfg)
159,212 | import warnings
from mmpose.registry import MODELS
HEADS = MODELS
The provided code snippet includes necessary dependencies for implementing the `build_head` function. Write a Python function `def build_head(cfg)` to solve the following problem:
Build head.
Here is the function:
def build_head(cfg):
    """Construct a head module from its registry config dict."""
    return HEADS.build(cfg)
159,213 | import warnings
from mmpose.registry import MODELS
LOSSES = MODELS
The provided code snippet includes necessary dependencies for implementing the `build_loss` function. Write a Python function `def build_loss(cfg)` to solve the following problem:
Build loss.
Here is the function:
def build_loss(cfg):
    """Construct a loss module from its registry config dict."""
    return LOSSES.build(cfg)
159,214 | import warnings
from mmpose.registry import MODELS
def build_pose_estimator(cfg):
    """Construct a pose estimator from its registry config dict."""
    return POSE_ESTIMATORS.build(cfg)
The provided code snippet includes necessary dependencies for implementing the `build_posenet` function. Write a Python function `def build_posenet(cfg)` to solve the following problem:
Build posenet.
Here is the function:
def build_posenet(cfg):
    """Build posenet.

    .. deprecated::
        This is a thin wrapper kept for backward compatibility; it emits a
        ``DeprecationWarning`` and forwards to ``build_pose_estimator``.
    """
    warnings.warn(
        '``build_posenet`` will be deprecated soon, '
        'please use ``build_pose_estimator`` instead.', DeprecationWarning)
    return build_pose_estimator(cfg)
159,215 | import math
from typing import Dict, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import build_conv_layer
from mmengine.model import BaseModule, ModuleDict
from mmengine.structures import InstanceData, PixelData
from torch import Tensor
from mmpose.models.utils.tta import flip_heatmaps
from mmpose.registry import KEYPOINT_CODECS, MODELS
from mmpose.utils.typing import (ConfigType, Features, OptConfigType,
OptSampleList, Predictions)
from ..base_head import BaseHead
The provided code snippet includes necessary dependencies for implementing the `smooth_heatmaps` function. Write a Python function `def smooth_heatmaps(heatmaps: Tensor, blur_kernel_size: int) -> Tensor` to solve the following problem:
Smooth the heatmaps by blurring and averaging. Args: heatmaps (Tensor): The heatmaps to smooth. blur_kernel_size (int): The kernel size for blurring the heatmaps. Returns: Tensor: The smoothed heatmaps.
Here is the function:
def smooth_heatmaps(heatmaps: Tensor, blur_kernel_size: int) -> Tensor:
    """Smooth heatmaps by averaging each map with a blurred copy of itself.

    Args:
        heatmaps (Tensor): The heatmaps to smooth.
        blur_kernel_size (int): The kernel size for blurring the heatmaps.

    Returns:
        Tensor: The smoothed heatmaps.
    """
    # Same-size average-pool blur (stride 1, centered padding), then blend
    # 50/50 with the original maps.
    padding = (blur_kernel_size - 1) // 2
    blurred = torch.nn.functional.avg_pool2d(heatmaps, blur_kernel_size, 1,
                                             padding)
    return (heatmaps + blurred) / 2.0
159,216 | import math
from typing import Sequence
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmengine.model import BaseModule
from mmengine.utils import to_2tuple
The provided code snippet includes necessary dependencies for implementing the `nlc_to_nchw` function. Write a Python function `def nlc_to_nchw(x, hw_shape)` to solve the following problem:
Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor. Args: x (Tensor): The input tensor of shape [N, L, C] before conversion. hw_shape (Sequence[int]): The height and width of output feature map. Returns: Tensor: The output tensor of shape [N, C, H, W] after conversion.
Here is the function:
def nlc_to_nchw(x, hw_shape):
    """Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor.

    Args:
        x (Tensor): The input tensor of shape [N, L, C] before conversion.
        hw_shape (Sequence[int]): The height and width of output feature map.

    Returns:
        Tensor: The output tensor of shape [N, C, H, W] after conversion.
    """
    height, width = hw_shape
    assert len(x.shape) == 3
    batch, seq_len, channels = x.shape
    assert seq_len == height * width, 'The seq_len does not match H, W'
    # Move channels ahead of the sequence axis, then unfold L into (H, W).
    return x.permute(0, 2, 1).reshape(batch, channels, height,
                                      width).contiguous()
159,217 | import math
from typing import Sequence
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmengine.model import BaseModule
from mmengine.utils import to_2tuple
The provided code snippet includes necessary dependencies for implementing the `nchw_to_nlc` function. Write a Python function `def nchw_to_nlc(x)` to solve the following problem:
Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor. Args: x (Tensor): The input tensor of shape [N, C, H, W] before conversion. Returns: Tensor: The output tensor of shape [N, L, C] after conversion.
Here is the function:
def nchw_to_nlc(x):
    """Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor.

    Args:
        x (Tensor): The input tensor of shape [N, C, H, W] before conversion.

    Returns:
        Tensor: The output tensor of shape [N, L, C] after conversion.
    """
    assert len(x.shape) == 4
    batch, channels = x.shape[:2]
    # Collapse the spatial dims into L = H*W, then move channels last.
    flat = x.reshape(batch, channels, -1)
    return flat.permute(0, 2, 1).contiguous()
159,218 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn.bricks import DropPath
from mmengine.utils import digit_version
from mmengine.utils.dl_utils import TORCH_VERSION
try:
import torch
except ImportError:
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'is_str', 'iter_cast',
'list_cast', 'tuple_cast', 'is_seq_of', 'is_list_of', 'is_tuple_of',
'slice_list', 'concat_list', 'check_prerequisites', 'requires_package',
'requires_executable', 'is_filepath', 'fopen', 'check_file_exist',
'mkdir_or_exist', 'symlink', 'scandir', 'ProgressBar',
'track_progress', 'track_iter_progress', 'track_parallel_progress',
'Timer', 'TimerError', 'check_time', 'deprecated_api_warning',
'digit_version', 'get_git_hash', 'import_modules_from_strings',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'check_python_script',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method'
]
else:
from .env import collect_env
from .logging import get_logger, print_log
from .parrots_jit import jit, skip_no_elena
from .parrots_wrapper import (
TORCH_VERSION, BuildExtension, CppExtension, CUDAExtension, DataLoader,
PoolDataLoader, SyncBatchNorm, _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd,
_AvgPoolNd, _BatchNorm, _ConvNd, _ConvTransposeMixin, _InstanceNorm,
_MaxPoolNd, get_build_config, is_rocm_pytorch, _get_cuda_home)
from .registry import Registry, build_from_cfg
from .trace import is_jit_tracing
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'collect_env', 'get_logger',
'print_log', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast',
'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist',
'symlink', 'scandir', 'ProgressBar', 'track_progress',
'track_iter_progress', 'track_parallel_progress', 'Registry',
'build_from_cfg', 'Timer', 'TimerError', 'check_time', 'SyncBatchNorm',
'_AdaptiveAvgPoolNd', '_AdaptiveMaxPoolNd', '_AvgPoolNd', '_BatchNorm',
'_ConvNd', '_ConvTransposeMixin', '_InstanceNorm', '_MaxPoolNd',
'get_build_config', 'BuildExtension', 'CppExtension', 'CUDAExtension',
'DataLoader', 'PoolDataLoader', 'TORCH_VERSION',
'deprecated_api_warning', 'digit_version', 'get_git_hash',
'import_modules_from_strings', 'jit', 'skip_no_elena',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'assert_is_norm_layer',
'assert_params_all_zeros', 'check_python_script',
'is_method_overridden', 'is_jit_tracing', 'is_rocm_pytorch',
'_get_cuda_home', 'has_method'
]
The provided code snippet includes necessary dependencies for implementing the `rope` function. Write a Python function `def rope(x, dim)` to solve the following problem:
Applies Rotary Position Embedding to input tensor. Args: x (torch.Tensor): Input tensor. dim (int | list[int]): The spatial dimension(s) to apply rotary position embedding. Returns: torch.Tensor: The tensor after applying rotary position embedding. Reference: `RoFormer: Enhanced Transformer with Rotary Position Embedding <https://arxiv.org/abs/2104.09864>`_
Here is the function:
def rope(x, dim):
    """Applies Rotary Position Embedding to input tensor.

    Args:
        x (torch.Tensor): Input tensor. The last dimension is treated as
            the feature dimension; it is split into two equal halves, so
            an even size is assumed (odd sizes would make the halves from
            ``torch.chunk`` mismatch the frequency table).
        dim (int | list[int]): The spatial dimension(s) to apply
            rotary position embedding.

    Returns:
        torch.Tensor: The tensor after applying rotary position
        embedding.

    Reference:
        `RoFormer: Enhanced Transformer with Rotary
        Position Embedding <https://arxiv.org/abs/2104.09864>`_
    """
    shape = x.shape
    if isinstance(dim, int):
        dim = [dim]
    # Total number of positions over the selected spatial dimension(s).
    spatial_shape = [shape[i] for i in dim]
    total_len = 1
    for i in spatial_shape:
        total_len *= i
    # Flat position ids 0..total_len-1 arranged in the spatial layout.
    position = torch.reshape(
        torch.arange(total_len, dtype=torch.int, device=x.device),
        spatial_shape)
    # Append singleton axes so ``position`` broadcasts over any dimensions
    # that follow the last selected spatial dim (the feature dim excluded).
    for i in range(dim[-1] + 1, len(shape) - 1, 1):
        position = torch.unsqueeze(position, dim=-1)
    # Per-feature-pair angular frequencies on a 10000-based exponential
    # schedule over half the feature size.
    half_size = shape[-1] // 2
    freq_seq = -torch.arange(
        half_size, dtype=torch.int, device=x.device) / float(half_size)
    inv_freq = 10000**-freq_seq
    # Outer product of positions and frequencies -> rotation angles.
    sinusoid = position[..., None] * inv_freq[None, None, :]
    sin = torch.sin(sinusoid)
    cos = torch.cos(sinusoid)
    # Rotate the two feature halves by the position-dependent angle:
    # (x1, x2) -> (x1*cos - x2*sin, x2*cos + x1*sin).
    x1, x2 = torch.chunk(x, 2, dim=-1)
    return torch.cat([x1 * cos - x2 * sin, x2 * cos + x1 * sin], dim=-1)
159,219 | from typing import Dict, Optional, Tuple, Union
from mmengine.config import Config, ConfigDict
from mmengine.dist import master_only
from mmengine.logging import MMLogger
ConfigType = Union[Config, ConfigDict]
def process_input_transform(input_transform: str, head: Dict, head_new: Dict,
                            head_deleted_dict: Dict, head_append_dict: Dict,
                            neck_new: Dict, input_index: Tuple[int],
                            align_corners: bool) -> None:
    """Translate the legacy ``input_transform`` head option into updates on
    the new head/neck dictionaries (mutated in place).

    Raises:
        ValueError: If ``input_transform`` is neither ``'resize_concat'``
            nor ``'select'``.
    """
    if input_transform == 'resize_concat':
        # Concatenation mode: the head now receives the summed channel count
        # of the selected feature maps; the neck does the resize+concat.
        old_in_channels = head_new.pop('in_channels')
        head_deleted_dict['in_channels'] = str(old_in_channels)
        new_in_channels = sum(old_in_channels[i] for i in input_index)
        head_new['in_channels'] = new_in_channels
        head_append_dict['in_channels'] = str(new_in_channels)
        neck_new.update(
            dict(
                type='FeatureMapProcessor',
                concat=True,
                select_index=input_index,
            ))
        if align_corners:
            neck_new['align_corners'] = align_corners
    elif input_transform == 'select':
        # Selection mode: a neck is only needed for a non-default index.
        if input_index != (-1, ):
            neck_new.update(
                dict(type='FeatureMapProcessor', select_index=input_index))
        if isinstance(head['in_channels'], tuple):
            old_in_channels = head_new.pop('in_channels')
            head_deleted_dict['in_channels'] = str(old_in_channels)
            if isinstance(input_index, int):
                selected = old_in_channels[input_index]
            else:
                selected = tuple(old_in_channels[i] for i in input_index)
            head_new['in_channels'] = selected
            head_append_dict['in_channels'] = str(selected)
        if align_corners:
            neck_new['align_corners'] = align_corners
    else:
        raise ValueError(f'model.head get invalid value for argument '
                         f'input_transform: {input_transform}')
def process_extra_field(extra: Dict, head_new: Dict, head_deleted_dict: Dict,
                        head_append_dict: Dict, neck_new: Dict) -> None:
    """Process the extra field and update head and neck dictionaries.

    Args:
        extra (Dict): The legacy ``extra`` head options being removed.
        head_new (Dict): Updated head config (mutated in place).
        head_deleted_dict (Dict): Repr of removed fields, for the warning.
        head_append_dict (Dict): Repr of added fields, for the warning.
        neck_new (Dict): Updated neck config (mutated in place).
    """
    # Fix: build the repr with a join so an empty ``extra`` yields 'dict()'
    # instead of the malformed 'dict)' produced by slicing off the last
    # character of 'dict(' when there were no items.
    items = ','.join(f'{key}={value}' for key, value in extra.items())
    head_deleted_dict['extra'] = f'dict({items})'

    if 'final_conv_kernel' in extra:
        kernel_size = extra['final_conv_kernel']
        if kernel_size > 1:
            # Centered padding keeps the output spatial size unchanged.
            padding = kernel_size // 2
            head_new['final_layer'] = dict(
                kernel_size=kernel_size, padding=padding)
            head_append_dict[
                'final_layer'] = f'dict(kernel_size={kernel_size}, ' \
                                 f'padding={padding})'
        else:
            head_new['final_layer'] = dict(kernel_size=kernel_size)
            head_append_dict[
                'final_layer'] = f'dict(kernel_size={kernel_size})'

    if 'upsample' in extra:
        # Legacy upsampling moves into a FeatureMapProcessor neck.
        neck_new.update(
            dict(
                type='FeatureMapProcessor',
                scale_factor=float(extra['upsample']),
                apply_relu=True,
            ))
def process_has_final_layer(has_final_layer: bool, head_new: Dict,
                            head_deleted_dict: Dict,
                            head_append_dict: Dict) -> None:
    """Process the has_final_layer field and update the head dictionary."""
    head_deleted_dict['has_final_layer'] = str(has_final_layer)
    if has_final_layer:
        return
    # has_final_layer=False maps to an explicit ``final_layer=None`` unless
    # a final_layer entry was already produced elsewhere.
    if 'final_layer' not in head_new:
        head_new['final_layer'] = None
        head_append_dict['final_layer'] = 'None'
def display_modifications(head_deleted_dict: Dict, head_append_dict: Dict,
                          neck: Dict) -> None:
    """Display the modifications made to the head and neck configurations.

    Args:
        head_deleted_dict (Dict): Dictionary of deleted fields in the head.
        head_append_dict (Dict): Dictionary of appended fields in the head.
        neck (Dict): Updated neck configuration.
    """
    # Nothing was rewritten, so there is nothing to report.
    if not head_deleted_dict and not head_append_dict:
        return

    old_model_info, new_model_info = build_model_info(head_deleted_dict,
                                                      head_append_dict, neck)

    total_info = ''.join([
        '\nThe config you are using is outdated. ',
        'The following section of the config:\n```\n',
        old_model_info,
        '```\nshould be updated to\n```\n',
        new_model_info,
        '```\nFor more information, please refer to ',
        'https://mmpose.readthedocs.io/en/latest/',
        'guide_to_framework.html#step3-model',
    ])

    logger: MMLogger = MMLogger.get_current_instance()
    logger.warning(total_info)
The provided code snippet includes necessary dependencies for implementing the `check_and_update_config` function. Write a Python function `def check_and_update_config(neck: Optional[ConfigType], head: ConfigType) -> Tuple[Optional[Dict], Dict]` to solve the following problem:
Check and update the configuration of the head and neck components. Args: neck (Optional[ConfigType]): Configuration for the neck component. head (ConfigType): Configuration for the head component. Returns: Tuple[Optional[Dict], Dict]: Updated configurations for the neck and head components.
Here is the function:
def check_and_update_config(neck: Optional[ConfigType],
                            head: ConfigType) -> Tuple[Optional[Dict], Dict]:
    """Check and update the configuration of the head and neck components.

    Legacy head options (``input_transform``, ``input_index``,
    ``align_corners``, ``extra``, ``has_final_layer``) are removed from the
    head config and translated into their modern neck/head equivalents.

    Args:
        neck (Optional[ConfigType]): Configuration for the neck component.
        head (ConfigType): Configuration for the head component.

    Returns:
        Tuple[Optional[Dict], Dict]: Updated configurations for the neck
        and head components.
    """
    head_new = head.copy()
    neck_new = neck.copy() if isinstance(neck, dict) else {}
    head_deleted_dict, head_append_dict = {}, {}

    # Pop each legacy field (recording its repr for the warning message),
    # falling back to its historical default when absent.
    if 'input_transform' in head:
        input_transform = head_new.pop('input_transform')
        head_deleted_dict['input_transform'] = f"'{input_transform}'"
    else:
        input_transform = 'select'

    if 'input_index' in head:
        input_index = head_new.pop('input_index')
        head_deleted_dict['input_index'] = str(input_index)
    else:
        input_index = (-1, )

    if 'align_corners' in head:
        align_corners = head_new.pop('align_corners')
        head_deleted_dict['align_corners'] = str(align_corners)
    else:
        align_corners = False

    process_input_transform(input_transform, head, head_new, head_deleted_dict,
                            head_append_dict, neck_new, input_index,
                            align_corners)

    if 'extra' in head:
        process_extra_field(head_new.pop('extra'), head_new, head_deleted_dict,
                            head_append_dict, neck_new)

    if 'has_final_layer' in head:
        process_has_final_layer(head_new.pop('has_final_layer'), head_new,
                                head_deleted_dict, head_append_dict)

    # Report what was rewritten so users can update their config files.
    display_modifications(head_deleted_dict, head_append_dict, neck_new)

    # An empty neck dict means no neck is needed at all.
    return (neck_new or None), head_new
159,220 | from typing import List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `flip_heatmaps` function. Write a Python function `def flip_heatmaps(heatmaps: Tensor, flip_indices: Optional[List[int]] = None, flip_mode: str = 'heatmap', shift_heatmap: bool = True)` to solve the following problem:
Flip heatmaps for test-time augmentation. Args: heatmaps (Tensor): The heatmaps to flip. Should be a tensor in shape [B, C, H, W] flip_indices (List[int]): The indices of each keypoint's symmetric keypoint. Defaults to ``None`` flip_mode (str): Specify the flipping mode. Options are: - ``'heatmap'``: horizontally flip the heatmaps and swap heatmaps of symmetric keypoints according to ``flip_indices`` - ``'udp_combined'``: similar to ``'heatmap'`` mode but further flip the x_offset values - ``'offset'``: horizontally flip the offset fields and swap heatmaps of symmetric keypoints according to ``flip_indices``. x_offset values are also reversed shift_heatmap (bool): Shift the flipped heatmaps to align with the original heatmaps and improve accuracy. Defaults to ``True`` Returns: Tensor: flipped heatmaps in shape [B, C, H, W]
Here is the function:
def flip_heatmaps(heatmaps: Tensor,
                  flip_indices: Optional[List[int]] = None,
                  flip_mode: str = 'heatmap',
                  shift_heatmap: bool = True):
    """Flip heatmaps for test-time augmentation.

    Args:
        heatmaps (Tensor): The heatmaps to flip. Should be a tensor in shape
            [B, C, H, W]
        flip_indices (List[int]): The indices of each keypoint's symmetric
            keypoint. Defaults to ``None``
        flip_mode (str): Specify the flipping mode. Options are:

            - ``'heatmap'``: horizontally flip the heatmaps and swap heatmaps
                of symmetric keypoints according to ``flip_indices``
            - ``'udp_combined'``: similar to ``'heatmap'`` mode but further
                flip the x_offset values
            - ``'offset'``: horizontally flip the offset fields and swap
                heatmaps of symmetric keypoints according to
                ``flip_indices``. x_offset values are also reversed
        shift_heatmap (bool): Shift the flipped heatmaps to align with the
            original heatmaps and improve accuracy. Defaults to ``True``

    Returns:
        Tensor: flipped heatmaps in shape [B, C, H, W]
    """
    if flip_mode not in ('heatmap', 'udp_combined', 'offset'):
        raise ValueError(f'Invalid flip_mode value "{flip_mode}"')

    if flip_mode == 'heatmap':
        flipped = heatmaps.flip(-1)
        if flip_indices is not None:
            assert len(flip_indices) == flipped.shape[1]
            flipped = flipped[:, flip_indices]
    else:
        # 'udp_combined' packs (heatmap, x_offset, y_offset) per keypoint;
        # 'offset' packs (x_offset, y_offset). In both cases the x component
        # must be negated after the horizontal flip.
        group_size, x_channel = (3, 1) if flip_mode == 'udp_combined' else (2,
                                                                            0)
        B, C, H, W = heatmaps.shape
        flipped = heatmaps.view(B, C // group_size, group_size, H, W).flip(-1)
        if flip_indices is not None:
            assert len(flip_indices) == C // group_size
            flipped = flipped[:, flip_indices]
        flipped[:, :, x_channel] = -flipped[:, :, x_channel]
        flipped = flipped.view(B, C, H, W)

    if shift_heatmap:
        # clone data to avoid unexpected in-place operation when using CPU
        flipped[..., 1:] = flipped[..., :-1].clone()
    return flipped
159,221 | from typing import List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `flip_vectors` function. Write a Python function `def flip_vectors(x_labels: Tensor, y_labels: Tensor, flip_indices: List[int])` to solve the following problem:
Flip instance-level labels in specific axis for test-time augmentation. Args: x_labels (Tensor): The vector labels in x-axis to flip. Should be a tensor in shape [B, C, Wx] y_labels (Tensor): The vector labels in y-axis to flip. Should be a tensor in shape [B, C, Wy] flip_indices (List[int]): The indices of each keypoint's symmetric keypoint
Here is the function:
def flip_vectors(x_labels: Tensor, y_labels: Tensor, flip_indices: List[int]):
    """Flip instance-level labels in specific axis for test-time augmentation.

    Args:
        x_labels (Tensor): The vector labels in x-axis to flip. Should be
            a tensor in shape [B, C, Wx]
        y_labels (Tensor): The vector labels in y-axis to flip. Should be
            a tensor in shape [B, C, Wy]
        flip_indices (List[int]): The indices of each keypoint's symmetric
            keypoint
    """
    assert x_labels.ndim == 3 and y_labels.ndim == 3
    num_keypoints = len(flip_indices)
    assert num_keypoints == x_labels.shape[1]
    assert num_keypoints == y_labels.shape[1]
    # Swap symmetric keypoints; only the x vectors are mirrored spatially.
    flipped_x = x_labels[:, flip_indices].flip(-1)
    flipped_y = y_labels[:, flip_indices]
    return flipped_x, flipped_y
159,222 | from typing import List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `flip_coordinates` function. Write a Python function `def flip_coordinates(coords: Tensor, flip_indices: List[int], shift_coords: bool, input_size: Tuple[int, int])` to solve the following problem:
Flip normalized coordinates for test-time augmentation. Args: coords (Tensor): The coordinates to flip. Should be a tensor in shape [B, K, D] flip_indices (List[int]): The indices of each keypoint's symmetric keypoint shift_coords (bool): Shift the flipped coordinates to align with the original coordinates and improve accuracy. Defaults to ``True`` input_size (Tuple[int, int]): The size of input image in [w, h]
Here is the function:
def flip_coordinates(coords: Tensor, flip_indices: List[int],
                     shift_coords: bool, input_size: Tuple[int, int]):
    """Flip normalized coordinates for test-time augmentation.

    Args:
        coords (Tensor): The coordinates to flip. Should be a tensor in shape
            [B, K, D]
        flip_indices (List[int]): The indices of each keypoint's symmetric
            keypoint
        shift_coords (bool): Shift the flipped coordinates to align with the
            original coordinates and improve accuracy. Defaults to ``True``
        input_size (Tuple[int, int]): The size of input image in [w, h]
    """
    assert coords.ndim == 3
    assert len(flip_indices) == coords.shape[1]

    # Mirror normalized x values around the vertical center line.
    # NOTE: this writes into the input tensor in place.
    coords[:, :, 0] = 1.0 - coords[:, :, 0]

    if shift_coords:
        # Compensate for the one-pixel offset introduced by flipping.
        coords[:, :, 0] -= 1.0 / input_size[0]

    return coords[:, flip_indices]
159,223 | from typing import List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `flip_visibility` function. Write a Python function `def flip_visibility(vis: Tensor, flip_indices: List[int])` to solve the following problem:
Flip keypoints visibility for test-time augmentation. Args: vis (Tensor): The keypoints visibility to flip. Should be a tensor in shape [B, K] flip_indices (List[int]): The indices of each keypoint's symmetric keypoint
Here is the function:
def flip_visibility(vis: Tensor, flip_indices: List[int]):
    """Flip keypoints visibility for test-time augmentation.

    Args:
        vis (Tensor): The keypoints visibility to flip. Should be a tensor
            in shape [B, K]
        flip_indices (List[int]): The indices of each keypoint's symmetric
            keypoint
    """
    assert vis.ndim == 2
    # Visibility has no spatial component: swapping symmetric keypoints
    # is all that is required.
    return vis[:, flip_indices]
159,224 | from typing import List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
try:
import torch
except ImportError:
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'is_str', 'iter_cast',
'list_cast', 'tuple_cast', 'is_seq_of', 'is_list_of', 'is_tuple_of',
'slice_list', 'concat_list', 'check_prerequisites', 'requires_package',
'requires_executable', 'is_filepath', 'fopen', 'check_file_exist',
'mkdir_or_exist', 'symlink', 'scandir', 'ProgressBar',
'track_progress', 'track_iter_progress', 'track_parallel_progress',
'Timer', 'TimerError', 'check_time', 'deprecated_api_warning',
'digit_version', 'get_git_hash', 'import_modules_from_strings',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'check_python_script',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method'
]
else:
from .env import collect_env
from .logging import get_logger, print_log
from .parrots_jit import jit, skip_no_elena
from .parrots_wrapper import (
TORCH_VERSION, BuildExtension, CppExtension, CUDAExtension, DataLoader,
PoolDataLoader, SyncBatchNorm, _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd,
_AvgPoolNd, _BatchNorm, _ConvNd, _ConvTransposeMixin, _InstanceNorm,
_MaxPoolNd, get_build_config, is_rocm_pytorch, _get_cuda_home)
from .registry import Registry, build_from_cfg
from .trace import is_jit_tracing
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'collect_env', 'get_logger',
'print_log', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast',
'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist',
'symlink', 'scandir', 'ProgressBar', 'track_progress',
'track_iter_progress', 'track_parallel_progress', 'Registry',
'build_from_cfg', 'Timer', 'TimerError', 'check_time', 'SyncBatchNorm',
'_AdaptiveAvgPoolNd', '_AdaptiveMaxPoolNd', '_AvgPoolNd', '_BatchNorm',
'_ConvNd', '_ConvTransposeMixin', '_InstanceNorm', '_MaxPoolNd',
'get_build_config', 'BuildExtension', 'CppExtension', 'CUDAExtension',
'DataLoader', 'PoolDataLoader', 'TORCH_VERSION',
'deprecated_api_warning', 'digit_version', 'get_git_hash',
'import_modules_from_strings', 'jit', 'skip_no_elena',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'assert_is_norm_layer',
'assert_params_all_zeros', 'check_python_script',
'is_method_overridden', 'is_jit_tracing', 'is_rocm_pytorch',
'_get_cuda_home', 'has_method'
]
The provided code snippet includes necessary dependencies for implementing the `aggregate_heatmaps` function. Write a Python function `def aggregate_heatmaps(heatmaps: List[Tensor], size: Optional[Tuple[int, int]], align_corners: bool = False, mode: str = 'average')` to solve the following problem:
Aggregate multiple heatmaps. Args: heatmaps (List[Tensor]): Multiple heatmaps to aggregate. Each should be in shape (B, C, H, W) size (Tuple[int, int], optional): The target size in (w, h). All heatmaps will be resized to the target size. If not given, the first heatmap tensor's width and height will be used as the target size. Defaults to ``None`` align_corners (bool): Whether to align corners when resizing heatmaps. Defaults to ``False`` mode (str): Aggregation mode in one of the following: - ``'average'``: Get average of heatmaps. All heatmaps must have the same channel number - ``'concat'``: Concatenate the heatmaps at the channel dim
Here is the function:
def aggregate_heatmaps(heatmaps: List[Tensor],
                       size: Optional[Tuple[int, int]],
                       align_corners: bool = False,
                       mode: str = 'average'):
    """Aggregate multiple heatmaps.

    Args:
        heatmaps (List[Tensor]): Multiple heatmaps to aggregate. Each should
            be in shape (B, C, H, W). NOTE: entries that need resizing are
            replaced in place within this list.
        size (Tuple[int, int], optional): The target size in (w, h). All
            heatmaps will be resized to the target size. If not given, the
            first heatmap tensor's width and height will be used as the target
            size. Defaults to ``None``
        align_corners (bool): Whether align corners when resizing heatmaps.
            Defaults to ``False``
        mode (str): Aggregation mode in one of the following:

            - ``'average'``: Get average of heatmaps. All heatmaps must have
                the same channel number
            - ``'concat'``: Concatenate the heatmaps at the channel dim

    Returns:
        Tensor: The aggregated heatmaps in shape (B, C, H, W).

    Raises:
        ValueError: If ``mode`` is neither ``'average'`` nor ``'concat'``.
    """
    # Validate the mode up front so the unreachable fallback below is
    # unnecessary.
    if mode not in {'average', 'concat'}:
        raise ValueError(f'Invalid aggregation mode `{mode}`')

    if size is None:
        h, w = heatmaps[0].shape[2:4]
    else:
        w, h = size

    for i, _heatmaps in enumerate(heatmaps):
        assert _heatmaps.ndim == 4
        if mode == 'average':
            # Averaging requires matching batch and channel dimensions.
            assert _heatmaps.shape[:2] == heatmaps[0].shape[:2]
        else:
            # Concatenation only requires matching batch dimensions.
            assert _heatmaps.shape[0] == heatmaps[0].shape[0]

        if _heatmaps.shape[2:4] != (h, w):
            heatmaps[i] = F.interpolate(
                _heatmaps,
                size=(h, w),
                mode='bilinear',
                align_corners=align_corners)

    if mode == 'average':
        return sum(heatmaps).div(len(heatmaps))
    return torch.cat(heatmaps, dim=1)
159,225 | import torch
from torch.nn import functional as F
try:
import torch
except ImportError:
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'is_str', 'iter_cast',
'list_cast', 'tuple_cast', 'is_seq_of', 'is_list_of', 'is_tuple_of',
'slice_list', 'concat_list', 'check_prerequisites', 'requires_package',
'requires_executable', 'is_filepath', 'fopen', 'check_file_exist',
'mkdir_or_exist', 'symlink', 'scandir', 'ProgressBar',
'track_progress', 'track_iter_progress', 'track_parallel_progress',
'Timer', 'TimerError', 'check_time', 'deprecated_api_warning',
'digit_version', 'get_git_hash', 'import_modules_from_strings',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'check_python_script',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method'
]
else:
from .env import collect_env
from .logging import get_logger, print_log
from .parrots_jit import jit, skip_no_elena
from .parrots_wrapper import (
TORCH_VERSION, BuildExtension, CppExtension, CUDAExtension, DataLoader,
PoolDataLoader, SyncBatchNorm, _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd,
_AvgPoolNd, _BatchNorm, _ConvNd, _ConvTransposeMixin, _InstanceNorm,
_MaxPoolNd, get_build_config, is_rocm_pytorch, _get_cuda_home)
from .registry import Registry, build_from_cfg
from .trace import is_jit_tracing
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'collect_env', 'get_logger',
'print_log', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast',
'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist',
'symlink', 'scandir', 'ProgressBar', 'track_progress',
'track_iter_progress', 'track_parallel_progress', 'Registry',
'build_from_cfg', 'Timer', 'TimerError', 'check_time', 'SyncBatchNorm',
'_AdaptiveAvgPoolNd', '_AdaptiveMaxPoolNd', '_AvgPoolNd', '_BatchNorm',
'_ConvNd', '_ConvTransposeMixin', '_InstanceNorm', '_MaxPoolNd',
'get_build_config', 'BuildExtension', 'CppExtension', 'CUDAExtension',
'DataLoader', 'PoolDataLoader', 'TORCH_VERSION',
'deprecated_api_warning', 'digit_version', 'get_git_hash',
'import_modules_from_strings', 'jit', 'skip_no_elena',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'assert_is_norm_layer',
'assert_params_all_zeros', 'check_python_script',
'is_method_overridden', 'is_jit_tracing', 'is_rocm_pytorch',
'_get_cuda_home', 'has_method'
]
The provided code snippet includes necessary dependencies for implementing the `rot6d_to_rotmat` function. Write a Python function `def rot6d_to_rotmat(x)` to solve the following problem:
Convert 6D rotation representation to 3x3 rotation matrix. Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019 Input: (B,6) Batch of 6-D rotation representations Output: (B,3,3) Batch of corresponding rotation matrices
Here is the function:
def rot6d_to_rotmat(x):
    """Convert 6D rotation representation to 3x3 rotation matrix.

    Based on Zhou et al., "On the Continuity of Rotation
    Representations in Neural Networks", CVPR 2019

    Input:
        (B,6) Batch of 6-D rotation representations
    Output:
        (B,3,3) Batch of corresponding rotation matrices
    """
    x = x.view(-1, 3, 2)
    a1 = x[:, :, 0]
    a2 = x[:, :, 1]
    # Gram-Schmidt: orthonormalize (a1, a2), then complete the basis with
    # the cross product.
    b1 = F.normalize(a1)
    b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1)
    # Fixed: pass `dim=1` explicitly. Without it, torch.cross operates on
    # the *first* dimension of size 3, which silently becomes the batch
    # axis whenever B == 3, producing wrong rotations.
    b3 = torch.cross(b1, b2, dim=1)
    return torch.stack((b1, b2, b3), dim=-1)
159,226 | import torch
from torch.nn import functional as F
def quat_to_rotmat(quat):
    """Convert quaternion coefficients to rotation matrix.

    Args:
        quat: size = [B, 4] 4 <===>(w, x, y, z)
    Returns:
        Rotation matrix corresponding to the quaternion
        -- size = [B, 3, 3]
    """
    # Normalize to a unit quaternion before conversion.
    unit = quat / quat.norm(p=2, dim=1, keepdim=True)
    w, x, y, z = unit.unbind(dim=1)

    ww, xx, yy, zz = w * w, x * x, y * y, z * z
    wx, wy, wz = w * x, w * y, w * z
    xy, xz, yz = x * y, x * z, y * z

    batch = quat.size(0)
    # Standard quaternion-to-matrix formula, assembled row-major.
    rot = torch.stack([
        ww + xx - yy - zz, 2 * (xy - wz), 2 * (wy + xz),
        2 * (wz + xy), ww - xx + yy - zz, 2 * (yz - wx),
        2 * (xz - wy), 2 * (wx + yz), ww - xx - yy + zz
    ],
                      dim=1).view(batch, 3, 3)
    return rot
try:
import torch
except ImportError:
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'is_str', 'iter_cast',
'list_cast', 'tuple_cast', 'is_seq_of', 'is_list_of', 'is_tuple_of',
'slice_list', 'concat_list', 'check_prerequisites', 'requires_package',
'requires_executable', 'is_filepath', 'fopen', 'check_file_exist',
'mkdir_or_exist', 'symlink', 'scandir', 'ProgressBar',
'track_progress', 'track_iter_progress', 'track_parallel_progress',
'Timer', 'TimerError', 'check_time', 'deprecated_api_warning',
'digit_version', 'get_git_hash', 'import_modules_from_strings',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'check_python_script',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method'
]
else:
from .env import collect_env
from .logging import get_logger, print_log
from .parrots_jit import jit, skip_no_elena
from .parrots_wrapper import (
TORCH_VERSION, BuildExtension, CppExtension, CUDAExtension, DataLoader,
PoolDataLoader, SyncBatchNorm, _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd,
_AvgPoolNd, _BatchNorm, _ConvNd, _ConvTransposeMixin, _InstanceNorm,
_MaxPoolNd, get_build_config, is_rocm_pytorch, _get_cuda_home)
from .registry import Registry, build_from_cfg
from .trace import is_jit_tracing
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'collect_env', 'get_logger',
'print_log', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast',
'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist',
'symlink', 'scandir', 'ProgressBar', 'track_progress',
'track_iter_progress', 'track_parallel_progress', 'Registry',
'build_from_cfg', 'Timer', 'TimerError', 'check_time', 'SyncBatchNorm',
'_AdaptiveAvgPoolNd', '_AdaptiveMaxPoolNd', '_AvgPoolNd', '_BatchNorm',
'_ConvNd', '_ConvTransposeMixin', '_InstanceNorm', '_MaxPoolNd',
'get_build_config', 'BuildExtension', 'CppExtension', 'CUDAExtension',
'DataLoader', 'PoolDataLoader', 'TORCH_VERSION',
'deprecated_api_warning', 'digit_version', 'get_git_hash',
'import_modules_from_strings', 'jit', 'skip_no_elena',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'assert_is_norm_layer',
'assert_params_all_zeros', 'check_python_script',
'is_method_overridden', 'is_jit_tracing', 'is_rocm_pytorch',
'_get_cuda_home', 'has_method'
]
The provided code snippet includes necessary dependencies for implementing the `batch_rodrigues` function. Write a Python function `def batch_rodrigues(theta)` to solve the following problem:
Convert axis-angle representation to rotation matrix. Args: theta: size = [B, 3] Returns: Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
Here is the function:
def batch_rodrigues(theta):
    """Convert axis-angle representation to rotation matrix.

    Args:
        theta: size = [B, 3]
    Returns:
        Rotation matrix corresponding to the quaternion
        -- size = [B, 3, 3]
    """
    # The small epsilon keeps the norm strictly positive for zero rotations.
    angle = torch.norm(theta + 1e-8, p=2, dim=1, keepdim=True)
    axis = torch.div(theta, angle)
    half_angle = angle * 0.5
    # Build the quaternion (w, x, y, z) and reuse the quaternion converter.
    quat = torch.cat([torch.cos(half_angle),
                      torch.sin(half_angle) * axis], dim=1)
    return quat_to_rotmat(quat)
159,227 | import warnings
from typing import Optional, Tuple, Union
import torch
from torch.nn import functional as F
try:
import torch
except ImportError:
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'is_str', 'iter_cast',
'list_cast', 'tuple_cast', 'is_seq_of', 'is_list_of', 'is_tuple_of',
'slice_list', 'concat_list', 'check_prerequisites', 'requires_package',
'requires_executable', 'is_filepath', 'fopen', 'check_file_exist',
'mkdir_or_exist', 'symlink', 'scandir', 'ProgressBar',
'track_progress', 'track_iter_progress', 'track_parallel_progress',
'Timer', 'TimerError', 'check_time', 'deprecated_api_warning',
'digit_version', 'get_git_hash', 'import_modules_from_strings',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'check_python_script',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method'
]
else:
from .env import collect_env
from .logging import get_logger, print_log
from .parrots_jit import jit, skip_no_elena
from .parrots_wrapper import (
TORCH_VERSION, BuildExtension, CppExtension, CUDAExtension, DataLoader,
PoolDataLoader, SyncBatchNorm, _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd,
_AvgPoolNd, _BatchNorm, _ConvNd, _ConvTransposeMixin, _InstanceNorm,
_MaxPoolNd, get_build_config, is_rocm_pytorch, _get_cuda_home)
from .registry import Registry, build_from_cfg
from .trace import is_jit_tracing
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'collect_env', 'get_logger',
'print_log', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast',
'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist',
'symlink', 'scandir', 'ProgressBar', 'track_progress',
'track_iter_progress', 'track_parallel_progress', 'Registry',
'build_from_cfg', 'Timer', 'TimerError', 'check_time', 'SyncBatchNorm',
'_AdaptiveAvgPoolNd', '_AdaptiveMaxPoolNd', '_AvgPoolNd', '_BatchNorm',
'_ConvNd', '_ConvTransposeMixin', '_InstanceNorm', '_MaxPoolNd',
'get_build_config', 'BuildExtension', 'CppExtension', 'CUDAExtension',
'DataLoader', 'PoolDataLoader', 'TORCH_VERSION',
'deprecated_api_warning', 'digit_version', 'get_git_hash',
'import_modules_from_strings', 'jit', 'skip_no_elena',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'assert_is_norm_layer',
'assert_params_all_zeros', 'check_python_script',
'is_method_overridden', 'is_jit_tracing', 'is_rocm_pytorch',
'_get_cuda_home', 'has_method'
]
The provided code snippet includes necessary dependencies for implementing the `resize` function. Write a Python function `def resize(input: torch.Tensor, size: Optional[Union[Tuple[int, int], torch.Size]] = None, scale_factor: Optional[float] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, warning: bool = True) -> torch.Tensor` to solve the following problem:
Resize a given input tensor using specified size or scale_factor. Args: input (torch.Tensor): The input tensor to be resized. size (Optional[Union[Tuple[int, int], torch.Size]]): The desired output size. Defaults to None. scale_factor (Optional[float]): The scaling factor for resizing. Defaults to None. mode (str): The interpolation mode. Defaults to 'nearest'. align_corners (Optional[bool]): Determines whether to align the corners when using certain interpolation modes. Defaults to None. warning (bool): Whether to display a warning when the input and output sizes are not ideal for alignment. Defaults to True. Returns: torch.Tensor: The resized tensor.
Here is the function:
def resize(input: torch.Tensor,
           size: Optional[Union[Tuple[int, int], torch.Size]] = None,
           scale_factor: Optional[float] = None,
           mode: str = 'nearest',
           align_corners: Optional[bool] = None,
           warning: bool = True) -> torch.Tensor:
    """Resize a given input tensor using specified size or scale_factor.

    Args:
        input (torch.Tensor): The input tensor to be resized.
        size (Optional[Union[Tuple[int, int], torch.Size]]): The desired
            output size. Defaults to None.
        scale_factor (Optional[float]): The scaling factor for resizing.
            Defaults to None.
        mode (str): The interpolation mode. Defaults to 'nearest'.
        align_corners (Optional[bool]): Determines whether to align the
            corners when using certain interpolation modes. Defaults to None.
        warning (bool): Whether to display a warning when the input and
            output sizes are not ideal for alignment. Defaults to True.

    Returns:
        torch.Tensor: The resized tensor.
    """
    # Check if a warning should be displayed regarding input and output sizes
    if warning:
        if size is not None and align_corners:
            input_h, input_w = tuple(int(x) for x in input.shape[2:])
            output_h, output_w = tuple(int(x) for x in size)
            # Fixed: compare output_w against input_w. The previous
            # condition (`output_w > output_h`) compared the output's own
            # width with its height, so the upsampling warning misfired on
            # non-square targets.
            if output_h > input_h or output_w > input_w:
                if ((output_h > 1 and output_w > 1 and input_h > 1
                     and input_w > 1) and (output_h - 1) % (input_h - 1)
                        and (output_w - 1) % (input_w - 1)):
                    warnings.warn(
                        f'When align_corners={align_corners}, '
                        'the output would be more aligned if '
                        f'input size {(input_h, input_w)} is `x+1` and '
                        f'out size {(output_h, output_w)} is `nx+1`')
    # Convert torch.Size to tuple if necessary
    if isinstance(size, torch.Size):
        size = tuple(int(x) for x in size)
    # Perform the resizing operation
    return F.interpolate(input, size, scale_factor, mode, align_corners)
159,228 | from collections import OrderedDict
import torch
try:
import torch
except ImportError:
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'is_str', 'iter_cast',
'list_cast', 'tuple_cast', 'is_seq_of', 'is_list_of', 'is_tuple_of',
'slice_list', 'concat_list', 'check_prerequisites', 'requires_package',
'requires_executable', 'is_filepath', 'fopen', 'check_file_exist',
'mkdir_or_exist', 'symlink', 'scandir', 'ProgressBar',
'track_progress', 'track_iter_progress', 'track_parallel_progress',
'Timer', 'TimerError', 'check_time', 'deprecated_api_warning',
'digit_version', 'get_git_hash', 'import_modules_from_strings',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'check_python_script',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method'
]
else:
from .env import collect_env
from .logging import get_logger, print_log
from .parrots_jit import jit, skip_no_elena
from .parrots_wrapper import (
TORCH_VERSION, BuildExtension, CppExtension, CUDAExtension, DataLoader,
PoolDataLoader, SyncBatchNorm, _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd,
_AvgPoolNd, _BatchNorm, _ConvNd, _ConvTransposeMixin, _InstanceNorm,
_MaxPoolNd, get_build_config, is_rocm_pytorch, _get_cuda_home)
from .registry import Registry, build_from_cfg
from .trace import is_jit_tracing
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'collect_env', 'get_logger',
'print_log', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast',
'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist',
'symlink', 'scandir', 'ProgressBar', 'track_progress',
'track_iter_progress', 'track_parallel_progress', 'Registry',
'build_from_cfg', 'Timer', 'TimerError', 'check_time', 'SyncBatchNorm',
'_AdaptiveAvgPoolNd', '_AdaptiveMaxPoolNd', '_AvgPoolNd', '_BatchNorm',
'_ConvNd', '_ConvTransposeMixin', '_InstanceNorm', '_MaxPoolNd',
'get_build_config', 'BuildExtension', 'CppExtension', 'CUDAExtension',
'DataLoader', 'PoolDataLoader', 'TORCH_VERSION',
'deprecated_api_warning', 'digit_version', 'get_git_hash',
'import_modules_from_strings', 'jit', 'skip_no_elena',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'assert_is_norm_layer',
'assert_params_all_zeros', 'check_python_script',
'is_method_overridden', 'is_jit_tracing', 'is_rocm_pytorch',
'_get_cuda_home', 'has_method'
]
def pvt_convert(ckpt):
    """Convert an official PVT checkpoint's keys to the mm-style layout.

    Args:
        ckpt (dict): State dict loaded from an official PVT/PVTv2 checkpoint.

    Returns:
        OrderedDict: State dict with keys renamed to the ``layers.{i}...``
        scheme; classification head, final norm and cls-token entries are
        dropped.
    """
    new_ckpt = OrderedDict()
    # First pass: detect the checkpoint variant. PVTv1 uses absolute
    # position embeddings; PVTv2 uses convolutional FFNs (dwconv).
    use_abs_pos_embed = False
    use_conv_ffn = False
    for k in ckpt.keys():
        if k.startswith('pos_embed'):
            use_abs_pos_embed = True
        if k.find('dwconv') >= 0:
            use_conv_ffn = True
    for k, v in ckpt.items():
        # Drop weights that have no counterpart in the backbone.
        if k.startswith('head'):
            continue
        if k.startswith('norm.'):
            continue
        if k.startswith('cls_token'):
            continue
        if k.startswith('pos_embed'):
            stage_i = int(k.replace('pos_embed', ''))
            new_k = k.replace(f'pos_embed{stage_i}',
                              f'layers.{stage_i - 1}.1.0.pos_embed')
            if stage_i == 4 and v.size(1) == 50:  # 1 (cls token) + 7 * 7
                new_v = v[:, 1:, :]  # remove cls token
            else:
                new_v = v
        elif k.startswith('patch_embed'):
            stage_i = int(k.split('.')[0].replace('patch_embed', ''))
            new_k = k.replace(f'patch_embed{stage_i}',
                              f'layers.{stage_i - 1}.0')
            new_v = v
            if 'proj.' in new_k:
                new_k = new_k.replace('proj.', 'projection.')
        elif k.startswith('block'):
            stage_i = int(k.split('.')[0].replace('block', ''))
            layer_i = int(k.split('.')[1])
            # With absolute pos-embed, slot 0 of the stage holds it, so
            # transformer layers shift by one (bool adds as 0/1).
            new_layer_i = layer_i + use_abs_pos_embed
            new_k = k.replace(f'block{stage_i}.{layer_i}',
                              f'layers.{stage_i - 1}.1.{new_layer_i}')
            new_v = v
            if 'attn.q.' in new_k:
                # Merge the separate q and kv projections into one in_proj.
                sub_item_k = k.replace('q.', 'kv.')
                new_k = new_k.replace('q.', 'attn.in_proj_')
                new_v = torch.cat([v, ckpt[sub_item_k]], dim=0)
            elif 'attn.kv.' in new_k:
                continue  # already merged into in_proj above
            elif 'attn.proj.' in new_k:
                new_k = new_k.replace('proj.', 'attn.out_proj.')
            elif 'mlp.' in new_k:
                # NOTE: removed a dead no-op branch for 'attn.sr.' keys
                # (it replaced 'sr.' with 'sr.') and a write-only debug
                # string; key mapping is unchanged.
                new_k = new_k.replace('mlp.', 'ffn.layers.')
                if 'fc1.weight' in new_k or 'fc2.weight' in new_k:
                    # Linear weights become 1x1 conv weights.
                    new_v = v.reshape((*v.shape, 1, 1))
                new_k = new_k.replace('fc1.', '0.')
                new_k = new_k.replace('dwconv.dwconv.', '1.')
                if use_conv_ffn:
                    new_k = new_k.replace('fc2.', '4.')
                else:
                    new_k = new_k.replace('fc2.', '3.')
        elif k.startswith('norm'):
            stage_i = int(k[4])
            new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i - 1}.2')
            new_v = v
        else:
            new_k = k
            new_v = v
        new_ckpt[new_k] = new_v
    return new_ckpt
159,230 | import datetime
import os
import platform
import warnings
import cv2
import torch.multiprocessing as mp
from mmengine import DefaultScope
The provided code snippet includes necessary dependencies for implementing the `register_all_modules` function. Write a Python function `def register_all_modules(init_default_scope: bool = True) -> None` to solve the following problem:
Register all modules in mmpose into the registries. Args: init_default_scope (bool): Whether initialize the mmpose default scope. When `init_default_scope=True`, the global default scope will be set to `mmpose`, and all registries will build modules from mmpose's registry node. To understand more about the registry, please refer to https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md Defaults to True.
Here is the function:
def register_all_modules(init_default_scope: bool = True) -> None:
    """Register all modules in mmpose into the registries.

    Args:
        init_default_scope (bool): Whether to initialize the mmpose default
            scope. When ``init_default_scope=True``, the global default scope
            will be set to ``mmpose``, and all registries will build modules
            from mmpose's registry node. To understand more about the
            registry, please refer to
            https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md
            Defaults to True.
    """  # noqa
    # Importing the subpackages triggers registration of their modules.
    import mmpose.codecs  # noqa: F401, F403
    import mmpose.datasets  # noqa: F401,F403
    import mmpose.engine  # noqa: F401,F403
    import mmpose.evaluation  # noqa: F401,F403
    import mmpose.models  # noqa: F401,F403
    import mmpose.visualization  # noqa: F401,F403

    if not init_default_scope:
        return

    if (DefaultScope.get_current_instance() is None
            or not DefaultScope.check_instance_created('mmpose')):
        # The 'mmpose' scope was never created; create it and stop here.
        DefaultScope.get_instance('mmpose', scope_name='mmpose')
        return

    current_scope = DefaultScope.get_current_instance()
    if current_scope.scope_name != 'mmpose':
        warnings.warn('The current default scope '
                      f'"{current_scope.scope_name}" is not "mmpose", '
                      '`register_all_modules` will force the current'
                      'default scope to be "mmpose". If this is not '
                      'expected, please set `init_default_scope=False`.')
        # Timestamped instance name avoids clashing with existing scopes.
        unique_name = f'mmpose-{datetime.datetime.now()}'
        DefaultScope.get_instance(unique_name, scope_name='mmpose')
159,231 | from mmengine.utils import get_git_hash
from mmengine.utils.dl_utils import collect_env as collect_base_env
import mmpose
def collect_env():
    """Collect environment information, including the MMPose version.

    Returns:
        dict: The base environment info from mmengine, extended with an
        ``'MMPose'`` entry of the form ``"<version>+<short git hash>"``.
    """
    info = collect_base_env()
    info['MMPose'] = f'{mmpose.__version__}+{get_git_hash(digits=7)}'
    return info
159,232 | from mmpose.utils.typing import ConfigDict
The provided code snippet includes necessary dependencies for implementing the `adapt_mmdet_pipeline` function. Write a Python function `def adapt_mmdet_pipeline(cfg: ConfigDict) -> ConfigDict` to solve the following problem:
Converts pipeline types in MMDetection's test dataloader to use the 'mmdet' namespace. Args: cfg (ConfigDict): Configuration dictionary for MMDetection. Returns: ConfigDict: Configuration dictionary with updated pipeline types.
Here is the function:
def adapt_mmdet_pipeline(cfg: ConfigDict) -> ConfigDict:
    """Converts pipeline types in MMDetection's test dataloader to use the
    'mmdet' namespace.

    Args:
        cfg (ConfigDict): Configuration dictionary for MMDetection.

    Returns:
        ConfigDict: Configuration dictionary with updated pipeline types.
    """
    # Lazy import so mmdet is only required when this helper is called.
    from mmdet.datasets import transforms

    if 'test_dataloader' not in cfg:
        return cfg

    # Hoist the attribute listing out of the loop; it does not change.
    known_transforms = dir(transforms)
    for stage in cfg.test_dataloader.dataset.pipeline:
        if stage['type'] in known_transforms:
            stage['type'] = 'mmdet.' + stage['type']
    return cfg
159,234 | from typing import Any, Optional, Sequence, Union
import numpy as np
import torch
from mmengine.utils import is_seq_of
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `to_numpy` function. Write a Python function `def to_numpy(x: Union[Tensor, Sequence[Tensor]], return_device: bool = False, unzip: bool = False) -> Union[np.ndarray, tuple]` to solve the following problem:
Convert torch tensor to numpy.ndarray. Args: x (Tensor | Sequence[Tensor]): A single tensor or a sequence of tensors return_device (bool): Whether to return the tensor device. Defaults to ``False`` unzip (bool): Whether to unzip the input sequence. Defaults to ``False`` Returns: np.ndarray | tuple: If ``return_device`` is ``True``, return a tuple of converted numpy array(s) and the device indicator; otherwise only return the numpy array(s)
Here is the function:
def to_numpy(x: Union[Tensor, Sequence[Tensor]],
             return_device: bool = False,
             unzip: bool = False) -> Union[np.ndarray, tuple]:
    """Convert torch tensor(s) to numpy.ndarray(s).

    Args:
        x (Tensor | Sequence[Tensor]): A single tensor or a sequence of
            tensors
        return_device (bool): Whether to also return the tensor device.
            Defaults to ``False``
        unzip (bool): Whether to unzip the input sequence. Defaults to
            ``False``

    Returns:
        np.ndarray | tuple: If ``return_device`` is ``True``, a tuple of
        the converted numpy array(s) and the device indicator; otherwise
        only the numpy array(s)
    """
    if isinstance(x, Tensor):
        device = x.device
        arrays = x.detach().cpu().numpy()
    elif is_seq_of(x, Tensor):
        device = x[0].device
        if unzip:
            # Convert (A, B) -> [(A[0], B[0]), (A[1], B[1]), ...], keeping
            # a leading batch dimension of 1 on each element.
            arrays = [
                tuple(to_numpy(item[None, :]) for item in group)
                for group in zip(*x)
            ]
        else:
            arrays = [to_numpy(t) for t in x]
    else:
        raise ValueError(f'Invalid input type {type(x)}')

    return (arrays, device) if return_device else arrays
159,235 | from typing import Any, Optional, Sequence, Union
import numpy as np
import torch
from mmengine.utils import is_seq_of
from torch import Tensor
try:
import torch
except ImportError:
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'is_str', 'iter_cast',
'list_cast', 'tuple_cast', 'is_seq_of', 'is_list_of', 'is_tuple_of',
'slice_list', 'concat_list', 'check_prerequisites', 'requires_package',
'requires_executable', 'is_filepath', 'fopen', 'check_file_exist',
'mkdir_or_exist', 'symlink', 'scandir', 'ProgressBar',
'track_progress', 'track_iter_progress', 'track_parallel_progress',
'Timer', 'TimerError', 'check_time', 'deprecated_api_warning',
'digit_version', 'get_git_hash', 'import_modules_from_strings',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'check_python_script',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method'
]
else:
from .env import collect_env
from .logging import get_logger, print_log
from .parrots_jit import jit, skip_no_elena
from .parrots_wrapper import (
TORCH_VERSION, BuildExtension, CppExtension, CUDAExtension, DataLoader,
PoolDataLoader, SyncBatchNorm, _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd,
_AvgPoolNd, _BatchNorm, _ConvNd, _ConvTransposeMixin, _InstanceNorm,
_MaxPoolNd, get_build_config, is_rocm_pytorch, _get_cuda_home)
from .registry import Registry, build_from_cfg
from .trace import is_jit_tracing
__all__ = [
'Config', 'ConfigDict', 'DictAction', 'collect_env', 'get_logger',
'print_log', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast',
'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist',
'symlink', 'scandir', 'ProgressBar', 'track_progress',
'track_iter_progress', 'track_parallel_progress', 'Registry',
'build_from_cfg', 'Timer', 'TimerError', 'check_time', 'SyncBatchNorm',
'_AdaptiveAvgPoolNd', '_AdaptiveMaxPoolNd', '_AvgPoolNd', '_BatchNorm',
'_ConvNd', '_ConvTransposeMixin', '_InstanceNorm', '_MaxPoolNd',
'get_build_config', 'BuildExtension', 'CppExtension', 'CUDAExtension',
'DataLoader', 'PoolDataLoader', 'TORCH_VERSION',
'deprecated_api_warning', 'digit_version', 'get_git_hash',
'import_modules_from_strings', 'jit', 'skip_no_elena',
'assert_dict_contains_subset', 'assert_attrs_equal',
'assert_dict_has_keys', 'assert_keys_equal', 'assert_is_norm_layer',
'assert_params_all_zeros', 'check_python_script',
'is_method_overridden', 'is_jit_tracing', 'is_rocm_pytorch',
'_get_cuda_home', 'has_method'
]
The provided code snippet includes necessary dependencies for implementing the `to_tensor` function. Write a Python function `def to_tensor(x: Union[np.ndarray, Sequence[np.ndarray]], device: Optional[Any] = None) -> Union[Tensor, Sequence[Tensor]]` to solve the following problem:
Convert numpy.ndarray to torch tensor. Args: x (np.ndarray | Sequence[np.ndarray]): A single np.ndarray or a sequence of np.ndarrays device (Any, optional): The device indicator. Defaults to ``None`` Returns: Tensor | Sequence[Tensor]: The converted Tensor or Tensor sequence
Here is the function:
def to_tensor(x: Union[np.ndarray, Sequence[np.ndarray]],
              device: Optional[Any] = None) -> Union[Tensor, Sequence[Tensor]]:
    """Convert numpy.ndarray(s) to torch tensor(s).

    Args:
        x (np.ndarray | Sequence[np.ndarray]): A single np.ndarray or a
            sequence of np.ndarrays
        device (Any, optional): The device indicator. Defaults to ``None``

    Returns:
        Tensor | Sequence[Tensor]: The converted Tensor or Tensor sequence
    """
    if isinstance(x, np.ndarray):
        return torch.tensor(x, device=device)
    if is_seq_of(x, np.ndarray):
        return [to_tensor(item, device=device) for item in x]
    raise ValueError(f'Invalid input type {type(x)}')
159,236 | import logging
from mmengine.logging import MMLogger
import logging
The provided code snippet includes necessary dependencies for implementing the `get_root_logger` function. Write a Python function `def get_root_logger(log_file=None, log_level=logging.INFO)` to solve the following problem:
Use `MMLogger` class in mmengine to get the root logger. The logger will be initialized if it has not been initialized. By default a StreamHandler will be added. If `log_file` is specified, a FileHandler will also be added. The name of the root logger is the top-level package name, e.g., "mmpose". Args: log_file (str | None): The log filename. If specified, a FileHandler will be added to the root logger. log_level (int): The root logger level. Note that only the process of rank 0 is affected, while other processes will set the level to "Error" and be silent most of the time. Returns: logging.Logger: The root logger.
Here is the function:
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Return the package root logger via mmengine's ``MMLogger``.

    The logger is initialized if it has not been already; a StreamHandler is
    always attached, and a FileHandler is added when ``log_file`` is given.
    The logger name is the top-level package name (e.g. "mmpose").

    Args:
        log_file (str | None): The log filename. If specified, a FileHandler
            will be added to the root logger.
        log_level (int): The root logger level. Note that only the process of
            rank 0 is affected, while other processes will set the level to
            "Error" and be silent most of the time.

    Returns:
        logging.Logger: The root logger.
    """
    root_package = __name__.split('.')[0]
    return MMLogger('MMLogger', root_package, log_file, log_level)
159,237 | import os
import argparse
from multiprocessing import Pool
def findAllFile(base):
    """Recursively collect the paths of all files under a directory.

    Args:
        base (str): Root directory to search.

    Returns:
        list[str]: Full paths of every file found, in ``os.walk`` order.
    """
    # Comprehension replaces the manual append loop; the unused directory
    # list from os.walk is discarded with `_`.
    return [
        os.path.join(root, name)
        for root, _, files in os.walk(base)
        for name in files
    ]
159,238 | import os
import argparse
from multiprocessing import Pool
def convert(video_path):
    """Extract frames from a video into a mirrored ``images`` directory.

    The frame directory is derived from the video path by dropping the
    filename's extension (everything after the first dot) and swapping the
    ``/videos/`` path component for ``/images/``. Frames are written as
    ``%06d.png`` at 30 fps via ffmpeg.

    Args:
        video_path (str): Path to the input video file.
    """
    import subprocess

    video_name = os.path.basename(video_path)
    # Keep everything before the FIRST dot, matching the original naming.
    stem = video_name.split('.')[0]
    # os.path-based derivation avoids the fragile str.replace on the full
    # path, which could corrupt it if the filename appeared earlier in it.
    image_path = os.path.join(os.path.dirname(video_path), stem)
    image_path = image_path.replace('/videos/', '/images/')
    os.makedirs(image_path, exist_ok=True)
    # List-form subprocess call avoids shell quoting/injection issues with
    # paths containing spaces or metacharacters (os.system interpolation
    # did not). check=False mirrors os.system's ignored return code.
    subprocess.run([
        'ffmpeg', '-i', video_path, '-f', 'image2', '-r', '30',
        '-b:v', '5626k', f'{image_path}/%06d.png'
    ], check=False)
159,239 | import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.runner import Runner
def parse_args():
    """Parse command-line arguments for training a pose model.

    Also propagates ``--local_rank`` into the ``LOCAL_RANK`` environment
    variable when it is not already set, as required by distributed
    launchers.

    Returns:
        argparse.Namespace: The parsed arguments.
    """
    parser = argparse.ArgumentParser(description='Train a pose model')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--resume',
        nargs='?',
        type=str,
        const='auto',
        # Fixed typo in the help text: "checkpint" -> "checkpoint".
        help='If specify checkpoint path, resume from it, while if not '
        'specify, try to auto resume from the latest checkpoint '
        'in the work directory.')
    parser.add_argument(
        '--amp',
        action='store_true',
        default=False,
        help='enable automatic-mixed-precision training')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    parser.add_argument(
        '--auto-scale-lr',
        action='store_true',
        help='whether to auto scale the learning rate according to the '
        'actual batch size and the original batch size.')
    parser.add_argument(
        '--show-dir',
        help='directory where the visualization images will be saved.')
    parser.add_argument(
        '--show',
        action='store_true',
        help='whether to display the prediction results in a window.')
    parser.add_argument(
        '--interval',
        type=int,
        default=1,
        help='visualize per interval samples.')
    parser.add_argument(
        '--wait-time',
        type=float,
        default=1,
        help='display time of every window. (second)')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    # When using PyTorch version >= 2.0.0, the `torch.distributed.launch`
    # will pass the `--local-rank` parameter to `tools/train.py` instead
    # of `--local_rank`.
    parser.add_argument('--local_rank', '--local-rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.