Upload 3 files
Browse files
comfyui-teskors-utils-main/README.md
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# comfyui-teskors-utils
|
| 2 |
+
|
| 3 |
+
TSPoseDataSmoother — DWPose temporal smoothing node for ComfyUI.
|
| 4 |
+
Recreated from original teskor-hub/comfyui-teskors-utils (deleted).
|
| 5 |
+
|
| 6 |
+
## TSPoseDataSmoother
|
| 7 |
+
Smooths pose data using temporal EMA filtering for Wan2.2 Animate.
|
| 8 |
+
|
| 9 |
+
Install: clone this repository into `ComfyUI/custom_nodes/comfyui-teskors-utils`.
|
comfyui-teskors-utils-main/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Package entry point: re-export the node registration mappings so ComfyUI
# can discover the nodes defined in this package.
from .nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS

# Explicit public API of the package.
__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"]
|
comfyui-teskors-utils-main/nodes.py
ADDED
|
@@ -0,0 +1,367 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
TSPoseDataSmoother — DWPose temporal smoothing and rendering node.
|
| 3 |
+
Recreated from original comfyui-teskors-utils by teskor-hub.
|
| 4 |
+
|
| 5 |
+
This node takes POSEDATA from PoseAndFaceDetection, applies exponential
|
| 6 |
+
moving average smoothing across frames, filters out extra people,
|
| 7 |
+
and outputs smoothed pose images and data.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
import torch
|
| 12 |
+
import copy
|
| 13 |
+
import logging
|
| 14 |
+
|
| 15 |
+
from comfy.utils import ProgressBar
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def _get_keypoints_array(meta, key):
|
| 21 |
+
"""Extract keypoints array from an AAPoseMeta or dict-based meta."""
|
| 22 |
+
if hasattr(meta, key):
|
| 23 |
+
kp = getattr(meta, key)
|
| 24 |
+
elif isinstance(meta, dict) and key in meta:
|
| 25 |
+
kp = meta[key]
|
| 26 |
+
else:
|
| 27 |
+
return None
|
| 28 |
+
if kp is None:
|
| 29 |
+
return None
|
| 30 |
+
if isinstance(kp, np.ndarray):
|
| 31 |
+
return kp.copy()
|
| 32 |
+
return np.array(kp, dtype=np.float32)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def _set_keypoints_array(meta, key, value):
|
| 36 |
+
"""Set keypoints array back into meta."""
|
| 37 |
+
if hasattr(meta, key):
|
| 38 |
+
setattr(meta, key, value)
|
| 39 |
+
elif isinstance(meta, dict):
|
| 40 |
+
meta[key] = value
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def _ema_smooth(prev_kp, curr_kp, alpha, conf_thresh):
|
| 44 |
+
"""
|
| 45 |
+
Apply exponential moving average smoothing.
|
| 46 |
+
Only smooth keypoints that have confidence above threshold.
|
| 47 |
+
|
| 48 |
+
prev_kp, curr_kp: numpy arrays of shape (N, 3) with [x, y, confidence]
|
| 49 |
+
alpha: smoothing factor (0-1), higher = more smoothing from current frame
|
| 50 |
+
conf_thresh: minimum confidence for a keypoint to be considered valid
|
| 51 |
+
"""
|
| 52 |
+
if prev_kp is None or curr_kp is None:
|
| 53 |
+
return curr_kp
|
| 54 |
+
if prev_kp.shape != curr_kp.shape:
|
| 55 |
+
return curr_kp
|
| 56 |
+
|
| 57 |
+
smoothed = curr_kp.copy()
|
| 58 |
+
n_points = min(prev_kp.shape[0], curr_kp.shape[0])
|
| 59 |
+
|
| 60 |
+
for i in range(n_points):
|
| 61 |
+
# Only smooth if both previous and current have sufficient confidence
|
| 62 |
+
prev_conf = prev_kp[i, 2] if prev_kp.shape[1] > 2 else 1.0
|
| 63 |
+
curr_conf = curr_kp[i, 2] if curr_kp.shape[1] > 2 else 1.0
|
| 64 |
+
|
| 65 |
+
if prev_conf >= conf_thresh and curr_conf >= conf_thresh:
|
| 66 |
+
# EMA: smoothed = alpha * current + (1 - alpha) * previous
|
| 67 |
+
smoothed[i, :2] = alpha * curr_kp[i, :2] + (1 - alpha) * prev_kp[i, :2]
|
| 68 |
+
# If current frame confidence is too low, keep current (don't hallucinate)
|
| 69 |
+
|
| 70 |
+
return smoothed
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _filter_to_primary_person(pose_metas, min_run_frames):
|
| 74 |
+
"""
|
| 75 |
+
When multiple people are detected, keep only the most prominent one.
|
| 76 |
+
Identifies the primary person based on bbox area and continuous presence.
|
| 77 |
+
Returns the filtered metas (list of same type).
|
| 78 |
+
"""
|
| 79 |
+
# For the WanAnimate pipeline, PoseAndFaceDetection already returns
|
| 80 |
+
# single-person results per frame, so filtering is mainly about
|
| 81 |
+
# ensuring continuity and removing spurious detections.
|
| 82 |
+
# We just pass through as-is since the detector handles this.
|
| 83 |
+
return pose_metas
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def _smooth_pose_sequence(pose_metas, smooth_alpha, gap_frames, min_run_frames,
                          conf_thresh_body, conf_thresh_hands, filter_extra_people):
    """
    Apply temporal EMA smoothing to a sequence of pose meta data.

    Args:
        pose_metas: list of AAPoseMeta objects or dicts
        smooth_alpha: EMA blending factor (higher = favor current frame more)
        gap_frames: max consecutive frames without a body detection across
            which smoothing may continue from the last seen pose; longer
            gaps restart smoothing from the current frame
        min_run_frames: minimum consecutive frames for a valid detection run
            (currently only forwarded to the person filter)
        conf_thresh_body: confidence threshold for body keypoints
        conf_thresh_hands: confidence threshold for hand keypoints
        filter_extra_people: whether to filter to a single person

    Returns:
        list of smoothed pose metas (deep copies; the input list and its
        metas are never modified)

    NOTE(review): face keypoints are intentionally left unsmoothed.
    """
    if not pose_metas:
        return pose_metas

    # Deep copy so callers keep their original data untouched.
    smoothed_metas = [copy.deepcopy(meta) for meta in pose_metas]

    if filter_extra_people:
        smoothed_metas = _filter_to_primary_person(smoothed_metas, min_run_frames)

    prev_body = None
    prev_lhand = None
    prev_rhand = None
    gap_counter = 0  # consecutive frames with no body detection

    for meta in smoothed_metas:
        # --- Body: gap-aware EMA smoothing ---
        curr_body = _get_keypoints_array(meta, 'keypoints_body')
        if curr_body is not None:
            if prev_body is not None and gap_counter <= gap_frames:
                prev_body = _ema_smooth(prev_body, curr_body, smooth_alpha, conf_thresh_body)
                _set_keypoints_array(meta, 'keypoints_body', prev_body)
            else:
                # First detection, or the gap was too long: restart from
                # the current pose rather than blending with stale data.
                prev_body = curr_body
            gap_counter = 0
        else:
            gap_counter += 1

        # --- Left hand ---
        curr_lhand = _get_keypoints_array(meta, 'keypoints_lhand')
        if curr_lhand is not None:
            if prev_lhand is not None:
                prev_lhand = _ema_smooth(prev_lhand, curr_lhand, smooth_alpha, conf_thresh_hands)
                _set_keypoints_array(meta, 'keypoints_lhand', prev_lhand)
            else:
                prev_lhand = curr_lhand

        # --- Right hand ---
        curr_rhand = _get_keypoints_array(meta, 'keypoints_rhand')
        if curr_rhand is not None:
            if prev_rhand is not None:
                prev_rhand = _ema_smooth(prev_rhand, curr_rhand, smooth_alpha, conf_thresh_hands)
                _set_keypoints_array(meta, 'keypoints_rhand', prev_rhand)
            else:
                prev_rhand = curr_rhand

    return smoothed_metas
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
class TSPoseDataSmoother:
    """
    Smooths pose data across video frames using temporal EMA filtering.
    Reduces jitter/trembling in detected poses for smoother animation.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "pose_data": ("POSEDATA",),
                "filter_extra_people": ("BOOLEAN", {
                    "default": True,
                    "tooltip": "Filter to keep only the primary detected person"
                }),
                "smooth_alpha": ("FLOAT", {
                    "default": 0.70,
                    "min": 0.0,
                    "max": 1.0,
                    "step": 0.01,
                    "tooltip": "EMA smoothing factor. Higher = more weight on current frame (less smoothing). Lower = more weight on previous frames (more smoothing)."
                }),
                "gap_frames": ("INT", {
                    "default": 12,
                    "min": 0,
                    "max": 120,
                    "step": 1,
                    "tooltip": "Maximum gap (in frames) to bridge when a detection is temporarily lost."
                }),
                "min_run_frames": ("INT", {
                    "default": 2,
                    "min": 1,
                    "max": 30,
                    "step": 1,
                    "tooltip": "Minimum consecutive frames a person must be detected to be considered valid."
                }),
                "conf_thresh_body": ("FLOAT", {
                    "default": 0.20,
                    "min": 0.0,
                    "max": 1.0,
                    "step": 0.01,
                    "tooltip": "Minimum confidence threshold for body keypoints to be smoothed."
                }),
                "conf_thresh_hands": ("FLOAT", {
                    "default": 0.50,
                    "min": 0.0,
                    "max": 1.0,
                    "step": 0.01,
                    "tooltip": "Minimum confidence threshold for hand keypoints to be smoothed."
                }),
            },
        }

    RETURN_TYPES = ("IMAGE", "POSEDATA")
    RETURN_NAMES = ("IMAGE", "pose_data")
    FUNCTION = "smooth"
    CATEGORY = "WanAnimatePreprocess"
    DESCRIPTION = "Smooths pose data across video frames using temporal EMA filtering to reduce jitter in detected poses."

    @staticmethod
    def _meta_dims(meta, default=512):
        """Return (width, height) from an attr-style or dict-style meta.

        Each dimension independently falls back to *default* when missing.
        (The original mixed hasattr/.get logic could raise on an object
        meta that had `width` but no `height`.)
        """
        if isinstance(meta, dict):
            return meta.get('width', default), meta.get('height', default)
        return getattr(meta, 'width', default), getattr(meta, 'height', default)

    @staticmethod
    def _load_draw_functions():
        """Import drawing helpers from ComfyUI-WanAnimatePreprocess, if installed.

        Returns (draw_fn, resize_fn); both are None when the sibling package
        or its modules are unavailable. The temporary sys.path entry is
        always removed, even when the import fails.
        """
        import os
        import sys

        custom_nodes_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        wan_preprocess_dir = os.path.join(custom_nodes_dir, "ComfyUI-WanAnimatePreprocess")
        if not os.path.exists(wan_preprocess_dir):
            return None, None

        sys.path.insert(0, wan_preprocess_dir)
        try:
            from pose_utils.human_visualization import draw_aapose_by_meta_new
            from utils import padding_resize
            return draw_aapose_by_meta_new, padding_resize
        except ImportError as e:
            logger.warning("TSPoseDataSmoother: Could not import drawing functions: %s", e)
            return None, None
        finally:
            # Clean up sys.path on BOTH success and failure, and remove our
            # exact entry (pop(0) could remove someone else's insertion).
            try:
                sys.path.remove(wan_preprocess_dir)
            except ValueError:
                pass

    def smooth(self, pose_data, filter_extra_people, smooth_alpha, gap_frames,
               min_run_frames, conf_thresh_body, conf_thresh_hands):
        """Smooth the pose sequence and render one pose image per frame.

        Args:
            pose_data: dict containing "pose_metas" (list of pose metas) and
                optionally "pose_metas_original".
            (remaining args: see INPUT_TYPES tooltips)

        Returns:
            (pose_images_tensor, smoothed_pose_data) where the tensor is
            float32 in [0, 1] with shape (frames, H, W, 3), and the dict is
            a shallow copy of pose_data with "pose_metas" replaced.
        """
        pose_metas = pose_data.get("pose_metas", [])
        pose_metas_original = pose_data.get("pose_metas_original", [])

        if not pose_metas:
            logger.warning("TSPoseDataSmoother: No pose_metas found in pose_data")
            # Return a small black placeholder image and pass the data through.
            return (torch.zeros(1, 64, 64, 3), pose_data)

        # Canvas dimensions come from the first meta (original if available).
        first_meta = pose_metas_original[0] if pose_metas_original else pose_metas[0]
        width, height = self._meta_dims(first_meta)

        # Apply temporal smoothing (deep-copies, so pose_data is untouched).
        smoothed_metas = _smooth_pose_sequence(
            pose_metas,
            smooth_alpha=smooth_alpha,
            gap_frames=gap_frames,
            min_run_frames=min_run_frames,
            conf_thresh_body=conf_thresh_body,
            conf_thresh_hands=conf_thresh_hands,
            filter_extra_people=filter_extra_people,
        )

        # Render smoothed poses with the same drawing function as
        # ComfyUI-WanAnimatePreprocess's DrawViTPose, when available.
        draw_fn, resize_fn = self._load_draw_functions()

        comfy_pbar = ProgressBar(len(smoothed_metas))
        pose_images = []

        for idx, meta in enumerate(smoothed_metas):
            canvas = np.zeros((height, width, 3), dtype=np.uint8)

            if draw_fn is not None:
                try:
                    pose_image = draw_fn(canvas, meta, draw_hand=True, draw_head=True)
                    if resize_fn is not None:
                        try:
                            # Best effort: pad/resize to the target dimensions.
                            pose_image = resize_fn(pose_image, height, width)
                        except Exception:
                            pass
                except Exception as e:
                    logger.warning("TSPoseDataSmoother: Drawing failed on frame %s: %s", idx, e)
                    pose_image = canvas
            else:
                # Fallback: simple keypoint rendering.
                pose_image = _fallback_draw_pose(canvas, meta, height, width)

            pose_images.append(pose_image)
            if (idx + 1) % 10 == 0:  # update progress every 10 frames
                comfy_pbar.update_absolute(idx + 1)

        comfy_pbar.update_absolute(len(smoothed_metas))

        pose_images_np = np.stack(pose_images, 0)
        pose_images_tensor = torch.from_numpy(pose_images_np).float() / 255.0

        # Shallow-copy so the caller's pose_data dict is not mutated.
        smoothed_pose_data = dict(pose_data)
        smoothed_pose_data["pose_metas"] = smoothed_metas

        return (pose_images_tensor, smoothed_pose_data)
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
def _fallback_draw_pose(canvas, meta, height, width):
    """
    Minimal pose renderer used when the ComfyUI-WanAnimatePreprocess
    drawing functions are not available: green skeleton lines and red
    keypoint dots drawn directly onto the canvas.
    """
    import cv2

    body = _get_keypoints_array(meta, 'keypoints_body')
    if body is None:
        return canvas

    has_conf = body.shape[1] > 2

    def to_px(value, extent):
        # Values <= 1.0 are treated as normalized coordinates; larger
        # values are assumed to already be in pixels.
        return int(value * extent) if value <= 1.0 else int(value)

    # COCO-WholeBody skeleton connections for the body:
    # head, arms, torso, legs.
    skeleton = (
        (0, 1), (0, 2), (1, 3), (2, 4),
        (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
        (5, 11), (6, 12), (11, 12),
        (11, 13), (13, 15), (12, 14), (14, 16),
    )

    for a, b in skeleton:
        if a >= len(body) or b >= len(body):
            continue
        conf_a = body[a][2] if has_conf else 1.0
        conf_b = body[b][2] if has_conf else 1.0
        if conf_a > 0.1 and conf_b > 0.1:
            start = (to_px(body[a][0], width), to_px(body[a][1], height))
            end = (to_px(body[b][0], width), to_px(body[b][1], height))
            cv2.line(canvas, start, end, (0, 255, 0), 2)

    # Draw the (up to 17) body keypoints as filled circles.
    for i in range(min(len(body), 17)):
        conf = body[i][2] if has_conf else 1.0
        if conf > 0.1:
            center = (to_px(body[i][0], width), to_px(body[i][1], height))
            cv2.circle(canvas, center, 3, (0, 0, 255), -1)

    return canvas
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
# Node registration: ComfyUI scans these module-level mappings to discover
# and instantiate the nodes provided by this package.
NODE_CLASS_MAPPINGS = {
    "TSPoseDataSmoother": TSPoseDataSmoother,
}

# Human-readable names shown in the ComfyUI node menu.
NODE_DISPLAY_NAME_MAPPINGS = {
    "TSPoseDataSmoother": "TS Pose Data Smoother",
}
|