sample_id
stringlengths
21
196
text
stringlengths
105
936k
metadata
dict
category
stringclasses
6 values
Comfy-Org/ComfyUI:comfy_extras/nodes_resolution.py
from __future__ import annotations import math from enum import Enum from typing_extensions import override from comfy_api.latest import ComfyExtension, io class AspectRatio(str, Enum): SQUARE = "1:1 (Square)" PHOTO_H = "3:2 (Photo)" STANDARD_H = "4:3 (Standard)" WIDESCREEN_H = "16:9 (Widescreen)" ULTRAWIDE_H = "21:9 (Ultrawide)" PHOTO_V = "2:3 (Portrait Photo)" STANDARD_V = "3:4 (Portrait Standard)" WIDESCREEN_V = "9:16 (Portrait Widescreen)" ASPECT_RATIOS: dict[AspectRatio, tuple[int, int]] = { AspectRatio.SQUARE: (1, 1), AspectRatio.PHOTO_H: (3, 2), AspectRatio.STANDARD_H: (4, 3), AspectRatio.WIDESCREEN_H: (16, 9), AspectRatio.ULTRAWIDE_H: (21, 9), AspectRatio.PHOTO_V: (2, 3), AspectRatio.STANDARD_V: (3, 4), AspectRatio.WIDESCREEN_V: (9, 16), } class ResolutionSelector(io.ComfyNode): """Calculate width and height from aspect ratio and megapixel target.""" @classmethod def define_schema(cls): return io.Schema( node_id="ResolutionSelector", display_name="Resolution Selector", category="utils", description="Calculate width and height from aspect ratio and megapixel target. Useful for setting up Empty Latent Image dimensions.", inputs=[ io.Combo.Input( "aspect_ratio", options=AspectRatio, default=AspectRatio.SQUARE, tooltip="The aspect ratio for the output dimensions.", ), io.Float.Input( "megapixels", default=1.0, min=0.1, max=16.0, step=0.1, tooltip="Target total megapixels. 1.0 MP ≈ 1024×1024 for square.", ), ], outputs=[ io.Int.Output( "width", tooltip="Calculated width in pixels (multiple of 8)." ), io.Int.Output( "height", tooltip="Calculated height in pixels (multiple of 8)." 
), ], ) @classmethod def execute(cls, aspect_ratio: str, megapixels: float) -> io.NodeOutput: w_ratio, h_ratio = ASPECT_RATIOS[aspect_ratio] total_pixels = megapixels * 1024 * 1024 scale = math.sqrt(total_pixels / (w_ratio * h_ratio)) width = round(w_ratio * scale / 8) * 8 height = round(h_ratio * scale / 8) * 8 return io.NodeOutput(width, height) class ResolutionExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[io.ComfyNode]]: return [ ResolutionSelector, ] async def comfy_entrypoint() -> ResolutionExtension: return ResolutionExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_resolution.py", "license": "GNU General Public License v3.0", "lines": 74, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy/ldm/modules/sdpose.py
import torch import numpy as np from scipy.ndimage import gaussian_filter class HeatmapHead(torch.nn.Module): def __init__( self, in_channels=640, out_channels=133, input_size=(768, 1024), heatmap_scale=4, deconv_out_channels=(640,), deconv_kernel_sizes=(4,), conv_out_channels=(640,), conv_kernel_sizes=(1,), final_layer_kernel_size=1, device=None, dtype=None, operations=None ): super().__init__() self.heatmap_size = (input_size[0] // heatmap_scale, input_size[1] // heatmap_scale) self.scale_factor = ((np.array(input_size) - 1) / (np.array(self.heatmap_size) - 1)).astype(np.float32) # Deconv layers if deconv_out_channels: deconv_layers = [] for out_ch, kernel_size in zip(deconv_out_channels, deconv_kernel_sizes): if kernel_size == 4: padding, output_padding = 1, 0 elif kernel_size == 3: padding, output_padding = 1, 1 elif kernel_size == 2: padding, output_padding = 0, 0 else: raise ValueError(f'Unsupported kernel size {kernel_size}') deconv_layers.extend([ operations.ConvTranspose2d(in_channels, out_ch, kernel_size, stride=2, padding=padding, output_padding=output_padding, bias=False, device=device, dtype=dtype), torch.nn.InstanceNorm2d(out_ch, device=device, dtype=dtype), torch.nn.SiLU(inplace=True) ]) in_channels = out_ch self.deconv_layers = torch.nn.Sequential(*deconv_layers) else: self.deconv_layers = torch.nn.Identity() # Conv layers if conv_out_channels: conv_layers = [] for out_ch, kernel_size in zip(conv_out_channels, conv_kernel_sizes): padding = (kernel_size - 1) // 2 conv_layers.extend([ operations.Conv2d(in_channels, out_ch, kernel_size, stride=1, padding=padding, device=device, dtype=dtype), torch.nn.InstanceNorm2d(out_ch, device=device, dtype=dtype), torch.nn.SiLU(inplace=True) ]) in_channels = out_ch self.conv_layers = torch.nn.Sequential(*conv_layers) else: self.conv_layers = torch.nn.Identity() self.final_layer = operations.Conv2d(in_channels, out_channels, kernel_size=final_layer_kernel_size, padding=final_layer_kernel_size // 2, device=device, 
dtype=dtype) def forward(self, x): # Decode heatmaps to keypoints heatmaps = self.final_layer(self.conv_layers(self.deconv_layers(x))) heatmaps_np = heatmaps.float().cpu().numpy() # (B, K, H, W) B, K, H, W = heatmaps_np.shape batch_keypoints = [] batch_scores = [] for b in range(B): hm = heatmaps_np[b].copy() # (K, H, W) # --- vectorised argmax --- flat = hm.reshape(K, -1) idx = np.argmax(flat, axis=1) scores = flat[np.arange(K), idx].copy() y_locs, x_locs = np.unravel_index(idx, (H, W)) keypoints = np.stack([x_locs, y_locs], axis=-1).astype(np.float32) # (K, 2) in heatmap space invalid = scores <= 0. keypoints[invalid] = -1 # --- DARK sub-pixel refinement (UDP) --- # 1. Gaussian blur with max-preserving normalisation border = 5 # (kernel-1)//2 for kernel=11 for k in range(K): origin_max = np.max(hm[k]) dr = np.zeros((H + 2 * border, W + 2 * border), dtype=np.float32) dr[border:-border, border:-border] = hm[k].copy() dr = gaussian_filter(dr, sigma=2.0) hm[k] = dr[border:-border, border:-border].copy() cur_max = np.max(hm[k]) if cur_max > 0: hm[k] *= origin_max / cur_max # 2. Log-space for Taylor expansion np.clip(hm, 1e-3, 50., hm) np.log(hm, hm) # 3. 
Hessian-based Newton step hm_pad = np.pad(hm, ((0, 0), (1, 1), (1, 1)), mode='edge').flatten() index = keypoints[:, 0] + 1 + (keypoints[:, 1] + 1) * (W + 2) index += (W + 2) * (H + 2) * np.arange(0, K) index = index.astype(int).reshape(-1, 1) i_ = hm_pad[index] ix1 = hm_pad[index + 1] iy1 = hm_pad[index + W + 2] ix1y1 = hm_pad[index + W + 3] ix1_y1_ = hm_pad[index - W - 3] ix1_ = hm_pad[index - 1] iy1_ = hm_pad[index - 2 - W] dx = 0.5 * (ix1 - ix1_) dy = 0.5 * (iy1 - iy1_) derivative = np.concatenate([dx, dy], axis=1).reshape(K, 2, 1) dxx = ix1 - 2 * i_ + ix1_ dyy = iy1 - 2 * i_ + iy1_ dxy = 0.5 * (ix1y1 - ix1 - iy1 + i_ + i_ - ix1_ - iy1_ + ix1_y1_) hessian = np.concatenate([dxx, dxy, dxy, dyy], axis=1).reshape(K, 2, 2) hessian = np.linalg.inv(hessian + np.finfo(np.float32).eps * np.eye(2)) keypoints -= np.einsum('imn,ink->imk', hessian, derivative).squeeze(axis=-1) # --- restore to input image space --- keypoints = keypoints * self.scale_factor keypoints[invalid] = -1 batch_keypoints.append(keypoints) batch_scores.append(scores) return batch_keypoints, batch_scores
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/ldm/modules/sdpose.py", "license": "GNU General Public License v3.0", "lines": 116, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy_extras/nodes_sdpose.py
import torch import comfy.utils import numpy as np import math import colorsys from tqdm import tqdm from typing_extensions import override from comfy_api.latest import ComfyExtension, io from comfy_extras.nodes_lotus import LotusConditioning def _preprocess_keypoints(kp_raw, sc_raw): """Insert neck keypoint and remap from MMPose to OpenPose ordering. Returns (kp, sc) where kp has shape (134, 2) and sc has shape (134,). Layout: 0-17 body (18 kp, OpenPose order) 18-23 feet (6 kp) 24-91 face (68 kp) 92-112 right hand (21 kp) 113-133 left hand (21 kp) """ kp = np.array(kp_raw, dtype=np.float32) sc = np.array(sc_raw, dtype=np.float32) if len(kp) >= 17: neck = (kp[5] + kp[6]) / 2 neck_score = min(sc[5], sc[6]) if sc[5] > 0.3 and sc[6] > 0.3 else 0 kp = np.insert(kp, 17, neck, axis=0) sc = np.insert(sc, 17, neck_score) mmpose_idx = np.array([17, 6, 8, 10, 7, 9, 12, 14, 16, 13, 15, 2, 1, 4, 3]) openpose_idx = np.array([ 1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17]) tmp_kp, tmp_sc = kp.copy(), sc.copy() tmp_kp[openpose_idx] = kp[mmpose_idx] tmp_sc[openpose_idx] = sc[mmpose_idx] kp, sc = tmp_kp, tmp_sc return kp, sc def _to_openpose_frames(all_keypoints, all_scores, height, width): """Convert raw keypoint lists to a list of OpenPose-style frame dicts. Each frame dict contains: canvas_width, canvas_height, people: list of person dicts with keys: pose_keypoints_2d - 18 body kp as flat [x,y,score,...] (absolute pixels) foot_keypoints_2d - 6 foot kp as flat [x,y,score,...] (absolute pixels) face_keypoints_2d - 70 face kp as flat [x,y,score,...] 
(absolute pixels) indices 0-67: 68 face landmarks index 68: right eye (body[14]) index 69: left eye (body[15]) hand_right_keypoints_2d - 21 right-hand kp (absolute pixels) hand_left_keypoints_2d - 21 left-hand kp (absolute pixels) """ def _flatten(kp_slice, sc_slice): return np.stack([kp_slice[:, 0], kp_slice[:, 1], sc_slice], axis=1).flatten().tolist() frames = [] for img_idx in range(len(all_keypoints)): people = [] for kp_raw, sc_raw in zip(all_keypoints[img_idx], all_scores[img_idx]): kp, sc = _preprocess_keypoints(kp_raw, sc_raw) # 70 face kp = 68 face landmarks + REye (body[14]) + LEye (body[15]) face_kp = np.concatenate([kp[24:92], kp[[14, 15]]], axis=0) face_sc = np.concatenate([sc[24:92], sc[[14, 15]]], axis=0) people.append({ "pose_keypoints_2d": _flatten(kp[0:18], sc[0:18]), "foot_keypoints_2d": _flatten(kp[18:24], sc[18:24]), "face_keypoints_2d": _flatten(face_kp, face_sc), "hand_right_keypoints_2d": _flatten(kp[92:113], sc[92:113]), "hand_left_keypoints_2d": _flatten(kp[113:134], sc[113:134]), }) frames.append({"canvas_width": width, "canvas_height": height, "people": people}) return frames class KeypointDraw: """ Pose keypoint drawing class that supports both numpy and cv2 backends. 
""" def __init__(self): try: import cv2 self.draw = cv2 except ImportError: self.draw = self # Hand connections (same for both hands) self.hand_edges = [ [0, 1], [1, 2], [2, 3], [3, 4], # thumb [0, 5], [5, 6], [6, 7], [7, 8], # index [0, 9], [9, 10], [10, 11], [11, 12], # middle [0, 13], [13, 14], [14, 15], [15, 16], # ring [0, 17], [17, 18], [18, 19], [19, 20], # pinky ] # Body connections - matching DWPose limbSeq (1-indexed, converted to 0-indexed) self.body_limbSeq = [ [2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], [1, 16], [16, 18] ] # Colors matching DWPose self.colors = [ [255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85] ] @staticmethod def circle(canvas_np, center, radius, color, **kwargs): """Draw a filled circle using NumPy vectorized operations.""" cx, cy = center h, w = canvas_np.shape[:2] radius_int = int(np.ceil(radius)) y_min, y_max = max(0, cy - radius_int), min(h, cy + radius_int + 1) x_min, x_max = max(0, cx - radius_int), min(w, cx + radius_int + 1) if y_max <= y_min or x_max <= x_min: return y, x = np.ogrid[y_min:y_max, x_min:x_max] mask = (x - cx)**2 + (y - cy)**2 <= radius**2 canvas_np[y_min:y_max, x_min:x_max][mask] = color @staticmethod def line(canvas_np, pt1, pt2, color, thickness=1, **kwargs): """Draw line using Bresenham's algorithm with NumPy operations.""" x0, y0, x1, y1 = *pt1, *pt2 h, w = canvas_np.shape[:2] dx, dy = abs(x1 - x0), abs(y1 - y0) sx, sy = (1 if x0 < x1 else -1), (1 if y0 < y1 else -1) err, x, y, line_points = dx - dy, x0, y0, [] while True: line_points.append((x, y)) if x == x1 and y == y1: break e2 = 2 * err if e2 > -dy: err, x = err - dy, x + sx if e2 < dx: err, y = err + dx, y + sy if thickness > 1: radius, radius_int = (thickness 
/ 2.0) + 0.5, int(np.ceil((thickness / 2.0) + 0.5)) for px, py in line_points: y_min, y_max, x_min, x_max = max(0, py - radius_int), min(h, py + radius_int + 1), max(0, px - radius_int), min(w, px + radius_int + 1) if y_max > y_min and x_max > x_min: yy, xx = np.ogrid[y_min:y_max, x_min:x_max] canvas_np[y_min:y_max, x_min:x_max][(xx - px)**2 + (yy - py)**2 <= radius**2] = color else: line_points = np.array(line_points) valid = (line_points[:, 1] >= 0) & (line_points[:, 1] < h) & (line_points[:, 0] >= 0) & (line_points[:, 0] < w) if (valid_points := line_points[valid]).size: canvas_np[valid_points[:, 1], valid_points[:, 0]] = color @staticmethod def fillConvexPoly(canvas_np, pts, color, **kwargs): """Fill polygon using vectorized scanline algorithm.""" if len(pts) < 3: return pts = np.array(pts, dtype=np.int32) h, w = canvas_np.shape[:2] y_min, y_max, x_min, x_max = max(0, pts[:, 1].min()), min(h, pts[:, 1].max() + 1), max(0, pts[:, 0].min()), min(w, pts[:, 0].max() + 1) if y_max <= y_min or x_max <= x_min: return yy, xx = np.mgrid[y_min:y_max, x_min:x_max] mask = np.zeros((y_max - y_min, x_max - x_min), dtype=bool) for i in range(len(pts)): p1, p2 = pts[i], pts[(i + 1) % len(pts)] y1, y2 = p1[1], p2[1] if y1 == y2: continue if y1 > y2: p1, p2, y1, y2 = p2, p1, p2[1], p1[1] if not (edge_mask := (yy >= y1) & (yy < y2)).any(): continue mask ^= edge_mask & (xx >= p1[0] + (yy - y1) * (p2[0] - p1[0]) / (y2 - y1)) canvas_np[y_min:y_max, x_min:x_max][mask] = color @staticmethod def ellipse2Poly(center, axes, angle, arc_start, arc_end, delta=1, **kwargs): """Python implementation of cv2.ellipse2Poly.""" axes = (axes[0] + 0.5, axes[1] + 0.5) # to better match cv2 output angle = angle % 360 if arc_start > arc_end: arc_start, arc_end = arc_end, arc_start while arc_start < 0: arc_start, arc_end = arc_start + 360, arc_end + 360 while arc_end > 360: arc_end, arc_start = arc_end - 360, arc_start - 360 if arc_end - arc_start > 360: arc_start, arc_end = 0, 360 angle_rad = 
math.radians(angle) alpha, beta = math.cos(angle_rad), math.sin(angle_rad) pts = [] for i in range(arc_start, arc_end + delta, delta): theta_rad = math.radians(min(i, arc_end)) x, y = axes[0] * math.cos(theta_rad), axes[1] * math.sin(theta_rad) pts.append([int(round(center[0] + x * alpha - y * beta)), int(round(center[1] + x * beta + y * alpha))]) unique_pts, prev_pt = [], (float('inf'), float('inf')) for pt in pts: if (pt_tuple := tuple(pt)) != prev_pt: unique_pts.append(pt) prev_pt = pt_tuple return unique_pts if len(unique_pts) > 1 else [[center[0], center[1]], [center[0], center[1]]] def draw_wholebody_keypoints(self, canvas, keypoints, scores=None, threshold=0.3, draw_body=True, draw_feet=True, draw_face=True, draw_hands=True, stick_width=4, face_point_size=3): """ Draw wholebody keypoints (134 keypoints after processing) in DWPose style. Expected keypoint format (after neck insertion and remapping): - Body: 0-17 (18 keypoints in OpenPose format, neck at index 1) - Foot: 18-23 (6 keypoints) - Face: 24-91 (68 landmarks) - Right hand: 92-112 (21 keypoints) - Left hand: 113-133 (21 keypoints) Args: canvas: The canvas to draw on (numpy array) keypoints: Array of keypoint coordinates scores: Optional confidence scores for each keypoint threshold: Minimum confidence threshold for drawing keypoints Returns: canvas: The canvas with keypoints drawn """ H, W, C = canvas.shape # Draw body limbs if draw_body and len(keypoints) >= 18: for i, limb in enumerate(self.body_limbSeq): # Convert from 1-indexed to 0-indexed idx1, idx2 = limb[0] - 1, limb[1] - 1 if idx1 >= 18 or idx2 >= 18: continue if scores is not None: if scores[idx1] < threshold or scores[idx2] < threshold: continue Y = [keypoints[idx1][0], keypoints[idx2][0]] X = [keypoints[idx1][1], keypoints[idx2][1]] mX, mY = (X[0] + X[1]) / 2, (Y[0] + Y[1]) / 2 length = math.sqrt((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) if length < 1: continue angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) polygon = 
self.draw.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stick_width), int(angle), 0, 360, 1) self.draw.fillConvexPoly(canvas, polygon, self.colors[i % len(self.colors)]) # Draw body keypoints if draw_body and len(keypoints) >= 18: for i in range(18): if scores is not None and scores[i] < threshold: continue x, y = int(keypoints[i][0]), int(keypoints[i][1]) if 0 <= x < W and 0 <= y < H: self.draw.circle(canvas, (x, y), 4, self.colors[i % len(self.colors)], thickness=-1) # Draw foot keypoints (18-23, 6 keypoints) if draw_feet and len(keypoints) >= 24: for i in range(18, 24): if scores is not None and scores[i] < threshold: continue x, y = int(keypoints[i][0]), int(keypoints[i][1]) if 0 <= x < W and 0 <= y < H: self.draw.circle(canvas, (x, y), 4, self.colors[i % len(self.colors)], thickness=-1) # Draw right hand (92-112) if draw_hands and len(keypoints) >= 113: eps = 0.01 for ie, edge in enumerate(self.hand_edges): idx1, idx2 = 92 + edge[0], 92 + edge[1] if scores is not None: if scores[idx1] < threshold or scores[idx2] < threshold: continue x1, y1 = int(keypoints[idx1][0]), int(keypoints[idx1][1]) x2, y2 = int(keypoints[idx2][0]), int(keypoints[idx2][1]) if x1 > eps and y1 > eps and x2 > eps and y2 > eps: if 0 <= x1 < W and 0 <= y1 < H and 0 <= x2 < W and 0 <= y2 < H: # HSV to RGB conversion for rainbow colors r, g, b = colorsys.hsv_to_rgb(ie / float(len(self.hand_edges)), 1.0, 1.0) color = (int(r * 255), int(g * 255), int(b * 255)) self.draw.line(canvas, (x1, y1), (x2, y2), color, thickness=2) # Draw right hand keypoints for i in range(92, 113): if scores is not None and scores[i] < threshold: continue x, y = int(keypoints[i][0]), int(keypoints[i][1]) if x > eps and y > eps and 0 <= x < W and 0 <= y < H: self.draw.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1) # Draw left hand (113-133) if draw_hands and len(keypoints) >= 134: eps = 0.01 for ie, edge in enumerate(self.hand_edges): idx1, idx2 = 113 + edge[0], 113 + edge[1] if scores is not None: if 
scores[idx1] < threshold or scores[idx2] < threshold: continue x1, y1 = int(keypoints[idx1][0]), int(keypoints[idx1][1]) x2, y2 = int(keypoints[idx2][0]), int(keypoints[idx2][1]) if x1 > eps and y1 > eps and x2 > eps and y2 > eps: if 0 <= x1 < W and 0 <= y1 < H and 0 <= x2 < W and 0 <= y2 < H: # HSV to RGB conversion for rainbow colors r, g, b = colorsys.hsv_to_rgb(ie / float(len(self.hand_edges)), 1.0, 1.0) color = (int(r * 255), int(g * 255), int(b * 255)) self.draw.line(canvas, (x1, y1), (x2, y2), color, thickness=2) # Draw left hand keypoints for i in range(113, 134): if scores is not None and i < len(scores) and scores[i] < threshold: continue x, y = int(keypoints[i][0]), int(keypoints[i][1]) if x > eps and y > eps and 0 <= x < W and 0 <= y < H: self.draw.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1) # Draw face keypoints (24-91) - white dots only, no lines if draw_face and len(keypoints) >= 92: eps = 0.01 for i in range(24, 92): if scores is not None and scores[i] < threshold: continue x, y = int(keypoints[i][0]), int(keypoints[i][1]) if x > eps and y > eps and 0 <= x < W and 0 <= y < H: self.draw.circle(canvas, (x, y), face_point_size, (255, 255, 255), thickness=-1) return canvas class SDPoseDrawKeypoints(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="SDPoseDrawKeypoints", category="image/preprocessors", search_aliases=["openpose", "pose detection", "preprocessor", "keypoints", "pose"], inputs=[ io.Custom("POSE_KEYPOINT").Input("keypoints"), io.Boolean.Input("draw_body", default=True), io.Boolean.Input("draw_hands", default=True), io.Boolean.Input("draw_face", default=True), io.Boolean.Input("draw_feet", default=False), io.Int.Input("stick_width", default=4, min=1, max=10, step=1), io.Int.Input("face_point_size", default=3, min=1, max=10, step=1), io.Float.Input("score_threshold", default=0.3, min=0.0, max=1.0, step=0.01), ], outputs=[ io.Image.Output(), ], ) @classmethod def execute(cls, keypoints, draw_body, 
draw_hands, draw_face, draw_feet, stick_width, face_point_size, score_threshold) -> io.NodeOutput: if not keypoints: return io.NodeOutput(torch.zeros((1, 64, 64, 3), dtype=torch.float32)) height = keypoints[0]["canvas_height"] width = keypoints[0]["canvas_width"] def _parse(flat, n): arr = np.array(flat, dtype=np.float32).reshape(n, 3) return arr[:, :2], arr[:, 2] def _zeros(n): return np.zeros((n, 2), dtype=np.float32), np.zeros(n, dtype=np.float32) pose_outputs = [] drawer = KeypointDraw() for frame in tqdm(keypoints, desc="Drawing keypoints on frames"): canvas = np.zeros((height, width, 3), dtype=np.uint8) for person in frame["people"]: body_kp, body_sc = _parse(person["pose_keypoints_2d"], 18) foot_raw = person.get("foot_keypoints_2d") foot_kp, foot_sc = _parse(foot_raw, 6) if foot_raw else _zeros(6) face_kp, face_sc = _parse(person["face_keypoints_2d"], 70) face_kp, face_sc = face_kp[:68], face_sc[:68] # drop appended eye kp; body already draws them rhand_kp, rhand_sc = _parse(person["hand_right_keypoints_2d"], 21) lhand_kp, lhand_sc = _parse(person["hand_left_keypoints_2d"], 21) kp = np.concatenate([body_kp, foot_kp, face_kp, rhand_kp, lhand_kp], axis=0) sc = np.concatenate([body_sc, foot_sc, face_sc, rhand_sc, lhand_sc], axis=0) canvas = drawer.draw_wholebody_keypoints( canvas, kp, sc, threshold=score_threshold, draw_body=draw_body, draw_feet=draw_feet, draw_face=draw_face, draw_hands=draw_hands, stick_width=stick_width, face_point_size=face_point_size, ) pose_outputs.append(canvas) pose_outputs_np = np.stack(pose_outputs) if len(pose_outputs) > 1 else np.expand_dims(pose_outputs[0], 0) final_pose_output = torch.from_numpy(pose_outputs_np).float() / 255.0 return io.NodeOutput(final_pose_output) class SDPoseKeypointExtractor(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="SDPoseKeypointExtractor", category="image/preprocessors", search_aliases=["openpose", "pose detection", "preprocessor", "keypoints", "sdpose"], 
description="Extract pose keypoints from images using the SDPose model: https://huggingface.co/Comfy-Org/SDPose/tree/main/checkpoints", inputs=[ io.Model.Input("model"), io.Vae.Input("vae"), io.Image.Input("image"), io.Int.Input("batch_size", default=16, min=1, max=10000, step=1), io.BoundingBox.Input("bboxes", optional=True, force_input=True, tooltip="Optional bounding boxes for more accurate detections. Required for multi-person detection."), ], outputs=[ io.Custom("POSE_KEYPOINT").Output("keypoints", tooltip="Keypoints in OpenPose frame format (canvas_width, canvas_height, people)"), ], ) @classmethod def execute(cls, model, vae, image, batch_size, bboxes=None) -> io.NodeOutput: height, width = image.shape[-3], image.shape[-2] context = LotusConditioning().execute().result[0] # Use output_block_patch to capture the last 640-channel feature def output_patch(h, hsp, transformer_options): nonlocal captured_feat if h.shape[1] == 640: # Capture the features for wholebody captured_feat = h.clone() return h, hsp model_clone = model.clone() model_clone.model_options["transformer_options"] = {"patches": {"output_block_patch": [output_patch]}} if not hasattr(model.model.diffusion_model, 'heatmap_head'): raise ValueError("The provided model does not have a heatmap_head. Please use SDPose model from here https://huggingface.co/Comfy-Org/SDPose/tree/main/checkpoints.") head = model.model.diffusion_model.heatmap_head total_images = image.shape[0] captured_feat = None model_h = int(head.heatmap_size[0]) * 4 # e.g. 192 * 4 = 768 model_w = int(head.heatmap_size[1]) * 4 # e.g. 
256 * 4 = 1024 def _run_on_latent(latent_batch): """Run one forward pass and return (keypoints_list, scores_list) for the batch.""" nonlocal captured_feat captured_feat = None _ = comfy.sample.sample( model_clone, noise=torch.zeros_like(latent_batch), steps=1, cfg=1.0, sampler_name="euler", scheduler="simple", positive=context, negative=context, latent_image=latent_batch, disable_noise=True, disable_pbar=True, ) return head(captured_feat) # keypoints_batch, scores_batch # all_keypoints / all_scores are lists-of-lists: # outer index = input image index # inner index = detected person (one per bbox, or one for full-image) all_keypoints = [] # shape: [n_images][n_persons] all_scores = [] # shape: [n_images][n_persons] pbar = comfy.utils.ProgressBar(total_images) if bboxes is not None: if not isinstance(bboxes, list): bboxes = [[bboxes]] elif len(bboxes) == 0: bboxes = [None] * total_images # --- bbox-crop mode: one forward pass per crop ------------------------- for img_idx in tqdm(range(total_images), desc="Extracting keypoints from crops"): img = image[img_idx:img_idx + 1] # (1, H, W, C) # Broadcasting: if fewer bbox lists than images, repeat the last one. img_bboxes = bboxes[min(img_idx, len(bboxes) - 1)] if bboxes else None img_keypoints = [] img_scores = [] if img_bboxes: for bbox in img_bboxes: x1 = max(0, int(bbox["x"])) y1 = max(0, int(bbox["y"])) x2 = min(width, int(bbox["x"] + bbox["width"])) y2 = min(height, int(bbox["y"] + bbox["height"])) if x2 <= x1 or y2 <= y1: continue crop_h_px, crop_w_px = y2 - y1, x2 - x1 crop = img[:, y1:y2, x1:x2, :] # (1, crop_h, crop_w, C) # scale to fit inside (model_h, model_w) while preserving aspect ratio, then pad to exact model size. 
scale = min(model_h / crop_h_px, model_w / crop_w_px) scaled_h, scaled_w = int(round(crop_h_px * scale)), int(round(crop_w_px * scale)) pad_top, pad_left = (model_h - scaled_h) // 2, (model_w - scaled_w) // 2 crop_chw = crop.permute(0, 3, 1, 2).float() # BHWC → BCHW scaled = comfy.utils.common_upscale(crop_chw, scaled_w, scaled_h, upscale_method="bilinear", crop="disabled") padded = torch.zeros(1, scaled.shape[1], model_h, model_w, dtype=scaled.dtype, device=scaled.device) padded[:, :, pad_top:pad_top + scaled_h, pad_left:pad_left + scaled_w] = scaled crop_resized = padded.permute(0, 2, 3, 1) # BCHW → BHWC latent_crop = vae.encode(crop_resized) kp_batch, sc_batch = _run_on_latent(latent_crop) kp, sc = kp_batch[0], sc_batch[0] # (K, 2), coords in model pixel space # remove padding offset, undo scale, offset to full-image coordinates. kp = kp.copy() if isinstance(kp, np.ndarray) else np.array(kp, dtype=np.float32) kp[..., 0] = (kp[..., 0] - pad_left) / scale + x1 kp[..., 1] = (kp[..., 1] - pad_top) / scale + y1 img_keypoints.append(kp) img_scores.append(sc) else: # No bboxes for this image – run on the full image latent_img = vae.encode(img) kp_batch, sc_batch = _run_on_latent(latent_img) img_keypoints.append(kp_batch[0]) img_scores.append(sc_batch[0]) all_keypoints.append(img_keypoints) all_scores.append(img_scores) pbar.update(1) else: # full-image mode, batched tqdm_pbar = tqdm(total=total_images, desc="Extracting keypoints") for batch_start in range(0, total_images, batch_size): batch_end = min(batch_start + batch_size, total_images) latent_batch = vae.encode(image[batch_start:batch_end]) kp_batch, sc_batch = _run_on_latent(latent_batch) for kp, sc in zip(kp_batch, sc_batch): all_keypoints.append([kp]) all_scores.append([sc]) tqdm_pbar.update(1) pbar.update(batch_end - batch_start) openpose_frames = _to_openpose_frames(all_keypoints, all_scores, height, width) return io.NodeOutput(openpose_frames) def get_face_bboxes(kp2ds, scale, image_shape): h, w = image_shape 
kp2ds_face = kp2ds.copy()[1:] * (w, h) min_x, min_y = np.min(kp2ds_face, axis=0) max_x, max_y = np.max(kp2ds_face, axis=0) initial_width = max_x - min_x initial_height = max_y - min_y if initial_width <= 0 or initial_height <= 0: return [0, 0, 0, 0] initial_area = initial_width * initial_height expanded_area = initial_area * scale new_width = np.sqrt(expanded_area * (initial_width / initial_height)) new_height = np.sqrt(expanded_area * (initial_height / initial_width)) delta_width = (new_width - initial_width) / 2 delta_height = (new_height - initial_height) / 4 expanded_min_x = max(min_x - delta_width, 0) expanded_max_x = min(max_x + delta_width, w) expanded_min_y = max(min_y - 3 * delta_height, 0) expanded_max_y = min(max_y + delta_height, h) return [int(expanded_min_x), int(expanded_max_x), int(expanded_min_y), int(expanded_max_y)] class SDPoseFaceBBoxes(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="SDPoseFaceBBoxes", category="image/preprocessors", search_aliases=["face bbox", "face bounding box", "pose", "keypoints"], inputs=[ io.Custom("POSE_KEYPOINT").Input("keypoints"), io.Float.Input("scale", default=1.5, min=1.0, max=10.0, step=0.1, tooltip="Multiplier for the bounding box area around each detected face."), io.Boolean.Input("force_square", default=True, tooltip="Expand the shorter bbox axis so the crop region is always square."), ], outputs=[ io.BoundingBox.Output("bboxes", tooltip="Face bounding boxes per frame, compatible with SDPoseKeypointExtractor bboxes input."), ], ) @classmethod def execute(cls, keypoints, scale, force_square) -> io.NodeOutput: all_bboxes = [] for frame in keypoints: h = frame["canvas_height"] w = frame["canvas_width"] frame_bboxes = [] for person in frame["people"]: face_flat = person.get("face_keypoints_2d", []) if not face_flat: continue # Parse absolute-pixel face keypoints (70 kp: 68 landmarks + REye + LEye) face_arr = np.array(face_flat, dtype=np.float32).reshape(-1, 3) face_xy = face_arr[:, 
:2] # (70, 2) in absolute pixels kp_norm = face_xy / np.array([w, h], dtype=np.float32) kp_padded = np.vstack([np.zeros((1, 2), dtype=np.float32), kp_norm]) # (71, 2) x1, x2, y1, y2 = get_face_bboxes(kp_padded, scale, (h, w)) if x2 > x1 and y2 > y1: if force_square: bw, bh = x2 - x1, y2 - y1 if bw != bh: side = max(bw, bh) cx, cy = (x1 + x2) // 2, (y1 + y2) // 2 half = side // 2 x1 = max(0, cx - half) y1 = max(0, cy - half) x2 = min(w, x1 + side) y2 = min(h, y1 + side) # Re-anchor if clamped x1 = max(0, x2 - side) y1 = max(0, y2 - side) frame_bboxes.append({"x": x1, "y": y1, "width": x2 - x1, "height": y2 - y1}) all_bboxes.append(frame_bboxes) return io.NodeOutput(all_bboxes) class CropByBBoxes(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="CropByBBoxes", category="image/preprocessors", search_aliases=["crop", "face crop", "bbox crop", "pose", "bounding box"], description="Crop and resize regions from the input image batch based on provided bounding boxes.", inputs=[ io.Image.Input("image"), io.BoundingBox.Input("bboxes", force_input=True), io.Int.Input("output_width", default=512, min=64, max=4096, step=8, tooltip="Width each crop is resized to."), io.Int.Input("output_height", default=512, min=64, max=4096, step=8, tooltip="Height each crop is resized to."), io.Int.Input("padding", default=0, min=0, max=1024, step=1, tooltip="Extra padding in pixels added on each side of the bbox before cropping."), ], outputs=[ io.Image.Output(tooltip="All crops stacked into a single image batch."), ], ) @classmethod def execute(cls, image, bboxes, output_width, output_height, padding) -> io.NodeOutput: total_frames = image.shape[0] img_h = image.shape[1] img_w = image.shape[2] num_ch = image.shape[3] if not isinstance(bboxes, list): bboxes = [[bboxes]] elif len(bboxes) == 0: return io.NodeOutput(image) crops = [] for frame_idx in range(total_frames): frame_bboxes = bboxes[min(frame_idx, len(bboxes) - 1)] if not frame_bboxes: continue frame_chw = 
image[frame_idx].permute(2, 0, 1).unsqueeze(0) # BHWC → BCHW (1, C, H, W) # Union all bboxes for this frame into a single crop region x1 = min(int(b["x"]) for b in frame_bboxes) y1 = min(int(b["y"]) for b in frame_bboxes) x2 = max(int(b["x"] + b["width"]) for b in frame_bboxes) y2 = max(int(b["y"] + b["height"]) for b in frame_bboxes) if padding > 0: x1 = max(0, x1 - padding) y1 = max(0, y1 - padding) x2 = min(img_w, x2 + padding) y2 = min(img_h, y2 + padding) x1, x2 = max(0, x1), min(img_w, x2) y1, y2 = max(0, y1), min(img_h, y2) # Fallback for empty/degenerate crops if x2 <= x1 or y2 <= y1: fallback_size = int(min(img_h, img_w) * 0.3) fb_x1 = max(0, (img_w - fallback_size) // 2) fb_y1 = max(0, int(img_h * 0.1)) fb_x2 = min(img_w, fb_x1 + fallback_size) fb_y2 = min(img_h, fb_y1 + fallback_size) if fb_x2 <= fb_x1 or fb_y2 <= fb_y1: crops.append(torch.zeros(1, num_ch, output_height, output_width, dtype=image.dtype, device=image.device)) continue x1, y1, x2, y2 = fb_x1, fb_y1, fb_x2, fb_y2 crop_chw = frame_chw[:, :, y1:y2, x1:x2] # (1, C, crop_h, crop_w) resized = comfy.utils.common_upscale(crop_chw, output_width, output_height, upscale_method="bilinear", crop="disabled") crops.append(resized) if not crops: return io.NodeOutput(image) out_images = torch.cat(crops, dim=0).permute(0, 2, 3, 1) # (N, H, W, C) return io.NodeOutput(out_images) class SDPoseExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[io.ComfyNode]]: return [ SDPoseKeypointExtractor, SDPoseDrawKeypoints, SDPoseFaceBBoxes, CropByBBoxes, ] async def comfy_entrypoint() -> SDPoseExtension: return SDPoseExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_sdpose.py", "license": "GNU General Public License v3.0", "lines": 619, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy_api_nodes/apis/elevenlabs.py
from pydantic import BaseModel, Field class SpeechToTextRequest(BaseModel): model_id: str = Field(...) cloud_storage_url: str = Field(...) language_code: str | None = Field(None, description="ISO-639-1 or ISO-639-3 language code") tag_audio_events: bool | None = Field(None, description="Annotate sounds like (laughter) in transcript") num_speakers: int | None = Field(None, description="Max speakers predicted") timestamps_granularity: str = Field(default="word", description="Timing precision: none, word, or character") diarize: bool | None = Field(None, description="Annotate which speaker is talking") diarization_threshold: float | None = Field(None, description="Speaker separation sensitivity") temperature: float | None = Field(None, description="Randomness control") seed: int = Field(..., description="Seed for deterministic sampling") class SpeechToTextWord(BaseModel): text: str = Field(..., description="The word text") type: str = Field(default="word", description="Type of text element (word, spacing, etc.)") start: float | None = Field(None, description="Start time in seconds (when timestamps enabled)") end: float | None = Field(None, description="End time in seconds (when timestamps enabled)") speaker_id: str | None = Field(None, description="Speaker identifier when diarization is enabled") logprob: float | None = Field(None, description="Log probability of the word") class SpeechToTextResponse(BaseModel): language_code: str = Field(..., description="Detected or specified language code") language_probability: float | None = Field(None, description="Confidence of language detection") text: str = Field(..., description="Full transcript text") words: list[SpeechToTextWord] | None = Field(None, description="Word-level timing information") class TextToSpeechVoiceSettings(BaseModel): stability: float | None = Field(None, description="Voice stability") similarity_boost: float | None = Field(None, description="Similarity boost") style: float | None = Field(None, 
description="Style exaggeration") use_speaker_boost: bool | None = Field(None, description="Boost similarity to original speaker") speed: float | None = Field(None, description="Speech speed") class TextToSpeechRequest(BaseModel): text: str = Field(..., description="Text to convert to speech") model_id: str = Field(..., description="Model ID for TTS") language_code: str | None = Field(None, description="ISO-639-1 or ISO-639-3 language code") voice_settings: TextToSpeechVoiceSettings | None = Field(None, description="Voice settings") seed: int = Field(..., description="Seed for deterministic sampling") apply_text_normalization: str | None = Field(None, description="Text normalization mode: auto, on, off") class TextToSoundEffectsRequest(BaseModel): text: str = Field(..., description="Text prompt to convert into a sound effect") duration_seconds: float = Field(..., description="Duration of generated sound in seconds") prompt_influence: float = Field(..., description="How closely generation follows the prompt") loop: bool | None = Field(None, description="Whether to create a smoothly looping sound effect") class AddVoiceRequest(BaseModel): name: str = Field(..., description="Name that identifies the voice") remove_background_noise: bool = Field(..., description="Remove background noise from voice samples") class AddVoiceResponse(BaseModel): voice_id: str = Field(..., description="The newly created voice's unique identifier") class SpeechToSpeechRequest(BaseModel): model_id: str = Field(..., description="Model ID for speech-to-speech") voice_settings: str = Field(..., description="JSON string of voice settings") seed: int = Field(..., description="Seed for deterministic sampling") remove_background_noise: bool = Field(..., description="Remove background noise from input audio") class DialogueInput(BaseModel): text: str = Field(..., description="Text content to convert to speech") voice_id: str = Field(..., description="Voice identifier for this dialogue segment") class 
DialogueSettings(BaseModel): stability: float | None = Field(None, description="Voice stability (0-1)") class TextToDialogueRequest(BaseModel): inputs: list[DialogueInput] = Field(..., description="List of dialogue segments") model_id: str = Field(..., description="Model ID for dialogue generation") language_code: str | None = Field(None, description="ISO-639-1 language code") settings: DialogueSettings | None = Field(None, description="Voice settings") seed: int | None = Field(None, description="Seed for deterministic sampling") apply_text_normalization: str | None = Field(None, description="Text normalization mode: auto, on, off")
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/apis/elevenlabs.py", "license": "GNU General Public License v3.0", "lines": 64, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_api_nodes/nodes_elevenlabs.py
import json import uuid from typing_extensions import override from comfy_api.latest import IO, ComfyExtension, Input from comfy_api_nodes.apis.elevenlabs import ( AddVoiceRequest, AddVoiceResponse, DialogueInput, DialogueSettings, SpeechToSpeechRequest, SpeechToTextRequest, SpeechToTextResponse, TextToDialogueRequest, TextToSoundEffectsRequest, TextToSpeechRequest, TextToSpeechVoiceSettings, ) from comfy_api_nodes.util import ( ApiEndpoint, audio_bytes_to_audio_input, audio_ndarray_to_bytesio, audio_tensor_to_contiguous_ndarray, sync_op, sync_op_raw, upload_audio_to_comfyapi, validate_string, ) ELEVENLABS_MUSIC_SECTIONS = "ELEVENLABS_MUSIC_SECTIONS" # Custom type for music sections ELEVENLABS_COMPOSITION_PLAN = "ELEVENLABS_COMPOSITION_PLAN" # Custom type for composition plan ELEVENLABS_VOICE = "ELEVENLABS_VOICE" # Custom type for voice selection # Predefined ElevenLabs voices: (voice_id, display_name, gender, accent) ELEVENLABS_VOICES = [ ("CwhRBWXzGAHq8TQ4Fs17", "Roger", "male", "american"), ("EXAVITQu4vr4xnSDxMaL", "Sarah", "female", "american"), ("FGY2WhTYpPnrIDTdsKH5", "Laura", "female", "american"), ("IKne3meq5aSn9XLyUdCD", "Charlie", "male", "australian"), ("JBFqnCBsd6RMkjVDRZzb", "George", "male", "british"), ("N2lVS1w4EtoT3dr4eOWO", "Callum", "male", "american"), ("SAz9YHcvj6GT2YYXdXww", "River", "neutral", "american"), ("SOYHLrjzK2X1ezoPC6cr", "Harry", "male", "american"), ("TX3LPaxmHKxFdv7VOQHJ", "Liam", "male", "american"), ("Xb7hH8MSUJpSbSDYk0k2", "Alice", "female", "british"), ("XrExE9yKIg1WjnnlVkGX", "Matilda", "female", "american"), ("bIHbv24MWmeRgasZH58o", "Will", "male", "american"), ("cgSgspJ2msm6clMCkdW9", "Jessica", "female", "american"), ("cjVigY5qzO86Huf0OWal", "Eric", "male", "american"), ("hpp4J3VqNfWAUOO0d1Us", "Bella", "female", "american"), ("iP95p4xoKVk53GoZ742B", "Chris", "male", "american"), ("nPczCjzI2devNBz1zQrb", "Brian", "male", "american"), ("onwK4e9ZLuTAKqWW03F9", "Daniel", "male", "british"), ("pFZP5JQG7iQjIQuC4Bku", "Lily", 
"female", "british"), ("pNInz6obpgDQGcFmaJgB", "Adam", "male", "american"), ("pqHfZKP75CvOlQylNhV4", "Bill", "male", "american"), ] ELEVENLABS_VOICE_OPTIONS = [f"{name} ({gender}, {accent})" for _, name, gender, accent in ELEVENLABS_VOICES] ELEVENLABS_VOICE_MAP = { f"{name} ({gender}, {accent})": voice_id for voice_id, name, gender, accent in ELEVENLABS_VOICES } class ElevenLabsSpeechToText(IO.ComfyNode): @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="ElevenLabsSpeechToText", display_name="ElevenLabs Speech to Text", category="api node/audio/ElevenLabs", description="Transcribe audio to text. " "Supports automatic language detection, speaker diarization, and audio event tagging.", inputs=[ IO.Audio.Input( "audio", tooltip="Audio to transcribe.", ), IO.DynamicCombo.Input( "model", options=[ IO.DynamicCombo.Option( "scribe_v2", [ IO.Boolean.Input( "tag_audio_events", default=False, tooltip="Annotate sounds like (laughter), (music), etc. in transcript.", ), IO.Boolean.Input( "diarize", default=False, tooltip="Annotate which speaker is talking.", ), IO.Float.Input( "diarization_threshold", default=0.22, min=0.1, max=0.4, step=0.01, display_mode=IO.NumberDisplay.slider, tooltip="Speaker separation sensitivity. " "Lower values are more sensitive to speaker changes.", ), IO.Float.Input( "temperature", default=0.0, min=0.0, max=2.0, step=0.01, display_mode=IO.NumberDisplay.slider, tooltip="Randomness control. " "0.0 uses model default. Higher values increase randomness.", ), IO.Combo.Input( "timestamps_granularity", options=["word", "character", "none"], default="word", tooltip="Timing precision for transcript words.", ), ], ), ], tooltip="Model to use for transcription.", ), IO.String.Input( "language_code", default="", tooltip="ISO-639-1 or ISO-639-3 language code (e.g., 'en', 'es', 'fra'). 
" "Leave empty for automatic detection.", ), IO.Int.Input( "num_speakers", default=0, min=0, max=32, display_mode=IO.NumberDisplay.slider, tooltip="Maximum number of speakers to predict. Set to 0 for automatic detection.", ), IO.Int.Input( "seed", default=1, min=0, max=2147483647, tooltip="Seed for reproducibility (determinism not guaranteed).", ), ], outputs=[ IO.String.Output(display_name="text"), IO.String.Output(display_name="language_code"), IO.String.Output(display_name="words_json"), ], hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, price_badge=IO.PriceBadge( expr="""{"type":"usd","usd":0.0073,"format":{"approximate":true,"suffix":"/minute"}}""", ), ) @classmethod async def execute( cls, audio: Input.Audio, model: dict, language_code: str, num_speakers: int, seed: int, ) -> IO.NodeOutput: if model["diarize"] and num_speakers: raise ValueError( "Number of speakers cannot be specified when diarization is enabled. " "Either disable diarization or set num_speakers to 0." 
) request = SpeechToTextRequest( model_id=model["model"], cloud_storage_url=await upload_audio_to_comfyapi( cls, audio, container_format="mp4", codec_name="aac", mime_type="audio/mp4" ), language_code=language_code if language_code.strip() else None, tag_audio_events=model["tag_audio_events"], num_speakers=num_speakers if num_speakers > 0 else None, timestamps_granularity=model["timestamps_granularity"], diarize=model["diarize"], diarization_threshold=model["diarization_threshold"] if model["diarize"] else None, seed=seed, temperature=model["temperature"], ) response = await sync_op( cls, ApiEndpoint(path="/proxy/elevenlabs/v1/speech-to-text", method="POST"), response_model=SpeechToTextResponse, data=request, content_type="multipart/form-data", ) words_json = json.dumps( [w.model_dump(exclude_none=True) for w in response.words] if response.words else [], indent=2, ) return IO.NodeOutput(response.text, response.language_code, words_json) class ElevenLabsVoiceSelector(IO.ComfyNode): @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="ElevenLabsVoiceSelector", display_name="ElevenLabs Voice Selector", category="api node/audio/ElevenLabs", description="Select a predefined ElevenLabs voice for text-to-speech generation.", inputs=[ IO.Combo.Input( "voice", options=ELEVENLABS_VOICE_OPTIONS, tooltip="Choose a voice from the predefined ElevenLabs voices.", ), ], outputs=[ IO.Custom(ELEVENLABS_VOICE).Output(display_name="voice"), ], is_api_node=False, ) @classmethod def execute(cls, voice: str) -> IO.NodeOutput: voice_id = ELEVENLABS_VOICE_MAP.get(voice) if not voice_id: raise ValueError(f"Unknown voice: {voice}") return IO.NodeOutput(voice_id) class ElevenLabsTextToSpeech(IO.ComfyNode): @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="ElevenLabsTextToSpeech", display_name="ElevenLabs Text to Speech", category="api node/audio/ElevenLabs", description="Convert text to speech.", inputs=[ IO.Custom(ELEVENLABS_VOICE).Input( 
"voice", tooltip="Voice to use for speech synthesis. Connect from Voice Selector or Instant Voice Clone.", ), IO.String.Input( "text", multiline=True, default="", tooltip="The text to convert to speech.", ), IO.Float.Input( "stability", default=0.5, min=0.0, max=1.0, step=0.01, display_mode=IO.NumberDisplay.slider, tooltip="Voice stability. Lower values give broader emotional range, " "higher values produce more consistent but potentially monotonous speech.", ), IO.Combo.Input( "apply_text_normalization", options=["auto", "on", "off"], tooltip="Text normalization mode. 'auto' lets the system decide, " "'on' always applies normalization, 'off' skips it.", ), IO.DynamicCombo.Input( "model", options=[ IO.DynamicCombo.Option( "eleven_multilingual_v2", [ IO.Float.Input( "speed", default=1.0, min=0.7, max=1.3, step=0.01, display_mode=IO.NumberDisplay.slider, tooltip="Speech speed. 1.0 is normal, <1.0 slower, >1.0 faster.", ), IO.Float.Input( "similarity_boost", default=0.75, min=0.0, max=1.0, step=0.01, display_mode=IO.NumberDisplay.slider, tooltip="Similarity boost. Higher values make the voice more similar to the original.", ), IO.Boolean.Input( "use_speaker_boost", default=False, tooltip="Boost similarity to the original speaker voice.", ), IO.Float.Input( "style", default=0.0, min=0.0, max=0.2, step=0.01, display_mode=IO.NumberDisplay.slider, tooltip="Style exaggeration. Higher values increase stylistic expression " "but may reduce stability.", ), ], ), IO.DynamicCombo.Option( "eleven_v3", [ IO.Float.Input( "speed", default=1.0, min=0.7, max=1.3, step=0.01, display_mode=IO.NumberDisplay.slider, tooltip="Speech speed. 1.0 is normal, <1.0 slower, >1.0 faster.", ), IO.Float.Input( "similarity_boost", default=0.75, min=0.0, max=1.0, step=0.01, display_mode=IO.NumberDisplay.slider, tooltip="Similarity boost. 
Higher values make the voice more similar to the original.", ), ], ), ], tooltip="Model to use for text-to-speech.", ), IO.String.Input( "language_code", default="", tooltip="ISO-639-1 or ISO-639-3 language code (e.g., 'en', 'es', 'fra'). " "Leave empty for automatic detection.", ), IO.Int.Input( "seed", default=1, min=0, max=2147483647, tooltip="Seed for reproducibility (determinism not guaranteed).", ), IO.Combo.Input( "output_format", options=["mp3_44100_192", "opus_48000_192"], tooltip="Audio output format.", ), ], outputs=[ IO.Audio.Output(), ], hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, price_badge=IO.PriceBadge( expr="""{"type":"usd","usd":0.24,"format":{"approximate":true,"suffix":"/1K chars"}}""", ), ) @classmethod async def execute( cls, voice: str, text: str, stability: float, apply_text_normalization: str, model: dict, language_code: str, seed: int, output_format: str, ) -> IO.NodeOutput: validate_string(text, min_length=1) request = TextToSpeechRequest( text=text, model_id=model["model"], language_code=language_code if language_code.strip() else None, voice_settings=TextToSpeechVoiceSettings( stability=stability, similarity_boost=model["similarity_boost"], speed=model["speed"], use_speaker_boost=model.get("use_speaker_boost", None), style=model.get("style", None), ), seed=seed, apply_text_normalization=apply_text_normalization, ) response = await sync_op_raw( cls, ApiEndpoint( path=f"/proxy/elevenlabs/v1/text-to-speech/{voice}", method="POST", query_params={"output_format": output_format}, ), data=request, as_binary=True, ) return IO.NodeOutput(audio_bytes_to_audio_input(response)) class ElevenLabsAudioIsolation(IO.ComfyNode): @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="ElevenLabsAudioIsolation", display_name="ElevenLabs Voice Isolation", category="api node/audio/ElevenLabs", description="Remove background noise from audio, isolating vocals or 
speech.", inputs=[ IO.Audio.Input( "audio", tooltip="Audio to process for background noise removal.", ), ], outputs=[ IO.Audio.Output(), ], hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, price_badge=IO.PriceBadge( expr="""{"type":"usd","usd":0.24,"format":{"approximate":true,"suffix":"/minute"}}""", ), ) @classmethod async def execute( cls, audio: Input.Audio, ) -> IO.NodeOutput: audio_data_np = audio_tensor_to_contiguous_ndarray(audio["waveform"]) audio_bytes_io = audio_ndarray_to_bytesio(audio_data_np, audio["sample_rate"], "mp4", "aac") response = await sync_op_raw( cls, ApiEndpoint(path="/proxy/elevenlabs/v1/audio-isolation", method="POST"), files={"audio": ("audio.mp4", audio_bytes_io, "audio/mp4")}, content_type="multipart/form-data", as_binary=True, ) return IO.NodeOutput(audio_bytes_to_audio_input(response)) class ElevenLabsTextToSoundEffects(IO.ComfyNode): @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="ElevenLabsTextToSoundEffects", display_name="ElevenLabs Text to Sound Effects", category="api node/audio/ElevenLabs", description="Generate sound effects from text descriptions.", inputs=[ IO.String.Input( "text", multiline=True, default="", tooltip="Text description of the sound effect to generate.", ), IO.DynamicCombo.Input( "model", options=[ IO.DynamicCombo.Option( "eleven_sfx_v2", [ IO.Float.Input( "duration", default=5.0, min=0.5, max=30.0, step=0.1, display_mode=IO.NumberDisplay.slider, tooltip="Duration of generated sound in seconds.", ), IO.Boolean.Input( "loop", default=False, tooltip="Create a smoothly looping sound effect.", ), IO.Float.Input( "prompt_influence", default=0.3, min=0.0, max=1.0, step=0.01, display_mode=IO.NumberDisplay.slider, tooltip="How closely generation follows the prompt. 
" "Higher values make the sound follow the text more closely.", ), ], ), ], tooltip="Model to use for sound effect generation.", ), IO.Combo.Input( "output_format", options=["mp3_44100_192", "opus_48000_192"], tooltip="Audio output format.", ), ], outputs=[ IO.Audio.Output(), ], hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, price_badge=IO.PriceBadge( expr="""{"type":"usd","usd":0.14,"format":{"approximate":true,"suffix":"/minute"}}""", ), ) @classmethod async def execute( cls, text: str, model: dict, output_format: str, ) -> IO.NodeOutput: validate_string(text, min_length=1) response = await sync_op_raw( cls, ApiEndpoint( path="/proxy/elevenlabs/v1/sound-generation", method="POST", query_params={"output_format": output_format}, ), data=TextToSoundEffectsRequest( text=text, duration_seconds=model["duration"], prompt_influence=model["prompt_influence"], loop=model.get("loop", None), ), as_binary=True, ) return IO.NodeOutput(audio_bytes_to_audio_input(response)) class ElevenLabsInstantVoiceClone(IO.ComfyNode): @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="ElevenLabsInstantVoiceClone", display_name="ElevenLabs Instant Voice Clone", category="api node/audio/ElevenLabs", description="Create a cloned voice from audio samples. 
" "Provide 1-8 audio recordings of the voice to clone.", inputs=[ IO.Autogrow.Input( "files", template=IO.Autogrow.TemplatePrefix( IO.Audio.Input("audio"), prefix="audio", min=1, max=8, ), tooltip="Audio recordings for voice cloning.", ), IO.Boolean.Input( "remove_background_noise", default=False, tooltip="Remove background noise from voice samples using audio isolation.", ), ], outputs=[ IO.Custom(ELEVENLABS_VOICE).Output(display_name="voice"), ], hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, price_badge=IO.PriceBadge(expr="""{"type":"usd","usd":0.15}"""), ) @classmethod async def execute( cls, files: IO.Autogrow.Type, remove_background_noise: bool, ) -> IO.NodeOutput: file_tuples: list[tuple[str, tuple[str, bytes, str]]] = [] for key in files: audio = files[key] sample_rate: int = audio["sample_rate"] waveform = audio["waveform"] audio_data_np = audio_tensor_to_contiguous_ndarray(waveform) audio_bytes_io = audio_ndarray_to_bytesio(audio_data_np, sample_rate, "mp4", "aac") file_tuples.append(("files", (f"{key}.mp4", audio_bytes_io.getvalue(), "audio/mp4"))) response = await sync_op( cls, ApiEndpoint(path="/proxy/elevenlabs/v1/voices/add", method="POST"), response_model=AddVoiceResponse, data=AddVoiceRequest( name=str(uuid.uuid4()), remove_background_noise=remove_background_noise, ), files=file_tuples, content_type="multipart/form-data", ) return IO.NodeOutput(response.voice_id) ELEVENLABS_STS_VOICE_SETTINGS = [ IO.Float.Input( "speed", default=1.0, min=0.7, max=1.3, step=0.01, display_mode=IO.NumberDisplay.slider, tooltip="Speech speed. 1.0 is normal, <1.0 slower, >1.0 faster.", ), IO.Float.Input( "similarity_boost", default=0.75, min=0.0, max=1.0, step=0.01, display_mode=IO.NumberDisplay.slider, tooltip="Similarity boost. 
Higher values make the voice more similar to the original.", ), IO.Boolean.Input( "use_speaker_boost", default=False, tooltip="Boost similarity to the original speaker voice.", ), IO.Float.Input( "style", default=0.0, min=0.0, max=0.2, step=0.01, display_mode=IO.NumberDisplay.slider, tooltip="Style exaggeration. Higher values increase stylistic expression but may reduce stability.", ), ] class ElevenLabsSpeechToSpeech(IO.ComfyNode): @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="ElevenLabsSpeechToSpeech", display_name="ElevenLabs Speech to Speech", category="api node/audio/ElevenLabs", description="Transform speech from one voice to another while preserving the original content and emotion.", inputs=[ IO.Custom(ELEVENLABS_VOICE).Input( "voice", tooltip="Target voice for the transformation. " "Connect from Voice Selector or Instant Voice Clone.", ), IO.Audio.Input( "audio", tooltip="Source audio to transform.", ), IO.Float.Input( "stability", default=0.5, min=0.0, max=1.0, step=0.01, display_mode=IO.NumberDisplay.slider, tooltip="Voice stability. 
Lower values give broader emotional range, " "higher values produce more consistent but potentially monotonous speech.", ), IO.DynamicCombo.Input( "model", options=[ IO.DynamicCombo.Option( "eleven_multilingual_sts_v2", ELEVENLABS_STS_VOICE_SETTINGS, ), IO.DynamicCombo.Option( "eleven_english_sts_v2", ELEVENLABS_STS_VOICE_SETTINGS, ), ], tooltip="Model to use for speech-to-speech transformation.", ), IO.Combo.Input( "output_format", options=["mp3_44100_192", "opus_48000_192"], tooltip="Audio output format.", ), IO.Int.Input( "seed", default=0, min=0, max=4294967295, tooltip="Seed for reproducibility.", ), IO.Boolean.Input( "remove_background_noise", default=False, tooltip="Remove background noise from input audio using audio isolation.", ), ], outputs=[ IO.Audio.Output(), ], hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, price_badge=IO.PriceBadge( expr="""{"type":"usd","usd":0.24,"format":{"approximate":true,"suffix":"/minute"}}""", ), ) @classmethod async def execute( cls, voice: str, audio: Input.Audio, stability: float, model: dict, output_format: str, seed: int, remove_background_noise: bool, ) -> IO.NodeOutput: audio_data_np = audio_tensor_to_contiguous_ndarray(audio["waveform"]) audio_bytes_io = audio_ndarray_to_bytesio(audio_data_np, audio["sample_rate"], "mp4", "aac") voice_settings = TextToSpeechVoiceSettings( stability=stability, similarity_boost=model["similarity_boost"], style=model["style"], use_speaker_boost=model["use_speaker_boost"], speed=model["speed"], ) response = await sync_op_raw( cls, ApiEndpoint( path=f"/proxy/elevenlabs/v1/speech-to-speech/{voice}", method="POST", query_params={"output_format": output_format}, ), data=SpeechToSpeechRequest( model_id=model["model"], voice_settings=voice_settings.model_dump_json(exclude_none=True), seed=seed, remove_background_noise=remove_background_noise, ), files={"audio": ("audio.mp4", audio_bytes_io.getvalue(), "audio/mp4")}, 
content_type="multipart/form-data", as_binary=True, ) return IO.NodeOutput(audio_bytes_to_audio_input(response)) def _generate_dialogue_inputs(count: int) -> list: """Generate input widgets for a given number of dialogue entries.""" inputs = [] for i in range(1, count + 1): inputs.extend( [ IO.String.Input( f"text{i}", multiline=True, default="", tooltip=f"Text content for dialogue entry {i}.", ), IO.Custom(ELEVENLABS_VOICE).Input( f"voice{i}", tooltip=f"Voice for dialogue entry {i}. Connect from Voice Selector or Instant Voice Clone.", ), ] ) return inputs class ElevenLabsTextToDialogue(IO.ComfyNode): @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="ElevenLabsTextToDialogue", display_name="ElevenLabs Text to Dialogue", category="api node/audio/ElevenLabs", description="Generate multi-speaker dialogue from text. Each dialogue entry has its own text and voice.", inputs=[ IO.Float.Input( "stability", default=0.5, min=0.0, max=1.0, step=0.5, display_mode=IO.NumberDisplay.slider, tooltip="Voice stability. Lower values give broader emotional range, " "higher values produce more consistent but potentially monotonous speech.", ), IO.Combo.Input( "apply_text_normalization", options=["auto", "on", "off"], tooltip="Text normalization mode. 
'auto' lets the system decide, " "'on' always applies normalization, 'off' skips it.", ), IO.Combo.Input( "model", options=["eleven_v3"], tooltip="Model to use for dialogue generation.", ), IO.DynamicCombo.Input( "inputs", options=[ IO.DynamicCombo.Option("1", _generate_dialogue_inputs(1)), IO.DynamicCombo.Option("2", _generate_dialogue_inputs(2)), IO.DynamicCombo.Option("3", _generate_dialogue_inputs(3)), IO.DynamicCombo.Option("4", _generate_dialogue_inputs(4)), IO.DynamicCombo.Option("5", _generate_dialogue_inputs(5)), IO.DynamicCombo.Option("6", _generate_dialogue_inputs(6)), IO.DynamicCombo.Option("7", _generate_dialogue_inputs(7)), IO.DynamicCombo.Option("8", _generate_dialogue_inputs(8)), IO.DynamicCombo.Option("9", _generate_dialogue_inputs(9)), IO.DynamicCombo.Option("10", _generate_dialogue_inputs(10)), ], tooltip="Number of dialogue entries.", ), IO.String.Input( "language_code", default="", tooltip="ISO-639-1 or ISO-639-3 language code (e.g., 'en', 'es', 'fra'). " "Leave empty for automatic detection.", ), IO.Int.Input( "seed", default=1, min=0, max=4294967295, tooltip="Seed for reproducibility.", ), IO.Combo.Input( "output_format", options=["mp3_44100_192", "opus_48000_192"], tooltip="Audio output format.", ), ], outputs=[ IO.Audio.Output(), ], hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, price_badge=IO.PriceBadge( expr="""{"type":"usd","usd":0.24,"format":{"approximate":true,"suffix":"/1K chars"}}""", ), ) @classmethod async def execute( cls, stability: float, apply_text_normalization: str, model: str, inputs: dict, language_code: str, seed: int, output_format: str, ) -> IO.NodeOutput: num_entries = int(inputs["inputs"]) dialogue_inputs: list[DialogueInput] = [] for i in range(1, num_entries + 1): text = inputs[f"text{i}"] voice_id = inputs[f"voice{i}"] validate_string(text, min_length=1) dialogue_inputs.append(DialogueInput(text=text, voice_id=voice_id)) request = 
TextToDialogueRequest( inputs=dialogue_inputs, model_id=model, language_code=language_code if language_code.strip() else None, settings=DialogueSettings(stability=stability), seed=seed, apply_text_normalization=apply_text_normalization, ) response = await sync_op_raw( cls, ApiEndpoint( path="/proxy/elevenlabs/v1/text-to-dialogue", method="POST", query_params={"output_format": output_format}, ), data=request, as_binary=True, ) return IO.NodeOutput(audio_bytes_to_audio_input(response)) class ElevenLabsExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ ElevenLabsSpeechToText, ElevenLabsVoiceSelector, ElevenLabsTextToSpeech, ElevenLabsAudioIsolation, ElevenLabsTextToSoundEffects, ElevenLabsInstantVoiceClone, ElevenLabsSpeechToSpeech, ElevenLabsTextToDialogue, ] async def comfy_entrypoint() -> ElevenLabsExtension: return ElevenLabsExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/nodes_elevenlabs.py", "license": "GNU General Public License v3.0", "lines": 886, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:blueprints/.glsl/update_blueprints.py
#!/usr/bin/env python3 """ Shader Blueprint Updater Syncs GLSL shader files between this folder and blueprint JSON files. File naming convention: {Blueprint Name}_{node_id}.frag Usage: python update_blueprints.py extract # Extract shaders from JSONs to here python update_blueprints.py patch # Patch shaders back into JSONs python update_blueprints.py # Same as patch (default) """ import json import logging import sys import re from pathlib import Path logging.basicConfig(level=logging.INFO, format='%(message)s') logger = logging.getLogger(__name__) GLSL_DIR = Path(__file__).parent BLUEPRINTS_DIR = GLSL_DIR.parent def get_blueprint_files(): """Get all blueprint JSON files.""" return sorted(BLUEPRINTS_DIR.glob("*.json")) def sanitize_filename(name): """Convert blueprint name to safe filename.""" return re.sub(r'[^\w\-]', '_', name) def extract_shaders(): """Extract all shaders from blueprint JSONs to this folder.""" extracted = 0 for json_path in get_blueprint_files(): blueprint_name = json_path.stem try: with open(json_path, 'r') as f: data = json.load(f) except (json.JSONDecodeError, IOError) as e: logger.warning("Skipping %s: %s", json_path.name, e) continue # Find GLSLShader nodes in subgraphs for subgraph in data.get('definitions', {}).get('subgraphs', []): for node in subgraph.get('nodes', []): if node.get('type') == 'GLSLShader': node_id = node.get('id') widgets = node.get('widgets_values', []) # Find shader code (first string that looks like GLSL) for widget in widgets: if isinstance(widget, str) and widget.startswith('#version'): safe_name = sanitize_filename(blueprint_name) frag_name = f"{safe_name}_{node_id}.frag" frag_path = GLSL_DIR / frag_name with open(frag_path, 'w') as f: f.write(widget) logger.info(" Extracted: %s", frag_name) extracted += 1 break logger.info("\nExtracted %d shader(s)", extracted) def patch_shaders(): """Patch shaders from this folder back into blueprint JSONs.""" # Build lookup: blueprint_name -> [(node_id, shader_code), ...] 
shader_updates = {} for frag_path in sorted(GLSL_DIR.glob("*.frag")): # Parse filename: {blueprint_name}_{node_id}.frag parts = frag_path.stem.rsplit('_', 1) if len(parts) != 2: logger.warning("Skipping %s: invalid filename format", frag_path.name) continue blueprint_name, node_id_str = parts try: node_id = int(node_id_str) except ValueError: logger.warning("Skipping %s: invalid node_id", frag_path.name) continue with open(frag_path, 'r') as f: shader_code = f.read() if blueprint_name not in shader_updates: shader_updates[blueprint_name] = [] shader_updates[blueprint_name].append((node_id, shader_code)) # Apply updates to JSON files patched = 0 for json_path in get_blueprint_files(): blueprint_name = sanitize_filename(json_path.stem) if blueprint_name not in shader_updates: continue try: with open(json_path, 'r') as f: data = json.load(f) except (json.JSONDecodeError, IOError) as e: logger.error("Error reading %s: %s", json_path.name, e) continue modified = False for node_id, shader_code in shader_updates[blueprint_name]: # Find the node and update for subgraph in data.get('definitions', {}).get('subgraphs', []): for node in subgraph.get('nodes', []): if node.get('id') == node_id and node.get('type') == 'GLSLShader': widgets = node.get('widgets_values', []) if len(widgets) > 0 and widgets[0] != shader_code: widgets[0] = shader_code modified = True logger.info(" Patched: %s (node %d)", json_path.name, node_id) patched += 1 if modified: with open(json_path, 'w') as f: json.dump(data, f) if patched == 0: logger.info("No changes to apply.") else: logger.info("\nPatched %d shader(s)", patched) def main(): if len(sys.argv) < 2: command = "patch" else: command = sys.argv[1].lower() if command == "extract": logger.info("Extracting shaders from blueprints...") extract_shaders() elif command in ("patch", "update", "apply"): logger.info("Patching shaders into blueprints...") patch_shaders() else: logger.info(__doc__) sys.exit(1) if __name__ == "__main__": main()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "blueprints/.glsl/update_blueprints.py", "license": "GNU General Public License v3.0", "lines": 123, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy_extras/nodes_glsl.py
import os
import sys
import re
import logging
import ctypes.util
import importlib.util
from typing import TypedDict

import numpy as np
import torch

import nodes
from comfy_api.latest import ComfyExtension, io, ui
from typing_extensions import override
from utils.install_util import get_missing_requirements_message

logger = logging.getLogger(__name__)


def _check_opengl_availability():
    """Early check for OpenGL availability. Raises RuntimeError if unlikely to work.

    Verifies the required Python packages are importable and, on headless
    Linux, probes for EGL/OSMesa shared libraries.
    """
    logger.debug("_check_opengl_availability: starting")
    missing = []
    # Check Python packages (using find_spec to avoid importing)
    logger.debug("_check_opengl_availability: checking for glfw package")
    if importlib.util.find_spec("glfw") is None:
        missing.append("glfw")
    logger.debug("_check_opengl_availability: checking for OpenGL package")
    if importlib.util.find_spec("OpenGL") is None:
        # The import name is "OpenGL" but the pip package is "PyOpenGL".
        missing.append("PyOpenGL")
    if missing:
        raise RuntimeError(
            f"OpenGL dependencies not available.\n{get_missing_requirements_message()}\n"
        )
    # On Linux without display, check if headless backends are available
    logger.debug(f"_check_opengl_availability: platform={sys.platform}")
    if sys.platform.startswith("linux"):
        has_display = os.environ.get("DISPLAY") or os.environ.get("WAYLAND_DISPLAY")
        logger.debug(f"_check_opengl_availability: has_display={bool(has_display)}")
        if not has_display:
            # Check for EGL or OSMesa libraries (probe only; nothing is loaded here)
            logger.debug("_check_opengl_availability: checking for EGL library")
            has_egl = ctypes.util.find_library("EGL")
            logger.debug("_check_opengl_availability: checking for OSMesa library")
            has_osmesa = ctypes.util.find_library("OSMesa")
            # Error disabled for CI as it fails this check
            # if not has_egl and not has_osmesa:
            #     raise RuntimeError(
            #         "GLSL Shader node: No display and no headless backend (EGL/OSMesa) found.\n"
            #         "See error below for installation instructions."
# ) logger.debug(f"Headless mode: EGL={'yes' if has_egl else 'no'}, OSMesa={'yes' if has_osmesa else 'no'}") logger.debug("_check_opengl_availability: completed") # Run early check at import time logger.debug("nodes_glsl: running _check_opengl_availability at import time") _check_opengl_availability() # OpenGL modules - initialized lazily when context is created gl = None glfw = None EGL = None def _import_opengl(): """Import OpenGL module. Called after context is created.""" global gl if gl is None: logger.debug("_import_opengl: importing OpenGL.GL") import OpenGL.GL as _gl gl = _gl logger.debug("_import_opengl: import completed") return gl class SizeModeInput(TypedDict): size_mode: str width: int height: int MAX_IMAGES = 5 # u_image0-4 MAX_UNIFORMS = 5 # u_float0-4, u_int0-4 MAX_OUTPUTS = 4 # fragColor0-3 (MRT) # Vertex shader using gl_VertexID trick - no VBO needed. # Draws a single triangle that covers the entire screen: # # (-1,3) # /| # / | <- visible area is the unit square from (-1,-1) to (1,1) # / | parts outside get clipped away # (-1,-1)---(3,-1) # # v_texCoord is computed from clip space: * 0.5 + 0.5 maps (-1,1) -> (0,1) VERTEX_SHADER = """#version 330 core out vec2 v_texCoord; void main() { vec2 verts[3] = vec2[](vec2(-1, -1), vec2(3, -1), vec2(-1, 3)); v_texCoord = verts[gl_VertexID] * 0.5 + 0.5; gl_Position = vec4(verts[gl_VertexID], 0, 1); } """ DEFAULT_FRAGMENT_SHADER = """#version 300 es precision highp float; uniform sampler2D u_image0; uniform vec2 u_resolution; in vec2 v_texCoord; layout(location = 0) out vec4 fragColor0; void main() { fragColor0 = texture(u_image0, v_texCoord); } """ def _convert_es_to_desktop(source: str) -> str: """Convert GLSL ES (WebGL) shader source to desktop GLSL 330 core.""" # Remove any existing #version directive source = re.sub(r"#version\s+\d+(\s+es)?\s*\n?", "", source, flags=re.IGNORECASE) # Remove precision qualifiers (not needed in desktop GLSL) source = 
re.sub(r"precision\s+(lowp|mediump|highp)\s+\w+\s*;\s*\n?", "", source) # Prepend desktop GLSL version return "#version 330 core\n" + source def _detect_output_count(source: str) -> int: """Detect how many fragColor outputs are used in the shader. Returns the count of outputs needed (1 to MAX_OUTPUTS). """ matches = re.findall(r"fragColor(\d+)", source) if not matches: return 1 # Default to 1 output if none found max_index = max(int(m) for m in matches) return min(max_index + 1, MAX_OUTPUTS) def _detect_pass_count(source: str) -> int: """Detect multi-pass rendering from #pragma passes N directive. Returns the number of passes (1 if not specified). """ match = re.search(r'#pragma\s+passes\s+(\d+)', source) if match: return max(1, int(match.group(1))) return 1 def _init_glfw(): """Initialize GLFW. Returns (window, glfw_module). Raises RuntimeError on failure.""" logger.debug("_init_glfw: starting") # On macOS, glfw.init() must be called from main thread or it hangs forever if sys.platform == "darwin": logger.debug("_init_glfw: skipping on macOS") raise RuntimeError("GLFW backend not supported on macOS") logger.debug("_init_glfw: importing glfw module") import glfw as _glfw logger.debug("_init_glfw: calling glfw.init()") if not _glfw.init(): raise RuntimeError("glfw.init() failed") try: logger.debug("_init_glfw: setting window hints") _glfw.window_hint(_glfw.VISIBLE, _glfw.FALSE) _glfw.window_hint(_glfw.CONTEXT_VERSION_MAJOR, 3) _glfw.window_hint(_glfw.CONTEXT_VERSION_MINOR, 3) _glfw.window_hint(_glfw.OPENGL_PROFILE, _glfw.OPENGL_CORE_PROFILE) logger.debug("_init_glfw: calling create_window()") window = _glfw.create_window(64, 64, "ComfyUI GLSL", None, None) if not window: raise RuntimeError("glfw.create_window() failed") logger.debug("_init_glfw: calling make_context_current()") _glfw.make_context_current(window) logger.debug("_init_glfw: completed successfully") return window, _glfw except Exception: logger.debug("_init_glfw: failed, terminating glfw") 
        _glfw.terminate()
        raise


def _init_egl():
    """Initialize EGL for headless (GPU) rendering.

    Returns (display, context, surface, EGL_module).
    Raises RuntimeError on failure; partially created EGL resources are
    released before the exception propagates.
    """
    logger.debug("_init_egl: starting")
    from OpenGL import EGL as _EGL
    from OpenGL.EGL import (
        eglGetDisplay, eglInitialize, eglChooseConfig, eglCreateContext,
        eglMakeCurrent, eglCreatePbufferSurface, eglBindAPI, eglTerminate,
        eglDestroyContext, eglDestroySurface,
        EGL_DEFAULT_DISPLAY, EGL_NO_CONTEXT, EGL_NONE,
        EGL_SURFACE_TYPE, EGL_PBUFFER_BIT, EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT,
        EGL_RED_SIZE, EGL_GREEN_SIZE, EGL_BLUE_SIZE, EGL_ALPHA_SIZE,
        EGL_DEPTH_SIZE, EGL_WIDTH, EGL_HEIGHT, EGL_OPENGL_API,
    )
    logger.debug("_init_egl: imports completed")
    # Track each resource so the except-block below can clean up exactly
    # what was created before the failure point.
    display = None
    context = None
    surface = None
    try:
        logger.debug("_init_egl: calling eglGetDisplay()")
        display = eglGetDisplay(EGL_DEFAULT_DISPLAY)
        if display == _EGL.EGL_NO_DISPLAY:
            raise RuntimeError("eglGetDisplay() failed")
        logger.debug("_init_egl: calling eglInitialize()")
        major, minor = _EGL.EGLint(), _EGL.EGLint()
        if not eglInitialize(display, major, minor):
            display = None  # Not initialized, don't terminate
            raise RuntimeError("eglInitialize() failed")
        logger.debug(f"_init_egl: EGL version {major.value}.{minor.value}")
        # Request an RGBA8 pbuffer-capable OpenGL config; no depth buffer needed
        # for full-screen-triangle image processing.
        config_attribs = [
            EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
            EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT,
            EGL_RED_SIZE, 8, EGL_GREEN_SIZE, 8, EGL_BLUE_SIZE, 8, EGL_ALPHA_SIZE, 8,
            EGL_DEPTH_SIZE, 0,
            EGL_NONE
        ]
        configs = (_EGL.EGLConfig * 1)()
        num_configs = _EGL.EGLint()
        if not eglChooseConfig(display, config_attribs, configs, 1, num_configs) or num_configs.value == 0:
            raise RuntimeError("eglChooseConfig() failed")
        config = configs[0]
        logger.debug(f"_init_egl: config chosen, num_configs={num_configs.value}")
        # Bind the desktop OpenGL API (not GLES) before creating the context.
        if not eglBindAPI(EGL_OPENGL_API):
            raise RuntimeError("eglBindAPI() failed")
        logger.debug("_init_egl: calling eglCreateContext()")
        # Request an OpenGL 3.3 core-profile context to match the GLSL 330 shaders.
        context_attribs = [
            _EGL.EGL_CONTEXT_MAJOR_VERSION, 3,
            _EGL.EGL_CONTEXT_MINOR_VERSION, 3,
            _EGL.EGL_CONTEXT_OPENGL_PROFILE_MASK,
_EGL.EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT, EGL_NONE ] context = eglCreateContext(display, config, EGL_NO_CONTEXT, context_attribs) if context == EGL_NO_CONTEXT: raise RuntimeError("eglCreateContext() failed") logger.debug("_init_egl: calling eglCreatePbufferSurface()") pbuffer_attribs = [EGL_WIDTH, 64, EGL_HEIGHT, 64, EGL_NONE] surface = eglCreatePbufferSurface(display, config, pbuffer_attribs) if surface == _EGL.EGL_NO_SURFACE: raise RuntimeError("eglCreatePbufferSurface() failed") logger.debug("_init_egl: calling eglMakeCurrent()") if not eglMakeCurrent(display, surface, surface, context): raise RuntimeError("eglMakeCurrent() failed") logger.debug("_init_egl: completed successfully") return display, context, surface, _EGL except Exception: logger.debug("_init_egl: failed, cleaning up") # Clean up any resources on failure if surface is not None: eglDestroySurface(display, surface) if context is not None: eglDestroyContext(display, context) if display is not None: eglTerminate(display) raise def _init_osmesa(): """Initialize OSMesa for software rendering. Returns (context, buffer). 
Raises RuntimeError on failure.""" import ctypes logger.debug("_init_osmesa: starting") os.environ["PYOPENGL_PLATFORM"] = "osmesa" logger.debug("_init_osmesa: importing OpenGL.osmesa") from OpenGL import GL as _gl from OpenGL.osmesa import ( OSMesaCreateContextExt, OSMesaMakeCurrent, OSMesaDestroyContext, OSMESA_RGBA, ) logger.debug("_init_osmesa: imports completed") ctx = OSMesaCreateContextExt(OSMESA_RGBA, 24, 0, 0, None) if not ctx: raise RuntimeError("OSMesaCreateContextExt() failed") width, height = 64, 64 buffer = (ctypes.c_ubyte * (width * height * 4))() logger.debug("_init_osmesa: calling OSMesaMakeCurrent()") if not OSMesaMakeCurrent(ctx, buffer, _gl.GL_UNSIGNED_BYTE, width, height): OSMesaDestroyContext(ctx) raise RuntimeError("OSMesaMakeCurrent() failed") logger.debug("_init_osmesa: completed successfully") return ctx, buffer class GLContext: """Manages OpenGL context and resources for shader execution. Tries backends in order: GLFW (desktop) → EGL (headless GPU) → OSMesa (software). 
""" _instance = None _initialized = False def __new__(cls): if cls._instance is None: cls._instance = super().__new__(cls) return cls._instance def __init__(self): if GLContext._initialized: logger.debug("GLContext.__init__: already initialized, skipping") return logger.debug("GLContext.__init__: starting initialization") global glfw, EGL import time start = time.perf_counter() self._backend = None self._window = None self._egl_display = None self._egl_context = None self._egl_surface = None self._osmesa_ctx = None self._osmesa_buffer = None self._vao = None # Try backends in order: GLFW → EGL → OSMesa errors = [] logger.debug("GLContext.__init__: trying GLFW backend") try: self._window, glfw = _init_glfw() self._backend = "glfw" logger.debug("GLContext.__init__: GLFW backend succeeded") except Exception as e: logger.debug(f"GLContext.__init__: GLFW backend failed: {e}") errors.append(("GLFW", e)) if self._backend is None: logger.debug("GLContext.__init__: trying EGL backend") try: self._egl_display, self._egl_context, self._egl_surface, EGL = _init_egl() self._backend = "egl" logger.debug("GLContext.__init__: EGL backend succeeded") except Exception as e: logger.debug(f"GLContext.__init__: EGL backend failed: {e}") errors.append(("EGL", e)) if self._backend is None: logger.debug("GLContext.__init__: trying OSMesa backend") try: self._osmesa_ctx, self._osmesa_buffer = _init_osmesa() self._backend = "osmesa" logger.debug("GLContext.__init__: OSMesa backend succeeded") except Exception as e: logger.debug(f"GLContext.__init__: OSMesa backend failed: {e}") errors.append(("OSMesa", e)) if self._backend is None: if sys.platform == "win32": platform_help = ( "Windows: Ensure GPU drivers are installed and display is available.\n" " CPU-only/headless mode is not supported on Windows." 
) elif sys.platform == "darwin": platform_help = ( "macOS: GLFW is not supported.\n" " Install OSMesa via Homebrew: brew install mesa\n" " Then: pip install PyOpenGL PyOpenGL-accelerate" ) else: platform_help = ( "Linux: Install one of these backends:\n" " Desktop: sudo apt install libgl1-mesa-glx libglfw3\n" " Headless with GPU: sudo apt install libegl1-mesa libgl1-mesa-dri\n" " Headless (CPU): sudo apt install libosmesa6" ) error_details = "\n".join(f" {name}: {err}" for name, err in errors) raise RuntimeError( f"Failed to create OpenGL context.\n\n" f"Backend errors:\n{error_details}\n\n" f"{platform_help}" ) # Now import OpenGL.GL (after context is current) logger.debug("GLContext.__init__: importing OpenGL.GL") _import_opengl() # Create VAO (required for core profile, but OSMesa may use compat profile) logger.debug("GLContext.__init__: creating VAO") try: vao = gl.glGenVertexArrays(1) gl.glBindVertexArray(vao) self._vao = vao # Only store after successful bind logger.debug("GLContext.__init__: VAO created successfully") except Exception as e: logger.debug(f"GLContext.__init__: VAO creation failed (may be expected for OSMesa): {e}") # OSMesa with older Mesa may not support VAOs # Clean up if we created but couldn't bind if vao: try: gl.glDeleteVertexArrays(1, [vao]) except Exception: pass elapsed = (time.perf_counter() - start) * 1000 # Log device info renderer = gl.glGetString(gl.GL_RENDERER) vendor = gl.glGetString(gl.GL_VENDOR) version = gl.glGetString(gl.GL_VERSION) renderer = renderer.decode() if renderer else "Unknown" vendor = vendor.decode() if vendor else "Unknown" version = version.decode() if version else "Unknown" GLContext._initialized = True logger.info(f"GLSL context initialized in {elapsed:.1f}ms ({self._backend}) - {renderer} ({vendor}), GL {version}") def make_current(self): if self._backend == "glfw": glfw.make_context_current(self._window) elif self._backend == "egl": from OpenGL.EGL import eglMakeCurrent eglMakeCurrent(self._egl_display, 
self._egl_surface, self._egl_surface, self._egl_context) elif self._backend == "osmesa": from OpenGL.osmesa import OSMesaMakeCurrent OSMesaMakeCurrent(self._osmesa_ctx, self._osmesa_buffer, gl.GL_UNSIGNED_BYTE, 64, 64) if self._vao is not None: gl.glBindVertexArray(self._vao) def _compile_shader(source: str, shader_type: int) -> int: """Compile a shader and return its ID.""" shader = gl.glCreateShader(shader_type) gl.glShaderSource(shader, source) gl.glCompileShader(shader) if gl.glGetShaderiv(shader, gl.GL_COMPILE_STATUS) != gl.GL_TRUE: error = gl.glGetShaderInfoLog(shader).decode() gl.glDeleteShader(shader) raise RuntimeError(f"Shader compilation failed:\n{error}") return shader def _create_program(vertex_source: str, fragment_source: str) -> int: """Create and link a shader program.""" vertex_shader = _compile_shader(vertex_source, gl.GL_VERTEX_SHADER) try: fragment_shader = _compile_shader(fragment_source, gl.GL_FRAGMENT_SHADER) except RuntimeError: gl.glDeleteShader(vertex_shader) raise program = gl.glCreateProgram() gl.glAttachShader(program, vertex_shader) gl.glAttachShader(program, fragment_shader) gl.glLinkProgram(program) gl.glDeleteShader(vertex_shader) gl.glDeleteShader(fragment_shader) if gl.glGetProgramiv(program, gl.GL_LINK_STATUS) != gl.GL_TRUE: error = gl.glGetProgramInfoLog(program).decode() gl.glDeleteProgram(program) raise RuntimeError(f"Program linking failed:\n{error}") return program def _render_shader_batch( fragment_code: str, width: int, height: int, image_batches: list[list[np.ndarray]], floats: list[float], ints: list[int], ) -> list[list[np.ndarray]]: """ Render a fragment shader for multiple batches efficiently. Compiles shader once, reuses framebuffer/textures across batches. Supports multi-pass rendering via #pragma passes N directive. 
Args: fragment_code: User's fragment shader code width: Output width height: Output height image_batches: List of batches, each batch is a list of input images (H, W, C) float32 [0,1] floats: List of float uniforms ints: List of int uniforms Returns: List of batch outputs, each is a list of output images (H, W, 4) float32 [0,1] """ import time start_time = time.perf_counter() if not image_batches: return [] ctx = GLContext() ctx.make_current() # Convert from GLSL ES to desktop GLSL 330 fragment_source = _convert_es_to_desktop(fragment_code) # Detect how many outputs the shader actually uses num_outputs = _detect_output_count(fragment_code) # Detect multi-pass rendering num_passes = _detect_pass_count(fragment_code) # Track resources for cleanup program = None fbo = None output_textures = [] input_textures = [] ping_pong_textures = [] ping_pong_fbos = [] num_inputs = len(image_batches[0]) try: # Compile shaders (once for all batches) try: program = _create_program(VERTEX_SHADER, fragment_source) except RuntimeError: logger.error(f"Fragment shader:\n{fragment_source}") raise gl.glUseProgram(program) # Create framebuffer with only the needed color attachments fbo = gl.glGenFramebuffers(1) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo) draw_buffers = [] for i in range(num_outputs): tex = gl.glGenTextures(1) output_textures.append(tex) gl.glBindTexture(gl.GL_TEXTURE_2D, tex) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA32F, width, height, 0, gl.GL_RGBA, gl.GL_FLOAT, None) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0 + i, gl.GL_TEXTURE_2D, tex, 0) draw_buffers.append(gl.GL_COLOR_ATTACHMENT0 + i) gl.glDrawBuffers(num_outputs, draw_buffers) if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE: raise RuntimeError("Framebuffer is not complete") # Create ping-pong resources 
for multi-pass rendering if num_passes > 1: for _ in range(2): pp_tex = gl.glGenTextures(1) ping_pong_textures.append(pp_tex) gl.glBindTexture(gl.GL_TEXTURE_2D, pp_tex) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA32F, width, height, 0, gl.GL_RGBA, gl.GL_FLOAT, None) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_EDGE) pp_fbo = gl.glGenFramebuffers(1) ping_pong_fbos.append(pp_fbo) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, pp_fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, pp_tex, 0) gl.glDrawBuffers(1, [gl.GL_COLOR_ATTACHMENT0]) if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE: raise RuntimeError("Ping-pong framebuffer is not complete") # Create input textures (reused for all batches) for i in range(num_inputs): tex = gl.glGenTextures(1) input_textures.append(tex) gl.glActiveTexture(gl.GL_TEXTURE0 + i) gl.glBindTexture(gl.GL_TEXTURE_2D, tex) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_EDGE) loc = gl.glGetUniformLocation(program, f"u_image{i}") if loc >= 0: gl.glUniform1i(loc, i) # Set static uniforms (once for all batches) loc = gl.glGetUniformLocation(program, "u_resolution") if loc >= 0: gl.glUniform2f(loc, float(width), float(height)) for i, v in enumerate(floats): loc = gl.glGetUniformLocation(program, f"u_float{i}") if loc >= 0: gl.glUniform1f(loc, v) for i, v in enumerate(ints): loc = gl.glGetUniformLocation(program, f"u_int{i}") if loc >= 0: gl.glUniform1i(loc, v) # 
Get u_pass uniform location for multi-pass pass_loc = gl.glGetUniformLocation(program, "u_pass") gl.glViewport(0, 0, width, height) gl.glDisable(gl.GL_BLEND) # Ensure no alpha blending - write output directly # Process each batch all_batch_outputs = [] for images in image_batches: # Update input textures with this batch's images for i, img in enumerate(images): gl.glActiveTexture(gl.GL_TEXTURE0 + i) gl.glBindTexture(gl.GL_TEXTURE_2D, input_textures[i]) # Flip vertically for GL coordinates, ensure RGBA h, w, c = img.shape if c == 3: img_upload = np.empty((h, w, 4), dtype=np.float32) img_upload[:, :, :3] = img[::-1, :, :] img_upload[:, :, 3] = 1.0 else: img_upload = np.ascontiguousarray(img[::-1, :, :]) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA32F, w, h, 0, gl.GL_RGBA, gl.GL_FLOAT, img_upload) if num_passes == 1: # Single pass - render directly to output FBO gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo) if pass_loc >= 0: gl.glUniform1i(pass_loc, 0) gl.glClearColor(0, 0, 0, 0) gl.glClear(gl.GL_COLOR_BUFFER_BIT) gl.glDrawArrays(gl.GL_TRIANGLES, 0, 3) else: # Multi-pass rendering with ping-pong for p in range(num_passes): is_last_pass = (p == num_passes - 1) # Set pass uniform if pass_loc >= 0: gl.glUniform1i(pass_loc, p) if is_last_pass: # Last pass renders to the main output FBO gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo) else: # Intermediate passes render to ping-pong FBO target_fbo = ping_pong_fbos[p % 2] gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, target_fbo) # Set input texture for this pass gl.glActiveTexture(gl.GL_TEXTURE0) if p == 0: # First pass reads from original input gl.glBindTexture(gl.GL_TEXTURE_2D, input_textures[0]) else: # Subsequent passes read from previous pass output source_tex = ping_pong_textures[(p - 1) % 2] gl.glBindTexture(gl.GL_TEXTURE_2D, source_tex) gl.glClearColor(0, 0, 0, 0) gl.glClear(gl.GL_COLOR_BUFFER_BIT) gl.glDrawArrays(gl.GL_TRIANGLES, 0, 3) # Read back outputs for this batch # (glGetTexImage is synchronous, implicitly waits for 
rendering) batch_outputs = [] for tex in output_textures: gl.glBindTexture(gl.GL_TEXTURE_2D, tex) data = gl.glGetTexImage(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA, gl.GL_FLOAT) img = np.frombuffer(data, dtype=np.float32).reshape(height, width, 4) batch_outputs.append(img[::-1, :, :].copy()) # Pad with black images for unused outputs black_img = np.zeros((height, width, 4), dtype=np.float32) for _ in range(num_outputs, MAX_OUTPUTS): batch_outputs.append(black_img) all_batch_outputs.append(batch_outputs) elapsed = (time.perf_counter() - start_time) * 1000 num_batches = len(image_batches) pass_info = f", {num_passes} passes" if num_passes > 1 else "" logger.info(f"GLSL shader executed in {elapsed:.1f}ms ({num_batches} batch{'es' if num_batches != 1 else ''}, {width}x{height}{pass_info})") return all_batch_outputs finally: # Unbind before deleting gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) gl.glUseProgram(0) for tex in input_textures: gl.glDeleteTextures(int(tex)) for tex in output_textures: gl.glDeleteTextures(int(tex)) for tex in ping_pong_textures: gl.glDeleteTextures(int(tex)) if fbo is not None: gl.glDeleteFramebuffers(1, [fbo]) for pp_fbo in ping_pong_fbos: gl.glDeleteFramebuffers(1, [pp_fbo]) if program is not None: gl.glDeleteProgram(program) class GLSLShader(io.ComfyNode): @classmethod def define_schema(cls) -> io.Schema: image_template = io.Autogrow.TemplatePrefix( io.Image.Input("image"), prefix="image", min=1, max=MAX_IMAGES, ) float_template = io.Autogrow.TemplatePrefix( io.Float.Input("float", default=0.0), prefix="u_float", min=0, max=MAX_UNIFORMS, ) int_template = io.Autogrow.TemplatePrefix( io.Int.Input("int", default=0), prefix="u_int", min=0, max=MAX_UNIFORMS, ) return io.Schema( node_id="GLSLShader", display_name="GLSL Shader", category="image/shader", description=( "Apply GLSL ES fragment shaders to images. " "u_resolution (vec2) is always available." 
), inputs=[ io.String.Input( "fragment_shader", default=DEFAULT_FRAGMENT_SHADER, multiline=True, tooltip="GLSL fragment shader source code (GLSL ES 3.00 / WebGL 2.0 compatible)", ), io.DynamicCombo.Input( "size_mode", options=[ io.DynamicCombo.Option("from_input", []), io.DynamicCombo.Option( "custom", [ io.Int.Input( "width", default=512, min=1, max=nodes.MAX_RESOLUTION, ), io.Int.Input( "height", default=512, min=1, max=nodes.MAX_RESOLUTION, ), ], ), ], tooltip="Output size: 'from_input' uses first input image dimensions, 'custom' allows manual size", ), io.Autogrow.Input("images", template=image_template, tooltip=f"Images are available as u_image0-{MAX_IMAGES-1} (sampler2D) in the shader code"), io.Autogrow.Input("floats", template=float_template, tooltip=f"Floats are available as u_float0-{MAX_UNIFORMS-1} in the shader code"), io.Autogrow.Input("ints", template=int_template, tooltip=f"Ints are available as u_int0-{MAX_UNIFORMS-1} in the shader code"), ], outputs=[ io.Image.Output(display_name="IMAGE0", tooltip="Available via layout(location = 0) out vec4 fragColor0 in the shader code"), io.Image.Output(display_name="IMAGE1", tooltip="Available via layout(location = 1) out vec4 fragColor1 in the shader code"), io.Image.Output(display_name="IMAGE2", tooltip="Available via layout(location = 2) out vec4 fragColor2 in the shader code"), io.Image.Output(display_name="IMAGE3", tooltip="Available via layout(location = 3) out vec4 fragColor3 in the shader code"), ], ) @classmethod def execute( cls, fragment_shader: str, size_mode: SizeModeInput, images: io.Autogrow.Type, floats: io.Autogrow.Type = None, ints: io.Autogrow.Type = None, **kwargs, ) -> io.NodeOutput: image_list = [v for v in images.values() if v is not None] float_list = ( [v if v is not None else 0.0 for v in floats.values()] if floats else [] ) int_list = [v if v is not None else 0 for v in ints.values()] if ints else [] if not image_list: raise ValueError("At least one input image is required") # 
Determine output dimensions if size_mode["size_mode"] == "custom": out_width = size_mode["width"] out_height = size_mode["height"] else: out_height, out_width = image_list[0].shape[1:3] batch_size = image_list[0].shape[0] # Prepare batches image_batches = [] for batch_idx in range(batch_size): batch_images = [img_tensor[batch_idx].cpu().numpy().astype(np.float32) for img_tensor in image_list] image_batches.append(batch_images) all_batch_outputs = _render_shader_batch( fragment_shader, out_width, out_height, image_batches, float_list, int_list, ) # Collect outputs into tensors all_outputs = [[] for _ in range(MAX_OUTPUTS)] for batch_outputs in all_batch_outputs: for i, out_img in enumerate(batch_outputs): all_outputs[i].append(torch.from_numpy(out_img)) output_tensors = [torch.stack(all_outputs[i], dim=0) for i in range(MAX_OUTPUTS)] return io.NodeOutput( *output_tensors, ui=cls._build_ui_output(image_list, output_tensors[0]), ) @classmethod def _build_ui_output( cls, image_list: list[torch.Tensor], output_batch: torch.Tensor ) -> dict[str, list]: """Build UI output with input and output images for client-side shader execution.""" input_images_ui = [] for img in image_list: input_images_ui.extend(ui.ImageSaveHelper.save_images( img, filename_prefix="GLSLShader_input", folder_type=io.FolderType.temp, cls=None, compress_level=1, )) output_images_ui = ui.ImageSaveHelper.save_images( output_batch, filename_prefix="GLSLShader_output", folder_type=io.FolderType.temp, cls=None, compress_level=1, ) return {"input_images": input_images_ui, "images": output_images_ui} class GLSLExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[io.ComfyNode]]: return [GLSLShader] async def comfy_entrypoint() -> GLSLExtension: return GLSLExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_glsl.py", "license": "GNU General Public License v3.0", "lines": 740, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy_extras/nodes_textgen.py
from comfy_api.latest import ComfyExtension, io from typing_extensions import override class TextGenerate(io.ComfyNode): @classmethod def define_schema(cls): # Define dynamic combo options for sampling mode sampling_options = [ io.DynamicCombo.Option( key="on", inputs=[ io.Float.Input("temperature", default=0.7, min=0.01, max=2.0, step=0.000001), io.Int.Input("top_k", default=64, min=0, max=1000), io.Float.Input("top_p", default=0.95, min=0.0, max=1.0, step=0.01), io.Float.Input("min_p", default=0.05, min=0.0, max=1.0, step=0.01), io.Float.Input("repetition_penalty", default=1.05, min=0.0, max=5.0, step=0.01), io.Int.Input("seed", default=0, min=0, max=0xffffffffffffffff), ] ), io.DynamicCombo.Option( key="off", inputs=[] ), ] return io.Schema( node_id="TextGenerate", category="textgen/", search_aliases=["LLM", "gemma"], inputs=[ io.Clip.Input("clip"), io.String.Input("prompt", multiline=True, dynamic_prompts=True, default=""), io.Image.Input("image", optional=True), io.Int.Input("max_length", default=256, min=1, max=2048), io.DynamicCombo.Input("sampling_mode", options=sampling_options, display_name="Sampling Mode"), ], outputs=[ io.String.Output(display_name="generated_text"), ], ) @classmethod def execute(cls, clip, prompt, max_length, sampling_mode, image=None) -> io.NodeOutput: tokens = clip.tokenize(prompt, image=image, skip_template=False, min_length=1) # Get sampling parameters from dynamic combo do_sample = sampling_mode.get("sampling_mode") == "on" temperature = sampling_mode.get("temperature", 1.0) top_k = sampling_mode.get("top_k", 50) top_p = sampling_mode.get("top_p", 1.0) min_p = sampling_mode.get("min_p", 0.0) seed = sampling_mode.get("seed", None) repetition_penalty = sampling_mode.get("repetition_penalty", 1.0) generated_ids = clip.generate( tokens, do_sample=do_sample, max_length=max_length, temperature=temperature, top_k=top_k, top_p=top_p, min_p=min_p, repetition_penalty=repetition_penalty, seed=seed ) generated_text = 
clip.decode(generated_ids, skip_special_tokens=True) return io.NodeOutput(generated_text) LTX2_T2V_SYSTEM_PROMPT = """You are a Creative Assistant. Given a user's raw input prompt describing a scene or concept, expand it into a detailed video generation prompt with specific visuals and integrated audio to guide a text-to-video model. #### Guidelines - Strictly follow all aspects of the user's raw input: include every element requested (style, visuals, motions, actions, camera movement, audio). - If the input is vague, invent concrete details: lighting, textures, materials, scene settings, etc. - For characters: describe gender, clothing, hair, expressions. DO NOT invent unrequested characters. - Use active language: present-progressive verbs ("is walking," "speaking"). If no action specified, describe natural movements. - Maintain chronological flow: use temporal connectors ("as," "then," "while"). - Audio layer: Describe complete soundscape (background audio, ambient sounds, SFX, speech/music when requested). Integrate sounds chronologically alongside actions. Be specific (e.g., "soft footsteps on tile"), not vague (e.g., "ambient sound is present"). - Speech (only when requested): - For ANY speech-related input (talking, conversation, singing, etc.), ALWAYS include exact words in quotes with voice characteristics (e.g., "The man says in an excited voice: 'You won't believe what I just saw!'"). - Specify language if not English and accent if relevant. - Style: Include visual style at the beginning: "Style: <style>, <rest of prompt>." Default to cinematic-realistic if unspecified. Omit if unclear. - Visual and audio only: NO non-visual/auditory senses (smell, taste, touch). - Restrained language: Avoid dramatic/exaggerated terms. Use mild, natural phrasing. - Colors: Use plain terms ("red dress"), not intensified ("vibrant blue," "bright red"). - Lighting: Use neutral descriptions ("soft overhead light"), not harsh ("blinding light"). 
- Facial features: Use delicate modifiers for subtle features (i.e., "subtle freckles"). #### Important notes: - Analyze the user's raw input carefully. In cases of FPV or POV, exclude the description of the subject whose POV is requested. - Camera motion: DO NOT invent camera motion unless requested by the user. - Speech: DO NOT modify user-provided character dialogue unless it's a typo. - No timestamps or cuts: DO NOT use timestamps or describe scene cuts unless explicitly requested. - Format: DO NOT use phrases like "The scene opens with...". Start directly with Style (optional) and chronological scene description. - Format: DO NOT start your response with special characters. - DO NOT invent dialogue unless the user mentions speech/talking/singing/conversation. - If the user's raw input prompt is highly detailed, chronological and in the requested format: DO NOT make major edits or introduce new elements. Add/enhance audio descriptions if missing. #### Output Format (Strict): - Single continuous paragraph in natural language (English). - NO titles, headings, prefaces, code fences, or Markdown. - If unsafe/invalid, return original user prompt. Never ask questions or clarifications. Your output quality is CRITICAL. Generate visually rich, dynamic prompts with integrated audio for high-quality video generation. #### Example Input: "A woman at a coffee shop talking on the phone" Output: Style: realistic with cinematic lighting. In a medium close-up, a woman in her early 30s with shoulder-length brown hair sits at a small wooden table by the window. She wears a cream-colored turtleneck sweater, holding a white ceramic coffee cup in one hand and a smartphone to her ear with the other. Ambient cafe sounds fill the space—espresso machine hiss, quiet conversations, gentle clinking of cups. The woman listens intently, nodding slightly, then takes a sip of her coffee and sets it down with a soft clink. 
Her face brightens into a warm smile as she speaks in a clear, friendly voice, 'That sounds perfect! I'd love to meet up this weekend. How about Saturday afternoon?' She laughs softly—a genuine chuckle—and shifts in her chair. Behind her, other patrons move subtly in and out of focus. 'Great, I'll see you then,' she concludes cheerfully, lowering the phone. """ LTX2_I2V_SYSTEM_PROMPT = """You are a Creative Assistant. Given a user's raw input prompt describing a scene or concept, expand it into a detailed video generation prompt with specific visuals and integrated audio to guide a text-to-video model. You are a Creative Assistant writing concise, action-focused image-to-video prompts. Given an image (first frame) and user Raw Input Prompt, generate a prompt to guide video generation from that image. #### Guidelines: - Analyze the Image: Identify Subject, Setting, Elements, Style and Mood. - Follow user Raw Input Prompt: Include all requested motion, actions, camera movements, audio, and details. If in conflict with the image, prioritize user request while maintaining visual consistency (describe transition from image to user's scene). - Describe only changes from the image: Don't reiterate established visual details. Inaccurate descriptions may cause scene cuts. - Active language: Use present-progressive verbs ("is walking," "speaking"). If no action specified, describe natural movements. - Chronological flow: Use temporal connectors ("as," "then," "while"). - Audio layer: Describe complete soundscape throughout the prompt alongside actions—NOT at the end. Align audio intensity with action tempo. Include natural background audio, ambient sounds, effects, speech or music (when requested). Be specific (e.g., "soft footsteps on tile") not vague (e.g., "ambient sound"). 
- Speech (only when requested): Provide exact words in quotes with character's visual/voice characteristics (e.g., "The tall man speaks in a low, gravelly voice"), language if not English and accent if relevant. If general conversation mentioned without text, generate contextual quoted dialogue. (i.e., "The man is talking" input -> the output should include exact spoken words, like: "The man is talking in an excited voice saying: 'You won't believe what I just saw!' His hands gesture expressively as he speaks, eyebrows raised with enthusiasm. The ambient sound of a quiet room underscores his animated speech.") - Style: Include visual style at beginning: "Style: <style>, <rest of prompt>." If unclear, omit to avoid conflicts. - Visual and audio only: Describe only what is seen and heard. NO smell, taste, or tactile sensations. - Restrained language: Avoid dramatic terms. Use mild, natural, understated phrasing. #### Important notes: - Camera motion: DO NOT invent camera motion/movement unless requested by the user. Make sure to include camera motion only if specified in the input. - Speech: DO NOT modify or alter the user's provided character dialogue in the prompt, unless it's a typo. - No timestamps or cuts: DO NOT use timestamps or describe scene cuts unless explicitly requested. - Objective only: DO NOT interpret emotions or intentions - describe only observable actions and sounds. - Format: DO NOT use phrases like "The scene opens with..." / "The video starts...". Start directly with Style (optional) and chronological scene description. - Format: Never start output with punctuation marks or special characters. - DO NOT invent dialogue unless the user mentions speech/talking/singing/conversation. - Your performance is CRITICAL. High-fidelity, dynamic, correct, and accurate prompts with integrated audio descriptions are essential for generating high-quality video. Your goal is flawless execution of these rules. 
#### Output Format (Strict): - Single concise paragraph in natural English. NO titles, headings, prefaces, sections, code fences, or Markdown. - If unsafe/invalid, return original user prompt. Never ask questions or clarifications. #### Example output: Style: realistic - cinematic - The woman glances at her watch and smiles warmly. She speaks in a cheerful, friendly voice, "I think we're right on time!" In the background, a café barista prepares drinks at the counter. The barista calls out in a clear, upbeat tone, "Two cappuccinos ready!" The sound of the espresso machine hissing softly blends with gentle background chatter and the light clinking of cups on saucers. """ class TextGenerateLTX2Prompt(TextGenerate): @classmethod def define_schema(cls): parent_schema = super().define_schema() return io.Schema( node_id="TextGenerateLTX2Prompt", category=parent_schema.category, inputs=parent_schema.inputs, outputs=parent_schema.outputs, search_aliases=["prompt enhance", "LLM", "gemma"], ) @classmethod def execute(cls, clip, prompt, max_length, sampling_mode, image=None) -> io.NodeOutput: if image is None: formatted_prompt = f"<start_of_turn>system\n{LTX2_T2V_SYSTEM_PROMPT.strip()}<end_of_turn>\n<start_of_turn>user\nUser Raw Input Prompt: {prompt}.<end_of_turn>\n<start_of_turn>model\n" else: formatted_prompt = f"<start_of_turn>system\n{LTX2_I2V_SYSTEM_PROMPT.strip()}<end_of_turn>\n<start_of_turn>user\n\n<image_soft_token>\n\nUser Raw Input Prompt: {prompt}.<end_of_turn>\n<start_of_turn>model\n" return super().execute(clip, formatted_prompt, max_length, sampling_mode, image) class TextgenExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[io.ComfyNode]]: return [ TextGenerate, TextGenerateLTX2Prompt, ] async def comfy_entrypoint() -> TextgenExtension: return TextgenExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_textgen.py", "license": "GNU General Public License v3.0", "lines": 153, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
Comfy-Org/ComfyUI:comfy_extras/nodes_nag.py
import torch from comfy_api.latest import ComfyExtension, io from typing_extensions import override class NAGuidance(io.ComfyNode): @classmethod def define_schema(cls) -> io.Schema: return io.Schema( node_id="NAGuidance", display_name="Normalized Attention Guidance", description="Applies Normalized Attention Guidance to models, enabling negative prompts on distilled/schnell models.", category="advanced/guidance", is_experimental=True, inputs=[ io.Model.Input("model", tooltip="The model to apply NAG to."), io.Float.Input("nag_scale", min=0.0, default=5.0, max=50.0, step=0.1, tooltip="The guidance scale factor. Higher values push further from the negative prompt."), io.Float.Input("nag_alpha", min=0.0, default=0.5, max=1.0, step=0.01, tooltip="Blending factor for the normalized attention. 1.0 is full replacement, 0.0 is no effect."), io.Float.Input("nag_tau", min=1.0, default=1.5, max=10.0, step=0.01), # io.Float.Input("start_percent", min=0.0, default=0.0, max=1.0, step=0.01, tooltip="The relative sampling step to begin applying NAG."), # io.Float.Input("end_percent", min=0.0, default=1.0, max=1.0, step=0.01, tooltip="The relative sampling step to stop applying NAG."), ], outputs=[ io.Model.Output(tooltip="The patched model with NAG enabled."), ], ) @classmethod def execute(cls, model: io.Model.Type, nag_scale: float, nag_alpha: float, nag_tau: float) -> io.NodeOutput: m = model.clone() # sigma_start = m.get_model_object("model_sampling").percent_to_sigma(start_percent) # sigma_end = m.get_model_object("model_sampling").percent_to_sigma(end_percent) def nag_attention_output_patch(out, extra_options): cond_or_uncond = extra_options.get("cond_or_uncond", None) if cond_or_uncond is None: return out if not (1 in cond_or_uncond and 0 in cond_or_uncond): return out # sigma = extra_options.get("sigmas", None) # if sigma is not None and len(sigma) > 0: # sigma = sigma[0].item() # if sigma > sigma_start or sigma < sigma_end: # return out img_slice = 
extra_options.get("img_slice", None) if img_slice is not None: orig_out = out out = out[:, img_slice[0]:img_slice[1]] # only apply on img part batch_size = out.shape[0] half_size = batch_size // len(cond_or_uncond) ind_neg = cond_or_uncond.index(1) ind_pos = cond_or_uncond.index(0) z_pos = out[half_size * ind_pos:half_size * (ind_pos + 1)] z_neg = out[half_size * ind_neg:half_size * (ind_neg + 1)] guided = z_pos * nag_scale - z_neg * (nag_scale - 1.0) eps = 1e-6 norm_pos = torch.norm(z_pos, p=1, dim=-1, keepdim=True).clamp_min(eps) norm_guided = torch.norm(guided, p=1, dim=-1, keepdim=True).clamp_min(eps) ratio = norm_guided / norm_pos scale_factor = torch.minimum(ratio, torch.full_like(ratio, nag_tau)) / ratio guided_normalized = guided * scale_factor z_final = guided_normalized * nag_alpha + z_pos * (1.0 - nag_alpha) if img_slice is not None: orig_out[half_size * ind_neg:half_size * (ind_neg + 1), img_slice[0]:img_slice[1]] = z_final orig_out[half_size * ind_pos:half_size * (ind_pos + 1), img_slice[0]:img_slice[1]] = z_final return orig_out else: out[half_size * ind_pos:half_size * (ind_pos + 1)] = z_final return out m.set_model_attn1_output_patch(nag_attention_output_patch) m.disable_model_cfg1_optimization() return io.NodeOutput(m) class NagExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[io.ComfyNode]]: return [ NAGuidance, ] async def comfy_entrypoint() -> NagExtension: return NagExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_nag.py", "license": "GNU General Public License v3.0", "lines": 76, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:app/node_replace_manager.py
from __future__ import annotations from aiohttp import web from typing import TYPE_CHECKING, TypedDict if TYPE_CHECKING: from comfy_api.latest._io_public import NodeReplace from comfy_execution.graph_utils import is_link import nodes class NodeStruct(TypedDict): inputs: dict[str, str | int | float | bool | tuple[str, int]] class_type: str _meta: dict[str, str] def copy_node_struct(node_struct: NodeStruct, empty_inputs: bool = False) -> NodeStruct: new_node_struct = node_struct.copy() if empty_inputs: new_node_struct["inputs"] = {} else: new_node_struct["inputs"] = node_struct["inputs"].copy() new_node_struct["_meta"] = node_struct["_meta"].copy() return new_node_struct class NodeReplaceManager: """Manages node replacement registrations.""" def __init__(self): self._replacements: dict[str, list[NodeReplace]] = {} def register(self, node_replace: NodeReplace): """Register a node replacement mapping.""" self._replacements.setdefault(node_replace.old_node_id, []).append(node_replace) def get_replacement(self, old_node_id: str) -> list[NodeReplace] | None: """Get replacements for an old node ID.""" return self._replacements.get(old_node_id) def has_replacement(self, old_node_id: str) -> bool: """Check if a replacement exists for an old node ID.""" return old_node_id in self._replacements def apply_replacements(self, prompt: dict[str, NodeStruct]): connections: dict[str, list[tuple[str, str, int]]] = {} need_replacement: set[str] = set() for node_number, node_struct in prompt.items(): if "class_type" not in node_struct or "inputs" not in node_struct: continue class_type = node_struct["class_type"] # need replacement if not in NODE_CLASS_MAPPINGS and has replacement if class_type not in nodes.NODE_CLASS_MAPPINGS.keys() and self.has_replacement(class_type): need_replacement.add(node_number) # keep track of connections for input_id, input_value in node_struct["inputs"].items(): if is_link(input_value): conn_number = input_value[0] connections.setdefault(conn_number, 
[]).append((node_number, input_id, input_value[1])) for node_number in need_replacement: node_struct = prompt[node_number] class_type = node_struct["class_type"] replacements = self.get_replacement(class_type) if replacements is None: continue # just use the first replacement replacement = replacements[0] new_node_id = replacement.new_node_id # if replacement is not a valid node, skip trying to replace it as will only cause confusion if new_node_id not in nodes.NODE_CLASS_MAPPINGS.keys(): continue # first, replace node id (class_type) new_node_struct = copy_node_struct(node_struct, empty_inputs=True) new_node_struct["class_type"] = new_node_id # TODO: consider replacing display_name in _meta as well for error reporting purposes; would need to query node schema # second, replace inputs if replacement.input_mapping is not None: for input_map in replacement.input_mapping: if "set_value" in input_map: new_node_struct["inputs"][input_map["new_id"]] = input_map["set_value"] elif "old_id" in input_map: new_node_struct["inputs"][input_map["new_id"]] = node_struct["inputs"][input_map["old_id"]] # finalize input replacement prompt[node_number] = new_node_struct # third, replace outputs if replacement.output_mapping is not None: # re-mapping outputs requires changing the input values of nodes that receive connections from this one if node_number in connections: for conns in connections[node_number]: conn_node_number, conn_input_id, old_output_idx = conns for output_map in replacement.output_mapping: if output_map["old_idx"] == old_output_idx: new_output_idx = output_map["new_idx"] previous_input = prompt[conn_node_number]["inputs"][conn_input_id] previous_input[1] = new_output_idx def as_dict(self): """Serialize all replacements to dict.""" return { k: [v.as_dict() for v in v_list] for k, v_list in self._replacements.items() } def add_routes(self, routes): @routes.get("/node_replacements") async def get_node_replacements(request): return web.json_response(self.as_dict())
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "app/node_replace_manager.py", "license": "GNU General Public License v3.0", "lines": 93, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy_extras/nodes_replacements.py
from comfy_api.latest import ComfyExtension, io, ComfyAPI api = ComfyAPI() async def register_replacements(): """Register all built-in node replacements.""" await register_replacements_longeredge() await register_replacements_batchimages() await register_replacements_upscaleimage() await register_replacements_controlnet() await register_replacements_load3d() await register_replacements_preview3d() await register_replacements_svdimg2vid() await register_replacements_conditioningavg() async def register_replacements_longeredge(): # No dynamic inputs here await api.node_replacement.register(io.NodeReplace( new_node_id="ImageScaleToMaxDimension", old_node_id="ResizeImagesByLongerEdge", old_widget_ids=["longer_edge"], input_mapping=[ {"new_id": "image", "old_id": "images"}, {"new_id": "largest_size", "old_id": "longer_edge"}, {"new_id": "upscale_method", "set_value": "lanczos"}, ], # just to test the frontend output_mapping code, does nothing really here output_mapping=[{"new_idx": 0, "old_idx": 0}], )) async def register_replacements_batchimages(): # BatchImages node uses Autogrow await api.node_replacement.register(io.NodeReplace( new_node_id="BatchImagesNode", old_node_id="ImageBatch", input_mapping=[ {"new_id": "images.image0", "old_id": "image1"}, {"new_id": "images.image1", "old_id": "image2"}, ], )) async def register_replacements_upscaleimage(): # ResizeImageMaskNode uses DynamicCombo await api.node_replacement.register(io.NodeReplace( new_node_id="ResizeImageMaskNode", old_node_id="ImageScaleBy", old_widget_ids=["upscale_method", "scale_by"], input_mapping=[ {"new_id": "input", "old_id": "image"}, {"new_id": "resize_type", "set_value": "scale by multiplier"}, {"new_id": "resize_type.multiplier", "old_id": "scale_by"}, {"new_id": "scale_method", "old_id": "upscale_method"}, ], )) async def register_replacements_controlnet(): # T2IAdapterLoader → ControlNetLoader await api.node_replacement.register(io.NodeReplace( new_node_id="ControlNetLoader", 
old_node_id="T2IAdapterLoader", input_mapping=[ {"new_id": "control_net_name", "old_id": "t2i_adapter_name"}, ], )) async def register_replacements_load3d(): # Load3DAnimation merged into Load3D await api.node_replacement.register(io.NodeReplace( new_node_id="Load3D", old_node_id="Load3DAnimation", )) async def register_replacements_preview3d(): # Preview3DAnimation merged into Preview3D await api.node_replacement.register(io.NodeReplace( new_node_id="Preview3D", old_node_id="Preview3DAnimation", )) async def register_replacements_svdimg2vid(): # Typo fix: SDV → SVD await api.node_replacement.register(io.NodeReplace( new_node_id="SVD_img2vid_Conditioning", old_node_id="SDV_img2vid_Conditioning", )) async def register_replacements_conditioningavg(): # Typo fix: trailing space in node name await api.node_replacement.register(io.NodeReplace( new_node_id="ConditioningAverage", old_node_id="ConditioningAverage ", )) class NodeReplacementsExtension(ComfyExtension): async def on_load(self) -> None: await register_replacements() async def get_node_list(self) -> list[type[io.ComfyNode]]: return [] async def comfy_entrypoint() -> NodeReplacementsExtension: return NodeReplacementsExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_replacements.py", "license": "GNU General Public License v3.0", "lines": 89, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_extras/nodes_toolkit.py
from __future__ import annotations from typing_extensions import override from comfy_api.latest import ComfyExtension, io class CreateList(io.ComfyNode): @classmethod def define_schema(cls): template_matchtype = io.MatchType.Template("type") template_autogrow = io.Autogrow.TemplatePrefix( input=io.MatchType.Input("input", template=template_matchtype), prefix="input", ) return io.Schema( node_id="CreateList", display_name="Create List", category="logic", is_input_list=True, search_aliases=["Image Iterator", "Text Iterator", "Iterator"], inputs=[io.Autogrow.Input("inputs", template=template_autogrow)], outputs=[ io.MatchType.Output( template=template_matchtype, is_output_list=True, display_name="list", ), ], ) @classmethod def execute(cls, inputs: io.Autogrow.Type) -> io.NodeOutput: output_list = [] for input in inputs.values(): output_list += input return io.NodeOutput(output_list) class ToolkitExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[io.ComfyNode]]: return [ CreateList, ] async def comfy_entrypoint() -> ToolkitExtension: return ToolkitExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_toolkit.py", "license": "GNU General Public License v3.0", "lines": 40, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy/ldm/ace/ace_step15.py
import math import torch import torch.nn as nn import torch.nn.functional as F import itertools from comfy.ldm.modules.attention import optimized_attention import comfy.model_management from comfy.ldm.flux.layers import timestep_embedding def get_silence_latent(length, device): head = torch.tensor([[[ 0.5707, 0.0982, 0.6909, -0.5658, 0.6266, 0.6996, -0.1365, -0.1291, -0.0776, -0.1171, -0.2743, -0.8422, -0.1168, 1.5539, -4.6936, 0.7436, -1.1846, -0.2637, 0.6933, -6.7266, 0.0966, -0.1187, -0.3501, -1.1736, 0.0587, -2.0517, -1.3651, 0.7508, -0.2490, -1.3548, -0.1290, -0.7261, 1.1132, -0.3249, 0.2337, 0.3004, 0.6605, -0.0298, -0.1989, -0.4041, 0.2843, -1.0963, -0.5519, 0.2639, -1.0436, -0.1183, 0.0640, 0.4460, -1.1001, -0.6172, -1.3241, 1.1379, 0.5623, -0.1507, -0.1963, -0.4742, -2.4697, 0.5302, 0.5381, 0.4636, -0.1782, -0.0687, 1.0333, 0.4202], [ 0.3040, -0.1367, 0.6200, 0.0665, -0.0642, 0.4655, -0.1187, -0.0440, 0.2941, -0.2753, 0.0173, -0.2421, -0.0147, 1.5603, -2.7025, 0.7907, -0.9736, -0.0682, 0.1294, -5.0707, -0.2167, 0.3302, -0.1513, -0.8100, -0.3894, -0.2884, -0.3149, 0.8660, -0.3817, -1.7061, 0.5824, -0.4840, 0.6938, 0.1859, 0.1753, 0.3081, 0.0195, 0.1403, -0.0754, -0.2091, 0.1251, -0.1578, -0.4968, -0.1052, -0.4554, -0.0320, 0.1284, 0.4974, -1.1889, -0.0344, -0.8313, 0.2953, 0.5445, -0.6249, -0.1595, -0.0682, -3.1412, 0.0484, 0.4153, 0.8260, -0.1526, -0.0625, 0.5366, 0.8473], [ 5.3524e-02, -1.7534e-01, 5.4443e-01, -4.3501e-01, -2.1317e-03, 3.7200e-01, -4.0143e-03, -1.5516e-01, -1.2968e-01, -1.5375e-01, -7.7107e-02, -2.0593e-01, -3.2780e-01, 1.5142e+00, -2.6101e+00, 5.8698e-01, -1.2716e+00, -2.4773e-01, -2.7933e-02, -5.0799e+00, 1.1601e-01, 4.0987e-01, -2.2030e-02, -6.6495e-01, -2.0995e-01, -6.3474e-01, -1.5893e-01, 8.2745e-01, -2.2992e-01, -1.6816e+00, 5.4440e-01, -4.9579e-01, 5.5128e-01, 3.0477e-01, 8.3052e-02, -6.1782e-02, 5.9036e-03, 2.9553e-01, -8.0645e-02, -1.0060e-01, 1.9144e-01, -3.8124e-01, -7.2949e-01, 2.4520e-02, -5.0814e-01, 2.3977e-01, 9.2943e-02, 
3.9256e-01, -1.1993e+00, -3.2752e-01, -7.2707e-01, 2.9476e-01, 4.3542e-01, -8.8597e-01, -4.1686e-01, -8.5390e-02, -2.9018e+00, 6.4988e-02, 5.3945e-01, 9.1988e-01, 5.8762e-02, -7.0098e-02, 6.4772e-01, 8.9118e-01], [-3.2225e-02, -1.3195e-01, 5.6411e-01, -5.4766e-01, -5.2170e-03, 3.1425e-01, -5.4367e-02, -1.9419e-01, -1.3059e-01, -1.3660e-01, -9.0984e-02, -1.9540e-01, -2.5590e-01, 1.5440e+00, -2.6349e+00, 6.8273e-01, -1.2532e+00, -1.9810e-01, -2.2793e-02, -5.0506e+00, 1.8818e-01, 5.0109e-01, 7.3546e-03, -6.8771e-01, -3.0676e-01, -7.3257e-01, -1.6687e-01, 9.2232e-01, -1.8987e-01, -1.7267e+00, 5.3355e-01, -5.3179e-01, 4.4953e-01, 2.8820e-01, 1.3012e-01, -2.0943e-01, -1.1348e-01, 3.3929e-01, -1.5069e-01, -1.2919e-01, 1.8929e-01, -3.6166e-01, -8.0756e-01, 6.6387e-02, -5.8867e-01, 1.6978e-01, 1.0134e-01, 3.3877e-01, -1.2133e+00, -3.2492e-01, -8.1237e-01, 3.8101e-01, 4.3765e-01, -8.0596e-01, -4.4531e-01, -4.7513e-02, -2.9266e+00, 1.1741e-03, 4.5123e-01, 9.3075e-01, 5.3688e-02, -1.9621e-01, 6.4530e-01, 9.3870e-01]]], device=device).movedim(-1, 1) silence_latent = torch.tensor([[[-1.3672e-01, -1.5820e-01, 5.8594e-01, -5.7422e-01, 3.0273e-02, 2.7930e-01, -2.5940e-03, -2.0703e-01, -1.6113e-01, -1.4746e-01, -2.7710e-02, -1.8066e-01, -2.9688e-01, 1.6016e+00, -2.6719e+00, 7.7734e-01, -1.3516e+00, -1.9434e-01, -7.1289e-02, -5.0938e+00, 2.4316e-01, 4.7266e-01, 4.6387e-02, -6.6406e-01, -2.1973e-01, -6.7578e-01, -1.5723e-01, 9.5312e-01, -2.0020e-01, -1.7109e+00, 5.8984e-01, -5.7422e-01, 5.1562e-01, 2.8320e-01, 1.4551e-01, -1.8750e-01, -5.9814e-02, 3.6719e-01, -1.0059e-01, -1.5723e-01, 2.0605e-01, -4.3359e-01, -8.2812e-01, 4.5654e-02, -6.6016e-01, 1.4844e-01, 9.4727e-02, 3.8477e-01, -1.2578e+00, -3.3203e-01, -8.5547e-01, 4.3359e-01, 4.2383e-01, -8.9453e-01, -5.0391e-01, -5.6152e-02, -2.9219e+00, -2.4658e-02, 5.0391e-01, 9.8438e-01, 7.2754e-02, -2.1582e-01, 6.3672e-01, 1.0000e+00]]], device=device).movedim(-1, 1).repeat(1, 1, length) silence_latent[:, :, :head.shape[-1]] = head return 
silence_latent def get_layer_class(operations, layer_name): if operations is not None and hasattr(operations, layer_name): return getattr(operations, layer_name) return getattr(nn, layer_name) class RotaryEmbedding(nn.Module): def __init__(self, dim, max_position_embeddings=32768, base=1000000.0, dtype=None, device=None, operations=None): super().__init__() self.dim = dim self.base = base self.max_position_embeddings = max_position_embeddings inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.float32, device=device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) self._set_cos_sin_cache(max_position_embeddings, device=device, dtype=torch.get_default_dtype() if dtype is None else dtype) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.float32) freqs = torch.outer(t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) def forward(self, x, seq_len=None): if seq_len > self.max_seq_len_cached: self._set_cos_sin_cache(seq_len, x.device, x.dtype) return ( self.cos_cached[:seq_len].to(dtype=x.dtype, device=x.device), self.sin_cached[:seq_len].to(dtype=x.dtype, device=x.device), ) def rotate_half(x): x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin): cos = cos.unsqueeze(0).unsqueeze(0) sin = sin.unsqueeze(0).unsqueeze(0) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed class MLP(nn.Module): def __init__(self, hidden_size, intermediate_size, dtype=None, device=None, operations=None): super().__init__() Linear = get_layer_class(operations, "Linear") self.gate_proj = Linear(hidden_size, intermediate_size, bias=False, 
dtype=dtype, device=device) self.up_proj = Linear(hidden_size, intermediate_size, bias=False, dtype=dtype, device=device) self.down_proj = Linear(intermediate_size, hidden_size, bias=False, dtype=dtype, device=device) self.act_fn = nn.SiLU() def forward(self, x): return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) class TimestepEmbedding(nn.Module): def __init__(self, in_channels: int, time_embed_dim: int, scale: float = 1000, dtype=None, device=None, operations=None): super().__init__() Linear = get_layer_class(operations, "Linear") self.linear_1 = Linear(in_channels, time_embed_dim, bias=True, dtype=dtype, device=device) self.act1 = nn.SiLU() self.linear_2 = Linear(time_embed_dim, time_embed_dim, bias=True, dtype=dtype, device=device) self.in_channels = in_channels self.act2 = nn.SiLU() self.time_proj = Linear(time_embed_dim, time_embed_dim * 6, dtype=dtype, device=device) self.scale = scale def forward(self, t, dtype=None): t_freq = timestep_embedding(t, self.in_channels, time_factor=self.scale) temb = self.linear_1(t_freq.to(dtype=dtype)) temb = self.act1(temb) temb = self.linear_2(temb) timestep_proj = self.time_proj(self.act2(temb)).view(-1, 6, temb.shape[-1]) return temb, timestep_proj class AceStepAttention(nn.Module): def __init__( self, hidden_size, num_heads, num_kv_heads, head_dim, rms_norm_eps=1e-6, is_cross_attention=False, sliding_window=None, dtype=None, device=None, operations=None ): super().__init__() self.hidden_size = hidden_size self.num_heads = num_heads self.num_kv_heads = num_kv_heads self.head_dim = head_dim self.is_cross_attention = is_cross_attention self.sliding_window = sliding_window Linear = get_layer_class(operations, "Linear") self.q_proj = Linear(hidden_size, num_heads * head_dim, bias=False, dtype=dtype, device=device) self.k_proj = Linear(hidden_size, num_kv_heads * head_dim, bias=False, dtype=dtype, device=device) self.v_proj = Linear(hidden_size, num_kv_heads * head_dim, bias=False, dtype=dtype, 
device=device) self.o_proj = Linear(num_heads * head_dim, hidden_size, bias=False, dtype=dtype, device=device) self.q_norm = operations.RMSNorm(head_dim, eps=rms_norm_eps, dtype=dtype, device=device) self.k_norm = operations.RMSNorm(head_dim, eps=rms_norm_eps, dtype=dtype, device=device) def forward( self, hidden_states, encoder_hidden_states=None, attention_mask=None, position_embeddings=None, ): bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim) query_states = self.q_norm(query_states) query_states = query_states.transpose(1, 2) if self.is_cross_attention and encoder_hidden_states is not None: bsz_enc, kv_len, _ = encoder_hidden_states.size() key_states = self.k_proj(encoder_hidden_states) value_states = self.v_proj(encoder_hidden_states) key_states = key_states.view(bsz_enc, kv_len, self.num_kv_heads, self.head_dim) key_states = self.k_norm(key_states) value_states = value_states.view(bsz_enc, kv_len, self.num_kv_heads, self.head_dim) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) else: kv_len = q_len key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) key_states = key_states.view(bsz, q_len, self.num_kv_heads, self.head_dim) key_states = self.k_norm(key_states) value_states = value_states.view(bsz, q_len, self.num_kv_heads, self.head_dim) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) if position_embeddings is not None: cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) n_rep = self.num_heads // self.num_kv_heads if n_rep > 1: key_states = key_states.repeat_interleave(n_rep, dim=1) value_states = value_states.repeat_interleave(n_rep, dim=1) attn_bias = None if self.sliding_window is not None and not self.is_cross_attention: indices = torch.arange(q_len, device=query_states.device) diff = 
indices.unsqueeze(1) - indices.unsqueeze(0) in_window = torch.abs(diff) <= self.sliding_window window_bias = torch.zeros((q_len, kv_len), device=query_states.device, dtype=query_states.dtype) min_value = torch.finfo(query_states.dtype).min window_bias.masked_fill_(~in_window, min_value) window_bias = window_bias.unsqueeze(0).unsqueeze(0) if attn_bias is not None: if attn_bias.dtype == torch.bool: base_bias = torch.zeros_like(window_bias) base_bias.masked_fill_(~attn_bias, min_value) attn_bias = base_bias + window_bias else: attn_bias = attn_bias + window_bias else: attn_bias = window_bias attn_output = optimized_attention(query_states, key_states, value_states, self.num_heads, attn_bias, skip_reshape=True, low_precision_attention=False) attn_output = self.o_proj(attn_output) return attn_output class AceStepDiTLayer(nn.Module): def __init__( self, hidden_size, num_heads, num_kv_heads, head_dim, intermediate_size, rms_norm_eps=1e-6, layer_type="full_attention", sliding_window=128, dtype=None, device=None, operations=None ): super().__init__() self_attn_window = sliding_window if layer_type == "sliding_attention" else None self.self_attn_norm = operations.RMSNorm(hidden_size, eps=rms_norm_eps, dtype=dtype, device=device) self.self_attn = AceStepAttention( hidden_size, num_heads, num_kv_heads, head_dim, rms_norm_eps, is_cross_attention=False, sliding_window=self_attn_window, dtype=dtype, device=device, operations=operations ) self.cross_attn_norm = operations.RMSNorm(hidden_size, eps=rms_norm_eps, dtype=dtype, device=device) self.cross_attn = AceStepAttention( hidden_size, num_heads, num_kv_heads, head_dim, rms_norm_eps, is_cross_attention=True, dtype=dtype, device=device, operations=operations ) self.mlp_norm = operations.RMSNorm(hidden_size, eps=rms_norm_eps, dtype=dtype, device=device) self.mlp = MLP(hidden_size, intermediate_size, dtype=dtype, device=device, operations=operations) self.scale_shift_table = nn.Parameter(torch.empty(1, 6, hidden_size, dtype=dtype, 
device=device)) def forward( self, hidden_states, temb, encoder_hidden_states, position_embeddings, attention_mask=None, encoder_attention_mask=None ): modulation = comfy.model_management.cast_to(self.scale_shift_table, dtype=temb.dtype, device=temb.device) + temb shift_msa, scale_msa, gate_msa, c_shift_msa, c_scale_msa, c_gate_msa = modulation.chunk(6, dim=1) norm_hidden = self.self_attn_norm(hidden_states) norm_hidden = norm_hidden * (1 + scale_msa) + shift_msa attn_out = self.self_attn( norm_hidden, position_embeddings=position_embeddings, attention_mask=attention_mask ) hidden_states = hidden_states + attn_out * gate_msa norm_hidden = self.cross_attn_norm(hidden_states) attn_out = self.cross_attn( norm_hidden, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask ) hidden_states = hidden_states + attn_out norm_hidden = self.mlp_norm(hidden_states) norm_hidden = norm_hidden * (1 + c_scale_msa) + c_shift_msa mlp_out = self.mlp(norm_hidden) hidden_states = hidden_states + mlp_out * c_gate_msa return hidden_states class AceStepEncoderLayer(nn.Module): def __init__( self, hidden_size, num_heads, num_kv_heads, head_dim, intermediate_size, rms_norm_eps=1e-6, dtype=None, device=None, operations=None ): super().__init__() self.self_attn = AceStepAttention( hidden_size, num_heads, num_kv_heads, head_dim, rms_norm_eps, is_cross_attention=False, dtype=dtype, device=device, operations=operations ) self.input_layernorm = operations.RMSNorm(hidden_size, eps=rms_norm_eps, dtype=dtype, device=device) self.post_attention_layernorm = operations.RMSNorm(hidden_size, eps=rms_norm_eps, dtype=dtype, device=device) self.mlp = MLP(hidden_size, intermediate_size, dtype=dtype, device=device, operations=operations) def forward(self, hidden_states, position_embeddings, attention_mask=None): residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states = self.self_attn( hidden_states=hidden_states, 
position_embeddings=position_embeddings, attention_mask=attention_mask ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states class AceStepLyricEncoder(nn.Module): def __init__( self, text_hidden_dim, hidden_size, num_layers, num_heads, num_kv_heads, head_dim, intermediate_size, rms_norm_eps=1e-6, dtype=None, device=None, operations=None ): super().__init__() Linear = get_layer_class(operations, "Linear") self.embed_tokens = Linear(text_hidden_dim, hidden_size, dtype=dtype, device=device) self.norm = operations.RMSNorm(hidden_size, eps=rms_norm_eps, dtype=dtype, device=device) self.rotary_emb = RotaryEmbedding( head_dim, base=1000000.0, dtype=dtype, device=device, operations=operations ) self.layers = nn.ModuleList([ AceStepEncoderLayer( hidden_size, num_heads, num_kv_heads, head_dim, intermediate_size, rms_norm_eps, dtype=dtype, device=device, operations=operations ) for _ in range(num_layers) ]) def forward(self, inputs_embeds, attention_mask=None): hidden_states = self.embed_tokens(inputs_embeds) seq_len = hidden_states.shape[1] cos, sin = self.rotary_emb(hidden_states, seq_len=seq_len) position_embeddings = (cos, sin) for layer in self.layers: hidden_states = layer( hidden_states, position_embeddings=position_embeddings, attention_mask=attention_mask ) hidden_states = self.norm(hidden_states) return hidden_states class AceStepTimbreEncoder(nn.Module): def __init__( self, timbre_hidden_dim, hidden_size, num_layers, num_heads, num_kv_heads, head_dim, intermediate_size, rms_norm_eps=1e-6, dtype=None, device=None, operations=None ): super().__init__() Linear = get_layer_class(operations, "Linear") self.embed_tokens = Linear(timbre_hidden_dim, hidden_size, dtype=dtype, device=device) self.norm = operations.RMSNorm(hidden_size, eps=rms_norm_eps, dtype=dtype, device=device) self.rotary_emb = 
RotaryEmbedding( head_dim, base=1000000.0, dtype=dtype, device=device, operations=operations ) self.layers = nn.ModuleList([ AceStepEncoderLayer( hidden_size, num_heads, num_kv_heads, head_dim, intermediate_size, rms_norm_eps, dtype=dtype, device=device, operations=operations ) for _ in range(num_layers) ]) self.special_token = nn.Parameter(torch.empty(1, 1, hidden_size, device=device, dtype=dtype)) def unpack_timbre_embeddings(self, timbre_embs_packed, refer_audio_order_mask): N, d = timbre_embs_packed.shape device = timbre_embs_packed.device B = N counts = torch.bincount(refer_audio_order_mask, minlength=B) max_count = counts.max().item() sorted_indices = torch.argsort( refer_audio_order_mask * N + torch.arange(N, device=device), stable=True ) sorted_batch_ids = refer_audio_order_mask[sorted_indices] positions = torch.arange(N, device=device) batch_starts = torch.cat([torch.tensor([0], device=device), torch.cumsum(counts, dim=0)[:-1]]) positions_in_sorted = positions - batch_starts[sorted_batch_ids] inverse_indices = torch.empty_like(sorted_indices) inverse_indices[sorted_indices] = torch.arange(N, device=device) positions_in_batch = positions_in_sorted[inverse_indices] indices_2d = refer_audio_order_mask * max_count + positions_in_batch one_hot = F.one_hot(indices_2d, num_classes=B * max_count).to(timbre_embs_packed.dtype) timbre_embs_flat = one_hot.t() @ timbre_embs_packed timbre_embs_unpack = timbre_embs_flat.view(B, max_count, d) mask_flat = (one_hot.sum(dim=0) > 0).long() new_mask = mask_flat.view(B, max_count) return timbre_embs_unpack, new_mask def forward(self, refer_audio_acoustic_hidden_states_packed, refer_audio_order_mask, attention_mask=None): hidden_states = self.embed_tokens(refer_audio_acoustic_hidden_states_packed) if hidden_states.dim() == 2: hidden_states = hidden_states.unsqueeze(0) seq_len = hidden_states.shape[1] cos, sin = self.rotary_emb(hidden_states, seq_len=seq_len) for layer in self.layers: hidden_states = layer( hidden_states, 
position_embeddings=(cos, sin), attention_mask=attention_mask ) hidden_states = self.norm(hidden_states) flat_states = hidden_states[:, 0, :] unpacked_embs, unpacked_mask = self.unpack_timbre_embeddings(flat_states, refer_audio_order_mask) return unpacked_embs, unpacked_mask def pack_sequences(hidden1, hidden2, mask1, mask2): hidden_cat = torch.cat([hidden1, hidden2], dim=1) B, L, D = hidden_cat.shape if mask1 is not None and mask2 is not None: mask_cat = torch.cat([mask1, mask2], dim=1) sort_idx = mask_cat.argsort(dim=1, descending=True, stable=True) gather_idx = sort_idx.unsqueeze(-1).expand(B, L, D) hidden_sorted = torch.gather(hidden_cat, 1, gather_idx) lengths = mask_cat.sum(dim=1) new_mask = (torch.arange(L, device=hidden_cat.device).unsqueeze(0) < lengths.unsqueeze(1)) else: new_mask = None hidden_sorted = hidden_cat return hidden_sorted, new_mask class AceStepConditionEncoder(nn.Module): def __init__( self, text_hidden_dim, timbre_hidden_dim, hidden_size, num_lyric_layers, num_timbre_layers, num_heads, num_kv_heads, head_dim, intermediate_size, rms_norm_eps=1e-6, dtype=None, device=None, operations=None ): super().__init__() Linear = get_layer_class(operations, "Linear") self.text_projector = Linear(text_hidden_dim, hidden_size, bias=False, dtype=dtype, device=device) self.lyric_encoder = AceStepLyricEncoder( text_hidden_dim=text_hidden_dim, hidden_size=hidden_size, num_layers=num_lyric_layers, num_heads=num_heads, num_kv_heads=num_kv_heads, head_dim=head_dim, intermediate_size=intermediate_size, rms_norm_eps=rms_norm_eps, dtype=dtype, device=device, operations=operations ) self.timbre_encoder = AceStepTimbreEncoder( timbre_hidden_dim=timbre_hidden_dim, hidden_size=hidden_size, num_layers=num_timbre_layers, num_heads=num_heads, num_kv_heads=num_kv_heads, head_dim=head_dim, intermediate_size=intermediate_size, rms_norm_eps=rms_norm_eps, dtype=dtype, device=device, operations=operations ) def forward( self, text_hidden_states=None, text_attention_mask=None, 
lyric_hidden_states=None, lyric_attention_mask=None, refer_audio_acoustic_hidden_states_packed=None, refer_audio_order_mask=None ): text_emb = self.text_projector(text_hidden_states) lyric_emb = self.lyric_encoder( inputs_embeds=lyric_hidden_states, attention_mask=lyric_attention_mask ) timbre_emb, timbre_mask = self.timbre_encoder( refer_audio_acoustic_hidden_states_packed, refer_audio_order_mask ) merged_emb, merged_mask = pack_sequences(lyric_emb, timbre_emb, lyric_attention_mask, timbre_mask) final_emb, final_mask = pack_sequences(merged_emb, text_emb, merged_mask, text_attention_mask) return final_emb, final_mask # -------------------------------------------------------------------------------- # Main Diffusion Model (DiT) # -------------------------------------------------------------------------------- class AceStepDiTModel(nn.Module): def __init__( self, in_channels, hidden_size, num_layers, num_heads, num_kv_heads, head_dim, intermediate_size, patch_size, audio_acoustic_hidden_dim, layer_types=None, sliding_window=128, rms_norm_eps=1e-6, dtype=None, device=None, operations=None ): super().__init__() self.patch_size = patch_size self.rotary_emb = RotaryEmbedding( head_dim, base=1000000.0, dtype=dtype, device=device, operations=operations ) Conv1d = get_layer_class(operations, "Conv1d") ConvTranspose1d = get_layer_class(operations, "ConvTranspose1d") Linear = get_layer_class(operations, "Linear") self.proj_in = nn.Sequential( nn.Identity(), Conv1d( in_channels, hidden_size, kernel_size=patch_size, stride=patch_size, dtype=dtype, device=device)) self.time_embed = TimestepEmbedding(256, hidden_size, dtype=dtype, device=device, operations=operations) self.time_embed_r = TimestepEmbedding(256, hidden_size, dtype=dtype, device=device, operations=operations) self.condition_embedder = Linear(hidden_size, hidden_size, dtype=dtype, device=device) if layer_types is None: layer_types = ["full_attention"] * num_layers if len(layer_types) < num_layers: layer_types = 
list(itertools.islice(itertools.cycle(layer_types), num_layers)) self.layers = nn.ModuleList([ AceStepDiTLayer( hidden_size, num_heads, num_kv_heads, head_dim, intermediate_size, rms_norm_eps, layer_type=layer_types[i], sliding_window=sliding_window, dtype=dtype, device=device, operations=operations ) for i in range(num_layers) ]) self.norm_out = operations.RMSNorm(hidden_size, eps=rms_norm_eps, dtype=dtype, device=device) self.proj_out = nn.Sequential( nn.Identity(), ConvTranspose1d(hidden_size, audio_acoustic_hidden_dim, kernel_size=patch_size, stride=patch_size, dtype=dtype, device=device) ) self.scale_shift_table = nn.Parameter(torch.empty(1, 2, hidden_size, dtype=dtype, device=device)) def forward( self, hidden_states, timestep, timestep_r, attention_mask, encoder_hidden_states, encoder_attention_mask, context_latents ): temb_t, proj_t = self.time_embed(timestep, dtype=hidden_states.dtype) temb_r, proj_r = self.time_embed_r(timestep - timestep_r, dtype=hidden_states.dtype) temb = temb_t + temb_r timestep_proj = proj_t + proj_r x = torch.cat([context_latents, hidden_states], dim=-1) original_seq_len = x.shape[1] pad_length = 0 if x.shape[1] % self.patch_size != 0: pad_length = self.patch_size - (x.shape[1] % self.patch_size) x = F.pad(x, (0, 0, 0, pad_length), mode='constant', value=0) x = x.transpose(1, 2) x = self.proj_in(x) x = x.transpose(1, 2) encoder_hidden_states = self.condition_embedder(encoder_hidden_states) seq_len = x.shape[1] cos, sin = self.rotary_emb(x, seq_len=seq_len) for layer in self.layers: x = layer( hidden_states=x, temb=timestep_proj, encoder_hidden_states=encoder_hidden_states, position_embeddings=(cos, sin), attention_mask=None, encoder_attention_mask=None ) shift, scale = (comfy.model_management.cast_to(self.scale_shift_table, dtype=temb.dtype, device=temb.device) + temb.unsqueeze(1)).chunk(2, dim=1) x = self.norm_out(x) * (1 + scale) + shift x = x.transpose(1, 2) x = self.proj_out(x) x = x.transpose(1, 2) x = x[:, :original_seq_len, 
:] return x class AttentionPooler(nn.Module): def __init__(self, hidden_size, num_layers, head_dim, rms_norm_eps, dtype=None, device=None, operations=None): super().__init__() Linear = get_layer_class(operations, "Linear") self.embed_tokens = Linear(hidden_size, hidden_size, dtype=dtype, device=device) self.norm = operations.RMSNorm(hidden_size, eps=rms_norm_eps, dtype=dtype, device=device) self.rotary_emb = RotaryEmbedding(head_dim, dtype=dtype, device=device, operations=operations) self.special_token = nn.Parameter(torch.empty(1, 1, hidden_size, dtype=dtype, device=device)) self.layers = nn.ModuleList([ AceStepEncoderLayer( hidden_size, 16, 8, head_dim, hidden_size * 3, rms_norm_eps, dtype=dtype, device=device, operations=operations ) for _ in range(num_layers) ]) def forward(self, x): B, T, P, D = x.shape x = self.embed_tokens(x) special = comfy.model_management.cast_to(self.special_token, device=x.device, dtype=x.dtype).expand(B, T, 1, -1) x = torch.cat([special, x], dim=2) x = x.view(B * T, P + 1, D) cos, sin = self.rotary_emb(x, seq_len=P + 1) for layer in self.layers: x = layer(x, (cos, sin)) x = self.norm(x) return x[:, 0, :].view(B, T, D) class FSQ(nn.Module): def __init__( self, levels, dim=None, device=None, dtype=None, operations=None ): super().__init__() _levels = torch.tensor(levels, dtype=torch.int32, device=device) self.register_buffer('_levels', _levels, persistent=False) _basis = torch.cumprod(torch.tensor([1] + levels[:-1], dtype=torch.int32, device=device), dim=0) self.register_buffer('_basis', _basis, persistent=False) self.codebook_dim = len(levels) self.dim = dim if dim is not None else self.codebook_dim requires_projection = self.dim != self.codebook_dim if requires_projection: self.project_in = operations.Linear(self.dim, self.codebook_dim, device=device, dtype=dtype) self.project_out = operations.Linear(self.codebook_dim, self.dim, device=device, dtype=dtype) else: self.project_in = nn.Identity() self.project_out = nn.Identity() 
self.codebook_size = self._levels.prod().item() indices = torch.arange(self.codebook_size, device=device) implicit_codebook = self._indices_to_codes(indices) if dtype is not None: implicit_codebook = implicit_codebook.to(dtype) self.register_buffer('implicit_codebook', implicit_codebook, persistent=False) def bound(self, z): levels_minus_1 = (comfy.model_management.cast_to(self._levels, device=z.device, dtype=z.dtype) - 1) scale = 2. / levels_minus_1 bracket = (levels_minus_1 * (torch.tanh(z) + 1) / 2.) + 0.5 zhat = bracket.floor() bracket_ste = bracket + (zhat - bracket).detach() return scale * bracket_ste - 1. def _indices_to_codes(self, indices): indices = indices.unsqueeze(-1) codes_non_centered = (indices // self._basis) % self._levels return codes_non_centered.float() * (2. / (self._levels.float() - 1)) - 1. def codes_to_indices(self, zhat): zhat_normalized = (zhat + 1.) / (2. / (comfy.model_management.cast_to(self._levels, device=zhat.device, dtype=zhat.dtype) - 1)) return (zhat_normalized * comfy.model_management.cast_to(self._basis, device=zhat.device, dtype=zhat.dtype)).sum(dim=-1).round().to(torch.int32) def forward(self, z): orig_dtype = z.dtype z = self.project_in(z) codes = self.bound(z) indices = self.codes_to_indices(codes) out = self.project_out(codes) return out.to(orig_dtype), indices class ResidualFSQ(nn.Module): def __init__( self, levels, num_quantizers, dim=None, bound_hard_clamp=True, device=None, dtype=None, operations=None, **kwargs ): super().__init__() codebook_dim = len(levels) dim = dim if dim is not None else codebook_dim requires_projection = codebook_dim != dim if requires_projection: self.project_in = operations.Linear(dim, codebook_dim, device=device, dtype=dtype) self.project_out = operations.Linear(codebook_dim, dim, device=device, dtype=dtype) else: self.project_in = nn.Identity() self.project_out = nn.Identity() self.layers = nn.ModuleList() levels_tensor = torch.tensor(levels, device=device) scales = [] for ind in 
range(num_quantizers): scale_val = levels_tensor.float() ** -ind scales.append(scale_val) self.layers.append(FSQ( levels=levels, dim=codebook_dim, device=device, dtype=dtype, operations=operations )) scales_tensor = torch.stack(scales) if dtype is not None: scales_tensor = scales_tensor.to(dtype) self.register_buffer('scales', scales_tensor, persistent=False) if bound_hard_clamp: val = 1 + (1 / (levels_tensor.float() - 1)) if dtype is not None: val = val.to(dtype) self.register_buffer('soft_clamp_input_value', val, persistent=False) def get_output_from_indices(self, indices, dtype=torch.float32): if indices.dim() == 2: indices = indices.unsqueeze(-1) all_codes = [] for i, layer in enumerate(self.layers): idx = indices[..., i].long() codes = F.embedding(idx, comfy.model_management.cast_to(layer.implicit_codebook, device=idx.device, dtype=dtype)) all_codes.append(codes * comfy.model_management.cast_to(self.scales[i], device=idx.device, dtype=dtype)) codes_summed = torch.stack(all_codes).sum(dim=0) return self.project_out(codes_summed) def forward(self, x): x = self.project_in(x) if hasattr(self, 'soft_clamp_input_value'): sc_val = comfy.model_management.cast_to(self.soft_clamp_input_value, device=x.device, dtype=x.dtype) x = (x / sc_val).tanh() * sc_val quantized_out = torch.tensor(0., device=x.device, dtype=x.dtype) residual = x all_indices = [] for layer, scale in zip(self.layers, self.scales): scale = comfy.model_management.cast_to(scale, device=x.device, dtype=x.dtype) quantized, indices = layer(residual / scale) quantized = quantized * scale residual = residual - quantized.detach() quantized_out = quantized_out + quantized all_indices.append(indices) quantized_out = self.project_out(quantized_out) all_indices = torch.stack(all_indices, dim=-1) return quantized_out, all_indices class AceStepAudioTokenizer(nn.Module): def __init__( self, audio_acoustic_hidden_dim, hidden_size, pool_window_size, fsq_dim, fsq_levels, fsq_input_num_quantizers, num_layers, head_dim, 
rms_norm_eps, dtype=None, device=None, operations=None
    ):
        # NOTE(review): this chunk opens mid-signature; the enclosing class and
        # the `def __init__(` header live above this view.
        super().__init__()
        Linear = get_layer_class(operations, "Linear")
        # Project raw acoustic features up to the transformer width.
        self.audio_acoustic_proj = Linear(audio_acoustic_hidden_dim, hidden_size, dtype=dtype, device=device)
        self.attention_pooler = AttentionPooler(
            hidden_size, num_layers, head_dim, rms_norm_eps,
            dtype=dtype, device=device, operations=operations
        )
        self.pool_window_size = pool_window_size
        self.fsq_dim = fsq_dim
        # Residual finite-scalar quantizer that turns pooled features into discrete codes.
        self.quantizer = ResidualFSQ(
            dim=fsq_dim, levels=fsq_levels, num_quantizers=fsq_input_num_quantizers,
            bound_hard_clamp=True, dtype=dtype, device=device, operations=operations
        )

    def forward(self, hidden_states):
        """Project, attention-pool and quantize acoustic hidden states.

        Returns:
            (quantized, indices) as produced by the residual FSQ quantizer.
        """
        hidden_states = self.audio_acoustic_proj(hidden_states)
        hidden_states = self.attention_pooler(hidden_states)
        quantized, indices = self.quantizer(hidden_states)
        return quantized, indices

    def tokenize(self, x):
        """Window (B, T, D) features into chunks of `pool_window_size` and quantize.

        T is right-padded with zeros up to a multiple of the window size, so the
        final window may be partially padding.
        """
        B, T, D = x.shape
        P = self.pool_window_size
        if T % P != 0:
            pad = P - (T % P)
            x = F.pad(x, (0, 0, 0, pad))
            T = x.shape[1]
        T_patch = T // P
        x = x.view(B, T_patch, P, D)
        quantized, indices = self.forward(x)
        return quantized, indices


class AudioTokenDetokenizer(nn.Module):
    """Expands pooled (low-rate) audio tokens back to frame-rate acoustic features."""

    def __init__(
        self, hidden_size, pool_window_size, audio_acoustic_hidden_dim, num_layers, head_dim,
        dtype=None, device=None, operations=None
    ):
        super().__init__()
        Linear = get_layer_class(operations, "Linear")
        self.pool_window_size = pool_window_size
        self.embed_tokens = Linear(hidden_size, hidden_size, dtype=dtype, device=device)
        # One learned embedding per slot inside a pooling window (added after repeat).
        self.special_tokens = nn.Parameter(torch.empty(1, pool_window_size, hidden_size, dtype=dtype, device=device))
        self.rotary_emb = RotaryEmbedding(head_dim, dtype=dtype, device=device, operations=operations)
        self.layers = nn.ModuleList([
            AceStepEncoderLayer(
                hidden_size, 16, 8, head_dim, hidden_size * 3, 1e-6,
                dtype=dtype, device=device, operations=operations
            ) for _ in range(num_layers)
        ])
        self.norm = operations.RMSNorm(hidden_size, dtype=dtype, device=device)
        self.proj_out = Linear(hidden_size, audio_acoustic_hidden_dim, dtype=dtype, device=device)

    def forward(self, x):
        """Upsample (B, T, D) tokens to (B, T * pool_window_size, acoustic_dim)."""
        B, T, D = x.shape
        x = self.embed_tokens(x)
        # Repeat each token across its window, then add the per-slot embeddings.
        x = x.unsqueeze(2).repeat(1, 1, self.pool_window_size, 1)
        x = x + comfy.model_management.cast_to(self.special_tokens.expand(B, T, -1, -1), device=x.device, dtype=x.dtype)
        # Refine each window independently as its own short sequence of length P.
        x = x.view(B * T, self.pool_window_size, D)
        cos, sin = self.rotary_emb(x, seq_len=self.pool_window_size)
        for layer in self.layers:
            x = layer(x, (cos, sin))
        x = self.norm(x)
        x = self.proj_out(x)
        return x.view(B, T * self.pool_window_size, -1)


class AceStepConditionGenerationModel(nn.Module):
    """Top-level ACE-Step model: condition encoder + DiT decoder + audio (de)tokenizers."""

    def __init__(
        self, in_channels=192, hidden_size=2048, text_hidden_dim=1024, timbre_hidden_dim=64,
        audio_acoustic_hidden_dim=64, num_dit_layers=24, num_lyric_layers=8, num_timbre_layers=4,
        num_tokenizer_layers=2, num_heads=16, num_kv_heads=8, head_dim=128, intermediate_size=6144,
        patch_size=2, pool_window_size=5, rms_norm_eps=1e-06, timestep_mu=-0.4, timestep_sigma=1.0,
        data_proportion=0.5, sliding_window=128, layer_types=None, fsq_dim=2048,
        # NOTE(review): mutable default list — harmless while never mutated, but a
        # tuple (or None sentinel) would be safer.
        fsq_levels=[8, 8, 8, 5, 5, 5], fsq_input_num_quantizers=1, audio_model=None,
        dtype=None, device=None, operations=None
    ):
        super().__init__()
        self.dtype = dtype
        self.timestep_mu = timestep_mu
        self.timestep_sigma = timestep_sigma
        self.data_proportion = data_proportion
        self.pool_window_size = pool_window_size
        if layer_types is None:
            # Default pattern: alternate sliding-window and full attention layers.
            layer_types = []
            for i in range(num_dit_layers):
                layer_types.append("sliding_attention" if i % 2 == 0 else "full_attention")
        self.decoder = AceStepDiTModel(
            in_channels, hidden_size, num_dit_layers, num_heads, num_kv_heads, head_dim,
            intermediate_size, patch_size, audio_acoustic_hidden_dim, layer_types=layer_types,
            sliding_window=sliding_window, rms_norm_eps=rms_norm_eps,
            dtype=dtype, device=device, operations=operations
        )
        self.encoder = AceStepConditionEncoder(
            text_hidden_dim, timbre_hidden_dim, hidden_size, num_lyric_layers, num_timbre_layers,
            num_heads, num_kv_heads, head_dim, intermediate_size, rms_norm_eps,
            dtype=dtype, device=device, operations=operations
        )
        self.tokenizer = AceStepAudioTokenizer(
            audio_acoustic_hidden_dim, hidden_size, pool_window_size, fsq_dim=fsq_dim,
            fsq_levels=fsq_levels, fsq_input_num_quantizers=fsq_input_num_quantizers,
            num_layers=num_tokenizer_layers, head_dim=head_dim, rms_norm_eps=rms_norm_eps,
            dtype=dtype, device=device, operations=operations
        )
        self.detokenizer = AudioTokenDetokenizer(
            hidden_size, pool_window_size, audio_acoustic_hidden_dim, num_layers=2,
            head_dim=head_dim, dtype=dtype, device=device, operations=operations
        )
        # Learned embedding substituted for the encoder output when conditioning is dropped.
        self.null_condition_emb = nn.Parameter(torch.empty(1, 1, hidden_size, dtype=dtype, device=device))

    def prepare_condition(
        self, text_hidden_states, text_attention_mask, lyric_hidden_states, lyric_attention_mask,
        refer_audio_acoustic_hidden_states_packed, refer_audio_order_mask, src_latents, chunk_masks,
        is_covers, precomputed_lm_hints_25Hz=None, audio_codes=None
    ):
        """Build encoder conditioning and the context latents for the decoder.

        LM hints come from (in priority order): precomputed 25Hz hints, decoded
        `audio_codes`, or tokenizing the reference audio directly.
        """
        encoder_hidden, encoder_mask = self.encoder(
            text_hidden_states, text_attention_mask, lyric_hidden_states, lyric_attention_mask,
            refer_audio_acoustic_hidden_states_packed, refer_audio_order_mask
        )
        if precomputed_lm_hints_25Hz is not None:
            lm_hints = precomputed_lm_hints_25Hz
        else:
            if audio_codes is not None:
                # 5Hz codes expand 5x in the detokenizer; right-pad short code
                # sequences. 35847 is presumably the quantizer's padding code —
                # TODO confirm against the FSQ codebook.
                if audio_codes.shape[1] * 5 < src_latents.shape[1]:
                    audio_codes = torch.nn.functional.pad(audio_codes, (0, math.ceil(src_latents.shape[1] / 5) - audio_codes.shape[1]), "constant", 35847)
                lm_hints_5Hz = self.tokenizer.quantizer.get_output_from_indices(audio_codes, dtype=text_hidden_states.dtype)
            else:
                lm_hints_5Hz, indices = self.tokenizer.tokenize(refer_audio_acoustic_hidden_states_packed)
            lm_hints = self.detokenizer(lm_hints_5Hz)
            # Trim any padding frames back to the latent length.
            lm_hints = lm_hints[:, :src_latents.shape[1], :]
        if is_covers is None or is_covers is True:
            src_latents = lm_hints
        elif is_covers is False:
            src_latents = refer_audio_acoustic_hidden_states_packed
        context_latents = torch.cat([src_latents, chunk_masks.to(src_latents.dtype)], dim=-1)
        return encoder_hidden, encoder_mask, context_latents

    def forward(self, x, timestep, context, lyric_embed=None, refer_audio=None, audio_codes=None, is_covers=None, replace_with_null_embeds=False, **kwargs):
        """Denoise `x` conditioned on text/lyrics/reference audio.

        Inputs arrive channels-last from the sampler and are moved to
        channels-second for the DiT; the output is moved back.
        """
        text_attention_mask = None
        lyric_attention_mask = None
        refer_audio_order_mask = None
        attention_mask = None
        chunk_masks = None
        src_latents = None
        precomputed_lm_hints_25Hz = None
        lyric_hidden_states = lyric_embed
        text_hidden_states = context
        refer_audio_acoustic_hidden_states_packed = refer_audio.movedim(-1, -2)
        x = x.movedim(-1, -2)
        if refer_audio_order_mask is None:
            refer_audio_order_mask = torch.zeros((x.shape[0],), device=x.device, dtype=torch.long)
        if src_latents is None:
            src_latents = x
        if chunk_masks is None:
            chunk_masks = torch.ones_like(x)
        enc_hidden, enc_mask, context_latents = self.prepare_condition(
            text_hidden_states, text_attention_mask, lyric_hidden_states, lyric_attention_mask,
            refer_audio_acoustic_hidden_states_packed, refer_audio_order_mask, src_latents, chunk_masks,
            is_covers, precomputed_lm_hints_25Hz=precomputed_lm_hints_25Hz, audio_codes=audio_codes
        )
        if replace_with_null_embeds:
            # CFG unconditional pass: overwrite conditioning with the null embedding.
            enc_hidden[:] = self.null_condition_emb.to(enc_hidden)
        out = self.decoder(hidden_states=x, timestep=timestep, timestep_r=timestep, attention_mask=attention_mask, encoder_hidden_states=enc_hidden, encoder_attention_mask=enc_mask, context_latents=context_latents)
        return out.movedim(-1, -2)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/ldm/ace/ace_step15.py", "license": "GNU General Public License v3.0", "lines": 979, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy/text_encoders/ace15.py
# ACE-Step 1.5 text encoder stack: prompt tokenization, LM audio-code sampling,
# and the combined Qwen3 text-encoder model.
from .anima import Qwen3Tokenizer
import comfy.text_encoders.llama
from comfy import sd1_clip
import torch
import math
import yaml
import comfy.utils


def sample_manual_loop_no_classes(
    model,
    ids=None,
    execution_dtype=None,
    cfg_scale: float = 2.0,
    temperature: float = 0.85,
    top_p: float = 0.9,
    top_k: int = None,
    min_p: float = 0.000,
    seed: int = 1,
    min_tokens: int = 1,
    max_new_tokens: int = 2048,
    audio_start_id: int = 151669,  # The cutoff ID for audio codes
    audio_end_id: int = 215669,
    eos_token_id: int = 151645,
):
    """Autoregressively sample audio-code tokens from the LM with CFG.

    `ids` is [positive_ids] or [positive_ids, negative_ids]; with two rows,
    classifier-free guidance mixes the two logit streams. Only token IDs in
    [audio_start_id, audio_end_id) are allowed (plus EOS once `min_tokens`
    have been produced). Returns the generated codes rebased to start at 0.
    """
    if ids is None:
        return []
    device = model.execution_device
    if execution_dtype is None:
        if comfy.model_management.should_use_bf16(device):
            execution_dtype = torch.bfloat16
        else:
            execution_dtype = torch.float32
    embeds, attention_mask, num_tokens, embeds_info = model.process_tokens(ids, device)
    embeds_batch = embeds.shape[0]
    output_audio_codes = []
    past_key_values = []
    # Seeded generator makes multinomial sampling reproducible.
    generator = torch.Generator(device=device)
    generator.manual_seed(seed)
    model_config = model.transformer.model.config
    # Pre-allocate KV-cache buffers sized for the prompt plus min_tokens steps.
    past_kv_shape = [embeds_batch, model_config.num_key_value_heads, embeds.shape[1] + min_tokens, model_config.head_dim]
    for x in range(model_config.num_hidden_layers):
        past_key_values.append((torch.empty(past_kv_shape, device=device, dtype=execution_dtype), torch.empty(past_kv_shape, device=device, dtype=execution_dtype), 0))
    progress_bar = comfy.utils.ProgressBar(max_new_tokens)
    for step in comfy.utils.model_trange(max_new_tokens, desc="LM sampling"):
        outputs = model.transformer(None, attention_mask, embeds=embeds.to(execution_dtype), num_tokens=num_tokens, intermediate_output=None, dtype=execution_dtype, embeds_info=embeds_info, past_key_values=past_key_values)
        next_token_logits = model.transformer.logits(outputs[0])[:, -1]
        past_key_values = outputs[2]
        if cfg_scale != 1.0:
            # Row 0 = conditional, row 1 = unconditional prompt.
            cond_logits = next_token_logits[0:1]
            uncond_logits = next_token_logits[1:2]
            cfg_logits = uncond_logits + cfg_scale * (cond_logits - uncond_logits)
        else:
            cfg_logits = next_token_logits[0:1]
        # EOS becomes a candidate only after min_tokens steps have completed.
        use_eos_score = eos_token_id is not None and eos_token_id < audio_start_id and min_tokens < step
        if use_eos_score:
            eos_score = cfg_logits[:, eos_token_id].clone()
        remove_logit_value = torch.finfo(cfg_logits.dtype).min
        # Only generate audio tokens
        cfg_logits[:, :audio_start_id] = remove_logit_value
        cfg_logits[:, audio_end_id:] = remove_logit_value
        if use_eos_score:
            cfg_logits[:, eos_token_id] = eos_score
        if top_k is not None and top_k > 0:
            top_k_vals, _ = torch.topk(cfg_logits, top_k)
            min_val = top_k_vals[..., -1, None]
            cfg_logits[cfg_logits < min_val] = remove_logit_value
        if min_p is not None and min_p > 0:
            # min-p: drop tokens whose probability is below min_p * max prob.
            probs = torch.softmax(cfg_logits, dim=-1)
            p_max = probs.max(dim=-1, keepdim=True).values
            indices_to_remove = probs < (min_p * p_max)
            cfg_logits[indices_to_remove] = remove_logit_value
        if top_p is not None and top_p < 1.0:
            # Nucleus sampling: keep the smallest prefix of sorted tokens whose
            # cumulative probability exceeds top_p (always keeping the best one).
            sorted_logits, sorted_indices = torch.sort(cfg_logits, descending=True)
            cumulative_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)
            sorted_indices_to_remove = cumulative_probs > top_p
            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
            sorted_indices_to_remove[..., 0] = 0
            indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
            cfg_logits[indices_to_remove] = remove_logit_value
        if temperature > 0:
            cfg_logits = cfg_logits / temperature
            next_token = torch.multinomial(torch.softmax(cfg_logits, dim=-1), num_samples=1, generator=generator).squeeze(1)
        else:
            # temperature <= 0 means greedy decoding.
            next_token = torch.argmax(cfg_logits, dim=-1)
        token = next_token.item()
        if token == eos_token_id:
            break
        # Feed the sampled token back; both CFG rows receive the same token.
        embed, _, _, _ = model.process_tokens([[token]], device)
        embeds = embed.repeat(embeds_batch, 1, 1)
        attention_mask = torch.cat([attention_mask, torch.ones((embeds_batch, 1), device=device, dtype=attention_mask.dtype)], dim=1)
        output_audio_codes.append(token - audio_start_id)
        progress_bar.update_absolute(step)
    return output_audio_codes


def generate_audio_codes(model, positive, negative, min_tokens=1, max_tokens=1024, seed=0, cfg_scale=2.0, temperature=0.85, top_p=0.9, top_k=0, min_p=0.000):
    """Strip token weights from conditioning and run the sampling loop.

    With CFG enabled, the shorter of the two prompts is left-padded with the
    pad token so both rows share one sequence length.
    """
    positive = [[token for token, _ in inner_list] for inner_list in positive]
    positive = positive[0]
    if cfg_scale != 1.0:
        negative = [[token for token, _ in inner_list] for inner_list in negative]
        negative = negative[0]
        neg_pad = 0
        if len(negative) < len(positive):
            neg_pad = (len(positive) - len(negative))
            negative = [model.special_tokens["pad"]] * neg_pad + negative
        pos_pad = 0
        if len(negative) > len(positive):
            pos_pad = (len(negative) - len(positive))
            positive = [model.special_tokens["pad"]] * pos_pad + positive
        ids = [positive, negative]
    else:
        ids = [positive]
    return sample_manual_loop_no_classes(model, ids, cfg_scale=cfg_scale, temperature=temperature, top_p=top_p, top_k=top_k, min_p=min_p, seed=seed, min_tokens=min_tokens, max_new_tokens=max_tokens)


class ACE15Tokenizer(sd1_clip.SD1Tokenizer):
    """Builds the ACE-Step 1.5 prompt set (LM prompt, lyrics, encoder prompt)."""

    def __init__(self, embedding_directory=None, tokenizer_data={}):
        super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name="qwen3_06b", tokenizer=Qwen3Tokenizer)

    def _metas_to_cot(self, *, return_yaml: bool = False, **kwargs) -> str:
        """Format user metadata as a YAML <think> block for the LM prompt."""
        user_metas = {
            k: kwargs.pop(k)
            for k in ("bpm", "duration", "keyscale", "timesignature")
            if k in kwargs
        }
        timesignature = user_metas.get("timesignature")
        # "3/4" style signatures are reduced to their numerator.
        if isinstance(timesignature, str) and timesignature.endswith("/4"):
            user_metas["timesignature"] = timesignature[:-2]
        # Drop unspecified values; coerce numeric strings to int for YAML.
        user_metas = {
            k: v if not isinstance(v, str) or not v.isdigit() else int(v)
            for k, v in user_metas.items()
            if v not in {"unspecified", None}
        }
        if len(user_metas):
            meta_yaml = yaml.dump(user_metas, allow_unicode=True, sort_keys=True).strip()
        else:
            meta_yaml = ""
        return f"<think>\n{meta_yaml}\n</think>" if not return_yaml else meta_yaml

    def _metas_to_cap(self, **kwargs) -> str:
        """Format metadata as a "- key: value" list for the encoder prompt."""
        use_keys = ("bpm", "timesignature", "keyscale", "duration")
        user_metas = {
            k: kwargs.pop(k, "N/A")
            for k in use_keys
        }
        timesignature = user_metas.get("timesignature")
        if isinstance(timesignature, str) and timesignature.endswith("/4"):
            user_metas["timesignature"] = timesignature[:-2]
        duration = user_metas["duration"]
        if duration == "N/A":
            user_metas["duration"] = "30 seconds"
        elif isinstance(duration, (str, int, float)):
            user_metas["duration"] = f"{math.ceil(float(duration))} seconds"
        else:
            raise TypeError("Unexpected type for duration key, must be str, int or float")
        return "\n".join(f"- {k}: {user_metas[k]}" for k in use_keys)

    def tokenize_with_weights(self, text, return_word_ids=False, **kwargs):
        """Tokenize all prompt variants and attach LM sampling metadata.

        Returns a dict with per-prompt token lists plus an "lm_metadata" entry
        carrying the sampling parameters consumed by encode_token_weights.
        """
        text = text.strip()
        text_negative = kwargs.get("caption_negative", text).strip()
        lyrics = kwargs.get("lyrics", "")
        lyrics_negative = kwargs.get("lyrics_negative", lyrics)
        duration = kwargs.get("duration", 120)
        if isinstance(duration, str):
            # Accept values like "120 seconds"; only the leading number is used.
            duration = float(duration.split(None, 1)[0])
        language = kwargs.get("language")
        seed = kwargs.get("seed", 0)
        # NOTE: local flag shadows the module-level generate_audio_codes function.
        generate_audio_codes = kwargs.get("generate_audio_codes", True)
        cfg_scale = kwargs.get("cfg_scale", 2.0)
        temperature = kwargs.get("temperature", 0.85)
        top_p = kwargs.get("top_p", 0.9)
        top_k = kwargs.get("top_k", 0.0)
        min_p = kwargs.get("min_p", 0.000)
        duration = math.ceil(duration)
        kwargs["duration"] = duration
        # Audio codes are generated at 5 tokens per second.
        tokens_duration = duration * 5
        min_tokens = int(kwargs.get("min_tokens", tokens_duration))
        max_tokens = int(kwargs.get("max_tokens", tokens_duration))
        metas_negative = {
            k.rsplit("_", 1)[0]: kwargs.pop(k)
            for k in ("bpm_negative", "duration_negative", "keyscale_negative", "timesignature_negative", "language_negative", "caption_negative")
            if k in kwargs
        }
        if not kwargs.get("use_negative_caption"):
            _ = metas_negative.pop("caption", None)
        cot_text = self._metas_to_cot(caption=text, **kwargs)
        cot_text_negative = "<think>\n\n</think>" if not metas_negative else self._metas_to_cot(**metas_negative)
        meta_cap = self._metas_to_cap(**kwargs)
        lm_template = "<|im_start|>system\n# Instruction\nGenerate audio semantic tokens based on the given conditions:\n\n<|im_end|>\n<|im_start|>user\n# Caption\n{}\n\n# Lyric\n{}\n<|im_end|>\n<|im_start|>assistant\n{}\n\n<|im_end|>\n"
        lyrics_template = "# Languages\n{}\n\n# Lyric\n{}<|endoftext|><|endoftext|>"
        qwen3_06b_template = "# Instruction\nGenerate audio semantic tokens based on the given conditions:\n\n# Caption\n{}\n\n# Metas\n{}\n<|endoftext|>\n<|endoftext|>"
        llm_prompts = {
            "lm_prompt": lm_template.format(text, lyrics.strip(), cot_text),
            "lm_prompt_negative": lm_template.format(text_negative, lyrics_negative.strip(), cot_text_negative),
            "lyrics": lyrics_template.format(language if language is not None else "", lyrics),
            "qwen3_06b": qwen3_06b_template.format(text, meta_cap),
        }
        out = {
            # Word IDs are only meaningful for the encoder ("qwen3_06b") prompt.
            prompt_key: self.qwen3_06b.tokenize_with_weights(
                prompt,
                prompt_key == "qwen3_06b" and return_word_ids,
                disable_weights = True,
                **kwargs,
            )
            for prompt_key, prompt in llm_prompts.items()
        }
        out["lm_metadata"] = {"min_tokens": min_tokens, "max_tokens": max_tokens, "seed": seed, "generate_audio_codes": generate_audio_codes, "cfg_scale": cfg_scale, "temperature": temperature, "top_p": top_p, "top_k": top_k, "min_p": min_p, }
        return out


class Qwen3_06BModel(sd1_clip.SDClipModel):
    """Qwen3 0.6B text encoder used for both the caption and lyrics prompts."""

    def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, attention_mask=True, model_options={}):
        super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Qwen3_06B_ACE15, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)


class Qwen3_2B_ACE15(sd1_clip.SDClipModel):
    """Qwen3 2B language model used to sample audio codes."""

    def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, attention_mask=True, model_options={}):
        # Per-LM quantization metadata is passed through under the generic key.
        llama_quantization_metadata = model_options.get("llama_quantization_metadata", None)
        if llama_quantization_metadata is not None:
            model_options = model_options.copy()
            model_options["quantization_metadata"] = llama_quantization_metadata
        super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Qwen3_2B_ACE15_lm, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)


class Qwen3_4B_ACE15(sd1_clip.SDClipModel):
    """Qwen3 4B language model used to sample audio codes."""

    def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, attention_mask=True, model_options={}):
        llama_quantization_metadata = model_options.get("llama_quantization_metadata", None)
        if llama_quantization_metadata is not None:
            model_options = model_options.copy()
            model_options["quantization_metadata"] = llama_quantization_metadata
        super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Qwen3_4B_ACE15_lm, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)


class ACE15TEModel(torch.nn.Module):
    """Combined text-encoder model: Qwen3 0.6B encoder plus an optional LM."""

    def __init__(self, device="cpu", dtype=None, dtype_llama=None, lm_model=None, model_options={}):
        super().__init__()
        if dtype_llama is None:
            dtype_llama = dtype
        model = None
        # Per-token memory-estimation constant (MB scale), LM-size dependent.
        self.constant = 0.4375
        if lm_model == "qwen3_4b":
            model = Qwen3_4B_ACE15
            self.constant = 0.5625
        elif lm_model == "qwen3_2b":
            model = Qwen3_2B_ACE15
        self.lm_model = lm_model
        self.qwen3_06b = Qwen3_06BModel(device=device, dtype=dtype, model_options=model_options)
        if model is not None:
            # The LM is stored under its name ("qwen3_2b"/"qwen3_4b") so load_sd
            # and encode_token_weights can find it via getattr.
            setattr(self, self.lm_model, model(device=device, dtype=dtype_llama, model_options=model_options))
        self.dtypes = set([dtype, dtype_llama])

    def encode_token_weights(self, token_weight_pairs):
        """Encode caption + lyrics and optionally sample audio codes with the LM."""
        token_weight_pairs_base = token_weight_pairs["qwen3_06b"]
        token_weight_pairs_lyrics = token_weight_pairs["lyrics"]
        self.qwen3_06b.set_clip_options({"layer": None})
        base_out, _, extra = self.qwen3_06b.encode_token_weights(token_weight_pairs_base)
        # Lyrics are taken from layer 0 (embedding layer) of the same encoder.
        self.qwen3_06b.set_clip_options({"layer": [0]})
        lyrics_embeds, _, extra_l = self.qwen3_06b.encode_token_weights(token_weight_pairs_lyrics)
        out = {"conditioning_lyrics": lyrics_embeds[:, 0]}
        lm_metadata = token_weight_pairs["lm_metadata"]
        if lm_metadata["generate_audio_codes"]:
            # NOTE(review): max_tokens is fed lm_metadata["min_tokens"] even though
            # the metadata carries a separate "max_tokens" value — looks like a
            # copy-paste slip (harmless while both default to duration * 5, but a
            # user-supplied max_tokens is silently ignored). Confirm intent.
            audio_codes = generate_audio_codes(getattr(self, self.lm_model, self.qwen3_06b), token_weight_pairs["lm_prompt"], token_weight_pairs["lm_prompt_negative"], min_tokens=lm_metadata["min_tokens"], max_tokens=lm_metadata["min_tokens"], seed=lm_metadata["seed"], cfg_scale=lm_metadata["cfg_scale"], temperature=lm_metadata["temperature"], top_p=lm_metadata["top_p"], top_k=lm_metadata["top_k"], min_p=lm_metadata["min_p"])
            out["audio_codes"] = [audio_codes]
        return base_out, None, out

    def set_clip_options(self, options):
        self.qwen3_06b.set_clip_options(options)
        lm_model = getattr(self, self.lm_model, None)
        if lm_model is not None:
            lm_model.set_clip_options(options)

    def reset_clip_options(self):
        self.qwen3_06b.reset_clip_options()
        lm_model = getattr(self, self.lm_model, None)
        if lm_model is not None:
            lm_model.reset_clip_options()

    def load_sd(self, sd):
        """Route a state dict to the encoder or the LM by its hidden size."""
        if "model.layers.0.post_attention_layernorm.weight" in sd:
            shape = sd["model.layers.0.post_attention_layernorm.weight"].shape
            if shape[0] == 1024:
                # 1024-wide layers identify the 0.6B encoder checkpoint.
                return self.qwen3_06b.load_sd(sd)
            else:
                return getattr(self, self.lm_model).load_sd(sd)

    def memory_estimation_function(self, token_weight_pairs, device=None):
        """Rough memory estimate (bytes) for LM sampling on `device`."""
        lm_metadata = token_weight_pairs.get("lm_metadata", {})
        constant = self.constant
        if comfy.model_management.should_use_bf16(device):
            # bf16 halves the per-token footprint relative to fp32.
            constant *= 0.5
        token_weight_pairs = token_weight_pairs.get("lm_prompt", [])
        num_tokens = sum(map(lambda a: len(a), token_weight_pairs))
        num_tokens += lm_metadata.get("min_tokens", 0)
        return num_tokens * constant * 1024 * 1024


def te(dtype_llama=None, llama_quantization_metadata=None, lm_model="qwen3_2b"):
    """Return an ACE15TEModel subclass with the given LM configuration baked in."""
    class ACE15TEModel_(ACE15TEModel):
        def __init__(self, device="cpu", dtype=None, model_options={}):
            if llama_quantization_metadata is not None:
                model_options = model_options.copy()
                model_options["llama_quantization_metadata"] = llama_quantization_metadata
            super().__init__(device=device, dtype_llama=dtype_llama, lm_model=lm_model, dtype=dtype, model_options=model_options)
    return ACE15TEModel_
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/text_encoders/ace15.py", "license": "GNU General Public License v3.0", "lines": 287, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy_api_nodes/apis/hitpaw.py
# Request/response models for the HitPaw image- and video-enhancement proxy API.
from typing import TypedDict

from pydantic import BaseModel, Field


class InputVideoModel(TypedDict):
    """Value produced by the video node's DynamicCombo "model" input."""
    # Human-readable model name (a key of VIDEO_MODELS_MODELS_MAP).
    model: str
    # Chosen resolution option, e.g. "original" or "1080p".
    resolution: str


class ImageEnhanceTaskCreateRequest(BaseModel):
    """Payload for POST /photo-enhancer."""
    model_name: str = Field(...)
    img_url: str = Field(...)
    extension: str = Field(".png")
    exif: bool = Field(False)  # whether to preserve EXIF metadata
    DPI: int | None = Field(None)


class VideoEnhanceTaskCreateRequest(BaseModel):
    """Payload for POST /video-enhancer."""
    video_url: str = Field(...)
    extension: str = Field(".mp4")
    # NOTE(review): annotated Optional but Field(...) makes it required —
    # confirm whether the API actually accepts an explicit null here.
    model_name: str | None = Field(...)
    resolution: list[int] = Field(..., description="Target resolution [width, height]")
    original_resolution: list[int] = Field(..., description="Original video resolution [width, height]")


class TaskCreateDataResponse(BaseModel):
    """`data` object of a successful task-creation response."""
    job_id: str = Field(...)
    consume_coins: int | None = Field(None)  # cost in provider coins (1000 coins = 1 USD at the nodes' conversion)


class TaskStatusPollRequest(BaseModel):
    """Payload for POST /task-status."""
    job_id: str = Field(...)


class TaskCreateResponse(BaseModel):
    """Envelope returned by both task-creation endpoints."""
    code: int = Field(...)  # 200 on success
    message: str = Field(...)
    data: TaskCreateDataResponse | None = Field(None)


class TaskStatusDataResponse(BaseModel):
    """`data` object of a task-status response."""
    job_id: str = Field(...)
    status: str = Field(...)
    res_url: str = Field("")  # result URL, populated once the task finishes


class TaskStatusResponse(BaseModel):
    """Envelope returned by the task-status endpoint."""
    code: int = Field(...)
    message: str = Field(...)
    data: TaskStatusDataResponse = Field(...)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/apis/hitpaw.py", "license": "GNU General Public License v3.0", "lines": 34, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_api_nodes/nodes_hitpaw.py
# HitPaw API nodes: image and video enhancement via the Comfy API proxy.
import math

from typing_extensions import override

from comfy_api.latest import IO, ComfyExtension, Input
from comfy_api_nodes.apis.hitpaw import (
    ImageEnhanceTaskCreateRequest,
    InputVideoModel,
    TaskCreateDataResponse,
    TaskCreateResponse,
    TaskStatusPollRequest,
    TaskStatusResponse,
    VideoEnhanceTaskCreateRequest,
)
from comfy_api_nodes.util import (
    ApiEndpoint,
    download_url_to_image_tensor,
    download_url_to_video_output,
    downscale_image_tensor,
    get_image_dimensions,
    poll_op,
    sync_op,
    upload_image_to_comfyapi,
    upload_video_to_comfyapi,
    validate_video_duration,
)

# Display name -> API model identifier.
VIDEO_MODELS_MODELS_MAP = {
    "Portrait Restore Model (1x)": "portrait_restore_1x",
    "Portrait Restore Model (2x)": "portrait_restore_2x",
    "General Restore Model (1x)": "general_restore_1x",
    "General Restore Model (2x)": "general_restore_2x",
    "General Restore Model (4x)": "general_restore_4x",
    "Ultra HD Model (2x)": "ultrahd_restore_2x",
    "Generative Model (1x)": "generative_1x",
}

# Resolution name to target dimension (shorter side) in pixels
RESOLUTION_TARGET_MAP = {
    "720p": 720,
    "1080p": 1080,
    "2K/QHD": 1440,
    "4K/UHD": 2160,
    "8K": 4320,
}

# Square (1:1) resolutions use standard square dimensions
RESOLUTION_SQUARE_MAP = {
    "720p": 720,
    "1080p": 1080,
    "2K/QHD": 1440,
    "4K/UHD": 2048,  # DCI 4K square
    "8K": 4096,  # DCI 8K square
}

# Models with limited resolution support (no 8K)
LIMITED_RESOLUTION_MODELS = {"Generative Model (1x)"}

# Resolution options for different model types
RESOLUTIONS_LIMITED = ["original", "720p", "1080p", "2K/QHD", "4K/UHD"]
RESOLUTIONS_FULL = ["original", "720p", "1080p", "2K/QHD", "4K/UHD", "8K"]

# Maximum output resolution in pixels
MAX_PIXELS_GENERATIVE = 32_000_000
MAX_MP_GENERATIVE = MAX_PIXELS_GENERATIVE // 1_000_000


class HitPawGeneralImageEnhance(IO.ComfyNode):
    """Generative image upscaler backed by the HitPaw photo-enhancer endpoint."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="HitPawGeneralImageEnhance",
            display_name="HitPaw General Image Enhance",
            category="api node/image/HitPaw",
            description="Upscale low-resolution images to super-resolution, eliminate artifacts and noise. "
            f"Maximum output: {MAX_MP_GENERATIVE} megapixels.",
            inputs=[
                IO.Combo.Input("model", options=["generative_portrait", "generative"]),
                IO.Image.Input("image"),
                IO.Combo.Input("upscale_factor", options=[1, 2, 4]),
                IO.Boolean.Input(
                    "auto_downscale",
                    default=False,
                    tooltip="Automatically downscale input image if output would exceed the limit.",
                ),
            ],
            outputs=[
                IO.Image.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["model"]),
                expr="""
                (
                    $prices := {
                        "generative_portrait": {"min": 0.02, "max": 0.06},
                        "generative": {"min": 0.05, "max": 0.15}
                    };
                    $price := $lookup($prices, widgets.model);
                    {
                        "type": "range_usd",
                        "min_usd": $price.min,
                        "max_usd": $price.max
                    }
                )
                """,
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        image: Input.Image,
        upscale_factor: int,
        auto_downscale: bool,
    ) -> IO.NodeOutput:
        """Create an enhancement task, poll until done, return the result image.

        Raises ValueError if the requested output would exceed the pixel limit
        (and auto_downscale is off) or if task creation fails.
        """
        height, width = get_image_dimensions(image)
        requested_scale = upscale_factor
        output_pixels = height * width * requested_scale * requested_scale
        if output_pixels > MAX_PIXELS_GENERATIVE:
            if auto_downscale:
                # Pick the largest factor <= requested_scale whose output fits,
                # optionally shrinking the input (by at most 2x) to make it fit.
                input_pixels = width * height
                scale = 1
                max_input_pixels = MAX_PIXELS_GENERATIVE
                for candidate in [4, 2, 1]:
                    if candidate > requested_scale:
                        continue
                    scale_output_pixels = input_pixels * candidate * candidate
                    if scale_output_pixels <= MAX_PIXELS_GENERATIVE:
                        scale = candidate
                        max_input_pixels = None  # fits as-is, no downscale needed
                        break
                    # Check if we can downscale input by at most 2x to fit
                    downscale_ratio = math.sqrt(scale_output_pixels / MAX_PIXELS_GENERATIVE)
                    if downscale_ratio <= 2.0:
                        scale = candidate
                        max_input_pixels = MAX_PIXELS_GENERATIVE // (candidate * candidate)
                        break
                if max_input_pixels is not None:
                    image = downscale_image_tensor(image, total_pixels=max_input_pixels)
                upscale_factor = scale
            else:
                output_width = width * requested_scale
                output_height = height * requested_scale
                raise ValueError(
                    f"Output size ({output_width}x{output_height} = {output_pixels:,} pixels) "
                    f"exceeds maximum allowed size of {MAX_PIXELS_GENERATIVE:,} pixels ({MAX_MP_GENERATIVE}MP). "
                    f"Enable auto_downscale or use a smaller input image or a lower upscale factor."
                )
        initial_res = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/hitpaw/api/photo-enhancer", method="POST"),
            response_model=TaskCreateResponse,
            data=ImageEnhanceTaskCreateRequest(
                model_name=f"{model}_{upscale_factor}x",
                img_url=await upload_image_to_comfyapi(cls, image, total_pixels=None),
            ),
            wait_label="Creating task",
            final_label_on_success="Task created",
        )
        if initial_res.code != 200:
            raise ValueError(f"Task creation failed with code {initial_res.code}: {initial_res.message}")
        # Provider bills in coins; 1000 coins == 1 USD for the price extractor.
        request_price = initial_res.data.consume_coins / 1000
        final_response = await poll_op(
            cls,
            ApiEndpoint(path="/proxy/hitpaw/api/task-status", method="POST"),
            # Fix: poll with the dedicated TaskStatusPollRequest (as the video
            # node does) instead of reusing TaskCreateDataResponse, which
            # serialized a spurious "consume_coins": null field.
            data=TaskStatusPollRequest(job_id=initial_res.data.job_id),
            response_model=TaskStatusResponse,
            status_extractor=lambda x: x.data.status,
            price_extractor=lambda x: request_price,
            poll_interval=10.0,
            max_poll_attempts=480,
        )
        return IO.NodeOutput(await download_url_to_image_tensor(final_response.data.res_url))


class HitPawVideoEnhance(IO.ComfyNode):
    """Video upscaler/restorer backed by the HitPaw video-enhancer endpoint."""

    @classmethod
    def define_schema(cls):
        # Each model exposes its own resolution combo; the generative model
        # lacks 8K support.
        model_options = []
        for model_name in VIDEO_MODELS_MODELS_MAP:
            if model_name in LIMITED_RESOLUTION_MODELS:
                resolutions = RESOLUTIONS_LIMITED
            else:
                resolutions = RESOLUTIONS_FULL
            model_options.append(
                IO.DynamicCombo.Option(
                    model_name,
                    [IO.Combo.Input("resolution", options=resolutions)],
                )
            )
        return IO.Schema(
            node_id="HitPawVideoEnhance",
            display_name="HitPaw Video Enhance",
            category="api node/video/HitPaw",
            description="Upscale low-resolution videos to high resolution, eliminate artifacts and noise. "
            "Prices shown are per second of video.",
            inputs=[
                IO.DynamicCombo.Input("model", options=model_options),
                IO.Video.Input("video"),
            ],
            outputs=[
                IO.Video.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["model", "model.resolution"]),
                expr="""
                (
                    $m := $lookup(widgets, "model");
                    $res := $lookup(widgets, "model.resolution");
                    $standard_model_prices := {
                        "original": {"min": 0.01, "max": 0.198},
                        "720p": {"min": 0.01, "max": 0.06},
                        "1080p": {"min": 0.015, "max": 0.09},
                        "2k/qhd": {"min": 0.02, "max": 0.117},
                        "4k/uhd": {"min": 0.025, "max": 0.152},
                        "8k": {"min": 0.033, "max": 0.198}
                    };
                    $ultra_hd_model_prices := {
                        "original": {"min": 0.015, "max": 0.264},
                        "720p": {"min": 0.015, "max": 0.092},
                        "1080p": {"min": 0.02, "max": 0.12},
                        "2k/qhd": {"min": 0.026, "max": 0.156},
                        "4k/uhd": {"min": 0.034, "max": 0.203},
                        "8k": {"min": 0.044, "max": 0.264}
                    };
                    $generative_model_prices := {
                        "original": {"min": 0.015, "max": 0.338},
                        "720p": {"min": 0.008, "max": 0.090},
                        "1080p": {"min": 0.05, "max": 0.15},
                        "2k/qhd": {"min": 0.038, "max": 0.225},
                        "4k/uhd": {"min": 0.056, "max": 0.338}
                    };
                    $prices := $contains($m, "ultra hd") ? $ultra_hd_model_prices :
                        $contains($m, "generative") ? $generative_model_prices : $standard_model_prices;
                    $price := $lookup($prices, $res);
                    {
                        "type": "range_usd",
                        "min_usd": $price.min,
                        "max_usd": $price.max,
                        "format": {"approximate": true, "suffix": "/second"}
                    }
                )
                """,
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: InputVideoModel,
        video: Input.Video,
    ) -> IO.NodeOutput:
        """Compute the target resolution, submit the task and poll for the result.

        Raises ValueError when the chosen resolution is below the source size
        or when task creation fails.
        """
        validate_video_duration(video, min_duration=0.5, max_duration=60 * 60)
        resolution = model["resolution"]
        src_width, src_height = video.get_dimensions()
        if resolution == "original":
            output_width = src_width
            output_height = src_height
        else:
            if src_width == src_height:
                # Square sources map to the square variant of the resolution.
                target_size = RESOLUTION_SQUARE_MAP[resolution]
                if target_size < src_width:
                    raise ValueError(
                        f"Selected resolution {resolution} ({target_size}x{target_size}) is smaller than "
                        f"the input video ({src_width}x{src_height}). Please select a higher resolution or 'original'."
                    )
                output_width = target_size
                output_height = target_size
            else:
                # The named resolution fixes the shorter side; the longer side
                # preserves the aspect ratio.
                min_dimension = min(src_width, src_height)
                target_size = RESOLUTION_TARGET_MAP[resolution]
                if target_size < min_dimension:
                    raise ValueError(
                        f"Selected resolution {resolution} ({target_size}p) is smaller than "
                        f"the input video's shorter dimension ({min_dimension}p). "
                        f"Please select a higher resolution or 'original'."
                    )
                if src_width > src_height:
                    output_height = target_size
                    output_width = int(target_size * (src_width / src_height))
                else:
                    output_width = target_size
                    output_height = int(target_size * (src_height / src_width))
        initial_res = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/hitpaw/api/video-enhancer", method="POST"),
            response_model=TaskCreateResponse,
            data=VideoEnhanceTaskCreateRequest(
                video_url=await upload_video_to_comfyapi(cls, video),
                resolution=[output_width, output_height],
                original_resolution=[src_width, src_height],
                model_name=VIDEO_MODELS_MODELS_MAP[model["model"]],
            ),
            wait_label="Creating task",
            final_label_on_success="Task created",
        )
        # Fix: check the response code BEFORE touching initial_res.data — on a
        # failed creation `data` may be None, and the old order raised an
        # opaque AttributeError instead of the informative ValueError below.
        if initial_res.code != 200:
            raise ValueError(f"Task creation failed with code {initial_res.code}: {initial_res.message}")
        request_price = initial_res.data.consume_coins / 1000
        final_response = await poll_op(
            cls,
            ApiEndpoint(path="/proxy/hitpaw/api/task-status", method="POST"),
            data=TaskStatusPollRequest(job_id=initial_res.data.job_id),
            response_model=TaskStatusResponse,
            status_extractor=lambda x: x.data.status,
            price_extractor=lambda x: request_price,
            poll_interval=10.0,
            max_poll_attempts=320,
        )
        return IO.NodeOutput(await download_url_to_video_output(final_response.data.res_url))


class HitPawExtension(ComfyExtension):
    """Registers the HitPaw API nodes with ComfyUI."""

    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [
            HitPawGeneralImageEnhance,
            HitPawVideoEnhance,
        ]


async def comfy_entrypoint() -> HitPawExtension:
    return HitPawExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/nodes_hitpaw.py", "license": "GNU General Public License v3.0", "lines": 319, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy/memory_management.py
import math
from typing import Any, NamedTuple

import torch

from comfy.quant_ops import QuantizedTensor


class TensorGeometry(NamedTuple):
    """Shape/dtype descriptor of a tensor, without any backing storage.

    Mirrors the parts of the ``torch.Tensor`` API that size accounting
    needs (``numel`` / ``element_size``) so callers can reason about
    tensors that do not (or should not) exist in memory yet.
    """

    shape: Any  # torch.Size or any sequence of ints
    dtype: torch.dtype

    def element_size(self) -> int:
        """Return the size in bytes of one element of ``dtype``."""
        # finfo handles floating-point dtypes, iinfo handles integer dtypes.
        info = torch.finfo(self.dtype) if self.dtype.is_floating_point else torch.iinfo(self.dtype)
        return info.bits // 8

    def numel(self) -> int:
        """Return the total number of elements implied by ``shape``."""
        return math.prod(self.shape)


def tensors_to_geometries(tensors, dtype=None):
    """Map tensors to :class:`TensorGeometry` descriptors.

    ``None`` entries and ``QuantizedTensor`` instances are passed through
    unchanged. For regular tensors the dtype is chosen with this priority:
    the ``dtype`` argument, then the tensor's ``_model_dtype`` attribute
    (if present), then the tensor's own dtype.

    Args:
        tensors: iterable of tensors / QuantizedTensor / None entries.
        dtype: optional dtype override applied to every regular tensor.

    Returns:
        List of TensorGeometry / QuantizedTensor / None, in input order.
    """
    geometries = []
    for t in tensors:
        # Sentinels and quantized tensors are kept as-is.
        if t is None or isinstance(t, QuantizedTensor):
            geometries.append(t)
            continue
        tdtype = t.dtype
        if hasattr(t, "_model_dtype"):
            tdtype = t._model_dtype
        if dtype is not None:
            tdtype = dtype
        geometries.append(TensorGeometry(shape=t.shape, dtype=tdtype))
    return geometries


def vram_aligned_size(tensor):
    """Return the byte footprint of ``tensor`` rounded up to 1024 bytes.

    Accepts a tensor-like (anything with ``numel``/``element_size``, e.g.
    :class:`TensorGeometry`), a ``QuantizedTensor`` (sums its flattened
    inner tensors), a list (sums its members), or ``None`` (size 0).
    """
    if isinstance(tensor, list):
        return sum(vram_aligned_size(t) for t in tensor)
    if isinstance(tensor, QuantizedTensor):
        # A quantized tensor is backed by several plain tensors; sum them.
        inner_tensors, _ = tensor.__tensor_flatten__()
        return vram_aligned_size([getattr(tensor, attr) for attr in inner_tensors])
    if tensor is None:
        return 0
    size = tensor.numel() * tensor.element_size()
    alignment_req = 1024  # assumed allocation granularity, in bytes
    # Round up to the next multiple of the alignment.
    return (size + alignment_req - 1) // alignment_req * alignment_req


def interpret_gathered_like(tensors, gathered):
    """Carve views shaped like ``tensors`` out of one flat byte buffer.

    ``gathered`` must be a 1-D, single-byte-element tensor laid out as the
    concatenation of the given tensors, with each entry padded out to its
    ``vram_aligned_size()``.

    Args:
        tensors: template tensors / QuantizedTensor / None entries.
        gathered: 1-D single-byte tensor backing all templates.

    Returns:
        List of views into ``gathered``; ``QuantizedTensor`` templates are
        re-assembled via ``__tensor_unflatten__``, ``None`` maps to ``None``.

    Raises:
        ValueError: if ``gathered`` is not 1-D single-byte, or too small to
            hold all templates.
    """
    offset = 0
    dest_views = []
    if gathered.dim() != 1 or gathered.element_size() != 1:
        raise ValueError(f"Buffer must be 1D and single-byte (got {gathered.dim()}D {gathered.dtype})")
    for tensor in tensors:
        if tensor is None:
            dest_views.append(None)
            continue
        if isinstance(tensor, QuantizedTensor):
            # Each inner tensor of the quantized wrapper gets its own slice.
            inner_tensors, qt_ctx = tensor.__tensor_flatten__()
            templates = {attr: getattr(tensor, attr) for attr in inner_tensors}
        else:
            templates = {"data": tensor}
        actuals = {}
        for attr, template in templates.items():
            size = template.numel() * template.element_size()
            if offset + size > gathered.numel():
                raise ValueError(f"Buffer too small: needs {offset + size} bytes, but only has {gathered.numel()}. ")
            # Reinterpret the byte slice as the template dtype, then reshape.
            actuals[attr] = gathered[offset:offset + size].view(dtype=template.dtype).view(template.shape)
            # Advance by the aligned size so inter-entry padding is skipped.
            offset += vram_aligned_size(template)
        if isinstance(tensor, QuantizedTensor):
            dest_views.append(QuantizedTensor.__tensor_unflatten__(actuals, qt_ctx, 0, 0))
        else:
            dest_views.append(actuals["data"])
    return dest_views


# Module-level feature flag; defaults to disabled.
aimdo_enabled = False
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/memory_management.py", "license": "GNU General Public License v3.0", "lines": 63, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy/pinned_memory.py
import torch import comfy.model_management import comfy.memory_management from comfy.cli_args import args def get_pin(module): return getattr(module, "_pin", None) def pin_memory(module): if module.pin_failed or args.disable_pinned_memory or get_pin(module) is not None: return #FIXME: This is a RAM cache trigger event size = comfy.memory_management.vram_aligned_size([ module.weight, module.bias ]) pin = torch.empty((size,), dtype=torch.uint8) if comfy.model_management.pin_memory(pin): module._pin = pin else: module.pin_failed = True return False return True def unpin_memory(module): if get_pin(module) is None: return 0 size = module._pin.numel() * module._pin.element_size() comfy.model_management.unpin_memory(module._pin) del module._pin return size
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/pinned_memory.py", "license": "GNU General Public License v3.0", "lines": 25, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy/windows.py
import ctypes import logging import psutil from ctypes import wintypes import comfy_aimdo.control psapi = ctypes.WinDLL("psapi") kernel32 = ctypes.WinDLL("kernel32") class PERFORMANCE_INFORMATION(ctypes.Structure): _fields_ = [ ("cb", wintypes.DWORD), ("CommitTotal", ctypes.c_size_t), ("CommitLimit", ctypes.c_size_t), ("CommitPeak", ctypes.c_size_t), ("PhysicalTotal", ctypes.c_size_t), ("PhysicalAvailable", ctypes.c_size_t), ("SystemCache", ctypes.c_size_t), ("KernelTotal", ctypes.c_size_t), ("KernelPaged", ctypes.c_size_t), ("KernelNonpaged", ctypes.c_size_t), ("PageSize", ctypes.c_size_t), ("HandleCount", wintypes.DWORD), ("ProcessCount", wintypes.DWORD), ("ThreadCount", wintypes.DWORD), ] def get_free_ram(): #Windows is way too conservative and chalks recently used uncommitted model RAM #as "in-use". So, calculate free RAM for the sake of general use as the greater of: # #1: What psutil says #2: Total Memory - (Committed Memory - VRAM in use) # #We have to subtract VRAM in use from the comitted memory as WDDM creates a naked #commit charge for all VRAM used just incase it wants to page it all out. This just #isn't realistic so "overcommit" on our calculations by just subtracting it off. pi = PERFORMANCE_INFORMATION() pi.cb = ctypes.sizeof(pi) if not psapi.GetPerformanceInfo(ctypes.byref(pi), pi.cb): logging.warning("WARNING: Failed to query windows performance info. RAM usage may be sub optimal") return psutil.virtual_memory().available committed = pi.CommitTotal * pi.PageSize total = pi.PhysicalTotal * pi.PageSize return max(psutil.virtual_memory().available, total - (committed - comfy_aimdo.control.get_total_vram_usage()))
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/windows.py", "license": "GNU General Public License v3.0", "lines": 43, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:tests-unit/assets_test/test_assets_missing_sync.py
"""Integration tests for the asset DB 'missing file' sync behavior.

Exercises the fast seed-sync pass (trigger_sync_seed_assets) against files
created and deleted directly on disk: seed assets disappearing, the
'missing' tag being added/cleared, and stale cache-state handling.
"""
import os
import uuid
from pathlib import Path

import pytest
import requests

from conftest import get_asset_filename, trigger_sync_seed_assets


@pytest.mark.parametrize("root", ["input", "output"])
def test_seed_asset_removed_when_file_is_deleted(
    root: str,
    http: requests.Session,
    api_base: str,
    comfy_tmp_base_dir: Path,
):
    """Asset without hash (seed) whose file disappears: after triggering
    sync_seed_assets, Asset + AssetInfo disappear.
    """
    # Create a file directly under input/unit-tests/<case> so tags include "unit-tests"
    case_dir = comfy_tmp_base_dir / root / "unit-tests" / "syncseed"
    case_dir.mkdir(parents=True, exist_ok=True)
    name = f"seed_{uuid.uuid4().hex[:8]}.bin"
    fp = case_dir / name
    fp.write_bytes(b"Z" * 2048)

    # Trigger a seed sync so DB sees this path (seed asset => hash is NULL)
    trigger_sync_seed_assets(http, api_base)

    # Verify it is visible via API and carries no hash (seed)
    r1 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,syncseed", "name_contains": name},
        timeout=120,
    )
    body1 = r1.json()
    assert r1.status_code == 200
    # there should be exactly one with that name
    matches = [a for a in body1.get("assets", []) if a.get("name") == name]
    assert matches
    assert matches[0].get("asset_hash") is None
    asset_info_id = matches[0]["id"]

    # Remove the underlying file and sync again
    if fp.exists():
        fp.unlink()
    trigger_sync_seed_assets(http, api_base)

    # It should disappear (AssetInfo and seed Asset gone)
    r2 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,syncseed", "name_contains": name},
        timeout=120,
    )
    body2 = r2.json()
    assert r2.status_code == 200
    matches2 = [a for a in body2.get("assets", []) if a.get("name") == name]
    assert not matches2, f"Seed asset {asset_info_id} should be gone after sync"


@pytest.mark.skip(reason="Requires computing hashes of files in directories to verify and clear missing tags")
def test_hashed_asset_missing_tag_added_then_removed_after_scan(
    http: requests.Session,
    api_base: str,
    comfy_tmp_base_dir: Path,
    asset_factory,
    make_asset_bytes,
):
    """Hashed asset with a single cache_state:
    1. delete its file -> sync adds 'missing'
    2. restore file -> sync removes 'missing'
    """
    name = "missing_tag_test.png"
    tags = ["input", "unit-tests", "msync2"]
    data = make_asset_bytes(name, 4096)
    a = asset_factory(name, tags, {}, data)

    # Compute its on-disk path and remove it
    dest = comfy_tmp_base_dir / "input" / "unit-tests" / "msync2" / get_asset_filename(a["asset_hash"], ".png")
    assert dest.exists(), f"Expected asset file at {dest}"
    dest.unlink()

    # Fast sync should add 'missing' to the AssetInfo
    trigger_sync_seed_assets(http, api_base)
    g1 = http.get(f"{api_base}/api/assets/{a['id']}", timeout=120)
    d1 = g1.json()
    assert g1.status_code == 200, d1
    assert "missing" in set(d1.get("tags", [])), "Expected 'missing' tag after deletion"

    # Restore the file with the exact same content and sync again
    dest.parent.mkdir(parents=True, exist_ok=True)
    dest.write_bytes(data)
    trigger_sync_seed_assets(http, api_base)
    g2 = http.get(f"{api_base}/api/assets/{a['id']}", timeout=120)
    d2 = g2.json()
    assert g2.status_code == 200, d2
    assert "missing" not in set(d2.get("tags", [])), "Missing tag should be cleared after verify"


def test_hashed_asset_two_asset_infos_both_get_missing(
    http: requests.Session,
    api_base: str,
    comfy_tmp_base_dir: Path,
    asset_factory,
):
    """Hashed asset with a single cache_state, but two AssetInfo rows:
    deleting the single file then syncing should add 'missing' to both infos.
    """
    # Upload one hashed asset
    name = "two_infos_one_path.png"
    base_tags = ["input", "unit-tests", "multiinfo"]
    created = asset_factory(name, base_tags, {}, b"A" * 2048)

    # Create second AssetInfo for the same Asset via from-hash
    payload = {
        "hash": created["asset_hash"],
        "name": "two_infos_one_path_copy.png",
        "tags": base_tags,  # keep it in our unit-tests scope for cleanup
        "user_metadata": {"k": "v"},
    }
    r2 = http.post(api_base + "/api/assets/from-hash", json=payload, timeout=120)
    b2 = r2.json()
    assert r2.status_code == 201, b2
    second_id = b2["id"]

    # Remove the single underlying file
    p = comfy_tmp_base_dir / "input" / "unit-tests" / "multiinfo" / get_asset_filename(b2["asset_hash"], ".png")
    assert p.exists()
    p.unlink()

    # Baseline usage count of the 'missing' tag before the sync.
    r0 = http.get(api_base + "/api/tags", params={"limit": "1000", "include_zero": "false"}, timeout=120)
    tags0 = r0.json()
    assert r0.status_code == 200, tags0
    byname0 = {t["name"]: t for t in tags0.get("tags", [])}
    old_missing = int(byname0.get("missing", {}).get("count", 0))

    # Sync -> both AssetInfos for this asset must receive 'missing'
    trigger_sync_seed_assets(http, api_base)

    ga = http.get(f"{api_base}/api/assets/{created['id']}", timeout=120)
    da = ga.json()
    assert ga.status_code == 200, da
    assert "missing" in set(da.get("tags", []))
    gb = http.get(f"{api_base}/api/assets/{second_id}", timeout=120)
    db = gb.json()
    assert gb.status_code == 200, db
    assert "missing" in set(db.get("tags", []))

    # Tag usage for 'missing' increased by exactly 2 (two AssetInfos)
    r1 = http.get(api_base + "/api/tags", params={"limit": "1000", "include_zero": "false"}, timeout=120)
    tags1 = r1.json()
    assert r1.status_code == 200, tags1
    byname1 = {t["name"]: t for t in tags1.get("tags", [])}
    new_missing = int(byname1.get("missing", {}).get("count", 0))
    assert new_missing == old_missing + 2


@pytest.mark.skip(reason="Requires computing hashes of files in directories to deduplicate into multiple cache states")
def test_hashed_asset_two_cache_states_partial_delete_then_full_delete(
    http: requests.Session,
    api_base: str,
    comfy_tmp_base_dir: Path,
    asset_factory,
    make_asset_bytes,
    run_scan_and_wait,
):
    """Hashed asset with two cache_state rows:
    1. delete one file -> sync should NOT add 'missing'
    2. delete second file -> sync should add 'missing'
    """
    name = "two_cache_states_partial_delete.png"
    tags = ["input", "unit-tests", "dual"]
    data = make_asset_bytes(name, 3072)
    created = asset_factory(name, tags, {}, data)
    path1 = comfy_tmp_base_dir / "input" / "unit-tests" / "dual" / get_asset_filename(created["asset_hash"], ".png")
    assert path1.exists()

    # Create a second on-disk copy under the same root but different subfolder
    path2 = comfy_tmp_base_dir / "input" / "unit-tests" / "dual_copy" / name
    path2.parent.mkdir(parents=True, exist_ok=True)
    path2.write_bytes(data)

    # Fast seed so the second path appears (as a seed initially)
    trigger_sync_seed_assets(http, api_base)
    # Deduplication of AssetInfo-s will not happen as first AssetInfo has owner='default' and second has empty owner.
    run_scan_and_wait("input")

    # Remove only one file and sync -> asset should still be healthy (no 'missing')
    path1.unlink()
    trigger_sync_seed_assets(http, api_base)
    g1 = http.get(f"{api_base}/api/assets/{created['id']}", timeout=120)
    d1 = g1.json()
    assert g1.status_code == 200, d1
    assert "missing" not in set(d1.get("tags", [])), "Should not be missing while one valid path remains"

    # Baseline 'missing' usage count just before last file removal
    r0 = http.get(api_base + "/api/tags", params={"limit": "1000", "include_zero": "false"}, timeout=120)
    tags0 = r0.json()
    assert r0.status_code == 200, tags0
    old_missing = int({t["name"]: t for t in tags0.get("tags", [])}.get("missing", {}).get("count", 0))

    # Remove the second (last) file and sync -> now we expect 'missing' on this AssetInfo
    path2.unlink()
    trigger_sync_seed_assets(http, api_base)
    g2 = http.get(f"{api_base}/api/assets/{created['id']}", timeout=120)
    d2 = g2.json()
    assert g2.status_code == 200, d2
    assert "missing" in set(d2.get("tags", [])), "Missing must be set once no valid paths remain"

    # Tag usage for 'missing' increased by exactly 2 (two AssetInfo for one Asset)
    r1 = http.get(api_base + "/api/tags", params={"limit": "1000", "include_zero": "false"}, timeout=120)
    tags1 = r1.json()
    assert r1.status_code == 200, tags1
    new_missing = int({t["name"]: t for t in tags1.get("tags", [])}.get("missing", {}).get("count", 0))
    assert new_missing == old_missing + 2


@pytest.mark.parametrize("root", ["input", "output"])
def test_missing_tag_clears_on_fastpass_when_mtime_and_size_match(
    root: str,
    http: requests.Session,
    api_base: str,
    comfy_tmp_base_dir: Path,
    asset_factory,
    make_asset_bytes,
):
    """
    Fast pass alone clears 'missing' when size and mtime match exactly:
      1) upload (hashed), record original mtime_ns
      2) delete -> fast pass adds 'missing'
      3) restore same bytes and set mtime back to the original value
      4) run fast pass again -> 'missing' is removed (no slow scan)
    """
    scope = f"fastclear-{uuid.uuid4().hex[:6]}"
    name = "fastpass_clear.bin"
    data = make_asset_bytes(name, 3072)
    a = asset_factory(name, [root, "unit-tests", scope], {}, data)
    aid = a["id"]

    base = comfy_tmp_base_dir / root / "unit-tests" / scope
    p = base / get_asset_filename(a["asset_hash"], ".bin")
    st0 = p.stat()
    # st_mtime_ns may be absent on exotic platforms; derive it from st_mtime then.
    orig_mtime_ns = getattr(st0, "st_mtime_ns", int(st0.st_mtime * 1_000_000_000))

    # Delete -> fast pass adds 'missing'
    p.unlink()
    trigger_sync_seed_assets(http, api_base)
    g1 = http.get(f"{api_base}/api/assets/{aid}", timeout=120)
    d1 = g1.json()
    assert g1.status_code == 200, d1
    assert "missing" in set(d1.get("tags", []))

    # Restore same bytes and revert mtime to the original value
    p.parent.mkdir(parents=True, exist_ok=True)
    p.write_bytes(data)
    # set both atime and mtime in ns to ensure exact match
    os.utime(p, ns=(orig_mtime_ns, orig_mtime_ns))

    # Fast pass should clear 'missing' without a scan
    trigger_sync_seed_assets(http, api_base)
    g2 = http.get(f"{api_base}/api/assets/{aid}", timeout=120)
    d2 = g2.json()
    assert g2.status_code == 200, d2
    assert "missing" not in set(d2.get("tags", [])), "Fast pass should clear 'missing' when size+mtime match"


@pytest.mark.skip(reason="Requires computing hashes of files in directories to deduplicate into multiple cache states")
@pytest.mark.parametrize("root", ["input", "output"])
def test_fastpass_removes_stale_state_row_no_missing(
    root: str,
    http: requests.Session,
    api_base: str,
    comfy_tmp_base_dir: Path,
    asset_factory,
    make_asset_bytes,
    run_scan_and_wait,
):
    """
    Hashed asset with two states:
      - delete one file
      - run fast pass only
    Expect:
      - asset stays healthy (no 'missing')
      - stale AssetCacheState row for the deleted path is removed.
    We verify this behaviorally by recreating the deleted path and running
    fast pass again: a new *seed* AssetInfo is created, which proves the old
    state row was not reused.
    """
    scope = f"stale-{uuid.uuid4().hex[:6]}"
    name = "two_states.bin"
    data = make_asset_bytes(name, 2048)

    # Upload hashed asset at path1
    a = asset_factory(name, [root, "unit-tests", scope], {}, data)
    base = comfy_tmp_base_dir / root / "unit-tests" / scope
    a1_filename = get_asset_filename(a["asset_hash"], ".bin")
    p1 = base / a1_filename
    assert p1.exists()
    aid = a["id"]
    h = a["asset_hash"]

    # Create second state path2, seed+scan to dedupe into the same Asset
    p2 = base / "copy" / name
    p2.parent.mkdir(parents=True, exist_ok=True)
    p2.write_bytes(data)
    trigger_sync_seed_assets(http, api_base)
    run_scan_and_wait(root)

    # Delete path1 and run fast pass -> no 'missing' and stale state row should be removed
    p1.unlink()
    trigger_sync_seed_assets(http, api_base)
    g1 = http.get(f"{api_base}/api/assets/{aid}", timeout=120)
    d1 = g1.json()
    assert g1.status_code == 200, d1
    assert "missing" not in set(d1.get("tags", []))

    # Recreate path1 and run fast pass again.
    # If the stale state row was removed, a NEW seed AssetInfo will appear for this path.
    p1.write_bytes(data)
    trigger_sync_seed_assets(http, api_base)

    rl = http.get(
        api_base + "/api/assets",
        params={"include_tags": f"unit-tests,{scope}"},
        timeout=120,
    )
    bl = rl.json()
    assert rl.status_code == 200, bl
    items = bl.get("assets", [])
    # one hashed AssetInfo (asset_hash == h) + one seed AssetInfo (asset_hash == null)
    hashes = [it.get("asset_hash") for it in items if it.get("name") in (name, a1_filename)]
    assert h in hashes
    assert any(x is None for x in hashes), "Expected a new seed AssetInfo for the recreated path"

    # Asset identity still healthy
    rh = http.head(f"{api_base}/api/assets/hash/{h}", timeout=120)
    assert rh.status_code == 200
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "tests-unit/assets_test/test_assets_missing_sync.py", "license": "GNU General Public License v3.0", "lines": 294, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Comfy-Org/ComfyUI:tests-unit/assets_test/test_crud.py
"""Integration tests for asset CRUD endpoints (/api/assets).

Covers create-from-hash, get/update/delete, reference counting, hash HEAD
lookups, error paths, concurrent deletion, and filename metadata behavior.
"""
import uuid
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path

import pytest
import requests

from conftest import get_asset_filename, trigger_sync_seed_assets


def test_create_from_hash_success(
    http: requests.Session, api_base: str, seeded_asset: dict
):
    """Creating an AssetInfo from an existing hash succeeds and is idempotent."""
    h = seeded_asset["asset_hash"]
    payload = {
        "hash": h,
        "name": "from_hash_ok.safetensors",
        "tags": ["models", "checkpoints", "unit-tests", "from-hash"],
        "user_metadata": {"k": "v"},
    }
    r1 = http.post(f"{api_base}/api/assets/from-hash", json=payload, timeout=120)
    b1 = r1.json()
    assert r1.status_code == 201, b1
    assert b1["asset_hash"] == h
    assert b1["created_new"] is False
    aid = b1["id"]

    # Calling again with the same name should return the same AssetInfo id
    r2 = http.post(f"{api_base}/api/assets/from-hash", json=payload, timeout=120)
    b2 = r2.json()
    assert r2.status_code == 201, b2
    assert b2["id"] == aid


def test_get_and_delete_asset(http: requests.Session, api_base: str, seeded_asset: dict):
    """GET returns detail with metadata; DELETE removes it; second GET is 404."""
    aid = seeded_asset["id"]

    # GET detail
    rg = http.get(f"{api_base}/api/assets/{aid}", timeout=120)
    detail = rg.json()
    assert rg.status_code == 200, detail
    assert detail["id"] == aid
    assert "user_metadata" in detail
    assert "filename" in detail["user_metadata"]

    # DELETE
    rd = http.delete(f"{api_base}/api/assets/{aid}", timeout=120)
    assert rd.status_code == 204

    # GET again -> 404
    rg2 = http.get(f"{api_base}/api/assets/{aid}", timeout=120)
    body = rg2.json()
    assert rg2.status_code == 404
    assert body["error"]["code"] == "ASSET_NOT_FOUND"


def test_delete_upon_reference_count(
    http: requests.Session, api_base: str, seeded_asset: dict
):
    """Asset identity survives until its last AssetInfo reference is deleted."""
    # Create a second reference to the same asset via from-hash
    src_hash = seeded_asset["asset_hash"]
    payload = {
        "hash": src_hash,
        "name": "unit_ref_copy.safetensors",
        "tags": ["models", "checkpoints", "unit-tests", "del-flow"],
        "user_metadata": {"note": "copy"},
    }
    r2 = http.post(f"{api_base}/api/assets/from-hash", json=payload, timeout=120)
    copy = r2.json()
    assert r2.status_code == 201, copy
    assert copy["asset_hash"] == src_hash
    assert copy["created_new"] is False

    # Delete original reference -> asset identity must remain
    aid1 = seeded_asset["id"]
    rd1 = http.delete(f"{api_base}/api/assets/{aid1}", timeout=120)
    assert rd1.status_code == 204
    rh1 = http.head(f"{api_base}/api/assets/hash/{src_hash}", timeout=120)
    assert rh1.status_code == 200  # identity still present

    # Delete the last reference with default semantics -> identity and cached files removed
    aid2 = copy["id"]
    rd2 = http.delete(f"{api_base}/api/assets/{aid2}", timeout=120)
    assert rd2.status_code == 204
    rh2 = http.head(f"{api_base}/api/assets/hash/{src_hash}", timeout=120)
    assert rh2.status_code == 404  # orphan content removed


def test_update_asset_fields(http: requests.Session, api_base: str, seeded_asset: dict):
    """PUT updates name/user_metadata without touching tags."""
    aid = seeded_asset["id"]
    original_tags = seeded_asset["tags"]

    payload = {
        "name": "unit_1_renamed.safetensors",
        "user_metadata": {"purpose": "updated", "epoch": 2},
    }
    ru = http.put(f"{api_base}/api/assets/{aid}", json=payload, timeout=120)
    body = ru.json()
    assert ru.status_code == 200, body
    assert body["name"] == payload["name"]
    assert body["tags"] == original_tags  # tags unchanged
    assert body["user_metadata"]["purpose"] == "updated"
    # filename should still be present and normalized by server
    assert "filename" in body["user_metadata"]


def test_head_asset_by_hash(http: requests.Session, api_base: str, seeded_asset: dict):
    """HEAD by hash: 200 for an existing asset, 404 for an unknown hash."""
    h = seeded_asset["asset_hash"]

    # Existing
    rh1 = http.head(f"{api_base}/api/assets/hash/{h}", timeout=120)
    assert rh1.status_code == 200

    # Non-existent
    rh2 = http.head(f"{api_base}/api/assets/hash/blake3:{'0'*64}", timeout=120)
    assert rh2.status_code == 404


def test_head_asset_bad_hash_returns_400_and_no_body(http: requests.Session, api_base: str):
    # Invalid format; handler returns a JSON error, but HEAD responses must not carry a payload.
    # requests exposes an empty body for HEAD, so validate status and that there is no payload.
    rh = http.head(f"{api_base}/api/assets/hash/not_a_hash", timeout=120)
    assert rh.status_code == 400
    body = rh.content
    assert body == b""


def test_delete_nonexistent_returns_404(http: requests.Session, api_base: str):
    """DELETE of a random (unused) UUID reports ASSET_NOT_FOUND."""
    bogus = str(uuid.uuid4())
    r = http.delete(f"{api_base}/api/assets/{bogus}", timeout=120)
    body = r.json()
    assert r.status_code == 404
    assert body["error"]["code"] == "ASSET_NOT_FOUND"


def test_create_from_hash_invalids(http: requests.Session, api_base: str):
    """from-hash rejects unsupported hash algorithms and malformed JSON."""
    # Bad hash algorithm
    bad = {
        "hash": "sha256:" + "0" * 64,
        "name": "x.bin",
        "tags": ["models", "checkpoints", "unit-tests"],
    }
    r1 = http.post(f"{api_base}/api/assets/from-hash", json=bad, timeout=120)
    b1 = r1.json()
    assert r1.status_code == 400
    assert b1["error"]["code"] == "INVALID_BODY"

    # Invalid JSON body
    r2 = http.post(f"{api_base}/api/assets/from-hash", data=b"{not json}", timeout=120)
    b2 = r2.json()
    assert r2.status_code == 400
    assert b2["error"]["code"] == "INVALID_JSON"


def test_get_update_download_bad_ids(http: requests.Session, api_base: str):
    # All endpoints should be not found, as we UUID regex directly in the route definition.
    bad_id = "not-a-uuid"
    r1 = http.get(f"{api_base}/api/assets/{bad_id}", timeout=120)
    assert r1.status_code == 404
    r3 = http.get(f"{api_base}/api/assets/{bad_id}/content", timeout=120)
    assert r3.status_code == 404


def test_update_requires_at_least_one_field(http: requests.Session, api_base: str, seeded_asset: dict):
    """PUT with an empty body is rejected as INVALID_BODY."""
    aid = seeded_asset["id"]
    r = http.put(f"{api_base}/api/assets/{aid}", json={}, timeout=120)
    body = r.json()
    assert r.status_code == 400
    assert body["error"]["code"] == "INVALID_BODY"


@pytest.mark.parametrize("root", ["input", "output"])
def test_concurrent_delete_same_asset_info_single_204(
    root: str,
    http: requests.Session,
    api_base: str,
    asset_factory,
    make_asset_bytes,
):
    """
    Many concurrent DELETE for the same AssetInfo should result in:
      - exactly one 204 No Content (the one that actually deleted)
      - all others 404 Not Found (row already gone)
    """
    scope = f"conc-del-{uuid.uuid4().hex[:6]}"
    name = "to_delete.bin"
    data = make_asset_bytes(name, 1536)
    created = asset_factory(name, [root, "unit-tests", scope], {}, data)
    aid = created["id"]

    # Hit the same endpoint N times in parallel.
    n_tests = 4
    url = f"{api_base}/api/assets/{aid}?delete_content=false"

    # Each worker uses its own Session so requests are truly independent.
    def _do_delete(delete_url):
        with requests.Session() as s:
            return s.delete(delete_url, timeout=120).status_code

    with ThreadPoolExecutor(max_workers=n_tests) as ex:
        statuses = list(ex.map(_do_delete, [url] * n_tests))

    # Exactly one actual delete, the rest must be 404
    assert statuses.count(204) == 1, f"Expected exactly one 204; got: {statuses}"
    assert statuses.count(404) == n_tests - 1, f"Expected {n_tests-1} 404; got: {statuses}"

    # The resource must be gone.
    rg = http.get(f"{api_base}/api/assets/{aid}", timeout=120)
    assert rg.status_code == 404


@pytest.mark.parametrize("root", ["input", "output"])
def test_metadata_filename_is_set_for_seed_asset_without_hash(
    root: str,
    http: requests.Session,
    api_base: str,
    comfy_tmp_base_dir: Path,
):
    """Seed ingest (no hash yet) must compute user_metadata['filename'] immediately."""
    scope = f"seedmeta-{uuid.uuid4().hex[:6]}"
    name = "seed_filename.bin"
    base = comfy_tmp_base_dir / root / "unit-tests" / scope / "a" / "b"
    base.mkdir(parents=True, exist_ok=True)
    fp = base / name
    fp.write_bytes(b"Z" * 2048)

    trigger_sync_seed_assets(http, api_base)

    r1 = http.get(
        api_base + "/api/assets",
        params={"include_tags": f"unit-tests,{scope}", "name_contains": name},
        timeout=120,
    )
    body = r1.json()
    assert r1.status_code == 200, body
    matches = [a for a in body.get("assets", []) if a.get("name") == name]
    assert matches, "Seed asset should be visible after sync"
    assert matches[0].get("asset_hash") is None  # still a seed
    aid = matches[0]["id"]

    r2 = http.get(f"{api_base}/api/assets/{aid}", timeout=120)
    detail = r2.json()
    assert r2.status_code == 200, detail
    filename = (detail.get("user_metadata") or {}).get("filename")
    expected = str(fp.relative_to(comfy_tmp_base_dir / root)).replace("\\", "/")
    assert filename == expected, f"expected filename={expected}, got {filename!r}"


@pytest.mark.skip(reason="Requires computing hashes of files in directories to retarget cache states")
@pytest.mark.parametrize("root", ["input", "output"])
def test_metadata_filename_computed_and_updated_on_retarget(
    root: str,
    http: requests.Session,
    api_base: str,
    comfy_tmp_base_dir: Path,
    asset_factory,
    make_asset_bytes,
    run_scan_and_wait,
):
    """
    1) Ingest under {root}/unit-tests/<scope>/a/b/<name> -> filename reflects relative path.
    2) Retarget by copying to {root}/unit-tests/<scope>/x/<new_name>, remove old file,
       run fast pass + scan -> filename updates to new relative path.
    """
    scope = f"meta-fn-{uuid.uuid4().hex[:6]}"
    name1 = "compute_metadata_filename.png"
    name2 = "compute_changed_metadata_filename.png"
    data = make_asset_bytes(name1, 2100)

    # Upload into nested path a/b
    a = asset_factory(name1, [root, "unit-tests", scope, "a", "b"], {}, data)
    aid = a["id"]
    root_base = comfy_tmp_base_dir / root
    p1 = (root_base / "unit-tests" / scope / "a" / "b" / get_asset_filename(a["asset_hash"], ".png"))
    assert p1.exists()

    # filename at ingest should be the path relative to root
    rel1 = str(p1.relative_to(root_base)).replace("\\", "/")
    g1 = http.get(f"{api_base}/api/assets/{aid}", timeout=120)
    d1 = g1.json()
    assert g1.status_code == 200, d1
    fn1 = d1["user_metadata"].get("filename")
    assert fn1 == rel1

    # Retarget: copy to x/<name2>, remove old, then sync+scan
    p2 = root_base / "unit-tests" / scope / "x" / name2
    p2.parent.mkdir(parents=True, exist_ok=True)
    p2.write_bytes(data)
    if p1.exists():
        p1.unlink()
    trigger_sync_seed_assets(http, api_base)  # seed the new path
    run_scan_and_wait(root)  # verify/hash and reconcile

    # filename should now point at x/<name2>
    rel2 = str(p2.relative_to(root_base)).replace("\\", "/")
    g2 = http.get(f"{api_base}/api/assets/{aid}", timeout=120)
    d2 = g2.json()
    assert g2.status_code == 200, d2
    fn2 = d2["user_metadata"].get("filename")
    assert fn2 == rel2
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "tests-unit/assets_test/test_crud.py", "license": "GNU General Public License v3.0", "lines": 249, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Comfy-Org/ComfyUI:tests-unit/assets_test/test_downloads.py
"""Integration tests for GET /api/assets/{id}/content (asset downloads).

Covers Content-Disposition handling, fallback across multiple on-disk cache
states, last_access_time bumping, and 404 behavior when files are missing.
Requires a running server (``http``/``api_base`` fixtures) and a writable
``comfy_tmp_base_dir``.
"""
import time
import uuid
from datetime import datetime
from pathlib import Path
from typing import Optional

import pytest
import requests

from conftest import get_asset_filename, trigger_sync_seed_assets


def test_download_attachment_and_inline(http: requests.Session, api_base: str, seeded_asset: dict):
    """Default download is an attachment; ?disposition=inline switches the header."""
    aid = seeded_asset["id"]

    # default attachment
    r1 = http.get(f"{api_base}/api/assets/{aid}/content", timeout=120)
    data = r1.content
    assert r1.status_code == 200
    cd = r1.headers.get("Content-Disposition", "")
    assert "attachment" in cd
    assert data and len(data) == 4096  # seeded_asset fixture payload size

    # inline requested
    r2 = http.get(f"{api_base}/api/assets/{aid}/content?disposition=inline", timeout=120)
    r2.content  # drain the response body
    assert r2.status_code == 200
    cd2 = r2.headers.get("Content-Disposition", "")
    assert "inline" in cd2


@pytest.mark.skip(reason="Requires computing hashes of files in directories to deduplicate into multiple cache states")
@pytest.mark.parametrize("root", ["input", "output"])
def test_download_chooses_existing_state_and_updates_access_time(
    root: str,
    http: requests.Session,
    api_base: str,
    comfy_tmp_base_dir: Path,
    asset_factory,
    make_asset_bytes,
    run_scan_and_wait,
):
    """
    Hashed asset with two state paths: if the first one disappears,
    GET /content still serves from the remaining path and bumps last_access_time.
    """
    scope = f"dl-first-{uuid.uuid4().hex[:6]}"
    name = "first_existing_state.bin"
    data = make_asset_bytes(name, 3072)

    # Upload -> path1
    a = asset_factory(name, [root, "unit-tests", scope], {}, data)
    aid = a["id"]
    base = comfy_tmp_base_dir / root / "unit-tests" / scope
    path1 = base / get_asset_filename(a["asset_hash"], ".bin")
    assert path1.exists()

    # Seed path2 by copying, then scan to dedupe into a second state
    path2 = base / "alt" / name
    path2.parent.mkdir(parents=True, exist_ok=True)
    path2.write_bytes(data)
    trigger_sync_seed_assets(http, api_base)
    run_scan_and_wait(root)

    # Remove path1 so server must fall back to path2
    path1.unlink()

    # last_access_time before
    rg0 = http.get(f"{api_base}/api/assets/{aid}", timeout=120)
    d0 = rg0.json()
    assert rg0.status_code == 200, d0
    ts0 = d0.get("last_access_time")

    # Small sleep so the post-download timestamp is strictly greater.
    time.sleep(0.05)
    r = http.get(f"{api_base}/api/assets/{aid}/content", timeout=120)
    blob = r.content
    assert r.status_code == 200
    assert blob == data  # must serve from the surviving state (same bytes)

    rg1 = http.get(f"{api_base}/api/assets/{aid}", timeout=120)
    d1 = rg1.json()
    assert rg1.status_code == 200, d1
    ts1 = d1.get("last_access_time")

    def _parse_iso8601(s: Optional[str]) -> Optional[float]:
        # Tolerates a trailing 'Z' that datetime.fromisoformat (pre-3.11) rejects.
        if not s:
            return None
        s = s[:-1] if s.endswith("Z") else s
        return datetime.fromisoformat(s).timestamp()

    t0 = _parse_iso8601(ts0)
    t1 = _parse_iso8601(ts1)
    assert t1 is not None
    if t0 is not None:
        assert t1 > t0


@pytest.mark.parametrize("seeded_asset", [{"tags": ["models", "checkpoints"]}], indirect=True)
def test_download_missing_file_returns_404(
    http: requests.Session, api_base: str, comfy_tmp_base_dir: Path, seeded_asset: dict
):
    """Deleting the backing file makes /content return 404 FILE_NOT_FOUND."""
    # Remove the underlying file then attempt download.
    # We initialize fixture without additional tags to know exactly the asset file path.
    try:
        aid = seeded_asset["id"]
        rg = http.get(f"{api_base}/api/assets/{aid}", timeout=120)
        detail = rg.json()
        assert rg.status_code == 200
        asset_filename = get_asset_filename(detail["asset_hash"], ".safetensors")
        abs_path = comfy_tmp_base_dir / "models" / "checkpoints" / asset_filename
        assert abs_path.exists()
        abs_path.unlink()

        r2 = http.get(f"{api_base}/api/assets/{aid}/content", timeout=120)
        assert r2.status_code == 404
        body = r2.json()
        assert body["error"]["code"] == "FILE_NOT_FOUND"
    finally:
        # We created asset without the "unit-tests" tag(see `autoclean_unit_test_assets`), we need to clear it manually.
        dr = http.delete(f"{api_base}/api/assets/{aid}", timeout=120)
        dr.content  # drain the response body


@pytest.mark.skip(reason="Requires computing hashes of files in directories to deduplicate into multiple cache states")
@pytest.mark.parametrize("root", ["input", "output"])
def test_download_404_if_all_states_missing(
    root: str,
    http: requests.Session,
    api_base: str,
    comfy_tmp_base_dir: Path,
    asset_factory,
    make_asset_bytes,
    run_scan_and_wait,
):
    """Multi-state asset: after the last remaining on-disk file is removed, download must return 404."""
    scope = f"dl-404-{uuid.uuid4().hex[:6]}"
    name = "missing_all_states.bin"
    data = make_asset_bytes(name, 2048)

    # Upload -> path1
    a = asset_factory(name, [root, "unit-tests", scope], {}, data)
    aid = a["id"]
    base = comfy_tmp_base_dir / root / "unit-tests" / scope
    p1 = base / get_asset_filename(a["asset_hash"], ".bin")
    assert p1.exists()

    # Seed a second state and dedupe
    p2 = base / "copy" / name
    p2.parent.mkdir(parents=True, exist_ok=True)
    p2.write_bytes(data)
    trigger_sync_seed_assets(http, api_base)
    run_scan_and_wait(root)

    # Remove first file -> download should still work via the second state
    p1.unlink()
    ok1 = http.get(f"{api_base}/api/assets/{aid}/content", timeout=120)
    b1 = ok1.content
    assert ok1.status_code == 200 and b1 == data

    # Remove the last file -> download must 404
    p2.unlink()
    r2 = http.get(f"{api_base}/api/assets/{aid}/content", timeout=120)
    body = r2.json()
    assert r2.status_code == 404
    assert body["error"]["code"] == "FILE_NOT_FOUND"
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "tests-unit/assets_test/test_downloads.py", "license": "GNU General Public License v3.0", "lines": 140, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Comfy-Org/ComfyUI:tests-unit/assets_test/test_list_filter.py
"""Integration tests for GET /api/assets listing: paging, sorting, tag
include/exclude filters, and ``name_contains`` substring matching.

All tests scope their fixtures with a dedicated tag so runs don't interfere.
"""
import time
import uuid

import requests


def test_list_assets_paging_and_sort(http: requests.Session, api_base: str, asset_factory, make_asset_bytes):
    """limit/offset paging with a stable name sort; has_more flips on the last page."""
    names = ["a1_u.safetensors", "a2_u.safetensors", "a3_u.safetensors"]
    for n in names:
        asset_factory(
            n,
            ["models", "checkpoints", "unit-tests", "paging"],
            {"epoch": 1},
            make_asset_bytes(n, size=2048),
        )

    # name ascending for stable order
    r1 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,paging", "sort": "name", "order": "asc", "limit": "2", "offset": "0"},
        timeout=120,
    )
    b1 = r1.json()
    assert r1.status_code == 200
    got1 = [a["name"] for a in b1["assets"]]
    assert got1 == sorted(names)[:2]
    assert b1["has_more"] is True

    r2 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,paging", "sort": "name", "order": "asc", "limit": "2", "offset": "2"},
        timeout=120,
    )
    b2 = r2.json()
    assert r2.status_code == 200
    got2 = [a["name"] for a in b2["assets"]]
    assert got2 == sorted(names)[2:]
    assert b2["has_more"] is False


def test_list_assets_include_exclude_and_name_contains(http: requests.Session, api_base: str, asset_factory):
    """include_tags/exclude_tags filtering plus name_contains substring search."""
    a = asset_factory("inc_a.safetensors", ["models", "checkpoints", "unit-tests", "alpha"], {}, b"X" * 1024)
    b = asset_factory("inc_b.safetensors", ["models", "checkpoints", "unit-tests", "beta"], {}, b"Y" * 1024)

    r = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,alpha", "exclude_tags": "beta", "limit": "50"},
        timeout=120,
    )
    body = r.json()
    assert r.status_code == 200
    names = [x["name"] for x in body["assets"]]
    assert a["name"] in names
    assert b["name"] not in names

    r2 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests", "name_contains": "inc_"},
        timeout=120,
    )
    body2 = r2.json()
    assert r2.status_code == 200
    names2 = [x["name"] for x in body2["assets"]]
    assert a["name"] in names2
    assert b["name"] in names2

    # An unknown tag must yield an empty (but successful) result set.
    r2 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "non-existing-tag"},
        timeout=120,
    )
    body3 = r2.json()
    assert r2.status_code == 200
    assert not body3["assets"]


def test_list_assets_sort_by_size_both_orders(http, api_base, asset_factory, make_asset_bytes):
    """sort=size honors order=asc and order=desc."""
    t = ["models", "checkpoints", "unit-tests", "lf-size"]
    n1, n2, n3 = "sz1.safetensors", "sz2.safetensors", "sz3.safetensors"
    asset_factory(n1, t, {}, make_asset_bytes(n1, 1024))
    asset_factory(n2, t, {}, make_asset_bytes(n2, 2048))
    asset_factory(n3, t, {}, make_asset_bytes(n3, 3072))

    r1 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,lf-size", "sort": "size", "order": "asc"},
        timeout=120,
    )
    b1 = r1.json()
    names = [a["name"] for a in b1["assets"]]
    assert names[:3] == [n1, n2, n3]

    r2 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,lf-size", "sort": "size", "order": "desc"},
        timeout=120,
    )
    b2 = r2.json()
    names2 = [a["name"] for a in b2["assets"]]
    assert names2[:3] == [n3, n2, n1]


def test_list_assets_sort_by_updated_at_desc(http, api_base, asset_factory, make_asset_bytes):
    """A rename (PUT) bumps updated_at, so the renamed asset sorts first."""
    t = ["models", "checkpoints", "unit-tests", "lf-upd"]
    a1 = asset_factory("upd_a.safetensors", t, {}, make_asset_bytes("upd_a", 1200))
    a2 = asset_factory("upd_b.safetensors", t, {}, make_asset_bytes("upd_b", 1200))

    # Rename the second asset to bump updated_at
    rp = http.put(f"{api_base}/api/assets/{a2['id']}", json={"name": "upd_b_renamed.safetensors"}, timeout=120)
    upd = rp.json()
    assert rp.status_code == 200, upd

    r = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,lf-upd", "sort": "updated_at", "order": "desc"},
        timeout=120,
    )
    body = r.json()
    assert r.status_code == 200
    names = [x["name"] for x in body["assets"]]
    assert names[0] == "upd_b_renamed.safetensors"
    assert a1["name"] in names


def test_list_assets_sort_by_last_access_time_desc(http, api_base, asset_factory, make_asset_bytes):
    """Downloading content bumps last_access_time; sort surfaces the touched asset first."""
    t = ["models", "checkpoints", "unit-tests", "lf-access"]
    asset_factory("acc_a.safetensors", t, {}, make_asset_bytes("acc_a", 1100))
    time.sleep(0.02)  # keep creation timestamps distinct
    a2 = asset_factory("acc_b.safetensors", t, {}, make_asset_bytes("acc_b", 1100))

    # Touch last_access_time of b by downloading its content
    time.sleep(0.02)
    dl = http.get(f"{api_base}/api/assets/{a2['id']}/content", timeout=120)
    assert dl.status_code == 200
    dl.content  # drain the response body

    r = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,lf-access", "sort": "last_access_time", "order": "desc"},
        timeout=120,
    )
    body = r.json()
    assert r.status_code == 200
    names = [x["name"] for x in body["assets"]]
    assert names[0] == a2["name"]


def test_list_assets_include_tags_variants_and_case(http, api_base, asset_factory, make_asset_bytes):
    """include_tags accepts CSV, repeated params, duplicates, whitespace — case-insensitively."""
    t = ["models", "checkpoints", "unit-tests", "lf-include"]
    a = asset_factory("incvar_alpha.safetensors", [*t, "alpha"], {}, make_asset_bytes("iva"))
    asset_factory("incvar_beta.safetensors", [*t, "beta"], {}, make_asset_bytes("ivb"))

    # CSV + case-insensitive
    r1 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "UNIT-TESTS,LF-INCLUDE,alpha"},
        timeout=120,
    )
    b1 = r1.json()
    assert r1.status_code == 200
    names1 = [x["name"] for x in b1["assets"]]
    assert a["name"] in names1
    assert not any("beta" in x for x in names1)

    # Repeated query params for include_tags
    params_multi = [
        ("include_tags", "unit-tests"),
        ("include_tags", "lf-include"),
        ("include_tags", "alpha"),
    ]
    r2 = http.get(api_base + "/api/assets", params=params_multi, timeout=120)
    b2 = r2.json()
    assert r2.status_code == 200
    names2 = [x["name"] for x in b2["assets"]]
    assert a["name"] in names2
    assert not any("beta" in x for x in names2)

    # Duplicates and spaces in CSV
    r3 = http.get(
        api_base + "/api/assets",
        params={"include_tags": " unit-tests , lf-include , alpha , alpha "},
        timeout=120,
    )
    b3 = r3.json()
    assert r3.status_code == 200
    names3 = [x["name"] for x in b3["assets"]]
    assert a["name"] in names3


def test_list_assets_exclude_tags_dedup_and_case(http, api_base, asset_factory, make_asset_bytes):
    """exclude_tags is case-insensitive and tolerates repeated/duplicate values."""
    t = ["models", "checkpoints", "unit-tests", "lf-exclude"]
    a = asset_factory("ex_a_alpha.safetensors", [*t, "alpha"], {}, make_asset_bytes("exa", 900))
    asset_factory("ex_b_beta.safetensors", [*t, "beta"], {}, make_asset_bytes("exb", 900))

    # Exclude uppercase should work
    r1 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,lf-exclude", "exclude_tags": "BETA"},
        timeout=120,
    )
    b1 = r1.json()
    assert r1.status_code == 200
    names1 = [x["name"] for x in b1["assets"]]
    assert a["name"] in names1

    # Repeated excludes with duplicates
    params_multi = [
        ("include_tags", "unit-tests"),
        ("include_tags", "lf-exclude"),
        ("exclude_tags", "beta"),
        ("exclude_tags", "beta"),
    ]
    r2 = http.get(api_base + "/api/assets", params=params_multi, timeout=120)
    b2 = r2.json()
    assert r2.status_code == 200
    names2 = [x["name"] for x in b2["assets"]]
    assert all("beta" not in x for x in names2)


def test_list_assets_name_contains_case_and_specials(http, api_base, asset_factory, make_asset_bytes):
    """name_contains matches case-insensitively and treats '.'/'-' literally."""
    t = ["models", "checkpoints", "unit-tests", "lf-name"]
    a1 = asset_factory("CaseMix.SAFE", t, {}, make_asset_bytes("cm", 800))
    a2 = asset_factory("case-other.safetensors", t, {}, make_asset_bytes("co", 800))

    r1 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,lf-name", "name_contains": "casemix"},
        timeout=120,
    )
    b1 = r1.json()
    assert r1.status_code == 200
    names1 = [x["name"] for x in b1["assets"]]
    assert a1["name"] in names1

    r2 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,lf-name", "name_contains": ".SAFE"},
        timeout=120,
    )
    b2 = r2.json()
    assert r2.status_code == 200
    names2 = [x["name"] for x in b2["assets"]]
    assert a1["name"] in names2

    r3 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,lf-name", "name_contains": "case-"},
        timeout=120,
    )
    b3 = r3.json()
    assert r3.status_code == 200
    names3 = [x["name"] for x in b3["assets"]]
    assert a2["name"] in names3


def test_list_assets_offset_beyond_total_and_limit_boundary(http, api_base, asset_factory, make_asset_bytes):
    """Offset past the total yields an empty page; limit=500 is the accepted maximum."""
    t = ["models", "checkpoints", "unit-tests", "lf-pagelimits"]
    asset_factory("pl1.safetensors", t, {}, make_asset_bytes("pl1", 600))
    asset_factory("pl2.safetensors", t, {}, make_asset_bytes("pl2", 600))
    asset_factory("pl3.safetensors", t, {}, make_asset_bytes("pl3", 600))

    # Offset far beyond total
    r1 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,lf-pagelimits", "limit": "2", "offset": "10"},
        timeout=120,
    )
    b1 = r1.json()
    assert r1.status_code == 200
    assert not b1["assets"]
    assert b1["has_more"] is False

    # Boundary large limit (<=500 is valid)
    r2 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,lf-pagelimits", "limit": "500"},
        timeout=120,
    )
    b2 = r2.json()
    assert r2.status_code == 200
    assert len(b2["assets"]) == 3
    assert b2["has_more"] is False


def test_list_assets_offset_negative_and_limit_nonint_rejected(http, api_base):
    """Negative offset and non-integer limit both return 400 INVALID_QUERY."""
    r1 = http.get(api_base + "/api/assets", params={"offset": "-1"}, timeout=120)
    b1 = r1.json()
    assert r1.status_code == 400
    assert b1["error"]["code"] == "INVALID_QUERY"

    r2 = http.get(api_base + "/api/assets", params={"limit": "abc"}, timeout=120)
    b2 = r2.json()
    assert r2.status_code == 400
    assert b2["error"]["code"] == "INVALID_QUERY"


def test_list_assets_invalid_query_rejected(http: requests.Session, api_base: str):
    """Zero limit and malformed metadata_filter JSON are rejected with 400."""
    # limit too small
    r1 = http.get(api_base + "/api/assets", params={"limit": "0"}, timeout=120)
    b1 = r1.json()
    assert r1.status_code == 400
    assert b1["error"]["code"] == "INVALID_QUERY"

    # bad metadata JSON
    r2 = http.get(api_base + "/api/assets", params={"metadata_filter": "{not json"}, timeout=120)
    b2 = r2.json()
    assert r2.status_code == 400
    assert b2["error"]["code"] == "INVALID_QUERY"


def test_list_assets_name_contains_literal_underscore(
    http,
    api_base,
    asset_factory,
    make_asset_bytes,
):
    """'name_contains' must treat '_' literally, not as a SQL wildcard.

    We create:
      - foo_bar.safetensors (should match)
      - fooxbar.safetensors (must NOT match if '_' is escaped)
      - foobar.safetensors (must NOT match)
    """
    scope = f"lf-underscore-{uuid.uuid4().hex[:6]}"
    tags = ["models", "checkpoints", "unit-tests", scope]
    a = asset_factory("foo_bar.safetensors", tags, {}, make_asset_bytes("a", 700))
    b = asset_factory("fooxbar.safetensors", tags, {}, make_asset_bytes("b", 700))
    c = asset_factory("foobar.safetensors", tags, {}, make_asset_bytes("c", 700))

    r = http.get(
        api_base + "/api/assets",
        params={"include_tags": f"unit-tests,{scope}", "name_contains": "foo_bar"},
        timeout=120,
    )
    body = r.json()
    assert r.status_code == 200, body
    names = [x["name"] for x in body["assets"]]
    assert a["name"] in names, f"Expected literal underscore match to include {a['name']}"
    assert b["name"] not in names, "Underscore must be escaped — should not match 'fooxbar'"
    assert c["name"] not in names, "Underscore must be escaped — should not match 'foobar'"
    assert body["total"] == 1
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "tests-unit/assets_test/test_list_filter.py", "license": "GNU General Public License v3.0", "lines": 291, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Comfy-Org/ComfyUI:tests-unit/assets_test/test_metadata_filters.py
"""Integration tests for the ``metadata_filter`` query parameter of
GET /api/assets: AND semantics across keys, strict type matching, any-of
list matching, None/missing-key semantics, nested-object equality, and
interaction with tags/sorting/paging.
"""
import json


def test_meta_and_across_keys_and_types(
    http, api_base: str, asset_factory, make_asset_bytes
):
    """Every key in the filter must match the asset's metadata (AND semantics)."""
    name = "mf_and_mix.safetensors"
    tags = ["models", "checkpoints", "unit-tests", "mf-and"]
    meta = {"purpose": "mix", "epoch": 1, "active": True, "score": 1.23}
    asset_factory(name, tags, meta, make_asset_bytes(name, 4096))

    # All keys must match (AND semantics)
    f_ok = {"purpose": "mix", "epoch": 1, "active": True, "score": 1.23}
    r1 = http.get(
        api_base + "/api/assets",
        params={
            "include_tags": "unit-tests,mf-and",
            "metadata_filter": json.dumps(f_ok),
        },
        timeout=120,
    )
    b1 = r1.json()
    assert r1.status_code == 200
    names = [a["name"] for a in b1["assets"]]
    assert name in names

    # One key mismatched -> no result
    f_bad = {"purpose": "mix", "epoch": 2, "active": True}
    r2 = http.get(
        api_base + "/api/assets",
        params={
            "include_tags": "unit-tests,mf-and",
            "metadata_filter": json.dumps(f_bad),
        },
        timeout=120,
    )
    b2 = r2.json()
    assert r2.status_code == 200
    assert not b2["assets"]


def test_meta_type_strictness_int_vs_str_and_bool(http, api_base, asset_factory, make_asset_bytes):
    """JSON types are matched strictly: 1 != "1" and True != "true"."""
    name = "mf_types.safetensors"
    tags = ["models", "checkpoints", "unit-tests", "mf-types"]
    meta = {"epoch": 1, "active": True}
    asset_factory(name, tags, meta, make_asset_bytes(name))

    # int filter matches numeric
    r1 = http.get(
        api_base + "/api/assets",
        params={
            "include_tags": "unit-tests,mf-types",
            "metadata_filter": json.dumps({"epoch": 1}),
        },
        timeout=120,
    )
    b1 = r1.json()
    assert r1.status_code == 200 and any(a["name"] == name for a in b1["assets"])

    # string "1" must NOT match numeric 1
    r2 = http.get(
        api_base + "/api/assets",
        params={
            "include_tags": "unit-tests,mf-types",
            "metadata_filter": json.dumps({"epoch": "1"}),
        },
        timeout=120,
    )
    b2 = r2.json()
    assert r2.status_code == 200 and not b2["assets"]

    # bool True matches, string "true" must NOT match
    r3 = http.get(
        api_base + "/api/assets",
        params={
            "include_tags": "unit-tests,mf-types",
            "metadata_filter": json.dumps({"active": True}),
        },
        timeout=120,
    )
    b3 = r3.json()
    assert r3.status_code == 200 and any(a["name"] == name for a in b3["assets"])

    r4 = http.get(
        api_base + "/api/assets",
        params={
            "include_tags": "unit-tests,mf-types",
            "metadata_filter": json.dumps({"active": "true"}),
        },
        timeout=120,
    )
    b4 = r4.json()
    assert r4.status_code == 200 and not b4["assets"]


def test_meta_any_of_list_of_scalars(http, api_base, asset_factory, make_asset_bytes):
    """A list filter value means "any of": one shared element is enough to match."""
    name = "mf_list_scalars.safetensors"
    tags = ["models", "checkpoints", "unit-tests", "mf-list"]
    meta = {"flags": ["red", "green"]}
    asset_factory(name, tags, meta, make_asset_bytes(name, 3000))

    # Any-of should match because "green" is present
    filt_ok = {"flags": ["blue", "green"]}
    r1 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,mf-list", "metadata_filter": json.dumps(filt_ok)},
        timeout=120,
    )
    b1 = r1.json()
    assert r1.status_code == 200 and any(a["name"] == name for a in b1["assets"])

    # None of provided flags present -> no match
    filt_miss = {"flags": ["blue", "yellow"]}
    r2 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,mf-list", "metadata_filter": json.dumps(filt_miss)},
        timeout=120,
    )
    b2 = r2.json()
    assert r2.status_code == 200 and not b2["assets"]

    # Duplicates in list should not break matching
    filt_dup = {"flags": ["green", "green", "green"]}
    r3 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,mf-list", "metadata_filter": json.dumps(filt_dup)},
        timeout=120,
    )
    b3 = r3.json()
    assert r3.status_code == 200 and any(a["name"] == name for a in b3["assets"])


def test_meta_none_semantics_missing_or_null_and_any_of_with_none(
    http, api_base, asset_factory, make_asset_bytes
):
    """A None filter value matches both a missing key and an explicit null."""
    # a1: key missing; a2: explicit null; a3: concrete value
    t = ["models", "checkpoints", "unit-tests", "mf-none"]
    a1 = asset_factory("mf_none_missing.safetensors", t, {"x": 1}, make_asset_bytes("a1"))
    a2 = asset_factory("mf_none_null.safetensors", t, {"maybe": None}, make_asset_bytes("a2"))
    a3 = asset_factory("mf_none_value.safetensors", t, {"maybe": "x"}, make_asset_bytes("a3"))

    # Filter {maybe: None} must match a1 and a2, not a3
    filt = {"maybe": None}
    r1 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,mf-none", "metadata_filter": json.dumps(filt), "sort": "name"},
        timeout=120,
    )
    b1 = r1.json()
    assert r1.status_code == 200
    got = [a["name"] for a in b1["assets"]]
    assert a1["name"] in got and a2["name"] in got and a3["name"] not in got

    # Any-of with None should include missing/null plus value matches
    filt_any = {"maybe": [None, "x"]}
    r2 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,mf-none", "metadata_filter": json.dumps(filt_any), "sort": "name"},
        timeout=120,
    )
    b2 = r2.json()
    assert r2.status_code == 200
    got2 = [a["name"] for a in b2["assets"]]
    assert a1["name"] in got2 and a2["name"] in got2 and a3["name"] in got2


def test_meta_nested_json_object_equality(http, api_base, asset_factory, make_asset_bytes):
    """An object filter value is compared by whole-object equality, not subset."""
    name = "mf_nested_json.safetensors"
    tags = ["models", "checkpoints", "unit-tests", "mf-nested"]
    cfg = {"optimizer": "adam", "lr": 0.001, "schedule": {"type": "cosine", "warmup": 100}}
    asset_factory(name, tags, {"config": cfg}, make_asset_bytes(name, 2200))

    # Exact JSON object equality (same structure)
    r1 = http.get(
        api_base + "/api/assets",
        params={
            "include_tags": "unit-tests,mf-nested",
            "metadata_filter": json.dumps({"config": cfg}),
        },
        timeout=120,
    )
    b1 = r1.json()
    assert r1.status_code == 200 and any(a["name"] == name for a in b1["assets"])

    # Different JSON object should not match
    r2 = http.get(
        api_base + "/api/assets",
        params={
            "include_tags": "unit-tests,mf-nested",
            "metadata_filter": json.dumps({"config": {"optimizer": "sgd"}}),
        },
        timeout=120,
    )
    b2 = r2.json()
    assert r2.status_code == 200 and not b2["assets"]


def test_meta_list_of_objects_any_of(http, api_base, asset_factory, make_asset_bytes):
    """When metadata holds a list of objects, an object filter matches any element."""
    name = "mf_list_objects.safetensors"
    tags = ["models", "checkpoints", "unit-tests", "mf-objlist"]
    transforms = [{"type": "crop", "size": 128}, {"type": "flip", "p": 0.5}]
    asset_factory(name, tags, {"transforms": transforms}, make_asset_bytes(name, 2048))

    # Any-of for list of objects should match when one element equals the filter object
    r1 = http.get(
        api_base + "/api/assets",
        params={
            "include_tags": "unit-tests,mf-objlist",
            "metadata_filter": json.dumps({"transforms": {"type": "flip", "p": 0.5}}),
        },
        timeout=120,
    )
    b1 = r1.json()
    assert r1.status_code == 200 and any(a["name"] == name for a in b1["assets"])

    # Non-matching object -> no match
    r2 = http.get(
        api_base + "/api/assets",
        params={
            "include_tags": "unit-tests,mf-objlist",
            "metadata_filter": json.dumps({"transforms": {"type": "rotate", "deg": 90}}),
        },
        timeout=120,
    )
    b2 = r2.json()
    assert r2.status_code == 200 and not b2["assets"]


def test_meta_with_special_and_unicode_keys(http, api_base, asset_factory, make_asset_bytes):
    """Keys with dots, slashes, colons, Cyrillic and emoji are matched verbatim."""
    name = "mf_keys_unicode.safetensors"
    tags = ["models", "checkpoints", "unit-tests", "mf-keys"]
    meta = {
        "weird.key": "v1",
        "path/like": 7,
        "with:colon": True,
        "ключ": "значение",
        "emoji": "🐍",
    }
    asset_factory(name, tags, meta, make_asset_bytes(name, 1500))

    # Match all the special keys
    filt = {"weird.key": "v1", "path/like": 7, "with:colon": True, "emoji": "🐍"}
    r1 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,mf-keys", "metadata_filter": json.dumps(filt)},
        timeout=120,
    )
    b1 = r1.json()
    assert r1.status_code == 200 and any(a["name"] == name for a in b1["assets"])

    # Unicode key match
    r2 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,mf-keys", "metadata_filter": json.dumps({"ключ": "значение"})},
        timeout=120,
    )
    b2 = r2.json()
    assert r2.status_code == 200 and any(a["name"] == name for a in b2["assets"])


def test_meta_with_zero_and_boolean_lists(http, api_base, asset_factory, make_asset_bytes):
    """Falsy values (0) still filter correctly; scalar filter matches inside a bool list."""
    t = ["models", "checkpoints", "unit-tests", "mf-zero-bool"]
    a0 = asset_factory("mf_zero_count.safetensors", t, {"count": 0}, make_asset_bytes("z", 1025))
    a1 = asset_factory("mf_bool_list.safetensors", t, {"choices": [True, False]}, make_asset_bytes("b", 1026))

    # count == 0 must match only a0
    r1 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,mf-zero-bool", "metadata_filter": json.dumps({"count": 0})},
        timeout=120,
    )
    b1 = r1.json()
    assert r1.status_code == 200
    names1 = [a["name"] for a in b1["assets"]]
    assert a0["name"] in names1 and a1["name"] not in names1

    # Any-of list of booleans: True matches second asset
    r2 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,mf-zero-bool", "metadata_filter": json.dumps({"choices": True})},
        timeout=120,
    )
    b2 = r2.json()
    assert r2.status_code == 200 and any(a["name"] == a1["name"] for a in b2["assets"])


def test_meta_mixed_list_types_and_strictness(http, api_base, asset_factory, make_asset_bytes):
    """Mixed-type metadata lists: element-wise strict matching still applies."""
    name = "mf_mixed_list.safetensors"
    tags = ["models", "checkpoints", "unit-tests", "mf-mixed"]
    meta = {"mix": ["1", 1, True, None]}
    asset_factory(name, tags, meta, make_asset_bytes(name, 1999))

    # Should match because 1 is present
    r1 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,mf-mixed", "metadata_filter": json.dumps({"mix": [2, 1]})},
        timeout=120,
    )
    b1 = r1.json()
    assert r1.status_code == 200 and any(a["name"] == name for a in b1["assets"])

    # Should NOT match for False
    r2 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,mf-mixed", "metadata_filter": json.dumps({"mix": False})},
        timeout=120,
    )
    b2 = r2.json()
    assert r2.status_code == 200 and not b2["assets"]


def test_meta_unknown_key_and_none_behavior_with_scope_tags(http, api_base, asset_factory, make_asset_bytes):
    """Unknown key + None matches everything in scope; unknown key + value matches nothing."""
    # Use a unique scope tag to avoid interference
    t = ["models", "checkpoints", "unit-tests", "mf-unknown-scope"]
    x = asset_factory("mf_unknown_a.safetensors", t, {"k1": 1}, make_asset_bytes("ua"))
    y = asset_factory("mf_unknown_b.safetensors", t, {"k2": 2}, make_asset_bytes("ub"))

    # Filtering by unknown key with None should return both (missing key OR null)
    r1 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,mf-unknown-scope", "metadata_filter": json.dumps({"unknown": None})},
        timeout=120,
    )
    b1 = r1.json()
    assert r1.status_code == 200
    names = {a["name"] for a in b1["assets"]}
    assert x["name"] in names and y["name"] in names

    # Filtering by unknown key with concrete value should return none
    r2 = http.get(
        api_base + "/api/assets",
        params={"include_tags": "unit-tests,mf-unknown-scope", "metadata_filter": json.dumps({"unknown": "x"})},
        timeout=120,
    )
    b2 = r2.json()
    assert r2.status_code == 200 and not b2["assets"]


def test_meta_with_tags_include_exclude_and_name_contains(http, api_base, asset_factory, make_asset_bytes):
    """metadata_filter composes with include/exclude tags and name_contains."""
    # alpha matches epoch=1; beta has epoch=2
    a = asset_factory(
        "mf_tag_alpha.safetensors",
        ["models", "checkpoints", "unit-tests", "mf-tag", "alpha"],
        {"epoch": 1},
        make_asset_bytes("alpha"),
    )
    b = asset_factory(
        "mf_tag_beta.safetensors",
        ["models", "checkpoints", "unit-tests", "mf-tag", "beta"],
        {"epoch": 2},
        make_asset_bytes("beta"),
    )

    params = {
        "include_tags": "unit-tests,mf-tag,alpha",
        "exclude_tags": "beta",
        "name_contains": "mf_tag_",
        "metadata_filter": json.dumps({"epoch": 1}),
    }
    r = http.get(api_base + "/api/assets", params=params, timeout=120)
    body = r.json()
    assert r.status_code == 200
    names = [x["name"] for x in body["assets"]]
    assert a["name"] in names
    assert b["name"] not in names


def test_meta_sort_and_paging_under_filter(http, api_base, asset_factory, make_asset_bytes):
    """Sorting and limit/offset paging keep working while a metadata_filter is active."""
    # Three assets in same scope with different sizes and a common filter key
    t = ["models", "checkpoints", "unit-tests", "mf-sort"]
    n1, n2, n3 = "mf_sort_1.safetensors", "mf_sort_2.safetensors", "mf_sort_3.safetensors"
    asset_factory(n1, t, {"group": "g"}, make_asset_bytes(n1, 1024))
    asset_factory(n2, t, {"group": "g"}, make_asset_bytes(n2, 2048))
    asset_factory(n3, t, {"group": "g"}, make_asset_bytes(n3, 3072))

    # Sort by size ascending with paging
    q = {
        "include_tags": "unit-tests,mf-sort",
        "metadata_filter": json.dumps({"group": "g"}),
        "sort": "size",
        "order": "asc",
        "limit": "2",
    }
    r1 = http.get(api_base + "/api/assets", params=q, timeout=120)
    b1 = r1.json()
    assert r1.status_code == 200
    got1 = [a["name"] for a in b1["assets"]]
    assert got1 == [n1, n2]
    assert b1["has_more"] is True

    q2 = {**q, "offset": "2"}
    r2 = http.get(api_base + "/api/assets", params=q2, timeout=120)
    b2 = r2.json()
    assert r2.status_code == 200
    got2 = [a["name"] for a in b2["assets"]]
    assert got2 == [n3]
    assert b2["has_more"] is False
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "tests-unit/assets_test/test_metadata_filters.py", "license": "GNU General Public License v3.0", "lines": 345, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Comfy-Org/ComfyUI:tests-unit/assets_test/test_prune_orphaned_assets.py
import uuid from pathlib import Path import pytest import requests from conftest import get_asset_filename, trigger_sync_seed_assets @pytest.fixture def create_seed_file(comfy_tmp_base_dir: Path): """Create a file on disk that will become a seed asset after sync.""" created: list[Path] = [] def _create(root: str, scope: str, name: str | None = None, data: bytes = b"TEST") -> Path: name = name or f"seed_{uuid.uuid4().hex[:8]}.bin" path = comfy_tmp_base_dir / root / "unit-tests" / scope / name path.parent.mkdir(parents=True, exist_ok=True) path.write_bytes(data) created.append(path) return path yield _create for p in created: p.unlink(missing_ok=True) @pytest.fixture def find_asset(http: requests.Session, api_base: str): """Query API for assets matching scope and optional name.""" def _find(scope: str, name: str | None = None) -> list[dict]: params = {"include_tags": f"unit-tests,{scope}"} if name: params["name_contains"] = name r = http.get(f"{api_base}/api/assets", params=params, timeout=120) assert r.status_code == 200 assets = r.json().get("assets", []) if name: return [a for a in assets if a.get("name") == name] return assets return _find @pytest.mark.parametrize("root", ["input", "output"]) def test_orphaned_seed_asset_is_pruned( root: str, create_seed_file, find_asset, http: requests.Session, api_base: str, ): """Seed asset with deleted file is removed; with file present, it survives.""" scope = f"prune-{uuid.uuid4().hex[:6]}" fp = create_seed_file(root, scope) name = fp.name trigger_sync_seed_assets(http, api_base) assert find_asset(scope, name), "Seed asset should exist" fp.unlink() trigger_sync_seed_assets(http, api_base) assert not find_asset(scope, name), "Orphaned seed should be pruned" def test_seed_asset_with_file_survives_prune( create_seed_file, find_asset, http: requests.Session, api_base: str, ): """Seed asset with file still on disk is NOT pruned.""" scope = f"keep-{uuid.uuid4().hex[:6]}" fp = create_seed_file("input", scope) 
trigger_sync_seed_assets(http, api_base) trigger_sync_seed_assets(http, api_base) assert find_asset(scope, fp.name), "Seed with valid file should survive" def test_hashed_asset_not_pruned_when_file_missing( http: requests.Session, api_base: str, comfy_tmp_base_dir: Path, asset_factory, make_asset_bytes, ): """Hashed assets are never deleted by prune, even without file.""" scope = f"hashed-{uuid.uuid4().hex[:6]}" data = make_asset_bytes("test", 2048) a = asset_factory("test.bin", ["input", "unit-tests", scope], {}, data) path = comfy_tmp_base_dir / "input" / "unit-tests" / scope / get_asset_filename(a["asset_hash"], ".bin") path.unlink() trigger_sync_seed_assets(http, api_base) r = http.get(f"{api_base}/api/assets/{a['id']}", timeout=120) assert r.status_code == 200, "Hashed asset should NOT be pruned" def test_prune_across_multiple_roots( create_seed_file, find_asset, http: requests.Session, api_base: str, ): """Prune correctly handles assets across input and output roots.""" scope = f"multi-{uuid.uuid4().hex[:6]}" input_fp = create_seed_file("input", scope, "input.bin") create_seed_file("output", scope, "output.bin") trigger_sync_seed_assets(http, api_base) assert len(find_asset(scope)) == 2 input_fp.unlink() trigger_sync_seed_assets(http, api_base) remaining = find_asset(scope) assert len(remaining) == 1 assert remaining[0]["name"] == "output.bin" @pytest.mark.parametrize("dirname", ["100%_done", "my_folder_name", "has spaces"]) def test_special_chars_in_path_escaped_correctly( dirname: str, create_seed_file, find_asset, http: requests.Session, api_base: str, comfy_tmp_base_dir: Path, ): """SQL LIKE wildcards (%, _) and spaces in paths don't cause false matches.""" scope = f"special-{uuid.uuid4().hex[:6]}/{dirname}" fp = create_seed_file("input", scope) trigger_sync_seed_assets(http, api_base) trigger_sync_seed_assets(http, api_base) assert find_asset(scope.split("/")[0], fp.name), "Asset with special chars should survive"
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "tests-unit/assets_test/test_prune_orphaned_assets.py", "license": "GNU General Public License v3.0", "lines": 110, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Comfy-Org/ComfyUI:tests-unit/assets_test/test_tags.py
import json import uuid import requests def test_tags_present(http: requests.Session, api_base: str, seeded_asset: dict): # Include zero-usage tags by default r1 = http.get(api_base + "/api/tags", params={"limit": "50"}, timeout=120) body1 = r1.json() assert r1.status_code == 200 names = [t["name"] for t in body1["tags"]] # A few system tags from migration should exist: assert "models" in names assert "checkpoints" in names # Only used tags before we add anything new from this test cycle r2 = http.get(api_base + "/api/tags", params={"include_zero": "false"}, timeout=120) body2 = r2.json() assert r2.status_code == 200 # We already seeded one asset via fixture, so used tags must be non-empty used_names = [t["name"] for t in body2["tags"]] assert "models" in used_names assert "checkpoints" in used_names # Prefix filter should refine the list r3 = http.get(api_base + "/api/tags", params={"include_zero": "false", "prefix": "uni"}, timeout=120) b3 = r3.json() assert r3.status_code == 200 names3 = [t["name"] for t in b3["tags"]] assert "unit-tests" in names3 assert "models" not in names3 # filtered out by prefix # Order by name ascending should be stable r4 = http.get(api_base + "/api/tags", params={"include_zero": "false", "order": "name_asc"}, timeout=120) b4 = r4.json() assert r4.status_code == 200 names4 = [t["name"] for t in b4["tags"]] assert names4 == sorted(names4) def test_tags_empty_usage(http: requests.Session, api_base: str, asset_factory, make_asset_bytes): # Baseline: system tags exist when include_zero (default) is true r1 = http.get(api_base + "/api/tags", params={"limit": "500"}, timeout=120) body1 = r1.json() assert r1.status_code == 200 names = [t["name"] for t in body1["tags"]] assert "models" in names and "checkpoints" in names # Create a short-lived asset under input with a unique custom tag scope = f"tags-empty-usage-{uuid.uuid4().hex[:6]}" custom_tag = f"temp-{uuid.uuid4().hex[:8]}" name = "tag_seed.bin" _asset = asset_factory( name, ["input", 
"unit-tests", scope, custom_tag], {}, make_asset_bytes(name, 512), ) # While the asset exists, the custom tag must appear when include_zero=false r2 = http.get( api_base + "/api/tags", params={"include_zero": "false", "prefix": custom_tag, "limit": "50"}, timeout=120, ) body2 = r2.json() assert r2.status_code == 200 used_names = [t["name"] for t in body2["tags"]] assert custom_tag in used_names # Delete the asset so the tag usage drops to zero rd = http.delete(f"{api_base}/api/assets/{_asset['id']}", timeout=120) assert rd.status_code == 204 # Now the custom tag must not be returned when include_zero=false r3 = http.get( api_base + "/api/tags", params={"include_zero": "false", "prefix": custom_tag, "limit": "50"}, timeout=120, ) body3 = r3.json() assert r3.status_code == 200 names_after = [t["name"] for t in body3["tags"]] assert custom_tag not in names_after assert not names_after # filtered view should be empty now def test_add_and_remove_tags(http: requests.Session, api_base: str, seeded_asset: dict): aid = seeded_asset["id"] # Add tags with duplicates and mixed case payload_add = {"tags": ["NewTag", "unit-tests", "newtag", "BETA"]} r1 = http.post(f"{api_base}/api/assets/{aid}/tags", json=payload_add, timeout=120) b1 = r1.json() assert r1.status_code == 200, b1 # normalized, deduplicated; 'unit-tests' was already present from the seed assert set(b1["added"]) == {"newtag", "beta"} assert set(b1["already_present"]) == {"unit-tests"} assert "newtag" in b1["total_tags"] and "beta" in b1["total_tags"] rg = http.get(f"{api_base}/api/assets/{aid}", timeout=120) g = rg.json() assert rg.status_code == 200 tags_now = set(g["tags"]) assert {"newtag", "beta"}.issubset(tags_now) # Remove a tag and a non-existent tag payload_del = {"tags": ["newtag", "does-not-exist"]} r2 = http.delete(f"{api_base}/api/assets/{aid}/tags", json=payload_del, timeout=120) b2 = r2.json() assert r2.status_code == 200 assert set(b2["removed"]) == {"newtag"} assert set(b2["not_present"]) == 
{"does-not-exist"} # Verify remaining tags after deletion rg2 = http.get(f"{api_base}/api/assets/{aid}", timeout=120) g2 = rg2.json() assert rg2.status_code == 200 tags_later = set(g2["tags"]) assert "newtag" not in tags_later assert "beta" in tags_later # still present def test_tags_list_order_and_prefix(http: requests.Session, api_base: str, seeded_asset: dict): aid = seeded_asset["id"] h = seeded_asset["asset_hash"] # Add both tags to the seeded asset (usage: orderaaa=1, orderbbb=1) r_add = http.post(f"{api_base}/api/assets/{aid}/tags", json={"tags": ["orderaaa", "orderbbb"]}, timeout=120) add_body = r_add.json() assert r_add.status_code == 200, add_body # Create another AssetInfo from the same content but tagged ONLY with 'orderbbb'. payload = { "hash": h, "name": "order_only_bbb.safetensors", "tags": ["input", "unit-tests", "orderbbb"], "user_metadata": {}, } r_copy = http.post(f"{api_base}/api/assets/from-hash", json=payload, timeout=120) copy_body = r_copy.json() assert r_copy.status_code == 201, copy_body # 1) Default order (count_desc): 'orderbbb' should come before 'orderaaa' # because it has higher usage (2 vs 1). 
r1 = http.get(api_base + "/api/tags", params={"prefix": "order", "include_zero": "false"}, timeout=120) b1 = r1.json() assert r1.status_code == 200, b1 names1 = [t["name"] for t in b1["tags"]] counts1 = {t["name"]: t["count"] for t in b1["tags"]} # Both must be present within the prefix subset assert "orderaaa" in names1 and "orderbbb" in names1 # Usage of 'orderbbb' must be >= 'orderaaa'; in our setup it's 2 vs 1 assert counts1["orderbbb"] >= counts1["orderaaa"] # And with count_desc, 'orderbbb' appears earlier than 'orderaaa' assert names1.index("orderbbb") < names1.index("orderaaa") # 2) name_asc: lexical order should flip the relative order r2 = http.get( api_base + "/api/tags", params={"prefix": "order", "include_zero": "false", "order": "name_asc"}, timeout=120, ) b2 = r2.json() assert r2.status_code == 200, b2 names2 = [t["name"] for t in b2["tags"]] assert "orderaaa" in names2 and "orderbbb" in names2 assert names2.index("orderaaa") < names2.index("orderbbb") # 3) invalid limit rejected (existing negative case retained) r3 = http.get(api_base + "/api/tags", params={"limit": "1001"}, timeout=120) b3 = r3.json() assert r3.status_code == 400 assert b3["error"]["code"] == "INVALID_QUERY" def test_tags_endpoints_invalid_bodies(http: requests.Session, api_base: str, seeded_asset: dict): aid = seeded_asset["id"] # Add with empty list r1 = http.post(f"{api_base}/api/assets/{aid}/tags", json={"tags": []}, timeout=120) b1 = r1.json() assert r1.status_code == 400 assert b1["error"]["code"] == "INVALID_BODY" # Remove with wrong type r2 = http.delete(f"{api_base}/api/assets/{aid}/tags", json={"tags": [123]}, timeout=120) b2 = r2.json() assert r2.status_code == 400 assert b2["error"]["code"] == "INVALID_BODY" # metadata_filter provided as JSON array should be rejected (must be object) r3 = http.get( api_base + "/api/assets", params={"metadata_filter": json.dumps([{"x": 1}])}, timeout=120, ) b3 = r3.json() assert r3.status_code == 400 assert b3["error"]["code"] == 
"INVALID_QUERY" def test_tags_prefix_treats_underscore_literal( http, api_base, asset_factory, make_asset_bytes, ): """'prefix' for /api/tags must treat '_' literally, not as a wildcard.""" base = f"pref_{uuid.uuid4().hex[:6]}" tag_ok = f"{base}_ok" # should match prefix=f"{base}_" tag_bad = f"{base}xok" # must NOT match if '_' is escaped scope = f"tags-underscore-{uuid.uuid4().hex[:6]}" asset_factory("t1.bin", ["input", "unit-tests", scope, tag_ok], {}, make_asset_bytes("t1", 512)) asset_factory("t2.bin", ["input", "unit-tests", scope, tag_bad], {}, make_asset_bytes("t2", 512)) r = http.get(api_base + "/api/tags", params={"include_zero": "false", "prefix": f"{base}_"}, timeout=120) body = r.json() assert r.status_code == 200, body names = [t["name"] for t in body["tags"]] assert tag_ok in names, f"Expected {tag_ok} to be returned for prefix '{base}_'" assert tag_bad not in names, f"'{tag_bad}' must not match — '_' is not a wildcard" assert body["total"] == 1
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "tests-unit/assets_test/test_tags.py", "license": "GNU General Public License v3.0", "lines": 191, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Comfy-Org/ComfyUI:tests-unit/assets_test/test_uploads.py
import json import uuid from concurrent.futures import ThreadPoolExecutor import requests import pytest def test_upload_ok_duplicate_reference(http: requests.Session, api_base: str, make_asset_bytes): name = "dup_a.safetensors" tags = ["models", "checkpoints", "unit-tests", "alpha"] meta = {"purpose": "dup"} data = make_asset_bytes(name) files = {"file": (name, data, "application/octet-stream")} form = {"tags": json.dumps(tags), "name": name, "user_metadata": json.dumps(meta)} r1 = http.post(api_base + "/api/assets", data=form, files=files, timeout=120) a1 = r1.json() assert r1.status_code == 201, a1 assert a1["created_new"] is True # Second upload with the same data and name should return created_new == False and the same asset files = {"file": (name, data, "application/octet-stream")} form = {"tags": json.dumps(tags), "name": name, "user_metadata": json.dumps(meta)} r2 = http.post(api_base + "/api/assets", data=form, files=files, timeout=120) a2 = r2.json() assert r2.status_code == 200, a2 assert a2["created_new"] is False assert a2["asset_hash"] == a1["asset_hash"] assert a2["id"] == a1["id"] # old reference # Third upload with the same data but new name should return created_new == False and the new AssetReference files = {"file": (name, data, "application/octet-stream")} form = {"tags": json.dumps(tags), "name": name + "_d", "user_metadata": json.dumps(meta)} r2 = http.post(api_base + "/api/assets", data=form, files=files, timeout=120) a3 = r2.json() assert r2.status_code == 200, a3 assert a3["created_new"] is False assert a3["asset_hash"] == a1["asset_hash"] assert a3["id"] != a1["id"] # old reference def test_upload_fastpath_from_existing_hash_no_file(http: requests.Session, api_base: str): # Seed a small file first name = "fastpath_seed.safetensors" tags = ["models", "checkpoints", "unit-tests"] meta = {} files = {"file": (name, b"B" * 1024, "application/octet-stream")} form = {"tags": json.dumps(tags), "name": name, "user_metadata": json.dumps(meta)} r1 = 
http.post(api_base + "/api/assets", data=form, files=files, timeout=120) b1 = r1.json() assert r1.status_code == 201, b1 h = b1["asset_hash"] # Now POST /api/assets with only hash and no file files = [ ("hash", (None, h)), ("tags", (None, json.dumps(tags))), ("name", (None, "fastpath_copy.safetensors")), ("user_metadata", (None, json.dumps({"purpose": "copy"}))), ] r2 = http.post(api_base + "/api/assets", files=files, timeout=120) b2 = r2.json() assert r2.status_code == 200, b2 # fast path returns 200 with created_new == False assert b2["created_new"] is False assert b2["asset_hash"] == h def test_upload_fastpath_with_known_hash_and_file( http: requests.Session, api_base: str ): # Seed files = {"file": ("seed.safetensors", b"C" * 128, "application/octet-stream")} form = {"tags": json.dumps(["models", "checkpoints", "unit-tests", "fp"]), "name": "seed.safetensors", "user_metadata": json.dumps({})} r1 = http.post(api_base + "/api/assets", data=form, files=files, timeout=120) b1 = r1.json() assert r1.status_code == 201, b1 h = b1["asset_hash"] # Send both file and hash of existing content -> server must drain file and create from hash (200) files = {"file": ("ignored.bin", b"ignored" * 10, "application/octet-stream")} form = {"hash": h, "tags": json.dumps(["models", "checkpoints", "unit-tests", "fp"]), "name": "copy_from_hash.safetensors", "user_metadata": json.dumps({})} r2 = http.post(api_base + "/api/assets", data=form, files=files, timeout=120) b2 = r2.json() assert r2.status_code == 200, b2 assert b2["created_new"] is False assert b2["asset_hash"] == h def test_upload_multiple_tags_fields_are_merged(http: requests.Session, api_base: str): data = [ ("tags", "models,checkpoints"), ("tags", json.dumps(["unit-tests", "alpha"])), ("name", "merge.safetensors"), ("user_metadata", json.dumps({"u": 1})), ] files = {"file": ("merge.safetensors", b"B" * 256, "application/octet-stream")} r1 = http.post(api_base + "/api/assets", data=data, files=files, timeout=120) created = 
r1.json() assert r1.status_code in (200, 201), created aid = created["id"] # Verify all tags are present on the resource rg = http.get(f"{api_base}/api/assets/{aid}", timeout=120) detail = rg.json() assert rg.status_code == 200, detail tags = set(detail["tags"]) assert {"models", "checkpoints", "unit-tests", "alpha"}.issubset(tags) @pytest.mark.parametrize("root", ["input", "output"]) def test_concurrent_upload_identical_bytes_different_names( root: str, http: requests.Session, api_base: str, make_asset_bytes, ): """ Two concurrent uploads of identical bytes but different names. Expect a single Asset (same hash), two AssetInfo rows, and exactly one created_new=True. """ scope = f"concupload-{uuid.uuid4().hex[:6]}" name1, name2 = "cu_a.bin", "cu_b.bin" data = make_asset_bytes("concurrent", 4096) tags = [root, "unit-tests", scope] def _do_upload(args): url, form_data, files_data = args with requests.Session() as s: return s.post(url, data=form_data, files=files_data, timeout=120) url = api_base + "/api/assets" form1 = {"tags": json.dumps(tags), "name": name1, "user_metadata": json.dumps({})} files1 = {"file": (name1, data, "application/octet-stream")} form2 = {"tags": json.dumps(tags), "name": name2, "user_metadata": json.dumps({})} files2 = {"file": (name2, data, "application/octet-stream")} with ThreadPoolExecutor(max_workers=2) as executor: futures = list(executor.map(_do_upload, [(url, form1, files1), (url, form2, files2)])) r1, r2 = futures b1, b2 = r1.json(), r2.json() assert r1.status_code in (200, 201), b1 assert r2.status_code in (200, 201), b2 assert b1["asset_hash"] == b2["asset_hash"] assert b1["id"] != b2["id"] created_flags = sorted([bool(b1.get("created_new")), bool(b2.get("created_new"))]) assert created_flags == [False, True] rl = http.get( api_base + "/api/assets", params={"include_tags": f"unit-tests,{scope}", "sort": "name"}, timeout=120, ) bl = rl.json() assert rl.status_code == 200, bl names = [a["name"] for a in bl.get("assets", [])] assert 
set([name1, name2]).issubset(names) def test_create_from_hash_endpoint_404(http: requests.Session, api_base: str): payload = { "hash": "blake3:" + "0" * 64, "name": "nonexistent.bin", "tags": ["models", "checkpoints", "unit-tests"], } r = http.post(api_base + "/api/assets/from-hash", json=payload, timeout=120) body = r.json() assert r.status_code == 404 assert body["error"]["code"] == "ASSET_NOT_FOUND" def test_upload_zero_byte_rejected(http: requests.Session, api_base: str): files = {"file": ("empty.safetensors", b"", "application/octet-stream")} form = {"tags": json.dumps(["models", "checkpoints", "unit-tests", "edge"]), "name": "empty.safetensors", "user_metadata": json.dumps({})} r = http.post(api_base + "/api/assets", data=form, files=files, timeout=120) body = r.json() assert r.status_code == 400 assert body["error"]["code"] == "EMPTY_UPLOAD" def test_upload_invalid_root_tag_rejected(http: requests.Session, api_base: str): files = {"file": ("badroot.bin", b"A" * 64, "application/octet-stream")} form = {"tags": json.dumps(["not-a-root", "whatever"]), "name": "badroot.bin", "user_metadata": json.dumps({})} r = http.post(api_base + "/api/assets", data=form, files=files, timeout=120) body = r.json() assert r.status_code == 400 assert body["error"]["code"] == "INVALID_BODY" def test_upload_user_metadata_must_be_json(http: requests.Session, api_base: str): files = {"file": ("badmeta.bin", b"A" * 128, "application/octet-stream")} form = {"tags": json.dumps(["models", "checkpoints", "unit-tests", "edge"]), "name": "badmeta.bin", "user_metadata": "{not json}"} r = http.post(api_base + "/api/assets", data=form, files=files, timeout=120) body = r.json() assert r.status_code == 400 assert body["error"]["code"] == "INVALID_BODY" def test_upload_requires_multipart(http: requests.Session, api_base: str): r = http.post(api_base + "/api/assets", json={"foo": "bar"}, timeout=120) body = r.json() assert r.status_code == 415 assert body["error"]["code"] == 
"UNSUPPORTED_MEDIA_TYPE" def test_upload_missing_file_and_hash(http: requests.Session, api_base: str): files = [ ("tags", (None, json.dumps(["models", "checkpoints", "unit-tests"]))), ("name", (None, "x.safetensors")), ] r = http.post(api_base + "/api/assets", files=files, timeout=120) body = r.json() assert r.status_code == 400 assert body["error"]["code"] == "MISSING_FILE" def test_upload_models_unknown_category(http: requests.Session, api_base: str): files = {"file": ("m.safetensors", b"A" * 128, "application/octet-stream")} form = {"tags": json.dumps(["models", "no_such_category", "unit-tests"]), "name": "m.safetensors"} r = http.post(api_base + "/api/assets", data=form, files=files, timeout=120) body = r.json() assert r.status_code == 400 assert body["error"]["code"] == "INVALID_BODY" assert body["error"]["message"].startswith("unknown models category") def test_upload_models_requires_category(http: requests.Session, api_base: str): files = {"file": ("nocat.safetensors", b"A" * 64, "application/octet-stream")} form = {"tags": json.dumps(["models"]), "name": "nocat.safetensors", "user_metadata": json.dumps({})} r = http.post(api_base + "/api/assets", data=form, files=files, timeout=120) body = r.json() assert r.status_code == 400 assert body["error"]["code"] == "INVALID_BODY" def test_upload_tags_traversal_guard(http: requests.Session, api_base: str): files = {"file": ("evil.safetensors", b"A" * 256, "application/octet-stream")} form = {"tags": json.dumps(["models", "checkpoints", "unit-tests", "..", "zzz"]), "name": "evil.safetensors"} r = http.post(api_base + "/api/assets", data=form, files=files, timeout=120) body = r.json() assert r.status_code == 400 assert body["error"]["code"] in ("BAD_REQUEST", "INVALID_BODY") @pytest.mark.parametrize("root", ["input", "output"]) def test_duplicate_upload_same_display_name_does_not_clobber( root: str, http: requests.Session, api_base: str, asset_factory, make_asset_bytes, ): """ Two uploads use the same tags and the 
same display name but different bytes. With hash-based filenames, they must NOT overwrite each other. Both assets remain accessible and serve their original content. """ scope = f"dup-path-{uuid.uuid4().hex[:6]}" display_name = "same_display.bin" d1 = make_asset_bytes(scope + "-v1", 1536) d2 = make_asset_bytes(scope + "-v2", 2048) tags = [root, "unit-tests", scope] first = asset_factory(display_name, tags, {}, d1) second = asset_factory(display_name, tags, {}, d2) assert first["id"] != second["id"] assert first["asset_hash"] != second["asset_hash"] # different content assert first["name"] == second["name"] == display_name # Both must be independently retrievable r1 = http.get(f"{api_base}/api/assets/{first['id']}/content", timeout=120) b1 = r1.content assert r1.status_code == 200 assert b1 == d1 r2 = http.get(f"{api_base}/api/assets/{second['id']}/content", timeout=120) b2 = r2.content assert r2.status_code == 200 assert b2 == d2
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "tests-unit/assets_test/test_uploads.py", "license": "GNU General Public License v3.0", "lines": 235, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Comfy-Org/ComfyUI:comfy_extras/nodes_color.py
from typing_extensions import override from comfy_api.latest import ComfyExtension, io class ColorToRGBInt(io.ComfyNode): @classmethod def define_schema(cls) -> io.Schema: return io.Schema( node_id="ColorToRGBInt", display_name="Color to RGB Int", category="utils", description="Convert a color to a RGB integer value.", inputs=[ io.Color.Input("color"), ], outputs=[ io.Int.Output(display_name="rgb_int"), ], ) @classmethod def execute( cls, color: str, ) -> io.NodeOutput: # expect format #RRGGBB if len(color) != 7 or color[0] != "#": raise ValueError("Color must be in format #RRGGBB") r = int(color[1:3], 16) g = int(color[3:5], 16) b = int(color[5:7], 16) return io.NodeOutput(r * 256 * 256 + g * 256 + b) class ColorExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[io.ComfyNode]]: return [ColorToRGBInt] async def comfy_entrypoint() -> ColorExtension: return ColorExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_color.py", "license": "GNU General Public License v3.0", "lines": 35, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_api_nodes/apis/grok.py
from pydantic import BaseModel, Field class ImageGenerationRequest(BaseModel): model: str = Field(...) prompt: str = Field(...) aspect_ratio: str = Field(...) n: int = Field(...) seed: int = Field(...) response_for: str = Field("url") class InputUrlObject(BaseModel): url: str = Field(...) class ImageEditRequest(BaseModel): model: str = Field(...) image: InputUrlObject = Field(...) prompt: str = Field(...) resolution: str = Field(...) n: int = Field(...) seed: int = Field(...) response_for: str = Field("url") class VideoGenerationRequest(BaseModel): model: str = Field(...) prompt: str = Field(...) image: InputUrlObject | None = Field(...) duration: int = Field(...) aspect_ratio: str | None = Field(...) resolution: str = Field(...) seed: int = Field(...) class VideoEditRequest(BaseModel): model: str = Field(...) prompt: str = Field(...) video: InputUrlObject = Field(...) seed: int = Field(...) class ImageResponseObject(BaseModel): url: str | None = Field(None) b64_json: str | None = Field(None) revised_prompt: str | None = Field(None) class ImageGenerationResponse(BaseModel): data: list[ImageResponseObject] = Field(...) class VideoGenerationResponse(BaseModel): request_id: str = Field(...) class VideoResponseObject(BaseModel): url: str = Field(...) upsampled_prompt: str | None = Field(None) duration: int = Field(...) class VideoStatusResponse(BaseModel): status: str | None = Field(None) video: VideoResponseObject | None = Field(None) model: str | None = Field(None)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/apis/grok.py", "license": "GNU General Public License v3.0", "lines": 47, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_api_nodes/nodes_grok.py
import torch
from typing_extensions import override

from comfy_api.latest import IO, ComfyExtension, Input
from comfy_api_nodes.apis.grok import (
    ImageEditRequest,
    ImageGenerationRequest,
    ImageGenerationResponse,
    InputUrlObject,
    VideoEditRequest,
    VideoGenerationRequest,
    VideoGenerationResponse,
    VideoStatusResponse,
)
from comfy_api_nodes.util import (
    ApiEndpoint,
    download_url_to_image_tensor,
    download_url_to_video_output,
    get_fs_object_size,
    get_number_of_images,
    poll_op,
    sync_op,
    tensor_to_base64_string,
    upload_video_to_comfyapi,
    validate_string,
    validate_video_duration,
    validate_image_dimensions,
)


async def _image_node_output(response: ImageGenerationResponse) -> IO.NodeOutput:
    """Download the generated image(s) from *response* and wrap as node output.

    A single result is returned directly; multiple results are downloaded
    individually and concatenated along the batch dimension.
    """
    if len(response.data) == 1:
        return IO.NodeOutput(await download_url_to_image_tensor(response.data[0].url))
    urls = [str(item.url) for item in response.data if item.url]
    tensors = [await download_url_to_image_tensor(url) for url in urls]
    return IO.NodeOutput(torch.cat(tensors))


class GrokImageNode(IO.ComfyNode):
    """Text-to-image generation via the Grok (xAI) API."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="GrokImageNode",
            display_name="Grok Image",
            category="api node/image/Grok",
            description="Generate images using Grok based on a text prompt",
            inputs=[
                IO.Combo.Input("model", options=["grok-imagine-image-beta"]),
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    tooltip="The text prompt used to generate the image",
                ),
                IO.Combo.Input(
                    "aspect_ratio",
                    options=[
                        "1:1",
                        "2:3",
                        "3:2",
                        "3:4",
                        "4:3",
                        "9:16",
                        "16:9",
                        "9:19.5",
                        "19.5:9",
                        "9:20",
                        "20:9",
                        "1:2",
                        "2:1",
                    ],
                ),
                IO.Int.Input(
                    "number_of_images",
                    default=1,
                    min=1,
                    max=10,
                    step=1,
                    tooltip="Number of images to generate",
                    display_mode=IO.NumberDisplay.number,
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed to determine if node should re-run; "
                    "actual results are nondeterministic regardless of seed.",
                ),
            ],
            outputs=[
                IO.Image.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["number_of_images"]),
                expr="""{"type":"usd","usd":0.033 * widgets.number_of_images}""",
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        prompt: str,
        aspect_ratio: str,
        number_of_images: int,
        seed: int,
    ) -> IO.NodeOutput:
        validate_string(prompt, strip_whitespace=True, min_length=1)
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/xai/v1/images/generations", method="POST"),
            data=ImageGenerationRequest(
                model=model,
                prompt=prompt,
                aspect_ratio=aspect_ratio,
                n=number_of_images,
                seed=seed,
            ),
            response_model=ImageGenerationResponse,
        )
        return await _image_node_output(response)


class GrokImageEditNode(IO.ComfyNode):
    """Prompt-driven editing of a single input image via the Grok API."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="GrokImageEditNode",
            display_name="Grok Image Edit",
            category="api node/image/Grok",
            description="Modify an existing image based on a text prompt",
            inputs=[
                IO.Combo.Input("model", options=["grok-imagine-image-beta"]),
                IO.Image.Input("image"),
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    tooltip="The text prompt used to generate the image",
                ),
                IO.Combo.Input("resolution", options=["1K"]),
                IO.Int.Input(
                    "number_of_images",
                    default=1,
                    min=1,
                    max=10,
                    step=1,
                    tooltip="Number of edited images to generate",
                    display_mode=IO.NumberDisplay.number,
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed to determine if node should re-run; "
                    "actual results are nondeterministic regardless of seed.",
                ),
            ],
            outputs=[
                IO.Image.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["number_of_images"]),
                expr="""{"type":"usd","usd":0.002 + 0.033 * widgets.number_of_images}""",
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        image: Input.Image,
        prompt: str,
        resolution: str,
        number_of_images: int,
        seed: int,
    ) -> IO.NodeOutput:
        validate_string(prompt, strip_whitespace=True, min_length=1)
        if get_number_of_images(image) != 1:
            raise ValueError("Only one input image is supported.")
        # Image is sent inline as a data URI rather than uploaded separately.
        encoded = f"data:image/png;base64,{tensor_to_base64_string(image)}"
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/xai/v1/images/edits", method="POST"),
            data=ImageEditRequest(
                model=model,
                image=InputUrlObject(url=encoded),
                prompt=prompt,
                resolution=resolution.lower(),
                n=number_of_images,
                seed=seed,
            ),
            response_model=ImageGenerationResponse,
        )
        return await _image_node_output(response)


class GrokVideoNode(IO.ComfyNode):
    """Text- or image-to-video generation via the Grok API (async job + poll)."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="GrokVideoNode",
            display_name="Grok Video",
            category="api node/video/Grok",
            description="Generate video from a prompt or an image",
            inputs=[
                IO.Combo.Input("model", options=["grok-imagine-video-beta"]),
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    tooltip="Text description of the desired video.",
                ),
                IO.Combo.Input(
                    "resolution",
                    options=["480p", "720p"],
                    tooltip="The resolution of the output video.",
                ),
                IO.Combo.Input(
                    "aspect_ratio",
                    options=["auto", "16:9", "4:3", "3:2", "1:1", "2:3", "3:4", "9:16"],
                    tooltip="The aspect ratio of the output video.",
                ),
                IO.Int.Input(
                    "duration",
                    default=6,
                    min=1,
                    max=15,
                    step=1,
                    tooltip="The duration of the output video in seconds.",
                    display_mode=IO.NumberDisplay.slider,
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed to determine if node should re-run; "
                    "actual results are nondeterministic regardless of seed.",
                ),
                IO.Image.Input("image", optional=True),
            ],
            outputs=[
                IO.Video.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["duration"], inputs=["image"]),
                expr="""
                (
                    $base := 0.181 * widgets.duration;
                    {"type":"usd","usd": inputs.image.connected ? $base + 0.002 : $base}
                )
                """,
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        prompt: str,
        resolution: str,
        aspect_ratio: str,
        duration: int,
        seed: int,
        image: Input.Image | None = None,
    ) -> IO.NodeOutput:
        image_url = None
        if image is not None:
            if get_number_of_images(image) != 1:
                raise ValueError("Only one input image is supported.")
            image_url = InputUrlObject(url=f"data:image/png;base64,{tensor_to_base64_string(image)}")
        validate_string(prompt, strip_whitespace=True, min_length=1)
        # Kick off the asynchronous generation job ...
        initial_response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/xai/v1/videos/generations", method="POST"),
            data=VideoGenerationRequest(
                model=model,
                image=image_url,
                prompt=prompt,
                resolution=resolution,
                duration=duration,
                aspect_ratio=None if aspect_ratio == "auto" else aspect_ratio,
                seed=seed,
            ),
            response_model=VideoGenerationResponse,
        )
        # ... then poll until the server reports a terminal status.
        response = await poll_op(
            cls,
            ApiEndpoint(path=f"/proxy/xai/v1/videos/{initial_response.request_id}"),
            status_extractor=lambda r: r.status if r.status is not None else "complete",
            response_model=VideoStatusResponse,
        )
        return IO.NodeOutput(await download_url_to_video_output(response.video.url))


class GrokVideoEditNode(IO.ComfyNode):
    """Prompt-driven editing of an uploaded video via the Grok API."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="GrokVideoEditNode",
            display_name="Grok Video Edit",
            category="api node/video/Grok",
            description="Edit an existing video based on a text prompt.",
            inputs=[
                IO.Combo.Input("model", options=["grok-imagine-video-beta"]),
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    tooltip="Text description of the desired video.",
                ),
                IO.Video.Input("video", tooltip="Maximum supported duration is 8.7 seconds and 50MB file size."),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed to determine if node should re-run; "
                    "actual results are nondeterministic regardless of seed.",
                ),
            ],
            outputs=[
                IO.Video.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                expr="""{"type":"usd","usd": 0.191, "format": {"suffix": "/sec", "approximate": true}}""",
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        prompt: str,
        video: Input.Video,
        seed: int,
    ) -> IO.NodeOutput:
        validate_string(prompt, strip_whitespace=True, min_length=1)
        validate_video_duration(video, min_duration=1, max_duration=8.7)
        # Enforce the API's 50MB upload cap before any network round trip.
        video_stream = video.get_stream_source()
        video_size = get_fs_object_size(video_stream)
        if video_size > 50 * 1024 * 1024:
            raise ValueError(f"Video size ({video_size / 1024 / 1024:.1f}MB) exceeds 50MB limit.")
        initial_response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/xai/v1/videos/edits", method="POST"),
            data=VideoEditRequest(
                model=model,
                video=InputUrlObject(url=await upload_video_to_comfyapi(cls, video)),
                prompt=prompt,
                seed=seed,
            ),
            response_model=VideoGenerationResponse,
        )
        response = await poll_op(
            cls,
            ApiEndpoint(path=f"/proxy/xai/v1/videos/{initial_response.request_id}"),
            status_extractor=lambda r: r.status if r.status is not None else "complete",
            response_model=VideoStatusResponse,
        )
        return IO.NodeOutput(await download_url_to_video_output(response.video.url))


class GrokExtension(ComfyExtension):
    """Registers the Grok API nodes with ComfyUI."""

    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [
            GrokImageNode,
            GrokImageEditNode,
            GrokVideoNode,
            GrokVideoEditNode,
        ]


async def comfy_entrypoint() -> GrokExtension:
    return GrokExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/nodes_grok.py", "license": "GNU General Public License v3.0", "lines": 396, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy_api_nodes/apis/magnific.py
from typing import TypedDict

from pydantic import AliasChoices, BaseModel, Field, model_validator


# ---- DynamicCombo payload shapes (as received from the node widgets) -------

class InputPortraitMode(TypedDict):
    portrait_mode: str
    portrait_style: str
    portrait_beautifier: str


class InputAdvancedSettings(TypedDict):
    advanced_settings: str
    whites: int
    blacks: int
    brightness: int
    contrast: int
    saturation: int
    engine: str
    transfer_light_a: str
    transfer_light_b: str
    fixed_generation: bool


class InputSkinEnhancerMode(TypedDict):
    mode: str
    skin_detail: int
    optimized_for: str


# ---- Request payloads ------------------------------------------------------

class ImageUpscalerCreativeRequest(BaseModel):
    image: str = Field(...)
    scale_factor: str = Field(...)
    optimized_for: str = Field(...)
    prompt: str | None = Field(None)
    creativity: int = Field(...)
    hdr: int = Field(...)
    resemblance: int = Field(...)
    fractality: int = Field(...)
    engine: str = Field(...)


class ImageUpscalerPrecisionV2Request(BaseModel):
    image: str = Field(...)
    sharpen: int = Field(...)
    smart_grain: int = Field(...)
    ultra_detail: int = Field(...)
    flavor: str = Field(...)
    scale_factor: int = Field(...)


class ImageRelightAdvancedSettingsRequest(BaseModel):
    whites: int = Field(...)
    blacks: int = Field(...)
    brightness: int = Field(...)
    contrast: int = Field(...)
    saturation: int = Field(...)
    engine: str = Field(...)
    transfer_light_a: str = Field(...)
    transfer_light_b: str = Field(...)
    fixed_generation: bool = Field(...)


class ImageRelightRequest(BaseModel):
    image: str = Field(...)
    prompt: str | None = Field(None)
    transfer_light_from_reference_image: str | None = Field(None)
    light_transfer_strength: int = Field(...)
    interpolate_from_original: bool = Field(...)
    change_background: bool = Field(...)
    style: str = Field(...)
    preserve_details: bool = Field(...)
    advanced_settings: ImageRelightAdvancedSettingsRequest | None = Field(...)


class ImageStyleTransferRequest(BaseModel):
    image: str = Field(...)
    reference_image: str = Field(...)
    prompt: str | None = Field(None)
    style_strength: int = Field(...)
    structure_strength: int = Field(...)
    is_portrait: bool = Field(...)
    portrait_style: str | None = Field(...)
    portrait_beautifier: str | None = Field(...)
    flavor: str = Field(...)
    engine: str = Field(...)
    fixed_generation: bool = Field(...)


class ImageSkinEnhancerCreativeRequest(BaseModel):
    image: str = Field(...)
    sharpen: int = Field(...)
    smart_grain: int = Field(...)


class ImageSkinEnhancerFaithfulRequest(BaseModel):
    image: str = Field(...)
    sharpen: int = Field(...)
    smart_grain: int = Field(...)
    skin_detail: int = Field(...)


class ImageSkinEnhancerFlexibleRequest(BaseModel):
    image: str = Field(...)
    sharpen: int = Field(...)
    smart_grain: int = Field(...)
    optimized_for: str = Field(...)


# ---- Response payload ------------------------------------------------------

class TaskResponse(BaseModel):
    """Unified response model that handles both wrapped and unwrapped API responses."""

    task_id: str = Field(...)
    # Some endpoints report "status", others "task_status".
    status: str = Field(validation_alias=AliasChoices("status", "task_status"))
    generated: list[str] | None = Field(None)

    @model_validator(mode="before")
    @classmethod
    def unwrap_data(cls, values: dict) -> dict:
        # Some endpoints wrap the payload in a top-level "data" object.
        if "data" in values and isinstance(values["data"], dict):
            return values["data"]
        return values
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/apis/magnific.py", "license": "GNU General Public License v3.0", "lines": 95, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_api_nodes/nodes_magnific.py
import math from typing_extensions import override from comfy_api.latest import IO, ComfyExtension, Input from comfy_api_nodes.apis.magnific import ( ImageRelightAdvancedSettingsRequest, ImageRelightRequest, ImageSkinEnhancerCreativeRequest, ImageSkinEnhancerFaithfulRequest, ImageSkinEnhancerFlexibleRequest, ImageStyleTransferRequest, ImageUpscalerCreativeRequest, ImageUpscalerPrecisionV2Request, InputAdvancedSettings, InputPortraitMode, InputSkinEnhancerMode, TaskResponse, ) from comfy_api_nodes.util import ( ApiEndpoint, download_url_to_image_tensor, downscale_image_tensor, get_image_dimensions, get_number_of_images, poll_op, sync_op, upload_images_to_comfyapi, validate_image_aspect_ratio, validate_image_dimensions, ) _EUR_TO_USD = 1.19 def _tier_price_eur(megapixels: float) -> float: """Price in EUR for a single Magnific upscaling step based on input megapixels.""" if megapixels <= 1.3: return 0.143 if megapixels <= 3.0: return 0.286 if megapixels <= 6.4: return 0.429 return 1.716 def _calculate_magnific_upscale_price_usd(width: int, height: int, scale: int) -> float: """Calculate total Magnific upscale price in USD for given input dimensions and scale factor.""" num_steps = int(math.log2(scale)) total_eur = 0.0 pixels = width * height for _ in range(num_steps): total_eur += _tier_price_eur(pixels / 1_000_000) pixels *= 4 return round(total_eur * _EUR_TO_USD, 2) class MagnificImageUpscalerCreativeNode(IO.ComfyNode): @classmethod def define_schema(cls): return IO.Schema( node_id="MagnificImageUpscalerCreativeNode", display_name="Magnific Image Upscale (Creative)", category="api node/image/Magnific", description="Prompt‑guided enhancement, stylization, and 2x/4x/8x/16x upscaling. 
" "Maximum output: 25.3 megapixels.", inputs=[ IO.Image.Input("image"), IO.String.Input("prompt", multiline=True, default=""), IO.Combo.Input("scale_factor", options=["2x", "4x", "8x", "16x"]), IO.Combo.Input( "optimized_for", options=[ "standard", "soft_portraits", "hard_portraits", "art_n_illustration", "videogame_assets", "nature_n_landscapes", "films_n_photography", "3d_renders", "science_fiction_n_horror", ], ), IO.Int.Input("creativity", min=-10, max=10, default=0, display_mode=IO.NumberDisplay.slider), IO.Int.Input( "hdr", min=-10, max=10, default=0, tooltip="The level of definition and detail.", display_mode=IO.NumberDisplay.slider, ), IO.Int.Input( "resemblance", min=-10, max=10, default=0, tooltip="The level of resemblance to the original image.", display_mode=IO.NumberDisplay.slider, ), IO.Int.Input( "fractality", min=-10, max=10, default=0, tooltip="The strength of the prompt and intricacy per square pixel.", display_mode=IO.NumberDisplay.slider, ), IO.Combo.Input( "engine", options=["automatic", "magnific_illusio", "magnific_sharpy", "magnific_sparkle"], advanced=True, ), IO.Boolean.Input( "auto_downscale", default=False, tooltip="Automatically downscale input image if output would exceed maximum pixel limit.", advanced=True, ), ], outputs=[ IO.Image.Output(), ], hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, price_badge=IO.PriceBadge( depends_on=IO.PriceBadgeDepends(widgets=["scale_factor", "auto_downscale"]), expr=""" ( $ad := widgets.auto_downscale; $mins := $ad ? 
{"2x": 0.172, "4x": 0.343, "8x": 0.515, "16x": 0.515} : {"2x": 0.172, "4x": 0.343, "8x": 0.515, "16x": 0.844}; $maxs := {"2x": 0.515, "4x": 0.844, "8x": 1.015, "16x": 1.187}; { "type": "range_usd", "min_usd": $lookup($mins, widgets.scale_factor), "max_usd": $lookup($maxs, widgets.scale_factor), "format": { "approximate": true } } ) """, ), ) @classmethod async def execute( cls, image: Input.Image, prompt: str, scale_factor: str, optimized_for: str, creativity: int, hdr: int, resemblance: int, fractality: int, engine: str, auto_downscale: bool, ) -> IO.NodeOutput: if get_number_of_images(image) != 1: raise ValueError("Exactly one input image is required.") validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False) validate_image_dimensions(image, min_height=160, min_width=160) max_output_pixels = 25_300_000 height, width = get_image_dimensions(image) requested_scale = int(scale_factor.rstrip("x")) output_pixels = height * width * requested_scale * requested_scale if output_pixels > max_output_pixels: if auto_downscale: # Find optimal scale factor that doesn't require >2x downscale. # Server upscales in 2x steps, so aggressive downscaling degrades quality. input_pixels = width * height scale = 2 max_input_pixels = max_output_pixels // 4 for candidate in [16, 8, 4, 2]: if candidate > requested_scale: continue scale_output_pixels = input_pixels * candidate * candidate if scale_output_pixels <= max_output_pixels: scale = candidate max_input_pixels = None break downscale_ratio = math.sqrt(scale_output_pixels / max_output_pixels) if downscale_ratio <= 2.0: scale = candidate max_input_pixels = max_output_pixels // (candidate * candidate) break if max_input_pixels is not None: image = downscale_image_tensor(image, total_pixels=max_input_pixels) scale_factor = f"{scale}x" else: raise ValueError( f"Output size ({width * requested_scale}x{height * requested_scale} = {output_pixels:,} pixels) " f"exceeds maximum allowed size of {max_output_pixels:,} pixels. 
" f"Use a smaller input image or lower scale factor." ) final_height, final_width = get_image_dimensions(image) actual_scale = int(scale_factor.rstrip("x")) price_usd = _calculate_magnific_upscale_price_usd(final_width, final_height, actual_scale) initial_res = await sync_op( cls, ApiEndpoint(path="/proxy/freepik/v1/ai/image-upscaler", method="POST"), response_model=TaskResponse, data=ImageUpscalerCreativeRequest( image=(await upload_images_to_comfyapi(cls, image, max_images=1, total_pixels=None))[0], scale_factor=scale_factor, optimized_for=optimized_for, creativity=creativity, hdr=hdr, resemblance=resemblance, fractality=fractality, engine=engine, prompt=prompt if prompt else None, ), ) final_response = await poll_op( cls, ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-upscaler/{initial_res.task_id}"), response_model=TaskResponse, status_extractor=lambda x: x.status, price_extractor=lambda _: price_usd, poll_interval=10.0, max_poll_attempts=480, ) return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0])) class MagnificImageUpscalerPreciseV2Node(IO.ComfyNode): @classmethod def define_schema(cls): return IO.Schema( node_id="MagnificImageUpscalerPreciseV2Node", display_name="Magnific Image Upscale (Precise V2)", category="api node/image/Magnific", description="High-fidelity upscaling with fine control over sharpness, grain, and detail. " "Maximum output: 10060×10060 pixels.", inputs=[ IO.Image.Input("image"), IO.Combo.Input("scale_factor", options=["2x", "4x", "8x", "16x"]), IO.Combo.Input( "flavor", options=["sublime", "photo", "photo_denoiser"], tooltip="Processing style: " "sublime for general use, photo for photographs, photo_denoiser for noisy photos.", ), IO.Int.Input( "sharpen", min=0, max=100, default=7, tooltip="Image sharpness intensity. 
Higher values increase edge definition and clarity.", display_mode=IO.NumberDisplay.slider, ), IO.Int.Input( "smart_grain", min=0, max=100, default=7, tooltip="Intelligent grain/texture enhancement to prevent the image from " "looking too smooth or artificial.", display_mode=IO.NumberDisplay.slider, ), IO.Int.Input( "ultra_detail", min=0, max=100, default=30, tooltip="Controls fine detail, textures, and micro-details added during upscaling.", display_mode=IO.NumberDisplay.slider, ), IO.Boolean.Input( "auto_downscale", default=False, tooltip="Automatically downscale input image if output would exceed maximum resolution.", advanced=True, ), ], outputs=[ IO.Image.Output(), ], hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, price_badge=IO.PriceBadge( depends_on=IO.PriceBadgeDepends(widgets=["scale_factor"]), expr=""" ( $mins := {"2x": 0.172, "4x": 0.343, "8x": 0.515, "16x": 0.844}; $maxs := {"2x": 2.045, "4x": 2.545, "8x": 2.889, "16x": 3.06}; { "type": "range_usd", "min_usd": $lookup($mins, widgets.scale_factor), "max_usd": $lookup($maxs, widgets.scale_factor), "format": { "approximate": true } } ) """, ), ) @classmethod async def execute( cls, image: Input.Image, scale_factor: str, flavor: str, sharpen: int, smart_grain: int, ultra_detail: int, auto_downscale: bool, ) -> IO.NodeOutput: if get_number_of_images(image) != 1: raise ValueError("Exactly one input image is required.") validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False) validate_image_dimensions(image, min_height=160, min_width=160) max_output_dimension = 10060 height, width = get_image_dimensions(image) requested_scale = int(scale_factor.strip("x")) output_width = width * requested_scale output_height = height * requested_scale if output_width > max_output_dimension or output_height > max_output_dimension: if auto_downscale: # Find optimal scale factor that doesn't require >2x downscale. 
# Server upscales in 2x steps, so aggressive downscaling degrades quality. max_dim = max(width, height) scale = 2 max_input_dim = max_output_dimension // 2 scale_ratio = max_input_dim / max_dim max_input_pixels = int(width * height * scale_ratio * scale_ratio) for candidate in [16, 8, 4, 2]: if candidate > requested_scale: continue output_dim = max_dim * candidate if output_dim <= max_output_dimension: scale = candidate max_input_pixels = None break downscale_ratio = output_dim / max_output_dimension if downscale_ratio <= 2.0: scale = candidate max_input_dim = max_output_dimension // candidate scale_ratio = max_input_dim / max_dim max_input_pixels = int(width * height * scale_ratio * scale_ratio) break if max_input_pixels is not None: image = downscale_image_tensor(image, total_pixels=max_input_pixels) requested_scale = scale else: raise ValueError( f"Output dimensions ({output_width}x{output_height}) exceed maximum allowed " f"resolution of {max_output_dimension}x{max_output_dimension} pixels. " f"Use a smaller input image or lower scale factor." 
) final_height, final_width = get_image_dimensions(image) price_usd = _calculate_magnific_upscale_price_usd(final_width, final_height, requested_scale) initial_res = await sync_op( cls, ApiEndpoint(path="/proxy/freepik/v1/ai/image-upscaler-precision-v2", method="POST"), response_model=TaskResponse, data=ImageUpscalerPrecisionV2Request( image=(await upload_images_to_comfyapi(cls, image, max_images=1, total_pixels=None))[0], scale_factor=requested_scale, flavor=flavor, sharpen=sharpen, smart_grain=smart_grain, ultra_detail=ultra_detail, ), ) final_response = await poll_op( cls, ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-upscaler-precision-v2/{initial_res.task_id}"), response_model=TaskResponse, status_extractor=lambda x: x.status, price_extractor=lambda _: price_usd, poll_interval=10.0, max_poll_attempts=480, ) return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0])) class MagnificImageStyleTransferNode(IO.ComfyNode): @classmethod def define_schema(cls): return IO.Schema( node_id="MagnificImageStyleTransferNode", display_name="Magnific Image Style Transfer", category="api node/image/Magnific", description="Transfer the style from a reference image to your input image.", inputs=[ IO.Image.Input("image", tooltip="The image to apply style transfer to."), IO.Image.Input("reference_image", tooltip="The reference image to extract style from."), IO.String.Input("prompt", multiline=True, default=""), IO.Int.Input( "style_strength", min=0, max=100, default=100, tooltip="Percentage of style strength.", display_mode=IO.NumberDisplay.slider, ), IO.Int.Input( "structure_strength", min=0, max=100, default=50, tooltip="Maintains the structure of the original image.", display_mode=IO.NumberDisplay.slider, ), IO.Combo.Input( "flavor", options=["faithful", "gen_z", "psychedelia", "detaily", "clear", "donotstyle", "donotstyle_sharp"], tooltip="Style transfer flavor.", ), IO.Combo.Input( "engine", options=[ "balanced", "definio", "illusio", "3d_cartoon", 
"colorful_anime", "caricature", "real", "super_real", "softy", ], tooltip="Processing engine selection.", advanced=True, ), IO.DynamicCombo.Input( "portrait_mode", options=[ IO.DynamicCombo.Option("disabled", []), IO.DynamicCombo.Option( "enabled", [ IO.Combo.Input( "portrait_style", options=["standard", "pop", "super_pop"], tooltip="Visual style applied to portrait images.", ), IO.Combo.Input( "portrait_beautifier", options=["none", "beautify_face", "beautify_face_max"], tooltip="Facial beautification intensity on portraits.", ), ], ), ], tooltip="Enable portrait mode for facial enhancements.", ), IO.Boolean.Input( "fixed_generation", default=True, tooltip="When disabled, expect each generation to introduce a degree of randomness, " "leading to more diverse outcomes.", advanced=True, ), ], outputs=[ IO.Image.Output(), ], hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, price_badge=IO.PriceBadge( expr="""{"type":"usd","usd":0.11}""", ), ) @classmethod async def execute( cls, image: Input.Image, reference_image: Input.Image, prompt: str, style_strength: int, structure_strength: int, flavor: str, engine: str, portrait_mode: InputPortraitMode, fixed_generation: bool, ) -> IO.NodeOutput: if get_number_of_images(image) != 1: raise ValueError("Exactly one input image is required.") if get_number_of_images(reference_image) != 1: raise ValueError("Exactly one reference image is required.") validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False) validate_image_aspect_ratio(reference_image, (1, 3), (3, 1), strict=False) validate_image_dimensions(image, min_height=160, min_width=160) validate_image_dimensions(reference_image, min_height=160, min_width=160) is_portrait = portrait_mode["portrait_mode"] == "enabled" portrait_style = portrait_mode.get("portrait_style", "standard") portrait_beautifier = portrait_mode.get("portrait_beautifier", "none") uploaded_urls = await upload_images_to_comfyapi(cls, 
[image, reference_image], max_images=2) initial_res = await sync_op( cls, ApiEndpoint(path="/proxy/freepik/v1/ai/image-style-transfer", method="POST"), response_model=TaskResponse, data=ImageStyleTransferRequest( image=uploaded_urls[0], reference_image=uploaded_urls[1], prompt=prompt if prompt else None, style_strength=style_strength, structure_strength=structure_strength, is_portrait=is_portrait, portrait_style=portrait_style if is_portrait else None, portrait_beautifier=portrait_beautifier if is_portrait and portrait_beautifier != "none" else None, flavor=flavor, engine=engine, fixed_generation=fixed_generation, ), ) final_response = await poll_op( cls, ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-style-transfer/{initial_res.task_id}"), response_model=TaskResponse, status_extractor=lambda x: x.status, poll_interval=10.0, max_poll_attempts=480, ) return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0])) class MagnificImageRelightNode(IO.ComfyNode): @classmethod def define_schema(cls): return IO.Schema( node_id="MagnificImageRelightNode", display_name="Magnific Image Relight", category="api node/image/Magnific", description="Relight an image with lighting adjustments and optional reference-based light transfer.", inputs=[ IO.Image.Input("image", tooltip="The image to relight."), IO.String.Input( "prompt", multiline=True, default="", tooltip="Descriptive guidance for lighting. 
Supports emphasis notation (1-1.4).", ), IO.Int.Input( "light_transfer_strength", min=0, max=100, default=100, tooltip="Intensity of light transfer application.", display_mode=IO.NumberDisplay.slider, ), IO.Combo.Input( "style", options=[ "standard", "darker_but_realistic", "clean", "smooth", "brighter", "contrasted_n_hdr", "just_composition", ], tooltip="Stylistic output preference.", ), IO.Boolean.Input( "interpolate_from_original", default=False, tooltip="Restricts generation freedom to match original more closely.", advanced=True, ), IO.Boolean.Input( "change_background", default=True, tooltip="Modifies background based on prompt/reference.", advanced=True, ), IO.Boolean.Input( "preserve_details", default=True, tooltip="Maintains texture and fine details from original.", advanced=True, ), IO.DynamicCombo.Input( "advanced_settings", options=[ IO.DynamicCombo.Option("disabled", []), IO.DynamicCombo.Option( "enabled", [ IO.Int.Input( "whites", min=0, max=100, default=50, tooltip="Adjusts the brightest tones in the image.", display_mode=IO.NumberDisplay.slider, ), IO.Int.Input( "blacks", min=0, max=100, default=50, tooltip="Adjusts the darkest tones in the image.", display_mode=IO.NumberDisplay.slider, ), IO.Int.Input( "brightness", min=0, max=100, default=50, tooltip="Overall brightness adjustment.", display_mode=IO.NumberDisplay.slider, ), IO.Int.Input( "contrast", min=0, max=100, default=50, tooltip="Contrast adjustment.", display_mode=IO.NumberDisplay.slider, ), IO.Int.Input( "saturation", min=0, max=100, default=50, tooltip="Color saturation adjustment.", display_mode=IO.NumberDisplay.slider, ), IO.Combo.Input( "engine", options=[ "automatic", "balanced", "cool", "real", "illusio", "fairy", "colorful_anime", "hard_transform", "softy", ], tooltip="Processing engine selection.", ), IO.Combo.Input( "transfer_light_a", options=["automatic", "low", "medium", "normal", "high", "high_on_faces"], tooltip="The intensity of light transfer.", ), IO.Combo.Input( 
"transfer_light_b", options=[ "automatic", "composition", "straight", "smooth_in", "smooth_out", "smooth_both", "reverse_both", "soft_in", "soft_out", "soft_mid", # "strong_mid", # Commented out because requests fail when this is set. "style_shift", "strong_shift", ], tooltip="Also modifies light transfer intensity. " "Can be combined with the previous control for varied effects.", ), IO.Boolean.Input( "fixed_generation", default=True, tooltip="Ensures consistent output with the same settings.", ), ], ), ], tooltip="Fine-tuning options for advanced lighting control.", ), IO.Image.Input( "reference_image", optional=True, tooltip="Optional reference image to transfer lighting from.", ), ], outputs=[ IO.Image.Output(), ], hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, price_badge=IO.PriceBadge( expr="""{"type":"usd","usd":0.11}""", ), ) @classmethod async def execute( cls, image: Input.Image, prompt: str, light_transfer_strength: int, style: str, interpolate_from_original: bool, change_background: bool, preserve_details: bool, advanced_settings: InputAdvancedSettings, reference_image: Input.Image | None = None, ) -> IO.NodeOutput: if get_number_of_images(image) != 1: raise ValueError("Exactly one input image is required.") if reference_image is not None and get_number_of_images(reference_image) != 1: raise ValueError("Exactly one reference image is required.") validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False) validate_image_dimensions(image, min_height=160, min_width=160) if reference_image is not None: validate_image_aspect_ratio(reference_image, (1, 3), (3, 1), strict=False) validate_image_dimensions(reference_image, min_height=160, min_width=160) image_url = (await upload_images_to_comfyapi(cls, image, max_images=1))[0] reference_url = None if reference_image is not None: reference_url = (await upload_images_to_comfyapi(cls, reference_image, max_images=1))[0] adv_settings = None if 
advanced_settings["advanced_settings"] == "enabled": adv_settings = ImageRelightAdvancedSettingsRequest( whites=advanced_settings["whites"], blacks=advanced_settings["blacks"], brightness=advanced_settings["brightness"], contrast=advanced_settings["contrast"], saturation=advanced_settings["saturation"], engine=advanced_settings["engine"], transfer_light_a=advanced_settings["transfer_light_a"], transfer_light_b=advanced_settings["transfer_light_b"], fixed_generation=advanced_settings["fixed_generation"], ) initial_res = await sync_op( cls, ApiEndpoint(path="/proxy/freepik/v1/ai/image-relight", method="POST"), response_model=TaskResponse, data=ImageRelightRequest( image=image_url, prompt=prompt if prompt else None, transfer_light_from_reference_image=reference_url, light_transfer_strength=light_transfer_strength, interpolate_from_original=interpolate_from_original, change_background=change_background, style=style, preserve_details=preserve_details, advanced_settings=adv_settings, ), ) final_response = await poll_op( cls, ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-relight/{initial_res.task_id}"), response_model=TaskResponse, status_extractor=lambda x: x.status, poll_interval=10.0, max_poll_attempts=480, ) return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0])) class MagnificImageSkinEnhancerNode(IO.ComfyNode): @classmethod def define_schema(cls): return IO.Schema( node_id="MagnificImageSkinEnhancerNode", display_name="Magnific Image Skin Enhancer", category="api node/image/Magnific", description="Skin enhancement for portraits with multiple processing modes.", inputs=[ IO.Image.Input("image", tooltip="The portrait image to enhance."), IO.Int.Input( "sharpen", min=0, max=100, default=0, tooltip="Sharpening intensity level.", display_mode=IO.NumberDisplay.slider, ), IO.Int.Input( "smart_grain", min=0, max=100, default=2, tooltip="Smart grain intensity level.", display_mode=IO.NumberDisplay.slider, ), IO.DynamicCombo.Input( "mode", 
options=[ IO.DynamicCombo.Option("creative", []), IO.DynamicCombo.Option( "faithful", [ IO.Int.Input( "skin_detail", min=0, max=100, default=80, tooltip="Skin detail enhancement level.", display_mode=IO.NumberDisplay.slider, ), ], ), IO.DynamicCombo.Option( "flexible", [ IO.Combo.Input( "optimized_for", options=[ "enhance_skin", "improve_lighting", "enhance_everything", "transform_to_real", "no_make_up", ], tooltip="Enhancement optimization target.", ), ], ), ], tooltip="Processing mode: creative for artistic enhancement, " "faithful for preserving original appearance, " "flexible for targeted optimization.", ), ], outputs=[ IO.Image.Output(), ], hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, price_badge=IO.PriceBadge( depends_on=IO.PriceBadgeDepends(widgets=["mode"]), expr=""" ( $rates := {"creative": 0.29, "faithful": 0.37, "flexible": 0.45}; {"type":"usd","usd": $lookup($rates, widgets.mode)} ) """, ), ) @classmethod async def execute( cls, image: Input.Image, sharpen: int, smart_grain: int, mode: InputSkinEnhancerMode, ) -> IO.NodeOutput: if get_number_of_images(image) != 1: raise ValueError("Exactly one input image is required.") validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False) validate_image_dimensions(image, min_height=160, min_width=160) image_url = (await upload_images_to_comfyapi(cls, image, max_images=1, total_pixels=4096 * 4096))[0] selected_mode = mode["mode"] if selected_mode == "creative": endpoint = "creative" data = ImageSkinEnhancerCreativeRequest( image=image_url, sharpen=sharpen, smart_grain=smart_grain, ) elif selected_mode == "faithful": endpoint = "faithful" data = ImageSkinEnhancerFaithfulRequest( image=image_url, sharpen=sharpen, smart_grain=smart_grain, skin_detail=mode["skin_detail"], ) else: # flexible endpoint = "flexible" data = ImageSkinEnhancerFlexibleRequest( image=image_url, sharpen=sharpen, smart_grain=smart_grain, optimized_for=mode["optimized_for"], 
) initial_res = await sync_op( cls, ApiEndpoint(path=f"/proxy/freepik/v1/ai/skin-enhancer/{endpoint}", method="POST"), response_model=TaskResponse, data=data, ) final_response = await poll_op( cls, ApiEndpoint(path=f"/proxy/freepik/v1/ai/skin-enhancer/{initial_res.task_id}"), response_model=TaskResponse, status_extractor=lambda x: x.status, poll_interval=10.0, max_poll_attempts=480, ) return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0])) class MagnificExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ MagnificImageUpscalerCreativeNode, MagnificImageUpscalerPreciseV2Node, MagnificImageStyleTransferNode, MagnificImageRelightNode, MagnificImageSkinEnhancerNode, ] async def comfy_entrypoint() -> MagnificExtension: return MagnificExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/nodes_magnific.py", "license": "GNU General Public License v3.0", "lines": 900, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy_extras/nodes_lora_debug.py
import folder_paths import comfy.utils import comfy.sd class LoraLoaderBypass: """ Apply LoRA in bypass mode without modifying base model weights. Bypass mode computes: output = base_forward(x) + lora_path(x) This is useful for training and when model weights are offloaded. """ def __init__(self): self.loaded_lora = None @classmethod def INPUT_TYPES(s): return { "required": { "model": ("MODEL", {"tooltip": "The diffusion model the LoRA will be applied to."}), "clip": ("CLIP", {"tooltip": "The CLIP model the LoRA will be applied to."}), "lora_name": (folder_paths.get_filename_list("loras"), {"tooltip": "The name of the LoRA."}), "strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the diffusion model. This value can be negative."}), "strength_clip": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the CLIP model. This value can be negative."}), } } RETURN_TYPES = ("MODEL", "CLIP") OUTPUT_TOOLTIPS = ("The modified diffusion model.", "The modified CLIP model.") FUNCTION = "load_lora" CATEGORY = "loaders" DESCRIPTION = "Apply LoRA in bypass mode. Unlike regular LoRA, this doesn't modify model weights - instead it injects the LoRA computation during forward pass. Useful for training scenarios." 
EXPERIMENTAL = True def load_lora(self, model, clip, lora_name, strength_model, strength_clip): if strength_model == 0 and strength_clip == 0: return (model, clip) lora_path = folder_paths.get_full_path_or_raise("loras", lora_name) lora = None if self.loaded_lora is not None: if self.loaded_lora[0] == lora_path: lora = self.loaded_lora[1] else: self.loaded_lora = None if lora is None: lora = comfy.utils.load_torch_file(lora_path, safe_load=True) self.loaded_lora = (lora_path, lora) model_lora, clip_lora = comfy.sd.load_bypass_lora_for_models(model, clip, lora, strength_model, strength_clip) return (model_lora, clip_lora) class LoraLoaderBypassModelOnly(LoraLoaderBypass): @classmethod def INPUT_TYPES(s): return {"required": { "model": ("MODEL",), "lora_name": (folder_paths.get_filename_list("loras"), ), "strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}), }} RETURN_TYPES = ("MODEL",) FUNCTION = "load_lora_model_only" def load_lora_model_only(self, model, lora_name, strength_model): return (self.load_lora(model, None, lora_name, strength_model, 0)[0],) NODE_CLASS_MAPPINGS = { "LoraLoaderBypass": LoraLoaderBypass, "LoraLoaderBypassModelOnly": LoraLoaderBypassModelOnly, } NODE_DISPLAY_NAME_MAPPINGS = { "LoraLoaderBypass": "Load LoRA (Bypass) (For debugging)", "LoraLoaderBypassModelOnly": "Load LoRA (Bypass, Model Only) (for debugging)", }
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_lora_debug.py", "license": "GNU General Public License v3.0", "lines": 62, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy/weight_adapter/bypass.py
""" Bypass mode implementation for weight adapters (LoRA, LoKr, LoHa, etc.) Bypass mode applies adapters during forward pass without modifying base weights: bypass(f)(x) = g(f(x) + h(x)) Where: - f(x): Original layer forward - h(x): Additive component from adapter (LoRA path) - g(y): Output transformation (identity for most adapters) This is useful for: - Training with gradient checkpointing - Avoiding weight modifications when weights are offloaded - Supporting multiple adapters with different strengths dynamically """ import logging from typing import Optional, Union import torch import torch.nn as nn import comfy.model_management from .base import WeightAdapterBase, WeightAdapterTrainBase from comfy.patcher_extension import PatcherInjection # Type alias for adapters that support bypass mode BypassAdapter = Union[WeightAdapterBase, WeightAdapterTrainBase] def get_module_type_info(module: nn.Module) -> dict: """ Determine module type and extract conv parameters from module class. This is more reliable than checking weight.ndim, especially for quantized layers where weight shape might be different. 
Returns: dict with keys: is_conv, conv_dim, stride, padding, dilation, groups """ info = { "is_conv": False, "conv_dim": 0, "stride": (1,), "padding": (0,), "dilation": (1,), "groups": 1, "kernel_size": (1,), "in_channels": None, "out_channels": None, } # Determine conv type if isinstance(module, nn.Conv1d): info["is_conv"] = True info["conv_dim"] = 1 elif isinstance(module, nn.Conv2d): info["is_conv"] = True info["conv_dim"] = 2 elif isinstance(module, nn.Conv3d): info["is_conv"] = True info["conv_dim"] = 3 elif isinstance(module, nn.Linear): info["is_conv"] = False info["conv_dim"] = 0 else: # Try to infer from class name for custom/quantized layers class_name = type(module).__name__.lower() if "conv3d" in class_name: info["is_conv"] = True info["conv_dim"] = 3 elif "conv2d" in class_name: info["is_conv"] = True info["conv_dim"] = 2 elif "conv1d" in class_name: info["is_conv"] = True info["conv_dim"] = 1 elif "conv" in class_name: info["is_conv"] = True info["conv_dim"] = 2 # Extract conv parameters if it's a conv layer if info["is_conv"]: # Try to get stride, padding, dilation, groups, kernel_size from module info["stride"] = getattr(module, "stride", (1,) * info["conv_dim"]) info["padding"] = getattr(module, "padding", (0,) * info["conv_dim"]) info["dilation"] = getattr(module, "dilation", (1,) * info["conv_dim"]) info["groups"] = getattr(module, "groups", 1) info["kernel_size"] = getattr(module, "kernel_size", (1,) * info["conv_dim"]) info["in_channels"] = getattr(module, "in_channels", None) info["out_channels"] = getattr(module, "out_channels", None) # Ensure they're tuples if isinstance(info["stride"], int): info["stride"] = (info["stride"],) * info["conv_dim"] if isinstance(info["padding"], int): info["padding"] = (info["padding"],) * info["conv_dim"] if isinstance(info["dilation"], int): info["dilation"] = (info["dilation"],) * info["conv_dim"] if isinstance(info["kernel_size"], int): info["kernel_size"] = (info["kernel_size"],) * info["conv_dim"] return 
info class BypassForwardHook: """ Hook that wraps a layer's forward to apply adapter in bypass mode. Stores the original forward and replaces it with bypass version. Supports both: - WeightAdapterBase: Inference adapters (uses self.weights tuple) - WeightAdapterTrainBase: Training adapters (nn.Module with parameters) """ def __init__( self, module: nn.Module, adapter: BypassAdapter, multiplier: float = 1.0, ): self.module = module self.adapter = adapter self.multiplier = multiplier self.original_forward = None # Determine layer type and conv params from module class (works for quantized layers) module_info = get_module_type_info(module) # Set multiplier and layer type info on adapter for use in h() adapter.multiplier = multiplier adapter.is_conv = module_info["is_conv"] adapter.conv_dim = module_info["conv_dim"] adapter.kernel_size = module_info["kernel_size"] adapter.in_channels = module_info["in_channels"] adapter.out_channels = module_info["out_channels"] # Store kw_dict for conv operations (like LyCORIS extra_args) if module_info["is_conv"]: adapter.kw_dict = { "stride": module_info["stride"], "padding": module_info["padding"], "dilation": module_info["dilation"], "groups": module_info["groups"], } else: adapter.kw_dict = {} def _bypass_forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: """Bypass forward: uses adapter's bypass_forward or default g(f(x) + h(x)) Note: Bypass mode does NOT access original model weights (org_weight). This is intentional - bypass mode is designed for quantized models where weights may not be in a usable format. All necessary shape information is provided via adapter attributes set during inject(). 
""" # Check if adapter has custom bypass_forward (e.g., GLoRA) adapter_bypass = getattr(self.adapter, "bypass_forward", None) if adapter_bypass is not None: # Check if it's overridden (not the base class default) # Need to check both base classes since adapter could be either type adapter_type = type(self.adapter) is_default_bypass = ( adapter_type.bypass_forward is WeightAdapterBase.bypass_forward or adapter_type.bypass_forward is WeightAdapterTrainBase.bypass_forward ) if not is_default_bypass: return adapter_bypass(self.original_forward, x, *args, **kwargs) # Default bypass: g(f(x) + h(x, f(x))) base_out = self.original_forward(x, *args, **kwargs) h_out = self.adapter.h(x, base_out) return self.adapter.g(base_out + h_out) def inject(self): """Replace module forward with bypass version.""" if self.original_forward is not None: logging.debug( f"[BypassHook] Already injected for {type(self.module).__name__}" ) return # Already injected # Move adapter weights to compute device (GPU) # Use get_torch_device() instead of module.weight.device because # with offloading, module weights may be on CPU while compute happens on GPU device = comfy.model_management.get_torch_device() # Get dtype from module weight if available dtype = None if hasattr(self.module, "weight") and self.module.weight is not None: dtype = self.module.weight.dtype # Only use dtype if it's a standard float type, not quantized if dtype is not None and dtype not in (torch.float32, torch.float16, torch.bfloat16): dtype = None self._move_adapter_weights_to_device(device, dtype) self.original_forward = self.module.forward self.module.forward = self._bypass_forward logging.debug( f"[BypassHook] Injected bypass forward for {type(self.module).__name__} (adapter={type(self.adapter).__name__})" ) def _move_adapter_weights_to_device(self, device, dtype=None): """Move adapter weights to specified device to avoid per-forward transfers. 
Handles both: - WeightAdapterBase: has self.weights tuple of tensors - WeightAdapterTrainBase: nn.Module with parameters, uses .to() method """ adapter = self.adapter # Check if adapter is an nn.Module (WeightAdapterTrainBase) if isinstance(adapter, nn.Module): # In training mode we don't touch dtype as trainer will handle it adapter.to(device=device) logging.debug( f"[BypassHook] Moved training adapter (nn.Module) to {device}" ) return # WeightAdapterBase: handle self.weights tuple if not hasattr(adapter, "weights") or adapter.weights is None: return weights = adapter.weights if isinstance(weights, (list, tuple)): new_weights = [] for w in weights: if isinstance(w, torch.Tensor): if dtype is not None: new_weights.append(w.to(device=device, dtype=dtype)) else: new_weights.append(w.to(device=device)) else: new_weights.append(w) adapter.weights = ( tuple(new_weights) if isinstance(weights, tuple) else new_weights ) elif isinstance(weights, torch.Tensor): if dtype is not None: adapter.weights = weights.to(device=device, dtype=dtype) else: adapter.weights = weights.to(device=device) logging.debug(f"[BypassHook] Moved adapter weights to {device}") def eject(self): """Restore original module forward.""" if self.original_forward is None: logging.debug(f"[BypassHook] Not injected for {type(self.module).__name__}") return # Not injected self.module.forward = self.original_forward self.original_forward = None logging.debug( f"[BypassHook] Ejected bypass forward for {type(self.module).__name__}" ) class BypassInjectionManager: """ Manages bypass mode injection for a collection of adapters. Creates PatcherInjection objects that can be used with ModelPatcher. Supports both inference adapters (WeightAdapterBase) and training adapters (WeightAdapterTrainBase). 
Usage: manager = BypassInjectionManager() manager.add_adapter("model.layers.0.self_attn.q_proj", lora_adapter, strength=0.8) manager.add_adapter("model.layers.0.self_attn.k_proj", lora_adapter, strength=0.8) injections = manager.create_injections(model) model_patcher.set_injections("bypass_lora", injections) """ def __init__(self): self.adapters: dict[str, tuple[BypassAdapter, float]] = {} self.hooks: list[BypassForwardHook] = [] def add_adapter( self, key: str, adapter: BypassAdapter, strength: float = 1.0, ): """ Add an adapter for a specific weight key. Args: key: Weight key (e.g., "model.layers.0.self_attn.q_proj.weight") adapter: The weight adapter (LoRAAdapter, LoKrAdapter, etc.) strength: Multiplier for adapter effect """ # Remove .weight suffix if present for module lookup module_key = key if module_key.endswith(".weight"): module_key = module_key[:-7] logging.debug( f"[BypassManager] Stripped .weight suffix: {key} -> {module_key}" ) self.adapters[module_key] = (adapter, strength) logging.debug( f"[BypassManager] Added adapter: {module_key} (type={type(adapter).__name__}, strength={strength})" ) def clear_adapters(self): """Remove all adapters.""" self.adapters.clear() def _get_module_by_key(self, model: nn.Module, key: str) -> Optional[nn.Module]: """Get a submodule by dot-separated key.""" parts = key.split(".") module = model try: for i, part in enumerate(parts): if part.isdigit(): module = module[int(part)] else: module = getattr(module, part) logging.debug( f"[BypassManager] Found module for key {key}: {type(module).__name__}" ) return module except (AttributeError, IndexError, KeyError) as e: logging.error(f"[BypassManager] Failed to find module for key {key}: {e}") logging.error( f"[BypassManager] Failed at part index {i}, part={part}, current module type={type(module).__name__}" ) return None def create_injections(self, model: nn.Module) -> list[PatcherInjection]: """ Create PatcherInjection objects for all registered adapters. 
Args: model: The model to inject into (e.g., model_patcher.model) Returns: List of PatcherInjection objects to use with model_patcher.set_injections() """ self.hooks.clear() logging.debug( f"[BypassManager] create_injections called with {len(self.adapters)} adapters" ) logging.debug(f"[BypassManager] Model type: {type(model).__name__}") for key, (adapter, strength) in self.adapters.items(): logging.debug(f"[BypassManager] Looking for module: {key}") module = self._get_module_by_key(model, key) if module is None: logging.warning(f"[BypassManager] Module not found for key {key}") continue if not hasattr(module, "weight"): logging.warning( f"[BypassManager] Module {key} has no weight attribute (type={type(module).__name__})" ) continue logging.debug( f"[BypassManager] Creating hook for {key} (module type={type(module).__name__}, weight shape={module.weight.shape})" ) hook = BypassForwardHook(module, adapter, multiplier=strength) self.hooks.append(hook) logging.debug(f"[BypassManager] Created {len(self.hooks)} hooks") # Create single injection that manages all hooks def inject_all(model_patcher): logging.debug( f"[BypassManager] inject_all called, injecting {len(self.hooks)} hooks" ) for hook in self.hooks: hook.inject() logging.debug( f"[BypassManager] Injected hook for {type(hook.module).__name__}" ) def eject_all(model_patcher): logging.debug( f"[BypassManager] eject_all called, ejecting {len(self.hooks)} hooks" ) for hook in self.hooks: hook.eject() return [PatcherInjection(inject=inject_all, eject=eject_all)] def get_hook_count(self) -> int: """Return number of hooks that will be/are injected.""" return len(self.hooks) def create_bypass_injections_from_patches( model: nn.Module, patches: dict, strength: float = 1.0, ) -> list[PatcherInjection]: """ Convenience function to create bypass injections from a patches dict. This is useful when you have patches in the format used by model_patcher.add_patches() and want to apply them in bypass mode instead. 
Args: model: The model to inject into patches: Dict mapping weight keys to adapter data strength: Global strength multiplier Returns: List of PatcherInjection objects """ manager = BypassInjectionManager() for key, patch_list in patches.items(): if not patch_list: continue # patches format: list of (strength_patch, patch_data, strength_model, offset, function) for patch in patch_list: patch_strength, patch_data, strength_model, offset, function = patch # patch_data should be a WeightAdapterBase/WeightAdapterTrainBase or tuple if isinstance(patch_data, (WeightAdapterBase, WeightAdapterTrainBase)): adapter = patch_data else: # Skip non-adapter patches continue combined_strength = strength * patch_strength manager.add_adapter(key, adapter, strength=combined_strength) return manager.create_injections(model)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/weight_adapter/bypass.py", "license": "GNU General Public License v3.0", "lines": 367, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy_api_nodes/apis/hunyuan3d.py
from typing import TypedDict from pydantic import BaseModel, Field, model_validator class InputGenerateType(TypedDict): generate_type: str polygon_type: str pbr: bool class Hunyuan3DViewImage(BaseModel): ViewType: str = Field(..., description="Valid values: back, left, right.") ViewImageUrl: str = Field(...) class To3DProTaskRequest(BaseModel): Model: str = Field(...) Prompt: str | None = Field(None) ImageUrl: str | None = Field(None) MultiViewImages: list[Hunyuan3DViewImage] | None = Field(None) EnablePBR: bool | None = Field(...) FaceCount: int | None = Field(...) GenerateType: str | None = Field(...) PolygonType: str | None = Field(...) class RequestError(BaseModel): Code: str = Field("") Message: str = Field("") class To3DProTaskCreateResponse(BaseModel): JobId: str | None = Field(None) Error: RequestError | None = Field(None) @model_validator(mode="before") @classmethod def unwrap_data(cls, values: dict) -> dict: if "Response" in values and isinstance(values["Response"], dict): return values["Response"] return values class ResultFile3D(BaseModel): Type: str = Field(...) Url: str = Field(...) PreviewImageUrl: str = Field("") class To3DProTaskResultResponse(BaseModel): ErrorCode: str = Field("") ErrorMessage: str = Field("") ResultFile3Ds: list[ResultFile3D] = Field([]) Status: str = Field(...) @model_validator(mode="before") @classmethod def unwrap_data(cls, values: dict) -> dict: if "Response" in values and isinstance(values["Response"], dict): return values["Response"] return values class To3DProTaskQueryRequest(BaseModel): JobId: str = Field(...) class To3DUVFileInput(BaseModel): Type: str = Field(..., description="File type: GLB, OBJ, or FBX") Url: str = Field(...) class To3DUVTaskRequest(BaseModel): File: To3DUVFileInput = Field(...) class TextureEditImageInfo(BaseModel): Url: str = Field(...) class TextureEditTaskRequest(BaseModel): File3D: To3DUVFileInput = Field(...) 
Image: TextureEditImageInfo | None = Field(None) Prompt: str | None = Field(None) EnablePBR: bool | None = Field(None)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/apis/hunyuan3d.py", "license": "GNU General Public License v3.0", "lines": 59, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_api_nodes/nodes_hunyuan3d.py
from typing_extensions import override

from comfy_api.latest import IO, ComfyExtension, Input, Types
from comfy_api_nodes.apis.hunyuan3d import (
    Hunyuan3DViewImage,
    InputGenerateType,
    ResultFile3D,
    TextureEditTaskRequest,
    To3DProTaskCreateResponse,
    To3DProTaskQueryRequest,
    To3DProTaskRequest,
    To3DProTaskResultResponse,
    To3DUVFileInput,
    To3DUVTaskRequest,
)
from comfy_api_nodes.util import (
    ApiEndpoint,
    download_url_to_file_3d,
    download_url_to_image_tensor,
    downscale_image_tensor_by_max_side,
    poll_op,
    sync_op,
    upload_3d_model_to_comfyapi,
    upload_image_to_comfyapi,
    validate_image_dimensions,
    validate_string,
)


def _is_tencent_rate_limited(status: int, body: object) -> bool:
    """Return True when an HTTP reply is Tencent's rate-limit signal.

    Tencent reports rate limiting as HTTP 400 with an Error.Code containing
    "RequestLimitExceeded" inside the "Response" envelope, rather than a 429.
    """
    return (
        status == 400
        and isinstance(body, dict)
        and "RequestLimitExceeded" in str(body.get("Response", {}).get("Error", {}).get("Code", ""))
    )


def get_file_from_response(
    response_objs: list[ResultFile3D], file_type: str, raise_if_not_found: bool = True
) -> ResultFile3D | None:
    """Find the first result file whose Type matches `file_type` (case-insensitive).

    Raises ValueError when missing unless `raise_if_not_found` is False,
    in which case None is returned.
    """
    for i in response_objs:
        if i.Type.lower() == file_type.lower():
            return i
    if raise_if_not_found:
        raise ValueError(f"'{file_type}' file type is not found in the response.")
    return None


class TencentTextToModelNode(IO.ComfyNode):
    """API node: generate a 3D model (GLB + OBJ) from a text prompt via Hunyuan3D Pro."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="TencentTextToModelNode",
            display_name="Hunyuan3D: Text to Model",
            category="api node/3d/Tencent",
            essentials_category="3D",
            inputs=[
                IO.Combo.Input(
                    "model",
                    options=["3.0", "3.1"],
                    tooltip="The LowPoly option is unavailable for the `3.1` model.",
                ),
                IO.String.Input("prompt", multiline=True, default="", tooltip="Supports up to 1024 characters."),
                IO.Int.Input("face_count", default=500000, min=40000, max=1500000),
                IO.DynamicCombo.Input(
                    "generate_type",
                    options=[
                        IO.DynamicCombo.Option("Normal", [IO.Boolean.Input("pbr", default=False)]),
                        IO.DynamicCombo.Option(
                            "LowPoly",
                            [
                                IO.Combo.Input("polygon_type", options=["triangle", "quadrilateral"]),
                                IO.Boolean.Input("pbr", default=False),
                            ],
                        ),
                        IO.DynamicCombo.Option("Geometry", []),
                    ],
                ),
                # Seed only forces re-execution; the remote API is non-deterministic.
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed controls whether the node should re-run; "
                    "results are non-deterministic regardless of seed.",
                ),
            ],
            outputs=[
                IO.String.Output(display_name="model_file"),  # for backward compatibility only
                IO.File3DGLB.Output(display_name="GLB"),
                IO.File3DOBJ.Output(display_name="OBJ"),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            is_output_node=True,
            # NOTE(review): the expr compares lowercase option values ("normal",
            # "lowpoly") while the options are declared as "Normal"/"LowPoly" —
            # presumably the frontend lowercases widget values; confirm.
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["generate_type", "generate_type.pbr", "face_count"]),
                expr="""
                (
                    $base := widgets.generate_type = "normal" ? 25 : widgets.generate_type = "lowpoly" ? 30 : 15;
                    $pbr := $lookup(widgets, "generate_type.pbr") ? 10 : 0;
                    $face := widgets.face_count != 500000 ? 10 : 0;
                    {"type":"usd","usd": ($base + $pbr + $face) * 0.02}
                )
                """,
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        prompt: str,
        face_count: int,
        generate_type: InputGenerateType,
        seed: int,
    ) -> IO.NodeOutput:
        # Create the remote task, poll until done, then download GLB and OBJ results.
        _ = seed  # seed is only a cache-buster; never sent to the API
        validate_string(prompt, field_name="prompt", min_length=1, max_length=1024)
        if model == "3.1" and generate_type["generate_type"].lower() == "lowpoly":
            raise ValueError("The LowPoly option is currently unavailable for the 3.1 model.")
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-pro", method="POST"),
            response_model=To3DProTaskCreateResponse,
            data=To3DProTaskRequest(
                Model=model,
                Prompt=prompt,
                FaceCount=face_count,
                GenerateType=generate_type["generate_type"],
                # pbr/polygon_type only exist for some combo options; absent -> None
                EnablePBR=generate_type.get("pbr", None),
                PolygonType=generate_type.get("polygon_type", None),
            ),
            is_rate_limited=_is_tencent_rate_limited,
        )
        if response.Error:
            raise ValueError(f"Task creation failed with code {response.Error.Code}: {response.Error.Message}")
        task_id = response.JobId
        result = await poll_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-pro/query", method="POST"),
            data=To3DProTaskQueryRequest(JobId=task_id),
            response_model=To3DProTaskResultResponse,
            status_extractor=lambda r: r.Status,
        )
        return IO.NodeOutput(
            f"{task_id}.glb",
            await download_url_to_file_3d(
                get_file_from_response(result.ResultFile3Ds, "glb").Url, "glb", task_id=task_id
            ),
            await download_url_to_file_3d(
                get_file_from_response(result.ResultFile3Ds, "obj").Url, "obj", task_id=task_id
            ),
        )


class TencentImageToModelNode(IO.ComfyNode):
    """API node: generate a 3D model from one main image plus optional left/right/back views."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="TencentImageToModelNode",
            display_name="Hunyuan3D: Image(s) to Model",
            category="api node/3d/Tencent",
            essentials_category="3D",
            inputs=[
                IO.Combo.Input(
                    "model",
                    options=["3.0", "3.1"],
                    tooltip="The LowPoly option is unavailable for the `3.1` model.",
                ),
                IO.Image.Input("image"),
                IO.Image.Input("image_left", optional=True),
                IO.Image.Input("image_right", optional=True),
                IO.Image.Input("image_back", optional=True),
                IO.Int.Input("face_count", default=500000, min=40000, max=1500000),
                IO.DynamicCombo.Input(
                    "generate_type",
                    options=[
                        IO.DynamicCombo.Option("Normal", [IO.Boolean.Input("pbr", default=False)]),
                        IO.DynamicCombo.Option(
                            "LowPoly",
                            [
                                IO.Combo.Input("polygon_type", options=["triangle", "quadrilateral"]),
                                IO.Boolean.Input("pbr", default=False),
                            ],
                        ),
                        IO.DynamicCombo.Option("Geometry", []),
                    ],
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed controls whether the node should re-run; "
                    "results are non-deterministic regardless of seed.",
                ),
            ],
            outputs=[
                IO.String.Output(display_name="model_file"),  # for backward compatibility only
                IO.File3DGLB.Output(display_name="GLB"),
                IO.File3DOBJ.Output(display_name="OBJ"),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            is_output_node=True,
            # Connecting any extra view adds a flat multi-view surcharge.
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(
                    widgets=["generate_type", "generate_type.pbr", "face_count"],
                    inputs=["image_left", "image_right", "image_back"],
                ),
                expr="""
                (
                    $base := widgets.generate_type = "normal" ? 25 : widgets.generate_type = "lowpoly" ? 30 : 15;
                    $multiview := (
                        inputs.image_left.connected or inputs.image_right.connected or inputs.image_back.connected
                    ) ? 10 : 0;
                    $pbr := $lookup(widgets, "generate_type.pbr") ? 10 : 0;
                    $face := widgets.face_count != 500000 ? 10 : 0;
                    {"type":"usd","usd": ($base + $multiview + $pbr + $face) * 0.02}
                )
                """,
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        image: Input.Image,
        face_count: int,
        generate_type: InputGenerateType,
        seed: int,
        image_left: Input.Image | None = None,
        image_right: Input.Image | None = None,
        image_back: Input.Image | None = None,
    ) -> IO.NodeOutput:
        # Upload the main image (and any extra views), create the task, poll, download.
        _ = seed
        if model == "3.1" and generate_type["generate_type"].lower() == "lowpoly":
            raise ValueError("The LowPoly option is currently unavailable for the 3.1 model.")
        validate_image_dimensions(image, min_width=128, min_height=128)
        multiview_images = []
        for k, v in {
            "left": image_left,
            "right": image_right,
            "back": image_back,
        }.items():
            if v is None:
                continue
            validate_image_dimensions(v, min_width=128, min_height=128)
            multiview_images.append(
                Hunyuan3DViewImage(
                    ViewType=k,
                    # 4900px max side / 24.01 MP cap: presumably the Tencent
                    # upload limits — TODO confirm against API docs.
                    ViewImageUrl=await upload_image_to_comfyapi(
                        cls,
                        downscale_image_tensor_by_max_side(v, max_side=4900),
                        mime_type="image/webp",
                        total_pixels=24_010_000,
                    ),
                )
            )
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-pro", method="POST"),
            response_model=To3DProTaskCreateResponse,
            data=To3DProTaskRequest(
                Model=model,
                FaceCount=face_count,
                GenerateType=generate_type["generate_type"],
                ImageUrl=await upload_image_to_comfyapi(
                    cls,
                    downscale_image_tensor_by_max_side(image, max_side=4900),
                    mime_type="image/webp",
                    total_pixels=24_010_000,
                ),
                MultiViewImages=multiview_images if multiview_images else None,
                EnablePBR=generate_type.get("pbr", None),
                PolygonType=generate_type.get("polygon_type", None),
            ),
            is_rate_limited=_is_tencent_rate_limited,
        )
        if response.Error:
            raise ValueError(f"Task creation failed with code {response.Error.Code}: {response.Error.Message}")
        task_id = response.JobId
        result = await poll_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-pro/query", method="POST"),
            data=To3DProTaskQueryRequest(JobId=task_id),
            response_model=To3DProTaskResultResponse,
            status_extractor=lambda r: r.Status,
        )
        return IO.NodeOutput(
            f"{task_id}.glb",
            await download_url_to_file_3d(
                get_file_from_response(result.ResultFile3Ds, "glb").Url, "glb", task_id=task_id
            ),
            await download_url_to_file_3d(
                get_file_from_response(result.ResultFile3Ds, "obj").Url, "obj", task_id=task_id
            ),
        )


class TencentModelTo3DUVNode(IO.ComfyNode):
    """API node: UV-unwrap a 3D model, returning OBJ + FBX plus the UV texture image."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="TencentModelTo3DUVNode",
            display_name="Hunyuan3D: Model to UV",
            category="api node/3d/Tencent",
            description="Perform UV unfolding on a 3D model to generate UV texture. "
            "Input model must have less than 30000 faces.",
            inputs=[
                IO.MultiType.Input(
                    "model_3d",
                    types=[IO.File3DGLB, IO.File3DOBJ, IO.File3DFBX, IO.File3DAny],
                    tooltip="Input 3D model (GLB, OBJ, or FBX)",
                ),
                IO.Int.Input(
                    "seed",
                    default=1,
                    min=0,
                    max=2147483647,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed controls whether the node should re-run; "
                    "results are non-deterministic regardless of seed.",
                ),
            ],
            outputs=[
                IO.File3DOBJ.Output(display_name="OBJ"),
                IO.File3DFBX.Output(display_name="FBX"),
                IO.Image.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(expr='{"type":"usd","usd":0.2}'),
        )

    # Formats accepted by the UV endpoint (checked against model_3d.format).
    SUPPORTED_FORMATS = {"glb", "obj", "fbx"}

    @classmethod
    async def execute(
        cls,
        model_3d: Types.File3D,
        seed: int,
    ) -> IO.NodeOutput:
        _ = seed
        file_format = model_3d.format.lower()
        if file_format not in cls.SUPPORTED_FORMATS:
            raise ValueError(
                f"Unsupported file format: '{file_format}'. "
                f"Supported formats: {', '.join(sorted(cls.SUPPORTED_FORMATS))}."
            )
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-uv", method="POST"),
            response_model=To3DProTaskCreateResponse,
            data=To3DUVTaskRequest(
                File=To3DUVFileInput(
                    Type=file_format.upper(),
                    Url=await upload_3d_model_to_comfyapi(cls, model_3d, file_format),
                )
            ),
            is_rate_limited=_is_tencent_rate_limited,
        )
        if response.Error:
            raise ValueError(f"Task creation failed with code {response.Error.Code}: {response.Error.Message}")
        result = await poll_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-uv/query", method="POST"),
            data=To3DProTaskQueryRequest(JobId=response.JobId),
            response_model=To3DProTaskResultResponse,
            status_extractor=lambda r: r.Status,
        )
        return IO.NodeOutput(
            await download_url_to_file_3d(get_file_from_response(result.ResultFile3Ds, "obj").Url, "obj"),
            await download_url_to_file_3d(get_file_from_response(result.ResultFile3Ds, "fbx").Url, "fbx"),
            await download_url_to_image_tensor(get_file_from_response(result.ResultFile3Ds, "image").Url),
        )


class Tencent3DTextureEditNode(IO.ComfyNode):
    """API node: re-paint the texture of an FBX model from a text prompt (PBR enabled)."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="Tencent3DTextureEditNode",
            display_name="Hunyuan3D: 3D Texture Edit",
            category="api node/3d/Tencent",
            description="After inputting the 3D model, perform 3D model texture redrawing.",
            inputs=[
                IO.MultiType.Input(
                    "model_3d",
                    types=[IO.File3DFBX, IO.File3DAny],
                    tooltip="3D model in FBX format. Model should have less than 100000 faces.",
                ),
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                    tooltip="Describes texture editing. Supports up to 1024 UTF-8 characters.",
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed controls whether the node should re-run; "
                    "results are non-deterministic regardless of seed.",
                ),
            ],
            outputs=[
                IO.File3DGLB.Output(display_name="GLB"),
                IO.File3DFBX.Output(display_name="FBX"),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                expr="""{"type":"usd","usd": 0.6}""",
            ),
        )

    @classmethod
    async def execute(
        cls,
        model_3d: Types.File3D,
        prompt: str,
        seed: int,
    ) -> IO.NodeOutput:
        _ = seed
        file_format = model_3d.format.lower()
        if file_format != "fbx":
            raise ValueError(f"Unsupported file format: '{file_format}'. Only FBX format is supported.")
        validate_string(prompt, field_name="prompt", min_length=1, max_length=1024)
        model_url = await upload_3d_model_to_comfyapi(cls, model_3d, file_format)
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-texture-edit", method="POST"),
            response_model=To3DProTaskCreateResponse,
            data=TextureEditTaskRequest(
                File3D=To3DUVFileInput(Type=file_format.upper(), Url=model_url),
                Prompt=prompt,
                EnablePBR=True,  # always requests PBR output for texture edits
            ),
            is_rate_limited=_is_tencent_rate_limited,
        )
        if response.Error:
            raise ValueError(f"Task creation failed with code {response.Error.Code}: {response.Error.Message}")
        result = await poll_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-texture-edit/query", method="POST"),
            data=To3DProTaskQueryRequest(JobId=response.JobId),
            response_model=To3DProTaskResultResponse,
            status_extractor=lambda r: r.Status,
        )
        return IO.NodeOutput(
            await download_url_to_file_3d(get_file_from_response(result.ResultFile3Ds, "glb").Url, "glb"),
            await download_url_to_file_3d(get_file_from_response(result.ResultFile3Ds, "fbx").Url, "fbx"),
        )


class Tencent3DPartNode(IO.ComfyNode):
    """API node: split an FBX model into identified components (part segmentation)."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="Tencent3DPartNode",
            display_name="Hunyuan3D: 3D Part",
            category="api node/3d/Tencent",
            description="Automatically perform component identification and generation based on the model structure.",
            inputs=[
                IO.MultiType.Input(
                    "model_3d",
                    types=[IO.File3DFBX, IO.File3DAny],
                    tooltip="3D model in FBX format. Model should have less than 30000 faces.",
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed controls whether the node should re-run; "
                    "results are non-deterministic regardless of seed.",
                ),
            ],
            outputs=[
                IO.File3DFBX.Output(display_name="FBX"),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(expr='{"type":"usd","usd":0.6}'),
        )

    @classmethod
    async def execute(
        cls,
        model_3d: Types.File3D,
        seed: int,
    ) -> IO.NodeOutput:
        _ = seed
        file_format = model_3d.format.lower()
        if file_format != "fbx":
            raise ValueError(f"Unsupported file format: '{file_format}'. Only FBX format is supported.")
        model_url = await upload_3d_model_to_comfyapi(cls, model_3d, file_format)
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-part", method="POST"),
            response_model=To3DProTaskCreateResponse,
            data=To3DUVTaskRequest(
                File=To3DUVFileInput(Type=file_format.upper(), Url=model_url),
            ),
            is_rate_limited=_is_tencent_rate_limited,
        )
        if response.Error:
            raise ValueError(f"Task creation failed with code {response.Error.Code}: {response.Error.Message}")
        result = await poll_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-part/query", method="POST"),
            data=To3DProTaskQueryRequest(JobId=response.JobId),
            response_model=To3DProTaskResultResponse,
            status_extractor=lambda r: r.Status,
        )
        return IO.NodeOutput(
            await download_url_to_file_3d(get_file_from_response(result.ResultFile3Ds, "fbx").Url, "fbx"),
        )


class TencentHunyuan3DExtension(ComfyExtension):
    """Registers the Tencent Hunyuan3D API nodes with ComfyUI."""

    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        # UV and TextureEdit nodes are intentionally disabled for now.
        return [
            TencentTextToModelNode,
            TencentImageToModelNode,
            # TencentModelTo3DUVNode,
            # Tencent3DTextureEditNode,
            Tencent3DPartNode,
        ]


async def comfy_entrypoint() -> TencentHunyuan3DExtension:
    """Module entry point used by the extension loader."""
    return TencentHunyuan3DExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/nodes_hunyuan3d.py", "license": "GNU General Public License v3.0", "lines": 542, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy/ldm/wan/model_multitalk.py
import torch
from einops import rearrange, repeat

import comfy
from comfy.ldm.modules.attention import optimized_attention


def calculate_x_ref_attn_map(visual_q, ref_k, ref_target_masks, split_num=8):
    """Per-class mean attention from visual queries to reference-key tokens.

    For each mask in `ref_target_masks`, computes softmax(q @ k^T) in chunks
    along the query sequence, keeps only the mass falling on masked reference
    tokens, and averages over heads. Returns a tensor of shape
    [num_classes * B, x_seqlens] (classes concatenated along dim 0).
    """
    scale = 1.0 / visual_q.shape[-1] ** 0.5
    # [B, S, H, K] -> [B, H, S, K], pre-scaled so the matmul yields logits.
    visual_q = visual_q.transpose(1, 2) * scale
    B, H, x_seqlens, K = visual_q.shape

    x_ref_attn_maps = []
    for class_idx, ref_target_mask in enumerate(ref_target_masks):
        ref_target_mask = ref_target_mask.view(1, 1, 1, -1)
        x_ref_attnmap = torch.zeros(B, H, x_seqlens, device=visual_q.device, dtype=visual_q.dtype)
        # Chunk the query axis to bound peak memory of the attention matrix.
        chunk_size = min(max(x_seqlens // split_num, 1), x_seqlens)
        for i in range(0, x_seqlens, chunk_size):
            end_i = min(i + chunk_size, x_seqlens)
            attn_chunk = visual_q[:, :, i:end_i] @ ref_k.permute(0, 2, 3, 1)  # B, H, chunk, ref_seqlens
            # Apply softmax (numerically stabilized by subtracting the row max)
            attn_max = attn_chunk.max(dim=-1, keepdim=True).values
            attn_chunk = (attn_chunk - attn_max).exp()
            attn_sum = attn_chunk.sum(dim=-1, keepdim=True)
            attn_chunk = attn_chunk / (attn_sum + 1e-8)
            # Apply mask and sum
            masked_attn = attn_chunk * ref_target_mask
            x_ref_attnmap[:, :, i:end_i] = masked_attn.sum(-1) / (ref_target_mask.sum() + 1e-8)
            del attn_chunk, masked_attn
        # Average across heads
        x_ref_attnmap = x_ref_attnmap.mean(dim=1)  # B, x_seqlens
        x_ref_attn_maps.append(x_ref_attnmap)
    del visual_q, ref_k
    return torch.cat(x_ref_attn_maps, dim=0)


def get_attn_map_with_target(visual_q, ref_k, shape, ref_target_masks=None, split_num=2):
    """Compute per-class reference attention maps, splitting heads to save memory.

    Args:
        visual_q (torch.tensor): B M H K
        ref_k (torch.tensor): B M H K
        shape (tuple): (N_t, N_h, N_w)
        ref_target_masks: [B, N_h * N_w]
    """
    N_t, N_h, N_w = shape
    x_seqlens = N_h * N_w
    # Only the first frame's worth of reference keys is used.
    ref_k = ref_k[:, :x_seqlens]
    _, seq_lens, heads, _ = visual_q.shape
    class_num, _ = ref_target_masks.shape
    x_ref_attn_maps = torch.zeros(class_num, seq_lens).to(visual_q)
    # NOTE(review): assumes heads is divisible by split_num; a remainder of
    # heads would be silently dropped — confirm upstream head counts.
    split_chunk = heads // split_num
    for i in range(split_num):
        x_ref_attn_maps_perhead = calculate_x_ref_attn_map(
            visual_q[:, :, i*split_chunk:(i+1)*split_chunk, :],
            ref_k[:, :, i*split_chunk:(i+1)*split_chunk, :],
            ref_target_masks
        )
        x_ref_attn_maps += x_ref_attn_maps_perhead
    return x_ref_attn_maps / split_num


def normalize_and_scale(column, source_range, target_range, epsilon=1e-8):
    """Min-max normalize `column` from `source_range` and rescale into `target_range`."""
    source_min, source_max = source_range
    new_min, new_max = target_range
    normalized = (column - source_min) / (source_max - source_min + epsilon)
    scaled = normalized * (new_max - new_min) + new_min
    return scaled


def rotate_half(x):
    """Rotate interleaved feature pairs (x1, x2) -> (-x2, x1) for rotary embedding."""
    x = rearrange(x, "... (d r) -> ... d r", r=2)
    x1, x2 = x.unbind(dim=-1)
    x = torch.stack((-x2, x1), dim=-1)
    return rearrange(x, "... d r -> ... (d r)")


def get_audio_embeds(encoded_audio, audio_start, audio_end):
    """Gather per-human audio windows (5 frames centered on each index) for a latent window.

    Pads past the end of the audio with the first frame (likely silence).
    Returns a tensor stacking all humans along dim 0.
    """
    audio_embs = []
    human_num = len(encoded_audio)
    audio_frames = encoded_audio[0].shape[0]
    # Window offsets [-2, -1, 0, 1, 2] around each audio frame.
    indices = (torch.arange(4 + 1) - 2) * 1
    for human_idx in range(human_num):
        if audio_end > audio_frames:
            # in case of not enough audio for current window, pad with first audio frame as that's most likely silence
            pad_len = audio_end - audio_frames
            # NOTE(review): pad_shape is computed but unused below.
            pad_shape = list(encoded_audio[human_idx].shape)
            pad_shape[0] = pad_len
            pad_tensor = encoded_audio[human_idx][:1].repeat(pad_len, *([1] * (encoded_audio[human_idx].dim() - 1)))
            encoded_audio_in = torch.cat([encoded_audio[human_idx], pad_tensor], dim=0)
        else:
            encoded_audio_in = encoded_audio[human_idx]
        center_indices = torch.arange(audio_start, audio_end, 1).unsqueeze(1) + indices.unsqueeze(0)
        center_indices = torch.clamp(center_indices, min=0, max=encoded_audio_in.shape[0] - 1)
        audio_emb = encoded_audio_in[center_indices].unsqueeze(0)
        audio_embs.append(audio_emb)
    return torch.cat(audio_embs, dim=0)


def project_audio_features(audio_proj, encoded_audio, audio_start, audio_end):
    """Build per-latent-frame audio context tokens via the audio projection model.

    Splits the window into the first frame and groups of 4 subsequent frames
    (matching the VAE's temporal compression), selects overlapping sub-windows,
    and feeds both through `audio_proj`.
    """
    audio_embs = get_audio_embeds(encoded_audio, audio_start, audio_end)
    first_frame_audio_emb_s = audio_embs[:, :1, ...]
    latter_frame_audio_emb = audio_embs[:, 1:, ...]
    latter_frame_audio_emb = rearrange(latter_frame_audio_emb, "b (n_t n) w s c -> b n_t n w s c", n=4)
    middle_index = audio_proj.seq_len // 2
    latter_first_frame_audio_emb = latter_frame_audio_emb[:, :, :1, :middle_index+1, ...]
    latter_first_frame_audio_emb = rearrange(latter_first_frame_audio_emb, "b n_t n w s c -> b n_t (n w) s c")
    latter_last_frame_audio_emb = latter_frame_audio_emb[:, :, -1:, middle_index:, ...]
    latter_last_frame_audio_emb = rearrange(latter_last_frame_audio_emb, "b n_t n w s c -> b n_t (n w) s c")
    latter_middle_frame_audio_emb = latter_frame_audio_emb[:, :, 1:-1, middle_index:middle_index+1, ...]
    latter_middle_frame_audio_emb = rearrange(latter_middle_frame_audio_emb, "b n_t n w s c -> b n_t (n w) s c")
    latter_frame_audio_emb_s = torch.cat([latter_first_frame_audio_emb, latter_middle_frame_audio_emb, latter_last_frame_audio_emb], dim=2)
    audio_emb = audio_proj(first_frame_audio_emb_s, latter_frame_audio_emb_s)
    # Move the per-human batch dim into the token dim.
    audio_emb = torch.cat(audio_emb.split(1), dim=2)
    return audio_emb


class RotaryPositionalEmbedding1D(torch.nn.Module):
    """1D rotary positional embedding applied over arbitrary float position indices."""

    def __init__(self,
                 head_dim,
                 ):
        super().__init__()
        self.head_dim = head_dim
        self.base = 10000

    def precompute_freqs_cis_1d(self, pos_indices):
        # Standard RoPE inverse-frequency schedule over half the head dim,
        # then repeated so each frequency covers a (real, imag) pair.
        freqs = 1.0 / (self.base ** (torch.arange(0, self.head_dim, 2)[: (self.head_dim // 2)].float() / self.head_dim))
        freqs = freqs.to(pos_indices.device)
        freqs = torch.einsum("..., f -> ... f", pos_indices.float(), freqs)
        freqs = repeat(freqs, "... n -> ... (n r)", r=2)
        return freqs

    def forward(self, x, pos_indices):
        """Apply rotary embedding to `x` (computed in float32, cast back to x's dtype)."""
        freqs_cis = self.precompute_freqs_cis_1d(pos_indices)
        x_ = x.float()
        freqs_cis = freqs_cis.float().to(x.device)
        cos, sin = freqs_cis.cos(), freqs_cis.sin()
        cos, sin = rearrange(cos, 'n d -> 1 1 n d'), rearrange(sin, 'n d -> 1 1 n d')
        x_ = (x_ * cos) + (rotate_half(x_) * sin)
        return x_.type_as(x)


class SingleStreamAttention(torch.nn.Module):
    """Cross-attention from per-frame visual tokens to audio context tokens."""

    def __init__(
        self,
        dim: int,
        encoder_hidden_states_dim: int,
        num_heads: int,
        qkv_bias: bool,
        device=None,
        dtype=None,
        operations=None
    ) -> None:
        super().__init__()
        self.dim = dim
        self.encoder_hidden_states_dim = encoder_hidden_states_dim
        self.num_heads = num_heads
        self.head_dim = dim // num_heads

        self.q_linear = operations.Linear(dim, dim, bias=qkv_bias, device=device, dtype=dtype)
        self.proj = operations.Linear(dim, dim, device=device, dtype=dtype)
        self.kv_linear = operations.Linear(encoder_hidden_states_dim, dim * 2, bias=qkv_bias, device=device, dtype=dtype)

    def forward(self, x: torch.Tensor, encoder_hidden_states: torch.Tensor, shape=None) -> torch.Tensor:
        N_t, N_h, N_w = shape
        expected_tokens = N_t * N_h * N_w
        actual_tokens = x.shape[1]

        # Extra trailing tokens (one frame's worth) are excluded from attention
        # and re-appended as zeros afterwards — presumably a reference frame.
        x_extra = None
        if actual_tokens != expected_tokens:
            x_extra = x[:, -N_h * N_w:, :]
            x = x[:, :-N_h * N_w, :]
            N_t = N_t - 1

        B = x.shape[0]
        S = N_h * N_w
        # Fold frames into the batch so attention is per-frame.
        x = x.view(B * N_t, S, self.dim)

        # get q for hidden_state
        q = self.q_linear(x).view(B * N_t, S, self.num_heads, self.head_dim)

        # get kv from encoder_hidden_states
        # shape: (B, N, num_heads, head_dim)
        kv = self.kv_linear(encoder_hidden_states)
        encoder_k, encoder_v = kv.view(B * N_t, encoder_hidden_states.shape[1], 2, self.num_heads, self.head_dim).unbind(2)

        #print("q.shape", q.shape) #torch.Size([21, 1024, 40, 128])
        x = optimized_attention(
            q.transpose(1, 2),
            encoder_k.transpose(1, 2),
            encoder_v.transpose(1, 2),
            heads=self.num_heads, skip_reshape=True, skip_output_reshape=True).transpose(1, 2)

        # linear transform
        x = self.proj(x.reshape(B * N_t, S, self.dim))
        x = x.view(B, N_t * S, self.dim)

        if x_extra is not None:
            x = torch.cat([x, torch.zeros_like(x_extra)], dim=1)

        return x


class SingleStreamMultiAttention(SingleStreamAttention):
    """Audio cross-attention with rotary speaker-bucket labels for multi-speaker routing.

    Each visual token is assigned a rotary position according to which speaker
    dominates its reference attention map; audio keys get the centre of their
    speaker's bucket, so queries attend mostly to their own speaker's audio.
    """

    def __init__(
        self,
        dim: int,
        encoder_hidden_states_dim: int,
        num_heads: int,
        qkv_bias: bool,
        class_range: int = 24,
        class_interval: int = 4,
        device=None,
        dtype=None,
        operations=None
    ) -> None:
        super().__init__(
            dim=dim,
            encoder_hidden_states_dim=encoder_hidden_states_dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            device=device,
            dtype=dtype,
            operations=operations
        )
        # Rotary-embedding layout parameters
        self.class_interval = class_interval
        self.class_range = class_range
        self.max_humans = self.class_range // self.class_interval
        # Constant bucket used for background tokens
        self.rope_bak = int(self.class_range // 2)

        self.rope_1d = RotaryPositionalEmbedding1D(self.head_dim)

    def forward(
        self,
        x: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        shape=None,
        x_ref_attn_map=None
    ) -> torch.Tensor:
        encoder_hidden_states = encoder_hidden_states.squeeze(0).to(x.device)
        human_num = x_ref_attn_map.shape[0] if x_ref_attn_map is not None else 1

        # Single-speaker fall-through
        if human_num <= 1:
            return super().forward(x, encoder_hidden_states, shape)

        N_t, N_h, N_w = shape

        # Strip trailing extra tokens, as in the parent class.
        x_extra = None
        if x.shape[0] * N_t != encoder_hidden_states.shape[0]:
            x_extra = x[:, -N_h * N_w:, :]
            x = x[:, :-N_h * N_w, :]
            N_t = N_t - 1

        x = rearrange(x, "B (N_t S) C -> (B N_t) S C", N_t=N_t)

        # Query projection
        B, N, C = x.shape
        q = self.q_linear(x)
        q = q.view(B, N, self.num_heads, self.head_dim).permute(0, 2, 1, 3)

        # Use `class_range` logic for 2 speakers
        rope_h1 = (0, self.class_interval)
        rope_h2 = (self.class_range - self.class_interval, self.class_range)
        rope_bak = int(self.class_range // 2)

        # Normalize and scale attention maps for each speaker
        max_values = x_ref_attn_map.max(1).values[:, None, None]
        min_values = x_ref_attn_map.min(1).values[:, None, None]
        max_min_values = torch.cat([max_values, min_values], dim=2)

        human1_max_value, human1_min_value = max_min_values[0, :, 0].max(), max_min_values[0, :, 1].min()
        human2_max_value, human2_min_value = max_min_values[1, :, 0].max(), max_min_values[1, :, 1].min()

        human1 = normalize_and_scale(x_ref_attn_map[0], (human1_min_value, human1_max_value), rope_h1)
        human2 = normalize_and_scale(x_ref_attn_map[1], (human2_min_value, human2_max_value), rope_h2)
        back = torch.full((x_ref_attn_map.size(1),), rope_bak, dtype=human1.dtype, device=human1.device)

        # Token-wise speaker dominance
        max_indices = x_ref_attn_map.argmax(dim=0)
        normalized_map = torch.stack([human1, human2, back], dim=1)
        normalized_pos = normalized_map[torch.arange(x_ref_attn_map.size(1)), max_indices]

        # Apply rotary to Q
        q = rearrange(q, "(B N_t) H S C -> B H (N_t S) C", N_t=N_t)
        q = self.rope_1d(q, normalized_pos)
        q = rearrange(q, "B H (N_t S) C -> (B N_t) H S C", N_t=N_t)

        # Keys / Values
        _, N_a, _ = encoder_hidden_states.shape
        encoder_kv = self.kv_linear(encoder_hidden_states)
        encoder_kv = encoder_kv.view(B, N_a, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
        encoder_k, encoder_v = encoder_kv.unbind(0)

        # Rotary for keys – assign centre of each speaker bucket to its context tokens
        per_frame = torch.zeros(N_a, dtype=encoder_k.dtype, device=encoder_k.device)
        per_frame[: per_frame.size(0) // 2] = (rope_h1[0] + rope_h1[1]) / 2
        per_frame[per_frame.size(0) // 2 :] = (rope_h2[0] + rope_h2[1]) / 2
        encoder_pos = torch.cat([per_frame] * N_t, dim=0)

        encoder_k = rearrange(encoder_k, "(B N_t) H S C -> B H (N_t S) C", N_t=N_t)
        encoder_k = self.rope_1d(encoder_k, encoder_pos)
        encoder_k = rearrange(encoder_k, "B H (N_t S) C -> (B N_t) H S C", N_t=N_t)

        # Final attention
        q = rearrange(q, "B H M K -> B M H K")
        encoder_k = rearrange(encoder_k, "B H M K -> B M H K")
        encoder_v = rearrange(encoder_v, "B H M K -> B M H K")

        x = optimized_attention(
            q.transpose(1, 2),
            encoder_k.transpose(1, 2),
            encoder_v.transpose(1, 2),
            heads=self.num_heads, skip_reshape=True, skip_output_reshape=True).transpose(1, 2)

        # Linear projection
        x = x.reshape(B, N, C)
        x = self.proj(x)

        # Restore original layout
        x = rearrange(x, "(B N_t) S C -> B (N_t S) C", N_t=N_t)

        if x_extra is not None:
            x = torch.cat([x, torch.zeros_like(x_extra)], dim=1)

        return x


class MultiTalkAudioProjModel(torch.nn.Module):
    """Projects windowed audio features into fixed-size context-token embeddings.

    Separate first-layer projections are used for the first frame
    (`seq_len` window) and subsequent grouped frames (`seq_len_vf` window).
    """

    def __init__(
        self,
        seq_len: int = 5,
        seq_len_vf: int = 12,
        blocks: int = 12,
        channels: int = 768,
        intermediate_dim: int = 512,
        out_dim: int = 768,
        context_tokens: int = 32,
        device=None,
        dtype=None,
        operations=None
    ):
        super().__init__()

        self.seq_len = seq_len
        self.blocks = blocks
        self.channels = channels
        self.input_dim = seq_len * blocks * channels
        self.input_dim_vf = seq_len_vf * blocks * channels
        self.intermediate_dim = intermediate_dim
        self.context_tokens = context_tokens
        self.out_dim = out_dim

        # define multiple linear layers
        self.proj1 = operations.Linear(self.input_dim, intermediate_dim, device=device, dtype=dtype)
        self.proj1_vf = operations.Linear(self.input_dim_vf, intermediate_dim, device=device, dtype=dtype)
        self.proj2 = operations.Linear(intermediate_dim, intermediate_dim, device=device, dtype=dtype)
        self.proj3 = operations.Linear(intermediate_dim, context_tokens * out_dim, device=device, dtype=dtype)
        self.norm = operations.LayerNorm(out_dim, device=device, dtype=dtype)

    def forward(self, audio_embeds, audio_embeds_vf):
        video_length = audio_embeds.shape[1] + audio_embeds_vf.shape[1]
        B, _, _, S, C = audio_embeds.shape

        # process audio of first frame
        audio_embeds = rearrange(audio_embeds, "bz f w b c -> (bz f) w b c")
        batch_size, window_size, blocks, channels = audio_embeds.shape
        audio_embeds = audio_embeds.view(batch_size, window_size * blocks * channels)

        # process audio of latter frame
        audio_embeds_vf = rearrange(audio_embeds_vf, "bz f w b c -> (bz f) w b c")
        batch_size_vf, window_size_vf, blocks_vf, channels_vf = audio_embeds_vf.shape
        audio_embeds_vf = audio_embeds_vf.view(batch_size_vf, window_size_vf * blocks_vf * channels_vf)

        # first projection
        audio_embeds = torch.relu(self.proj1(audio_embeds))
        audio_embeds_vf = torch.relu(self.proj1_vf(audio_embeds_vf))
        audio_embeds = rearrange(audio_embeds, "(bz f) c -> bz f c", bz=B)
        audio_embeds_vf = rearrange(audio_embeds_vf, "(bz f) c -> bz f c", bz=B)
        audio_embeds_c = torch.concat([audio_embeds, audio_embeds_vf], dim=1)
        batch_size_c, N_t, C_a = audio_embeds_c.shape
        audio_embeds_c = audio_embeds_c.view(batch_size_c*N_t, C_a)

        # second projection
        audio_embeds_c = torch.relu(self.proj2(audio_embeds_c))

        context_tokens = self.proj3(audio_embeds_c).reshape(batch_size_c*N_t, self.context_tokens, self.out_dim)

        # normalization and reshape
        context_tokens = self.norm(context_tokens)
        context_tokens = rearrange(context_tokens, "(bz f) m c -> bz f m c", f=video_length)

        return context_tokens


class WanMultiTalkAttentionBlock(torch.nn.Module):
    """Per-block add-on module: LayerNorm + audio cross-attention (weights loaded from patch)."""

    def __init__(self, in_dim=5120, out_dim=768, device=None, dtype=None, operations=None):
        super().__init__()
        self.audio_cross_attn = SingleStreamMultiAttention(in_dim, out_dim, num_heads=40, qkv_bias=True,
                                                           device=device, dtype=dtype, operations=operations)
        self.norm_x = operations.LayerNorm(in_dim, device=device, dtype=dtype, elementwise_affine=True)


class MultiTalkGetAttnMapPatch:
    """Attention patch that computes and stashes the reference attention map in transformer_options."""

    def __init__(self, ref_target_masks=None):
        self.ref_target_masks = ref_target_masks

    def __call__(self, kwargs):
        transformer_options = kwargs.get("transformer_options", {})
        x = kwargs["x"]
        if self.ref_target_masks is not None:
            x_ref_attn_map = get_attn_map_with_target(kwargs["q"], kwargs["k"], transformer_options["grid_sizes"],
                                                      ref_target_masks=self.ref_target_masks.to(x.device))
            transformer_options["x_ref_attn_map"] = x_ref_attn_map
        return x


class MultiTalkCrossAttnPatch:
    """Cross-attention patch that adds scaled audio cross-attention output to the block result."""

    def __init__(self, model_patch, audio_scale=1.0, ref_target_masks=None):
        self.model_patch = model_patch
        self.audio_scale = audio_scale
        self.ref_target_masks = ref_target_masks

    def __call__(self, kwargs):
        transformer_options = kwargs.get("transformer_options", {})
        block_idx = transformer_options.get("block_index", None)
        x = kwargs["x"]
        if block_idx is None:
            # No block index available -> contribute nothing.
            return torch.zeros_like(x)
        audio_embeds = transformer_options.get("audio_embeds")
        # pop so the map computed by MultiTalkGetAttnMapPatch is consumed once.
        x_ref_attn_map = transformer_options.pop("x_ref_attn_map", None)
        norm_x = self.model_patch.model.blocks[block_idx].norm_x(x)
        x_audio = self.model_patch.model.blocks[block_idx].audio_cross_attn(
            norm_x, audio_embeds.to(x.dtype),
            shape=transformer_options["grid_sizes"],
            x_ref_attn_map=x_ref_attn_map
        )
        x = x + x_audio * self.audio_scale
        return x

    def models(self):
        return [self.model_patch]


class MultiTalkApplyModelWrapper:
    """apply_model wrapper that pins the first latent frames to fixed init latents every step."""

    def __init__(self, init_latents):
        self.init_latents = init_latents

    def __call__(self, executor, x, *args, **kwargs):
        x[:, :, :self.init_latents.shape[2]] = self.init_latents.to(x)
        samples = executor(x, *args, **kwargs)
        return samples


class InfiniteTalkOuterSampleWrapper:
    """Outer-sample wrapper for InfiniteTalk: installs the motion-frame pinning wrapper
    and, when extending, re-inserts the motion frames before decoding."""

    def __init__(self, motion_frames_latent, model_patch, is_extend=False):
        self.motion_frames_latent = motion_frames_latent
        self.model_patch = model_patch
        self.is_extend = is_extend

    def __call__(self, executor, *args, **kwargs):
        model_patcher = executor.class_obj.model_patcher
        model_options = executor.class_obj.model_options

        process_latent_in = model_patcher.model.process_latent_in

        # for InfiniteTalk, model input first latent(s) need to always be replaced on every step
        if self.motion_frames_latent is not None:
            wrappers = model_options["transformer_options"]["wrappers"]
            w = wrappers.setdefault(comfy.patcher_extension.WrappersMP.APPLY_MODEL, {})
            w["MultiTalk_apply_model"] = [MultiTalkApplyModelWrapper(process_latent_in(self.motion_frames_latent))]

        # run the sampling process
        result = executor(*args, **kwargs)

        # insert motion frames before decoding
        if self.is_extend:
            overlap = self.motion_frames_latent.shape[2]
            result = torch.cat([self.motion_frames_latent.to(result), result[:, :, overlap:]], dim=2)

        return result

    def to(self, device_or_dtype):
        # Only device moves are honored; dtype changes are intentionally ignored.
        if isinstance(device_or_dtype, torch.device):
            if self.motion_frames_latent is not None:
                self.motion_frames_latent = self.motion_frames_latent.to(device_or_dtype)
        return self
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/ldm/wan/model_multitalk.py", "license": "GNU General Public License v3.0", "lines": 390, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy/ldm/anima/model.py
from comfy.ldm.cosmos.predict2 import MiniTrainDIT
import torch
from torch import nn
import torch.nn.functional as F


def rotate_half(x):
    """Swap and negate the two halves of the last dimension: (a, b) -> (-b, a)."""
    first_half = x[..., : x.shape[-1] // 2]
    second_half = x[..., x.shape[-1] // 2 :]
    return torch.cat((-second_half, first_half), dim=-1)


def apply_rotary_pos_emb(x, cos, sin, unsqueeze_dim=1):
    """Apply rotary position embeddings to ``x`` using precomputed cos/sin tables."""
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    return (x * cos) + (rotate_half(x) * sin)


class RotaryEmbedding(nn.Module):
    """Produces float32 cos/sin rotary tables for a given head dimension."""

    def __init__(self, head_dim):
        super().__init__()
        self.rope_theta = 10000
        inv_freq = 1.0 / (self.rope_theta ** (torch.arange(0, head_dim, 2, dtype=torch.int64).to(dtype=torch.float) / head_dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

    @torch.no_grad()
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()
        # NOTE(review): "mps" is routed to a "cpu" autocast context — presumably
        # because autocast cannot be disabled on mps; confirm before changing.
        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()
        # Tables are cast back to the input dtype for downstream matmuls.
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


class Attention(nn.Module):
    """Multi-head attention with per-head QK RMSNorm; self- or cross-attention."""

    def __init__(self, query_dim, context_dim, n_heads, head_dim, device=None, dtype=None, operations=None):
        super().__init__()
        inner_dim = head_dim * n_heads
        self.n_heads = n_heads
        self.head_dim = head_dim
        self.query_dim = query_dim
        self.context_dim = context_dim
        self.q_proj = operations.Linear(query_dim, inner_dim, bias=False, device=device, dtype=dtype)
        self.q_norm = operations.RMSNorm(self.head_dim, eps=1e-6, device=device, dtype=dtype)
        self.k_proj = operations.Linear(context_dim, inner_dim, bias=False, device=device, dtype=dtype)
        self.k_norm = operations.RMSNorm(self.head_dim, eps=1e-6, device=device, dtype=dtype)
        self.v_proj = operations.Linear(context_dim, inner_dim, bias=False, device=device, dtype=dtype)
        self.o_proj = operations.Linear(inner_dim, query_dim, bias=False, device=device, dtype=dtype)

    def forward(self, x, mask=None, context=None, position_embeddings=None, position_embeddings_context=None):
        # Self-attention when no context is supplied.
        if context is None:
            context = x
        input_shape = x.shape[:-1]
        q_shape = (*input_shape, self.n_heads, self.head_dim)
        context_shape = context.shape[:-1]
        kv_shape = (*context_shape, self.n_heads, self.head_dim)

        query_states = self.q_norm(self.q_proj(x).view(q_shape)).transpose(1, 2)
        key_states = self.k_norm(self.k_proj(context).view(kv_shape)).transpose(1, 2)
        value_states = self.v_proj(context).view(kv_shape).transpose(1, 2)

        if position_embeddings is not None:
            # Queries and keys may use different rotary tables (different lengths).
            assert position_embeddings_context is not None
            cos, sin = position_embeddings
            query_states = apply_rotary_pos_emb(query_states, cos, sin)
            cos, sin = position_embeddings_context
            key_states = apply_rotary_pos_emb(key_states, cos, sin)

        attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=mask)
        attn_output = attn_output.transpose(1, 2).reshape(*input_shape, -1).contiguous()
        return self.o_proj(attn_output)

    def init_weights(self):
        # Zero the output projection so the residual branch starts as identity.
        torch.nn.init.zeros_(self.o_proj.weight)


class TransformerBlock(nn.Module):
    """Pre-norm transformer block: optional self-attention, cross-attention, MLP."""

    def __init__(self, source_dim, model_dim, num_heads=16, mlp_ratio=4.0, use_self_attn=False, layer_norm=False, device=None, dtype=None, operations=None):
        super().__init__()

        def make_norm():
            # LayerNorm or RMSNorm, selected once for all three norm sites.
            if layer_norm:
                return operations.LayerNorm(model_dim, device=device, dtype=dtype)
            return operations.RMSNorm(model_dim, eps=1e-6, device=device, dtype=dtype)

        self.use_self_attn = use_self_attn
        if self.use_self_attn:
            self.norm_self_attn = make_norm()
            self.self_attn = Attention(
                query_dim=model_dim,
                context_dim=model_dim,
                n_heads=num_heads,
                head_dim=model_dim // num_heads,
                device=device,
                dtype=dtype,
                operations=operations,
            )
        self.norm_cross_attn = make_norm()
        self.cross_attn = Attention(
            query_dim=model_dim,
            context_dim=source_dim,
            n_heads=num_heads,
            head_dim=model_dim // num_heads,
            device=device,
            dtype=dtype,
            operations=operations,
        )
        self.norm_mlp = make_norm()
        self.mlp = nn.Sequential(
            operations.Linear(model_dim, int(model_dim * mlp_ratio), device=device, dtype=dtype),
            nn.GELU(),
            operations.Linear(int(model_dim * mlp_ratio), model_dim, device=device, dtype=dtype),
        )

    def forward(self, x, context, target_attention_mask=None, source_attention_mask=None, position_embeddings=None, position_embeddings_context=None):
        if self.use_self_attn:
            # Self-attention uses the target-side rotary tables for both q and k.
            x = x + self.self_attn(
                self.norm_self_attn(x),
                mask=target_attention_mask,
                position_embeddings=position_embeddings,
                position_embeddings_context=position_embeddings,
            )
        x = x + self.cross_attn(
            self.norm_cross_attn(x),
            mask=source_attention_mask,
            context=context,
            position_embeddings=position_embeddings,
            position_embeddings_context=position_embeddings_context,
        )
        x = x + self.mlp(self.norm_mlp(x))
        return x

    def init_weights(self):
        torch.nn.init.zeros_(self.mlp[2].weight)
        self.cross_attn.init_weights()


class LLMAdapter(nn.Module):
    """Maps source LLM hidden states plus target token ids to target embeddings."""

    def __init__(
        self,
        source_dim=1024,
        target_dim=1024,
        model_dim=1024,
        num_layers=6,
        num_heads=16,
        use_self_attn=True,
        layer_norm=False,
        device=None,
        dtype=None,
        operations=None,
    ):
        super().__init__()
        # 32128-entry embedding table; presumably the T5 vocabulary size — confirm.
        self.embed = operations.Embedding(32128, target_dim, device=device, dtype=dtype)
        if model_dim != target_dim:
            self.in_proj = operations.Linear(target_dim, model_dim, device=device, dtype=dtype)
        else:
            self.in_proj = nn.Identity()
        self.rotary_emb = RotaryEmbedding(model_dim // num_heads)
        self.blocks = nn.ModuleList([
            TransformerBlock(
                source_dim,
                model_dim,
                num_heads=num_heads,
                use_self_attn=use_self_attn,
                layer_norm=layer_norm,
                device=device,
                dtype=dtype,
                operations=operations,
            )
            for _ in range(num_layers)
        ])
        self.out_proj = operations.Linear(model_dim, target_dim, device=device, dtype=dtype)
        self.norm = operations.RMSNorm(target_dim, eps=1e-6, device=device, dtype=dtype)

    def forward(self, source_hidden_states, target_input_ids, target_attention_mask=None, source_attention_mask=None):
        # Promote 2D padding masks to (batch, 1, 1, seq) boolean attention masks.
        if target_attention_mask is not None:
            target_attention_mask = target_attention_mask.to(torch.bool)
            if target_attention_mask.ndim == 2:
                target_attention_mask = target_attention_mask.unsqueeze(1).unsqueeze(1)
        if source_attention_mask is not None:
            source_attention_mask = source_attention_mask.to(torch.bool)
            if source_attention_mask.ndim == 2:
                source_attention_mask = source_attention_mask.unsqueeze(1).unsqueeze(1)

        context = source_hidden_states
        x = self.in_proj(self.embed(target_input_ids, out_dtype=context.dtype))

        # Separate rotary tables for the target and the (possibly longer) context.
        position_ids = torch.arange(x.shape[1], device=x.device).unsqueeze(0)
        position_ids_context = torch.arange(context.shape[1], device=x.device).unsqueeze(0)
        position_embeddings = self.rotary_emb(x, position_ids)
        position_embeddings_context = self.rotary_emb(x, position_ids_context)

        for block in self.blocks:
            x = block(
                x,
                context,
                target_attention_mask=target_attention_mask,
                source_attention_mask=source_attention_mask,
                position_embeddings=position_embeddings,
                position_embeddings_context=position_embeddings_context,
            )
        return self.norm(self.out_proj(x))


class Anima(MiniTrainDIT):
    """MiniTrainDIT variant that routes text embeddings through an LLMAdapter."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.llm_adapter = LLMAdapter(device=kwargs.get("device"), dtype=kwargs.get("dtype"), operations=kwargs.get("operations"))

    def preprocess_text_embeds(self, text_embeds, text_ids, t5xxl_weights=None):
        # Without ids the embeds pass through untouched.
        if text_ids is None:
            return text_embeds
        out = self.llm_adapter(text_embeds, text_ids)
        if t5xxl_weights is not None:
            out = out * t5xxl_weights
        # Zero-pad the sequence dimension up to a fixed length of 512.
        if out.shape[1] < 512:
            out = torch.nn.functional.pad(out, (0, 0, 0, 512 - out.shape[1]))
        return out

    def forward(self, x, timesteps, context, **kwargs):
        # t5xxl_* kwargs are consumed here and not forwarded to the base model.
        t5xxl_ids = kwargs.pop("t5xxl_ids", None)
        if t5xxl_ids is not None:
            context = self.preprocess_text_embeds(context, t5xxl_ids, t5xxl_weights=kwargs.pop("t5xxl_weights", None))
        return super().forward(x, timesteps, context, **kwargs)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/ldm/anima/model.py", "license": "GNU General Public License v3.0", "lines": 172, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy/text_encoders/anima.py
from transformers import Qwen2Tokenizer, T5TokenizerFast
import comfy.text_encoders.llama
from comfy import sd1_clip
import os
import torch


class Qwen3Tokenizer(sd1_clip.SDTokenizer):
    """Tokenizer for the Qwen3-0.6B text encoder (qwen25 tokenizer files)."""

    def __init__(self, embedding_directory=None, tokenizer_data={}):
        tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "qwen25_tokenizer")
        super().__init__(
            tokenizer_path,
            pad_with_end=False,
            embedding_directory=embedding_directory,
            embedding_size=1024,
            embedding_key='qwen3_06b',
            tokenizer_class=Qwen2Tokenizer,
            has_start_token=False,
            has_end_token=False,
            pad_to_max_length=False,
            max_length=99999999,
            min_length=1,
            pad_token=151643,
            tokenizer_data=tokenizer_data,
        )


class T5XXLTokenizer(sd1_clip.SDTokenizer):
    """Tokenizer producing T5-XXL token ids and weights."""

    def __init__(self, embedding_directory=None, tokenizer_data={}):
        tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer")
        super().__init__(
            tokenizer_path,
            embedding_directory=embedding_directory,
            pad_with_end=False,
            embedding_size=4096,
            embedding_key='t5xxl',
            tokenizer_class=T5TokenizerFast,
            has_start_token=False,
            pad_to_max_length=False,
            max_length=99999999,
            min_length=1,
            tokenizer_data=tokenizer_data,
        )


class AnimaTokenizer:
    """Runs both the Qwen3 and T5-XXL tokenizers over the same text."""

    def __init__(self, embedding_directory=None, tokenizer_data={}):
        self.qwen3_06b = Qwen3Tokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data)
        self.t5xxl = T5XXLTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data)

    def tokenize_with_weights(self, text: str, return_word_ids=False, **kwargs):
        qwen_ids = self.qwen3_06b.tokenize_with_weights(text, return_word_ids, **kwargs)
        out = {}
        # Qwen-side weights are forced to 1.0; only the T5 side keeps user weights.
        out["qwen3_06b"] = [
            [(tok[0], 1.0, tok[2]) if return_word_ids else (tok[0], 1.0) for tok in inner]
            for inner in qwen_ids
        ]
        out["t5xxl"] = self.t5xxl.tokenize_with_weights(text, return_word_ids, **kwargs)
        return out

    def untokenize(self, token_weight_pair):
        return self.t5xxl.untokenize(token_weight_pair)

    def state_dict(self):
        return {}

    def decode(self, token_ids, **kwargs):
        return self.qwen3_06b.decode(token_ids, **kwargs)


class Qwen3_06BModel(sd1_clip.SDClipModel):
    """CLIP-style wrapper around the Qwen3-0.6B language model."""

    def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, attention_mask=True, model_options={}):
        super().__init__(
            device=device,
            layer=layer,
            layer_idx=layer_idx,
            textmodel_json_config={},
            dtype=dtype,
            special_tokens={"pad": 151643},
            layer_norm_hidden_state=False,
            model_class=comfy.text_encoders.llama.Qwen3_06B,
            enable_attention_masks=attention_mask,
            return_attention_masks=attention_mask,
            model_options=model_options,
        )


class AnimaTEModel(sd1_clip.SD1ClipModel):
    """Text-encoder model that also surfaces raw T5-XXL ids and weights."""

    def __init__(self, device="cpu", dtype=None, model_options={}):
        super().__init__(device=device, dtype=dtype, name="qwen3_06b", clip_model=Qwen3_06BModel, model_options=model_options)

    def encode_token_weights(self, token_weight_pairs):
        out = super().encode_token_weights(token_weight_pairs)
        # Attach the T5 token ids/weights as extra conditioning metadata.
        t5_pairs = token_weight_pairs["t5xxl"][0]
        out[2]["t5xxl_ids"] = torch.tensor([p[0] for p in t5_pairs], dtype=torch.int)
        out[2]["t5xxl_weights"] = torch.tensor([p[1] for p in t5_pairs])
        return out


def te(dtype_llama=None, llama_quantization_metadata=None):
    """Build an AnimaTEModel subclass with baked-in dtype/quantization options."""
    class AnimaTEModel_(AnimaTEModel):
        def __init__(self, device="cpu", dtype=None, model_options={}):
            if dtype_llama is not None:
                dtype = dtype_llama
            if llama_quantization_metadata is not None:
                # Copy before mutating so the caller's dict is left untouched.
                model_options = model_options.copy()
                model_options["quantization_metadata"] = llama_quantization_metadata
            super().__init__(device=device, dtype=dtype, model_options=model_options)
    return AnimaTEModel_
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/text_encoders/anima.py", "license": "GNU General Public License v3.0", "lines": 50, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_api_nodes/apis/wavespeed.py
from pydantic import BaseModel, Field


class SeedVR2ImageRequest(BaseModel):
    """Request payload for WaveSpeed image upscaling (SeedVR2 / Ultimate)."""

    image: str = Field(...)
    target_resolution: str = Field(...)
    output_format: str = Field("png")
    enable_sync_mode: bool = Field(False)


class FlashVSRRequest(BaseModel):
    """Request payload for the FlashVSR video upscaler."""

    target_resolution: str = Field(...)
    video: str = Field(...)
    duration: float = Field(...)


class TaskCreatedDataResponse(BaseModel):
    # Task id used to build the polling URL.
    id: str = Field(...)


class TaskCreatedResponse(BaseModel):
    """Envelope returned on task creation; ``data`` is absent on failure."""

    code: int = Field(...)
    message: str = Field(...)
    data: TaskCreatedDataResponse | None = Field(None)


class TaskResultDataResponse(BaseModel):
    status: str = Field(...)
    # default_factory avoids declaring one shared mutable list as the default
    outputs: list[str] = Field(default_factory=list)


class TaskResultResponse(BaseModel):
    """Envelope returned when polling a task; ``data`` is absent on failure."""

    code: int = Field(...)
    message: str = Field(...)
    data: TaskResultDataResponse | None = Field(None)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/apis/wavespeed.py", "license": "GNU General Public License v3.0", "lines": 23, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_api_nodes/nodes_wavespeed.py
from typing_extensions import override

from comfy_api.latest import IO, ComfyExtension, Input
from comfy_api_nodes.apis.wavespeed import (
    FlashVSRRequest,
    TaskCreatedResponse,
    TaskResultResponse,
    SeedVR2ImageRequest,
)
from comfy_api_nodes.util import (
    ApiEndpoint,
    download_url_to_video_output,
    poll_op,
    sync_op,
    upload_video_to_comfyapi,
    validate_container_format_is_mp4,
    validate_video_duration,
    upload_images_to_comfyapi,
    get_number_of_images,
    download_url_to_image_tensor,
)


class WavespeedFlashVSRNode(IO.ComfyNode):
    """API node that upscales a video through the WaveSpeed FlashVSR endpoint."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="WavespeedFlashVSRNode",
            display_name="FlashVSR Video Upscale",
            category="api node/video/WaveSpeed",
            description="Fast, high-quality video upscaler that "
            "boosts resolution and restores clarity for low-resolution or blurry footage.",
            inputs=[
                IO.Video.Input("video"),
                IO.Combo.Input("target_resolution", options=["720p", "1080p", "2K", "4K"]),
            ],
            outputs=[
                IO.Video.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["target_resolution"]),
                expr="""
                (
                    $price_for_1sec := {"720p": 0.012, "1080p": 0.018, "2k": 0.024, "4k": 0.032};
                    {
                        "type":"usd",
                        "usd": $lookup($price_for_1sec, widgets.target_resolution),
                        "format":{"suffix": "/second", "approximate": true}
                    }
                )
                """,
            ),
        )

    @classmethod
    async def execute(
        cls,
        video: Input.Video,
        target_resolution: str,
    ) -> IO.NodeOutput:
        # Endpoint constraints: mp4 container, 5 seconds to 10 minutes.
        validate_container_format_is_mp4(video)
        validate_video_duration(video, min_duration=5, max_duration=60 * 10)
        initial_res = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/wavespeed/api/v3/wavespeed-ai/flashvsr", method="POST"),
            response_model=TaskCreatedResponse,
            data=FlashVSRRequest(
                target_resolution=target_resolution.lower(),
                video=await upload_video_to_comfyapi(cls, video),
                duration=video.get_duration(),
            ),
        )
        if initial_res.code != 200:
            raise ValueError(f"Task creation fails with code={initial_res.code} and message={initial_res.message}")
        # Poll the prediction until it leaves the pending state (up to 480 tries).
        final_response = await poll_op(
            cls,
            ApiEndpoint(path=f"/proxy/wavespeed/api/v3/predictions/{initial_res.data.id}/result"),
            response_model=TaskResultResponse,
            status_extractor=lambda x: "failed" if x.data is None else x.data.status,
            poll_interval=10.0,
            max_poll_attempts=480,
        )
        if final_response.code != 200:
            raise ValueError(
                f"Task processing failed with code={final_response.code} and message={final_response.message}"
            )
        return IO.NodeOutput(await download_url_to_video_output(final_response.data.outputs[0]))


class WavespeedImageUpscaleNode(IO.ComfyNode):
    """API node that upscales one image via WaveSpeed SeedVR2 or Ultimate."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="WavespeedImageUpscaleNode",
            display_name="WaveSpeed Image Upscale",
            category="api node/image/WaveSpeed",
            description="Boost image resolution and quality, upscaling photos to 4K or 8K for sharp, detailed results.",
            inputs=[
                IO.Combo.Input("model", options=["SeedVR2", "Ultimate"]),
                IO.Image.Input("image"),
                IO.Combo.Input("target_resolution", options=["2K", "4K", "8K"]),
            ],
            outputs=[
                IO.Image.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["model"]),
                expr="""
                (
                    $prices := {"seedvr2": 0.01, "ultimate": 0.06};
                    {"type":"usd", "usd": $lookup($prices, widgets.model)}
                )
                """,
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        image: Input.Image,
        target_resolution: str,
    ) -> IO.NodeOutput:
        if get_number_of_images(image) != 1:
            raise ValueError("Exactly one input image is required.")
        # Pick the endpoint that matches the selected model.
        model_path = "seedvr2/image" if model == "SeedVR2" else "ultimate-image-upscaler"
        initial_res = await sync_op(
            cls,
            ApiEndpoint(path=f"/proxy/wavespeed/api/v3/wavespeed-ai/{model_path}", method="POST"),
            response_model=TaskCreatedResponse,
            data=SeedVR2ImageRequest(
                target_resolution=target_resolution.lower(),
                image=(await upload_images_to_comfyapi(cls, image, max_images=1))[0],
            ),
        )
        if initial_res.code != 200:
            raise ValueError(f"Task creation fails with code={initial_res.code} and message={initial_res.message}")
        final_response = await poll_op(
            cls,
            ApiEndpoint(path=f"/proxy/wavespeed/api/v3/predictions/{initial_res.data.id}/result"),
            response_model=TaskResultResponse,
            status_extractor=lambda x: "failed" if x.data is None else x.data.status,
            poll_interval=10.0,
            max_poll_attempts=480,
        )
        if final_response.code != 200:
            raise ValueError(
                f"Task processing failed with code={final_response.code} and message={final_response.message}"
            )
        return IO.NodeOutput(await download_url_to_image_tensor(final_response.data.outputs[0]))


class WavespeedExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [
            WavespeedFlashVSRNode,
            WavespeedImageUpscaleNode,
        ]


async def comfy_entrypoint() -> WavespeedExtension:
    return WavespeedExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/nodes_wavespeed.py", "license": "GNU General Public License v3.0", "lines": 167, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy_extras/nodes_zimage.py
import node_helpers
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io
import math
import comfy.utils


class TextEncodeZImageOmni(io.ComfyNode):
    """Encode a prompt plus up to three reference images for Z-Image Omni."""

    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="TextEncodeZImageOmni",
            category="advanced/conditioning",
            is_experimental=True,
            inputs=[
                io.Clip.Input("clip"),
                io.ClipVision.Input("image_encoder", optional=True),
                io.String.Input("prompt", multiline=True, dynamic_prompts=True),
                io.Boolean.Input("auto_resize_images", default=True, advanced=True),
                io.Vae.Input("vae", optional=True),
                io.Image.Input("image1", optional=True),
                io.Image.Input("image2", optional=True),
                io.Image.Input("image3", optional=True),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
        )

    @classmethod
    def execute(cls, clip, prompt, image_encoder=None, auto_resize_images=True, vae=None, image1=None, image2=None, image3=None) -> io.NodeOutput:
        reference_images = [img for img in (image1, image2, image3) if img is not None]

        # Build the chat-style vision template only when reference images exist.
        vision_prompts = []
        template = None
        if reference_images:
            vision_prompts = ["<|im_start|>user\n<|vision_start|>"]
            vision_prompts += ["<|vision_end|><|vision_start|>"] * (len(reference_images) - 1)
            vision_prompts += ["<|vision_end|><|im_end|>"]
            template = "<|vision_end|>{}<|im_end|>\n<|im_start|>assistant\n<|vision_start|>"

        ref_latents = []
        encoded_images = []
        for img in reference_images:
            if image_encoder is not None:
                encoded_images.append(image_encoder.encode_image(img))
            if vae is not None:
                if auto_resize_images:
                    # Rescale to roughly 1 megapixel, snapped to multiples of 8.
                    samples = img.movedim(-1, 1)
                    total = int(1024 * 1024)
                    scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2]))
                    width = round(samples.shape[3] * scale_by / 8.0) * 8
                    height = round(samples.shape[2] * scale_by / 8.0) * 8
                    img = comfy.utils.common_upscale(samples, width, height, "area", "disabled").movedim(1, -1)
                ref_latents.append(vae.encode(img))

        tokens = clip.tokenize(prompt, llama_template=template)
        conditioning = clip.encode_from_tokens_scheduled(tokens)

        # Encode each vision marker string separately, with no template wrapping.
        extra_text_embeds = []
        for marker in vision_prompts:
            tokens = clip.tokenize(marker, llama_template="{}")
            text_embeds = clip.encode_from_tokens_scheduled(tokens)
            extra_text_embeds.append(text_embeds[0][0])

        if ref_latents:
            conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": ref_latents}, append=True)
        if encoded_images:
            conditioning = node_helpers.conditioning_set_values(conditioning, {"clip_vision_outputs": encoded_images}, append=True)
        if extra_text_embeds:
            conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents_text_embeds": extra_text_embeds}, append=True)
        return io.NodeOutput(conditioning)


class ZImageExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [
            TextEncodeZImageOmni,
        ]


async def comfy_entrypoint() -> ZImageExtension:
    return ZImageExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_zimage.py", "license": "GNU General Public License v3.0", "lines": 72, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_api_nodes/apis/bria.py
from typing import TypedDict

from pydantic import BaseModel, Field


class InputModerationSettings(TypedDict):
    """Moderation flags selected via the node's dynamic combo input."""

    prompt_content_moderation: bool
    visual_input_moderation: bool
    visual_output_moderation: bool


class BriaEditImageRequest(BaseModel):
    """Request payload for Bria FIBO image editing."""

    instruction: str | None = Field(...)
    structured_instruction: str | None = Field(
        ...,
        description="Use this instead of instruction for precise, programmatic control.",
    )
    images: list[str] = Field(
        ...,
        description="Required. Publicly available URL or Base64-encoded. Must contain exactly one item.",
    )
    # Typo fix: "the the input mask" -> "the input mask".
    mask: str | None = Field(
        None,
        description="Mask image (black and white). Black areas will be preserved, white areas will be edited. "
        "If omitted, the edit applies to the entire image. "
        "The input image and the input mask must be of the same size.",
    )
    negative_prompt: str | None = Field(None)
    guidance_scale: float = Field(...)
    model_version: str = Field(...)
    steps_num: int = Field(...)
    seed: int = Field(...)
    ip_signal: bool = Field(
        False,
        description="If true, returns a warning for potential IP content in the instruction.",
    )
    prompt_content_moderation: bool = Field(
        False, description="If true, returns 422 on instruction moderation failure."
    )
    visual_input_content_moderation: bool = Field(
        False, description="If true, returns 422 on images or mask moderation failure."
    )
    visual_output_content_moderation: bool = Field(
        False, description="If true, returns 422 on visual output moderation failure."
    )


class BriaRemoveBackgroundRequest(BaseModel):
    """Request payload for Bria image background removal."""

    image: str = Field(...)
    sync: bool = Field(False)
    visual_input_content_moderation: bool = Field(
        False, description="If true, returns 422 on input image moderation failure."
    )
    visual_output_content_moderation: bool = Field(
        False, description="If true, returns 422 on visual output moderation failure."
    )
    seed: int = Field(...)


class BriaStatusResponse(BaseModel):
    """Initial async response containing the polling URL."""

    request_id: str = Field(...)
    status_url: str = Field(...)
    warning: str | None = Field(None)


class BriaRemoveBackgroundResult(BaseModel):
    image_url: str = Field(...)


class BriaRemoveBackgroundResponse(BaseModel):
    """Polled status for background removal; ``result`` set on completion."""

    status: str = Field(...)
    result: BriaRemoveBackgroundResult | None = Field(None)


class BriaImageEditResult(BaseModel):
    structured_prompt: str = Field(...)
    image_url: str = Field(...)


class BriaImageEditResponse(BaseModel):
    """Polled status for image editing; ``result`` set on completion."""

    status: str = Field(...)
    result: BriaImageEditResult | None = Field(None)


class BriaRemoveVideoBackgroundRequest(BaseModel):
    """Request payload for Bria video background removal."""

    video: str = Field(...)
    background_color: str = Field(default="transparent", description="Background color for the output video.")
    output_container_and_codec: str = Field(...)
    preserve_audio: bool = Field(True)
    seed: int = Field(...)


class BriaRemoveVideoBackgroundResult(BaseModel):
    video_url: str = Field(...)


class BriaRemoveVideoBackgroundResponse(BaseModel):
    """Polled status for video background removal; ``result`` set on completion."""

    status: str = Field(...)
    result: BriaRemoveVideoBackgroundResult | None = Field(None)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/apis/bria.py", "license": "GNU General Public License v3.0", "lines": 76, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_api_nodes/nodes_bria.py
"""ComfyUI API nodes for Bria: image editing, image background removal, and
video background removal.

All three nodes follow the same pattern: upload the input media to the Comfy
API, submit an async Bria job via ``sync_op``, then ``poll_op`` the Bria
status endpoint until the job finishes and download the result.
"""
from typing_extensions import override

from comfy_api.latest import IO, ComfyExtension, Input
from comfy_api_nodes.apis.bria import (
    BriaEditImageRequest,
    BriaRemoveBackgroundRequest,
    BriaRemoveBackgroundResponse,
    BriaRemoveVideoBackgroundRequest,
    BriaRemoveVideoBackgroundResponse,
    BriaImageEditResponse,
    BriaStatusResponse,
    InputModerationSettings,
)
from comfy_api_nodes.util import (
    ApiEndpoint,
    convert_mask_to_image,
    download_url_to_image_tensor,
    download_url_to_video_output,
    poll_op,
    sync_op,
    upload_image_to_comfyapi,
    upload_video_to_comfyapi,
    validate_video_duration,
)


class BriaImageEditNode(IO.ComfyNode):
    """Edit an image with Bria FIBO from a free-form or structured instruction."""

    @classmethod
    def define_schema(cls):
        """Declare the node's inputs, outputs and pricing for the graph editor."""
        return IO.Schema(
            node_id="BriaImageEditNode",
            display_name="Bria FIBO Image Edit",
            category="api node/image/Bria",
            description="Edit images using Bria latest model",
            inputs=[
                IO.Combo.Input("model", options=["FIBO"]),
                IO.Image.Input("image"),
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                    tooltip="Instruction to edit image",
                ),
                IO.String.Input("negative_prompt", multiline=True, default=""),
                IO.String.Input(
                    "structured_prompt",
                    multiline=True,
                    default="",
                    tooltip="A string containing the structured edit prompt in JSON format. "
                    "Use this instead of usual prompt for precise, programmatic control.",
                ),
                IO.Int.Input(
                    "seed",
                    default=1,
                    min=1,
                    max=2147483647,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                ),
                IO.Float.Input(
                    "guidance_scale",
                    default=3,
                    min=3,
                    max=5,
                    step=0.01,
                    display_mode=IO.NumberDisplay.number,
                    tooltip="Higher value makes the image follow the prompt more closely.",
                ),
                IO.Int.Input(
                    "steps",
                    default=50,
                    min=20,
                    max=50,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                ),
                # The per-flag booleans only exist when the "true" option is chosen;
                # execute() therefore reads them defensively with .get().
                IO.DynamicCombo.Input(
                    "moderation",
                    options=[
                        IO.DynamicCombo.Option("false", []),
                        IO.DynamicCombo.Option(
                            "true",
                            [
                                IO.Boolean.Input("prompt_content_moderation", default=False),
                                IO.Boolean.Input("visual_input_moderation", default=False),
                                IO.Boolean.Input("visual_output_moderation", default=True),
                            ],
                        ),
                    ],
                    tooltip="Moderation settings",
                ),
                IO.Mask.Input(
                    "mask",
                    tooltip="If omitted, the edit applies to the entire image.",
                    optional=True,
                ),
            ],
            outputs=[
                IO.Image.Output(),
                IO.String.Output(display_name="structured_prompt"),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                expr="""{"type":"usd","usd":0.04}""",
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        image: Input.Image,
        prompt: str,
        negative_prompt: str,
        structured_prompt: str,
        seed: int,
        guidance_scale: float,
        steps: int,
        moderation: InputModerationSettings,
        mask: Input.Image | None = None,
    ) -> IO.NodeOutput:
        """Submit the edit job, poll to completion, return (image, structured_prompt).

        Raises:
            ValueError: if both ``prompt`` and ``structured_prompt`` are empty.
        """
        if not prompt and not structured_prompt:
            raise ValueError("One of prompt or structured_prompt is required to be non-empty.")
        mask_url = None
        if mask is not None:
            # Masks are single-channel; convert to an image before uploading.
            mask_url = await upload_image_to_comfyapi(cls, convert_mask_to_image(mask), wait_label="Uploading mask")
        response = await sync_op(
            cls,
            # NOTE(review): this path has no leading slash, unlike the poll path
            # below — presumably ApiEndpoint normalizes it; confirm.
            ApiEndpoint(path="proxy/bria/v2/image/edit", method="POST"),
            data=BriaEditImageRequest(
                # Empty strings are mapped to None so the API treats them as omitted.
                instruction=prompt if prompt else None,
                structured_instruction=structured_prompt if structured_prompt else None,
                images=[await upload_image_to_comfyapi(cls, image, wait_label="Uploading image")],
                mask=mask_url,
                negative_prompt=negative_prompt if negative_prompt else None,
                guidance_scale=guidance_scale,
                seed=seed,
                model_version=model,
                steps_num=steps,
                prompt_content_moderation=moderation.get("prompt_content_moderation", False),
                visual_input_content_moderation=moderation.get("visual_input_moderation", False),
                visual_output_content_moderation=moderation.get("visual_output_moderation", False),
            ),
            response_model=BriaStatusResponse,
        )
        # The first response only carries a request id; poll the status endpoint
        # until the job reports completion.
        response = await poll_op(
            cls,
            ApiEndpoint(path=f"/proxy/bria/v2/status/{response.request_id}"),
            status_extractor=lambda r: r.status,
            response_model=BriaImageEditResponse,
        )
        return IO.NodeOutput(
            await download_url_to_image_tensor(response.result.image_url),
            response.result.structured_prompt,
        )


class BriaRemoveImageBackground(IO.ComfyNode):
    """Remove the background from an image using Bria RMBG 2.0."""

    @classmethod
    def define_schema(cls):
        """Declare the node's inputs, outputs and pricing for the graph editor."""
        return IO.Schema(
            node_id="BriaRemoveImageBackground",
            display_name="Bria Remove Image Background",
            category="api node/image/Bria",
            description="Remove the background from an image using Bria RMBG 2.0.",
            inputs=[
                IO.Image.Input("image"),
                IO.DynamicCombo.Input(
                    "moderation",
                    options=[
                        IO.DynamicCombo.Option("false", []),
                        IO.DynamicCombo.Option(
                            "true",
                            [
                                IO.Boolean.Input("visual_input_moderation", default=False),
                                IO.Boolean.Input("visual_output_moderation", default=True),
                            ],
                        ),
                    ],
                    tooltip="Moderation settings",
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed controls whether the node should re-run; "
                    "results are non-deterministic regardless of seed.",
                ),
            ],
            outputs=[IO.Image.Output()],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                expr="""{"type":"usd","usd":0.018}""",
            ),
        )

    @classmethod
    async def execute(
        cls,
        image: Input.Image,
        moderation: dict,
        seed: int,
    ) -> IO.NodeOutput:
        """Upload the image, run background removal, and return the result tensor."""
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/bria/v2/image/edit/remove_background", method="POST"),
            data=BriaRemoveBackgroundRequest(
                image=await upload_image_to_comfyapi(cls, image, wait_label="Uploading image"),
                sync=False,  # async mode: the result is fetched via the poll below
                visual_input_content_moderation=moderation.get("visual_input_moderation", False),
                visual_output_content_moderation=moderation.get("visual_output_moderation", False),
                seed=seed,
            ),
            response_model=BriaStatusResponse,
        )
        response = await poll_op(
            cls,
            ApiEndpoint(path=f"/proxy/bria/v2/status/{response.request_id}"),
            status_extractor=lambda r: r.status,
            response_model=BriaRemoveBackgroundResponse,
        )
        return IO.NodeOutput(await download_url_to_image_tensor(response.result.image_url))


class BriaRemoveVideoBackground(IO.ComfyNode):
    """Remove the background from a video using Bria (max 60 seconds of input)."""

    @classmethod
    def define_schema(cls):
        """Declare the node's inputs, outputs and pricing for the graph editor."""
        return IO.Schema(
            node_id="BriaRemoveVideoBackground",
            display_name="Bria Remove Video Background",
            category="api node/video/Bria",
            description="Remove the background from a video using Bria. ",
            inputs=[
                IO.Video.Input("video"),
                IO.Combo.Input(
                    "background_color",
                    options=[
                        "Black",
                        "White",
                        "Gray",
                        "Red",
                        "Green",
                        "Blue",
                        "Yellow",
                        "Cyan",
                        "Magenta",
                        "Orange",
                    ],
                    tooltip="Background color for the output video.",
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed controls whether the node should re-run; "
                    "results are non-deterministic regardless of seed.",
                ),
            ],
            outputs=[IO.Video.Output()],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                expr="""{"type":"usd","usd":0.14,"format":{"suffix":"/second"}}""",
            ),
        )

    @classmethod
    async def execute(
        cls,
        video: Input.Video,
        background_color: str,
        seed: int,
    ) -> IO.NodeOutput:
        """Validate duration, upload the video, run the job, and return the output video."""
        # Reject over-long inputs before paying for an upload.
        validate_video_duration(video, max_duration=60.0)
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/bria/v2/video/edit/remove_background", method="POST"),
            data=BriaRemoveVideoBackgroundRequest(
                video=await upload_video_to_comfyapi(cls, video),
                background_color=background_color,
                output_container_and_codec="mp4_h264",
                seed=seed,
            ),
            response_model=BriaStatusResponse,
        )
        response = await poll_op(
            cls,
            ApiEndpoint(path=f"/proxy/bria/v2/status/{response.request_id}"),
            status_extractor=lambda r: r.status,
            response_model=BriaRemoveVideoBackgroundResponse,
        )
        return IO.NodeOutput(await download_url_to_video_output(response.result.video_url))


class BriaExtension(ComfyExtension):
    """Registers the Bria node classes with ComfyUI."""

    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [
            BriaImageEditNode,
            BriaRemoveImageBackground,
            BriaRemoveVideoBackground,
        ]


async def comfy_entrypoint() -> BriaExtension:
    """Extension entry point called by the ComfyUI loader."""
    return BriaExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/nodes_bria.py", "license": "GNU General Public License v3.0", "lines": 313, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_api_nodes/apis/ideogram.py
"""Pydantic models for the Ideogram image-generation API (legacy and V3 endpoints).

# comments are used instead of class docstrings because pydantic lifts model
docstrings into the generated JSON schema.
"""
from enum import Enum
from typing import Optional, List, Dict, Any, Union
from datetime import datetime
from pydantic import BaseModel, Field, RootModel, StrictBytes


# Palette variant 1: reference a preset palette by name.
class IdeogramColorPalette1(BaseModel):
    name: str = Field(..., description='Name of the preset color palette')


# One explicit color entry of a custom palette.
class Member(BaseModel):
    color: Optional[str] = Field(
        None, description='Hexadecimal color code', pattern='^#[0-9A-Fa-f]{6}$'
    )
    weight: Optional[float] = Field(
        None, description='Optional weight for the color (0-1)', ge=0.0, le=1.0
    )


# Palette variant 2: a list of weighted explicit colors.
class IdeogramColorPalette2(BaseModel):
    members: List[Member] = Field(
        ..., description='Array of color definitions with optional weights'
    )


# Union root model accepting either palette variant.
class IdeogramColorPalette(
    RootModel[Union[IdeogramColorPalette1, IdeogramColorPalette2]]
):
    root: Union[IdeogramColorPalette1, IdeogramColorPalette2] = Field(
        ...,
        description='A color palette specification that can either use a preset name or explicit color definitions with weights',
    )


# Parameters of a legacy (pre-V3) generation request.
class ImageRequest(BaseModel):
    aspect_ratio: Optional[str] = Field(
        None,
        description="Optional. The aspect ratio (e.g., 'ASPECT_16_9', 'ASPECT_1_1'). Cannot be used with resolution. Defaults to 'ASPECT_1_1' if unspecified.",
    )
    color_palette: Optional[Dict[str, Any]] = Field(
        None, description='Optional. Color palette object. Only for V_2, V_2_TURBO.'
    )
    magic_prompt_option: Optional[str] = Field(
        None, description="Optional. MagicPrompt usage ('AUTO', 'ON', 'OFF')."
    )
    model: str = Field(..., description="The model used (e.g., 'V_2', 'V_2A_TURBO')")
    negative_prompt: Optional[str] = Field(
        None,
        description='Optional. Description of what to exclude. Only for V_1, V_1_TURBO, V_2, V_2_TURBO.',
    )
    num_images: Optional[int] = Field(
        1,
        description='Optional. Number of images to generate (1-8). Defaults to 1.',
        ge=1,
        le=8,
    )
    prompt: str = Field(
        ..., description='Required. The prompt to use to generate the image.'
    )
    resolution: Optional[str] = Field(
        None,
        description="Optional. Resolution (e.g., 'RESOLUTION_1024_1024'). Only for model V_2. Cannot be used with aspect_ratio.",
    )
    seed: Optional[int] = Field(
        None,
        description='Optional. A number between 0 and 2147483647.',
        ge=0,
        le=2147483647,
    )
    style_type: Optional[str] = Field(
        None,
        description="Optional. Style type ('AUTO', 'GENERAL', 'REALISTIC', 'DESIGN', 'RENDER_3D', 'ANIME'). Only for models V_2 and above.",
    )


class IdeogramGenerateRequest(BaseModel):
    image_request: ImageRequest = Field(
        ..., description='The image generation request parameters.'
    )


# One generated image in a legacy generate response.
class Datum(BaseModel):
    is_image_safe: Optional[bool] = Field(
        None, description='Indicates whether the image is considered safe.'
    )
    prompt: Optional[str] = Field(
        None, description='The prompt used to generate this image.'
    )
    resolution: Optional[str] = Field(
        None, description="The resolution of the generated image (e.g., '1024x1024')."
    )
    seed: Optional[int] = Field(
        None, description='The seed value used for this generation.'
    )
    style_type: Optional[str] = Field(
        None,
        description="The style type used for generation (e.g., 'REALISTIC', 'ANIME').",
    )
    url: Optional[str] = Field(None, description='URL to the generated image.')


class IdeogramGenerateResponse(BaseModel):
    created: Optional[datetime] = Field(
        None, description='Timestamp when the generation was created.'
    )
    data: Optional[List[Datum]] = Field(
        None, description='Array of generated image information.'
    )


# 8-hex-digit style code used by the V3 endpoints.
class StyleCode(RootModel[str]):
    root: str = Field(..., pattern='^[0-9A-Fa-f]{8}$')


# V3 response datum; same shape as Datum but without field descriptions.
class Datum1(BaseModel):
    is_image_safe: Optional[bool] = None
    prompt: Optional[str] = None
    resolution: Optional[str] = None
    seed: Optional[int] = None
    style_type: Optional[str] = None
    url: Optional[str] = None


class IdeogramV3IdeogramResponse(BaseModel):
    created: Optional[datetime] = None
    data: Optional[List[Datum1]] = None


class RenderingSpeed1(str, Enum):
    TURBO = 'TURBO'
    DEFAULT = 'DEFAULT'
    QUALITY = 'QUALITY'


class IdeogramV3ReframeRequest(BaseModel):
    color_palette: Optional[Dict[str, Any]] = None
    image: Optional[StrictBytes] = None
    num_images: Optional[int] = Field(None, ge=1, le=8)
    rendering_speed: Optional[RenderingSpeed1] = None
    resolution: str
    seed: Optional[int] = Field(None, ge=0, le=2147483647)
    style_codes: Optional[List[str]] = None
    style_reference_images: Optional[List[StrictBytes]] = None


class MagicPrompt(str, Enum):
    AUTO = 'AUTO'
    ON = 'ON'
    OFF = 'OFF'


class StyleType(str, Enum):
    AUTO = 'AUTO'
    GENERAL = 'GENERAL'
    REALISTIC = 'REALISTIC'
    DESIGN = 'DESIGN'


class IdeogramV3RemixRequest(BaseModel):
    aspect_ratio: Optional[str] = None
    color_palette: Optional[Dict[str, Any]] = None
    image: Optional[StrictBytes] = None
    image_weight: Optional[int] = Field(50, ge=1, le=100)
    magic_prompt: Optional[MagicPrompt] = None
    negative_prompt: Optional[str] = None
    num_images: Optional[int] = Field(None, ge=1, le=8)
    prompt: str
    rendering_speed: Optional[RenderingSpeed1] = None
    resolution: Optional[str] = None
    seed: Optional[int] = Field(None, ge=0, le=2147483647)
    style_codes: Optional[List[str]] = None
    style_reference_images: Optional[List[StrictBytes]] = None
    style_type: Optional[StyleType] = None


class IdeogramV3ReplaceBackgroundRequest(BaseModel):
    color_palette: Optional[Dict[str, Any]] = None
    image: Optional[StrictBytes] = None
    magic_prompt: Optional[MagicPrompt] = None
    num_images: Optional[int] = Field(None, ge=1, le=8)
    prompt: str
    rendering_speed: Optional[RenderingSpeed1] = None
    seed: Optional[int] = Field(None, ge=0, le=2147483647)
    style_codes: Optional[List[str]] = None
    style_reference_images: Optional[List[StrictBytes]] = None


# Preset palette reference used by IdeogramV3Request.
class ColorPalette(BaseModel):
    name: str = Field(..., description='Name of the color palette', examples=['PASTEL'])


class MagicPrompt2(str, Enum):
    ON = 'ON'
    OFF = 'OFF'


class StyleType1(str, Enum):
    AUTO = 'AUTO'
    GENERAL = 'GENERAL'
    REALISTIC = 'REALISTIC'
    DESIGN = 'DESIGN'
    FICTION = 'FICTION'


class RenderingSpeed(str, Enum):
    DEFAULT = 'DEFAULT'
    TURBO = 'TURBO'
    QUALITY = 'QUALITY'


class IdeogramV3EditRequest(BaseModel):
    color_palette: Optional[IdeogramColorPalette] = None
    image: Optional[StrictBytes] = Field(
        None,
        description='The image being edited (max size 10MB); only JPEG, WebP and PNG formats are supported at this time.',
    )
    magic_prompt: Optional[str] = Field(
        None,
        description='Determine if MagicPrompt should be used in generating the request or not.',
    )
    mask: Optional[StrictBytes] = Field(
        None,
        description='A black and white image of the same size as the image being edited (max size 10MB). Black regions in the mask should match up with the regions of the image that you would like to edit; only JPEG, WebP and PNG formats are supported at this time.',
    )
    num_images: Optional[int] = Field(
        None, description='The number of images to generate.'
    )
    prompt: str = Field(
        ..., description='The prompt used to describe the edited result.'
    )
    rendering_speed: RenderingSpeed
    seed: Optional[int] = Field(
        None, description='Random seed. Set for reproducible generation.'
    )
    style_codes: Optional[List[StyleCode]] = Field(
        None,
        description='A list of 8 character hexadecimal codes representing the style of the image. Cannot be used in conjunction with style_reference_images or style_type.',
    )
    style_reference_images: Optional[List[StrictBytes]] = Field(
        None,
        description='A set of images to use as style references (maximum total size 10MB across all style references). The images should be in JPEG, PNG or WebP format.',
    )
    character_reference_images: Optional[List[str]] = Field(
        None,
        description='Generations with character reference are subject to the character reference pricing. A set of images to use as character references (maximum total size 10MB across all character references), currently only supports 1 character reference image. The images should be in JPEG, PNG or WebP format.'
    )
    character_reference_images_mask: Optional[List[str]] = Field(
        None,
        description='Optional masks for character reference images. When provided, must match the number of character_reference_images. Each mask should be a grayscale image of the same dimensions as the corresponding character reference image. The images should be in JPEG, PNG or WebP format.'
    )


class IdeogramV3Request(BaseModel):
    aspect_ratio: Optional[str] = Field(
        None, description='Aspect ratio in format WxH', examples=['1x3']
    )
    color_palette: Optional[ColorPalette] = None
    magic_prompt: Optional[MagicPrompt2] = Field(
        None, description='Whether to enable magic prompt enhancement'
    )
    negative_prompt: Optional[str] = Field(
        None, description='Text prompt specifying what to avoid in the generation'
    )
    num_images: Optional[int] = Field(
        None, description='Number of images to generate', ge=1
    )
    prompt: str = Field(..., description='The text prompt for image generation')
    rendering_speed: RenderingSpeed
    resolution: Optional[str] = Field(
        None, description='Image resolution in format WxH', examples=['1280x800']
    )
    seed: Optional[int] = Field(
        None, description='Seed value for reproducible generation'
    )
    style_codes: Optional[List[StyleCode]] = Field(
        None, description='Array of style codes in hexadecimal format'
    )
    style_reference_images: Optional[List[str]] = Field(
        None, description='Array of reference image URLs or identifiers'
    )
    style_type: Optional[StyleType1] = Field(
        None, description='The type of style to apply'
    )
    character_reference_images: Optional[List[str]] = Field(
        None,
        description='Generations with character reference are subject to the character reference pricing. A set of images to use as character references (maximum total size 10MB across all character references), currently only supports 1 character reference image. The images should be in JPEG, PNG or WebP format.'
    )
    character_reference_images_mask: Optional[List[str]] = Field(
        None,
        description='Optional masks for character reference images. When provided, must match the number of character_reference_images. Each mask should be a grayscale image of the same dimensions as the corresponding character reference image. The images should be in JPEG, PNG or WebP format.'
    )
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/apis/ideogram.py", "license": "GNU General Public License v3.0", "lines": 245, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy_api_nodes/apis/moonvalley.py
"""Pydantic models for the Moonvalley video-generation API.

# comments are used instead of class docstrings because pydantic lifts model
docstrings into the generated JSON schema.
"""
from enum import Enum
from typing import Optional, Dict, Any
from pydantic import BaseModel, Field, StrictBytes


# Response of a prompt submission / status query; all fields optional because
# their presence depends on the job's state.
class MoonvalleyPromptResponse(BaseModel):
    error: Optional[Dict[str, Any]] = None
    frame_conditioning: Optional[Dict[str, Any]] = None
    id: Optional[str] = None
    inference_params: Optional[Dict[str, Any]] = None
    meta: Optional[Dict[str, Any]] = None
    model_params: Optional[Dict[str, Any]] = None
    output_url: Optional[str] = None
    prompt_text: Optional[str] = None
    status: Optional[str] = None


# Tunable inference parameters for text-to-video generation.
class MoonvalleyTextToVideoInferenceParams(BaseModel):
    add_quality_guidance: Optional[bool] = Field(
        True, description='Whether to add quality guidance'
    )
    caching_coefficient: Optional[float] = Field(
        0.3, description='Caching coefficient for optimization'
    )
    caching_cooldown: Optional[int] = Field(
        3, description='Number of caching cooldown steps'
    )
    caching_warmup: Optional[int] = Field(
        3, description='Number of caching warmup steps'
    )
    clip_value: Optional[float] = Field(
        3, description='CLIP value for generation control'
    )
    conditioning_frame_index: Optional[int] = Field(
        0, description='Index of the conditioning frame'
    )
    cooldown_steps: Optional[int] = Field(
        75, description='Number of cooldown steps (calculated based on num_frames)'
    )
    fps: Optional[int] = Field(
        24, description='Frames per second of the generated video'
    )
    guidance_scale: Optional[float] = Field(
        10, description='Guidance scale for generation control'
    )
    height: Optional[int] = Field(
        1080, description='Height of the generated video in pixels'
    )
    negative_prompt: Optional[str] = Field(None, description='Negative prompt text')
    num_frames: Optional[int] = Field(64, description='Number of frames to generate')
    seed: Optional[int] = Field(
        None, description='Random seed for generation (default: random)'
    )
    shift_value: Optional[float] = Field(
        3, description='Shift value for generation control'
    )
    steps: Optional[int] = Field(80, description='Number of denoising steps')
    use_guidance_schedule: Optional[bool] = Field(
        True, description='Whether to use guidance scheduling'
    )
    use_negative_prompts: Optional[bool] = Field(
        False, description='Whether to use negative prompts'
    )
    use_timestep_transform: Optional[bool] = Field(
        True, description='Whether to use timestep transformation'
    )
    warmup_steps: Optional[int] = Field(
        0, description='Number of warmup steps (calculated based on num_frames)'
    )
    width: Optional[int] = Field(
        1920, description='Width of the generated video in pixels'
    )


class MoonvalleyTextToVideoRequest(BaseModel):
    image_url: Optional[str] = None
    inference_params: Optional[MoonvalleyTextToVideoInferenceParams] = None
    prompt_text: Optional[str] = None
    webhook_url: Optional[str] = None


class MoonvalleyUploadFileRequest(BaseModel):
    file: Optional[StrictBytes] = None


class MoonvalleyUploadFileResponse(BaseModel):
    access_url: Optional[str] = None


# Tunable inference parameters for video-to-video generation. Mirrors the
# text-to-video parameters but with different defaults (cooldown_steps,
# guidance_scale, warmup_steps) and without fps/size/num_frames fields.
class MoonvalleyVideoToVideoInferenceParams(BaseModel):
    add_quality_guidance: Optional[bool] = Field(
        True, description='Whether to add quality guidance'
    )
    caching_coefficient: Optional[float] = Field(
        0.3, description='Caching coefficient for optimization'
    )
    caching_cooldown: Optional[int] = Field(
        3, description='Number of caching cooldown steps'
    )
    caching_warmup: Optional[int] = Field(
        3, description='Number of caching warmup steps'
    )
    clip_value: Optional[float] = Field(
        3, description='CLIP value for generation control'
    )
    conditioning_frame_index: Optional[int] = Field(
        0, description='Index of the conditioning frame'
    )
    cooldown_steps: Optional[int] = Field(
        36, description='Number of cooldown steps (calculated based on num_frames)'
    )
    guidance_scale: Optional[float] = Field(
        15, description='Guidance scale for generation control'
    )
    negative_prompt: Optional[str] = Field(None, description='Negative prompt text')
    seed: Optional[int] = Field(
        None, description='Random seed for generation (default: random)'
    )
    shift_value: Optional[float] = Field(
        3, description='Shift value for generation control'
    )
    steps: Optional[int] = Field(80, description='Number of denoising steps')
    use_guidance_schedule: Optional[bool] = Field(
        True, description='Whether to use guidance scheduling'
    )
    use_negative_prompts: Optional[bool] = Field(
        False, description='Whether to use negative prompts'
    )
    use_timestep_transform: Optional[bool] = Field(
        True, description='Whether to use timestep transformation'
    )
    warmup_steps: Optional[int] = Field(
        24, description='Number of warmup steps (calculated based on num_frames)'
    )


class ControlType(str, Enum):
    motion_control = 'motion_control'
    pose_control = 'pose_control'


class MoonvalleyVideoToVideoRequest(BaseModel):
    control_type: ControlType = Field(
        ..., description='Supported types for video control'
    )
    inference_params: Optional[MoonvalleyVideoToVideoInferenceParams] = None
    prompt_text: str = Field(..., description='Describes the video to generate')
    video_url: str = Field(..., description='Url to control video')
    webhook_url: Optional[str] = Field(
        None, description='Optional webhook URL for notifications'
    )
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/apis/moonvalley.py", "license": "GNU General Public License v3.0", "lines": 135, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_api_nodes/apis/openai.py
"""Pydantic models for the OpenAI image and Responses APIs.

# comments are used instead of class docstrings because pydantic lifts model
docstrings into the generated JSON schema.
"""
from pydantic import BaseModel, Field


# One generated image in an image generation/edit response.
class Datum2(BaseModel):
    b64_json: str | None = Field(None, description="Base64 encoded image data")
    revised_prompt: str | None = Field(None, description="Revised prompt")
    url: str | None = Field(None, description="URL of the image")


class InputTokensDetails(BaseModel):
    image_tokens: int | None = Field(None)
    text_tokens: int | None = Field(None)


class Usage(BaseModel):
    input_tokens: int | None = Field(None)
    input_tokens_details: InputTokensDetails | None = Field(None)
    output_tokens: int | None = Field(None)
    total_tokens: int | None = Field(None)


class OpenAIImageGenerationResponse(BaseModel):
    data: list[Datum2] | None = Field(None)
    usage: Usage | None = Field(None)


class OpenAIImageEditRequest(BaseModel):
    background: str | None = Field(None, description="Background transparency")
    model: str = Field(...)
    moderation: str | None = Field(None)
    n: int | None = Field(None, description="The number of images to generate")
    output_compression: int | None = Field(None, description="Compression level for JPEG or WebP (0-100)")
    output_format: str | None = Field(None)
    prompt: str = Field(...)
    # Fixed: the original description was a copy-paste of the *size* description
    # ("Size of the image (e.g., 1024x1024, 1536x1024, auto)"); this matches
    # the quality description on OpenAIImageGenerationRequest below.
    quality: str | None = Field(None, description="The quality of the generated image")
    size: str | None = Field(None, description="Size of the output image")


class OpenAIImageGenerationRequest(BaseModel):
    background: str | None = Field(None, description="Background transparency")
    model: str | None = Field(None)
    moderation: str | None = Field(None)
    n: int | None = Field(
        None,
        description="The number of images to generate.",
    )
    output_compression: int | None = Field(None, description="Compression level for JPEG or WebP (0-100)")
    output_format: str | None = Field(None)
    prompt: str = Field(...)
    quality: str | None = Field(None, description="The quality of the generated image")
    size: str | None = Field(None, description="Size of the image (e.g., 1024x1024, 1536x1024, auto)")
    style: str | None = Field(None, description="Style of the image (only for dall-e-3)")


# Sampling/model settings shared by Responses API requests and responses.
class ModelResponseProperties(BaseModel):
    instructions: str | None = Field(None)
    max_output_tokens: int | None = Field(None)
    model: str | None = Field(None)
    temperature: float | None = Field(1, description="Controls randomness in the response", ge=0.0, le=2.0)
    top_p: float | None = Field(
        1,
        description="Controls diversity of the response via nucleus sampling",
        ge=0.0,
        le=1.0,
    )
    truncation: str | None = Field("disabled", description="Allowed values: 'auto' or 'disabled'")


class ResponseProperties(BaseModel):
    instructions: str | None = Field(None)
    max_output_tokens: int | None = Field(None)
    model: str | None = Field(None)
    previous_response_id: str | None = Field(None)
    truncation: str | None = Field("disabled", description="Allowed values: 'auto' or 'disabled'")


class ResponseError(BaseModel):
    code: str = Field(...)
    message: str = Field(...)


class OutputTokensDetails(BaseModel):
    reasoning_tokens: int = Field(..., description="The number of reasoning tokens.")


class CachedTokensDetails(BaseModel):
    cached_tokens: int = Field(
        ...,
        description="The number of tokens that were retrieved from the cache.",
    )


class ResponseUsage(BaseModel):
    input_tokens: int = Field(..., description="The number of input tokens.")
    input_tokens_details: CachedTokensDetails = Field(...)
    output_tokens: int = Field(..., description="The number of output tokens.")
    output_tokens_details: OutputTokensDetails = Field(...)
    total_tokens: int = Field(..., description="The total number of tokens used.")


class InputTextContent(BaseModel):
    text: str = Field(..., description="The text input to the model.")
    type: str = Field("input_text")


class OutputContent(BaseModel):
    type: str = Field(..., description="The type of output content")
    text: str | None = Field(None, description="The text content")
    data: str | None = Field(None, description="Base64-encoded audio data")
    transcript: str | None = Field(None, description="Transcript of the audio")


class OutputMessage(BaseModel):
    type: str = Field(..., description="The type of output item")
    content: list[OutputContent] | None = Field(None, description="The content of the message")
    role: str | None = Field(None, description="The role of the message")


class OpenAIResponse(ModelResponseProperties, ResponseProperties):
    created_at: float | None = Field(
        None,
        description="Unix timestamp (in seconds) of when this Response was created.",
    )
    error: ResponseError | None = Field(None)
    id: str | None = Field(None, description="Unique identifier for this Response.")
    object: str | None = Field(None, description="The object type of this resource - always set to `response`.")
    output: list[OutputMessage] | None = Field(None)
    parallel_tool_calls: bool | None = Field(True)
    status: str | None = Field(
        None,
        description="One of `completed`, `failed`, `in_progress`, or `incomplete`.",
    )
    usage: ResponseUsage | None = Field(None)


class InputImageContent(BaseModel):
    detail: str = Field(..., description="One of `high`, `low`, or `auto`. Defaults to `auto`.")
    file_id: str | None = Field(None)
    image_url: str | None = Field(None)
    type: str = Field(..., description="The type of the input item. Always `input_image`.")


class InputFileContent(BaseModel):
    file_data: str | None = Field(None)
    file_id: str | None = Field(None)
    filename: str | None = Field(None, description="The name of the file to be sent to the model.")
    type: str = Field(..., description="The type of the input item. Always `input_file`.")


class InputMessage(BaseModel):
    content: list[InputTextContent | InputImageContent | InputFileContent] = Field(
        ...,
        description="A list of one or many input items to the model, containing different content types.",
    )
    role: str | None = Field(None)
    type: str | None = Field(None)


class OpenAICreateResponse(ModelResponseProperties, ResponseProperties):
    include: str | None = Field(None)
    input: list[InputMessage] = Field(...)
    parallel_tool_calls: bool | None = Field(
        True, description="Whether to allow the model to run tool calls in parallel."
    )
    store: bool | None = Field(
        True,
        description="Whether to store the generated model response for later retrieval via API.",
    )
    stream: bool | None = Field(False)
    usage: ResponseUsage | None = Field(None)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/apis/openai.py", "license": "GNU General Public License v3.0", "lines": 130, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_api_nodes/apis/runway.py
from enum import Enum from typing import Optional, List, Union from datetime import datetime from pydantic import BaseModel, Field, RootModel class RunwayAspectRatioEnum(str, Enum): field_1280_720 = '1280:720' field_720_1280 = '720:1280' field_1104_832 = '1104:832' field_832_1104 = '832:1104' field_960_960 = '960:960' field_1584_672 = '1584:672' field_1280_768 = '1280:768' field_768_1280 = '768:1280' class Position(str, Enum): first = 'first' last = 'last' class RunwayPromptImageDetailedObject(BaseModel): position: Position = Field( ..., description="The position of the image in the output video. 'last' is currently supported for gen3a_turbo only.", ) uri: str = Field( ..., description='A HTTPS URL or data URI containing an encoded image.' ) class RunwayPromptImageObject( RootModel[Union[str, List[RunwayPromptImageDetailedObject]]] ): root: Union[str, List[RunwayPromptImageDetailedObject]] = Field( ..., description='Image(s) to use for the video generation. Can be a single URI or an array of image objects with positions.', ) class RunwayModelEnum(str, Enum): gen4_turbo = 'gen4_turbo' gen3a_turbo = 'gen3a_turbo' class RunwayDurationEnum(int, Enum): integer_5 = 5 integer_10 = 10 class RunwayImageToVideoRequest(BaseModel): duration: RunwayDurationEnum model: RunwayModelEnum promptImage: RunwayPromptImageObject promptText: Optional[str] = Field( None, description='Text prompt for the generation', max_length=1000 ) ratio: RunwayAspectRatioEnum seed: int = Field( ..., description='Random seed for generation', ge=0, le=4294967295 ) class RunwayImageToVideoResponse(BaseModel): id: Optional[str] = Field(None, description='Task ID') class RunwayTaskStatusEnum(str, Enum): SUCCEEDED = 'SUCCEEDED' RUNNING = 'RUNNING' FAILED = 'FAILED' PENDING = 'PENDING' CANCELLED = 'CANCELLED' THROTTLED = 'THROTTLED' class RunwayTaskStatusResponse(BaseModel): createdAt: datetime = Field(..., description='Task creation timestamp') id: str = Field(..., description='Task ID') output: 
Optional[List[str]] = Field(None, description='Array of output video URLs') progress: Optional[float] = Field( None, description='Float value between 0 and 1 representing the progress of the task. Only available if status is RUNNING.', ge=0.0, le=1.0, ) status: RunwayTaskStatusEnum class Model4(str, Enum): gen4_image = 'gen4_image' class ReferenceImage(BaseModel): uri: Optional[str] = Field( None, description='A HTTPS URL or data URI containing an encoded image' ) class RunwayTextToImageAspectRatioEnum(str, Enum): field_1920_1080 = '1920:1080' field_1080_1920 = '1080:1920' field_1024_1024 = '1024:1024' field_1360_768 = '1360:768' field_1080_1080 = '1080:1080' field_1168_880 = '1168:880' field_1440_1080 = '1440:1080' field_1080_1440 = '1080:1440' field_1808_768 = '1808:768' field_2112_912 = '2112:912' class RunwayTextToImageRequest(BaseModel): model: Model4 = Field(..., description='Model to use for generation') promptText: str = Field( ..., description='Text prompt for the image generation', max_length=1000 ) ratio: RunwayTextToImageAspectRatioEnum referenceImages: Optional[List[ReferenceImage]] = Field( None, description='Array of reference images to guide the generation' ) class RunwayTextToImageResponse(BaseModel): id: Optional[str] = Field(None, description='Task ID')
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/apis/runway.py", "license": "GNU General Public License v3.0", "lines": 96, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_api_nodes/apis/meshy.py
from typing import TypedDict from pydantic import BaseModel, Field from comfy_api.latest import Input class InputShouldRemesh(TypedDict): should_remesh: str topology: str target_polycount: int class InputShouldTexture(TypedDict): should_texture: str enable_pbr: bool texture_prompt: str texture_image: Input.Image | None class MeshyTaskResponse(BaseModel): result: str = Field(...) class MeshyTextToModelRequest(BaseModel): mode: str = Field("preview") prompt: str = Field(..., max_length=600) art_style: str = Field(..., description="'realistic' or 'sculpture'") ai_model: str = Field(...) topology: str | None = Field(..., description="'quad' or 'triangle'") target_polycount: int | None = Field(..., ge=100, le=300000) should_remesh: bool = Field( True, description="False returns the original mesh, ignoring topology and polycount.", ) symmetry_mode: str = Field(..., description="'auto', 'off' or 'on'") pose_mode: str = Field(...) seed: int = Field(...) moderation: bool = Field(False) class MeshyRefineTask(BaseModel): mode: str = Field("refine") preview_task_id: str = Field(...) enable_pbr: bool | None = Field(...) texture_prompt: str | None = Field(...) texture_image_url: str | None = Field(...) ai_model: str = Field(...) moderation: bool = Field(False) class MeshyImageToModelRequest(BaseModel): image_url: str = Field(...) ai_model: str = Field(...) topology: str | None = Field(..., description="'quad' or 'triangle'") target_polycount: int | None = Field(..., ge=100, le=300000) symmetry_mode: str = Field(..., description="'auto', 'off' or 'on'") should_remesh: bool = Field( True, description="False returns the original mesh, ignoring topology and polycount.", ) should_texture: bool = Field(...) enable_pbr: bool | None = Field(...) pose_mode: str = Field(...) texture_prompt: str | None = Field(None, max_length=600) texture_image_url: str | None = Field(None) seed: int = Field(...) 
moderation: bool = Field(False) class MeshyMultiImageToModelRequest(BaseModel): image_urls: list[str] = Field(...) ai_model: str = Field(...) topology: str | None = Field(..., description="'quad' or 'triangle'") target_polycount: int | None = Field(..., ge=100, le=300000) symmetry_mode: str = Field(..., description="'auto', 'off' or 'on'") should_remesh: bool = Field( True, description="False returns the original mesh, ignoring topology and polycount.", ) should_texture: bool = Field(...) enable_pbr: bool | None = Field(...) pose_mode: str = Field(...) texture_prompt: str | None = Field(None, max_length=600) texture_image_url: str | None = Field(None) seed: int = Field(...) moderation: bool = Field(False) class MeshyRiggingRequest(BaseModel): input_task_id: str = Field(...) height_meters: float = Field(...) texture_image_url: str | None = Field(...) class MeshyAnimationRequest(BaseModel): rig_task_id: str = Field(...) action_id: int = Field(...) class MeshyTextureRequest(BaseModel): input_task_id: str = Field(...) ai_model: str = Field(...) enable_original_uv: bool = Field(...) enable_pbr: bool = Field(...) text_style_prompt: str | None = Field(...) image_style_url: str | None = Field(...) class MeshyModelsUrls(BaseModel): glb: str = Field("") fbx: str = Field("") usdz: str = Field("") obj: str = Field("") class MeshyRiggedModelsUrls(BaseModel): rigged_character_glb_url: str = Field("") rigged_character_fbx_url: str = Field("") class MeshyAnimatedModelsUrls(BaseModel): animation_glb_url: str = Field("") animation_fbx_url: str = Field("") class MeshyResultTextureUrls(BaseModel): base_color: str = Field(...) metallic: str | None = Field(None) normal: str | None = Field(None) roughness: str | None = Field(None) class MeshyTaskError(BaseModel): message: str | None = Field(None) class MeshyModelResult(BaseModel): id: str = Field(...) type: str = Field(...) model_urls: MeshyModelsUrls = Field(MeshyModelsUrls()) thumbnail_url: str = Field(...) 
video_url: str | None = Field(None) status: str = Field(...) progress: int = Field(0) texture_urls: list[MeshyResultTextureUrls] | None = Field([]) task_error: MeshyTaskError | None = Field(None) class MeshyRiggedResult(BaseModel): id: str = Field(...) type: str = Field(...) status: str = Field(...) progress: int = Field(0) result: MeshyRiggedModelsUrls = Field(MeshyRiggedModelsUrls()) task_error: MeshyTaskError | None = Field(None) class MeshyAnimationResult(BaseModel): id: str = Field(...) type: str = Field(...) status: str = Field(...) progress: int = Field(0) result: MeshyAnimatedModelsUrls = Field(MeshyAnimatedModelsUrls()) task_error: MeshyTaskError | None = Field(None)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/apis/meshy.py", "license": "GNU General Public License v3.0", "lines": 127, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_api_nodes/nodes_meshy.py
from typing_extensions import override from comfy_api.latest import IO, ComfyExtension, Input from comfy_api_nodes.apis.meshy import ( InputShouldRemesh, InputShouldTexture, MeshyAnimationRequest, MeshyAnimationResult, MeshyImageToModelRequest, MeshyModelResult, MeshyMultiImageToModelRequest, MeshyRefineTask, MeshyRiggedResult, MeshyRiggingRequest, MeshyTaskResponse, MeshyTextToModelRequest, MeshyTextureRequest, ) from comfy_api_nodes.util import ( ApiEndpoint, download_url_to_file_3d, poll_op, sync_op, upload_images_to_comfyapi, validate_string, ) class MeshyTextToModelNode(IO.ComfyNode): @classmethod def define_schema(cls): return IO.Schema( node_id="MeshyTextToModelNode", display_name="Meshy: Text to Model", category="api node/3d/Meshy", inputs=[ IO.Combo.Input("model", options=["latest"]), IO.String.Input("prompt", multiline=True, default=""), IO.Combo.Input("style", options=["realistic", "sculpture"]), IO.DynamicCombo.Input( "should_remesh", options=[ IO.DynamicCombo.Option( "true", [ IO.Combo.Input("topology", options=["triangle", "quad"]), IO.Int.Input( "target_polycount", default=300000, min=100, max=300000, display_mode=IO.NumberDisplay.number, ), ], ), IO.DynamicCombo.Option("false", []), ], tooltip="When set to false, returns an unprocessed triangular mesh.", ), IO.Combo.Input("symmetry_mode", options=["auto", "on", "off"], advanced=True), IO.Combo.Input( "pose_mode", options=["", "A-pose", "T-pose"], tooltip="Specify the pose mode for the generated model.", advanced=True, ), IO.Int.Input( "seed", default=0, min=0, max=2147483647, display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed controls whether the node should re-run; " "results are non-deterministic regardless of seed.", ), ], outputs=[ IO.String.Output(display_name="model_file"), # for backward compatibility only IO.Custom("MESHY_TASK_ID").Output(display_name="meshy_task_id"), IO.File3DGLB.Output(display_name="GLB"), IO.File3DFBX.Output(display_name="FBX"), ], hidden=[ 
IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, is_output_node=True, price_badge=IO.PriceBadge( expr="""{"type":"usd","usd":0.8}""", ), ) @classmethod async def execute( cls, model: str, prompt: str, style: str, should_remesh: InputShouldRemesh, symmetry_mode: str, pose_mode: str, seed: int, ) -> IO.NodeOutput: validate_string(prompt, field_name="prompt", min_length=1, max_length=600) response = await sync_op( cls, ApiEndpoint(path="/proxy/meshy/openapi/v2/text-to-3d", method="POST"), response_model=MeshyTaskResponse, data=MeshyTextToModelRequest( prompt=prompt, art_style=style, ai_model=model, topology=should_remesh.get("topology", None), target_polycount=should_remesh.get("target_polycount", None), should_remesh=should_remesh["should_remesh"] == "true", symmetry_mode=symmetry_mode, pose_mode=pose_mode.lower(), seed=seed, ), ) task_id = response.result result = await poll_op( cls, ApiEndpoint(path=f"/proxy/meshy/openapi/v2/text-to-3d/{task_id}"), response_model=MeshyModelResult, status_extractor=lambda r: r.status, progress_extractor=lambda r: r.progress, ) return IO.NodeOutput( f"{task_id}.glb", task_id, await download_url_to_file_3d(result.model_urls.glb, "glb", task_id=task_id), await download_url_to_file_3d(result.model_urls.fbx, "fbx", task_id=task_id), ) class MeshyRefineNode(IO.ComfyNode): @classmethod def define_schema(cls): return IO.Schema( node_id="MeshyRefineNode", display_name="Meshy: Refine Draft Model", category="api node/3d/Meshy", description="Refine a previously created draft model.", inputs=[ IO.Combo.Input("model", options=["latest"]), IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"), IO.Boolean.Input( "enable_pbr", default=False, tooltip="Generate PBR Maps (metallic, roughness, normal) in addition to the base color. 
" "Note: this should be set to false when using Sculpture style, " "as Sculpture style generates its own set of PBR maps.", advanced=True, ), IO.String.Input( "texture_prompt", default="", multiline=True, tooltip="Provide a text prompt to guide the texturing process. " "Maximum 600 characters. Cannot be used at the same time as 'texture_image'.", ), IO.Image.Input( "texture_image", tooltip="Only one of 'texture_image' or 'texture_prompt' may be used at the same time.", optional=True, ), ], outputs=[ IO.String.Output(display_name="model_file"), # for backward compatibility only IO.Custom("MESHY_TASK_ID").Output(display_name="meshy_task_id"), IO.File3DGLB.Output(display_name="GLB"), IO.File3DFBX.Output(display_name="FBX"), ], hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, is_output_node=True, price_badge=IO.PriceBadge( expr="""{"type":"usd","usd":0.4}""", ), ) @classmethod async def execute( cls, model: str, meshy_task_id: str, enable_pbr: bool, texture_prompt: str, texture_image: Input.Image | None = None, ) -> IO.NodeOutput: if texture_prompt and texture_image is not None: raise ValueError("texture_prompt and texture_image cannot be used at the same time") texture_image_url = None if texture_prompt: validate_string(texture_prompt, field_name="texture_prompt", max_length=600) if texture_image is not None: texture_image_url = (await upload_images_to_comfyapi(cls, texture_image, wait_label="Uploading texture"))[0] response = await sync_op( cls, endpoint=ApiEndpoint(path="/proxy/meshy/openapi/v2/text-to-3d", method="POST"), response_model=MeshyTaskResponse, data=MeshyRefineTask( preview_task_id=meshy_task_id, enable_pbr=enable_pbr, texture_prompt=texture_prompt if texture_prompt else None, texture_image_url=texture_image_url, ai_model=model, ), ) task_id = response.result result = await poll_op( cls, ApiEndpoint(path=f"/proxy/meshy/openapi/v2/text-to-3d/{task_id}"), response_model=MeshyModelResult, 
status_extractor=lambda r: r.status, progress_extractor=lambda r: r.progress, ) return IO.NodeOutput( f"{task_id}.glb", task_id, await download_url_to_file_3d(result.model_urls.glb, "glb", task_id=task_id), await download_url_to_file_3d(result.model_urls.fbx, "fbx", task_id=task_id), ) class MeshyImageToModelNode(IO.ComfyNode): @classmethod def define_schema(cls): return IO.Schema( node_id="MeshyImageToModelNode", display_name="Meshy: Image to Model", category="api node/3d/Meshy", inputs=[ IO.Combo.Input("model", options=["latest"]), IO.Image.Input("image"), IO.DynamicCombo.Input( "should_remesh", options=[ IO.DynamicCombo.Option( "true", [ IO.Combo.Input("topology", options=["triangle", "quad"]), IO.Int.Input( "target_polycount", default=300000, min=100, max=300000, display_mode=IO.NumberDisplay.number, ), ], ), IO.DynamicCombo.Option("false", []), ], tooltip="When set to false, returns an unprocessed triangular mesh.", ), IO.Combo.Input("symmetry_mode", options=["auto", "on", "off"]), IO.DynamicCombo.Input( "should_texture", options=[ IO.DynamicCombo.Option( "true", [ IO.Boolean.Input( "enable_pbr", default=False, tooltip="Generate PBR Maps (metallic, roughness, normal) " "in addition to the base color.", ), IO.String.Input( "texture_prompt", default="", multiline=True, tooltip="Provide a text prompt to guide the texturing process. " "Maximum 600 characters. Cannot be used at the same time as 'texture_image'.", ), IO.Image.Input( "texture_image", tooltip="Only one of 'texture_image' or 'texture_prompt' " "may be used at the same time.", optional=True, ), ], ), IO.DynamicCombo.Option("false", []), ], tooltip="Determines whether textures are generated. 
" "Setting it to false skips the texture phase and returns a mesh without textures.", ), IO.Combo.Input( "pose_mode", options=["", "A-pose", "T-pose"], tooltip="Specify the pose mode for the generated model.", advanced=True, ), IO.Int.Input( "seed", default=0, min=0, max=2147483647, display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed controls whether the node should re-run; " "results are non-deterministic regardless of seed.", ), ], outputs=[ IO.String.Output(display_name="model_file"), # for backward compatibility only IO.Custom("MESHY_TASK_ID").Output(display_name="meshy_task_id"), IO.File3DGLB.Output(display_name="GLB"), IO.File3DFBX.Output(display_name="FBX"), ], hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, is_output_node=True, price_badge=IO.PriceBadge( depends_on=IO.PriceBadgeDepends(widgets=["should_texture"]), expr=""" ( $prices := {"true": 1.2, "false": 0.8}; {"type":"usd","usd": $lookup($prices, widgets.should_texture)} ) """, ), ) @classmethod async def execute( cls, model: str, image: Input.Image, should_remesh: InputShouldRemesh, symmetry_mode: str, should_texture: InputShouldTexture, pose_mode: str, seed: int, ) -> IO.NodeOutput: texture = should_texture["should_texture"] == "true" texture_image_url = texture_prompt = None if texture: if should_texture["texture_prompt"] and should_texture["texture_image"] is not None: raise ValueError("texture_prompt and texture_image cannot be used at the same time") if should_texture["texture_prompt"]: validate_string(should_texture["texture_prompt"], field_name="texture_prompt", max_length=600) texture_prompt = should_texture["texture_prompt"] if should_texture["texture_image"] is not None: texture_image_url = ( await upload_images_to_comfyapi( cls, should_texture["texture_image"], wait_label="Uploading texture" ) )[0] response = await sync_op( cls, ApiEndpoint(path="/proxy/meshy/openapi/v1/image-to-3d", method="POST"), 
response_model=MeshyTaskResponse, data=MeshyImageToModelRequest( image_url=(await upload_images_to_comfyapi(cls, image, wait_label="Uploading base image"))[0], ai_model=model, topology=should_remesh.get("topology", None), target_polycount=should_remesh.get("target_polycount", None), symmetry_mode=symmetry_mode, should_remesh=should_remesh["should_remesh"] == "true", should_texture=texture, enable_pbr=should_texture.get("enable_pbr", None), pose_mode=pose_mode.lower(), texture_prompt=texture_prompt, texture_image_url=texture_image_url, seed=seed, ), ) task_id = response.result result = await poll_op( cls, ApiEndpoint(path=f"/proxy/meshy/openapi/v1/image-to-3d/{task_id}"), response_model=MeshyModelResult, status_extractor=lambda r: r.status, progress_extractor=lambda r: r.progress, ) return IO.NodeOutput( f"{task_id}.glb", task_id, await download_url_to_file_3d(result.model_urls.glb, "glb", task_id=task_id), await download_url_to_file_3d(result.model_urls.fbx, "fbx", task_id=task_id), ) class MeshyMultiImageToModelNode(IO.ComfyNode): @classmethod def define_schema(cls): return IO.Schema( node_id="MeshyMultiImageToModelNode", display_name="Meshy: Multi-Image to Model", category="api node/3d/Meshy", inputs=[ IO.Combo.Input("model", options=["latest"]), IO.Autogrow.Input( "images", template=IO.Autogrow.TemplatePrefix(IO.Image.Input("image"), prefix="image", min=2, max=4), ), IO.DynamicCombo.Input( "should_remesh", options=[ IO.DynamicCombo.Option( "true", [ IO.Combo.Input("topology", options=["triangle", "quad"]), IO.Int.Input( "target_polycount", default=300000, min=100, max=300000, display_mode=IO.NumberDisplay.number, ), ], ), IO.DynamicCombo.Option("false", []), ], tooltip="When set to false, returns an unprocessed triangular mesh.", ), IO.Combo.Input("symmetry_mode", options=["auto", "on", "off"], advanced=True), IO.DynamicCombo.Input( "should_texture", options=[ IO.DynamicCombo.Option( "true", [ IO.Boolean.Input( "enable_pbr", default=False, tooltip="Generate PBR 
Maps (metallic, roughness, normal) " "in addition to the base color.", ), IO.String.Input( "texture_prompt", default="", multiline=True, tooltip="Provide a text prompt to guide the texturing process. " "Maximum 600 characters. Cannot be used at the same time as 'texture_image'.", ), IO.Image.Input( "texture_image", tooltip="Only one of 'texture_image' or 'texture_prompt' " "may be used at the same time.", optional=True, ), ], ), IO.DynamicCombo.Option("false", []), ], tooltip="Determines whether textures are generated. " "Setting it to false skips the texture phase and returns a mesh without textures.", ), IO.Combo.Input( "pose_mode", options=["", "A-pose", "T-pose"], tooltip="Specify the pose mode for the generated model.", advanced=True, ), IO.Int.Input( "seed", default=0, min=0, max=2147483647, display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed controls whether the node should re-run; " "results are non-deterministic regardless of seed.", ), ], outputs=[ IO.String.Output(display_name="model_file"), # for backward compatibility only IO.Custom("MESHY_TASK_ID").Output(display_name="meshy_task_id"), IO.File3DGLB.Output(display_name="GLB"), IO.File3DFBX.Output(display_name="FBX"), ], hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, is_output_node=True, price_badge=IO.PriceBadge( depends_on=IO.PriceBadgeDepends(widgets=["should_texture"]), expr=""" ( $prices := {"true": 0.6, "false": 0.2}; {"type":"usd","usd": $lookup($prices, widgets.should_texture)} ) """, ), ) @classmethod async def execute( cls, model: str, images: IO.Autogrow.Type, should_remesh: InputShouldRemesh, symmetry_mode: str, should_texture: InputShouldTexture, pose_mode: str, seed: int, ) -> IO.NodeOutput: texture = should_texture["should_texture"] == "true" texture_image_url = texture_prompt = None if texture: if should_texture["texture_prompt"] and should_texture["texture_image"] is not None: raise 
ValueError("texture_prompt and texture_image cannot be used at the same time") if should_texture["texture_prompt"]: validate_string(should_texture["texture_prompt"], field_name="texture_prompt", max_length=600) texture_prompt = should_texture["texture_prompt"] if should_texture["texture_image"] is not None: texture_image_url = ( await upload_images_to_comfyapi( cls, should_texture["texture_image"], wait_label="Uploading texture" ) )[0] response = await sync_op( cls, ApiEndpoint(path="/proxy/meshy/openapi/v1/multi-image-to-3d", method="POST"), response_model=MeshyTaskResponse, data=MeshyMultiImageToModelRequest( image_urls=await upload_images_to_comfyapi( cls, list(images.values()), wait_label="Uploading base images" ), ai_model=model, topology=should_remesh.get("topology", None), target_polycount=should_remesh.get("target_polycount", None), symmetry_mode=symmetry_mode, should_remesh=should_remesh["should_remesh"] == "true", should_texture=texture, enable_pbr=should_texture.get("enable_pbr", None), pose_mode=pose_mode.lower(), texture_prompt=texture_prompt, texture_image_url=texture_image_url, seed=seed, ), ) task_id = response.result result = await poll_op( cls, ApiEndpoint(path=f"/proxy/meshy/openapi/v1/multi-image-to-3d/{task_id}"), response_model=MeshyModelResult, status_extractor=lambda r: r.status, progress_extractor=lambda r: r.progress, ) return IO.NodeOutput( f"{task_id}.glb", task_id, await download_url_to_file_3d(result.model_urls.glb, "glb", task_id=task_id), await download_url_to_file_3d(result.model_urls.fbx, "fbx", task_id=task_id), ) class MeshyRigModelNode(IO.ComfyNode): @classmethod def define_schema(cls): return IO.Schema( node_id="MeshyRigModelNode", display_name="Meshy: Rig Model", category="api node/3d/Meshy", description="Provides a rigged character in standard formats. 
" "Auto-rigging is currently not suitable for untextured meshes, non-humanoid assets, " "or humanoid assets with unclear limb and body structure.", inputs=[ IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"), IO.Float.Input( "height_meters", min=0.1, max=15.0, default=1.7, tooltip="The approximate height of the character model in meters. " "This aids in scaling and rigging accuracy.", ), IO.Image.Input( "texture_image", tooltip="The model's UV-unwrapped base color texture image.", optional=True, ), ], outputs=[ IO.String.Output(display_name="model_file"), # for backward compatibility only IO.Custom("MESHY_RIGGED_TASK_ID").Output(display_name="rig_task_id"), IO.File3DGLB.Output(display_name="GLB"), IO.File3DFBX.Output(display_name="FBX"), ], hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, is_output_node=True, price_badge=IO.PriceBadge( expr="""{"type":"usd","usd":0.2}""", ), ) @classmethod async def execute( cls, meshy_task_id: str, height_meters: float, texture_image: Input.Image | None = None, ) -> IO.NodeOutput: texture_image_url = None if texture_image is not None: texture_image_url = (await upload_images_to_comfyapi(cls, texture_image, wait_label="Uploading texture"))[0] response = await sync_op( cls, endpoint=ApiEndpoint(path="/proxy/meshy/openapi/v1/rigging", method="POST"), response_model=MeshyTaskResponse, data=MeshyRiggingRequest( input_task_id=meshy_task_id, height_meters=height_meters, texture_image_url=texture_image_url, ), ) task_id = response.result result = await poll_op( cls, ApiEndpoint(path=f"/proxy/meshy/openapi/v1/rigging/{task_id}"), response_model=MeshyRiggedResult, status_extractor=lambda r: r.status, progress_extractor=lambda r: r.progress, ) return IO.NodeOutput( f"{task_id}.glb", task_id, await download_url_to_file_3d(result.result.rigged_character_glb_url, "glb", task_id=task_id), await download_url_to_file_3d(result.result.rigged_character_fbx_url, "fbx", task_id=task_id), ) 
class MeshyAnimateModelNode(IO.ComfyNode): @classmethod def define_schema(cls): return IO.Schema( node_id="MeshyAnimateModelNode", display_name="Meshy: Animate Model", category="api node/3d/Meshy", description="Apply a specific animation action to a previously rigged character.", inputs=[ IO.Custom("MESHY_RIGGED_TASK_ID").Input("rig_task_id"), IO.Int.Input( "action_id", default=0, min=0, max=696, tooltip="Visit https://docs.meshy.ai/en/api/animation-library for a list of available values.", ), ], outputs=[ IO.String.Output(display_name="model_file"), # for backward compatibility only IO.File3DGLB.Output(display_name="GLB"), IO.File3DFBX.Output(display_name="FBX"), ], hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, is_output_node=True, price_badge=IO.PriceBadge( expr="""{"type":"usd","usd":0.12}""", ), ) @classmethod async def execute( cls, rig_task_id: str, action_id: int, ) -> IO.NodeOutput: response = await sync_op( cls, endpoint=ApiEndpoint(path="/proxy/meshy/openapi/v1/animations", method="POST"), response_model=MeshyTaskResponse, data=MeshyAnimationRequest( rig_task_id=rig_task_id, action_id=action_id, ), ) task_id = response.result result = await poll_op( cls, ApiEndpoint(path=f"/proxy/meshy/openapi/v1/animations/{task_id}"), response_model=MeshyAnimationResult, status_extractor=lambda r: r.status, progress_extractor=lambda r: r.progress, ) return IO.NodeOutput( f"{task_id}.glb", await download_url_to_file_3d(result.result.animation_glb_url, "glb", task_id=task_id), await download_url_to_file_3d(result.result.animation_fbx_url, "fbx", task_id=task_id), ) class MeshyTextureNode(IO.ComfyNode): @classmethod def define_schema(cls): return IO.Schema( node_id="MeshyTextureNode", display_name="Meshy: Texture Model", category="api node/3d/Meshy", inputs=[ IO.Combo.Input("model", options=["latest"]), IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"), IO.Boolean.Input( "enable_original_uv", default=True, 
tooltip="Use the original UV of the model instead of generating new UVs. " "When enabled, Meshy preserves existing textures from the uploaded model. " "If the model has no original UV, the quality of the output might not be as good.", advanced=True, ), IO.Boolean.Input("pbr", default=False, advanced=True), IO.String.Input( "text_style_prompt", default="", multiline=True, tooltip="Describe your desired texture style of the object using text. Maximum 600 characters." "Maximum 600 characters. Cannot be used at the same time as 'image_style'.", ), IO.Image.Input( "image_style", optional=True, tooltip="A 2d image to guide the texturing process. " "Can not be used at the same time with 'text_style_prompt'.", ), ], outputs=[ IO.String.Output(display_name="model_file"), # for backward compatibility only IO.Custom("MODEL_TASK_ID").Output(display_name="meshy_task_id"), IO.File3DGLB.Output(display_name="GLB"), IO.File3DFBX.Output(display_name="FBX"), ], hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, IO.Hidden.unique_id, ], is_api_node=True, is_output_node=True, price_badge=IO.PriceBadge( expr="""{"type":"usd","usd":0.4}""", ), ) @classmethod async def execute( cls, model: str, meshy_task_id: str, enable_original_uv: bool, pbr: bool, text_style_prompt: str, image_style: Input.Image | None = None, ) -> IO.NodeOutput: if text_style_prompt and image_style is not None: raise ValueError("text_style_prompt and image_style cannot be used at the same time") if not text_style_prompt and image_style is None: raise ValueError("Either text_style_prompt or image_style is required") image_style_url = None if image_style is not None: image_style_url = (await upload_images_to_comfyapi(cls, image_style, wait_label="Uploading style"))[0] response = await sync_op( cls, endpoint=ApiEndpoint(path="/proxy/meshy/openapi/v1/retexture", method="POST"), response_model=MeshyTaskResponse, data=MeshyTextureRequest( input_task_id=meshy_task_id, ai_model=model, 
enable_original_uv=enable_original_uv, enable_pbr=pbr, text_style_prompt=text_style_prompt if text_style_prompt else None, image_style_url=image_style_url, ), ) task_id = response.result result = await poll_op( cls, ApiEndpoint(path=f"/proxy/meshy/openapi/v1/retexture/{task_id}"), response_model=MeshyModelResult, status_extractor=lambda r: r.status, progress_extractor=lambda r: r.progress, ) return IO.NodeOutput( f"{task_id}.glb", task_id, await download_url_to_file_3d(result.model_urls.glb, "glb", task_id=task_id), await download_url_to_file_3d(result.model_urls.fbx, "fbx", task_id=task_id), ) class MeshyExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ MeshyTextToModelNode, MeshyRefineNode, MeshyImageToModelNode, MeshyMultiImageToModelNode, MeshyRigModelNode, MeshyAnimateModelNode, MeshyTextureNode, ] async def comfy_entrypoint() -> MeshyExtension: return MeshyExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/nodes_meshy.py", "license": "GNU General Public License v3.0", "lines": 798, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy_api_nodes/apis/vidu.py
"""Pydantic models for the Vidu video-generation proxy API.

Covers the request bodies sent to the Vidu task-creation endpoints and the
payloads returned when creating or polling a task.
"""

from pydantic import BaseModel, Field


class SubjectReference(BaseModel):
    # A named subject plus the reference images that define it.
    id: str = Field(...)
    images: list[str] = Field(...)


class FrameSetting(BaseModel):
    # One keyframe segment of a multi-frame generation request.
    prompt: str = Field(...)
    key_image: str = Field(...)
    duration: int = Field(...)


class TaskMultiFrameCreationRequest(BaseModel):
    """Request body for multi-frame (keyframe-driven) video generation."""

    model: str = Field(...)
    # Seed is constrained to the signed 32-bit non-negative range.
    seed: int = Field(..., ge=0, le=2147483647)
    resolution: str = Field(...)
    start_image: str = Field(...)
    image_settings: list[FrameSetting] = Field(...)


class TaskExtendCreationRequest(BaseModel):
    """Request body for extending an existing generated video."""

    model: str = Field(...)
    prompt: str = Field(..., max_length=2000)
    duration: int = Field(...)
    seed: int = Field(..., ge=0, le=2147483647)
    resolution: str = Field(...)
    images: list[str] | None = Field(None, description="Base64 encoded string or image URL")
    video_url: str = Field(..., description="URL of the video to extend")


class TaskCreationRequest(BaseModel):
    """Generic task-creation request (text/image/subject-driven generation)."""

    model: str = Field(...)
    prompt: str = Field(..., max_length=2000)
    duration: int = Field(...)
    seed: int = Field(..., ge=0, le=2147483647)
    aspect_ratio: str | None = Field(None)
    resolution: str | None = Field(None)
    movement_amplitude: str | None = Field(None)
    images: list[str] | None = Field(None, description="Base64 encoded string or image URL")
    subjects: list[SubjectReference] | None = Field(None)
    bgm: bool | None = Field(None)
    audio: bool | None = Field(None)


class TaskCreationResponse(BaseModel):
    """Response returned immediately after a task is created."""

    task_id: str = Field(...)
    state: str = Field(...)
    created_at: str = Field(...)
    code: int | None = Field(None, description="Error code")


class TaskResult(BaseModel):
    """One generated artifact inside a finished task."""

    id: str = Field(..., description="Creation id")
    url: str = Field(..., description="The URL of the generated results, valid for one hour")
    cover_url: str = Field(..., description="The cover URL of the generated results, valid for one hour")


class TaskStatusResponse(BaseModel):
    """Polling response describing task progress and, when done, its results."""

    state: str = Field(...)
    err_code: str | None = Field(None)
    progress: float | None = Field(None)
    credits: int | None = Field(None)
    creations: list[TaskResult] = Field(..., description="Generated results")
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/apis/vidu.py", "license": "GNU General Public License v3.0", "lines": 49, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_extras/nodes_image_compare.py
import nodes
from typing_extensions import override

from comfy_api.latest import IO, ComfyExtension


class ImageCompare(IO.ComfyNode):
    """Compares two images with a slider interface."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="ImageCompare",
            display_name="Image Compare",
            description="Compares two images side by side with a slider.",
            category="image",
            is_experimental=True,
            is_output_node=True,
            inputs=[
                IO.Image.Input("image_a", optional=True),
                IO.Image.Input("image_b", optional=True),
                IO.ImageCompare.Input("compare_view"),
            ],
            outputs=[],
        )

    @classmethod
    def execute(cls, image_a=None, image_b=None, compare_view=None) -> IO.NodeOutput:
        # Run each provided, non-empty batch through PreviewImage and hand the
        # saved image references to the frontend compare widget via the ui dict.
        ui_payload = {"a_images": [], "b_images": []}
        previewer = nodes.PreviewImage()
        for slot, batch, prefix in (
            ("a_images", image_a, "comfy.compare.a"),
            ("b_images", image_b, "comfy.compare.b"),
        ):
            if batch is not None and len(batch) > 0:
                ui_payload[slot] = previewer.save_images(batch, prefix)["ui"]["images"]
        return IO.NodeOutput(ui=ui_payload)


class ImageCompareExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [ImageCompare]


async def comfy_entrypoint() -> ImageCompareExtension:
    return ImageCompareExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_image_compare.py", "license": "GNU General Public License v3.0", "lines": 40, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:alembic_db/versions/0001_assets.py
""" Initial assets schema Revision ID: 0001_assets Revises: None Create Date: 2025-12-10 00:00:00 """ from alembic import op import sqlalchemy as sa revision = "0001_assets" down_revision = None branch_labels = None depends_on = None def upgrade() -> None: # ASSETS: content identity op.create_table( "assets", sa.Column("id", sa.String(length=36), primary_key=True), sa.Column("hash", sa.String(length=256), nullable=True), sa.Column("size_bytes", sa.BigInteger(), nullable=False, server_default="0"), sa.Column("mime_type", sa.String(length=255), nullable=True), sa.Column("created_at", sa.DateTime(timezone=False), nullable=False), sa.CheckConstraint("size_bytes >= 0", name="ck_assets_size_nonneg"), ) op.create_index("uq_assets_hash", "assets", ["hash"], unique=True) op.create_index("ix_assets_mime_type", "assets", ["mime_type"]) # ASSETS_INFO: user-visible references op.create_table( "assets_info", sa.Column("id", sa.String(length=36), primary_key=True), sa.Column("owner_id", sa.String(length=128), nullable=False, server_default=""), sa.Column("name", sa.String(length=512), nullable=False), sa.Column("asset_id", sa.String(length=36), sa.ForeignKey("assets.id", ondelete="RESTRICT"), nullable=False), sa.Column("preview_id", sa.String(length=36), sa.ForeignKey("assets.id", ondelete="SET NULL"), nullable=True), sa.Column("user_metadata", sa.JSON(), nullable=True), sa.Column("created_at", sa.DateTime(timezone=False), nullable=False), sa.Column("updated_at", sa.DateTime(timezone=False), nullable=False), sa.Column("last_access_time", sa.DateTime(timezone=False), nullable=False), sa.UniqueConstraint("asset_id", "owner_id", "name", name="uq_assets_info_asset_owner_name"), ) op.create_index("ix_assets_info_owner_id", "assets_info", ["owner_id"]) op.create_index("ix_assets_info_asset_id", "assets_info", ["asset_id"]) op.create_index("ix_assets_info_name", "assets_info", ["name"]) op.create_index("ix_assets_info_created_at", "assets_info", ["created_at"]) 
op.create_index("ix_assets_info_last_access_time", "assets_info", ["last_access_time"]) op.create_index("ix_assets_info_owner_name", "assets_info", ["owner_id", "name"]) # TAGS: normalized tag vocabulary op.create_table( "tags", sa.Column("name", sa.String(length=512), primary_key=True), sa.Column("tag_type", sa.String(length=32), nullable=False, server_default="user"), sa.CheckConstraint("name = lower(name)", name="ck_tags_lowercase"), ) op.create_index("ix_tags_tag_type", "tags", ["tag_type"]) # ASSET_INFO_TAGS: many-to-many for tags on AssetInfo op.create_table( "asset_info_tags", sa.Column("asset_info_id", sa.String(length=36), sa.ForeignKey("assets_info.id", ondelete="CASCADE"), nullable=False), sa.Column("tag_name", sa.String(length=512), sa.ForeignKey("tags.name", ondelete="RESTRICT"), nullable=False), sa.Column("origin", sa.String(length=32), nullable=False, server_default="manual"), sa.Column("added_at", sa.DateTime(timezone=False), nullable=False), sa.PrimaryKeyConstraint("asset_info_id", "tag_name", name="pk_asset_info_tags"), ) op.create_index("ix_asset_info_tags_tag_name", "asset_info_tags", ["tag_name"]) op.create_index("ix_asset_info_tags_asset_info_id", "asset_info_tags", ["asset_info_id"]) # ASSET_CACHE_STATE: N:1 local cache rows per Asset op.create_table( "asset_cache_state", sa.Column("id", sa.Integer(), primary_key=True, autoincrement=True), sa.Column("asset_id", sa.String(length=36), sa.ForeignKey("assets.id", ondelete="CASCADE"), nullable=False), sa.Column("file_path", sa.Text(), nullable=False), # absolute local path to cached file sa.Column("mtime_ns", sa.BigInteger(), nullable=True), sa.Column("needs_verify", sa.Boolean(), nullable=False, server_default=sa.text("false")), sa.CheckConstraint("(mtime_ns IS NULL) OR (mtime_ns >= 0)", name="ck_acs_mtime_nonneg"), sa.UniqueConstraint("file_path", name="uq_asset_cache_state_file_path"), ) op.create_index("ix_asset_cache_state_file_path", "asset_cache_state", ["file_path"]) 
op.create_index("ix_asset_cache_state_asset_id", "asset_cache_state", ["asset_id"]) # ASSET_INFO_META: typed KV projection of user_metadata for filtering/sorting op.create_table( "asset_info_meta", sa.Column("asset_info_id", sa.String(length=36), sa.ForeignKey("assets_info.id", ondelete="CASCADE"), nullable=False), sa.Column("key", sa.String(length=256), nullable=False), sa.Column("ordinal", sa.Integer(), nullable=False, server_default="0"), sa.Column("val_str", sa.String(length=2048), nullable=True), sa.Column("val_num", sa.Numeric(38, 10), nullable=True), sa.Column("val_bool", sa.Boolean(), nullable=True), sa.Column("val_json", sa.JSON(), nullable=True), sa.PrimaryKeyConstraint("asset_info_id", "key", "ordinal", name="pk_asset_info_meta"), ) op.create_index("ix_asset_info_meta_key", "asset_info_meta", ["key"]) op.create_index("ix_asset_info_meta_key_val_str", "asset_info_meta", ["key", "val_str"]) op.create_index("ix_asset_info_meta_key_val_num", "asset_info_meta", ["key", "val_num"]) op.create_index("ix_asset_info_meta_key_val_bool", "asset_info_meta", ["key", "val_bool"]) # Tags vocabulary tags_table = sa.table( "tags", sa.column("name", sa.String(length=512)), sa.column("tag_type", sa.String()), ) op.bulk_insert( tags_table, [ {"name": "models", "tag_type": "system"}, {"name": "input", "tag_type": "system"}, {"name": "output", "tag_type": "system"}, {"name": "configs", "tag_type": "system"}, {"name": "checkpoints", "tag_type": "system"}, {"name": "loras", "tag_type": "system"}, {"name": "vae", "tag_type": "system"}, {"name": "text_encoders", "tag_type": "system"}, {"name": "diffusion_models", "tag_type": "system"}, {"name": "clip_vision", "tag_type": "system"}, {"name": "style_models", "tag_type": "system"}, {"name": "embeddings", "tag_type": "system"}, {"name": "diffusers", "tag_type": "system"}, {"name": "vae_approx", "tag_type": "system"}, {"name": "controlnet", "tag_type": "system"}, {"name": "gligen", "tag_type": "system"}, {"name": "upscale_models", 
"tag_type": "system"}, {"name": "hypernetworks", "tag_type": "system"}, {"name": "photomaker", "tag_type": "system"}, {"name": "classifiers", "tag_type": "system"}, {"name": "encoder", "tag_type": "system"}, {"name": "decoder", "tag_type": "system"}, {"name": "missing", "tag_type": "system"}, {"name": "rescan", "tag_type": "system"}, ], ) def downgrade() -> None: op.drop_index("ix_asset_info_meta_key_val_bool", table_name="asset_info_meta") op.drop_index("ix_asset_info_meta_key_val_num", table_name="asset_info_meta") op.drop_index("ix_asset_info_meta_key_val_str", table_name="asset_info_meta") op.drop_index("ix_asset_info_meta_key", table_name="asset_info_meta") op.drop_table("asset_info_meta") op.drop_index("ix_asset_cache_state_asset_id", table_name="asset_cache_state") op.drop_index("ix_asset_cache_state_file_path", table_name="asset_cache_state") op.drop_constraint("uq_asset_cache_state_file_path", table_name="asset_cache_state") op.drop_table("asset_cache_state") op.drop_index("ix_asset_info_tags_asset_info_id", table_name="asset_info_tags") op.drop_index("ix_asset_info_tags_tag_name", table_name="asset_info_tags") op.drop_table("asset_info_tags") op.drop_index("ix_tags_tag_type", table_name="tags") op.drop_table("tags") op.drop_constraint("uq_assets_info_asset_owner_name", table_name="assets_info") op.drop_index("ix_assets_info_owner_name", table_name="assets_info") op.drop_index("ix_assets_info_last_access_time", table_name="assets_info") op.drop_index("ix_assets_info_created_at", table_name="assets_info") op.drop_index("ix_assets_info_name", table_name="assets_info") op.drop_index("ix_assets_info_asset_id", table_name="assets_info") op.drop_index("ix_assets_info_owner_id", table_name="assets_info") op.drop_table("assets_info") op.drop_index("uq_assets_hash", table_name="assets") op.drop_index("ix_assets_mime_type", table_name="assets") op.drop_table("assets")
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "alembic_db/versions/0001_assets.py", "license": "GNU General Public License v3.0", "lines": 154, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:app/assets/api/routes.py
import logging
import uuid
import urllib.parse
import os
import contextlib

from aiohttp import web
from pydantic import ValidationError

import app.assets.manager as manager
from app import user_manager
from app.assets.api import schemas_in
from app.assets.helpers import get_query_dict
from app.assets.scanner import seed_assets
import folder_paths

ROUTES = web.RouteTableDef()
USER_MANAGER: user_manager.UserManager | None = None

# UUID regex (canonical hyphenated form, case-insensitive)
UUID_RE = r"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}"

# Note to any custom node developers reading this code:
# The assets system is not yet fully implemented, do not rely on the code in /app/assets remaining the same.


def register_assets_system(app: web.Application, user_manager_instance: user_manager.UserManager) -> None:
    """Attach the asset routes to *app* and remember the user manager instance."""
    global USER_MANAGER
    USER_MANAGER = user_manager_instance
    app.add_routes(ROUTES)


def _error_response(status: int, code: str, message: str, details: dict | None = None) -> web.Response:
    """Uniform JSON error envelope used by every handler in this module."""
    return web.json_response({"error": {"code": code, "message": message, "details": details or {}}}, status=status)


def _validation_error_response(code: str, ve: ValidationError) -> web.Response:
    """400 response wrapping a pydantic validation failure."""
    return _error_response(400, code, "Validation failed.", {"errors": ve.json()})


@ROUTES.head("/api/assets/hash/{hash}")
async def head_asset_by_hash(request: web.Request) -> web.Response:
    """HEAD probe: 200 when content with the given blake3 hash exists, else 404."""
    hash_str = request.match_info.get("hash", "").strip().lower()
    if not hash_str or ":" not in hash_str:
        return _error_response(400, "INVALID_HASH", "hash must be like 'blake3:<hex>'")
    algo, digest = hash_str.split(":", 1)
    # Only canonical lowercase 'blake3:<hex>' is accepted.
    if algo != "blake3" or not digest or any(c for c in digest if c not in "0123456789abcdef"):
        return _error_response(400, "INVALID_HASH", "hash must be like 'blake3:<hex>'")
    exists = manager.asset_exists(asset_hash=hash_str)
    return web.Response(status=200 if exists else 404)


@ROUTES.get("/api/assets")
async def list_assets(request: web.Request) -> web.Response:
    """
    GET request to list assets.
    """
    query_dict = get_query_dict(request)
    try:
        q = schemas_in.ListAssetsQuery.model_validate(query_dict)
    except ValidationError as ve:
        return _validation_error_response("INVALID_QUERY", ve)
    payload = manager.list_assets(
        include_tags=q.include_tags,
        exclude_tags=q.exclude_tags,
        name_contains=q.name_contains,
        metadata_filter=q.metadata_filter,
        limit=q.limit,
        offset=q.offset,
        sort=q.sort,
        order=q.order,
        owner_id=USER_MANAGER.get_request_user_id(request),
    )
    return web.json_response(payload.model_dump(mode="json", exclude_none=True))


@ROUTES.get(f"/api/assets/{{id:{UUID_RE}}}")
async def get_asset(request: web.Request) -> web.Response:
    """
    GET request to get an asset's info as JSON.
    """
    asset_info_id = str(uuid.UUID(request.match_info["id"]))
    try:
        result = manager.get_asset(
            asset_info_id=asset_info_id,
            owner_id=USER_MANAGER.get_request_user_id(request),
        )
    except ValueError as e:
        return _error_response(404, "ASSET_NOT_FOUND", str(e), {"id": asset_info_id})
    except Exception:
        logging.exception(
            "get_asset failed for asset_info_id=%s, owner_id=%s",
            asset_info_id,
            USER_MANAGER.get_request_user_id(request),
        )
        return _error_response(500, "INTERNAL", "Unexpected server error.")
    return web.json_response(result.model_dump(mode="json"), status=200)


@ROUTES.get(f"/api/assets/{{id:{UUID_RE}}}/content")
async def download_asset_content(request: web.Request) -> web.Response:
    """Stream the raw file content of an asset back to the client."""
    # question: do we need disposition? could we just stick with one of these?
    disposition = request.query.get("disposition", "attachment").lower().strip()
    if disposition not in {"inline", "attachment"}:
        disposition = "attachment"

    try:
        abs_path, content_type, filename = manager.resolve_asset_content_for_download(
            asset_info_id=str(uuid.UUID(request.match_info["id"])),
            owner_id=USER_MANAGER.get_request_user_id(request),
        )
    except ValueError as ve:
        return _error_response(404, "ASSET_NOT_FOUND", str(ve))
    except NotImplementedError as nie:
        return _error_response(501, "BACKEND_UNSUPPORTED", str(nie))
    except FileNotFoundError:
        return _error_response(404, "FILE_NOT_FOUND", "Underlying file not found on disk.")

    # Sanitize the quoted filename for the Content-Disposition header; the
    # RFC 5987 filename* part carries the percent-encoded original name.
    quoted = (filename or "").replace("\r", "").replace("\n", "").replace('"', "'")
    cd = f'{disposition}; filename="{quoted}"; filename*=UTF-8\'\'{urllib.parse.quote(filename)}'

    file_size = os.path.getsize(abs_path)
    logging.info(
        "download_asset_content: path=%s, size=%d bytes (%.2f MB), content_type=%s, filename=%s",
        abs_path,
        file_size,
        file_size / (1024 * 1024),
        content_type,
        filename,
    )

    async def file_sender():
        # Stream in 64 KiB chunks to avoid loading the whole file in memory.
        chunk_size = 64 * 1024
        with open(abs_path, "rb") as f:
            while True:
                chunk = f.read(chunk_size)
                if not chunk:
                    break
                yield chunk

    return web.Response(
        body=file_sender(),
        content_type=content_type,
        headers={
            "Content-Disposition": cd,
            "Content-Length": str(file_size),
        },
    )


@ROUTES.post("/api/assets/from-hash")
async def create_asset_from_hash(request: web.Request) -> web.Response:
    """Create a new AssetInfo reference to content that already exists by hash."""
    try:
        payload = await request.json()
        body = schemas_in.CreateFromHashBody.model_validate(payload)
    except ValidationError as ve:
        return _validation_error_response("INVALID_BODY", ve)
    except Exception:
        return _error_response(400, "INVALID_JSON", "Request body must be valid JSON.")

    result = manager.create_asset_from_hash(
        hash_str=body.hash,
        name=body.name,
        tags=body.tags,
        user_metadata=body.user_metadata,
        owner_id=USER_MANAGER.get_request_user_id(request),
    )
    if result is None:
        return _error_response(404, "ASSET_NOT_FOUND", f"Asset content {body.hash} does not exist")
    return web.json_response(result.model_dump(mode="json"), status=201)


@ROUTES.post("/api/assets")
async def upload_asset(request: web.Request) -> web.Response:
    """Multipart/form-data endpoint for Asset uploads."""
    if not (request.content_type or "").lower().startswith("multipart/"):
        return _error_response(415, "UNSUPPORTED_MEDIA_TYPE", "Use multipart/form-data for uploads.")

    reader = await request.multipart()

    # Accumulated form state; fields may arrive in any order.
    file_present = False
    file_client_name: str | None = None
    tags_raw: list[str] = []
    provided_name: str | None = None
    user_metadata_raw: str | None = None
    provided_hash: str | None = None
    provided_hash_exists: bool | None = None
    file_written = 0
    tmp_path: str | None = None

    while True:
        field = await reader.next()
        if field is None:
            break
        fname = getattr(field, "name", "") or ""

        if fname == "hash":
            try:
                s = ((await field.text()) or "").strip().lower()
            except Exception:
                return _error_response(400, "INVALID_HASH", "hash must be like 'blake3:<hex>'")
            if s:
                if ":" not in s:
                    return _error_response(400, "INVALID_HASH", "hash must be like 'blake3:<hex>'")
                algo, digest = s.split(":", 1)
                if algo != "blake3" or not digest or any(c for c in digest if c not in "0123456789abcdef"):
                    return _error_response(400, "INVALID_HASH", "hash must be like 'blake3:<hex>'")
                provided_hash = f"{algo}:{digest}"
                try:
                    provided_hash_exists = manager.asset_exists(asset_hash=provided_hash)
                except Exception:
                    provided_hash_exists = None  # do not fail the whole request here
        elif fname == "file":
            file_present = True
            file_client_name = (field.filename or "").strip()

            if provided_hash and provided_hash_exists is True:
                # Client supplied a hash we already have: drain the upload
                # without writing anything to disk.
                try:
                    while True:
                        chunk = await field.read_chunk(8 * 1024 * 1024)
                        if not chunk:
                            break
                        file_written += len(chunk)
                except Exception:
                    return _error_response(500, "UPLOAD_IO_ERROR", "Failed to receive uploaded file.")
                continue  # no temp file; AssetInfo will reference existing content

            # Otherwise spool the payload to a unique temp path for hashing/ingest.
            uploads_root = os.path.join(folder_paths.get_temp_directory(), "uploads")
            unique_dir = os.path.join(uploads_root, uuid.uuid4().hex)
            os.makedirs(unique_dir, exist_ok=True)
            tmp_path = os.path.join(unique_dir, ".upload.part")
            try:
                with open(tmp_path, "wb") as f:
                    while True:
                        chunk = await field.read_chunk(8 * 1024 * 1024)
                        if not chunk:
                            break
                        f.write(chunk)
                        file_written += len(chunk)
            except Exception:
                try:
                    if os.path.exists(tmp_path or ""):
                        os.remove(tmp_path)
                finally:
                    return _error_response(500, "UPLOAD_IO_ERROR", "Failed to receive and store uploaded file.")
        elif fname == "tags":
            tags_raw.append((await field.text()) or "")
        elif fname == "name":
            provided_name = (await field.text()) or None
        elif fname == "user_metadata":
            user_metadata_raw = (await field.text()) or None

    # No file and no usable from-hash fast path -> error.
    if not file_present and not (provided_hash and provided_hash_exists):
        return _error_response(400, "MISSING_FILE", "Form must include a 'file' part or a known 'hash'.")
    if file_present and file_written == 0 and not (provided_hash and provided_hash_exists):
        # An empty upload is only acceptable when fast-pathing from an existing hash.
        try:
            if tmp_path and os.path.exists(tmp_path):
                os.remove(tmp_path)
        finally:
            return _error_response(400, "EMPTY_UPLOAD", "Uploaded file is empty.")

    try:
        spec = schemas_in.UploadAssetSpec.model_validate({
            "tags": tags_raw,
            "name": provided_name,
            "user_metadata": user_metadata_raw,
            "hash": provided_hash,
        })
    except ValidationError as ve:
        try:
            if tmp_path and os.path.exists(tmp_path):
                os.remove(tmp_path)
        finally:
            return _validation_error_response("INVALID_BODY", ve)

    # Validate models category against configured folders (consistent with previous behavior)
    if spec.tags and spec.tags[0] == "models":
        if len(spec.tags) < 2 or spec.tags[1] not in folder_paths.folder_names_and_paths:
            if tmp_path and os.path.exists(tmp_path):
                os.remove(tmp_path)
            return _error_response(
                400,
                "INVALID_BODY",
                f"unknown models category '{spec.tags[1] if len(spec.tags) >= 2 else ''}'"
            )

    owner_id = USER_MANAGER.get_request_user_id(request)

    # Fast path: a known hash means we can create the AssetInfo without any file I/O.
    if spec.hash and provided_hash_exists is True:
        try:
            result = manager.create_asset_from_hash(
                hash_str=spec.hash,
                name=spec.name or (spec.hash.split(":", 1)[1]),
                tags=spec.tags,
                user_metadata=spec.user_metadata or {},
                owner_id=owner_id,
            )
        except Exception:
            logging.exception("create_asset_from_hash failed for hash=%s, owner_id=%s", spec.hash, owner_id)
            return _error_response(500, "INTERNAL", "Unexpected server error.")
        if result is None:
            return _error_response(404, "ASSET_NOT_FOUND", f"Asset content {spec.hash} does not exist")
        # Drain temp if we accidentally saved (e.g., hash field came after file)
        if tmp_path and os.path.exists(tmp_path):
            with contextlib.suppress(Exception):
                os.remove(tmp_path)
        status = 200 if (not result.created_new) else 201
        return web.json_response(result.model_dump(mode="json"), status=status)

    # Otherwise a spooled temp file is required; reaching here without one means
    # the client sent an unknown hash and no file.
    if not tmp_path or not os.path.exists(tmp_path):
        return _error_response(404, "ASSET_NOT_FOUND", "Provided hash not found and no file uploaded.")

    try:
        created = manager.upload_asset_from_temp_path(
            spec,
            temp_path=tmp_path,
            client_filename=file_client_name,
            owner_id=owner_id,
            expected_asset_hash=spec.hash,
        )
        status = 201 if created.created_new else 200
        return web.json_response(created.model_dump(mode="json"), status=status)
    except ValueError as e:
        if tmp_path and os.path.exists(tmp_path):
            os.remove(tmp_path)
        msg = str(e)
        if "HASH_MISMATCH" in msg or msg.strip().upper() == "HASH_MISMATCH":
            return _error_response(
                400,
                "HASH_MISMATCH",
                "Uploaded file hash does not match provided hash.",
            )
        return _error_response(400, "BAD_REQUEST", "Invalid inputs.")
    except Exception:
        if tmp_path and os.path.exists(tmp_path):
            os.remove(tmp_path)
        logging.exception("upload_asset_from_temp_path failed for tmp_path=%s, owner_id=%s", tmp_path, owner_id)
        return _error_response(500, "INTERNAL", "Unexpected server error.")


@ROUTES.put(f"/api/assets/{{id:{UUID_RE}}}")
async def update_asset(request: web.Request) -> web.Response:
    """Update an asset's display name and/or user metadata."""
    asset_info_id = str(uuid.UUID(request.match_info["id"]))
    try:
        body = schemas_in.UpdateAssetBody.model_validate(await request.json())
    except ValidationError as ve:
        return _validation_error_response("INVALID_BODY", ve)
    except Exception:
        return _error_response(400, "INVALID_JSON", "Request body must be valid JSON.")

    try:
        result = manager.update_asset(
            asset_info_id=asset_info_id,
            name=body.name,
            user_metadata=body.user_metadata,
            owner_id=USER_MANAGER.get_request_user_id(request),
        )
    except (ValueError, PermissionError) as ve:
        return _error_response(404, "ASSET_NOT_FOUND", str(ve), {"id": asset_info_id})
    except Exception:
        logging.exception(
            "update_asset failed for asset_info_id=%s, owner_id=%s",
            asset_info_id,
            USER_MANAGER.get_request_user_id(request),
        )
        return _error_response(500, "INTERNAL", "Unexpected server error.")
    return web.json_response(result.model_dump(mode="json"), status=200)


@ROUTES.delete(f"/api/assets/{{id:{UUID_RE}}}")
async def delete_asset(request: web.Request) -> web.Response:
    """Delete an AssetInfo reference; optionally drop orphaned content too."""
    asset_info_id = str(uuid.UUID(request.match_info["id"]))
    delete_content = request.query.get("delete_content")
    # Defaults to True; only "0"/"false"/"no" (any case) disable it.
    delete_content = True if delete_content is None else delete_content.lower() not in {"0", "false", "no"}
    try:
        deleted = manager.delete_asset_reference(
            asset_info_id=asset_info_id,
            owner_id=USER_MANAGER.get_request_user_id(request),
            delete_content_if_orphan=delete_content,
        )
    except Exception:
        logging.exception(
            "delete_asset_reference failed for asset_info_id=%s, owner_id=%s",
            asset_info_id,
            USER_MANAGER.get_request_user_id(request),
        )
        return _error_response(500, "INTERNAL", "Unexpected server error.")
    if not deleted:
        return _error_response(404, "ASSET_NOT_FOUND", f"AssetInfo {asset_info_id} not found.")
    return web.Response(status=204)


@ROUTES.get("/api/tags")
async def get_tags(request: web.Request) -> web.Response:
    """
    GET request to list all tags based on query parameters.
    """
    query_map = dict(request.rel_url.query)
    try:
        query = schemas_in.TagsListQuery.model_validate(query_map)
    except ValidationError as e:
        return web.json_response(
            {"error": {"code": "INVALID_QUERY", "message": "Invalid query parameters", "details": e.errors()}},
            status=400,
        )
    result = manager.list_tags(
        prefix=query.prefix,
        limit=query.limit,
        offset=query.offset,
        order=query.order,
        include_zero=query.include_zero,
        owner_id=USER_MANAGER.get_request_user_id(request),
    )
    return web.json_response(result.model_dump(mode="json"))


@ROUTES.post(f"/api/assets/{{id:{UUID_RE}}}/tags")
async def add_asset_tags(request: web.Request) -> web.Response:
    """Attach one or more tags to an asset."""
    asset_info_id = str(uuid.UUID(request.match_info["id"]))
    try:
        payload = await request.json()
        data = schemas_in.TagsAdd.model_validate(payload)
    except ValidationError as ve:
        return _error_response(400, "INVALID_BODY", "Invalid JSON body for tags add.", {"errors": ve.errors()})
    except Exception:
        return _error_response(400, "INVALID_JSON", "Request body must be valid JSON.")

    try:
        result = manager.add_tags_to_asset(
            asset_info_id=asset_info_id,
            tags=data.tags,
            origin="manual",
            owner_id=USER_MANAGER.get_request_user_id(request),
        )
    except (ValueError, PermissionError) as ve:
        return _error_response(404, "ASSET_NOT_FOUND", str(ve), {"id": asset_info_id})
    except Exception:
        logging.exception(
            "add_tags_to_asset failed for asset_info_id=%s, owner_id=%s",
            asset_info_id,
            USER_MANAGER.get_request_user_id(request),
        )
        return _error_response(500, "INTERNAL", "Unexpected server error.")
    return web.json_response(result.model_dump(mode="json"), status=200)


@ROUTES.delete(f"/api/assets/{{id:{UUID_RE}}}/tags")
async def delete_asset_tags(request: web.Request) -> web.Response:
    """Remove one or more tags from an asset."""
    asset_info_id = str(uuid.UUID(request.match_info["id"]))
    try:
        payload = await request.json()
        data = schemas_in.TagsRemove.model_validate(payload)
    except ValidationError as ve:
        return _error_response(400, "INVALID_BODY", "Invalid JSON body for tags remove.", {"errors": ve.errors()})
    except Exception:
        return _error_response(400, "INVALID_JSON", "Request body must be valid JSON.")

    try:
        result = manager.remove_tags_from_asset(
            asset_info_id=asset_info_id,
            tags=data.tags,
            owner_id=USER_MANAGER.get_request_user_id(request),
        )
    except ValueError as ve:
        return _error_response(404, "ASSET_NOT_FOUND", str(ve), {"id": asset_info_id})
    except Exception:
        logging.exception(
            "remove_tags_from_asset failed for asset_info_id=%s, owner_id=%s",
            asset_info_id,
            USER_MANAGER.get_request_user_id(request),
        )
        return _error_response(500, "INTERNAL", "Unexpected server error.")
    return web.json_response(result.model_dump(mode="json"), status=200)


@ROUTES.post("/api/assets/seed")
async def seed_assets_endpoint(request: web.Request) -> web.Response:
    """Trigger asset seeding for specified roots (models, input, output)."""
    try:
        payload = await request.json()
        roots = payload.get("roots", ["models", "input", "output"])
    except Exception:
        roots = ["models", "input", "output"]

    valid_roots = [r for r in roots if r in ("models", "input", "output")]
    if not valid_roots:
        return _error_response(400, "INVALID_BODY", "No valid roots specified")

    try:
        seed_assets(tuple(valid_roots))
    except Exception:
        logging.exception("seed_assets failed for roots=%s", valid_roots)
        return _error_response(500, "INTERNAL", "Seed operation failed")
    return web.json_response({"seeded": valid_roots}, status=200)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "app/assets/api/routes.py", "license": "GNU General Public License v3.0", "lines": 440, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:app/assets/api/schemas_in.py
import json from typing import Any, Literal from pydantic import ( BaseModel, ConfigDict, Field, conint, field_validator, model_validator, ) class ListAssetsQuery(BaseModel): include_tags: list[str] = Field(default_factory=list) exclude_tags: list[str] = Field(default_factory=list) name_contains: str | None = None # Accept either a JSON string (query param) or a dict metadata_filter: dict[str, Any] | None = None limit: conint(ge=1, le=500) = 20 offset: conint(ge=0) = 0 sort: Literal["name", "created_at", "updated_at", "size", "last_access_time"] = "created_at" order: Literal["asc", "desc"] = "desc" @field_validator("include_tags", "exclude_tags", mode="before") @classmethod def _split_csv_tags(cls, v): # Accept "a,b,c" or ["a","b"] (we are liberal in what we accept) if v is None: return [] if isinstance(v, str): return [t.strip() for t in v.split(",") if t.strip()] if isinstance(v, list): out: list[str] = [] for item in v: if isinstance(item, str): out.extend([t.strip() for t in item.split(",") if t.strip()]) return out return v @field_validator("metadata_filter", mode="before") @classmethod def _parse_metadata_json(cls, v): if v is None or isinstance(v, dict): return v if isinstance(v, str) and v.strip(): try: parsed = json.loads(v) except Exception as e: raise ValueError(f"metadata_filter must be JSON: {e}") from e if not isinstance(parsed, dict): raise ValueError("metadata_filter must be a JSON object") return parsed return None class UpdateAssetBody(BaseModel): name: str | None = None user_metadata: dict[str, Any] | None = None @model_validator(mode="after") def _at_least_one(self): if self.name is None and self.user_metadata is None: raise ValueError("Provide at least one of: name, user_metadata.") return self class CreateFromHashBody(BaseModel): model_config = ConfigDict(extra="ignore", str_strip_whitespace=True) hash: str name: str tags: list[str] = Field(default_factory=list) user_metadata: dict[str, Any] = Field(default_factory=dict) 
@field_validator("hash") @classmethod def _require_blake3(cls, v): s = (v or "").strip().lower() if ":" not in s: raise ValueError("hash must be 'blake3:<hex>'") algo, digest = s.split(":", 1) if algo != "blake3": raise ValueError("only canonical 'blake3:<hex>' is accepted here") if not digest or any(c for c in digest if c not in "0123456789abcdef"): raise ValueError("hash digest must be lowercase hex") return s @field_validator("tags", mode="before") @classmethod def _tags_norm(cls, v): if v is None: return [] if isinstance(v, list): out = [str(t).strip().lower() for t in v if str(t).strip()] seen = set() dedup = [] for t in out: if t not in seen: seen.add(t) dedup.append(t) return dedup if isinstance(v, str): return [t.strip().lower() for t in v.split(",") if t.strip()] return [] class TagsListQuery(BaseModel): model_config = ConfigDict(extra="ignore", str_strip_whitespace=True) prefix: str | None = Field(None, min_length=1, max_length=256) limit: int = Field(100, ge=1, le=1000) offset: int = Field(0, ge=0, le=10_000_000) order: Literal["count_desc", "name_asc"] = "count_desc" include_zero: bool = True @field_validator("prefix") @classmethod def normalize_prefix(cls, v: str | None) -> str | None: if v is None: return v v = v.strip() return v.lower() or None class TagsAdd(BaseModel): model_config = ConfigDict(extra="ignore") tags: list[str] = Field(..., min_length=1) @field_validator("tags") @classmethod def normalize_tags(cls, v: list[str]) -> list[str]: out = [] for t in v: if not isinstance(t, str): raise TypeError("tags must be strings") tnorm = t.strip().lower() if tnorm: out.append(tnorm) seen = set() deduplicated = [] for x in out: if x not in seen: seen.add(x) deduplicated.append(x) return deduplicated class TagsRemove(TagsAdd): pass class UploadAssetSpec(BaseModel): """Upload Asset operation. 
- tags: ordered; first is root ('models'|'input'|'output'); if root == 'models', second must be a valid category from folder_paths.folder_names_and_paths - name: display name - user_metadata: arbitrary JSON object (optional) - hash: optional canonical 'blake3:<hex>' provided by the client for validation / fast-path Files created via this endpoint are stored on disk using the **content hash** as the filename stem and the original extension is preserved when available. """ model_config = ConfigDict(extra="ignore", str_strip_whitespace=True) tags: list[str] = Field(..., min_length=1) name: str | None = Field(default=None, max_length=512, description="Display Name") user_metadata: dict[str, Any] = Field(default_factory=dict) hash: str | None = Field(default=None) @field_validator("hash", mode="before") @classmethod def _parse_hash(cls, v): if v is None: return None s = str(v).strip().lower() if not s: return None if ":" not in s: raise ValueError("hash must be 'blake3:<hex>'") algo, digest = s.split(":", 1) if algo != "blake3": raise ValueError("only canonical 'blake3:<hex>' is accepted here") if not digest or any(c for c in digest if c not in "0123456789abcdef"): raise ValueError("hash digest must be lowercase hex") return f"{algo}:{digest}" @field_validator("tags", mode="before") @classmethod def _parse_tags(cls, v): """ Accepts a list of strings (possibly multiple form fields), where each string can be: - JSON array (e.g., '["models","loras","foo"]') - comma-separated ('models, loras, foo') - single token ('models') Returns a normalized, deduplicated, ordered list. 
""" items: list[str] = [] if v is None: return [] if isinstance(v, str): v = [v] if isinstance(v, list): for item in v: if item is None: continue s = str(item).strip() if not s: continue if s.startswith("["): try: arr = json.loads(s) if isinstance(arr, list): items.extend(str(x) for x in arr) continue except Exception: pass # fallback to CSV parse below items.extend([p for p in s.split(",") if p.strip()]) else: return [] # normalize + dedupe norm = [] seen = set() for t in items: tnorm = str(t).strip().lower() if tnorm and tnorm not in seen: seen.add(tnorm) norm.append(tnorm) return norm @field_validator("user_metadata", mode="before") @classmethod def _parse_metadata_json(cls, v): if v is None or isinstance(v, dict): return v or {} if isinstance(v, str): s = v.strip() if not s: return {} try: parsed = json.loads(s) except Exception as e: raise ValueError(f"user_metadata must be JSON: {e}") from e if not isinstance(parsed, dict): raise ValueError("user_metadata must be a JSON object") return parsed return {} @model_validator(mode="after") def _validate_order(self): if not self.tags: raise ValueError("tags must be provided and non-empty") root = self.tags[0] if root not in {"models", "input", "output"}: raise ValueError("first tag must be one of: models, input, output") if root == "models": if len(self.tags) < 2: raise ValueError("models uploads require a category tag as the second tag") return self
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "app/assets/api/schemas_in.py", "license": "GNU General Public License v3.0", "lines": 230, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:app/assets/api/schemas_out.py
from datetime import datetime from typing import Any from pydantic import BaseModel, ConfigDict, Field, field_serializer class AssetSummary(BaseModel): id: str name: str asset_hash: str | None = None size: int | None = None mime_type: str | None = None tags: list[str] = Field(default_factory=list) preview_url: str | None = None created_at: datetime | None = None updated_at: datetime | None = None last_access_time: datetime | None = None model_config = ConfigDict(from_attributes=True) @field_serializer("created_at", "updated_at", "last_access_time") def _ser_dt(self, v: datetime | None, _info): return v.isoformat() if v else None class AssetsList(BaseModel): assets: list[AssetSummary] total: int has_more: bool class AssetUpdated(BaseModel): id: str name: str asset_hash: str | None = None tags: list[str] = Field(default_factory=list) user_metadata: dict[str, Any] = Field(default_factory=dict) updated_at: datetime | None = None model_config = ConfigDict(from_attributes=True) @field_serializer("updated_at") def _ser_updated(self, v: datetime | None, _info): return v.isoformat() if v else None class AssetDetail(BaseModel): id: str name: str asset_hash: str | None = None size: int | None = None mime_type: str | None = None tags: list[str] = Field(default_factory=list) user_metadata: dict[str, Any] = Field(default_factory=dict) preview_id: str | None = None created_at: datetime | None = None last_access_time: datetime | None = None model_config = ConfigDict(from_attributes=True) @field_serializer("created_at", "last_access_time") def _ser_dt(self, v: datetime | None, _info): return v.isoformat() if v else None class AssetCreated(AssetDetail): created_new: bool class TagUsage(BaseModel): name: str count: int type: str class TagsList(BaseModel): tags: list[TagUsage] = Field(default_factory=list) total: int has_more: bool class TagsAdd(BaseModel): model_config = ConfigDict(str_strip_whitespace=True) added: list[str] = Field(default_factory=list) already_present: list[str] = 
Field(default_factory=list) total_tags: list[str] = Field(default_factory=list) class TagsRemove(BaseModel): model_config = ConfigDict(str_strip_whitespace=True) removed: list[str] = Field(default_factory=list) not_present: list[str] = Field(default_factory=list) total_tags: list[str] = Field(default_factory=list)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "app/assets/api/schemas_out.py", "license": "GNU General Public License v3.0", "lines": 68, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:app/assets/database/bulk_ops.py
import os import uuid import sqlalchemy from typing import Iterable from sqlalchemy.orm import Session from sqlalchemy.dialects import sqlite from app.assets.helpers import utcnow from app.assets.database.models import Asset, AssetCacheState, AssetInfo, AssetInfoTag, AssetInfoMeta MAX_BIND_PARAMS = 800 def _chunk_rows(rows: list[dict], cols_per_row: int, max_bind_params: int) -> Iterable[list[dict]]: if not rows: return [] rows_per_stmt = max(1, max_bind_params // max(1, cols_per_row)) for i in range(0, len(rows), rows_per_stmt): yield rows[i:i + rows_per_stmt] def _iter_chunks(seq, n: int): for i in range(0, len(seq), n): yield seq[i:i + n] def _rows_per_stmt(cols: int) -> int: return max(1, MAX_BIND_PARAMS // max(1, cols)) def seed_from_paths_batch( session: Session, *, specs: list[dict], owner_id: str = "", ) -> dict: """Each spec is a dict with keys: - abs_path: str - size_bytes: int - mtime_ns: int - info_name: str - tags: list[str] - fname: Optional[str] """ if not specs: return {"inserted_infos": 0, "won_states": 0, "lost_states": 0} now = utcnow() asset_rows: list[dict] = [] state_rows: list[dict] = [] path_to_asset: dict[str, str] = {} asset_to_info: dict[str, dict] = {} # asset_id -> prepared info row path_list: list[str] = [] for sp in specs: ap = os.path.abspath(sp["abs_path"]) aid = str(uuid.uuid4()) iid = str(uuid.uuid4()) path_list.append(ap) path_to_asset[ap] = aid asset_rows.append( { "id": aid, "hash": None, "size_bytes": sp["size_bytes"], "mime_type": None, "created_at": now, } ) state_rows.append( { "asset_id": aid, "file_path": ap, "mtime_ns": sp["mtime_ns"], } ) asset_to_info[aid] = { "id": iid, "owner_id": owner_id, "name": sp["info_name"], "asset_id": aid, "preview_id": None, "user_metadata": {"filename": sp["fname"]} if sp["fname"] else None, "created_at": now, "updated_at": now, "last_access_time": now, "_tags": sp["tags"], "_filename": sp["fname"], } # insert all seed Assets (hash=NULL) ins_asset = sqlite.insert(Asset) for chunk in 
_iter_chunks(asset_rows, _rows_per_stmt(5)): session.execute(ins_asset, chunk) # try to claim AssetCacheState (file_path) # Insert with ON CONFLICT DO NOTHING, then query to find which paths were actually inserted ins_state = ( sqlite.insert(AssetCacheState) .on_conflict_do_nothing(index_elements=[AssetCacheState.file_path]) ) for chunk in _iter_chunks(state_rows, _rows_per_stmt(3)): session.execute(ins_state, chunk) # Query to find which of our paths won (were actually inserted) winners_by_path: set[str] = set() for chunk in _iter_chunks(path_list, MAX_BIND_PARAMS): result = session.execute( sqlalchemy.select(AssetCacheState.file_path) .where(AssetCacheState.file_path.in_(chunk)) .where(AssetCacheState.asset_id.in_([path_to_asset[p] for p in chunk])) ) winners_by_path.update(result.scalars().all()) all_paths_set = set(path_list) losers_by_path = all_paths_set - winners_by_path lost_assets = [path_to_asset[p] for p in losers_by_path] if lost_assets: # losers get their Asset removed for id_chunk in _iter_chunks(lost_assets, MAX_BIND_PARAMS): session.execute(sqlalchemy.delete(Asset).where(Asset.id.in_(id_chunk))) if not winners_by_path: return {"inserted_infos": 0, "won_states": 0, "lost_states": len(losers_by_path)} # insert AssetInfo only for winners # Insert with ON CONFLICT DO NOTHING, then query to find which were actually inserted winner_info_rows = [asset_to_info[path_to_asset[p]] for p in winners_by_path] ins_info = ( sqlite.insert(AssetInfo) .on_conflict_do_nothing(index_elements=[AssetInfo.asset_id, AssetInfo.owner_id, AssetInfo.name]) ) for chunk in _iter_chunks(winner_info_rows, _rows_per_stmt(9)): session.execute(ins_info, chunk) # Query to find which info rows were actually inserted (by matching our generated IDs) all_info_ids = [row["id"] for row in winner_info_rows] inserted_info_ids: set[str] = set() for chunk in _iter_chunks(all_info_ids, MAX_BIND_PARAMS): result = session.execute( sqlalchemy.select(AssetInfo.id).where(AssetInfo.id.in_(chunk)) ) 
inserted_info_ids.update(result.scalars().all()) # build and insert tag + meta rows for the AssetInfo tag_rows: list[dict] = [] meta_rows: list[dict] = [] if inserted_info_ids: for row in winner_info_rows: iid = row["id"] if iid not in inserted_info_ids: continue for t in row["_tags"]: tag_rows.append({ "asset_info_id": iid, "tag_name": t, "origin": "automatic", "added_at": now, }) if row["_filename"]: meta_rows.append( { "asset_info_id": iid, "key": "filename", "ordinal": 0, "val_str": row["_filename"], "val_num": None, "val_bool": None, "val_json": None, } ) bulk_insert_tags_and_meta(session, tag_rows=tag_rows, meta_rows=meta_rows, max_bind_params=MAX_BIND_PARAMS) return { "inserted_infos": len(inserted_info_ids), "won_states": len(winners_by_path), "lost_states": len(losers_by_path), } def bulk_insert_tags_and_meta( session: Session, *, tag_rows: list[dict], meta_rows: list[dict], max_bind_params: int, ) -> None: """Batch insert into asset_info_tags and asset_info_meta with ON CONFLICT DO NOTHING. - tag_rows keys: asset_info_id, tag_name, origin, added_at - meta_rows keys: asset_info_id, key, ordinal, val_str, val_num, val_bool, val_json """ if tag_rows: ins_links = ( sqlite.insert(AssetInfoTag) .on_conflict_do_nothing(index_elements=[AssetInfoTag.asset_info_id, AssetInfoTag.tag_name]) ) for chunk in _chunk_rows(tag_rows, cols_per_row=4, max_bind_params=max_bind_params): session.execute(ins_links, chunk) if meta_rows: ins_meta = ( sqlite.insert(AssetInfoMeta) .on_conflict_do_nothing( index_elements=[AssetInfoMeta.asset_info_id, AssetInfoMeta.key, AssetInfoMeta.ordinal] ) ) for chunk in _chunk_rows(meta_rows, cols_per_row=7, max_bind_params=max_bind_params): session.execute(ins_meta, chunk)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "app/assets/database/bulk_ops.py", "license": "GNU General Public License v3.0", "lines": 183, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:app/assets/database/models.py
from __future__ import annotations import uuid from datetime import datetime from typing import Any from sqlalchemy import ( JSON, BigInteger, Boolean, CheckConstraint, DateTime, ForeignKey, Index, Integer, Numeric, String, Text, UniqueConstraint, ) from sqlalchemy.orm import Mapped, foreign, mapped_column, relationship from app.assets.helpers import utcnow from app.database.models import to_dict, Base class Asset(Base): __tablename__ = "assets" id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: str(uuid.uuid4())) hash: Mapped[str | None] = mapped_column(String(256), nullable=True) size_bytes: Mapped[int] = mapped_column(BigInteger, nullable=False, default=0) mime_type: Mapped[str | None] = mapped_column(String(255)) created_at: Mapped[datetime] = mapped_column( DateTime(timezone=False), nullable=False, default=utcnow ) infos: Mapped[list[AssetInfo]] = relationship( "AssetInfo", back_populates="asset", primaryjoin=lambda: Asset.id == foreign(AssetInfo.asset_id), foreign_keys=lambda: [AssetInfo.asset_id], cascade="all,delete-orphan", passive_deletes=True, ) preview_of: Mapped[list[AssetInfo]] = relationship( "AssetInfo", back_populates="preview_asset", primaryjoin=lambda: Asset.id == foreign(AssetInfo.preview_id), foreign_keys=lambda: [AssetInfo.preview_id], viewonly=True, ) cache_states: Mapped[list[AssetCacheState]] = relationship( back_populates="asset", cascade="all, delete-orphan", passive_deletes=True, ) __table_args__ = ( Index("uq_assets_hash", "hash", unique=True), Index("ix_assets_mime_type", "mime_type"), CheckConstraint("size_bytes >= 0", name="ck_assets_size_nonneg"), ) def to_dict(self, include_none: bool = False) -> dict[str, Any]: return to_dict(self, include_none=include_none) def __repr__(self) -> str: return f"<Asset id={self.id} hash={(self.hash or '')[:12]}>" class AssetCacheState(Base): __tablename__ = "asset_cache_state" id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) asset_id: 
Mapped[str] = mapped_column(String(36), ForeignKey("assets.id", ondelete="CASCADE"), nullable=False) file_path: Mapped[str] = mapped_column(Text, nullable=False) mtime_ns: Mapped[int | None] = mapped_column(BigInteger, nullable=True) needs_verify: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False) asset: Mapped[Asset] = relationship(back_populates="cache_states") __table_args__ = ( Index("ix_asset_cache_state_file_path", "file_path"), Index("ix_asset_cache_state_asset_id", "asset_id"), CheckConstraint("(mtime_ns IS NULL) OR (mtime_ns >= 0)", name="ck_acs_mtime_nonneg"), UniqueConstraint("file_path", name="uq_asset_cache_state_file_path"), ) def to_dict(self, include_none: bool = False) -> dict[str, Any]: return to_dict(self, include_none=include_none) def __repr__(self) -> str: return f"<AssetCacheState id={self.id} asset_id={self.asset_id} path={self.file_path!r}>" class AssetInfo(Base): __tablename__ = "assets_info" id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: str(uuid.uuid4())) owner_id: Mapped[str] = mapped_column(String(128), nullable=False, default="") name: Mapped[str] = mapped_column(String(512), nullable=False) asset_id: Mapped[str] = mapped_column(String(36), ForeignKey("assets.id", ondelete="RESTRICT"), nullable=False) preview_id: Mapped[str | None] = mapped_column(String(36), ForeignKey("assets.id", ondelete="SET NULL")) user_metadata: Mapped[dict[str, Any] | None] = mapped_column(JSON(none_as_null=True)) created_at: Mapped[datetime] = mapped_column(DateTime(timezone=False), nullable=False, default=utcnow) updated_at: Mapped[datetime] = mapped_column(DateTime(timezone=False), nullable=False, default=utcnow) last_access_time: Mapped[datetime] = mapped_column(DateTime(timezone=False), nullable=False, default=utcnow) asset: Mapped[Asset] = relationship( "Asset", back_populates="infos", foreign_keys=[asset_id], lazy="selectin", ) preview_asset: Mapped[Asset | None] = relationship( "Asset", 
back_populates="preview_of", foreign_keys=[preview_id], ) metadata_entries: Mapped[list[AssetInfoMeta]] = relationship( back_populates="asset_info", cascade="all,delete-orphan", passive_deletes=True, ) tag_links: Mapped[list[AssetInfoTag]] = relationship( back_populates="asset_info", cascade="all,delete-orphan", passive_deletes=True, overlaps="tags,asset_infos", ) tags: Mapped[list[Tag]] = relationship( secondary="asset_info_tags", back_populates="asset_infos", lazy="selectin", viewonly=True, overlaps="tag_links,asset_info_links,asset_infos,tag", ) __table_args__ = ( UniqueConstraint("asset_id", "owner_id", "name", name="uq_assets_info_asset_owner_name"), Index("ix_assets_info_owner_name", "owner_id", "name"), Index("ix_assets_info_owner_id", "owner_id"), Index("ix_assets_info_asset_id", "asset_id"), Index("ix_assets_info_name", "name"), Index("ix_assets_info_created_at", "created_at"), Index("ix_assets_info_last_access_time", "last_access_time"), ) def to_dict(self, include_none: bool = False) -> dict[str, Any]: data = to_dict(self, include_none=include_none) data["tags"] = [t.name for t in self.tags] return data def __repr__(self) -> str: return f"<AssetInfo id={self.id} name={self.name!r} asset_id={self.asset_id}>" class AssetInfoMeta(Base): __tablename__ = "asset_info_meta" asset_info_id: Mapped[str] = mapped_column( String(36), ForeignKey("assets_info.id", ondelete="CASCADE"), primary_key=True ) key: Mapped[str] = mapped_column(String(256), primary_key=True) ordinal: Mapped[int] = mapped_column(Integer, primary_key=True, default=0) val_str: Mapped[str | None] = mapped_column(String(2048), nullable=True) val_num: Mapped[float | None] = mapped_column(Numeric(38, 10), nullable=True) val_bool: Mapped[bool | None] = mapped_column(Boolean, nullable=True) val_json: Mapped[Any | None] = mapped_column(JSON(none_as_null=True), nullable=True) asset_info: Mapped[AssetInfo] = relationship(back_populates="metadata_entries") __table_args__ = ( Index("ix_asset_info_meta_key", 
"key"), Index("ix_asset_info_meta_key_val_str", "key", "val_str"), Index("ix_asset_info_meta_key_val_num", "key", "val_num"), Index("ix_asset_info_meta_key_val_bool", "key", "val_bool"), ) class AssetInfoTag(Base): __tablename__ = "asset_info_tags" asset_info_id: Mapped[str] = mapped_column( String(36), ForeignKey("assets_info.id", ondelete="CASCADE"), primary_key=True ) tag_name: Mapped[str] = mapped_column( String(512), ForeignKey("tags.name", ondelete="RESTRICT"), primary_key=True ) origin: Mapped[str] = mapped_column(String(32), nullable=False, default="manual") added_at: Mapped[datetime] = mapped_column( DateTime(timezone=False), nullable=False, default=utcnow ) asset_info: Mapped[AssetInfo] = relationship(back_populates="tag_links") tag: Mapped[Tag] = relationship(back_populates="asset_info_links") __table_args__ = ( Index("ix_asset_info_tags_tag_name", "tag_name"), Index("ix_asset_info_tags_asset_info_id", "asset_info_id"), ) class Tag(Base): __tablename__ = "tags" name: Mapped[str] = mapped_column(String(512), primary_key=True) tag_type: Mapped[str] = mapped_column(String(32), nullable=False, default="user") asset_info_links: Mapped[list[AssetInfoTag]] = relationship( back_populates="tag", overlaps="asset_infos,tags", ) asset_infos: Mapped[list[AssetInfo]] = relationship( secondary="asset_info_tags", back_populates="tags", viewonly=True, overlaps="asset_info_links,tag_links,tags,asset_info", ) __table_args__ = ( Index("ix_tags_tag_type", "tag_type"), ) def __repr__(self) -> str: return f"<Tag {self.name}>"
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "app/assets/database/models.py", "license": "GNU General Public License v3.0", "lines": 187, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:app/assets/database/queries.py
import os import logging import sqlalchemy as sa from collections import defaultdict from datetime import datetime from typing import Iterable, Any from sqlalchemy import select, delete, exists, func from sqlalchemy.dialects import sqlite from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session, contains_eager, noload from app.assets.database.models import Asset, AssetInfo, AssetCacheState, AssetInfoMeta, AssetInfoTag, Tag from app.assets.helpers import ( compute_relative_filename, escape_like_prefix, normalize_tags, project_kv, utcnow ) from typing import Sequence def visible_owner_clause(owner_id: str) -> sa.sql.ClauseElement: """Build owner visibility predicate for reads. Owner-less rows are visible to everyone.""" owner_id = (owner_id or "").strip() if owner_id == "": return AssetInfo.owner_id == "" return AssetInfo.owner_id.in_(["", owner_id]) def pick_best_live_path(states: Sequence[AssetCacheState]) -> str: """ Return the best on-disk path among cache states: 1) Prefer a path that exists with needs_verify == False (already verified). 2) Otherwise, pick the first path that exists. 3) Otherwise return empty string. 
""" alive = [s for s in states if getattr(s, "file_path", None) and os.path.isfile(s.file_path)] if not alive: return "" for s in alive: if not getattr(s, "needs_verify", False): return s.file_path return alive[0].file_path def apply_tag_filters( stmt: sa.sql.Select, include_tags: Sequence[str] | None = None, exclude_tags: Sequence[str] | None = None, ) -> sa.sql.Select: """include_tags: every tag must be present; exclude_tags: none may be present.""" include_tags = normalize_tags(include_tags) exclude_tags = normalize_tags(exclude_tags) if include_tags: for tag_name in include_tags: stmt = stmt.where( exists().where( (AssetInfoTag.asset_info_id == AssetInfo.id) & (AssetInfoTag.tag_name == tag_name) ) ) if exclude_tags: stmt = stmt.where( ~exists().where( (AssetInfoTag.asset_info_id == AssetInfo.id) & (AssetInfoTag.tag_name.in_(exclude_tags)) ) ) return stmt def apply_metadata_filter( stmt: sa.sql.Select, metadata_filter: dict | None = None, ) -> sa.sql.Select: """Apply filters using asset_info_meta projection table.""" if not metadata_filter: return stmt def _exists_for_pred(key: str, *preds) -> sa.sql.ClauseElement: return sa.exists().where( AssetInfoMeta.asset_info_id == AssetInfo.id, AssetInfoMeta.key == key, *preds, ) def _exists_clause_for_value(key: str, value) -> sa.sql.ClauseElement: if value is None: no_row_for_key = sa.not_( sa.exists().where( AssetInfoMeta.asset_info_id == AssetInfo.id, AssetInfoMeta.key == key, ) ) null_row = _exists_for_pred( key, AssetInfoMeta.val_json.is_(None), AssetInfoMeta.val_str.is_(None), AssetInfoMeta.val_num.is_(None), AssetInfoMeta.val_bool.is_(None), ) return sa.or_(no_row_for_key, null_row) if isinstance(value, bool): return _exists_for_pred(key, AssetInfoMeta.val_bool == bool(value)) if isinstance(value, (int, float)): from decimal import Decimal num = value if isinstance(value, Decimal) else Decimal(str(value)) return _exists_for_pred(key, AssetInfoMeta.val_num == num) if isinstance(value, str): return 
_exists_for_pred(key, AssetInfoMeta.val_str == value) return _exists_for_pred(key, AssetInfoMeta.val_json == value) for k, v in metadata_filter.items(): if isinstance(v, list): ors = [_exists_clause_for_value(k, elem) for elem in v] if ors: stmt = stmt.where(sa.or_(*ors)) else: stmt = stmt.where(_exists_clause_for_value(k, v)) return stmt def asset_exists_by_hash( session: Session, *, asset_hash: str, ) -> bool: """ Check if an asset with a given hash exists in database. """ row = ( session.execute( select(sa.literal(True)).select_from(Asset).where(Asset.hash == asset_hash).limit(1) ) ).first() return row is not None def asset_info_exists_for_asset_id( session: Session, *, asset_id: str, ) -> bool: q = ( select(sa.literal(True)) .select_from(AssetInfo) .where(AssetInfo.asset_id == asset_id) .limit(1) ) return (session.execute(q)).first() is not None def get_asset_by_hash( session: Session, *, asset_hash: str, ) -> Asset | None: return ( session.execute(select(Asset).where(Asset.hash == asset_hash).limit(1)) ).scalars().first() def get_asset_info_by_id( session: Session, *, asset_info_id: str, ) -> AssetInfo | None: return session.get(AssetInfo, asset_info_id) def list_asset_infos_page( session: Session, owner_id: str = "", include_tags: Sequence[str] | None = None, exclude_tags: Sequence[str] | None = None, name_contains: str | None = None, metadata_filter: dict | None = None, limit: int = 20, offset: int = 0, sort: str = "created_at", order: str = "desc", ) -> tuple[list[AssetInfo], dict[str, list[str]], int]: base = ( select(AssetInfo) .join(Asset, Asset.id == AssetInfo.asset_id) .options(contains_eager(AssetInfo.asset), noload(AssetInfo.tags)) .where(visible_owner_clause(owner_id)) ) if name_contains: escaped, esc = escape_like_prefix(name_contains) base = base.where(AssetInfo.name.ilike(f"%{escaped}%", escape=esc)) base = apply_tag_filters(base, include_tags, exclude_tags) base = apply_metadata_filter(base, metadata_filter) sort = (sort or "created_at").lower() 
order = (order or "desc").lower() sort_map = { "name": AssetInfo.name, "created_at": AssetInfo.created_at, "updated_at": AssetInfo.updated_at, "last_access_time": AssetInfo.last_access_time, "size": Asset.size_bytes, } sort_col = sort_map.get(sort, AssetInfo.created_at) sort_exp = sort_col.desc() if order == "desc" else sort_col.asc() base = base.order_by(sort_exp).limit(limit).offset(offset) count_stmt = ( select(sa.func.count()) .select_from(AssetInfo) .join(Asset, Asset.id == AssetInfo.asset_id) .where(visible_owner_clause(owner_id)) ) if name_contains: escaped, esc = escape_like_prefix(name_contains) count_stmt = count_stmt.where(AssetInfo.name.ilike(f"%{escaped}%", escape=esc)) count_stmt = apply_tag_filters(count_stmt, include_tags, exclude_tags) count_stmt = apply_metadata_filter(count_stmt, metadata_filter) total = int((session.execute(count_stmt)).scalar_one() or 0) infos = (session.execute(base)).unique().scalars().all() id_list: list[str] = [i.id for i in infos] tag_map: dict[str, list[str]] = defaultdict(list) if id_list: rows = session.execute( select(AssetInfoTag.asset_info_id, Tag.name) .join(Tag, Tag.name == AssetInfoTag.tag_name) .where(AssetInfoTag.asset_info_id.in_(id_list)) .order_by(AssetInfoTag.added_at) ) for aid, tag_name in rows.all(): tag_map[aid].append(tag_name) return infos, tag_map, total def fetch_asset_info_asset_and_tags( session: Session, asset_info_id: str, owner_id: str = "", ) -> tuple[AssetInfo, Asset, list[str]] | None: stmt = ( select(AssetInfo, Asset, Tag.name) .join(Asset, Asset.id == AssetInfo.asset_id) .join(AssetInfoTag, AssetInfoTag.asset_info_id == AssetInfo.id, isouter=True) .join(Tag, Tag.name == AssetInfoTag.tag_name, isouter=True) .where( AssetInfo.id == asset_info_id, visible_owner_clause(owner_id), ) .options(noload(AssetInfo.tags)) .order_by(Tag.name.asc()) ) rows = (session.execute(stmt)).all() if not rows: return None first_info, first_asset, _ = rows[0] tags: list[str] = [] seen: set[str] = set() for _info, 
_asset, tag_name in rows: if tag_name and tag_name not in seen: seen.add(tag_name) tags.append(tag_name) return first_info, first_asset, tags def fetch_asset_info_and_asset( session: Session, *, asset_info_id: str, owner_id: str = "", ) -> tuple[AssetInfo, Asset] | None: stmt = ( select(AssetInfo, Asset) .join(Asset, Asset.id == AssetInfo.asset_id) .where( AssetInfo.id == asset_info_id, visible_owner_clause(owner_id), ) .limit(1) .options(noload(AssetInfo.tags)) ) row = session.execute(stmt) pair = row.first() if not pair: return None return pair[0], pair[1] def list_cache_states_by_asset_id( session: Session, *, asset_id: str ) -> Sequence[AssetCacheState]: return ( session.execute( select(AssetCacheState) .where(AssetCacheState.asset_id == asset_id) .order_by(AssetCacheState.id.asc()) ) ).scalars().all() def touch_asset_info_by_id( session: Session, *, asset_info_id: str, ts: datetime | None = None, only_if_newer: bool = True, ) -> None: ts = ts or utcnow() stmt = sa.update(AssetInfo).where(AssetInfo.id == asset_info_id) if only_if_newer: stmt = stmt.where( sa.or_(AssetInfo.last_access_time.is_(None), AssetInfo.last_access_time < ts) ) session.execute(stmt.values(last_access_time=ts)) def create_asset_info_for_existing_asset( session: Session, *, asset_hash: str, name: str, user_metadata: dict | None = None, tags: Sequence[str] | None = None, tag_origin: str = "manual", owner_id: str = "", ) -> AssetInfo: """Create or return an existing AssetInfo for an Asset identified by asset_hash.""" now = utcnow() asset = get_asset_by_hash(session, asset_hash=asset_hash) if not asset: raise ValueError(f"Unknown asset hash {asset_hash}") info = AssetInfo( owner_id=owner_id, name=name, asset_id=asset.id, preview_id=None, created_at=now, updated_at=now, last_access_time=now, ) try: with session.begin_nested(): session.add(info) session.flush() except IntegrityError: existing = ( session.execute( select(AssetInfo) .options(noload(AssetInfo.tags)) .where( AssetInfo.asset_id == 
asset.id, AssetInfo.name == name, AssetInfo.owner_id == owner_id, ) .limit(1) ) ).unique().scalars().first() if not existing: raise RuntimeError("AssetInfo upsert failed to find existing row after conflict.") return existing # metadata["filename"] hack new_meta = dict(user_metadata or {}) computed_filename = None try: p = pick_best_live_path(list_cache_states_by_asset_id(session, asset_id=asset.id)) if p: computed_filename = compute_relative_filename(p) except Exception: computed_filename = None if computed_filename: new_meta["filename"] = computed_filename if new_meta: replace_asset_info_metadata_projection( session, asset_info_id=info.id, user_metadata=new_meta, ) if tags is not None: set_asset_info_tags( session, asset_info_id=info.id, tags=tags, origin=tag_origin, ) return info def set_asset_info_tags( session: Session, *, asset_info_id: str, tags: Sequence[str], origin: str = "manual", ) -> dict: desired = normalize_tags(tags) current = set( tag_name for (tag_name,) in ( session.execute(select(AssetInfoTag.tag_name).where(AssetInfoTag.asset_info_id == asset_info_id)) ).all() ) to_add = [t for t in desired if t not in current] to_remove = [t for t in current if t not in desired] if to_add: ensure_tags_exist(session, to_add, tag_type="user") session.add_all([ AssetInfoTag(asset_info_id=asset_info_id, tag_name=t, origin=origin, added_at=utcnow()) for t in to_add ]) session.flush() if to_remove: session.execute( delete(AssetInfoTag) .where(AssetInfoTag.asset_info_id == asset_info_id, AssetInfoTag.tag_name.in_(to_remove)) ) session.flush() return {"added": to_add, "removed": to_remove, "total": desired} def replace_asset_info_metadata_projection( session: Session, *, asset_info_id: str, user_metadata: dict | None = None, ) -> None: info = session.get(AssetInfo, asset_info_id) if not info: raise ValueError(f"AssetInfo {asset_info_id} not found") info.user_metadata = user_metadata or {} info.updated_at = utcnow() session.flush() 
session.execute(delete(AssetInfoMeta).where(AssetInfoMeta.asset_info_id == asset_info_id)) session.flush() if not user_metadata: return rows: list[AssetInfoMeta] = [] for k, v in user_metadata.items(): for r in project_kv(k, v): rows.append( AssetInfoMeta( asset_info_id=asset_info_id, key=r["key"], ordinal=int(r["ordinal"]), val_str=r.get("val_str"), val_num=r.get("val_num"), val_bool=r.get("val_bool"), val_json=r.get("val_json"), ) ) if rows: session.add_all(rows) session.flush() def ingest_fs_asset( session: Session, *, asset_hash: str, abs_path: str, size_bytes: int, mtime_ns: int, mime_type: str | None = None, info_name: str | None = None, owner_id: str = "", preview_id: str | None = None, user_metadata: dict | None = None, tags: Sequence[str] = (), tag_origin: str = "manual", require_existing_tags: bool = False, ) -> dict: """ Idempotently upsert: - Asset by content hash (create if missing) - AssetCacheState(file_path) pointing to asset_id - Optionally AssetInfo + tag links and metadata projection Returns flags and ids. 
""" locator = os.path.abspath(abs_path) now = utcnow() if preview_id: if not session.get(Asset, preview_id): preview_id = None out: dict[str, Any] = { "asset_created": False, "asset_updated": False, "state_created": False, "state_updated": False, "asset_info_id": None, } # 1) Asset by hash asset = ( session.execute(select(Asset).where(Asset.hash == asset_hash).limit(1)) ).scalars().first() if not asset: vals = { "hash": asset_hash, "size_bytes": int(size_bytes), "mime_type": mime_type, "created_at": now, } res = session.execute( sqlite.insert(Asset) .values(**vals) .on_conflict_do_nothing(index_elements=[Asset.hash]) ) if int(res.rowcount or 0) > 0: out["asset_created"] = True asset = ( session.execute( select(Asset).where(Asset.hash == asset_hash).limit(1) ) ).scalars().first() if not asset: raise RuntimeError("Asset row not found after upsert.") else: changed = False if asset.size_bytes != int(size_bytes) and int(size_bytes) > 0: asset.size_bytes = int(size_bytes) changed = True if mime_type and asset.mime_type != mime_type: asset.mime_type = mime_type changed = True if changed: out["asset_updated"] = True # 2) AssetCacheState upsert by file_path (unique) vals = { "asset_id": asset.id, "file_path": locator, "mtime_ns": int(mtime_ns), } ins = ( sqlite.insert(AssetCacheState) .values(**vals) .on_conflict_do_nothing(index_elements=[AssetCacheState.file_path]) ) res = session.execute(ins) if int(res.rowcount or 0) > 0: out["state_created"] = True else: upd = ( sa.update(AssetCacheState) .where(AssetCacheState.file_path == locator) .where( sa.or_( AssetCacheState.asset_id != asset.id, AssetCacheState.mtime_ns.is_(None), AssetCacheState.mtime_ns != int(mtime_ns), ) ) .values(asset_id=asset.id, mtime_ns=int(mtime_ns)) ) res2 = session.execute(upd) if int(res2.rowcount or 0) > 0: out["state_updated"] = True # 3) Optional AssetInfo + tags + metadata if info_name: try: with session.begin_nested(): info = AssetInfo( owner_id=owner_id, name=info_name, asset_id=asset.id, 
preview_id=preview_id, created_at=now, updated_at=now, last_access_time=now, ) session.add(info) session.flush() out["asset_info_id"] = info.id except IntegrityError: pass existing_info = ( session.execute( select(AssetInfo) .where( AssetInfo.asset_id == asset.id, AssetInfo.name == info_name, (AssetInfo.owner_id == owner_id), ) .limit(1) ) ).unique().scalar_one_or_none() if not existing_info: raise RuntimeError("Failed to update or insert AssetInfo.") if preview_id and existing_info.preview_id != preview_id: existing_info.preview_id = preview_id existing_info.updated_at = now if existing_info.last_access_time < now: existing_info.last_access_time = now session.flush() out["asset_info_id"] = existing_info.id norm = [t.strip().lower() for t in (tags or []) if (t or "").strip()] if norm and out["asset_info_id"] is not None: if not require_existing_tags: ensure_tags_exist(session, norm, tag_type="user") existing_tag_names = set( name for (name,) in (session.execute(select(Tag.name).where(Tag.name.in_(norm)))).all() ) missing = [t for t in norm if t not in existing_tag_names] if missing and require_existing_tags: raise ValueError(f"Unknown tags: {missing}") existing_links = set( tag_name for (tag_name,) in ( session.execute( select(AssetInfoTag.tag_name).where(AssetInfoTag.asset_info_id == out["asset_info_id"]) ) ).all() ) to_add = [t for t in norm if t in existing_tag_names and t not in existing_links] if to_add: session.add_all( [ AssetInfoTag( asset_info_id=out["asset_info_id"], tag_name=t, origin=tag_origin, added_at=now, ) for t in to_add ] ) session.flush() # metadata["filename"] hack if out["asset_info_id"] is not None: primary_path = pick_best_live_path(list_cache_states_by_asset_id(session, asset_id=asset.id)) computed_filename = compute_relative_filename(primary_path) if primary_path else None current_meta = existing_info.user_metadata or {} new_meta = dict(current_meta) if user_metadata is not None: for k, v in user_metadata.items(): new_meta[k] = v if 
computed_filename: new_meta["filename"] = computed_filename if new_meta != current_meta: replace_asset_info_metadata_projection( session, asset_info_id=out["asset_info_id"], user_metadata=new_meta, ) try: remove_missing_tag_for_asset_id(session, asset_id=asset.id) except Exception: logging.exception("Failed to clear 'missing' tag for asset %s", asset.id) return out def update_asset_info_full( session: Session, *, asset_info_id: str, name: str | None = None, tags: Sequence[str] | None = None, user_metadata: dict | None = None, tag_origin: str = "manual", asset_info_row: Any = None, ) -> AssetInfo: if not asset_info_row: info = session.get(AssetInfo, asset_info_id) if not info: raise ValueError(f"AssetInfo {asset_info_id} not found") else: info = asset_info_row touched = False if name is not None and name != info.name: info.name = name touched = True computed_filename = None try: p = pick_best_live_path(list_cache_states_by_asset_id(session, asset_id=info.asset_id)) if p: computed_filename = compute_relative_filename(p) except Exception: computed_filename = None if user_metadata is not None: new_meta = dict(user_metadata) if computed_filename: new_meta["filename"] = computed_filename replace_asset_info_metadata_projection( session, asset_info_id=asset_info_id, user_metadata=new_meta ) touched = True else: if computed_filename: current_meta = info.user_metadata or {} if current_meta.get("filename") != computed_filename: new_meta = dict(current_meta) new_meta["filename"] = computed_filename replace_asset_info_metadata_projection( session, asset_info_id=asset_info_id, user_metadata=new_meta ) touched = True if tags is not None: set_asset_info_tags( session, asset_info_id=asset_info_id, tags=tags, origin=tag_origin, ) touched = True if touched and user_metadata is None: info.updated_at = utcnow() session.flush() return info def delete_asset_info_by_id( session: Session, *, asset_info_id: str, owner_id: str, ) -> bool: stmt = sa.delete(AssetInfo).where( AssetInfo.id == 
asset_info_id, visible_owner_clause(owner_id), ) return int((session.execute(stmt)).rowcount or 0) > 0 def list_tags_with_usage( session: Session, prefix: str | None = None, limit: int = 100, offset: int = 0, include_zero: bool = True, order: str = "count_desc", owner_id: str = "", ) -> tuple[list[tuple[str, str, int]], int]: counts_sq = ( select( AssetInfoTag.tag_name.label("tag_name"), func.count(AssetInfoTag.asset_info_id).label("cnt"), ) .select_from(AssetInfoTag) .join(AssetInfo, AssetInfo.id == AssetInfoTag.asset_info_id) .where(visible_owner_clause(owner_id)) .group_by(AssetInfoTag.tag_name) .subquery() ) q = ( select( Tag.name, Tag.tag_type, func.coalesce(counts_sq.c.cnt, 0).label("count"), ) .select_from(Tag) .join(counts_sq, counts_sq.c.tag_name == Tag.name, isouter=True) ) if prefix: escaped, esc = escape_like_prefix(prefix.strip().lower()) q = q.where(Tag.name.like(escaped + "%", escape=esc)) if not include_zero: q = q.where(func.coalesce(counts_sq.c.cnt, 0) > 0) if order == "name_asc": q = q.order_by(Tag.name.asc()) else: q = q.order_by(func.coalesce(counts_sq.c.cnt, 0).desc(), Tag.name.asc()) total_q = select(func.count()).select_from(Tag) if prefix: escaped, esc = escape_like_prefix(prefix.strip().lower()) total_q = total_q.where(Tag.name.like(escaped + "%", escape=esc)) if not include_zero: total_q = total_q.where( Tag.name.in_(select(AssetInfoTag.tag_name).group_by(AssetInfoTag.tag_name)) ) rows = (session.execute(q.limit(limit).offset(offset))).all() total = (session.execute(total_q)).scalar_one() rows_norm = [(name, ttype, int(count or 0)) for (name, ttype, count) in rows] return rows_norm, int(total or 0) def ensure_tags_exist(session: Session, names: Iterable[str], tag_type: str = "user") -> None: wanted = normalize_tags(list(names)) if not wanted: return rows = [{"name": n, "tag_type": tag_type} for n in list(dict.fromkeys(wanted))] ins = ( sqlite.insert(Tag) .values(rows) .on_conflict_do_nothing(index_elements=[Tag.name]) ) 
session.execute(ins) def get_asset_tags(session: Session, *, asset_info_id: str) -> list[str]: return [ tag_name for (tag_name,) in ( session.execute( select(AssetInfoTag.tag_name).where(AssetInfoTag.asset_info_id == asset_info_id) ) ).all() ] def add_tags_to_asset_info( session: Session, *, asset_info_id: str, tags: Sequence[str], origin: str = "manual", create_if_missing: bool = True, asset_info_row: Any = None, ) -> dict: if not asset_info_row: info = session.get(AssetInfo, asset_info_id) if not info: raise ValueError(f"AssetInfo {asset_info_id} not found") norm = normalize_tags(tags) if not norm: total = get_asset_tags(session, asset_info_id=asset_info_id) return {"added": [], "already_present": [], "total_tags": total} if create_if_missing: ensure_tags_exist(session, norm, tag_type="user") current = { tag_name for (tag_name,) in ( session.execute( sa.select(AssetInfoTag.tag_name).where(AssetInfoTag.asset_info_id == asset_info_id) ) ).all() } want = set(norm) to_add = sorted(want - current) if to_add: with session.begin_nested() as nested: try: session.add_all( [ AssetInfoTag( asset_info_id=asset_info_id, tag_name=t, origin=origin, added_at=utcnow(), ) for t in to_add ] ) session.flush() except IntegrityError: nested.rollback() after = set(get_asset_tags(session, asset_info_id=asset_info_id)) return { "added": sorted(((after - current) & want)), "already_present": sorted(want & current), "total_tags": sorted(after), } def remove_tags_from_asset_info( session: Session, *, asset_info_id: str, tags: Sequence[str], ) -> dict: info = session.get(AssetInfo, asset_info_id) if not info: raise ValueError(f"AssetInfo {asset_info_id} not found") norm = normalize_tags(tags) if not norm: total = get_asset_tags(session, asset_info_id=asset_info_id) return {"removed": [], "not_present": [], "total_tags": total} existing = { tag_name for (tag_name,) in ( session.execute( sa.select(AssetInfoTag.tag_name).where(AssetInfoTag.asset_info_id == asset_info_id) ) ).all() } to_remove = 
sorted(set(t for t in norm if t in existing)) not_present = sorted(set(t for t in norm if t not in existing)) if to_remove: session.execute( delete(AssetInfoTag) .where( AssetInfoTag.asset_info_id == asset_info_id, AssetInfoTag.tag_name.in_(to_remove), ) ) session.flush() total = get_asset_tags(session, asset_info_id=asset_info_id) return {"removed": to_remove, "not_present": not_present, "total_tags": total} def remove_missing_tag_for_asset_id( session: Session, *, asset_id: str, ) -> None: session.execute( sa.delete(AssetInfoTag).where( AssetInfoTag.asset_info_id.in_(sa.select(AssetInfo.id).where(AssetInfo.asset_id == asset_id)), AssetInfoTag.tag_name == "missing", ) ) def set_asset_info_preview( session: Session, *, asset_info_id: str, preview_asset_id: str | None = None, ) -> None: """Set or clear preview_id and bump updated_at. Raises on unknown IDs.""" info = session.get(AssetInfo, asset_info_id) if not info: raise ValueError(f"AssetInfo {asset_info_id} not found") if preview_asset_id is None: info.preview_id = None else: # validate preview asset exists if not session.get(Asset, preview_asset_id): raise ValueError(f"Preview Asset {preview_asset_id} not found") info.preview_id = preview_asset_id info.updated_at = utcnow() session.flush()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "app/assets/database/queries.py", "license": "GNU General Public License v3.0", "lines": 854, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:app/assets/database/tags.py
from typing import Iterable import sqlalchemy from sqlalchemy.orm import Session from sqlalchemy.dialects import sqlite from app.assets.helpers import normalize_tags, utcnow from app.assets.database.models import Tag, AssetInfoTag, AssetInfo def ensure_tags_exist(session: Session, names: Iterable[str], tag_type: str = "user") -> None: wanted = normalize_tags(list(names)) if not wanted: return rows = [{"name": n, "tag_type": tag_type} for n in list(dict.fromkeys(wanted))] ins = ( sqlite.insert(Tag) .values(rows) .on_conflict_do_nothing(index_elements=[Tag.name]) ) return session.execute(ins) def add_missing_tag_for_asset_id( session: Session, *, asset_id: str, origin: str = "automatic", ) -> None: select_rows = ( sqlalchemy.select( AssetInfo.id.label("asset_info_id"), sqlalchemy.literal("missing").label("tag_name"), sqlalchemy.literal(origin).label("origin"), sqlalchemy.literal(utcnow()).label("added_at"), ) .where(AssetInfo.asset_id == asset_id) .where( sqlalchemy.not_( sqlalchemy.exists().where((AssetInfoTag.asset_info_id == AssetInfo.id) & (AssetInfoTag.tag_name == "missing")) ) ) ) session.execute( sqlite.insert(AssetInfoTag) .from_select( ["asset_info_id", "tag_name", "origin", "added_at"], select_rows, ) .on_conflict_do_nothing(index_elements=[AssetInfoTag.asset_info_id, AssetInfoTag.tag_name]) ) def remove_missing_tag_for_asset_id( session: Session, *, asset_id: str, ) -> None: session.execute( sqlalchemy.delete(AssetInfoTag).where( AssetInfoTag.asset_info_id.in_(sqlalchemy.select(AssetInfo.id).where(AssetInfo.asset_id == asset_id)), AssetInfoTag.tag_name == "missing", ) )
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "app/assets/database/tags.py", "license": "GNU General Public License v3.0", "lines": 56, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:app/assets/hashing.py
from blake3 import blake3 from typing import IO import os import asyncio DEFAULT_CHUNK = 8 * 1024 *1024 # 8MB # NOTE: this allows hashing different representations of a file-like object def blake3_hash( fp: str | IO[bytes], chunk_size: int = DEFAULT_CHUNK, ) -> str: """ Returns a BLAKE3 hex digest for ``fp``, which may be: - a filename (str/bytes) or PathLike - an open binary file object If ``fp`` is a file object, it must be opened in **binary** mode and support ``read``, ``seek``, and ``tell``. The function will seek to the start before reading and will attempt to restore the original position afterward. """ # duck typing to check if input is a file-like object if hasattr(fp, "read"): return _hash_file_obj(fp, chunk_size) with open(os.fspath(fp), "rb") as f: return _hash_file_obj(f, chunk_size) async def blake3_hash_async( fp: str | IO[bytes], chunk_size: int = DEFAULT_CHUNK, ) -> str: """Async wrapper for ``blake3_hash_sync``. Uses a worker thread so the event loop remains responsive. """ # If it is a path, open inside the worker thread to keep I/O off the loop. if hasattr(fp, "read"): return await asyncio.to_thread(blake3_hash, fp, chunk_size) def _worker() -> str: with open(os.fspath(fp), "rb") as f: return _hash_file_obj(f, chunk_size) return await asyncio.to_thread(_worker) def _hash_file_obj(file_obj: IO, chunk_size: int = DEFAULT_CHUNK) -> str: """ Hash an already-open binary file object by streaming in chunks. - Seeks to the beginning before reading (if supported). - Restores the original position afterward (if tell/seek are supported). 
""" if chunk_size <= 0: chunk_size = DEFAULT_CHUNK # in case file object is already open and not at the beginning, track so can be restored after hashing orig_pos = file_obj.tell() try: # seek to the beginning before reading if orig_pos != 0: file_obj.seek(0) h = blake3() while True: chunk = file_obj.read(chunk_size) if not chunk: break h.update(chunk) return h.hexdigest() finally: # restore original position in file object, if needed if orig_pos != 0: file_obj.seek(orig_pos)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "app/assets/hashing.py", "license": "GNU General Public License v3.0", "lines": 62, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:app/assets/manager.py
import os import mimetypes import contextlib from typing import Sequence from app.database.db import create_session from app.assets.api import schemas_out, schemas_in from app.assets.database.queries import ( asset_exists_by_hash, asset_info_exists_for_asset_id, get_asset_by_hash, get_asset_info_by_id, fetch_asset_info_asset_and_tags, fetch_asset_info_and_asset, create_asset_info_for_existing_asset, touch_asset_info_by_id, update_asset_info_full, delete_asset_info_by_id, list_cache_states_by_asset_id, list_asset_infos_page, list_tags_with_usage, get_asset_tags, add_tags_to_asset_info, remove_tags_from_asset_info, pick_best_live_path, ingest_fs_asset, set_asset_info_preview, ) from app.assets.helpers import resolve_destination_from_tags, ensure_within_base from app.assets.database.models import Asset def _safe_sort_field(requested: str | None) -> str: if not requested: return "created_at" v = requested.lower() if v in {"name", "created_at", "updated_at", "size", "last_access_time"}: return v return "created_at" def _get_size_mtime_ns(path: str) -> tuple[int, int]: st = os.stat(path, follow_symlinks=True) return st.st_size, getattr(st, "st_mtime_ns", int(st.st_mtime * 1_000_000_000)) def _safe_filename(name: str | None, fallback: str) -> str: n = os.path.basename((name or "").strip() or fallback) if n: return n return fallback def asset_exists(*, asset_hash: str) -> bool: """ Check if an asset with a given hash exists in database. 
""" with create_session() as session: return asset_exists_by_hash(session, asset_hash=asset_hash) def list_assets( *, include_tags: Sequence[str] | None = None, exclude_tags: Sequence[str] | None = None, name_contains: str | None = None, metadata_filter: dict | None = None, limit: int = 20, offset: int = 0, sort: str = "created_at", order: str = "desc", owner_id: str = "", ) -> schemas_out.AssetsList: sort = _safe_sort_field(sort) order = "desc" if (order or "desc").lower() not in {"asc", "desc"} else order.lower() with create_session() as session: infos, tag_map, total = list_asset_infos_page( session, owner_id=owner_id, include_tags=include_tags, exclude_tags=exclude_tags, name_contains=name_contains, metadata_filter=metadata_filter, limit=limit, offset=offset, sort=sort, order=order, ) summaries: list[schemas_out.AssetSummary] = [] for info in infos: asset = info.asset tags = tag_map.get(info.id, []) summaries.append( schemas_out.AssetSummary( id=info.id, name=info.name, asset_hash=asset.hash if asset else None, size=int(asset.size_bytes) if asset else None, mime_type=asset.mime_type if asset else None, tags=tags, created_at=info.created_at, updated_at=info.updated_at, last_access_time=info.last_access_time, ) ) return schemas_out.AssetsList( assets=summaries, total=total, has_more=(offset + len(summaries)) < total, ) def get_asset( *, asset_info_id: str, owner_id: str = "", ) -> schemas_out.AssetDetail: with create_session() as session: res = fetch_asset_info_asset_and_tags(session, asset_info_id=asset_info_id, owner_id=owner_id) if not res: raise ValueError(f"AssetInfo {asset_info_id} not found") info, asset, tag_names = res preview_id = info.preview_id return schemas_out.AssetDetail( id=info.id, name=info.name, asset_hash=asset.hash if asset else None, size=int(asset.size_bytes) if asset and asset.size_bytes is not None else None, mime_type=asset.mime_type if asset else None, tags=tag_names, user_metadata=info.user_metadata or {}, preview_id=preview_id, 
created_at=info.created_at, last_access_time=info.last_access_time, ) def resolve_asset_content_for_download( *, asset_info_id: str, owner_id: str = "", ) -> tuple[str, str, str]: with create_session() as session: pair = fetch_asset_info_and_asset(session, asset_info_id=asset_info_id, owner_id=owner_id) if not pair: raise ValueError(f"AssetInfo {asset_info_id} not found") info, asset = pair states = list_cache_states_by_asset_id(session, asset_id=asset.id) abs_path = pick_best_live_path(states) if not abs_path: raise FileNotFoundError touch_asset_info_by_id(session, asset_info_id=asset_info_id) session.commit() ctype = asset.mime_type or mimetypes.guess_type(info.name or abs_path)[0] or "application/octet-stream" download_name = info.name or os.path.basename(abs_path) return abs_path, ctype, download_name def upload_asset_from_temp_path( spec: schemas_in.UploadAssetSpec, *, temp_path: str, client_filename: str | None = None, owner_id: str = "", expected_asset_hash: str | None = None, ) -> schemas_out.AssetCreated: """ Create new asset or update existing asset from a temporary file path. 
""" try: # NOTE: blake3 is not required right now, so this will fail if blake3 is not installed in local environment import app.assets.hashing as hashing digest = hashing.blake3_hash(temp_path) except Exception as e: raise RuntimeError(f"failed to hash uploaded file: {e}") asset_hash = "blake3:" + digest if expected_asset_hash and asset_hash != expected_asset_hash.strip().lower(): raise ValueError("HASH_MISMATCH") with create_session() as session: existing = get_asset_by_hash(session, asset_hash=asset_hash) if existing is not None: with contextlib.suppress(Exception): if temp_path and os.path.exists(temp_path): os.remove(temp_path) display_name = _safe_filename(spec.name or (client_filename or ""), fallback=digest) info = create_asset_info_for_existing_asset( session, asset_hash=asset_hash, name=display_name, user_metadata=spec.user_metadata or {}, tags=spec.tags or [], tag_origin="manual", owner_id=owner_id, ) tag_names = get_asset_tags(session, asset_info_id=info.id) session.commit() return schemas_out.AssetCreated( id=info.id, name=info.name, asset_hash=existing.hash, size=int(existing.size_bytes) if existing.size_bytes is not None else None, mime_type=existing.mime_type, tags=tag_names, user_metadata=info.user_metadata or {}, preview_id=info.preview_id, created_at=info.created_at, last_access_time=info.last_access_time, created_new=False, ) base_dir, subdirs = resolve_destination_from_tags(spec.tags) dest_dir = os.path.join(base_dir, *subdirs) if subdirs else base_dir os.makedirs(dest_dir, exist_ok=True) src_for_ext = (client_filename or spec.name or "").strip() _ext = os.path.splitext(os.path.basename(src_for_ext))[1] if src_for_ext else "" ext = _ext if 0 < len(_ext) <= 16 else "" hashed_basename = f"{digest}{ext}" dest_abs = os.path.abspath(os.path.join(dest_dir, hashed_basename)) ensure_within_base(dest_abs, base_dir) content_type = ( mimetypes.guess_type(os.path.basename(src_for_ext), strict=False)[0] or mimetypes.guess_type(hashed_basename, 
strict=False)[0] or "application/octet-stream" ) try: os.replace(temp_path, dest_abs) except Exception as e: raise RuntimeError(f"failed to move uploaded file into place: {e}") try: size_bytes, mtime_ns = _get_size_mtime_ns(dest_abs) except OSError as e: raise RuntimeError(f"failed to stat destination file: {e}") with create_session() as session: result = ingest_fs_asset( session, asset_hash=asset_hash, abs_path=dest_abs, size_bytes=size_bytes, mtime_ns=mtime_ns, mime_type=content_type, info_name=_safe_filename(spec.name or (client_filename or ""), fallback=digest), owner_id=owner_id, preview_id=None, user_metadata=spec.user_metadata or {}, tags=spec.tags, tag_origin="manual", require_existing_tags=False, ) info_id = result["asset_info_id"] if not info_id: raise RuntimeError("failed to create asset metadata") pair = fetch_asset_info_and_asset(session, asset_info_id=info_id, owner_id=owner_id) if not pair: raise RuntimeError("inconsistent DB state after ingest") info, asset = pair tag_names = get_asset_tags(session, asset_info_id=info.id) created_result = schemas_out.AssetCreated( id=info.id, name=info.name, asset_hash=asset.hash, size=int(asset.size_bytes), mime_type=asset.mime_type, tags=tag_names, user_metadata=info.user_metadata or {}, preview_id=info.preview_id, created_at=info.created_at, last_access_time=info.last_access_time, created_new=result["asset_created"], ) session.commit() return created_result def update_asset( *, asset_info_id: str, name: str | None = None, tags: list[str] | None = None, user_metadata: dict | None = None, owner_id: str = "", ) -> schemas_out.AssetUpdated: with create_session() as session: info_row = get_asset_info_by_id(session, asset_info_id=asset_info_id) if not info_row: raise ValueError(f"AssetInfo {asset_info_id} not found") if info_row.owner_id and info_row.owner_id != owner_id: raise PermissionError("not owner") info = update_asset_info_full( session, asset_info_id=asset_info_id, name=name, tags=tags, 
user_metadata=user_metadata, tag_origin="manual", asset_info_row=info_row, ) tag_names = get_asset_tags(session, asset_info_id=asset_info_id) result = schemas_out.AssetUpdated( id=info.id, name=info.name, asset_hash=info.asset.hash if info.asset else None, tags=tag_names, user_metadata=info.user_metadata or {}, updated_at=info.updated_at, ) session.commit() return result def set_asset_preview( *, asset_info_id: str, preview_asset_id: str | None = None, owner_id: str = "", ) -> schemas_out.AssetDetail: with create_session() as session: info_row = get_asset_info_by_id(session, asset_info_id=asset_info_id) if not info_row: raise ValueError(f"AssetInfo {asset_info_id} not found") if info_row.owner_id and info_row.owner_id != owner_id: raise PermissionError("not owner") set_asset_info_preview( session, asset_info_id=asset_info_id, preview_asset_id=preview_asset_id, ) res = fetch_asset_info_asset_and_tags(session, asset_info_id=asset_info_id, owner_id=owner_id) if not res: raise RuntimeError("State changed during preview update") info, asset, tags = res result = schemas_out.AssetDetail( id=info.id, name=info.name, asset_hash=asset.hash if asset else None, size=int(asset.size_bytes) if asset and asset.size_bytes is not None else None, mime_type=asset.mime_type if asset else None, tags=tags, user_metadata=info.user_metadata or {}, preview_id=info.preview_id, created_at=info.created_at, last_access_time=info.last_access_time, ) session.commit() return result def delete_asset_reference(*, asset_info_id: str, owner_id: str, delete_content_if_orphan: bool = True) -> bool: with create_session() as session: info_row = get_asset_info_by_id(session, asset_info_id=asset_info_id) asset_id = info_row.asset_id if info_row else None deleted = delete_asset_info_by_id(session, asset_info_id=asset_info_id, owner_id=owner_id) if not deleted: session.commit() return False if not delete_content_if_orphan or not asset_id: session.commit() return True still_exists = 
asset_info_exists_for_asset_id(session, asset_id=asset_id) if still_exists: session.commit() return True states = list_cache_states_by_asset_id(session, asset_id=asset_id) file_paths = [s.file_path for s in (states or []) if getattr(s, "file_path", None)] asset_row = session.get(Asset, asset_id) if asset_row is not None: session.delete(asset_row) session.commit() for p in file_paths: with contextlib.suppress(Exception): if p and os.path.isfile(p): os.remove(p) return True def create_asset_from_hash( *, hash_str: str, name: str, tags: list[str] | None = None, user_metadata: dict | None = None, owner_id: str = "", ) -> schemas_out.AssetCreated | None: canonical = hash_str.strip().lower() with create_session() as session: asset = get_asset_by_hash(session, asset_hash=canonical) if not asset: return None info = create_asset_info_for_existing_asset( session, asset_hash=canonical, name=_safe_filename(name, fallback=canonical.split(":", 1)[1]), user_metadata=user_metadata or {}, tags=tags or [], tag_origin="manual", owner_id=owner_id, ) tag_names = get_asset_tags(session, asset_info_id=info.id) result = schemas_out.AssetCreated( id=info.id, name=info.name, asset_hash=asset.hash, size=int(asset.size_bytes), mime_type=asset.mime_type, tags=tag_names, user_metadata=info.user_metadata or {}, preview_id=info.preview_id, created_at=info.created_at, last_access_time=info.last_access_time, created_new=False, ) session.commit() return result def add_tags_to_asset( *, asset_info_id: str, tags: list[str], origin: str = "manual", owner_id: str = "", ) -> schemas_out.TagsAdd: with create_session() as session: info_row = get_asset_info_by_id(session, asset_info_id=asset_info_id) if not info_row: raise ValueError(f"AssetInfo {asset_info_id} not found") if info_row.owner_id and info_row.owner_id != owner_id: raise PermissionError("not owner") data = add_tags_to_asset_info( session, asset_info_id=asset_info_id, tags=tags, origin=origin, create_if_missing=True, asset_info_row=info_row, ) 
session.commit() return schemas_out.TagsAdd(**data) def remove_tags_from_asset( *, asset_info_id: str, tags: list[str], owner_id: str = "", ) -> schemas_out.TagsRemove: with create_session() as session: info_row = get_asset_info_by_id(session, asset_info_id=asset_info_id) if not info_row: raise ValueError(f"AssetInfo {asset_info_id} not found") if info_row.owner_id and info_row.owner_id != owner_id: raise PermissionError("not owner") data = remove_tags_from_asset_info( session, asset_info_id=asset_info_id, tags=tags, ) session.commit() return schemas_out.TagsRemove(**data) def list_tags( prefix: str | None = None, limit: int = 100, offset: int = 0, order: str = "count_desc", include_zero: bool = True, owner_id: str = "", ) -> schemas_out.TagsList: limit = max(1, min(1000, limit)) offset = max(0, offset) with create_session() as session: rows, total = list_tags_with_usage( session, prefix=prefix, limit=limit, offset=offset, include_zero=include_zero, order=order, owner_id=owner_id, ) tags = [schemas_out.TagUsage(name=name, count=count, type=tag_type) for (name, tag_type, count) in rows] return schemas_out.TagsList(tags=tags, total=total, has_more=(offset + len(tags)) < total)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "app/assets/manager.py", "license": "GNU General Public License v3.0", "lines": 450, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:app/assets/scanner.py
import contextlib import time import logging import os import sqlalchemy import folder_paths from app.database.db import create_session, dependencies_available from app.assets.helpers import ( collect_models_files, compute_relative_filename, fast_asset_file_check, get_name_and_tags_from_asset_path, list_tree,prefixes_for_root, escape_like_prefix, RootType ) from app.assets.database.tags import add_missing_tag_for_asset_id, ensure_tags_exist, remove_missing_tag_for_asset_id from app.assets.database.bulk_ops import seed_from_paths_batch from app.assets.database.models import Asset, AssetCacheState, AssetInfo def seed_assets(roots: tuple[RootType, ...], enable_logging: bool = False) -> None: """ Scan the given roots and seed the assets into the database. """ if not dependencies_available(): if enable_logging: logging.warning("Database dependencies not available, skipping assets scan") return t_start = time.perf_counter() created = 0 skipped_existing = 0 orphans_pruned = 0 paths: list[str] = [] try: existing_paths: set[str] = set() for r in roots: try: survivors: set[str] = _fast_db_consistency_pass(r, collect_existing_paths=True, update_missing_tags=True) if survivors: existing_paths.update(survivors) except Exception as e: logging.exception("fast DB scan failed for %s: %s", r, e) try: orphans_pruned = _prune_orphaned_assets(roots) except Exception as e: logging.exception("orphan pruning failed: %s", e) if "models" in roots: paths.extend(collect_models_files()) if "input" in roots: paths.extend(list_tree(folder_paths.get_input_directory())) if "output" in roots: paths.extend(list_tree(folder_paths.get_output_directory())) specs: list[dict] = [] tag_pool: set[str] = set() for p in paths: abs_p = os.path.abspath(p) if abs_p in existing_paths: skipped_existing += 1 continue try: stat_p = os.stat(abs_p, follow_symlinks=False) except OSError: continue # skip empty files if not stat_p.st_size: continue name, tags = get_name_and_tags_from_asset_path(abs_p) specs.append( { 
"abs_path": abs_p, "size_bytes": stat_p.st_size, "mtime_ns": getattr(stat_p, "st_mtime_ns", int(stat_p.st_mtime * 1_000_000_000)), "info_name": name, "tags": tags, "fname": compute_relative_filename(abs_p), } ) for t in tags: tag_pool.add(t) # if no file specs, nothing to do if not specs: return with create_session() as sess: if tag_pool: ensure_tags_exist(sess, tag_pool, tag_type="user") result = seed_from_paths_batch(sess, specs=specs, owner_id="") created += result["inserted_infos"] sess.commit() finally: if enable_logging: logging.info( "Assets scan(roots=%s) completed in %.3fs (created=%d, skipped_existing=%d, orphans_pruned=%d, total_seen=%d)", roots, time.perf_counter() - t_start, created, skipped_existing, orphans_pruned, len(paths), ) def _prune_orphaned_assets(roots: tuple[RootType, ...]) -> int: """Prune cache states outside configured prefixes, then delete orphaned seed assets.""" all_prefixes = [os.path.abspath(p) for r in roots for p in prefixes_for_root(r)] if not all_prefixes: return 0 def make_prefix_condition(prefix: str): base = prefix if prefix.endswith(os.sep) else prefix + os.sep escaped, esc = escape_like_prefix(base) return AssetCacheState.file_path.like(escaped + "%", escape=esc) matches_valid_prefix = sqlalchemy.or_(*[make_prefix_condition(p) for p in all_prefixes]) orphan_subq = ( sqlalchemy.select(Asset.id) .outerjoin(AssetCacheState, AssetCacheState.asset_id == Asset.id) .where(Asset.hash.is_(None), AssetCacheState.id.is_(None)) ).scalar_subquery() with create_session() as sess: sess.execute(sqlalchemy.delete(AssetCacheState).where(~matches_valid_prefix)) sess.execute(sqlalchemy.delete(AssetInfo).where(AssetInfo.asset_id.in_(orphan_subq))) result = sess.execute(sqlalchemy.delete(Asset).where(Asset.id.in_(orphan_subq))) sess.commit() return result.rowcount def _fast_db_consistency_pass( root: RootType, *, collect_existing_paths: bool = False, update_missing_tags: bool = False, ) -> set[str] | None: """Fast DB+FS pass for a root: - Toggle 
needs_verify per state using fast check - For hashed assets with at least one fast-ok state in this root: delete stale missing states - For seed assets with all states missing: delete Asset and its AssetInfos - Optionally add/remove 'missing' tags based on fast-ok in this root - Optionally return surviving absolute paths """ prefixes = prefixes_for_root(root) if not prefixes: return set() if collect_existing_paths else None conds = [] for p in prefixes: base = os.path.abspath(p) if not base.endswith(os.sep): base += os.sep escaped, esc = escape_like_prefix(base) conds.append(AssetCacheState.file_path.like(escaped + "%", escape=esc)) with create_session() as sess: rows = ( sess.execute( sqlalchemy.select( AssetCacheState.id, AssetCacheState.file_path, AssetCacheState.mtime_ns, AssetCacheState.needs_verify, AssetCacheState.asset_id, Asset.hash, Asset.size_bytes, ) .join(Asset, Asset.id == AssetCacheState.asset_id) .where(sqlalchemy.or_(*conds)) .order_by(AssetCacheState.asset_id.asc(), AssetCacheState.id.asc()) ) ).all() by_asset: dict[str, dict] = {} for sid, fp, mtime_db, needs_verify, aid, a_hash, a_size in rows: acc = by_asset.get(aid) if acc is None: acc = {"hash": a_hash, "size_db": int(a_size or 0), "states": []} by_asset[aid] = acc fast_ok = False try: exists = True fast_ok = fast_asset_file_check( mtime_db=mtime_db, size_db=acc["size_db"], stat_result=os.stat(fp, follow_symlinks=True), ) except FileNotFoundError: exists = False except OSError: exists = False acc["states"].append({ "sid": sid, "fp": fp, "exists": exists, "fast_ok": fast_ok, "needs_verify": bool(needs_verify), }) to_set_verify: list[int] = [] to_clear_verify: list[int] = [] stale_state_ids: list[int] = [] survivors: set[str] = set() for aid, acc in by_asset.items(): a_hash = acc["hash"] states = acc["states"] any_fast_ok = any(s["fast_ok"] for s in states) all_missing = all(not s["exists"] for s in states) for s in states: if not s["exists"]: continue if s["fast_ok"] and s["needs_verify"]: 
to_clear_verify.append(s["sid"]) if not s["fast_ok"] and not s["needs_verify"]: to_set_verify.append(s["sid"]) if a_hash is None: if states and all_missing: # remove seed Asset completely, if no valid AssetCache exists sess.execute(sqlalchemy.delete(AssetInfo).where(AssetInfo.asset_id == aid)) asset = sess.get(Asset, aid) if asset: sess.delete(asset) else: for s in states: if s["exists"]: survivors.add(os.path.abspath(s["fp"])) continue if any_fast_ok: # if Asset has at least one valid AssetCache record, remove any invalid AssetCache records for s in states: if not s["exists"]: stale_state_ids.append(s["sid"]) if update_missing_tags: with contextlib.suppress(Exception): remove_missing_tag_for_asset_id(sess, asset_id=aid) elif update_missing_tags: with contextlib.suppress(Exception): add_missing_tag_for_asset_id(sess, asset_id=aid, origin="automatic") for s in states: if s["exists"]: survivors.add(os.path.abspath(s["fp"])) if stale_state_ids: sess.execute(sqlalchemy.delete(AssetCacheState).where(AssetCacheState.id.in_(stale_state_ids))) if to_set_verify: sess.execute( sqlalchemy.update(AssetCacheState) .where(AssetCacheState.id.in_(to_set_verify)) .values(needs_verify=True) ) if to_clear_verify: sess.execute( sqlalchemy.update(AssetCacheState) .where(AssetCacheState.id.in_(to_clear_verify)) .values(needs_verify=False) ) sess.commit() return survivors if collect_existing_paths else None
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "app/assets/scanner.py", "license": "GNU General Public License v3.0", "lines": 236, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy/ldm/lightricks/av_model.py
from typing import Tuple import torch import torch.nn as nn from comfy.ldm.lightricks.model import ( CrossAttention, FeedForward, AdaLayerNormSingle, PixArtAlphaTextProjection, LTXVModel, ) from comfy.ldm.lightricks.symmetric_patchifier import AudioPatchifier from comfy.ldm.lightricks.embeddings_connector import Embeddings1DConnector import comfy.ldm.common_dit class CompressedTimestep: """Store video timestep embeddings in compressed form using per-frame indexing.""" __slots__ = ('data', 'batch_size', 'num_frames', 'patches_per_frame', 'feature_dim') def __init__(self, tensor: torch.Tensor, patches_per_frame: int): """ tensor: [batch_size, num_tokens, feature_dim] tensor where num_tokens = num_frames * patches_per_frame patches_per_frame: Number of spatial patches per frame (height * width in latent space), or None to disable compression """ self.batch_size, num_tokens, self.feature_dim = tensor.shape # Check if compression is valid (num_tokens must be divisible by patches_per_frame) if patches_per_frame is not None and num_tokens % patches_per_frame == 0 and num_tokens >= patches_per_frame: self.patches_per_frame = patches_per_frame self.num_frames = num_tokens // patches_per_frame # Reshape to [batch, frames, patches_per_frame, feature_dim] and store one value per frame # All patches in a frame are identical, so we only keep the first one reshaped = tensor.view(self.batch_size, self.num_frames, patches_per_frame, self.feature_dim) self.data = reshaped[:, :, 0, :].contiguous() # [batch, frames, feature_dim] else: # Not divisible or too small - store directly without compression self.patches_per_frame = 1 self.num_frames = num_tokens self.data = tensor def expand(self): """Expand back to original tensor.""" if self.patches_per_frame == 1: return self.data # [batch, frames, feature_dim] -> [batch, frames, patches_per_frame, feature_dim] -> [batch, tokens, feature_dim] expanded = self.data.unsqueeze(2).expand(self.batch_size, self.num_frames, self.patches_per_frame, 
self.feature_dim) return expanded.reshape(self.batch_size, -1, self.feature_dim) def expand_for_computation(self, scale_shift_table: torch.Tensor, batch_size: int, indices: slice = slice(None, None)): """Compute ada values on compressed per-frame data, then expand spatially.""" num_ada_params = scale_shift_table.shape[0] # No compression - compute directly if self.patches_per_frame == 1: num_tokens = self.data.shape[1] dim_per_param = self.feature_dim // num_ada_params reshaped = self.data.reshape(batch_size, num_tokens, num_ada_params, dim_per_param)[:, :, indices, :] table_values = scale_shift_table[indices].unsqueeze(0).unsqueeze(0).to(device=self.data.device, dtype=self.data.dtype) ada_values = (table_values + reshaped).unbind(dim=2) return ada_values # Compressed: compute on per-frame data then expand spatially # Reshape: [batch, frames, feature_dim] -> [batch, frames, num_ada_params, dim_per_param] frame_reshaped = self.data.reshape(batch_size, self.num_frames, num_ada_params, -1)[:, :, indices, :] table_values = scale_shift_table[indices].unsqueeze(0).unsqueeze(0).to( device=self.data.device, dtype=self.data.dtype ) frame_ada = (table_values + frame_reshaped).unbind(dim=2) # Expand each ada parameter spatially: [batch, frames, dim] -> [batch, frames, patches, dim] -> [batch, tokens, dim] return tuple( frame_val.unsqueeze(2).expand(batch_size, self.num_frames, self.patches_per_frame, -1) .reshape(batch_size, -1, frame_val.shape[-1]) for frame_val in frame_ada ) class BasicAVTransformerBlock(nn.Module): def __init__( self, v_dim, a_dim, v_heads, a_heads, vd_head, ad_head, v_context_dim=None, a_context_dim=None, attn_precision=None, dtype=None, device=None, operations=None, ): super().__init__() self.attn_precision = attn_precision self.attn1 = CrossAttention( query_dim=v_dim, heads=v_heads, dim_head=vd_head, context_dim=None, attn_precision=self.attn_precision, dtype=dtype, device=device, operations=operations, ) self.audio_attn1 = CrossAttention( 
query_dim=a_dim, heads=a_heads, dim_head=ad_head, context_dim=None, attn_precision=self.attn_precision, dtype=dtype, device=device, operations=operations, ) self.attn2 = CrossAttention( query_dim=v_dim, context_dim=v_context_dim, heads=v_heads, dim_head=vd_head, attn_precision=self.attn_precision, dtype=dtype, device=device, operations=operations, ) self.audio_attn2 = CrossAttention( query_dim=a_dim, context_dim=a_context_dim, heads=a_heads, dim_head=ad_head, attn_precision=self.attn_precision, dtype=dtype, device=device, operations=operations, ) # Q: Video, K,V: Audio self.audio_to_video_attn = CrossAttention( query_dim=v_dim, context_dim=a_dim, heads=a_heads, dim_head=ad_head, attn_precision=self.attn_precision, dtype=dtype, device=device, operations=operations, ) # Q: Audio, K,V: Video self.video_to_audio_attn = CrossAttention( query_dim=a_dim, context_dim=v_dim, heads=a_heads, dim_head=ad_head, attn_precision=self.attn_precision, dtype=dtype, device=device, operations=operations, ) self.ff = FeedForward( v_dim, dim_out=v_dim, glu=True, dtype=dtype, device=device, operations=operations ) self.audio_ff = FeedForward( a_dim, dim_out=a_dim, glu=True, dtype=dtype, device=device, operations=operations ) self.scale_shift_table = nn.Parameter(torch.empty(6, v_dim, device=device, dtype=dtype)) self.audio_scale_shift_table = nn.Parameter( torch.empty(6, a_dim, device=device, dtype=dtype) ) self.scale_shift_table_a2v_ca_audio = nn.Parameter( torch.empty(5, a_dim, device=device, dtype=dtype) ) self.scale_shift_table_a2v_ca_video = nn.Parameter( torch.empty(5, v_dim, device=device, dtype=dtype) ) def get_ada_values( self, scale_shift_table: torch.Tensor, batch_size: int, timestep: torch.Tensor, indices: slice = slice(None, None) ): if isinstance(timestep, CompressedTimestep): return timestep.expand_for_computation(scale_shift_table, batch_size, indices) num_ada_params = scale_shift_table.shape[0] ada_values = ( 
scale_shift_table[indices].unsqueeze(0).unsqueeze(0).to(device=timestep.device, dtype=timestep.dtype) + timestep.reshape(batch_size, timestep.shape[1], num_ada_params, -1)[:, :, indices, :] ).unbind(dim=2) return ada_values def get_av_ca_ada_values( self, scale_shift_table: torch.Tensor, batch_size: int, scale_shift_timestep: torch.Tensor, gate_timestep: torch.Tensor, num_scale_shift_values: int = 4, ): scale_shift_ada_values = self.get_ada_values( scale_shift_table[:num_scale_shift_values, :], batch_size, scale_shift_timestep, ) gate_ada_values = self.get_ada_values( scale_shift_table[num_scale_shift_values:, :], batch_size, gate_timestep, ) return (*scale_shift_ada_values, *gate_ada_values) def forward( self, x: Tuple[torch.Tensor, torch.Tensor], v_context=None, a_context=None, attention_mask=None, v_timestep=None, a_timestep=None, v_pe=None, a_pe=None, v_cross_pe=None, a_cross_pe=None, v_cross_scale_shift_timestep=None, a_cross_scale_shift_timestep=None, v_cross_gate_timestep=None, a_cross_gate_timestep=None, transformer_options=None, self_attention_mask=None, ) -> Tuple[torch.Tensor, torch.Tensor]: run_vx = transformer_options.get("run_vx", True) run_ax = transformer_options.get("run_ax", True) vx, ax = x run_ax = run_ax and ax.numel() > 0 run_a2v = run_vx and transformer_options.get("a2v_cross_attn", True) and ax.numel() > 0 run_v2a = run_ax and transformer_options.get("v2a_cross_attn", True) # video if run_vx: # video self-attention vshift_msa, vscale_msa = (self.get_ada_values(self.scale_shift_table, vx.shape[0], v_timestep, slice(0, 2))) norm_vx = comfy.ldm.common_dit.rms_norm(vx) * (1 + vscale_msa) + vshift_msa del vshift_msa, vscale_msa attn1_out = self.attn1(norm_vx, pe=v_pe, mask=self_attention_mask, transformer_options=transformer_options) del norm_vx # video cross-attention vgate_msa = self.get_ada_values(self.scale_shift_table, vx.shape[0], v_timestep, slice(2, 3))[0] vx.addcmul_(attn1_out, vgate_msa) del vgate_msa, attn1_out 
vx.add_(self.attn2(comfy.ldm.common_dit.rms_norm(vx), context=v_context, mask=attention_mask, transformer_options=transformer_options)) # audio if run_ax: # audio self-attention ashift_msa, ascale_msa = (self.get_ada_values(self.audio_scale_shift_table, ax.shape[0], a_timestep, slice(0, 2))) norm_ax = comfy.ldm.common_dit.rms_norm(ax) * (1 + ascale_msa) + ashift_msa del ashift_msa, ascale_msa attn1_out = self.audio_attn1(norm_ax, pe=a_pe, transformer_options=transformer_options) del norm_ax # audio cross-attention agate_msa = self.get_ada_values(self.audio_scale_shift_table, ax.shape[0], a_timestep, slice(2, 3))[0] ax.addcmul_(attn1_out, agate_msa) del agate_msa, attn1_out ax.add_(self.audio_attn2(comfy.ldm.common_dit.rms_norm(ax), context=a_context, mask=attention_mask, transformer_options=transformer_options)) # video - audio cross attention. if run_a2v or run_v2a: vx_norm3 = comfy.ldm.common_dit.rms_norm(vx) ax_norm3 = comfy.ldm.common_dit.rms_norm(ax) # audio to video cross attention if run_a2v: scale_ca_audio_hidden_states_a2v, shift_ca_audio_hidden_states_a2v = self.get_ada_values( self.scale_shift_table_a2v_ca_audio[:4, :], ax.shape[0], a_cross_scale_shift_timestep)[:2] scale_ca_video_hidden_states_a2v_v, shift_ca_video_hidden_states_a2v_v = self.get_ada_values( self.scale_shift_table_a2v_ca_video[:4, :], vx.shape[0], v_cross_scale_shift_timestep)[:2] vx_scaled = vx_norm3 * (1 + scale_ca_video_hidden_states_a2v_v) + shift_ca_video_hidden_states_a2v_v ax_scaled = ax_norm3 * (1 + scale_ca_audio_hidden_states_a2v) + shift_ca_audio_hidden_states_a2v del scale_ca_video_hidden_states_a2v_v, shift_ca_video_hidden_states_a2v_v, scale_ca_audio_hidden_states_a2v, shift_ca_audio_hidden_states_a2v a2v_out = self.audio_to_video_attn(vx_scaled, context=ax_scaled, pe=v_cross_pe, k_pe=a_cross_pe, transformer_options=transformer_options) del vx_scaled, ax_scaled gate_out_a2v = self.get_ada_values(self.scale_shift_table_a2v_ca_video[4:, :], vx.shape[0], 
v_cross_gate_timestep)[0] vx.addcmul_(a2v_out, gate_out_a2v) del gate_out_a2v, a2v_out # video to audio cross attention if run_v2a: scale_ca_audio_hidden_states_v2a, shift_ca_audio_hidden_states_v2a = self.get_ada_values( self.scale_shift_table_a2v_ca_audio[:4, :], ax.shape[0], a_cross_scale_shift_timestep)[2:4] scale_ca_video_hidden_states_v2a, shift_ca_video_hidden_states_v2a = self.get_ada_values( self.scale_shift_table_a2v_ca_video[:4, :], vx.shape[0], v_cross_scale_shift_timestep)[2:4] ax_scaled = ax_norm3 * (1 + scale_ca_audio_hidden_states_v2a) + shift_ca_audio_hidden_states_v2a vx_scaled = vx_norm3 * (1 + scale_ca_video_hidden_states_v2a) + shift_ca_video_hidden_states_v2a del scale_ca_video_hidden_states_v2a, shift_ca_video_hidden_states_v2a, scale_ca_audio_hidden_states_v2a, shift_ca_audio_hidden_states_v2a v2a_out = self.video_to_audio_attn(ax_scaled, context=vx_scaled, pe=a_cross_pe, k_pe=v_cross_pe, transformer_options=transformer_options) del ax_scaled, vx_scaled gate_out_v2a = self.get_ada_values(self.scale_shift_table_a2v_ca_audio[4:, :], ax.shape[0], a_cross_gate_timestep)[0] ax.addcmul_(v2a_out, gate_out_v2a) del gate_out_v2a, v2a_out del vx_norm3, ax_norm3 # video feedforward if run_vx: vshift_mlp, vscale_mlp = self.get_ada_values(self.scale_shift_table, vx.shape[0], v_timestep, slice(3, 5)) vx_scaled = comfy.ldm.common_dit.rms_norm(vx) * (1 + vscale_mlp) + vshift_mlp del vshift_mlp, vscale_mlp ff_out = self.ff(vx_scaled) del vx_scaled vgate_mlp = self.get_ada_values(self.scale_shift_table, vx.shape[0], v_timestep, slice(5, 6))[0] vx.addcmul_(ff_out, vgate_mlp) del vgate_mlp, ff_out # audio feedforward if run_ax: ashift_mlp, ascale_mlp = self.get_ada_values(self.audio_scale_shift_table, ax.shape[0], a_timestep, slice(3, 5)) ax_scaled = comfy.ldm.common_dit.rms_norm(ax) * (1 + ascale_mlp) + ashift_mlp del ashift_mlp, ascale_mlp ff_out = self.audio_ff(ax_scaled) del ax_scaled agate_mlp = self.get_ada_values(self.audio_scale_shift_table, 
ax.shape[0], a_timestep, slice(5, 6))[0] ax.addcmul_(ff_out, agate_mlp) del agate_mlp, ff_out return vx, ax class LTXAVModel(LTXVModel): """LTXAV model for audio-video generation.""" def __init__( self, in_channels=128, audio_in_channels=128, cross_attention_dim=4096, audio_cross_attention_dim=2048, attention_head_dim=128, audio_attention_head_dim=64, num_attention_heads=32, audio_num_attention_heads=32, caption_channels=3840, num_layers=48, positional_embedding_theta=10000.0, positional_embedding_max_pos=[20, 2048, 2048], audio_positional_embedding_max_pos=[20], causal_temporal_positioning=False, vae_scale_factors=(8, 32, 32), use_middle_indices_grid=False, timestep_scale_multiplier=1000.0, av_ca_timestep_scale_multiplier=1.0, dtype=None, device=None, operations=None, **kwargs, ): # Store audio-specific parameters self.audio_in_channels = audio_in_channels self.audio_cross_attention_dim = audio_cross_attention_dim self.audio_attention_head_dim = audio_attention_head_dim self.audio_num_attention_heads = audio_num_attention_heads self.audio_positional_embedding_max_pos = audio_positional_embedding_max_pos # Calculate audio dimensions self.audio_inner_dim = audio_num_attention_heads * audio_attention_head_dim self.audio_out_channels = audio_in_channels # Audio-specific constants self.num_audio_channels = 8 self.audio_frequency_bins = 16 self.av_ca_timestep_scale_multiplier = av_ca_timestep_scale_multiplier super().__init__( in_channels=in_channels, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim, num_attention_heads=num_attention_heads, caption_channels=caption_channels, num_layers=num_layers, positional_embedding_theta=positional_embedding_theta, positional_embedding_max_pos=positional_embedding_max_pos, causal_temporal_positioning=causal_temporal_positioning, vae_scale_factors=vae_scale_factors, use_middle_indices_grid=use_middle_indices_grid, timestep_scale_multiplier=timestep_scale_multiplier, dtype=dtype, device=device, 
operations=operations, **kwargs, ) def _init_model_components(self, device, dtype, **kwargs): """Initialize LTXAV-specific components.""" # Audio-specific projections self.audio_patchify_proj = self.operations.Linear( self.audio_in_channels, self.audio_inner_dim, bias=True, dtype=dtype, device=device ) # Audio-specific AdaLN self.audio_adaln_single = AdaLayerNormSingle( self.audio_inner_dim, use_additional_conditions=False, dtype=dtype, device=device, operations=self.operations, ) num_scale_shift_values = 4 self.av_ca_video_scale_shift_adaln_single = AdaLayerNormSingle( self.inner_dim, use_additional_conditions=False, embedding_coefficient=num_scale_shift_values, dtype=dtype, device=device, operations=self.operations, ) self.av_ca_a2v_gate_adaln_single = AdaLayerNormSingle( self.inner_dim, use_additional_conditions=False, embedding_coefficient=1, dtype=dtype, device=device, operations=self.operations, ) self.av_ca_audio_scale_shift_adaln_single = AdaLayerNormSingle( self.audio_inner_dim, use_additional_conditions=False, embedding_coefficient=num_scale_shift_values, dtype=dtype, device=device, operations=self.operations, ) self.av_ca_v2a_gate_adaln_single = AdaLayerNormSingle( self.audio_inner_dim, use_additional_conditions=False, embedding_coefficient=1, dtype=dtype, device=device, operations=self.operations, ) # Audio caption projection self.audio_caption_projection = PixArtAlphaTextProjection( in_features=self.caption_channels, hidden_size=self.audio_inner_dim, dtype=dtype, device=device, operations=self.operations, ) self.audio_embeddings_connector = Embeddings1DConnector( split_rope=True, double_precision_rope=True, dtype=dtype, device=device, operations=self.operations, ) self.video_embeddings_connector = Embeddings1DConnector( split_rope=True, double_precision_rope=True, dtype=dtype, device=device, operations=self.operations, ) def preprocess_text_embeds(self, context): if context.shape[-1] == self.caption_channels * 2: return context out_vid = 
self.video_embeddings_connector(context)[0] out_audio = self.audio_embeddings_connector(context)[0] return torch.concat((out_vid, out_audio), dim=-1) def _init_transformer_blocks(self, device, dtype, **kwargs): """Initialize transformer blocks for LTXAV.""" self.transformer_blocks = nn.ModuleList( [ BasicAVTransformerBlock( v_dim=self.inner_dim, a_dim=self.audio_inner_dim, v_heads=self.num_attention_heads, a_heads=self.audio_num_attention_heads, vd_head=self.attention_head_dim, ad_head=self.audio_attention_head_dim, v_context_dim=self.cross_attention_dim, a_context_dim=self.audio_cross_attention_dim, dtype=dtype, device=device, operations=self.operations, ) for _ in range(self.num_layers) ] ) def _init_output_components(self, device, dtype): """Initialize output components for LTXAV.""" # Video output components super()._init_output_components(device, dtype) # Audio output components self.audio_scale_shift_table = nn.Parameter( torch.empty(2, self.audio_inner_dim, dtype=dtype, device=device) ) self.audio_norm_out = self.operations.LayerNorm( self.audio_inner_dim, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device ) self.audio_proj_out = self.operations.Linear( self.audio_inner_dim, self.audio_out_channels, dtype=dtype, device=device ) self.a_patchifier = AudioPatchifier(1, start_end=True) def separate_audio_and_video_latents(self, x, audio_length): """Separate audio and video latents from combined input.""" # vx = x[:, : self.in_channels] # ax = x[:, self.in_channels :] # # ax = ax.reshape(ax.shape[0], -1) # ax = ax[:, : audio_length * self.num_audio_channels * self.audio_frequency_bins] # # ax = ax.reshape( # ax.shape[0], self.num_audio_channels, audio_length, self.audio_frequency_bins # ) vx = x[0] ax = x[1] if len(x) > 1 else torch.zeros( (vx.shape[0], self.num_audio_channels, 0, self.audio_frequency_bins), device=vx.device, dtype=vx.dtype ) return vx, ax def recombine_audio_and_video_latents(self, vx, ax, target_shape=None): if ax.numel() == 0: 
return vx else: return [vx, ax] """Recombine audio and video latents for output.""" # if ax.device != vx.device or ax.dtype != vx.dtype: # logging.warning("Audio and video latents are on different devices or dtypes.") # ax = ax.to(device=vx.device, dtype=vx.dtype) # logging.warning(f"Audio audio latent moved to device: {ax.device}, dtype: {ax.dtype}") # # ax = ax.reshape(ax.shape[0], -1) # # pad to f x h x w of the video latents # divisor = vx.shape[-1] * vx.shape[-2] * vx.shape[-3] # if target_shape is None: # repetitions = math.ceil(ax.shape[-1] / divisor) # else: # repetitions = target_shape[1] - vx.shape[1] # padded_len = repetitions * divisor # ax = F.pad(ax, (0, padded_len - ax.shape[-1])) # ax = ax.reshape(ax.shape[0], -1, vx.shape[-3], vx.shape[-2], vx.shape[-1]) # return torch.cat([vx, ax], dim=1) def _process_input(self, x, keyframe_idxs, denoise_mask, **kwargs): """Process input for LTXAV - separate audio and video, then patchify.""" audio_length = kwargs.get("audio_length", 0) # Separate audio and video latents vx, ax = self.separate_audio_and_video_latents(x, audio_length) has_spatial_mask = False if denoise_mask is not None: # check if any frame has spatial variation (inpainting) for frame_idx in range(denoise_mask.shape[2]): frame_mask = denoise_mask[0, 0, frame_idx] if frame_mask.numel() > 0 and frame_mask.min() != frame_mask.max(): has_spatial_mask = True break [vx, v_pixel_coords, additional_args] = super()._process_input( vx, keyframe_idxs, denoise_mask, **kwargs ) additional_args["has_spatial_mask"] = has_spatial_mask ax, a_latent_coords = self.a_patchifier.patchify(ax) ax = self.audio_patchify_proj(ax) # additional_args.update({"av_orig_shape": list(x.shape)}) return [vx, ax], [v_pixel_coords, a_latent_coords], additional_args def _prepare_timestep(self, timestep, batch_size, hidden_dtype, **kwargs): """Prepare timestep embeddings.""" # TODO: some code reuse is needed here. 
grid_mask = kwargs.get("grid_mask", None) if grid_mask is not None: timestep = timestep[:, grid_mask] timestep_scaled = timestep * self.timestep_scale_multiplier v_timestep, v_embedded_timestep = self.adaln_single( timestep_scaled.flatten(), {"resolution": None, "aspect_ratio": None}, batch_size=batch_size, hidden_dtype=hidden_dtype, ) # Calculate patches_per_frame from orig_shape: [batch, channels, frames, height, width] # Video tokens are arranged as (frames * height * width), so patches_per_frame = height * width orig_shape = kwargs.get("orig_shape") has_spatial_mask = kwargs.get("has_spatial_mask", None) v_patches_per_frame = None if not has_spatial_mask and orig_shape is not None and len(orig_shape) == 5: # orig_shape[3] = height, orig_shape[4] = width (in latent space) v_patches_per_frame = orig_shape[3] * orig_shape[4] # Reshape to [batch_size, num_tokens, dim] and compress for storage v_timestep = CompressedTimestep(v_timestep.view(batch_size, -1, v_timestep.shape[-1]), v_patches_per_frame) v_embedded_timestep = CompressedTimestep(v_embedded_timestep.view(batch_size, -1, v_embedded_timestep.shape[-1]), v_patches_per_frame) # Prepare audio timestep a_timestep = kwargs.get("a_timestep") if a_timestep is not None: a_timestep_scaled = a_timestep * self.timestep_scale_multiplier a_timestep_flat = a_timestep_scaled.flatten() timestep_flat = timestep_scaled.flatten() av_ca_factor = self.av_ca_timestep_scale_multiplier / self.timestep_scale_multiplier # Cross-attention timesteps - compress these too av_ca_audio_scale_shift_timestep, _ = self.av_ca_audio_scale_shift_adaln_single( a_timestep_flat, {"resolution": None, "aspect_ratio": None}, batch_size=batch_size, hidden_dtype=hidden_dtype, ) av_ca_video_scale_shift_timestep, _ = self.av_ca_video_scale_shift_adaln_single( timestep_flat, {"resolution": None, "aspect_ratio": None}, batch_size=batch_size, hidden_dtype=hidden_dtype, ) av_ca_a2v_gate_noise_timestep, _ = self.av_ca_a2v_gate_adaln_single( timestep_flat * 
            # NOTE(review): this view begins mid-method — the enclosing
            # timestep-preparation method and its class header are above this
            # chunk. The leading lines are the tail of AdaLN-single calls that
            # produce per-token modulation for the A/V cross-attention layers.
            av_ca_factor,
            {"resolution": None, "aspect_ratio": None},
            batch_size=batch_size,
            hidden_dtype=hidden_dtype,
        )
        av_ca_v2a_gate_noise_timestep, _ = self.av_ca_v2a_gate_adaln_single(
            a_timestep_flat * av_ca_factor,
            {"resolution": None, "aspect_ratio": None},
            batch_size=batch_size,
            hidden_dtype=hidden_dtype,
        )

        # Compress cross-attention timesteps (only video side, audio is too small to benefit)
        # v_patches_per_frame is None for spatial masks, set for temporal masks or no mask
        cross_av_timestep_ss = [
            av_ca_audio_scale_shift_timestep.view(batch_size, -1, av_ca_audio_scale_shift_timestep.shape[-1]),
            CompressedTimestep(av_ca_video_scale_shift_timestep.view(batch_size, -1, av_ca_video_scale_shift_timestep.shape[-1]), v_patches_per_frame),  # video - compressed if possible
            CompressedTimestep(av_ca_a2v_gate_noise_timestep.view(batch_size, -1, av_ca_a2v_gate_noise_timestep.shape[-1]), v_patches_per_frame),  # video - compressed if possible
            av_ca_v2a_gate_noise_timestep.view(batch_size, -1, av_ca_v2a_gate_noise_timestep.shape[-1]),
        ]

        a_timestep, a_embedded_timestep = self.audio_adaln_single(
            a_timestep_flat,
            {"resolution": None, "aspect_ratio": None},
            batch_size=batch_size,
            hidden_dtype=hidden_dtype,
        )

        # Audio timesteps
        a_timestep = a_timestep.view(batch_size, -1, a_timestep.shape[-1])
        a_embedded_timestep = a_embedded_timestep.view(batch_size, -1, a_embedded_timestep.shape[-1])
    else:
        # No audio branch active: reuse the (already scaled) video timestep and
        # any caller-supplied embedded timestep; no A/V cross-attn modulation.
        a_timestep = timestep_scaled
        a_embedded_timestep = kwargs.get("embedded_timestep")
        cross_av_timestep_ss = []

    # Returns ([video_ts, audio_ts, cross_attn_ts_list], [video_emb_ts, audio_emb_ts]).
    return [v_timestep, a_timestep, cross_av_timestep_ss], [
        v_embedded_timestep,
        a_embedded_timestep,
    ]

    def _prepare_context(self, context, batch_size, x, attention_mask=None):
        """Split the joint conditioning into video/audio halves.

        The last dim of `context` is assumed to hold video features followed by
        audio features of equal width (hence the split at half width). The video
        half is prepared by the parent class; the audio half is optionally
        projected to the audio token width.
        """
        vx = x[0]
        ax = x[1]
        # Split conditioning along the channel (last) dimension into two equal halves.
        v_context, a_context = torch.split(
            context, int(context.shape[-1] / 2), len(context.shape) - 1
        )
        v_context, attention_mask = super()._prepare_context(
            v_context, batch_size, vx, attention_mask
        )
        if self.audio_caption_projection is not None:
            a_context = self.audio_caption_projection(a_context)
            # Reshape to (batch, tokens, audio_dim) to match the audio stream width.
            a_context = a_context.view(batch_size, -1, ax.shape[-1])
        return [v_context, a_context], attention_mask

    def _prepare_positional_embeddings(self, pixel_coords, frame_rate, x_dtype):
        """Build rotary embeddings for video, audio, and the A/V cross-attention.

        Returns [(video_pe, cross_video_pe), (audio_pe, cross_audio_pe)].
        The cross-attention embeddings use a shared max_pos so video and audio
        token positions live on a common (time) scale.
        """
        v_pixel_coords = pixel_coords[0]
        v_pe = super()._prepare_positional_embeddings(v_pixel_coords, frame_rate, x_dtype)

        a_latent_coords = pixel_coords[1]
        a_pe = self._precompute_freqs_cis(
            a_latent_coords,
            dim=self.audio_inner_dim,
            out_dtype=x_dtype,
            max_pos=self.audio_positional_embedding_max_pos,
            use_middle_indices_grid=self.use_middle_indices_grid,
            num_attention_heads=self.audio_num_attention_heads,
        )

        # calculate positional embeddings for the middle of the token duration, to use in av cross attention layers.
        max_pos = max(
            self.positional_embedding_max_pos[0], self.audio_positional_embedding_max_pos[0]
        )
        # Convert the video temporal coordinate (axis 0) from frame index to seconds.
        v_pixel_coords = v_pixel_coords.to(torch.float32)
        v_pixel_coords[:, 0] = v_pixel_coords[:, 0] * (1.0 / frame_rate)
        av_cross_video_freq_cis = self._precompute_freqs_cis(
            v_pixel_coords[:, 0:1, :],
            dim=self.audio_cross_attention_dim,
            out_dtype=x_dtype,
            max_pos=[max_pos],
            use_middle_indices_grid=True,
            num_attention_heads=self.audio_num_attention_heads,
        )
        av_cross_audio_freq_cis = self._precompute_freqs_cis(
            a_latent_coords[:, 0:1, :],
            dim=self.audio_cross_attention_dim,
            out_dtype=x_dtype,
            max_pos=[max_pos],
            use_middle_indices_grid=True,
            num_attention_heads=self.audio_num_attention_heads,
        )
        return [(v_pe, av_cross_video_freq_cis), (a_pe, av_cross_audio_freq_cis)]

    def _process_transformer_blocks(
        self, x, context, attention_mask, timestep, pe, transformer_options={}, self_attention_mask=None, **kwargs
    ):
        """Run the joint audio/video transformer stack.

        `x`, `context`, `timestep`, `pe` are [video, audio] pairs; timestep[2]
        carries the four A/V cross-attention modulation tensors. Supports the
        ComfyUI "patches_replace"/"dit" block-replacement hook per block index.
        """
        vx = x[0]
        ax = x[1]
        v_context = context[0]
        a_context = context[1]
        v_timestep = timestep[0]
        a_timestep = timestep[1]
        v_pe, av_cross_video_freq_cis = pe[0]
        a_pe, av_cross_audio_freq_cis = pe[1]
        (
            av_ca_audio_scale_shift_timestep,
            av_ca_video_scale_shift_timestep,
            av_ca_a2v_gate_noise_timestep,
            av_ca_v2a_gate_noise_timestep,
        ) = timestep[2]
        """Process transformer blocks for LTXAV."""
        patches_replace = transformer_options.get("patches_replace", {})
        blocks_replace = patches_replace.get("dit", {})

        # Process transformer blocks
        for i, block in enumerate(self.transformer_blocks):
            if ("double_block", i) in blocks_replace:
                # Wrap the original block so a replacement patch can call it
                # with the same keyword interface.
                def block_wrap(args):
                    out = {}
                    out["img"] = block(
                        args["img"],
                        v_context=args["v_context"],
                        a_context=args["a_context"],
                        attention_mask=args["attention_mask"],
                        v_timestep=args["v_timestep"],
                        a_timestep=args["a_timestep"],
                        v_pe=args["v_pe"],
                        a_pe=args["a_pe"],
                        v_cross_pe=args["v_cross_pe"],
                        a_cross_pe=args["a_cross_pe"],
                        v_cross_scale_shift_timestep=args["v_cross_scale_shift_timestep"],
                        a_cross_scale_shift_timestep=args["a_cross_scale_shift_timestep"],
                        v_cross_gate_timestep=args["v_cross_gate_timestep"],
                        a_cross_gate_timestep=args["a_cross_gate_timestep"],
                        transformer_options=args["transformer_options"],
                        self_attention_mask=args.get("self_attention_mask"),
                    )
                    return out

                out = blocks_replace[("double_block", i)](
                    {
                        "img": (vx, ax),
                        "v_context": v_context,
                        "a_context": a_context,
                        "attention_mask": attention_mask,
                        "v_timestep": v_timestep,
                        "a_timestep": a_timestep,
                        "v_pe": v_pe,
                        "a_pe": a_pe,
                        "v_cross_pe": av_cross_video_freq_cis,
                        "a_cross_pe": av_cross_audio_freq_cis,
                        "v_cross_scale_shift_timestep": av_ca_video_scale_shift_timestep,
                        "a_cross_scale_shift_timestep": av_ca_audio_scale_shift_timestep,
                        "v_cross_gate_timestep": av_ca_a2v_gate_noise_timestep,
                        "a_cross_gate_timestep": av_ca_v2a_gate_noise_timestep,
                        "transformer_options": transformer_options,
                        "self_attention_mask": self_attention_mask,
                    },
                    {"original_block": block_wrap},
                )
                vx, ax = out["img"]
            else:
                vx, ax = block(
                    (vx, ax),
                    v_context=v_context,
                    a_context=a_context,
                    attention_mask=attention_mask,
                    v_timestep=v_timestep,
                    a_timestep=a_timestep,
                    v_pe=v_pe,
                    a_pe=a_pe,
                    v_cross_pe=av_cross_video_freq_cis,
                    a_cross_pe=av_cross_audio_freq_cis,
                    v_cross_scale_shift_timestep=av_ca_video_scale_shift_timestep,
                    a_cross_scale_shift_timestep=av_ca_audio_scale_shift_timestep,
                    v_cross_gate_timestep=av_ca_a2v_gate_noise_timestep,
                    a_cross_gate_timestep=av_ca_v2a_gate_noise_timestep,
                    transformer_options=transformer_options,
                    self_attention_mask=self_attention_mask,
                )

        return [vx, ax]

    def _process_output(self, x, embedded_timestep, keyframe_idxs, **kwargs):
        """Project video/audio streams back to latent space and recombine.

        Video goes through the parent's output path; audio gets its own
        scale/shift modulation, norm, projection, and un-patchify.
        """
        vx = x[0]
        ax = x[1]
        v_embedded_timestep = embedded_timestep[0]
        a_embedded_timestep = embedded_timestep[1]

        # Expand compressed video timestep if needed
        if isinstance(v_embedded_timestep, CompressedTimestep):
            v_embedded_timestep = v_embedded_timestep.expand()

        vx = super()._process_output(vx, v_embedded_timestep, keyframe_idxs, **kwargs)

        # Process audio output
        a_scale_shift_values = (
            self.audio_scale_shift_table[None, None].to(device=a_embedded_timestep.device, dtype=a_embedded_timestep.dtype)
            + a_embedded_timestep[:, :, None]
        )
        a_shift, a_scale = a_scale_shift_values[:, :, 0], a_scale_shift_values[:, :, 1]
        ax = self.audio_norm_out(ax)
        ax = ax * (1 + a_scale) + a_shift
        ax = self.audio_proj_out(ax)

        # Unpatchify audio
        ax = self.a_patchifier.unpatchify(
            ax, channels=self.num_audio_channels, freq=self.audio_frequency_bins
        )

        # Recombine audio and video
        original_shape = kwargs.get("av_orig_shape")
        return self.recombine_audio_and_video_latents(vx, ax, original_shape)

    def forward(
        self,
        x,
        timestep,
        context,
        attention_mask=None,
        frame_rate=25,
        transformer_options={},
        keyframe_idxs=None,
        **kwargs,
    ):
        """
        Forward pass for LTXAV model.

        Args:
            x: Combined audio-video input tensor
            timestep: Tuple of (video_timestep, audio_timestep) or single timestep
            context: Context tensor (e.g., text embeddings)
            attention_mask: Attention mask tensor
            frame_rate: Frame rate for temporal processing
            transformer_options: Additional options for transformer blocks
            keyframe_idxs: Keyframe indices for temporal processing
            **kwargs: Additional keyword arguments including audio_length

        Returns:
            Combined audio-video output tensor
        """
        # Handle timestep format: stash the audio timestep in kwargs and pass
        # only the video timestep to the parent forward.
        if isinstance(timestep, (tuple, list)) and len(timestep) == 2:
            v_timestep, a_timestep = timestep
            kwargs["a_timestep"] = a_timestep
            timestep = v_timestep
        else:
            # Single timestep drives both streams.
            kwargs["a_timestep"] = timestep

        # Call parent forward method
        return super().forward(
            x,
            timestep,
            context,
            attention_mask,
            frame_rate,
            transformer_options,
            keyframe_idxs,
            **kwargs,
        )
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/ldm/lightricks/av_model.py", "license": "GNU General Public License v3.0", "lines": 794, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy/ldm/lightricks/embeddings_connector.py
import math
from typing import Optional

import comfy.ldm.common_dit
import torch
from comfy.ldm.lightricks.model import (
    CrossAttention,
    FeedForward,
    generate_freq_grid_np,
    interleaved_freqs_cis,
    split_freqs_cis,
)
from torch import nn


class BasicTransformerBlock1D(nn.Module):
    r"""
    A minimal pre-norm transformer block for 1D token sequences.

    Structure: RMSNorm -> self-attention -> residual, then RMSNorm ->
    feed-forward (GEGLU) -> residual. There is no cross-attention; the
    attention layer is built with ``context_dim=None``.

    Parameters:
        dim (`int`): Channel width of the input/output tokens.
        n_heads (`int`): Number of attention heads.
        d_head (`int`): Channels per attention head.
        context_dim (`int`, *optional*): Accepted for constructor symmetry with
            other blocks; unused (self-attention only).
        attn_precision: Accepted for signature compatibility; unused.
        dtype, device, operations: Passed through to the comfy op wrappers.
    """

    def __init__(
        self,
        dim,
        n_heads,
        d_head,
        context_dim=None,
        attn_precision=None,
        dtype=None,
        device=None,
        operations=None,
    ):
        super().__init__()
        # 1. Self-attention (no cross-attention context).
        self.attn1 = CrossAttention(
            query_dim=dim,
            heads=n_heads,
            dim_head=d_head,
            context_dim=None,
            dtype=dtype,
            device=device,
            operations=operations,
        )

        # 2. Feed-forward with a gated linear unit.
        self.ff = FeedForward(
            dim,
            dim_out=dim,
            glu=True,
            dtype=dtype,
            device=device,
            operations=operations,
        )

    def forward(self, hidden_states, attention_mask=None, pe=None) -> torch.FloatTensor:
        """Apply pre-norm self-attention and feed-forward, each with a residual.

        ``pe`` is the rotary embedding tuple produced by
        ``Embeddings1DConnector.precompute_freqs_cis``.
        """
        # Pre-norm before self-attention.
        norm_hidden_states = comfy.ldm.common_dit.rms_norm(hidden_states)
        norm_hidden_states = norm_hidden_states.squeeze(1)

        attn_output = self.attn1(norm_hidden_states, mask=attention_mask, pe=pe)
        hidden_states = attn_output + hidden_states
        if hidden_states.ndim == 4:
            hidden_states = hidden_states.squeeze(1)

        # Pre-norm before feed-forward.
        norm_hidden_states = comfy.ldm.common_dit.rms_norm(hidden_states)
        ff_output = self.ff(norm_hidden_states)
        hidden_states = ff_output + hidden_states
        if hidden_states.ndim == 4:
            hidden_states = hidden_states.squeeze(1)

        return hidden_states


class Embeddings1DConnector(nn.Module):
    """A small 1D transformer that reprocesses embedding sequences.

    Optionally pads the sequence with learnable "register" tokens up to at
    least 1024 positions, applies rotary position embeddings over the 1D token
    index, runs ``num_layers`` self-attention blocks, and RMS-normalizes the
    output.

    Returns ``(hidden_states, attention_mask)`` from :meth:`forward`.
    """

    _supports_gradient_checkpointing = True

    def __init__(
        self,
        in_channels=128,
        cross_attention_dim=2048,
        attention_head_dim=128,
        num_attention_heads=30,
        num_layers=2,
        positional_embedding_theta=10000.0,
        positional_embedding_max_pos=[4096],
        causal_temporal_positioning=False,
        num_learnable_registers: Optional[int] = 128,
        dtype=None,
        device=None,
        operations=None,
        split_rope=False,
        double_precision_rope=False,
        **kwargs,
    ):
        super().__init__()
        self.dtype = dtype
        self.out_channels = in_channels
        self.num_attention_heads = num_attention_heads
        self.inner_dim = num_attention_heads * attention_head_dim
        self.causal_temporal_positioning = causal_temporal_positioning
        self.positional_embedding_theta = positional_embedding_theta
        self.positional_embedding_max_pos = positional_embedding_max_pos
        self.split_rope = split_rope
        self.double_precision_rope = double_precision_rope

        self.transformer_1d_blocks = nn.ModuleList(
            [
                BasicTransformerBlock1D(
                    self.inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    context_dim=cross_attention_dim,
                    dtype=dtype,
                    device=device,
                    operations=operations,
                )
                for _ in range(num_layers)
            ]
        )

        inner_dim = num_attention_heads * attention_head_dim
        self.num_learnable_registers = num_learnable_registers
        if self.num_learnable_registers:
            # Learnable register tokens appended after the real tokens in forward().
            self.learnable_registers = nn.Parameter(
                torch.empty(
                    self.num_learnable_registers, inner_dim, dtype=dtype, device=device
                )
            )

    def get_fractional_positions(self, indices_grid):
        """Map absolute 1D positions to [0, 1) fractions of max_pos."""
        fractional_positions = torch.stack(
            [
                indices_grid[:, i] / self.positional_embedding_max_pos[i]
                for i in range(1)
            ],
            dim=-1,
        )
        return fractional_positions

    def precompute_freqs(self, indices_grid, spacing):
        """Compute raw RoPE angle tensor for the given position grid.

        Uses the double-precision numpy grid when ``double_precision_rope`` is
        set, otherwise the torch grid from :meth:`generate_freq_grid`.
        """
        source_dtype = indices_grid.dtype
        # Up-cast half-precision grids so the angle math stays accurate.
        dtype = (
            torch.float32
            if source_dtype in (torch.bfloat16, torch.float16)
            else source_dtype
        )
        fractional_positions = self.get_fractional_positions(indices_grid)
        indices = (
            generate_freq_grid_np(
                self.positional_embedding_theta,
                indices_grid.shape[1],
                self.inner_dim,
            )
            if self.double_precision_rope
            else self.generate_freq_grid(spacing, dtype, fractional_positions.device)
        ).to(device=fractional_positions.device)

        if spacing == "exp_2":
            freqs = (
                (indices * fractional_positions.unsqueeze(-1))
                .transpose(-1, -2)
                .flatten(2)
            )
        else:
            # Other spacings use positions remapped from [0,1) to [-1,1).
            freqs = (
                (indices * (fractional_positions.unsqueeze(-1) * 2 - 1))
                .transpose(-1, -2)
                .flatten(2)
            )
        return freqs

    def generate_freq_grid(self, spacing, dtype, device):
        """Build the per-frequency multiplier grid for one positional axis.

        Raises:
            ValueError: If ``spacing`` is not one of
                ``"exp"``, ``"exp_2"``, ``"linear"``, ``"sqrt"``.
        """
        dim = self.inner_dim
        theta = self.positional_embedding_theta
        n_pos_dims = 1
        n_elem = 2 * n_pos_dims  # 2 for cos and sin e.g. x 3 = 6
        start = 1
        end = theta
        if spacing == "exp":
            # Computed on CPU in float32 first for numerical reproducibility.
            indices = theta ** (
                torch.arange(0, dim, n_elem, device="cpu", dtype=torch.float32)
                / (dim - n_elem)
            )
            indices = indices.to(dtype=dtype, device=device)
        elif spacing == "exp_2":
            indices = 1.0 / theta ** (torch.arange(0, dim, n_elem, device=device) / dim)
            indices = indices.to(dtype=dtype)
        elif spacing == "linear":
            indices = torch.linspace(
                start, end, dim // n_elem, device=device, dtype=dtype
            )
        elif spacing == "sqrt":
            indices = torch.linspace(
                start**2, end**2, dim // n_elem, device=device, dtype=dtype
            ).sqrt()
        else:
            # BUGFIX: previously an unknown spacing fell through with `indices`
            # unbound and raised an opaque NameError; fail fast instead.
            raise ValueError(
                f"Unsupported RoPE spacing: {spacing!r}; "
                "expected one of 'exp', 'exp_2', 'linear', 'sqrt'"
            )

        indices = indices * math.pi / 2
        return indices

    def precompute_freqs_cis(self, indices_grid, spacing="exp", out_dtype=None):
        """Return ``(cos, sin, split_rope_flag)`` rotary tables for the grid."""
        dim = self.inner_dim
        n_elem = 2  # 2 because of cos and sin
        freqs = self.precompute_freqs(indices_grid, spacing)

        if self.split_rope:
            # Pad up to dim // 2 frequencies before splitting across heads.
            expected_freqs = dim // 2
            current_freqs = freqs.shape[-1]
            pad_size = expected_freqs - current_freqs
            cos_freq, sin_freq = split_freqs_cis(
                freqs, pad_size, self.num_attention_heads
            )
        else:
            cos_freq, sin_freq = interleaved_freqs_cis(freqs, dim % n_elem)

        return cos_freq.to(dtype=out_dtype), sin_freq.to(dtype=out_dtype), self.split_rope

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ):
        """Run the connector over a token sequence.

        Args:
            hidden_states: Tokens of shape ``(batch, seq, inner_dim)``
                (assumed from the register/padding logic — confirm at callers).
            attention_mask: Optional additive attention bias. When registers
                are enabled it is replaced by an all-zero (fully permissive)
                mask matching the padded length.

        Returns:
            Tuple of (RMS-normalized hidden states, attention mask).
        """
        # 1. Pad the sequence with learnable register tokens to at least 1024
        #    positions (a whole number of register-bank repetitions).
        if self.num_learnable_registers:
            num_registers_duplications = math.ceil(
                max(1024, hidden_states.shape[1]) / self.num_learnable_registers
            )
            learnable_registers = torch.tile(
                self.learnable_registers.to(hidden_states),
                (num_registers_duplications, 1),
            )
            hidden_states = torch.cat(
                (
                    hidden_states,
                    learnable_registers[hidden_states.shape[1]:]
                    .unsqueeze(0)
                    .repeat(hidden_states.shape[0], 1, 1),
                ),
                dim=1,
            )
            if attention_mask is not None:
                # The original mask no longer matches the padded length;
                # replace with a zero bias (no masking) over all tokens.
                attention_mask = torch.zeros(
                    [1, 1, 1, hidden_states.shape[1]],
                    dtype=attention_mask.dtype,
                    device=attention_mask.device,
                )

        # Rotary embeddings over the plain 1D token index.
        indices_grid = torch.arange(
            hidden_states.shape[1], dtype=torch.float32, device=hidden_states.device
        )
        indices_grid = indices_grid[None, None, :]
        freqs_cis = self.precompute_freqs_cis(indices_grid, out_dtype=hidden_states.dtype)

        # 2. Transformer blocks.
        for block in self.transformer_1d_blocks:
            hidden_states = block(
                hidden_states, attention_mask=attention_mask, pe=freqs_cis
            )

        # 3. Final normalization.
        hidden_states = comfy.ldm.common_dit.rms_norm(hidden_states)

        return hidden_states, attention_mask
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/ldm/lightricks/embeddings_connector.py", "license": "GNU General Public License v3.0", "lines": 262, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy/ldm/lightricks/latent_upsampler.py
"""Spatial/temporal latent upsampler modules for the Lightricks (LTX) VAE latents."""

from typing import Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange


def _rational_for_scale(scale: float) -> Tuple[int, int]:
    """Return the (numerator, denominator) pair realizing a supported rational scale.

    Raises:
        ValueError: If ``scale`` is not one of 0.75, 1.5, 2.0, 4.0.
    """
    mapping = {0.75: (3, 4), 1.5: (3, 2), 2.0: (2, 1), 4.0: (4, 1)}
    if float(scale) not in mapping:
        raise ValueError(
            f"Unsupported spatial_scale {scale}. Choose from {list(mapping.keys())}"
        )
    return mapping[float(scale)]


class PixelShuffleND(nn.Module):
    """Pixel-shuffle upsampling generalized to 1, 2, or 3 trailing spatial dims.

    Channels are folded into the spatial axes: dims=3 expands (d, h, w),
    dims=2 expands (h, w), dims=1 expands only the frame axis of a 5D tensor.
    """

    def __init__(self, dims, upscale_factors=(2, 2, 2)):
        super().__init__()
        assert dims in [1, 2, 3], "dims must be 1, 2, or 3"
        self.dims = dims
        self.upscale_factors = upscale_factors

    def forward(self, x):
        if self.dims == 3:
            return rearrange(
                x,
                "b (c p1 p2 p3) d h w -> b c (d p1) (h p2) (w p3)",
                p1=self.upscale_factors[0],
                p2=self.upscale_factors[1],
                p3=self.upscale_factors[2],
            )
        elif self.dims == 2:
            return rearrange(
                x,
                "b (c p1 p2) h w -> b c (h p1) (w p2)",
                p1=self.upscale_factors[0],
                p2=self.upscale_factors[1],
            )
        elif self.dims == 1:
            # Note: input is 5D here; only the frame axis is expanded.
            return rearrange(
                x,
                "b (c p1) f h w -> b c (f p1) h w",
                p1=self.upscale_factors[0],
            )


class BlurDownsample(nn.Module):
    """
    Anti-aliased spatial downsampling by integer stride using a fixed separable
    binomial kernel. Applies only on H,W. Works for dims=2 or dims=3 (per-frame).
    """

    def __init__(self, dims: int, stride: int):
        super().__init__()
        assert dims in (2, 3)
        assert stride >= 1 and isinstance(stride, int)
        self.dims = dims
        self.stride = stride

        # 5x5 separable binomial kernel [1,4,6,4,1] (outer product), normalized
        k = torch.tensor([1.0, 4.0, 6.0, 4.0, 1.0])
        k2d = k[:, None] @ k[None, :]
        k2d = (k2d / k2d.sum()).float()  # shape (5,5)
        # Registered as a buffer so it follows the module across devices/dtypes.
        self.register_buffer("kernel", k2d[None, None, :, :])  # (1,1,5,5)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # stride == 1 means no resampling; blur would only soften the signal.
        if self.stride == 1:
            return x

        def _apply_2d(x2d: torch.Tensor) -> torch.Tensor:
            # x2d: (B, C, H, W)
            B, C, H, W = x2d.shape
            weight = self.kernel.expand(C, 1, 5, 5)  # depthwise
            x2d = F.conv2d(
                x2d, weight=weight, bias=None, stride=self.stride, padding=2, groups=C
            )
            return x2d

        if self.dims == 2:
            return _apply_2d(x)
        else:
            # dims == 3: apply per-frame on H,W
            b, c, f, h, w = x.shape
            x = rearrange(x, "b c f h w -> (b f) c h w")
            x = _apply_2d(x)
            h2, w2 = x.shape[-2:]
            x = rearrange(x, "(b f) c h w -> b c f h w", b=b, f=f, h=h2, w=w2)
            return x


class SpatialRationalResampler(nn.Module):
    """
    Fully-learned rational spatial scaling: up by 'num' via PixelShuffle, then
    anti-aliased downsample by 'den' using fixed blur + stride.
    Operates on H,W only. For dims==3, work per-frame for spatial scaling
    (temporal axis untouched).
    """

    def __init__(self, mid_channels: int, scale: float):
        super().__init__()
        self.scale = float(scale)
        self.num, self.den = _rational_for_scale(self.scale)
        # Conv expands channels by num^2 so PixelShuffle can fold them into H,W.
        self.conv = nn.Conv2d(
            mid_channels, (self.num**2) * mid_channels, kernel_size=3, padding=1
        )
        self.pixel_shuffle = PixelShuffleND(2, upscale_factors=(self.num, self.num))
        self.blur_down = BlurDownsample(dims=2, stride=self.den)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Input is 5D (b, c, f, h, w); scaling is applied per-frame.
        b, c, f, h, w = x.shape
        x = rearrange(x, "b c f h w -> (b f) c h w")
        x = self.conv(x)
        x = self.pixel_shuffle(x)
        x = self.blur_down(x)
        x = rearrange(x, "(b f) c h w -> b c f h w", b=b, f=f)
        return x


class ResBlock(nn.Module):
    """Conv-norm-act residual block; 2D or 3D convolutions depending on ``dims``."""

    def __init__(
        self, channels: int, mid_channels: Optional[int] = None, dims: int = 3
    ):
        super().__init__()
        if mid_channels is None:
            mid_channels = channels

        Conv = nn.Conv2d if dims == 2 else nn.Conv3d

        self.conv1 = Conv(channels, mid_channels, kernel_size=3, padding=1)
        self.norm1 = nn.GroupNorm(32, mid_channels)
        self.conv2 = Conv(mid_channels, channels, kernel_size=3, padding=1)
        self.norm2 = nn.GroupNorm(32, channels)
        self.activation = nn.SiLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        residual = x
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.activation(x)
        x = self.conv2(x)
        x = self.norm2(x)
        # Activation applied after the residual add.
        x = self.activation(x + residual)
        return x


class LatentUpsampler(nn.Module):
    """
    Model to spatially upsample VAE latents.

    Args:
        in_channels (`int`): Number of channels in the input latent
        mid_channels (`int`): Number of channels in the middle layers
        num_blocks_per_stage (`int`): Number of ResBlocks to use in each stage (pre/post upsampling)
        dims (`int`): Number of dimensions for convolutions (2 or 3)
        spatial_upsample (`bool`): Whether to spatially upsample the latent
        temporal_upsample (`bool`): Whether to temporally upsample the latent
    """

    def __init__(
        self,
        in_channels: int = 128,
        mid_channels: int = 512,
        num_blocks_per_stage: int = 4,
        dims: int = 3,
        spatial_upsample: bool = True,
        temporal_upsample: bool = False,
        spatial_scale: float = 2.0,
        rational_resampler: bool = False,
    ):
        super().__init__()

        self.in_channels = in_channels
        self.mid_channels = mid_channels
        self.num_blocks_per_stage = num_blocks_per_stage
        self.dims = dims
        self.spatial_upsample = spatial_upsample
        self.temporal_upsample = temporal_upsample
        self.spatial_scale = float(spatial_scale)
        self.rational_resampler = rational_resampler

        Conv = nn.Conv2d if dims == 2 else nn.Conv3d

        self.initial_conv = Conv(in_channels, mid_channels, kernel_size=3, padding=1)
        self.initial_norm = nn.GroupNorm(32, mid_channels)
        self.initial_activation = nn.SiLU()

        self.res_blocks = nn.ModuleList(
            [ResBlock(mid_channels, dims=dims) for _ in range(num_blocks_per_stage)]
        )

        # Channel expansion factor matches what each PixelShuffleND folds away:
        # 8x for (2,2,2) 3D shuffle, 4x for 2D, 2x for temporal-only.
        if spatial_upsample and temporal_upsample:
            self.upsampler = nn.Sequential(
                nn.Conv3d(mid_channels, 8 * mid_channels, kernel_size=3, padding=1),
                PixelShuffleND(3),
            )
        elif spatial_upsample:
            if rational_resampler:
                self.upsampler = SpatialRationalResampler(
                    mid_channels=mid_channels, scale=self.spatial_scale
                )
            else:
                self.upsampler = nn.Sequential(
                    nn.Conv2d(mid_channels, 4 * mid_channels, kernel_size=3, padding=1),
                    PixelShuffleND(2),
                )
        elif temporal_upsample:
            self.upsampler = nn.Sequential(
                nn.Conv3d(mid_channels, 2 * mid_channels, kernel_size=3, padding=1),
                PixelShuffleND(1),
            )
        else:
            raise ValueError(
                "Either spatial_upsample or temporal_upsample must be True"
            )

        self.post_upsample_res_blocks = nn.ModuleList(
            [ResBlock(mid_channels, dims=dims) for _ in range(num_blocks_per_stage)]
        )

        self.final_conv = Conv(mid_channels, in_channels, kernel_size=3, padding=1)

    def forward(self, latent: torch.Tensor) -> torch.Tensor:
        # Input latent is (b, c, f, h, w) even for dims == 2.
        b, c, f, h, w = latent.shape

        if self.dims == 2:
            # 2D mode: process frames independently as a big 2D batch.
            x = rearrange(latent, "b c f h w -> (b f) c h w")
            x = self.initial_conv(x)
            x = self.initial_norm(x)
            x = self.initial_activation(x)
            for block in self.res_blocks:
                x = block(x)
            x = self.upsampler(x)
            for block in self.post_upsample_res_blocks:
                x = block(x)
            x = self.final_conv(x)
            x = rearrange(x, "(b f) c h w -> b c f h w", b=b, f=f)
        else:
            x = self.initial_conv(latent)
            x = self.initial_norm(x)
            x = self.initial_activation(x)
            for block in self.res_blocks:
                x = block(x)
            if self.temporal_upsample:
                x = self.upsampler(x)
                # Drop the first duplicated frame — presumably to keep the
                # causal first-frame convention of the LTX latent layout
                # (TODO confirm against the VAE's frame count convention).
                x = x[:, :, 1:, :, :]
            else:
                if isinstance(self.upsampler, SpatialRationalResampler):
                    # Rational resampler handles the 5D layout itself.
                    x = self.upsampler(x)
                else:
                    # 2D upsampler: flatten frames into batch, upsample, restore.
                    x = rearrange(x, "b c f h w -> (b f) c h w")
                    x = self.upsampler(x)
                    x = rearrange(x, "(b f) c h w -> b c f h w", b=b, f=f)
            for block in self.post_upsample_res_blocks:
                x = block(x)
            x = self.final_conv(x)

        return x

    @classmethod
    def from_config(cls, config):
        """Build from a config dict (note: fallback defaults here differ from
        ``__init__`` defaults — presumably matching serialized checkpoints;
        TODO confirm before relying on them)."""
        return cls(
            in_channels=config.get("in_channels", 4),
            mid_channels=config.get("mid_channels", 128),
            num_blocks_per_stage=config.get("num_blocks_per_stage", 4),
            dims=config.get("dims", 2),
            spatial_upsample=config.get("spatial_upsample", True),
            temporal_upsample=config.get("temporal_upsample", False),
            spatial_scale=config.get("spatial_scale", 2.0),
            rational_resampler=config.get("rational_resampler", False),
        )

    def config(self):
        """Return a serializable dict mirroring the constructor arguments."""
        return {
            "_class_name": "LatentUpsampler",
            "in_channels": self.in_channels,
            "mid_channels": self.mid_channels,
            "num_blocks_per_stage": self.num_blocks_per_stage,
            "dims": self.dims,
            "spatial_upsample": self.spatial_upsample,
            "temporal_upsample": self.temporal_upsample,
            "spatial_scale": self.spatial_scale,
            "rational_resampler": self.rational_resampler,
        }
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/ldm/lightricks/latent_upsampler.py", "license": "GNU General Public License v3.0", "lines": 246, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy/ldm/lightricks/vae/audio_vae.py
"""High-level Audio VAE wrapper combining a causal audio autoencoder and a vocoder."""

import json
from dataclasses import dataclass
import math

import torch
import torchaudio

import comfy.model_management
import comfy.model_patcher
import comfy.utils as utils
from comfy.ldm.mmaudio.vae.distributions import DiagonalGaussianDistribution
from comfy.ldm.lightricks.symmetric_patchifier import AudioPatchifier
from comfy.ldm.lightricks.vae.causal_audio_autoencoder import (
    CausalityAxis,
    CausalAudioAutoencoder,
)
from comfy.ldm.lightricks.vocoders.vocoder import Vocoder

# Temporal/frequency downsampling factor between mel space and latent space.
LATENT_DOWNSAMPLE_FACTOR = 4


@dataclass(frozen=True)
class AudioVAEComponentConfig:
    """Container for model component configuration extracted from metadata."""

    # Config dict for the causal audio autoencoder.
    autoencoder: dict
    # Config dict for the vocoder.
    vocoder: dict

    @classmethod
    def from_metadata(cls, metadata: dict) -> "AudioVAEComponentConfig":
        """Parse the checkpoint metadata's "config" entry (JSON string or dict)
        and extract the "audio_vae" and "vocoder" sections."""
        assert metadata is not None and "config" in metadata, "Metadata is required for audio VAE"
        raw_config = metadata["config"]
        if isinstance(raw_config, str):
            parsed_config = json.loads(raw_config)
        else:
            parsed_config = raw_config

        audio_config = parsed_config.get("audio_vae")
        vocoder_config = parsed_config.get("vocoder")
        assert audio_config is not None, "Audio VAE config is required for audio VAE"
        assert vocoder_config is not None, "Vocoder config is required for audio VAE"
        return cls(autoencoder=audio_config, vocoder=vocoder_config)


class ModelDeviceManager:
    """Manages device placement and GPU residency for the composed model."""

    def __init__(self, module: torch.nn.Module):
        load_device = comfy.model_management.get_torch_device()
        offload_device = comfy.model_management.vae_offload_device()
        self.patcher = comfy.model_patcher.ModelPatcher(module, load_device, offload_device)

    def ensure_model_loaded(self) -> None:
        """Free enough memory on the load device, then load the model there."""
        comfy.model_management.free_memory(
            self.patcher.model_size(),
            self.patcher.load_device,
        )
        comfy.model_management.load_model_gpu(self.patcher)

    def move_to_load_device(self, tensor: torch.Tensor) -> torch.Tensor:
        return tensor.to(self.patcher.load_device)

    @property
    def load_device(self):
        return self.patcher.load_device


class AudioLatentNormalizer:
    """Applies per-channel statistics in patch space and restores original layout."""

    # NOTE(review): parameter name "patchfier" (sic) kept for call compatibility.
    def __init__(self, patchfier: AudioPatchifier, statistics_processor: torch.nn.Module):
        self.patchifier = patchfier
        self.statistics = statistics_processor

    def normalize(self, latents: torch.Tensor) -> torch.Tensor:
        """Patchify -> apply per-channel normalization -> unpatchify back."""
        channels = latents.shape[1]
        freq = latents.shape[3]
        patched, _ = self.patchifier.patchify(latents)
        normalized = self.statistics.normalize(patched)
        return self.patchifier.unpatchify(normalized, channels=channels, freq=freq)

    def denormalize(self, latents: torch.Tensor) -> torch.Tensor:
        """Inverse of :meth:`normalize`."""
        channels = latents.shape[1]
        freq = latents.shape[3]
        patched, _ = self.patchifier.patchify(latents)
        denormalized = self.statistics.un_normalize(patched)
        return self.patchifier.unpatchify(denormalized, channels=channels, freq=freq)


class AudioPreprocessor:
    """Prepares raw waveforms for the autoencoder by matching training conditions."""

    def __init__(self, target_sample_rate: int, mel_bins: int, mel_hop_length: int, n_fft: int):
        self.target_sample_rate = target_sample_rate
        self.mel_bins = mel_bins
        self.mel_hop_length = mel_hop_length
        self.n_fft = n_fft

    def resample(self, waveform: torch.Tensor, source_rate: int) -> torch.Tensor:
        """Resample to the target rate; no-op when rates already match."""
        if source_rate == self.target_sample_rate:
            return waveform
        return torchaudio.functional.resample(waveform, source_rate, self.target_sample_rate)

    def waveform_to_mel(
        self, waveform: torch.Tensor, waveform_sample_rate: int, device
    ) -> torch.Tensor:
        """Convert a waveform to a log-mel spectrogram laid out as
        (batch, channels, time, mel_bins).

        NOTE(review): a MelSpectrogram transform is constructed per call —
        consider caching it if this shows up in profiles.
        """
        waveform = self.resample(waveform, waveform_sample_rate)
        mel_transform = torchaudio.transforms.MelSpectrogram(
            sample_rate=self.target_sample_rate,
            n_fft=self.n_fft,
            win_length=self.n_fft,
            hop_length=self.mel_hop_length,
            f_min=0.0,
            f_max=self.target_sample_rate / 2.0,
            n_mels=self.mel_bins,
            window_fn=torch.hann_window,
            center=True,
            pad_mode="reflect",
            power=1.0,
            mel_scale="slaney",
            norm="slaney",
        ).to(device)
        mel = mel_transform(waveform)
        # Log-compress with a floor to avoid log(0).
        mel = torch.log(torch.clamp(mel, min=1e-5))
        # (B, C, mel, T) -> (B, C, T, mel)
        return mel.permute(0, 1, 3, 2).contiguous()


class AudioVAE(torch.nn.Module):
    """High-level Audio VAE wrapper exposing encode and decode entry points."""

    def __init__(self, state_dict: dict, metadata: dict):
        super().__init__()
        component_config = AudioVAEComponentConfig.from_metadata(metadata)

        # Split the combined checkpoint by prefix into the two sub-models.
        vae_sd = utils.state_dict_prefix_replace(state_dict, {"audio_vae.": ""}, filter_keys=True)
        vocoder_sd = utils.state_dict_prefix_replace(state_dict, {"vocoder.": ""}, filter_keys=True)

        self.autoencoder = CausalAudioAutoencoder(config=component_config.autoencoder)
        self.vocoder = Vocoder(config=component_config.vocoder)

        self.autoencoder.load_state_dict(vae_sd, strict=False)
        self.vocoder.load_state_dict(vocoder_sd, strict=False)

        autoencoder_config = self.autoencoder.get_config()
        self.normalizer = AudioLatentNormalizer(
            AudioPatchifier(
                patch_size=1,
                audio_latent_downsample_factor=LATENT_DOWNSAMPLE_FACTOR,
                sample_rate=autoencoder_config["sampling_rate"],
                hop_length=autoencoder_config["mel_hop_length"],
                is_causal=autoencoder_config["is_causal"],
            ),
            self.autoencoder.per_channel_statistics,
        )
        self.preprocessor = AudioPreprocessor(
            target_sample_rate=autoencoder_config["sampling_rate"],
            mel_bins=autoencoder_config["mel_bins"],
            mel_hop_length=autoencoder_config["mel_hop_length"],
            n_fft=autoencoder_config["n_fft"],
        )
        # The device manager patches this whole composed module.
        self.device_manager = ModelDeviceManager(self)

    def encode(self, audio: dict) -> torch.Tensor:
        """Encode a waveform dictionary into normalized latent tensors."""
        waveform = audio["waveform"]
        waveform_sample_rate = audio["sample_rate"]
        input_device = waveform.device

        # Ensure that Audio VAE is loaded on the correct device.
        self.device_manager.ensure_model_loaded()
        waveform = self.device_manager.move_to_load_device(waveform)

        # Broadcast mono input to the encoder's channel count; anything else
        # that mismatches is an error.
        expected_channels = self.autoencoder.encoder.in_channels
        if waveform.shape[1] != expected_channels:
            if waveform.shape[1] == 1:
                waveform = waveform.expand(-1, expected_channels, *waveform.shape[2:])
            else:
                raise ValueError(
                    f"Input audio must have {expected_channels} channels, got {waveform.shape[1]}"
                )

        mel_spec = self.preprocessor.waveform_to_mel(
            waveform, waveform_sample_rate, device=self.device_manager.load_device
        )
        latents = self.autoencoder.encode(mel_spec)
        # Use the distribution mode (deterministic) rather than sampling.
        posterior = DiagonalGaussianDistribution(latents)
        latent_mode = posterior.mode()
        normalized = self.normalizer.normalize(latent_mode)
        # Restore the caller's original device.
        return normalized.to(input_device)

    def decode(self, latents: torch.Tensor) -> torch.Tensor:
        """Decode normalized latent tensors into an audio waveform."""
        original_shape = latents.shape

        # Ensure that Audio VAE is loaded on the correct device.
        self.device_manager.ensure_model_loaded()
        latents = self.device_manager.move_to_load_device(latents)

        latents = self.normalizer.denormalize(latents)
        target_shape = self.target_shape_from_latents(original_shape)
        mel_spec = self.autoencoder.decode(latents, target_shape=target_shape)
        waveform = self.run_vocoder(mel_spec)
        # NOTE(review): unlike encode(), the result stays on the load device
        # rather than the caller's device — confirm this asymmetry is intended.
        return self.device_manager.move_to_load_device(waveform)

    def target_shape_from_latents(self, latents_shape):
        """Compute the mel-space output shape for a given latent shape.

        Causal models lose (factor - 1) frames of temporal padding.
        """
        batch, _, time, _ = latents_shape
        target_length = time * LATENT_DOWNSAMPLE_FACTOR
        if self.autoencoder.causality_axis != CausalityAxis.NONE:
            target_length -= LATENT_DOWNSAMPLE_FACTOR - 1
        return (
            batch,
            self.autoencoder.decoder.out_ch,
            target_length,
            self.autoencoder.mel_bins,
        )

    def num_of_latents_from_frames(self, frames_number: int, frame_rate: int) -> int:
        """Number of audio latent steps covering a video of the given length."""
        return math.ceil((float(frames_number) / frame_rate) * self.latents_per_second)

    def run_vocoder(self, mel_spec: torch.Tensor) -> torch.Tensor:
        """Convert a decoded mel spectrogram into a waveform via the vocoder."""
        audio_channels = self.autoencoder.decoder.out_ch
        # Vocoder expects (..., mel, time); drop the channel dim for mono.
        vocoder_input = mel_spec.transpose(2, 3)
        if audio_channels == 1:
            vocoder_input = vocoder_input.squeeze(1)
        elif audio_channels != 2:
            raise ValueError(f"Unsupported audio_channels: {audio_channels}")
        return self.vocoder(vocoder_input)

    @property
    def sample_rate(self) -> int:
        # Autoencoder-side (mel) sample rate, not necessarily the output rate.
        return int(self.autoencoder.sampling_rate)

    @property
    def mel_hop_length(self) -> int:
        return int(self.autoencoder.mel_hop_length)

    @property
    def mel_bins(self) -> int:
        return int(self.autoencoder.mel_bins)

    @property
    def latent_channels(self) -> int:
        return int(self.autoencoder.decoder.z_channels)

    @property
    def latent_frequency_bins(self) -> int:
        return int(self.mel_bins // LATENT_DOWNSAMPLE_FACTOR)

    @property
    def latents_per_second(self) -> float:
        return self.sample_rate / self.mel_hop_length / LATENT_DOWNSAMPLE_FACTOR

    @property
    def output_sample_rate(self) -> int:
        """Sample rate of the vocoder output; derived from upsample_factor when
        the vocoder does not declare it explicitly."""
        output_rate = getattr(self.vocoder, "output_sample_rate", None)
        if output_rate is not None:
            return int(output_rate)
        upsample_factor = getattr(self.vocoder, "upsample_factor", None)
        if upsample_factor is None:
            raise AttributeError(
                "Vocoder is missing upsample_factor; cannot infer output sample rate"
            )
        return int(self.sample_rate * upsample_factor / self.mel_hop_length)

    def memory_required(self, input_shape):
        # Conservative estimate: the full model size, independent of input shape.
        return self.device_manager.patcher.model_size()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/ldm/lightricks/vae/audio_vae.py", "license": "GNU General Public License v3.0", "lines": 217, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy/ldm/lightricks/vae/causal_audio_autoencoder.py
from __future__ import annotations
import torch
from torch import nn
from torch.nn import functional as F
from typing import Optional
from enum import Enum
from .pixel_norm import PixelNorm
import comfy.ops
import logging

# All convolution/linear layers are created through comfy's ops wrapper so
# weight initialization can be skipped (weights come from a checkpoint).
ops = comfy.ops.disable_weight_init


class StringConvertibleEnum(Enum):
    """
    Base enum class that provides string-to-enum conversion functionality.

    This mixin adds a str_to_enum() class method that handles conversion
    from strings, None, or existing enum instances with case-insensitive matching.
    """
    @classmethod
    def str_to_enum(cls, value):
        """
        Convert a string, enum instance, or None to the appropriate enum member.

        Args:
            value: Can be an enum instance of this class, a string, or None

        Returns:
            Enum member of this class

        Raises:
            ValueError: If the value cannot be converted to a valid enum member
        """
        # Already an enum instance of this class
        if isinstance(value, cls):
            return value

        # None maps to NONE member if it exists
        if value is None:
            if hasattr(cls, "NONE"):
                return cls.NONE
            raise ValueError(f"{cls.__name__} does not have a NONE member to map None to")

        # String conversion (case-insensitive)
        if isinstance(value, str):
            value_lower = value.lower()
            # Try to match against enum values
            for member in cls:
                # Handle members with None values
                if member.value is None:
                    if value_lower == "none":
                        return member
                # Handle members with string values
                elif isinstance(member.value, str) and member.value.lower() == value_lower:
                    return member

            # Build helpful error message with valid values
            valid_values = []
            for member in cls:
                if member.value is None:
                    valid_values.append("none")
                elif isinstance(member.value, str):
                    valid_values.append(member.value)
            raise ValueError(f"Invalid {cls.__name__} string: '{value}'. "
                             f"Valid values are: {valid_values}")

        raise ValueError(
            f"Cannot convert type {type(value).__name__} to {cls.__name__} enum. "
            f"Expected string, None, or {cls.__name__} instance."
        )


class AttentionType(StringConvertibleEnum):
    """Enum for specifying the attention mechanism type."""
    VANILLA = "vanilla"
    LINEAR = "linear"
    NONE = "none"


class CausalityAxis(StringConvertibleEnum):
    """Enum for specifying the causality axis in causal convolutions."""
    NONE = None
    WIDTH = "width"
    HEIGHT = "height"
    WIDTH_COMPATIBILITY = "width-compatibility"


def Normalize(in_channels, *, num_groups=32, normtype="group"):
    """Build a normalization layer: GroupNorm ("group") or PixelNorm ("pixel")."""
    if normtype == "group":
        return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)
    elif normtype == "pixel":
        return PixelNorm(dim=1, eps=1e-6)
    else:
        raise ValueError(f"Invalid normalization type: {normtype}")


class CausalConv2d(nn.Module):
    """
    A causal 2D convolution.

    This layer ensures that the output at time `t` only depends on inputs at
    time `t` and earlier. It achieves this by applying asymmetric padding to the
    time dimension (width) before the convolution.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        dilation=1,
        groups=1,
        bias=True,
        causality_axis: CausalityAxis = CausalityAxis.HEIGHT,
    ):
        super().__init__()
        self.causality_axis = causality_axis

        # Ensure kernel_size and dilation are tuples
        kernel_size = nn.modules.utils._pair(kernel_size)
        dilation = nn.modules.utils._pair(dilation)

        # Calculate padding dimensions
        pad_h = (kernel_size[0] - 1) * dilation[0]
        pad_w = (kernel_size[1] - 1) * dilation[1]

        # The padding tuple for F.pad is (pad_left, pad_right, pad_top, pad_bottom);
        # the causal axis gets all its padding on the "past" side only.
        match self.causality_axis:
            case CausalityAxis.NONE:
                self.padding = (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2)
            case CausalityAxis.WIDTH | CausalityAxis.WIDTH_COMPATIBILITY:
                self.padding = (pad_w, 0, pad_h // 2, pad_h - pad_h // 2)
            case CausalityAxis.HEIGHT:
                self.padding = (pad_w // 2, pad_w - pad_w // 2, pad_h, 0)
            case _:
                raise ValueError(f"Invalid causality_axis: {causality_axis}")

        # The internal convolution layer uses no padding, as we handle it manually
        self.conv = ops.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=0,
            dilation=dilation,
            groups=groups,
            bias=bias,
        )

    def forward(self, x):
        # Apply causal padding before convolution
        x = F.pad(x, self.padding)
        return self.conv(x)


def make_conv2d(
    in_channels,
    out_channels,
    kernel_size,
    stride=1,
    padding=None,
    dilation=1,
    groups=1,
    bias=True,
    causality_axis: Optional[CausalityAxis] = None,
):
    """
    Create a 2D convolution layer that can be either causal or non-causal.

    Args:
        in_channels: Number of input channels
        out_channels: Number of output channels
        kernel_size: Size of the convolution kernel
        stride: Convolution stride
        padding: Padding (if None, will be calculated based on causal flag)
        dilation: Dilation rate
        groups: Number of groups for grouped convolution
        bias: Whether to use bias
        causality_axis: Dimension along which to apply causality.

    Returns:
        Either a regular Conv2d or CausalConv2d layer
    """
    if causality_axis is not None:
        # For causal convolution, padding is handled internally by CausalConv2d
        return CausalConv2d(in_channels, out_channels, kernel_size, stride, dilation, groups, bias, causality_axis)
    else:
        # For non-causal convolution, use symmetric padding if not specified
        if padding is None:
            if isinstance(kernel_size, int):
                padding = kernel_size // 2
            else:
                padding = tuple(k // 2 for k in kernel_size)
        return ops.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
        )


class Upsample(nn.Module):
    """Nearest-neighbor 2x upsampling, optionally followed by a (causal) conv."""

    def __init__(self, in_channels, with_conv, causality_axis: CausalityAxis = CausalityAxis.HEIGHT):
        super().__init__()
        self.with_conv = with_conv
        self.causality_axis = causality_axis
        if self.with_conv:
            self.conv = make_conv2d(in_channels, in_channels, kernel_size=3, stride=1, causality_axis=causality_axis)

    def forward(self, x):
        x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
        if self.with_conv:
            x = self.conv(x)
        # Drop FIRST element in the causal axis to undo encoder's padding, while keeping the length 1 + 2 * n.
        # For example, if the input is [0, 1, 2], after interpolation, the output is [0, 0, 1, 1, 2, 2].
        # The causal convolution will pad the first element as [-, -, 0, 0, 1, 1, 2, 2],
        # So the output elements rely on the following windows:
        # 0: [-,-,0]
        # 1: [-,0,0]
        # 2: [0,0,1]
        # 3: [0,1,1]
        # 4: [1,1,2]
        # 5: [1,2,2]
        # Notice that the first and second elements in the output rely only on the first element in the input,
        # while all other elements rely on two elements in the input.
        # So we can drop the first element to undo the padding (rather than the last element).
        # This is a no-op for non-causal convolutions.
        match self.causality_axis:
            case CausalityAxis.NONE:
                pass  # x remains unchanged
            case CausalityAxis.HEIGHT:
                x = x[:, :, 1:, :]
            case CausalityAxis.WIDTH:
                x = x[:, :, :, 1:]
            case CausalityAxis.WIDTH_COMPATIBILITY:
                pass  # x remains unchanged
            case _:
                raise ValueError(f"Invalid causality_axis: {self.causality_axis}")
        return x


class Downsample(nn.Module):
    """
    A downsampling layer that can use either a strided convolution or average pooling.
    Supports standard and causal padding for the convolutional mode.
    """

    def __init__(self, in_channels, with_conv, causality_axis: CausalityAxis = CausalityAxis.WIDTH):
        super().__init__()
        self.with_conv = with_conv
        self.causality_axis = causality_axis

        if self.causality_axis != CausalityAxis.NONE and not self.with_conv:
            raise ValueError("causality is only supported when `with_conv=True`.")

        if self.with_conv:
            # Do time downsampling here
            # no asymmetric padding in torch conv, must do it ourselves
            self.conv = ops.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)

    def forward(self, x):
        if self.with_conv:
            # (pad_left, pad_right, pad_top, pad_bottom); the causal axis is
            # padded on the "past" side with 2 elements for the k=3, stride-2 conv.
            match self.causality_axis:
                case CausalityAxis.NONE:
                    pad = (0, 1, 0, 1)
                case CausalityAxis.WIDTH:
                    pad = (2, 0, 0, 1)
                case CausalityAxis.HEIGHT:
                    pad = (0, 1, 2, 0)
                case CausalityAxis.WIDTH_COMPATIBILITY:
                    pad = (1, 0, 0, 1)
                case _:
                    raise ValueError(f"Invalid causality_axis: {self.causality_axis}")
            x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
            x = self.conv(x)
        else:
            # This branch is only taken if with_conv=False, which implies causality_axis is NONE.
            x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        return x


class ResnetBlock(nn.Module):
    """Pre-activation residual block with optional timestep embedding and
    a 1x1 or 3x3 shortcut when the channel count changes."""

    def __init__(
        self,
        *,
        in_channels,
        out_channels=None,
        conv_shortcut=False,
        dropout,
        temb_channels=512,
        norm_type="group",
        causality_axis: CausalityAxis = CausalityAxis.HEIGHT,
    ):
        super().__init__()
        self.causality_axis = causality_axis
        # GroupNorm mixes information across the whole spatial extent, which
        # would break causality — hence this combination is rejected.
        if self.causality_axis != CausalityAxis.NONE and norm_type == "group":
            raise ValueError("Causal ResnetBlock with GroupNorm is not supported.")
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels
        self.use_conv_shortcut = conv_shortcut

        self.norm1 = Normalize(in_channels, normtype=norm_type)
        self.non_linearity = nn.SiLU()
        self.conv1 = make_conv2d(in_channels, out_channels, kernel_size=3, stride=1, causality_axis=causality_axis)
        if temb_channels > 0:
            self.temb_proj = ops.Linear(temb_channels, out_channels)
        self.norm2 = Normalize(out_channels, normtype=norm_type)
        self.dropout = torch.nn.Dropout(dropout)
        self.conv2 = make_conv2d(out_channels, out_channels, kernel_size=3, stride=1, causality_axis=causality_axis)
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                self.conv_shortcut = make_conv2d(
                    in_channels, out_channels, kernel_size=3, stride=1, causality_axis=causality_axis
                )
            else:
                self.nin_shortcut = make_conv2d(
                    in_channels, out_channels, kernel_size=1, stride=1, causality_axis=causality_axis
                )

    def forward(self, x, temb):
        h = x
        h = self.norm1(h)
        h = self.non_linearity(h)
        h = self.conv1(h)

        if temb is not None:
            # Broadcast the projected timestep embedding over spatial dims.
            h = h + self.temb_proj(self.non_linearity(temb))[:, :, None, None]

        h = self.norm2(h)
        h = self.non_linearity(h)
        h = self.dropout(h)
        h = self.conv2(h)

        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                x = self.conv_shortcut(x)
            else:
                x = self.nin_shortcut(x)

        return x + h


class AttnBlock(nn.Module):
    """Single-head self-attention over the flattened spatial dimensions."""

    def __init__(self, in_channels, norm_type="group"):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels, normtype=norm_type)
        self.q = ops.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.k = ops.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.v = ops.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.proj_out = ops.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention
        b, c, h, w = q.shape
        q = q.reshape(b, c, h * w).contiguous()
        q = q.permute(0, 2, 1).contiguous()  # b,hw,c
        k = k.reshape(b, c, h * w).contiguous()  # b,c,hw
        w_ = torch.bmm(q, k).contiguous()  # b,hw,hw    w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
        w_ = w_ * (int(c) ** (-0.5))
        w_ = torch.nn.functional.softmax(w_, dim=2)

        # attend to values
        v = v.reshape(b, c, h * w).contiguous()
        w_ = w_.permute(0, 2, 1).contiguous()  # b,hw,hw (first hw of k, second of q)
        h_ = torch.bmm(v, w_).contiguous()  # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
        h_ = h_.reshape(b, c, h, w).contiguous()

        h_ = self.proj_out(h_)

        return x + h_


def make_attn(in_channels, attn_type="vanilla", norm_type="group"):
    """Factory for the attention block used in Encoder/Decoder; "none" yields Identity."""
    # Convert string to enum if needed
    attn_type = AttentionType.str_to_enum(attn_type)

    if attn_type != AttentionType.NONE:
        logging.info(f"making attention of type '{attn_type.value}' with {in_channels} in_channels")
    else:
        logging.info(f"making identity attention with {in_channels} in_channels")

    match attn_type:
        case AttentionType.VANILLA:
            return AttnBlock(in_channels, norm_type=norm_type)
        case AttentionType.NONE:
            # nn.Identity ignores its constructor arguments.
            return nn.Identity(in_channels)
        case AttentionType.LINEAR:
            raise NotImplementedError(f"Attention type {attn_type.value} is not supported yet.")
        case _:
            raise ValueError(f"Unknown attention type: {attn_type}")


class Encoder(nn.Module):
    """Multi-resolution convolutional encoder mapping mel spectrograms to latents."""

    def __init__(
        self,
        *,
        ch,
        out_ch,
        ch_mult=(1, 2, 4, 8),
        num_res_blocks,
        attn_resolutions,
        dropout=0.0,
        resamp_with_conv=True,
        in_channels,
        resolution,
        z_channels,
        double_z=True,
        attn_type="vanilla",
        mid_block_add_attention=True,
        norm_type="group",
        causality_axis=CausalityAxis.WIDTH.value,
        **ignore_kwargs,
    ):
        super().__init__()
        self.ch = ch
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        self.z_channels = z_channels
        self.double_z = double_z
        self.norm_type = norm_type
        # Convert string to enum if needed (for config loading)
        causality_axis = CausalityAxis.str_to_enum(causality_axis)
        self.attn_type = AttentionType.str_to_enum(attn_type)

        # downsampling
        self.conv_in = make_conv2d(
            in_channels,
            self.ch,
            kernel_size=3,
            stride=1,
            causality_axis=causality_axis,
        )
        self.non_linearity = nn.SiLU()

        curr_res = resolution
        in_ch_mult = (1,) + tuple(ch_mult)
        self.in_ch_mult = in_ch_mult
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch * in_ch_mult[i_level]
            block_out = ch * ch_mult[i_level]
            for _ in range(self.num_res_blocks):
                block.append(
                    ResnetBlock(
                        in_channels=block_in,
                        out_channels=block_out,
                        temb_channels=self.temb_ch,
                        dropout=dropout,
                        norm_type=self.norm_type,
                        causality_axis=causality_axis,
                    )
                )
                block_in = block_out
                # One attention module per residual block at attn resolutions.
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=self.attn_type, norm_type=self.norm_type))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions - 1:
                down.downsample = Downsample(block_in, resamp_with_conv, causality_axis=causality_axis)
                curr_res = curr_res // 2
            self.down.append(down)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(
            in_channels=block_in,
            out_channels=block_in,
            temb_channels=self.temb_ch,
            dropout=dropout,
            norm_type=self.norm_type,
            causality_axis=causality_axis,
        )
        if mid_block_add_attention:
            self.mid.attn_1 = make_attn(block_in, attn_type=self.attn_type, norm_type=self.norm_type)
        else:
            self.mid.attn_1 = nn.Identity()
        self.mid.block_2 = ResnetBlock(
            in_channels=block_in,
            out_channels=block_in,
            temb_channels=self.temb_ch,
            dropout=dropout,
            norm_type=self.norm_type,
            causality_axis=causality_axis,
        )

        # end
        self.norm_out = Normalize(block_in, normtype=self.norm_type)
        self.conv_out = make_conv2d(
            block_in,
            # Doubled channels when predicting a Gaussian (mean + logvar).
            2 * z_channels if double_z else z_channels,
            kernel_size=3,
            stride=1,
            causality_axis=causality_axis,
        )

    def forward(self, x):
        """
        Forward pass through the encoder.

        Args:
            x: Input tensor of shape [batch, channels, time, n_mels]

        Returns:
            Encoded latent representation
        """
        feature_maps = [self.conv_in(x)]

        # Process each resolution level (from high to low resolution)
        for resolution_level in range(self.num_resolutions):
            # Apply residual blocks at current resolution level
            for block_idx in range(self.num_res_blocks):
                # Apply ResNet block with optional timestep embedding
                current_features = self.down[resolution_level].block[block_idx](feature_maps[-1], temb=None)

                # Apply attention if configured for this resolution level
                if len(self.down[resolution_level].attn) > 0:
                    current_features = self.down[resolution_level].attn[block_idx](current_features)

                # Store processed features
                feature_maps.append(current_features)

            # Downsample spatial dimensions (except at the final resolution level)
            if resolution_level != self.num_resolutions - 1:
                downsampled_features = self.down[resolution_level].downsample(feature_maps[-1])
                feature_maps.append(downsampled_features)

        # === MIDDLE PROCESSING PHASE ===
        # Take the lowest resolution features for middle processing
        bottleneck_features = feature_maps[-1]

        # Apply first middle ResNet block
        bottleneck_features = self.mid.block_1(bottleneck_features, temb=None)

        # Apply middle attention block
        bottleneck_features = self.mid.attn_1(bottleneck_features)

        # Apply second middle ResNet block
        bottleneck_features = self.mid.block_2(bottleneck_features, temb=None)

        # === OUTPUT PHASE ===
        # Normalize the bottleneck features
        output_features = self.norm_out(bottleneck_features)

        # Apply non-linearity (SiLU activation)
        output_features = self.non_linearity(output_features)

        # Final convolution to produce latent representation
        # [batch, channels, time, n_mels] -> [batch, 2 * z_channels if double_z else z_channels, time, n_mels]
        return self.conv_out(output_features)


class Decoder(nn.Module):
    """Multi-resolution convolutional decoder mapping latents back to mel spectrograms."""

    def __init__(
        self,
        *,
        ch,
        out_ch,
        ch_mult=(1, 2, 4, 8),
        num_res_blocks,
        attn_resolutions,
        dropout=0.0,
        resamp_with_conv=True,
        in_channels,
        resolution,
        z_channels,
        give_pre_end=False,
        tanh_out=False,
        attn_type="vanilla",
        mid_block_add_attention=True,
        norm_type="group",
        causality_axis=CausalityAxis.WIDTH.value,
        **ignorekwargs,
    ):
        super().__init__()
        self.ch = ch
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        self.out_ch = out_ch
        self.give_pre_end = give_pre_end
        self.tanh_out = tanh_out
        self.norm_type = norm_type
        self.z_channels = z_channels
        # NOTE(review): ch_mult is not stored on self, but get_config() below
        # reads self.ch_mult — confirm and persist it here.
        # Convert string to enum if needed (for config loading)
        causality_axis = CausalityAxis.str_to_enum(causality_axis)
        self.attn_type = AttentionType.str_to_enum(attn_type)

        # compute block_in and curr_res at lowest res
        block_in = ch * ch_mult[self.num_resolutions - 1]
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        self.z_shape = (1, z_channels, curr_res, curr_res)

        # z to block_in
        self.conv_in = make_conv2d(z_channels, block_in, kernel_size=3, stride=1, causality_axis=causality_axis)
        self.non_linearity = nn.SiLU()

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(
            in_channels=block_in,
            out_channels=block_in,
            temb_channels=self.temb_ch,
            dropout=dropout,
            norm_type=self.norm_type,
            causality_axis=causality_axis,
        )
        if mid_block_add_attention:
            self.mid.attn_1 = make_attn(block_in, attn_type=self.attn_type, norm_type=self.norm_type)
        else:
            self.mid.attn_1 = nn.Identity()
        self.mid.block_2 = ResnetBlock(
            in_channels=block_in,
            out_channels=block_in,
            temb_channels=self.temb_ch,
            dropout=dropout,
            norm_type=self.norm_type,
            causality_axis=causality_axis,
        )

        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch * ch_mult[i_level]
            for _ in range(self.num_res_blocks + 1):
                block.append(
                    ResnetBlock(
                        in_channels=block_in,
                        out_channels=block_out,
                        temb_channels=self.temb_ch,
                        dropout=dropout,
                        norm_type=self.norm_type,
                        causality_axis=causality_axis,
                    )
                )
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=self.attn_type, norm_type=self.norm_type))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv, causality_axis=causality_axis)
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to get consistent order

        # end
        self.norm_out = Normalize(block_in, normtype=self.norm_type)
        self.conv_out = make_conv2d(block_in, out_ch, kernel_size=3, stride=1, causality_axis=causality_axis)

    def _adjust_output_shape(self, decoded_output, target_shape):
        """
        Adjust output shape to match target dimensions for variable-length audio.

        This function handles the common case where decoded audio spectrograms
        need to be resized to match a specific target shape.
        Args:
            decoded_output: Tensor of shape (batch, channels, time, frequency)
            target_shape: Target shape tuple (batch, channels, time, frequency)

        Returns:
            Tensor adjusted to match target_shape exactly
        """
        # Current output shape: (batch, channels, time, frequency)
        _, _, current_time, current_freq = decoded_output.shape
        _, target_channels, target_time, target_freq = target_shape

        # Step 1: Crop first to avoid exceeding target dimensions
        decoded_output = decoded_output[
            :, :target_channels, : min(current_time, target_time), : min(current_freq, target_freq)
        ]

        # Step 2: Calculate padding needed for time and frequency dimensions
        time_padding_needed = target_time - decoded_output.shape[2]
        freq_padding_needed = target_freq - decoded_output.shape[3]

        # Step 3: Apply padding if needed
        if time_padding_needed > 0 or freq_padding_needed > 0:
            # PyTorch padding format: (pad_left, pad_right, pad_top, pad_bottom)
            # For audio: pad_left/right = frequency, pad_top/bottom = time
            padding = (
                0,
                max(freq_padding_needed, 0),  # frequency padding (left, right)
                0,
                max(time_padding_needed, 0),  # time padding (top, bottom)
            )
            decoded_output = F.pad(decoded_output, padding)

        # Step 4: Final safety crop to ensure exact target shape
        decoded_output = decoded_output[:, :target_channels, :target_time, :target_freq]

        return decoded_output

    def get_config(self):
        """Return the decoder construction parameters as a dict."""
        # NOTE(review): self.ch_mult is never assigned in __init__, so calling
        # this method raises AttributeError — confirm and store ch_mult in
        # __init__ (alongside self.ch etc.).
        return {
            "ch": self.ch,
            "out_ch": self.out_ch,
            "ch_mult": self.ch_mult,
            "num_res_blocks": self.num_res_blocks,
            "in_channels": self.in_channels,
            "resolution": self.resolution,
            "z_channels": self.z_channels,
        }

    def forward(self, latent_features, target_shape=None):
        """
        Decode latent features back to audio spectrograms.

        Args:
            latent_features: Encoded latent representation of shape (batch, channels, height, width)
            target_shape: Optional target output shape (batch, channels, time, frequency)
                        If provided, output will be cropped/padded to match this shape

        Returns:
            Reconstructed audio spectrogram of shape (batch, channels, time, frequency)
        """
        assert target_shape is not None, "Target shape is required for CausalAudioAutoencoder Decoder"

        # Transform latent features to decoder's internal feature dimension
        hidden_features = self.conv_in(latent_features)

        # Middle processing
        hidden_features = self.mid.block_1(hidden_features, temb=None)
        hidden_features = self.mid.attn_1(hidden_features)
        hidden_features = self.mid.block_2(hidden_features, temb=None)

        # Upsampling
        # Progressively increase spatial resolution from lowest to highest
        for resolution_level in reversed(range(self.num_resolutions)):
            # Apply residual blocks at current resolution level
            for block_index in range(self.num_res_blocks + 1):
                hidden_features = self.up[resolution_level].block[block_index](hidden_features, temb=None)
                if len(self.up[resolution_level].attn) > 0:
                    hidden_features = self.up[resolution_level].attn[block_index](hidden_features)
            if resolution_level != 0:
                hidden_features = self.up[resolution_level].upsample(hidden_features)

        # Output
        if self.give_pre_end:
            # Return intermediate features before final processing (for debugging/analysis)
            decoded_output = hidden_features
        else:
            # Standard output path: normalize, activate, and convert to output channels
            # Final normalization layer
            hidden_features = self.norm_out(hidden_features)

            # Apply SiLU (Swish) activation function
            hidden_features = self.non_linearity(hidden_features)

            # Final convolution to map to output channels (typically 2 for stereo audio)
            decoded_output = self.conv_out(hidden_features)

            # Optional tanh activation to bound output values to [-1, 1] range
            if self.tanh_out:
                decoded_output = torch.tanh(decoded_output)

        # Adjust shape for audio data
        # (Always true here — the assert above already rejected None.)
        if target_shape is not None:
            decoded_output = self._adjust_output_shape(decoded_output, target_shape)

        return decoded_output


class processor(nn.Module):
    """Per-channel latent normalization statistics.

    Buffer names intentionally contain hyphens to match checkpoint keys, so
    they are accessed via get_buffer() instead of attribute access.
    """

    def __init__(self):
        super().__init__()
        self.register_buffer("std-of-means", torch.empty(128))
        self.register_buffer("mean-of-means", torch.empty(128))

    def un_normalize(self, x):
        # Inverse of normalize(): x * std + mean.
        return (x * self.get_buffer("std-of-means").to(x)) + self.get_buffer("mean-of-means").to(x)

    def normalize(self, x):
        # Standardize: (x - mean) / std.
        return (x - self.get_buffer("mean-of-means").to(x)) / self.get_buffer("std-of-means").to(x)


class CausalAudioAutoencoder(nn.Module):
    """Mel-spectrogram autoencoder with optional causal convolutions.

    Wires an Encoder/Decoder pair from a (possibly nested) config dict; falls
    back to a hard-coded default config matching the ltx-video-av checkpoint.
    """

    def __init__(self, config=None):
        super().__init__()
        if config is None:
            config = self._guess_config()

        # Extract encoder and decoder configs from the new format
        model_config = config.get("model", {}).get("params", {})
        variables_config = config.get("variables", {})
        # Sampling rate may live in several places depending on config vintage.
        self.sampling_rate = variables_config.get(
            "sampling_rate",
            model_config.get("sampling_rate", config.get("sampling_rate", 16000)),
        )
        encoder_config = model_config.get("encoder", model_config.get("ddconfig", {}))
        decoder_config = model_config.get("decoder", encoder_config)

        # Load mel spectrogram parameters
        self.mel_bins = encoder_config.get("mel_bins", 64)
        self.mel_hop_length = model_config.get("preprocessing", {}).get("stft", {}).get("hop_length", 160)
        self.n_fft = model_config.get("preprocessing", {}).get("stft", {}).get("filter_length", 1024)

        # Store causality configuration at VAE level (not just in encoder internals)
        causality_axis_value = encoder_config.get("causality_axis", CausalityAxis.WIDTH.value)
        self.causality_axis = CausalityAxis.str_to_enum(causality_axis_value)
        self.is_causal = self.causality_axis == CausalityAxis.HEIGHT

        self.encoder = Encoder(**encoder_config)
        self.decoder = Decoder(**decoder_config)
        self.per_channel_statistics = processor()

    def _guess_config(self):
        """Build the default config used when none is supplied (matches the
        ltx-video-av-1679000 checkpoint metadata)."""
        encoder_config = {
            # Required parameters - based on ltx-video-av-1679000 model metadata
            "ch": 128,
            "out_ch": 8,
            "ch_mult": [1, 2, 4],  # Based on metadata: [1, 2, 4] not [1, 2, 4, 8]
            "num_res_blocks": 2,
            "attn_resolutions": [],  # Based on metadata: empty list, no attention
            "dropout": 0.0,
            "resamp_with_conv": True,
            "in_channels": 2,  # stereo
            "resolution": 256,
            "z_channels": 8,
            "double_z": True,
            "attn_type": "vanilla",
            "mid_block_add_attention": False,  # Based on metadata: false
            "norm_type": "pixel",
            "causality_axis": "height",  # Based on metadata
            "mel_bins": 64,  # Based on metadata: mel_bins = 64
        }

        decoder_config = {
            # Inherits encoder config, can override specific params
            **encoder_config,
            "out_ch": 2,  # Stereo audio output (2 channels)
            "give_pre_end": False,
            "tanh_out": False,
        }

        config = {
            "_class_name": "CausalAudioAutoencoder",
            "sampling_rate": 16000,
            "model": {
                "params": {
                    "encoder": encoder_config,
                    "decoder": decoder_config,
                }
            },
        }
        return config

    def get_config(self):
        """Return the resolved audio/mel configuration of this autoencoder."""
        return {
            "sampling_rate": self.sampling_rate,
            "mel_bins": self.mel_bins,
            "mel_hop_length": self.mel_hop_length,
            "n_fft": self.n_fft,
            "causality_axis": self.causality_axis.value,
            "is_causal": self.is_causal,
        }

    def encode(self, x):
        """Encode a mel spectrogram into (possibly doubled) latent channels."""
        return self.encoder(x)

    def decode(self, x, target_shape=None):
        """Decode latents into a mel spectrogram of target_shape."""
        return self.decoder(x, target_shape=target_shape)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/ldm/lightricks/vae/causal_audio_autoencoder.py", "license": "GNU General Public License v3.0", "lines": 764, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy_extras/nodes_lt_audio.py
import folder_paths
import comfy.utils
import comfy.model_management
import torch

from comfy.ldm.lightricks.vae.audio_vae import AudioVAE
from comfy_api.latest import ComfyExtension, io

# NOTE(review): LTXAVTextEncoderLoader below uses comfy.sd but this module does
# not import it explicitly — it relies on comfy.sd being imported elsewhere;
# consider adding `import comfy.sd` here.


class LTXVAudioVAELoader(io.ComfyNode):
    """Node that loads an LTXV Audio VAE checkpoint from the checkpoints folder."""

    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="LTXVAudioVAELoader",
            display_name="LTXV Audio VAE Loader",
            category="audio",
            inputs=[
                io.Combo.Input(
                    "ckpt_name",
                    options=folder_paths.get_filename_list("checkpoints"),
                    tooltip="Audio VAE checkpoint to load.",
                )
            ],
            outputs=[io.Vae.Output(display_name="Audio VAE")],
        )

    @classmethod
    def execute(cls, ckpt_name: str) -> io.NodeOutput:
        ckpt_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name)
        # Metadata carries the VAE configuration (sampling rate, mel params).
        sd, metadata = comfy.utils.load_torch_file(ckpt_path, return_metadata=True)
        return io.NodeOutput(AudioVAE(sd, metadata))


class LTXVAudioVAEEncode(io.ComfyNode):
    """Node that encodes audio into Audio VAE latents."""

    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="LTXVAudioVAEEncode",
            display_name="LTXV Audio VAE Encode",
            category="audio",
            inputs=[
                io.Audio.Input("audio", tooltip="The audio to be encoded."),
                io.Vae.Input(
                    id="audio_vae",
                    display_name="Audio VAE",
                    tooltip="The Audio VAE model to use for encoding.",
                ),
            ],
            outputs=[io.Latent.Output(display_name="Audio Latent")],
        )

    @classmethod
    def execute(cls, audio, audio_vae: AudioVAE) -> io.NodeOutput:
        audio_latents = audio_vae.encode(audio)
        return io.NodeOutput(
            {
                "samples": audio_latents,
                "sample_rate": int(audio_vae.sample_rate),
                "type": "audio",
            }
        )


class LTXVAudioVAEDecode(io.ComfyNode):
    """Node that decodes Audio VAE latents back into an audio waveform."""

    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="LTXVAudioVAEDecode",
            display_name="LTXV Audio VAE Decode",
            category="audio",
            inputs=[
                io.Latent.Input("samples", tooltip="The latent to be decoded."),
                io.Vae.Input(
                    id="audio_vae",
                    display_name="Audio VAE",
                    tooltip="The Audio VAE model used for decoding the latent.",
                ),
            ],
            outputs=[io.Audio.Output(display_name="Audio")],
        )

    @classmethod
    def execute(cls, samples, audio_vae: AudioVAE) -> io.NodeOutput:
        audio_latent = samples["samples"]
        # Nested tensors bundle several latents; decode only the last one.
        if audio_latent.is_nested:
            audio_latent = audio_latent.unbind()[-1]
        audio = audio_vae.decode(audio_latent).to(audio_latent.device)
        output_audio_sample_rate = audio_vae.output_sample_rate
        return io.NodeOutput(
            {
                "waveform": audio,
                "sample_rate": int(output_audio_sample_rate),
            }
        )


class LTXVEmptyLatentAudio(io.ComfyNode):
    """Node that creates an all-zero audio latent sized for a given video length."""

    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="LTXVEmptyLatentAudio",
            display_name="LTXV Empty Latent Audio",
            category="latent/audio",
            inputs=[
                io.Int.Input(
                    "frames_number",
                    default=97,
                    min=1,
                    max=1000,
                    step=1,
                    display_mode=io.NumberDisplay.number,
                    tooltip="Number of frames.",
                ),
                io.Int.Input(
                    "frame_rate",
                    default=25,
                    min=1,
                    max=1000,
                    step=1,
                    display_mode=io.NumberDisplay.number,
                    tooltip="Number of frames per second.",
                ),
                io.Int.Input(
                    "batch_size",
                    default=1,
                    min=1,
                    max=4096,
                    display_mode=io.NumberDisplay.number,
                    tooltip="The number of latent audio samples in the batch.",
                ),
                io.Vae.Input(
                    id="audio_vae",
                    display_name="Audio VAE",
                    tooltip="The Audio VAE model to get configuration from.",
                ),
            ],
            outputs=[io.Latent.Output(display_name="Latent")],
        )

    @classmethod
    def execute(
        cls,
        frames_number: int,
        frame_rate: int,
        batch_size: int,
        audio_vae: AudioVAE,
    ) -> io.NodeOutput:
        """Generate empty audio latents matching the reference pipeline structure."""
        assert audio_vae is not None, "Audio VAE model is required"
        # Latent geometry is derived entirely from the VAE configuration.
        z_channels = audio_vae.latent_channels
        audio_freq = audio_vae.latent_frequency_bins
        sampling_rate = int(audio_vae.sample_rate)

        num_audio_latents = audio_vae.num_of_latents_from_frames(frames_number, frame_rate)

        audio_latents = torch.zeros(
            (batch_size, z_channels, num_audio_latents, audio_freq),
            device=comfy.model_management.intermediate_device(),
        )

        return io.NodeOutput(
            {
                "samples": audio_latents,
                "sample_rate": sampling_rate,
                "type": "audio",
            }
        )


class LTXAVTextEncoderLoader(io.ComfyNode):
    """Node that loads the paired text encoder + checkpoint CLIP for LTXAV."""

    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="LTXAVTextEncoderLoader",
            display_name="LTXV Audio Text Encoder Loader",
            category="advanced/loaders",
            description="[Recipes]\n\nltxav: gemma 3 12B",
            inputs=[
                io.Combo.Input(
                    "text_encoder",
                    options=folder_paths.get_filename_list("text_encoders"),
                ),
                io.Combo.Input(
                    "ckpt_name",
                    options=folder_paths.get_filename_list("checkpoints"),
                ),
                io.Combo.Input(
                    "device",
                    options=["default", "cpu"],
                    advanced=True,
                )
            ],
            outputs=[io.Clip.Output()],
        )

    @classmethod
    def execute(cls, text_encoder, ckpt_name, device="default"):
        clip_type = comfy.sd.CLIPType.LTXV
        clip_path1 = folder_paths.get_full_path_or_raise("text_encoders", text_encoder)
        clip_path2 = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name)

        model_options = {}
        if device == "cpu":
            # Force both load and offload onto CPU when requested.
            model_options["load_device"] = model_options["offload_device"] = torch.device("cpu")

        clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type, model_options=model_options)
        return io.NodeOutput(clip)


class LTXVAudioExtension(ComfyExtension):
    """Extension registering the LTXV audio nodes."""

    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [
            LTXVAudioVAELoader,
            LTXVAudioVAEEncode,
            LTXVAudioVAEDecode,
            LTXVEmptyLatentAudio,
            LTXAVTextEncoderLoader,
        ]


async def comfy_entrypoint() -> ComfyExtension:
    return LTXVAudioExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_lt_audio.py", "license": "GNU General Public License v3.0", "lines": 197, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_extras/nodes_lt_upsampler.py
from comfy import model_management
import math


class LTXVLatentUpsampler:
    """
    Upsamples a video latent by a factor of 2.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "samples": ("LATENT",),
                "upscale_model": ("LATENT_UPSCALE_MODEL",),
                "vae": ("VAE",),
            }
        }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upsample_latent"
    CATEGORY = "latent/video"
    EXPERIMENTAL = True

    def upsample_latent(
        self,
        samples: dict,
        upscale_model,
        vae,
    ) -> tuple:
        """
        Upsample the input latent using the provided model.

        Args:
            samples (dict): Input latent samples (expects a "samples" tensor key)
            upscale_model (LatentUpsampler): Loaded upscale model
            vae: VAE model used only for its per-channel normalization statistics

        Returns:
            tuple: Tuple containing the upsampled latent
        """
        device = model_management.get_torch_device()
        # Budget enough VRAM for the model weights plus a rough per-element
        # activation estimate before moving anything to the device.
        memory_required = model_management.module_size(upscale_model)
        model_dtype = next(upscale_model.parameters()).dtype
        latents = samples["samples"]
        # Remember the caller's dtype so the result can be converted back.
        input_dtype = latents.dtype
        memory_required += math.prod(latents.shape) * 3000.0  # TODO: more accurate
        model_management.free_memory(memory_required, device)
        try:
            upscale_model.to(device)  # TODO: use the comfy model management system.
            latents = latents.to(dtype=model_dtype, device=device)
            """Upsample latents without tiling."""
            # The upscale model operates in the VAE's un-normalized latent space,
            # so undo the per-channel normalization before running it.
            latents = vae.first_stage_model.per_channel_statistics.un_normalize(latents)
            upsampled_latents = upscale_model(latents)
        finally:
            # Always move the model back off the device, even if upsampling raised.
            upscale_model.cpu()

        # Re-apply normalization and return to the caller's dtype on the
        # intermediate device.
        upsampled_latents = vae.first_stage_model.per_channel_statistics.normalize(
            upsampled_latents
        )
        upsampled_latents = upsampled_latents.to(dtype=input_dtype, device=model_management.intermediate_device())
        # Shallow-copy the latent dict so extra keys survive; drop any noise mask,
        # which no longer matches the upsampled resolution.
        return_dict = samples.copy()
        return_dict["samples"] = upsampled_latents
        return_dict.pop("noise_mask", None)
        return (return_dict,)


NODE_CLASS_MAPPINGS = {
    "LTXVLatentUpsampler": LTXVLatentUpsampler,
}
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_lt_upsampler.py", "license": "GNU General Public License v3.0", "lines": 61, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_api/latest/_util/image_types.py
from io import BytesIO


class SVG:
    """Container for SVG documents kept as in-memory byte streams."""

    def __init__(self, data: list[BytesIO]):
        self.data = data

    def combine(self, other: 'SVG') -> 'SVG':
        """Return a new SVG holding this instance's streams followed by *other*'s."""
        return SVG([*self.data, *other.data])

    @staticmethod
    def combine_all(svgs: list['SVG']) -> 'SVG':
        """Flatten a list of SVG containers into a single combined SVG."""
        merged: list[BytesIO] = []
        for svg in svgs:
            merged += svg.data
        return SVG(merged)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api/latest/_util/image_types.py", "license": "GNU General Public License v3.0", "lines": 13, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy/text_encoders/jina_clip_2.py
# Jina CLIP v2 and Jina Embeddings v3 both use their modified XLM-RoBERTa architecture. Reference implementation:
# Jina CLIP v2 (both text and vision): https://huggingface.co/jinaai/jina-clip-implementation/blob/39e6a55ae971b59bea6e44675d237c99762e7ee2/modeling_clip.py
# Jina XLM-RoBERTa (text only): http://huggingface.co/jinaai/xlm-roberta-flash-implementation/blob/2b6bc3f30750b3a9648fe9b63448c09920efe9be/modeling_xlm_roberta.py
from dataclasses import dataclass

import torch
from torch import nn as nn
from torch.nn import functional as F

import comfy.model_management
import comfy.ops
from comfy import sd1_clip
from .spiece_tokenizer import SPieceTokenizer


class JinaClip2Tokenizer(sd1_clip.SDTokenizer):
    # SentencePiece-based tokenizer for the Jina CLIP v2 text tower.
    def __init__(self, embedding_directory=None, tokenizer_data={}):
        tokenizer = tokenizer_data.get("spiece_model", None)
        # The official NewBie uses max_length=8000, but Jina Embeddings v3 actually supports 8192
        super().__init__(tokenizer, pad_with_end=False, embedding_size=1024, embedding_key='jina_clip_2', tokenizer_class=SPieceTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=False, max_length=8192, min_length=1, pad_token=1, end_token=2, tokenizer_args={"add_bos": True, "add_eos": True}, tokenizer_data=tokenizer_data)

    def state_dict(self):
        # Serialize the SentencePiece model so the tokenizer can be restored later.
        return {"spiece_model": self.tokenizer.serialize_model()}


class JinaClip2TokenizerWrapper(sd1_clip.SD1Tokenizer):
    # Exposes JinaClip2Tokenizer under the "jina_clip_2" name expected by sd1_clip.
    def __init__(self, embedding_directory=None, tokenizer_data={}):
        super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, tokenizer=JinaClip2Tokenizer, name="jina_clip_2")


# https://huggingface.co/jinaai/jina-embeddings-v3/blob/343dbf534c76fe845f304fa5c2d1fd87e1e78918/config.json
@dataclass
class XLMRobertaConfig:
    vocab_size: int = 250002
    type_vocab_size: int = 1
    hidden_size: int = 1024
    num_hidden_layers: int = 24
    num_attention_heads: int = 16
    rotary_emb_base: float = 20000.0
    intermediate_size: int = 4096
    hidden_act: str = "gelu"
    hidden_dropout_prob: float = 0.1
    attention_probs_dropout_prob: float = 0.1
    layer_norm_eps: float = 1e-05
    bos_token_id: int = 0
    eos_token_id: int = 2
    pad_token_id: int = 1


class XLMRobertaEmbeddings(nn.Module):
    # Word + token-type embeddings; no positional embeddings here because
    # position information is injected later via rotary embeddings in attention.
    def __init__(self, config, device=None, dtype=None, ops=None):
        super().__init__()
        embed_dim = config.hidden_size
        self.word_embeddings = ops.Embedding(config.vocab_size, embed_dim, padding_idx=config.pad_token_id, device=device, dtype=dtype)
        self.token_type_embeddings = ops.Embedding(config.type_vocab_size, embed_dim, device=device, dtype=dtype)

    def forward(self, input_ids=None, embeddings=None):
        """Return token embeddings; pre-computed `embeddings` take precedence over `input_ids`."""
        if input_ids is not None and embeddings is None:
            embeddings = self.word_embeddings(input_ids)
        if embeddings is not None:
            # type_vocab_size defaults to 1, so this adds the single type-0
            # embedding row to every position.
            token_type_ids = torch.zeros(embeddings.shape[1], device=embeddings.device, dtype=torch.int32)
            token_type_embeddings = self.token_type_embeddings(token_type_ids)
            embeddings = embeddings + token_type_embeddings
        return embeddings


class RotaryEmbedding(nn.Module):
    # Rotary position embedding with a cos/sin cache that grows on demand
    # and is rebuilt when device or dtype changes.
    def __init__(self, dim, base, device=None):
        super().__init__()
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim))
        # Non-persistent: recomputed from config, not stored in checkpoints.
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self._seq_len_cached = 0
        self._cos_cached = None
        self._sin_cached = None

    def _update_cos_sin_cache(self, seqlen, device=None, dtype=None):
        # Rebuild only when the cache is too short or on a device/dtype mismatch.
        if seqlen > self._seq_len_cached or self._cos_cached is None or self._cos_cached.device != device or self._cos_cached.dtype != dtype:
            self._seq_len_cached = seqlen
            t = torch.arange(seqlen, device=device, dtype=torch.float32)
            freqs = torch.outer(t, self.inv_freq.to(device=t.device))
            # Duplicate frequencies so cos/sin cover the full head dimension.
            emb = torch.cat((freqs, freqs), dim=-1)
            self._cos_cached = emb.cos().to(dtype)
            self._sin_cached = emb.sin().to(dtype)

    def forward(self, q, k):
        """Apply rotary embedding to q and k, each shaped (batch, seqlen, heads, head_dim)."""
        batch, seqlen, heads, head_dim = q.shape
        self._update_cos_sin_cache(seqlen, device=q.device, dtype=q.dtype)
        cos = self._cos_cached[:seqlen].view(1, seqlen, 1, head_dim)
        sin = self._sin_cached[:seqlen].view(1, seqlen, 1, head_dim)

        def rotate_half(x):
            # Rotate halves: (x1, x2) -> (-x2, x1).
            size = x.shape[-1] // 2
            x1, x2 = x[..., :size], x[..., size:]
            return torch.cat((-x2, x1), dim=-1)

        q_embed = (q * cos) + (rotate_half(q) * sin)
        k_embed = (k * cos) + (rotate_half(k) * sin)
        return q_embed, k_embed


class MHA(nn.Module):
    # Multi-head attention with a fused QKV projection and rotary embeddings.
    def __init__(self, config, device=None, dtype=None, ops=None):
        super().__init__()
        embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = embed_dim // config.num_attention_heads
        self.rotary_emb = RotaryEmbedding(self.head_dim, config.rotary_emb_base, device=device)
        self.Wqkv = ops.Linear(embed_dim, 3 * embed_dim, device=device, dtype=dtype)
        self.out_proj = ops.Linear(embed_dim, embed_dim, device=device, dtype=dtype)

    def forward(self, x, mask=None, optimized_attention=None):
        qkv = self.Wqkv(x)
        batch_size, seq_len, _ = qkv.shape
        qkv = qkv.view(batch_size, seq_len, 3, self.num_heads, self.head_dim)
        q, k, v = qkv.unbind(2)
        # Rotary embedding is applied in (batch, seq, heads, head_dim) layout.
        q, k = self.rotary_emb(q, k)
        # NHD -> HND
        q = q.transpose(1, 2)
        k = k.transpose(1, 2)
        v = v.transpose(1, 2)
        # skip_reshape=True: tensors are already split into heads.
        out = optimized_attention(q, k, v, heads=self.num_heads, mask=mask, skip_reshape=True)
        return self.out_proj(out)


class MLP(nn.Module):
    # Standard transformer feed-forward block with GELU activation.
    def __init__(self, config, device=None, dtype=None, ops=None):
        super().__init__()
        self.fc1 = ops.Linear(config.hidden_size, config.intermediate_size, device=device, dtype=dtype)
        self.activation = F.gelu
        self.fc2 = ops.Linear(config.intermediate_size, config.hidden_size, device=device, dtype=dtype)

    def forward(self, x):
        x = self.fc1(x)
        x = self.activation(x)
        x = self.fc2(x)
        return x


class Block(nn.Module):
    # Post-LayerNorm transformer block: norm is applied AFTER each residual add,
    # matching the reference XLM-RoBERTa implementation.
    def __init__(self, config, device=None, dtype=None, ops=None):
        super().__init__()
        self.mixer = MHA(config, device=device, dtype=dtype, ops=ops)
        self.dropout1 = nn.Dropout(config.hidden_dropout_prob)
        self.norm1 = ops.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, device=device, dtype=dtype)
        self.mlp = MLP(config, device=device, dtype=dtype, ops=ops)
        self.dropout2 = nn.Dropout(config.hidden_dropout_prob)
        self.norm2 = ops.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, device=device, dtype=dtype)

    def forward(self, hidden_states, mask=None, optimized_attention=None):
        mixer_out = self.mixer(hidden_states, mask=mask, optimized_attention=optimized_attention)
        hidden_states = self.norm1(self.dropout1(mixer_out) + hidden_states)
        mlp_out = self.mlp(hidden_states)
        hidden_states = self.norm2(self.dropout2(mlp_out) + hidden_states)
        return hidden_states


class XLMRobertaEncoder(nn.Module):
    # Stack of transformer blocks sharing one attention implementation choice.
    def __init__(self, config, device=None, dtype=None, ops=None):
        super().__init__()
        self.layers = nn.ModuleList([Block(config, device=device, dtype=dtype, ops=ops) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask=None):
        # Pick the attention kernel once per forward pass for the current device.
        # NOTE(review): relies on comfy.ldm.modules.attention being importable via
        # the comfy package imports above — confirm it is loaded transitively.
        optimized_attention = comfy.ldm.modules.attention.optimized_attention_for_device(hidden_states.device, mask=attention_mask is not None, small_input=True)
        for layer in self.layers:
            hidden_states = layer(hidden_states, mask=attention_mask, optimized_attention=optimized_attention)
        return hidden_states


class XLMRobertaModel_(nn.Module):
    # Core model: embeddings -> LayerNorm/dropout -> encoder -> mean pooling.
    def __init__(self, config, device=None, dtype=None, ops=None):
        super().__init__()
        self.embeddings = XLMRobertaEmbeddings(config, device=device, dtype=dtype, ops=ops)
        self.emb_ln = ops.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, device=device, dtype=dtype)
        self.emb_drop = nn.Dropout(config.hidden_dropout_prob)
        self.encoder = XLMRobertaEncoder(config, device=device, dtype=dtype, ops=ops)

    def forward(self, input_ids, attention_mask=None, embeds=None, num_tokens=None, intermediate_output=None, final_layer_norm_intermediate=True, dtype=None, embeds_info=[]):
        """Encode tokens and return (sequence_output, None, pooled_output).

        The extra parameters (num_tokens, intermediate_output,
        final_layer_norm_intermediate, dtype, embeds_info) are accepted for
        interface compatibility with sd1_clip but are not used here.
        """
        x = self.embeddings(input_ids=input_ids, embeddings=embeds)
        x = self.emb_ln(x)
        x = self.emb_drop(x)
        mask = None
        if attention_mask is not None:
            # Convert the 0/1 padding mask into an additive attention bias:
            # masked positions get -finfo.max, attended positions get 0.
            mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], 1, 1, attention_mask.shape[-1]))
            mask = mask.masked_fill(mask.to(torch.bool), -torch.finfo(x.dtype).max)
        sequence_output = self.encoder(x, attention_mask=mask)
        # Mean pool, see https://huggingface.co/jinaai/jina-clip-implementation/blob/39e6a55ae971b59bea6e44675d237c99762e7ee2/hf_model.py
        pooled_output = None
        if attention_mask is None:
            pooled_output = sequence_output.mean(dim=1)
        else:
            # Masked mean: average only over non-padding positions.
            attention_mask = attention_mask.to(sequence_output.dtype)
            pooled_output = (sequence_output * attention_mask.unsqueeze(-1)).sum(dim=1) / attention_mask.sum(dim=-1, keepdim=True)
        # Intermediate output is not yet implemented, use None for placeholder
        return sequence_output, None, pooled_output


class XLMRobertaModel(nn.Module):
    # Wrapper providing the config-dict constructor and embedding accessors
    # expected by sd1_clip.SDClipModel.
    def __init__(self, config_dict, dtype, device, operations):
        super().__init__()
        self.config = XLMRobertaConfig(**config_dict)
        self.model = XLMRobertaModel_(self.config, device=device, dtype=dtype, ops=operations)
        self.num_layers = self.config.num_hidden_layers

    def get_input_embeddings(self):
        return self.model.embeddings.word_embeddings

    def set_input_embeddings(self, embeddings):
        self.model.embeddings.word_embeddings = embeddings

    def forward(self, *args, **kwargs):
        return self.model(*args, **kwargs)


class JinaClip2TextModel(sd1_clip.SDClipModel):
    # SDClipModel specialization wiring up the XLM-RoBERTa backbone with the
    # Jina special tokens (bos=0, eos=2, pad=1) and attention-mask support.
    def __init__(self, device="cpu", dtype=None, model_options={}):
        super().__init__(device=device, dtype=dtype, textmodel_json_config={}, model_class=XLMRobertaModel, special_tokens={"start": 0, "end": 2, "pad": 1}, enable_attention_masks=True, return_attention_masks=True, model_options=model_options)


class JinaClip2TextModelWrapper(sd1_clip.SD1ClipModel):
    # Exposes JinaClip2TextModel under the "jina_clip_2" name.
    def __init__(self, device="cpu", dtype=None, model_options={}):
        super().__init__(device=device, dtype=dtype, clip_model=JinaClip2TextModel, name="jina_clip_2", model_options=model_options)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/text_encoders/jina_clip_2.py", "license": "GNU General Public License v3.0", "lines": 179, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy/text_encoders/newbie.py
import torch
import comfy.model_management
import comfy.text_encoders.jina_clip_2
import comfy.text_encoders.lumina2


class NewBieTokenizer:
    """Dual tokenizer for NewBie: tokenizes the same text with both the
    Gemma3-4B tokenizer and the Jina CLIP v2 tokenizer."""
    def __init__(self, embedding_directory=None, tokenizer_data={}):
        # Each sub-tokenizer gets its own SentencePiece model from tokenizer_data.
        self.gemma = comfy.text_encoders.lumina2.Gemma3_4BTokenizer(embedding_directory=embedding_directory, tokenizer_data={"spiece_model": tokenizer_data["gemma_spiece_model"]})
        self.jina = comfy.text_encoders.jina_clip_2.JinaClip2Tokenizer(embedding_directory=embedding_directory, tokenizer_data={"spiece_model": tokenizer_data["jina_spiece_model"]})

    def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs):
        """Return a dict with both tokenizations keyed by encoder name."""
        out = {}
        out["gemma"] = self.gemma.tokenize_with_weights(text, return_word_ids, **kwargs)
        out["jina"] = self.jina.tokenize_with_weights(text, return_word_ids, **kwargs)
        return out

    def untokenize(self, token_weight_pair):
        # Reversing a dual tokenization is ambiguous; deliberately unsupported.
        raise NotImplementedError

    def state_dict(self):
        return {}


class NewBieTEModel(torch.nn.Module):
    """Dual text-encoder model combining Gemma3-4B and Jina CLIP v2."""
    def __init__(self, dtype_gemma=None, device="cpu", dtype=None, model_options={}):
        super().__init__()
        # Gemma may run in a different weight dtype than Jina.
        dtype_gemma = comfy.model_management.pick_weight_dtype(dtype_gemma, dtype, device)
        self.gemma = comfy.text_encoders.lumina2.Gemma3_4BModel(device=device, dtype=dtype_gemma, model_options=model_options)
        self.jina = comfy.text_encoders.jina_clip_2.JinaClip2TextModel(device=device, dtype=dtype, model_options=model_options)
        self.dtypes = {dtype, dtype_gemma}

    def set_clip_options(self, options):
        self.gemma.set_clip_options(options)
        self.jina.set_clip_options(options)

    def reset_clip_options(self):
        self.gemma.reset_clip_options()
        self.jina.reset_clip_options()

    def encode_token_weights(self, token_weight_pairs):
        token_weight_pairs_gemma = token_weight_pairs["gemma"]
        token_weight_pairs_jina = token_weight_pairs["jina"]
        gemma_out, gemma_pooled, gemma_extra = self.gemma.encode_token_weights(token_weight_pairs_gemma)
        jina_out, jina_pooled, jina_extra = self.jina.encode_token_weights(token_weight_pairs_jina)
        # Deliberate mix: sequence features and extras come from Gemma, while
        # the pooled vector comes from Jina. gemma_pooled/jina_out are unused.
        return gemma_out, jina_pooled, gemma_extra

    def load_sd(self, sd):
        # Route the state dict by a key unique to Gemma3 checkpoints
        # (attention q_norm); everything else is assumed to be Jina weights.
        if "model.layers.0.self_attn.q_norm.weight" in sd:
            return self.gemma.load_sd(sd)
        else:
            return self.jina.load_sd(sd)


def te(dtype_llama=None, llama_quantization_metadata=None):
    """Factory returning a NewBieTEModel subclass with the given Gemma dtype
    and optional quantization metadata baked into model_options."""
    class NewBieTEModel_(NewBieTEModel):
        def __init__(self, device="cpu", dtype=None, model_options={}):
            if llama_quantization_metadata is not None:
                # Copy so the caller's dict is not mutated.
                model_options = model_options.copy()
                model_options["llama_quantization_metadata"] = llama_quantization_metadata
            super().__init__(dtype_gemma=dtype_llama, device=device, dtype=dtype, model_options=model_options)
    return NewBieTEModel_
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/text_encoders/newbie.py", "license": "GNU General Public License v3.0", "lines": 49, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:tests/execution/test_jobs.py
"""Unit tests for comfy_execution/jobs.py""" from comfy_execution.jobs import ( JobStatus, is_previewable, normalize_queue_item, normalize_history_item, normalize_output_item, normalize_outputs, get_outputs_summary, apply_sorting, has_3d_extension, ) class TestJobStatus: """Test JobStatus constants.""" def test_status_values(self): """Status constants should have expected string values.""" assert JobStatus.PENDING == 'pending' assert JobStatus.IN_PROGRESS == 'in_progress' assert JobStatus.COMPLETED == 'completed' assert JobStatus.FAILED == 'failed' assert JobStatus.CANCELLED == 'cancelled' def test_all_contains_all_statuses(self): """ALL should contain all status values.""" assert JobStatus.PENDING in JobStatus.ALL assert JobStatus.IN_PROGRESS in JobStatus.ALL assert JobStatus.COMPLETED in JobStatus.ALL assert JobStatus.FAILED in JobStatus.ALL assert JobStatus.CANCELLED in JobStatus.ALL assert len(JobStatus.ALL) == 5 class TestIsPreviewable: """Unit tests for is_previewable()""" def test_previewable_media_types(self): """Images, video, audio, 3d, text media types should be previewable.""" for media_type in ['images', 'video', 'audio', '3d', 'text']: assert is_previewable(media_type, {}) is True def test_non_previewable_media_types(self): """Other media types should not be previewable.""" for media_type in ['latents', 'metadata', 'files']: assert is_previewable(media_type, {}) is False def test_3d_extensions_previewable(self): """3D file extensions should be previewable regardless of media_type.""" for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']: item = {'filename': f'model{ext}'} assert is_previewable('files', item) is True def test_3d_extensions_case_insensitive(self): """3D extension check should be case insensitive.""" item = {'filename': 'MODEL.GLB'} assert is_previewable('files', item) is True def test_video_format_previewable(self): """Items with video/ format should be previewable.""" item = {'format': 'video/mp4'} assert is_previewable('files', item) 
is True def test_audio_format_previewable(self): """Items with audio/ format should be previewable.""" item = {'format': 'audio/wav'} assert is_previewable('files', item) is True def test_other_format_not_previewable(self): """Items with other format should not be previewable.""" item = {'format': 'application/json'} assert is_previewable('files', item) is False class TestGetOutputsSummary: """Unit tests for get_outputs_summary()""" def test_empty_outputs(self): """Empty outputs should return 0 count and None preview.""" count, preview = get_outputs_summary({}) assert count == 0 assert preview is None def test_counts_across_multiple_nodes(self): """Outputs from multiple nodes should all be counted.""" outputs = { 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]}, 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]}, 'node3': {'images': [ {'filename': 'c.png', 'type': 'output'}, {'filename': 'd.png', 'type': 'output'} ]} } count, preview = get_outputs_summary(outputs) assert count == 4 def test_skips_animated_key_and_non_list_values(self): """The 'animated' key and non-list values should be skipped.""" outputs = { 'node1': { 'images': [{'filename': 'test.png', 'type': 'output'}], 'animated': [True], # Should skip due to key name 'metadata': 'string', # Should skip due to non-list 'count': 42 # Should skip due to non-list } } count, preview = get_outputs_summary(outputs) assert count == 1 def test_preview_prefers_type_output(self): """Items with type='output' should be preferred for preview.""" outputs = { 'node1': { 'images': [ {'filename': 'temp.png', 'type': 'temp'}, {'filename': 'output.png', 'type': 'output'} ] } } count, preview = get_outputs_summary(outputs) assert count == 2 assert preview['filename'] == 'output.png' def test_preview_fallback_when_no_output_type(self): """If no type='output', should use first previewable.""" outputs = { 'node1': { 'images': [ {'filename': 'temp1.png', 'type': 'temp'}, {'filename': 'temp2.png', 'type': 
'temp'} ] } } count, preview = get_outputs_summary(outputs) assert preview['filename'] == 'temp1.png' def test_non_previewable_media_types_counted_but_no_preview(self): """Non-previewable media types should be counted but not used as preview.""" outputs = { 'node1': { 'latents': [ {'filename': 'latent1.safetensors'}, {'filename': 'latent2.safetensors'} ] } } count, preview = get_outputs_summary(outputs) assert count == 2 assert preview is None def test_previewable_media_types(self): """Images, video, and audio media types should be previewable.""" for media_type in ['images', 'video', 'audio']: outputs = { 'node1': { media_type: [{'filename': 'test.file', 'type': 'output'}] } } count, preview = get_outputs_summary(outputs) assert preview is not None, f"{media_type} should be previewable" def test_3d_files_previewable(self): """3D file extensions should be previewable.""" for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']: outputs = { 'node1': { 'files': [{'filename': f'model{ext}', 'type': 'output'}] } } count, preview = get_outputs_summary(outputs) assert preview is not None, f"3D file {ext} should be previewable" def test_format_mime_type_previewable(self): """Files with video/ or audio/ format should be previewable.""" for fmt in ['video/x-custom', 'audio/x-custom']: outputs = { 'node1': { 'files': [{'filename': 'file.custom', 'format': fmt, 'type': 'output'}] } } count, preview = get_outputs_summary(outputs) assert preview is not None, f"Format {fmt} should be previewable" def test_preview_enriched_with_node_metadata(self): """Preview should include nodeId, mediaType, and original fields.""" outputs = { 'node123': { 'images': [{'filename': 'test.png', 'type': 'output', 'subfolder': 'outputs'}] } } count, preview = get_outputs_summary(outputs) assert preview['nodeId'] == 'node123' assert preview['mediaType'] == 'images' assert preview['subfolder'] == 'outputs' def test_string_3d_filename_creates_preview(self): """String items with 3D extensions should 
synthesize a preview (Preview3D node output). Only the .glb counts — nulls and non-file strings are excluded.""" outputs = { 'node1': { 'result': ['preview3d_abc123.glb', None, None] } } count, preview = get_outputs_summary(outputs) assert count == 1 assert preview is not None assert preview['filename'] == 'preview3d_abc123.glb' assert preview['mediaType'] == '3d' assert preview['nodeId'] == 'node1' assert preview['type'] == 'output' def test_string_non_3d_filename_no_preview(self): """String items without 3D extensions should not create a preview.""" outputs = { 'node1': { 'result': ['data.json', None] } } count, preview = get_outputs_summary(outputs) assert count == 0 assert preview is None def test_string_3d_filename_used_as_fallback(self): """String 3D preview should be used when no dict items are previewable.""" outputs = { 'node1': { 'latents': [{'filename': 'latent.safetensors'}], }, 'node2': { 'result': ['model.glb', None] } } count, preview = get_outputs_summary(outputs) assert preview is not None assert preview['filename'] == 'model.glb' assert preview['mediaType'] == '3d' class TestHas3DExtension: """Unit tests for has_3d_extension()""" def test_recognized_extensions(self): for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']: assert has_3d_extension(f'model{ext}') is True def test_case_insensitive(self): assert has_3d_extension('MODEL.GLB') is True assert has_3d_extension('Scene.GLTF') is True def test_non_3d_extensions(self): for name in ['photo.png', 'video.mp4', 'data.json', 'model']: assert has_3d_extension(name) is False class TestApplySorting: """Unit tests for apply_sorting()""" def test_sort_by_create_time_desc(self): """Default sort by create_time descending.""" jobs = [ {'id': 'a', 'create_time': 100}, {'id': 'b', 'create_time': 300}, {'id': 'c', 'create_time': 200}, ] result = apply_sorting(jobs, 'created_at', 'desc') assert [j['id'] for j in result] == ['b', 'c', 'a'] def test_sort_by_create_time_asc(self): """Sort by create_time 
ascending.""" jobs = [ {'id': 'a', 'create_time': 100}, {'id': 'b', 'create_time': 300}, {'id': 'c', 'create_time': 200}, ] result = apply_sorting(jobs, 'created_at', 'asc') assert [j['id'] for j in result] == ['a', 'c', 'b'] def test_sort_by_execution_duration(self): """Sort by execution_duration should order by duration.""" jobs = [ {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 'execution_end_time': 1300}, # 1s {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s ] result = apply_sorting(jobs, 'execution_duration', 'desc') assert [j['id'] for j in result] == ['a', 'c', 'b'] def test_sort_with_none_values(self): """Jobs with None values should sort as 0.""" jobs = [ {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None}, {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, ] result = apply_sorting(jobs, 'execution_duration', 'asc') assert result[0]['id'] == 'b' # None treated as 0, comes first class TestNormalizeQueueItem: """Unit tests for normalize_queue_item()""" def test_basic_normalization(self): """Queue item should be normalized to job dict.""" item = ( 10, # priority/number 'prompt-123', # prompt_id {'nodes': {}}, # prompt { 'create_time': 1234567890, 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}} }, # extra_data ['node1'], # outputs_to_execute ) job = normalize_queue_item(item, JobStatus.PENDING) assert job['id'] == 'prompt-123' assert job['status'] == 'pending' assert job['priority'] == 10 assert job['create_time'] == 1234567890 assert 'execution_start_time' not in job assert 'execution_end_time' not in job assert 'execution_error' not in job assert 'preview_output' not in job assert job['outputs_count'] == 0 assert job['workflow_id'] == 
'workflow-abc' class TestNormalizeHistoryItem: """Unit tests for normalize_history_item()""" def test_completed_job(self): """Completed history item should have correct status and times from messages.""" history_item = { 'prompt': ( 5, # priority 'prompt-456', {'nodes': {}}, { 'create_time': 1234567890000, 'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}} }, ['node1'], ), 'status': { 'status_str': 'success', 'completed': True, 'messages': [ ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}), ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}), ] }, 'outputs': {}, } job = normalize_history_item('prompt-456', history_item) assert job['id'] == 'prompt-456' assert job['status'] == 'completed' assert job['priority'] == 5 assert job['execution_start_time'] == 1234567890500 assert job['execution_end_time'] == 1234567893000 assert job['workflow_id'] == 'workflow-xyz' def test_failed_job(self): """Failed history item should have failed status and error from messages.""" history_item = { 'prompt': ( 5, 'prompt-789', {'nodes': {}}, {'create_time': 1234567890000}, ['node1'], ), 'status': { 'status_str': 'error', 'completed': False, 'messages': [ ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}), ('execution_error', { 'prompt_id': 'prompt-789', 'node_id': '5', 'node_type': 'KSampler', 'exception_message': 'CUDA out of memory', 'exception_type': 'RuntimeError', 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'], 'timestamp': 1234567891000, }) ] }, 'outputs': {}, } job = normalize_history_item('prompt-789', history_item) assert job['status'] == 'failed' assert job['execution_start_time'] == 1234567890500 assert job['execution_end_time'] == 1234567891000 assert job['execution_error']['node_id'] == '5' assert job['execution_error']['node_type'] == 'KSampler' assert job['execution_error']['exception_message'] == 'CUDA out of memory' def test_cancelled_job(self): 
"""Cancelled/interrupted history item should have cancelled status.""" history_item = { 'prompt': ( 5, 'prompt-cancelled', {'nodes': {}}, {'create_time': 1234567890000}, ['node1'], ), 'status': { 'status_str': 'error', 'completed': False, 'messages': [ ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}), ('execution_interrupted', { 'prompt_id': 'prompt-cancelled', 'node_id': '5', 'node_type': 'KSampler', 'executed': ['1', '2', '3'], 'timestamp': 1234567891000, }) ] }, 'outputs': {}, } job = normalize_history_item('prompt-cancelled', history_item) assert job['status'] == 'cancelled' assert job['execution_start_time'] == 1234567890500 assert job['execution_end_time'] == 1234567891000 # Cancelled jobs should not have execution_error set assert 'execution_error' not in job def test_include_outputs(self): """When include_outputs=True, should include full output data.""" history_item = { 'prompt': ( 5, 'prompt-123', {'nodes': {'1': {}}}, {'create_time': 1234567890, 'client_id': 'abc'}, ['node1'], ), 'status': {'status_str': 'success', 'completed': True, 'messages': []}, 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}}, } job = normalize_history_item('prompt-123', history_item, include_outputs=True) assert 'outputs' in job assert 'workflow' in job assert 'execution_status' in job assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}} assert job['workflow'] == { 'prompt': {'nodes': {'1': {}}}, 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'}, } def test_include_outputs_normalizes_3d_strings(self): """Detail view should transform string 3D filenames into file output dicts.""" history_item = { 'prompt': ( 5, 'prompt-3d', {'nodes': {}}, {'create_time': 1234567890}, ['node1'], ), 'status': {'status_str': 'success', 'completed': True, 'messages': []}, 'outputs': { 'node1': { 'result': ['preview3d_abc123.glb', None, None] } }, } job = normalize_history_item('prompt-3d', history_item, 
include_outputs=True) assert job['outputs_count'] == 1 result_items = job['outputs']['node1']['result'] assert len(result_items) == 1 assert result_items[0] == { 'filename': 'preview3d_abc123.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d', } def test_include_outputs_preserves_dict_items(self): """Detail view normalization should pass dict items through unchanged.""" history_item = { 'prompt': ( 5, 'prompt-img', {'nodes': {}}, {'create_time': 1234567890}, ['node1'], ), 'status': {'status_str': 'success', 'completed': True, 'messages': []}, 'outputs': { 'node1': { 'images': [ {'filename': 'photo.png', 'type': 'output', 'subfolder': ''}, ] } }, } job = normalize_history_item('prompt-img', history_item, include_outputs=True) assert job['outputs_count'] == 1 assert job['outputs']['node1']['images'] == [ {'filename': 'photo.png', 'type': 'output', 'subfolder': ''}, ] class TestNormalizeOutputItem: """Unit tests for normalize_output_item()""" def test_none_returns_none(self): assert normalize_output_item(None) is None def test_string_3d_extension_synthesizes_dict(self): result = normalize_output_item('model.glb') assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'} def test_string_non_3d_extension_returns_none(self): assert normalize_output_item('data.json') is None def test_string_no_extension_returns_none(self): assert normalize_output_item('camera_info_string') is None def test_dict_passes_through(self): item = {'filename': 'test.png', 'type': 'output'} assert normalize_output_item(item) is item def test_other_types_return_none(self): assert normalize_output_item(42) is None assert normalize_output_item(True) is None class TestNormalizeOutputs: """Unit tests for normalize_outputs()""" def test_empty_outputs(self): assert normalize_outputs({}) == {} def test_dict_items_pass_through(self): outputs = { 'node1': { 'images': [{'filename': 'a.png', 'type': 'output'}], } } result = normalize_outputs(outputs) assert 
result == outputs def test_3d_string_synthesized(self): outputs = { 'node1': { 'result': ['model.glb', None, None], } } result = normalize_outputs(outputs) assert result == { 'node1': { 'result': [ {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}, ], } } def test_animated_key_preserved(self): outputs = { 'node1': { 'images': [{'filename': 'a.png', 'type': 'output'}], 'animated': [True], } } result = normalize_outputs(outputs) assert result['node1']['animated'] == [True] def test_non_dict_node_outputs_preserved(self): outputs = {'node1': 'unexpected_value'} result = normalize_outputs(outputs) assert result == {'node1': 'unexpected_value'} def test_none_items_filtered_but_other_types_preserved(self): outputs = { 'node1': { 'result': ['data.json', None, [1, 2, 3]], } } result = normalize_outputs(outputs) assert result == { 'node1': { 'result': ['data.json', [1, 2, 3]], } }
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "tests/execution/test_jobs.py", "license": "GNU General Public License v3.0", "lines": 523, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Comfy-Org/ComfyUI:tests-unit/execution_test/preview_method_override_test.py
""" Unit tests for Queue-specific Preview Method Override feature. Tests the preview method override functionality: - LatentPreviewMethod.from_string() method - set_preview_method() function in latent_preview.py - default_preview_method variable - Integration with args.preview_method """ import pytest from comfy.cli_args import args, LatentPreviewMethod from latent_preview import set_preview_method, default_preview_method class TestLatentPreviewMethodFromString: """Test LatentPreviewMethod.from_string() classmethod.""" @pytest.mark.parametrize("value,expected", [ ("auto", LatentPreviewMethod.Auto), ("latent2rgb", LatentPreviewMethod.Latent2RGB), ("taesd", LatentPreviewMethod.TAESD), ("none", LatentPreviewMethod.NoPreviews), ]) def test_valid_values_return_enum(self, value, expected): """Valid string values should return corresponding enum.""" assert LatentPreviewMethod.from_string(value) == expected @pytest.mark.parametrize("invalid", [ "invalid", "TAESD", # Case sensitive "AUTO", # Case sensitive "Latent2RGB", # Case sensitive "latent", "", "default", # default is special, not a method ]) def test_invalid_values_return_none(self, invalid): """Invalid string values should return None.""" assert LatentPreviewMethod.from_string(invalid) is None class TestLatentPreviewMethodEnumValues: """Test LatentPreviewMethod enum has expected values.""" def test_enum_values(self): """Verify enum values match expected strings.""" assert LatentPreviewMethod.NoPreviews.value == "none" assert LatentPreviewMethod.Auto.value == "auto" assert LatentPreviewMethod.Latent2RGB.value == "latent2rgb" assert LatentPreviewMethod.TAESD.value == "taesd" def test_enum_count(self): """Verify exactly 4 preview methods exist.""" assert len(LatentPreviewMethod) == 4 class TestSetPreviewMethod: """Test set_preview_method() function from latent_preview.py.""" def setup_method(self): """Store original value before each test.""" self.original = args.preview_method def teardown_method(self): """Restore 
original value after each test.""" args.preview_method = self.original def test_override_with_taesd(self): """'taesd' should set args.preview_method to TAESD.""" set_preview_method("taesd") assert args.preview_method == LatentPreviewMethod.TAESD def test_override_with_latent2rgb(self): """'latent2rgb' should set args.preview_method to Latent2RGB.""" set_preview_method("latent2rgb") assert args.preview_method == LatentPreviewMethod.Latent2RGB def test_override_with_auto(self): """'auto' should set args.preview_method to Auto.""" set_preview_method("auto") assert args.preview_method == LatentPreviewMethod.Auto def test_override_with_none_value(self): """'none' should set args.preview_method to NoPreviews.""" set_preview_method("none") assert args.preview_method == LatentPreviewMethod.NoPreviews def test_default_restores_original(self): """'default' should restore to default_preview_method.""" # First override to something else set_preview_method("taesd") assert args.preview_method == LatentPreviewMethod.TAESD # Then use 'default' to restore set_preview_method("default") assert args.preview_method == default_preview_method def test_none_param_restores_original(self): """None parameter should restore to default_preview_method.""" # First override to something else set_preview_method("taesd") assert args.preview_method == LatentPreviewMethod.TAESD # Then use None to restore set_preview_method(None) assert args.preview_method == default_preview_method def test_empty_string_restores_original(self): """Empty string should restore to default_preview_method.""" set_preview_method("taesd") set_preview_method("") assert args.preview_method == default_preview_method def test_invalid_value_restores_original(self): """Invalid value should restore to default_preview_method.""" set_preview_method("taesd") set_preview_method("invalid_method") assert args.preview_method == default_preview_method def test_case_sensitive_invalid_restores(self): """Case-mismatched values should restore 
to default.""" set_preview_method("taesd") set_preview_method("TAESD") # Wrong case assert args.preview_method == default_preview_method class TestDefaultPreviewMethod: """Test default_preview_method module variable.""" def test_default_is_not_none(self): """default_preview_method should not be None.""" assert default_preview_method is not None def test_default_is_enum_member(self): """default_preview_method should be a LatentPreviewMethod enum.""" assert isinstance(default_preview_method, LatentPreviewMethod) def test_default_matches_args_initial(self): """default_preview_method should match CLI default or user setting.""" # This tests that default_preview_method was captured at module load # After set_preview_method(None), args should equal default original = args.preview_method set_preview_method("taesd") set_preview_method(None) assert args.preview_method == default_preview_method args.preview_method = original class TestArgsPreviewMethodModification: """Test args.preview_method can be modified correctly.""" def setup_method(self): """Store original value before each test.""" self.original = args.preview_method def teardown_method(self): """Restore original value after each test.""" args.preview_method = self.original def test_args_accepts_all_enum_values(self): """args.preview_method should accept all LatentPreviewMethod values.""" for method in LatentPreviewMethod: args.preview_method = method assert args.preview_method == method def test_args_modification_and_restoration(self): """args.preview_method should be modifiable and restorable.""" original = args.preview_method args.preview_method = LatentPreviewMethod.TAESD assert args.preview_method == LatentPreviewMethod.TAESD args.preview_method = original assert args.preview_method == original class TestExecutionFlow: """Test the execution flow pattern used in execution.py.""" def setup_method(self): """Store original value before each test.""" self.original = args.preview_method def teardown_method(self): 
"""Restore original value after each test.""" args.preview_method = self.original def test_sequential_executions_with_different_methods(self): """Simulate multiple queue executions with different preview methods.""" # Execution 1: taesd set_preview_method("taesd") assert args.preview_method == LatentPreviewMethod.TAESD # Execution 2: none set_preview_method("none") assert args.preview_method == LatentPreviewMethod.NoPreviews # Execution 3: default (restore) set_preview_method("default") assert args.preview_method == default_preview_method # Execution 4: auto set_preview_method("auto") assert args.preview_method == LatentPreviewMethod.Auto # Execution 5: no override (None) set_preview_method(None) assert args.preview_method == default_preview_method def test_override_then_default_pattern(self): """Test the pattern: override -> execute -> next call restores.""" # First execution with override set_preview_method("latent2rgb") assert args.preview_method == LatentPreviewMethod.Latent2RGB # Second execution without override restores default set_preview_method(None) assert args.preview_method == default_preview_method def test_extra_data_simulation(self): """Simulate extra_data.get('preview_method') patterns.""" # Simulate: extra_data = {"preview_method": "taesd"} extra_data = {"preview_method": "taesd"} set_preview_method(extra_data.get("preview_method")) assert args.preview_method == LatentPreviewMethod.TAESD # Simulate: extra_data = {} extra_data = {} set_preview_method(extra_data.get("preview_method")) assert args.preview_method == default_preview_method # Simulate: extra_data = {"preview_method": "default"} extra_data = {"preview_method": "default"} set_preview_method(extra_data.get("preview_method")) assert args.preview_method == default_preview_method class TestRealWorldScenarios: """Tests using real-world prompt data patterns.""" def setup_method(self): """Store original value before each test.""" self.original = args.preview_method def teardown_method(self): 
"""Restore original value after each test.""" args.preview_method = self.original def test_captured_prompt_without_preview_method(self): """ Test with captured prompt that has no preview_method. Based on: tests-unit/execution_test/fixtures/default_prompt.json """ # Real captured extra_data structure (preview_method absent) extra_data = { "extra_pnginfo": {"workflow": {}}, "client_id": "271314f0dabd48e5aaa488ed7a4ceb0d", "create_time": 1765416558179 } set_preview_method(extra_data.get("preview_method")) assert args.preview_method == default_preview_method def test_captured_prompt_with_preview_method_taesd(self): """Test captured prompt with preview_method: taesd.""" extra_data = { "extra_pnginfo": {"workflow": {}}, "client_id": "271314f0dabd48e5aaa488ed7a4ceb0d", "preview_method": "taesd" } set_preview_method(extra_data.get("preview_method")) assert args.preview_method == LatentPreviewMethod.TAESD def test_captured_prompt_with_preview_method_none(self): """Test captured prompt with preview_method: none (disable preview).""" extra_data = { "extra_pnginfo": {"workflow": {}}, "client_id": "test-client", "preview_method": "none" } set_preview_method(extra_data.get("preview_method")) assert args.preview_method == LatentPreviewMethod.NoPreviews def test_captured_prompt_with_preview_method_latent2rgb(self): """Test captured prompt with preview_method: latent2rgb.""" extra_data = { "extra_pnginfo": {"workflow": {}}, "client_id": "test-client", "preview_method": "latent2rgb" } set_preview_method(extra_data.get("preview_method")) assert args.preview_method == LatentPreviewMethod.Latent2RGB def test_captured_prompt_with_preview_method_auto(self): """Test captured prompt with preview_method: auto.""" extra_data = { "extra_pnginfo": {"workflow": {}}, "client_id": "test-client", "preview_method": "auto" } set_preview_method(extra_data.get("preview_method")) assert args.preview_method == LatentPreviewMethod.Auto def test_captured_prompt_with_preview_method_default(self): """Test 
captured prompt with preview_method: default (use CLI setting).""" # First set to something else set_preview_method("taesd") assert args.preview_method == LatentPreviewMethod.TAESD # Then simulate a prompt with "default" extra_data = { "extra_pnginfo": {"workflow": {}}, "client_id": "test-client", "preview_method": "default" } set_preview_method(extra_data.get("preview_method")) assert args.preview_method == default_preview_method def test_sequential_queue_with_different_preview_methods(self): """ Simulate real queue scenario: multiple prompts with different settings. This tests the actual usage pattern in ComfyUI. """ # Queue 1: User wants TAESD preview extra_data_1 = {"client_id": "client-1", "preview_method": "taesd"} set_preview_method(extra_data_1.get("preview_method")) assert args.preview_method == LatentPreviewMethod.TAESD # Queue 2: User wants no preview (faster execution) extra_data_2 = {"client_id": "client-2", "preview_method": "none"} set_preview_method(extra_data_2.get("preview_method")) assert args.preview_method == LatentPreviewMethod.NoPreviews # Queue 3: User doesn't specify (use server default) extra_data_3 = {"client_id": "client-3"} set_preview_method(extra_data_3.get("preview_method")) assert args.preview_method == default_preview_method # Queue 4: User explicitly wants default extra_data_4 = {"client_id": "client-4", "preview_method": "default"} set_preview_method(extra_data_4.get("preview_method")) assert args.preview_method == default_preview_method # Queue 5: User wants latent2rgb extra_data_5 = {"client_id": "client-5", "preview_method": "latent2rgb"} set_preview_method(extra_data_5.get("preview_method")) assert args.preview_method == LatentPreviewMethod.Latent2RGB
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "tests-unit/execution_test/preview_method_override_test.py", "license": "GNU General Public License v3.0", "lines": 279, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Comfy-Org/ComfyUI:tests/execution/test_preview_method.py
""" E2E tests for Queue-specific Preview Method Override feature. Tests actual execution with different preview_method values. Requires a running ComfyUI server with models. Usage: COMFYUI_SERVER=http://localhost:8988 pytest test_preview_method_e2e.py -v -m preview_method Note: These tests execute actual image generation and wait for completion. Tests verify preview image transmission based on preview_method setting. """ import os import json import pytest import uuid import time import random import websocket import urllib.request from pathlib import Path # Server configuration SERVER_URL = os.environ.get("COMFYUI_SERVER", "http://localhost:8988") SERVER_HOST = SERVER_URL.replace("http://", "").replace("https://", "") # Use existing inference graph fixture GRAPH_FILE = Path(__file__).parent.parent / "inference" / "graphs" / "default_graph_sdxl1_0.json" def is_server_running() -> bool: """Check if ComfyUI server is running.""" try: request = urllib.request.Request(f"{SERVER_URL}/system_stats") with urllib.request.urlopen(request, timeout=2.0): return True except Exception: return False def prepare_graph_for_test(graph: dict, steps: int = 5) -> dict: """Prepare graph for testing: randomize seeds and reduce steps.""" adapted = json.loads(json.dumps(graph)) # Deep copy for node_id, node in adapted.items(): inputs = node.get("inputs", {}) # Handle both "seed" and "noise_seed" (used by KSamplerAdvanced) if "seed" in inputs: inputs["seed"] = random.randint(0, 2**32 - 1) if "noise_seed" in inputs: inputs["noise_seed"] = random.randint(0, 2**32 - 1) # Reduce steps for faster testing (default 20 -> 5) if "steps" in inputs: inputs["steps"] = steps return adapted # Alias for backward compatibility randomize_seed = prepare_graph_for_test class PreviewMethodClient: """Client for testing preview_method with WebSocket execution tracking.""" def __init__(self, server_address: str): self.server_address = server_address self.client_id = str(uuid.uuid4()) self.ws = None def 
connect(self): """Connect to WebSocket.""" self.ws = websocket.WebSocket() self.ws.settimeout(120) # 2 minute timeout for sampling self.ws.connect(f"ws://{self.server_address}/ws?clientId={self.client_id}") def close(self): """Close WebSocket connection.""" if self.ws: self.ws.close() def queue_prompt(self, prompt: dict, extra_data: dict = None) -> dict: """Queue a prompt and return response with prompt_id.""" data = { "prompt": prompt, "client_id": self.client_id, "extra_data": extra_data or {} } req = urllib.request.Request( f"http://{self.server_address}/prompt", data=json.dumps(data).encode("utf-8"), headers={"Content-Type": "application/json"} ) return json.loads(urllib.request.urlopen(req).read()) def wait_for_execution(self, prompt_id: str, timeout: float = 120.0) -> dict: """ Wait for execution to complete via WebSocket. Returns: dict with keys: completed, error, preview_count, execution_time """ result = { "completed": False, "error": None, "preview_count": 0, "execution_time": 0.0 } start_time = time.time() self.ws.settimeout(timeout) try: while True: out = self.ws.recv() elapsed = time.time() - start_time if isinstance(out, str): message = json.loads(out) msg_type = message.get("type") data = message.get("data", {}) if data.get("prompt_id") != prompt_id: continue if msg_type == "executing": if data.get("node") is None: # Execution complete result["completed"] = True result["execution_time"] = elapsed break elif msg_type == "execution_error": result["error"] = data result["execution_time"] = elapsed break elif msg_type == "progress": # Progress update during sampling pass elif isinstance(out, bytes): # Binary data = preview image result["preview_count"] += 1 except websocket.WebSocketTimeoutException: result["error"] = "Timeout waiting for execution" result["execution_time"] = time.time() - start_time return result def load_graph() -> dict: """Load the SDXL graph fixture with randomized seed.""" with open(GRAPH_FILE) as f: graph = json.load(f) return 
randomize_seed(graph) # Avoid caching # Skip all tests if server is not running pytestmark = [ pytest.mark.skipif( not is_server_running(), reason=f"ComfyUI server not running at {SERVER_URL}" ), pytest.mark.preview_method, pytest.mark.execution, ] @pytest.fixture def client(): """Create and connect a test client.""" c = PreviewMethodClient(SERVER_HOST) c.connect() yield c c.close() @pytest.fixture def graph(): """Load the test graph.""" return load_graph() class TestPreviewMethodExecution: """Test actual execution with different preview methods.""" def test_execution_with_latent2rgb(self, client, graph): """ Execute with preview_method=latent2rgb. Should complete and potentially receive preview images. """ extra_data = {"preview_method": "latent2rgb"} response = client.queue_prompt(graph, extra_data) assert "prompt_id" in response result = client.wait_for_execution(response["prompt_id"]) # Should complete (may error if model missing, but that's separate) assert result["completed"] or result["error"] is not None # Execution should take some time (sampling) if result["completed"]: assert result["execution_time"] > 0.5, "Execution too fast - likely didn't run" # latent2rgb should produce previews print(f"latent2rgb: {result['preview_count']} previews in {result['execution_time']:.2f}s") # noqa: T201 def test_execution_with_taesd(self, client, graph): """ Execute with preview_method=taesd. TAESD provides higher quality previews. """ extra_data = {"preview_method": "taesd"} response = client.queue_prompt(graph, extra_data) assert "prompt_id" in response result = client.wait_for_execution(response["prompt_id"]) assert result["completed"] or result["error"] is not None if result["completed"]: assert result["execution_time"] > 0.5 # taesd should also produce previews print(f"taesd: {result['preview_count']} previews in {result['execution_time']:.2f}s") # noqa: T201 def test_execution_with_none_preview(self, client, graph): """ Execute with preview_method=none. 
No preview images should be generated. """ extra_data = {"preview_method": "none"} response = client.queue_prompt(graph, extra_data) assert "prompt_id" in response result = client.wait_for_execution(response["prompt_id"]) assert result["completed"] or result["error"] is not None if result["completed"]: # With "none", should receive no preview images assert result["preview_count"] == 0, \ f"Expected no previews with 'none', got {result['preview_count']}" print(f"none: {result['preview_count']} previews in {result['execution_time']:.2f}s") # noqa: T201 def test_execution_with_default(self, client, graph): """ Execute with preview_method=default. Should use server's CLI default setting. """ extra_data = {"preview_method": "default"} response = client.queue_prompt(graph, extra_data) assert "prompt_id" in response result = client.wait_for_execution(response["prompt_id"]) assert result["completed"] or result["error"] is not None if result["completed"]: print(f"default: {result['preview_count']} previews in {result['execution_time']:.2f}s") # noqa: T201 def test_execution_without_preview_method(self, client, graph): """ Execute without preview_method in extra_data. Should use server's default preview method. """ extra_data = {} # No preview_method response = client.queue_prompt(graph, extra_data) assert "prompt_id" in response result = client.wait_for_execution(response["prompt_id"]) assert result["completed"] or result["error"] is not None if result["completed"]: print(f"(no override): {result['preview_count']} previews in {result['execution_time']:.2f}s") # noqa: T201 class TestPreviewMethodComparison: """Compare preview behavior between different methods.""" def test_none_vs_latent2rgb_preview_count(self, client, graph): """ Compare preview counts: 'none' should have 0, others should have >0. This is the key verification that preview_method actually works. 
""" results = {} # Run with none (randomize seed to avoid caching) graph_none = randomize_seed(graph) extra_data_none = {"preview_method": "none"} response = client.queue_prompt(graph_none, extra_data_none) results["none"] = client.wait_for_execution(response["prompt_id"]) # Run with latent2rgb (randomize seed again) graph_rgb = randomize_seed(graph) extra_data_rgb = {"preview_method": "latent2rgb"} response = client.queue_prompt(graph_rgb, extra_data_rgb) results["latent2rgb"] = client.wait_for_execution(response["prompt_id"]) # Verify both completed assert results["none"]["completed"], f"'none' execution failed: {results['none']['error']}" assert results["latent2rgb"]["completed"], f"'latent2rgb' execution failed: {results['latent2rgb']['error']}" # Key assertion: 'none' should have 0 previews assert results["none"]["preview_count"] == 0, \ f"'none' should have 0 previews, got {results['none']['preview_count']}" # 'latent2rgb' should have at least 1 preview (depends on steps) assert results["latent2rgb"]["preview_count"] > 0, \ f"'latent2rgb' should have >0 previews, got {results['latent2rgb']['preview_count']}" print("\nPreview count comparison:") # noqa: T201 print(f" none: {results['none']['preview_count']} previews") # noqa: T201 print(f" latent2rgb: {results['latent2rgb']['preview_count']} previews") # noqa: T201 class TestPreviewMethodSequential: """Test sequential execution with different preview methods.""" def test_sequential_different_methods(self, client, graph): """ Execute multiple prompts sequentially with different preview methods. Each should complete independently with correct preview behavior. 
""" methods = ["latent2rgb", "none", "default"] results = [] for method in methods: # Randomize seed for each execution to avoid caching graph_run = randomize_seed(graph) extra_data = {"preview_method": method} response = client.queue_prompt(graph_run, extra_data) result = client.wait_for_execution(response["prompt_id"]) results.append({ "method": method, "completed": result["completed"], "preview_count": result["preview_count"], "execution_time": result["execution_time"], "error": result["error"] }) # All should complete or have clear errors for r in results: assert r["completed"] or r["error"] is not None, \ f"Method {r['method']} neither completed nor errored" # "none" should have zero previews if completed none_result = next(r for r in results if r["method"] == "none") if none_result["completed"]: assert none_result["preview_count"] == 0, \ f"'none' should have 0 previews, got {none_result['preview_count']}" print("\nSequential execution results:") # noqa: T201 for r in results: status = "✓" if r["completed"] else f"✗ ({r['error']})" print(f" {r['method']}: {status}, {r['preview_count']} previews, {r['execution_time']:.2f}s") # noqa: T201
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "tests/execution/test_preview_method.py", "license": "GNU General Public License v3.0", "lines": 281, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Comfy-Org/ComfyUI:comfy_extras/nodes_wanmove.py
import nodes import node_helpers import torch import torchvision.transforms.functional as TF import comfy.model_management import comfy.utils import numpy as np from typing_extensions import override from comfy_api.latest import ComfyExtension, io from comfy_extras.nodes_wan import parse_json_tracks # https://github.com/ali-vilab/Wan-Move/blob/main/wan/modules/trajectory.py from PIL import Image, ImageDraw SKIP_ZERO = False def get_pos_emb( pos_k: torch.Tensor, # A 1D tensor containing positions for which to generate embeddings. pos_emb_dim: int, theta_func: callable = lambda i, d: torch.pow(10000, torch.mul(2, torch.div(i.to(torch.float32), d))), #Function to compute thetas based on position and embedding dimensions. device: torch.device = torch.device("cpu"), dtype: torch.dtype = torch.float32, ) -> torch.Tensor: # The position embeddings (batch_size, pos_emb_dim) assert pos_emb_dim % 2 == 0, "The dimension of position embeddings must be even." pos_k = pos_k.to(device, dtype) if SKIP_ZERO: pos_k = pos_k + 1 batch_size = pos_k.size(0) denominator = torch.arange(0, pos_emb_dim // 2, device=device, dtype=dtype) # Expand denominator to match the shape needed for broadcasting denominator_expanded = denominator.view(1, -1).expand(batch_size, -1) thetas = theta_func(denominator_expanded, pos_emb_dim) # Ensure pos_k is in the correct shape for broadcasting pos_k_expanded = pos_k.view(-1, 1).to(dtype) sin_thetas = torch.sin(torch.div(pos_k_expanded, thetas)) cos_thetas = torch.cos(torch.div(pos_k_expanded, thetas)) # Concatenate sine and cosine embeddings along the last dimension pos_emb = torch.cat([sin_thetas, cos_thetas], dim=-1) return pos_emb def create_pos_embeddings( pred_tracks: torch.Tensor, # the predicted tracks, [T, N, 2] pred_visibility: torch.Tensor, # the predicted visibility [T, N] downsample_ratios: list[int], # the ratios for downsampling time, height, and width height: int, # the height of the feature map width: int, # the width of the feature map 
track_num: int = -1, # the number of tracks to use t_down_strategy: str = "sample", # the strategy for downsampling time dimension ): assert t_down_strategy in ["sample", "average"], "Invalid strategy for downsampling time dimension." t, n, _ = pred_tracks.shape t_down, h_down, w_down = downsample_ratios track_pos = - torch.ones(n, (t-1) // t_down + 1, 2, dtype=torch.long) if track_num == -1: track_num = n tracks_idx = torch.randperm(n)[:track_num] tracks = pred_tracks[:, tracks_idx] visibility = pred_visibility[:, tracks_idx] for t_idx in range(0, t, t_down): if t_down_strategy == "sample" or t_idx == 0: cur_tracks = tracks[t_idx] # [N, 2] cur_visibility = visibility[t_idx] # [N] else: cur_tracks = tracks[t_idx:t_idx+t_down].mean(dim=0) cur_visibility = torch.any(visibility[t_idx:t_idx+t_down], dim=0) for i in range(track_num): if not cur_visibility[i] or cur_tracks[i][0] < 0 or cur_tracks[i][1] < 0 or cur_tracks[i][0] >= width or cur_tracks[i][1] >= height: continue x, y = cur_tracks[i] x, y = int(x // w_down), int(y // h_down) track_pos[i, t_idx // t_down, 0], track_pos[i, t_idx // t_down, 1] = y, x return track_pos # the position embeddings, [N, T', 2], 2 = height, width def replace_feature( vae_feature: torch.Tensor, # [B, C', T', H', W'] track_pos: torch.Tensor, # [B, N, T', 2] strength: float = 1.0 ) -> torch.Tensor: b, _, t, h, w = vae_feature.shape assert b == track_pos.shape[0], "Batch size mismatch." 
n = track_pos.shape[1] # Shuffle the trajectory order track_pos = track_pos[:, torch.randperm(n), :, :] # Extract coordinates at time steps ≥ 1 and generate a valid mask current_pos = track_pos[:, :, 1:, :] # [B, N, T-1, 2] mask = (current_pos[..., 0] >= 0) & (current_pos[..., 1] >= 0) # [B, N, T-1] # Get all valid indices valid_indices = mask.nonzero(as_tuple=False) # [num_valid, 3] num_valid = valid_indices.shape[0] if num_valid == 0: return vae_feature # Decompose valid indices into each dimension batch_idx = valid_indices[:, 0] track_idx = valid_indices[:, 1] t_rel = valid_indices[:, 2] t_target = t_rel + 1 # Convert to original time step indices # Extract target position coordinates h_target = current_pos[batch_idx, track_idx, t_rel, 0].long() # Ensure integer indices w_target = current_pos[batch_idx, track_idx, t_rel, 1].long() # Extract source position coordinates (t=0) h_source = track_pos[batch_idx, track_idx, 0, 0].long() w_source = track_pos[batch_idx, track_idx, 0, 1].long() # Get source features and assign to target positions src_features = vae_feature[batch_idx, :, 0, h_source, w_source] dst_features = vae_feature[batch_idx, :, t_target, h_target, w_target] vae_feature[batch_idx, :, t_target, h_target, w_target] = dst_features + (src_features - dst_features) * strength return vae_feature # Visualize functions def _draw_gradient_polyline_on_overlay(overlay, line_width, points, start_color, opacity=1.0): draw = ImageDraw.Draw(overlay, 'RGBA') points = points[::-1] # Compute total length total_length = 0 segment_lengths = [] for i in range(len(points) - 1): dx = points[i + 1][0] - points[i][0] dy = points[i + 1][1] - points[i][1] length = (dx * dx + dy * dy) ** 0.5 segment_lengths.append(length) total_length += length if total_length == 0: return accumulated_length = 0 # Draw the gradient polyline for idx, (start_point, end_point) in enumerate(zip(points[:-1], points[1:])): segment_length = segment_lengths[idx] steps = max(int(segment_length), 1) for i 
in range(steps): current_length = accumulated_length + (i / steps) * segment_length ratio = current_length / total_length alpha = int(255 * (1 - ratio) * opacity) color = (*start_color, alpha) x = int(start_point[0] + (end_point[0] - start_point[0]) * i / steps) y = int(start_point[1] + (end_point[1] - start_point[1]) * i / steps) dynamic_line_width = max(int(line_width * (1 - ratio)), 1) draw.line([(x, y), (x + 1, y)], fill=color, width=dynamic_line_width) accumulated_length += segment_length def add_weighted(rgb, track): rgb = np.array(rgb) # [H, W, C] "RGB" track = np.array(track) # [H, W, C] "RGBA" alpha = track[:, :, 3] / 255.0 alpha = np.stack([alpha] * 3, axis=-1) blend_img = track[:, :, :3] * alpha + rgb * (1 - alpha) return Image.fromarray(blend_img.astype(np.uint8)) def draw_tracks_on_video(video, tracks, visibility=None, track_frame=24, circle_size=12, opacity=0.5, line_width=16): color_map = [(102, 153, 255), (0, 255, 255), (255, 255, 0), (255, 102, 204), (0, 255, 0)] video = video.byte().cpu().numpy() # (81, 480, 832, 3) tracks = tracks[0].long().detach().cpu().numpy() if visibility is not None: visibility = visibility[0].detach().cpu().numpy() num_frames, height, width = video.shape[:3] num_tracks = tracks.shape[1] alpha_opacity = int(255 * opacity) output_frames = [] for t in range(num_frames): frame_rgb = video[t].astype(np.float32) # Create a single RGBA overlay for all tracks in this frame overlay = Image.new("RGBA", (width, height), (0, 0, 0, 0)) draw_overlay = ImageDraw.Draw(overlay) polyline_data = [] # Draw all circles on a single overlay for n in range(num_tracks): if visibility is not None and visibility[t, n] == 0: continue track_coord = tracks[t, n] color = color_map[n % len(color_map)] circle_color = color + (alpha_opacity,) draw_overlay.ellipse((track_coord[0] - circle_size, track_coord[1] - circle_size, track_coord[0] + circle_size, track_coord[1] + circle_size), fill=circle_color ) # Store polyline data for batch processing 
tracks_coord = tracks[max(t - track_frame, 0):t + 1, n] if len(tracks_coord) > 1: polyline_data.append((tracks_coord, color)) # Blend circles overlay once overlay_np = np.array(overlay) alpha = overlay_np[:, :, 3:4] / 255.0 frame_rgb = overlay_np[:, :, :3] * alpha + frame_rgb * (1 - alpha) # Draw all polylines on a single overlay if polyline_data: polyline_overlay = Image.new("RGBA", (width, height), (0, 0, 0, 0)) for tracks_coord, color in polyline_data: _draw_gradient_polyline_on_overlay(polyline_overlay, line_width, tracks_coord, color, opacity) # Blend polylines overlay once polyline_np = np.array(polyline_overlay) alpha = polyline_np[:, :, 3:4] / 255.0 frame_rgb = polyline_np[:, :, :3] * alpha + frame_rgb * (1 - alpha) output_frames.append(Image.fromarray(frame_rgb.astype(np.uint8))) return output_frames class WanMoveVisualizeTracks(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="WanMoveVisualizeTracks", category="conditioning/video_models", inputs=[ io.Image.Input("images"), io.Tracks.Input("tracks", optional=True), io.Int.Input("line_resolution", default=24, min=1, max=1024), io.Int.Input("circle_size", default=12, min=1, max=128, advanced=True), io.Float.Input("opacity", default=0.75, min=0.0, max=1.0, step=0.01), io.Int.Input("line_width", default=16, min=1, max=128, advanced=True), ], outputs=[ io.Image.Output(), ], ) @classmethod def execute(cls, images, line_resolution, circle_size, opacity, line_width, tracks=None) -> io.NodeOutput: if tracks is None: return io.NodeOutput(images) track_path = tracks["track_path"].unsqueeze(0) track_visibility = tracks["track_visibility"].unsqueeze(0) images_in = images * 255.0 if images_in.shape[0] != track_path.shape[1]: repeat_count = track_path.shape[1] // images.shape[0] images_in = images_in.repeat(repeat_count, 1, 1, 1) track_video = draw_tracks_on_video(images_in, track_path, track_visibility, track_frame=line_resolution, circle_size=circle_size, opacity=opacity, 
line_width=line_width) track_video = torch.stack([TF.to_tensor(frame) for frame in track_video], dim=0).movedim(1, -1).float() return io.NodeOutput(track_video.to(comfy.model_management.intermediate_device())) class WanMoveTracksFromCoords(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="WanMoveTracksFromCoords", category="conditioning/video_models", inputs=[ io.String.Input("track_coords", force_input=True, default="[]", optional=True), io.Mask.Input("track_mask", optional=True), ], outputs=[ io.Tracks.Output(), io.Int.Output(display_name="track_length"), ], ) @classmethod def execute(cls, track_coords, track_mask=None) -> io.NodeOutput: device=comfy.model_management.intermediate_device() tracks_data = parse_json_tracks(track_coords) track_length = len(tracks_data[0]) track_list = [ [[track[frame]['x'], track[frame]['y']] for track in tracks_data] for frame in range(len(tracks_data[0])) ] tracks = torch.tensor(track_list, dtype=torch.float32, device=device) # [frames, num_tracks, 2] num_tracks = tracks.shape[-2] if track_mask is None: track_visibility = torch.ones((track_length, num_tracks), dtype=torch.bool, device=device) else: track_visibility = (track_mask > 0).any(dim=(1, 2)).unsqueeze(-1) out_track_info = {} out_track_info["track_path"] = tracks out_track_info["track_visibility"] = track_visibility return io.NodeOutput(out_track_info, track_length) class GenerateTracks(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="GenerateTracks", search_aliases=["motion paths", "camera movement", "trajectory"], category="conditioning/video_models", inputs=[ io.Int.Input("width", default=832, min=16, max=4096, step=16), io.Int.Input("height", default=480, min=16, max=4096, step=16), io.Float.Input("start_x", default=0.0, min=0.0, max=1.0, step=0.01, tooltip="Normalized X coordinate (0-1) for start position."), io.Float.Input("start_y", default=0.0, min=0.0, max=1.0, step=0.01, tooltip="Normalized Y coordinate 
(0-1) for start position."), io.Float.Input("end_x", default=1.0, min=0.0, max=1.0, step=0.01, tooltip="Normalized X coordinate (0-1) for end position."), io.Float.Input("end_y", default=1.0, min=0.0, max=1.0, step=0.01, tooltip="Normalized Y coordinate (0-1) for end position."), io.Int.Input("num_frames", default=81, min=1, max=1024), io.Int.Input("num_tracks", default=5, min=1, max=100), io.Float.Input("track_spread", default=0.025, min=0.0, max=1.0, step=0.001, tooltip="Normalized distance between tracks. Tracks are spread perpendicular to the motion direction."), io.Boolean.Input("bezier", default=False, tooltip="Enable Bezier curve path using the mid point as control point."), io.Float.Input("mid_x", default=0.5, min=0.0, max=1.0, step=0.01, tooltip="Normalized X control point for Bezier curve. Only used when 'bezier' is enabled."), io.Float.Input("mid_y", default=0.5, min=0.0, max=1.0, step=0.01, tooltip="Normalized Y control point for Bezier curve. Only used when 'bezier' is enabled."), io.Combo.Input( "interpolation", options=["linear", "ease_in", "ease_out", "ease_in_out", "constant"], tooltip="Controls the timing/speed of movement along the path.", ), io.Mask.Input("track_mask", optional=True, tooltip="Optional mask to indicate visible frames."), ], outputs=[ io.Tracks.Output(), io.Int.Output(display_name="track_length"), ], ) @classmethod def execute(cls, width, height, start_x, start_y, mid_x, mid_y, end_x, end_y, num_frames, num_tracks, track_spread, bezier=False, interpolation="linear", track_mask=None) -> io.NodeOutput: device = comfy.model_management.intermediate_device() track_length = num_frames # normalized coordinates to pixel coordinates start_x_px = start_x * width start_y_px = start_y * height mid_x_px = mid_x * width mid_y_px = mid_y * height end_x_px = end_x * width end_y_px = end_y * height track_spread_px = track_spread * (width + height) / 2 # Use average of width/height for spread to keep it proportional t = torch.linspace(0, 1, 
num_frames, device=device) if interpolation == "constant": # All points stay at start position interp_values = torch.zeros_like(t) elif interpolation == "linear": interp_values = t elif interpolation == "ease_in": interp_values = t ** 2 elif interpolation == "ease_out": interp_values = 1 - (1 - t) ** 2 elif interpolation == "ease_in_out": interp_values = t * t * (3 - 2 * t) if bezier: # apply interpolation to t for timing control along the bezier path t_interp = interp_values one_minus_t = 1 - t_interp x_positions = one_minus_t ** 2 * start_x_px + 2 * one_minus_t * t_interp * mid_x_px + t_interp ** 2 * end_x_px y_positions = one_minus_t ** 2 * start_y_px + 2 * one_minus_t * t_interp * mid_y_px + t_interp ** 2 * end_y_px tangent_x = 2 * one_minus_t * (mid_x_px - start_x_px) + 2 * t_interp * (end_x_px - mid_x_px) tangent_y = 2 * one_minus_t * (mid_y_px - start_y_px) + 2 * t_interp * (end_y_px - mid_y_px) else: # calculate base x and y positions for each frame (center track) x_positions = start_x_px + (end_x_px - start_x_px) * interp_values y_positions = start_y_px + (end_y_px - start_y_px) * interp_values # For non-bezier, tangent is constant (direction from start to end) tangent_x = torch.full_like(t, end_x_px - start_x_px) tangent_y = torch.full_like(t, end_y_px - start_y_px) track_list = [] for frame_idx in range(num_frames): # Calculate perpendicular direction at this frame tx = tangent_x[frame_idx].item() ty = tangent_y[frame_idx].item() length = (tx ** 2 + ty ** 2) ** 0.5 if length > 0: # Perpendicular unit vector (rotate 90 degrees) perp_x = -ty / length perp_y = tx / length else: # If tangent is zero, spread horizontally perp_x = 1.0 perp_y = 0.0 frame_tracks = [] for track_idx in range(num_tracks): # center tracks around the main path offset ranges from -(num_tracks-1)/2 to +(num_tracks-1)/2 offset = (track_idx - (num_tracks - 1) / 2) * track_spread_px track_x = x_positions[frame_idx].item() + perp_x * offset track_y = y_positions[frame_idx].item() + perp_y 
* offset frame_tracks.append([track_x, track_y]) track_list.append(frame_tracks) tracks = torch.tensor(track_list, dtype=torch.float32, device=device) # [frames, num_tracks, 2] if track_mask is None: track_visibility = torch.ones((track_length, num_tracks), dtype=torch.bool, device=device) else: track_visibility = (track_mask > 0).any(dim=(1, 2)).unsqueeze(-1) out_track_info = {} out_track_info["track_path"] = tracks out_track_info["track_visibility"] = track_visibility return io.NodeOutput(out_track_info, track_length) class WanMoveConcatTrack(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="WanMoveConcatTrack", category="conditioning/video_models", inputs=[ io.Tracks.Input("tracks_1"), io.Tracks.Input("tracks_2", optional=True), ], outputs=[ io.Tracks.Output(), ], ) @classmethod def execute(cls, tracks_1=None, tracks_2=None) -> io.NodeOutput: if tracks_2 is None: return io.NodeOutput(tracks_1) tracks_out = torch.cat([tracks_1["track_path"], tracks_2["track_path"]], dim=1) # Concatenate along the track dimension mask_out = torch.cat([tracks_1["track_visibility"], tracks_2["track_visibility"]], dim=-1) out_track_info = {} out_track_info["track_path"] = tracks_out out_track_info["track_visibility"] = mask_out return io.NodeOutput(out_track_info) class WanMoveTrackToVideo(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="WanMoveTrackToVideo", category="conditioning/video_models", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), io.Vae.Input("vae"), io.Tracks.Input("tracks", optional=True), io.Float.Input("strength", default=1.0, min=0.0, max=100.0, step=0.01, tooltip="Strength of the track conditioning."), io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4), io.Int.Input("batch_size", default=1, 
min=1, max=4096), io.Image.Input("start_image"), io.ClipVisionOutput.Input("clip_vision_output", optional=True), ], outputs=[ io.Conditioning.Output(display_name="positive"), io.Conditioning.Output(display_name="negative"), io.Latent.Output(display_name="latent"), ], ) @classmethod def execute(cls, positive, negative, vae, width, height, length, batch_size, strength, tracks=None, start_image=None, clip_vision_output=None) -> io.NodeOutput: device=comfy.model_management.intermediate_device() latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=device) if start_image is not None: start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) image = torch.ones((length, height, width, start_image.shape[-1]), device=start_image.device, dtype=start_image.dtype) * 0.5 image[:start_image.shape[0]] = start_image concat_latent_image = vae.encode(image[:, :, :, :3]) mask = torch.ones((1, 1, latent.shape[2], concat_latent_image.shape[-2], concat_latent_image.shape[-1]), device=start_image.device, dtype=start_image.dtype) mask[:, :, :((start_image.shape[0] - 1) // 4) + 1] = 0.0 if tracks is not None and strength > 0.0: tracks_path = tracks["track_path"][:length] # [T, N, 2] num_tracks = tracks_path.shape[-2] track_visibility = tracks.get("track_visibility", torch.ones((length, num_tracks), dtype=torch.bool, device=device)) track_pos = create_pos_embeddings(tracks_path, track_visibility, [4, 8, 8], height, width, track_num=num_tracks) track_pos = comfy.utils.resize_to_batch_size(track_pos.unsqueeze(0), batch_size) concat_latent_image_pos = replace_feature(concat_latent_image, track_pos, strength) else: concat_latent_image_pos = concat_latent_image positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent_image_pos, "concat_mask": mask}) negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": 
concat_latent_image, "concat_mask": mask}) if clip_vision_output is not None: positive = node_helpers.conditioning_set_values(positive, {"clip_vision_output": clip_vision_output}) negative = node_helpers.conditioning_set_values(negative, {"clip_vision_output": clip_vision_output}) out_latent = {} out_latent["samples"] = latent return io.NodeOutput(positive, negative, out_latent) class WanMoveExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[io.ComfyNode]]: return [ WanMoveTrackToVideo, WanMoveTracksFromCoords, WanMoveConcatTrack, WanMoveVisualizeTracks, GenerateTracks, ] async def comfy_entrypoint() -> WanMoveExtension: return WanMoveExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_wanmove.py", "license": "GNU General Public License v3.0", "lines": 435, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy/ldm/kandinsky5/model.py
import torch from torch import nn import math import comfy.ldm.common_dit from comfy.ldm.modules.attention import optimized_attention from comfy.ldm.flux.math import apply_rope1 from comfy.ldm.flux.layers import EmbedND def attention(q, k, v, heads, transformer_options={}): return optimized_attention( q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), heads=heads, skip_reshape=True, transformer_options=transformer_options ) def apply_scale_shift_norm(norm, x, scale, shift): return torch.addcmul(shift, norm(x), scale + 1.0) def apply_gate_sum(x, out, gate): return torch.addcmul(x, gate, out) def get_shift_scale_gate(params): shift, scale, gate = torch.chunk(params, 3, dim=-1) return tuple(x.unsqueeze(1) for x in (shift, scale, gate)) def get_freqs(dim, max_period=10000.0): return torch.exp(-math.log(max_period) * torch.arange(start=0, end=dim, dtype=torch.float32) / dim) class TimeEmbeddings(nn.Module): def __init__(self, model_dim, time_dim, max_period=10000.0, operation_settings=None): super().__init__() assert model_dim % 2 == 0 self.model_dim = model_dim self.max_period = max_period self.register_buffer("freqs", get_freqs(model_dim // 2, max_period), persistent=False) operations = operation_settings.get("operations") self.in_layer = operations.Linear(model_dim, time_dim, bias=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.activation = nn.SiLU() self.out_layer = operations.Linear(time_dim, time_dim, bias=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) def forward(self, timestep, dtype): args = torch.outer(timestep, self.freqs.to(device=timestep.device)) time_embed = torch.cat([torch.cos(args), torch.sin(args)], dim=-1).to(dtype) time_embed = self.out_layer(self.activation(self.in_layer(time_embed))) return time_embed class TextEmbeddings(nn.Module): def __init__(self, text_dim, model_dim, operation_settings=None): super().__init__() operations = 
operation_settings.get("operations") self.in_layer = operations.Linear(text_dim, model_dim, bias=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.norm = operations.LayerNorm(model_dim, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) def forward(self, text_embed): text_embed = self.in_layer(text_embed) return self.norm(text_embed).type_as(text_embed) class VisualEmbeddings(nn.Module): def __init__(self, visual_dim, model_dim, patch_size, operation_settings=None): super().__init__() self.patch_size = patch_size operations = operation_settings.get("operations") self.in_layer = operations.Linear(visual_dim, model_dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) def forward(self, x): x = x.movedim(1, -1) # B C T H W -> B T H W C B, T, H, W, dim = x.shape pt, ph, pw = self.patch_size x = x.view( B, T // pt, pt, H // ph, ph, W // pw, pw, dim, ).permute(0, 1, 3, 5, 2, 4, 6, 7).flatten(4, 7) return self.in_layer(x) class Modulation(nn.Module): def __init__(self, time_dim, model_dim, num_params, operation_settings=None): super().__init__() self.activation = nn.SiLU() self.out_layer = operation_settings.get("operations").Linear(time_dim, num_params * model_dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) def forward(self, x): return self.out_layer(self.activation(x)) class SelfAttention(nn.Module): def __init__(self, num_channels, head_dim, operation_settings=None): super().__init__() assert num_channels % head_dim == 0 self.num_heads = num_channels // head_dim self.head_dim = head_dim operations = operation_settings.get("operations") self.to_query = operations.Linear(num_channels, num_channels, bias=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.to_key = operations.Linear(num_channels, num_channels, bias=True, device=operation_settings.get("device"), 
dtype=operation_settings.get("dtype")) self.to_value = operations.Linear(num_channels, num_channels, bias=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.query_norm = operations.RMSNorm(head_dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.key_norm = operations.RMSNorm(head_dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.out_layer = operations.Linear(num_channels, num_channels, bias=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.num_chunks = 2 def _compute_qk(self, x, freqs, proj_fn, norm_fn): result = proj_fn(x).view(*x.shape[:-1], self.num_heads, -1) return apply_rope1(norm_fn(result), freqs) def _forward(self, x, freqs, transformer_options={}): q = self._compute_qk(x, freqs, self.to_query, self.query_norm) k = self._compute_qk(x, freqs, self.to_key, self.key_norm) v = self.to_value(x).view(*x.shape[:-1], self.num_heads, -1) out = attention(q, k, v, self.num_heads, transformer_options=transformer_options) return self.out_layer(out) def _forward_chunked(self, x, freqs, transformer_options={}): def process_chunks(proj_fn, norm_fn): x_chunks = torch.chunk(x, self.num_chunks, dim=1) freqs_chunks = torch.chunk(freqs, self.num_chunks, dim=1) chunks = [] for x_chunk, freqs_chunk in zip(x_chunks, freqs_chunks): chunks.append(self._compute_qk(x_chunk, freqs_chunk, proj_fn, norm_fn)) return torch.cat(chunks, dim=1) q = process_chunks(self.to_query, self.query_norm) k = process_chunks(self.to_key, self.key_norm) v = self.to_value(x).view(*x.shape[:-1], self.num_heads, -1) out = attention(q, k, v, self.num_heads, transformer_options=transformer_options) return self.out_layer(out) def forward(self, x, freqs, transformer_options={}): if x.shape[1] > 8192: return self._forward_chunked(x, freqs, transformer_options=transformer_options) else: return self._forward(x, freqs, transformer_options=transformer_options) 
class CrossAttention(SelfAttention): def get_qkv(self, x, context): q = self.to_query(x).view(*x.shape[:-1], self.num_heads, -1) k = self.to_key(context).view(*context.shape[:-1], self.num_heads, -1) v = self.to_value(context).view(*context.shape[:-1], self.num_heads, -1) return q, k, v def forward(self, x, context, transformer_options={}): q, k, v = self.get_qkv(x, context) out = attention(self.query_norm(q), self.key_norm(k), v, self.num_heads, transformer_options=transformer_options) return self.out_layer(out) class FeedForward(nn.Module): def __init__(self, dim, ff_dim, operation_settings=None): super().__init__() operations = operation_settings.get("operations") self.in_layer = operations.Linear(dim, ff_dim, bias=False, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.activation = nn.GELU() self.out_layer = operations.Linear(ff_dim, dim, bias=False, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.num_chunks = 4 def _forward(self, x): return self.out_layer(self.activation(self.in_layer(x))) def _forward_chunked(self, x): chunks = torch.chunk(x, self.num_chunks, dim=1) output_chunks = [] for chunk in chunks: output_chunks.append(self._forward(chunk)) return torch.cat(output_chunks, dim=1) def forward(self, x): if x.shape[1] > 8192: return self._forward_chunked(x) else: return self._forward(x) class OutLayer(nn.Module): def __init__(self, model_dim, time_dim, visual_dim, patch_size, operation_settings=None): super().__init__() self.patch_size = patch_size self.modulation = Modulation(time_dim, model_dim, 2, operation_settings=operation_settings) operations = operation_settings.get("operations") self.norm = operations.LayerNorm(model_dim, elementwise_affine=False, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.out_layer = operations.Linear(model_dim, math.prod(patch_size) * visual_dim, bias=True, device=operation_settings.get("device"), 
dtype=operation_settings.get("dtype")) def forward(self, visual_embed, time_embed): B, T, H, W, _ = visual_embed.shape shift, scale = torch.chunk(self.modulation(time_embed), 2, dim=-1) scale = scale[:, None, None, None, :] shift = shift[:, None, None, None, :] visual_embed = apply_scale_shift_norm(self.norm, visual_embed, scale, shift) x = self.out_layer(visual_embed) out_dim = x.shape[-1] // (self.patch_size[0] * self.patch_size[1] * self.patch_size[2]) x = x.view( B, T, H, W, out_dim, self.patch_size[0], self.patch_size[1], self.patch_size[2] ) return x.permute(0, 4, 1, 5, 2, 6, 3, 7).flatten(2, 3).flatten(3, 4).flatten(4, 5) class TransformerEncoderBlock(nn.Module): def __init__(self, model_dim, time_dim, ff_dim, head_dim, operation_settings=None): super().__init__() self.text_modulation = Modulation(time_dim, model_dim, 6, operation_settings=operation_settings) operations = operation_settings.get("operations") self.self_attention_norm = operations.LayerNorm(model_dim, elementwise_affine=False, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.self_attention = SelfAttention(model_dim, head_dim, operation_settings=operation_settings) self.feed_forward_norm = operations.LayerNorm(model_dim, elementwise_affine=False, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.feed_forward = FeedForward(model_dim, ff_dim, operation_settings=operation_settings) def forward(self, x, time_embed, freqs, transformer_options={}): self_attn_params, ff_params = torch.chunk(self.text_modulation(time_embed), 2, dim=-1) shift, scale, gate = get_shift_scale_gate(self_attn_params) out = apply_scale_shift_norm(self.self_attention_norm, x, scale, shift) out = self.self_attention(out, freqs, transformer_options=transformer_options) x = apply_gate_sum(x, out, gate) shift, scale, gate = get_shift_scale_gate(ff_params) out = apply_scale_shift_norm(self.feed_forward_norm, x, scale, shift) out = self.feed_forward(out) x = 
apply_gate_sum(x, out, gate) return x class TransformerDecoderBlock(nn.Module): def __init__(self, model_dim, time_dim, ff_dim, head_dim, operation_settings=None): super().__init__() self.visual_modulation = Modulation(time_dim, model_dim, 9, operation_settings=operation_settings) operations = operation_settings.get("operations") self.self_attention_norm = operations.LayerNorm(model_dim, elementwise_affine=False, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.self_attention = SelfAttention(model_dim, head_dim, operation_settings=operation_settings) self.cross_attention_norm = operations.LayerNorm(model_dim, elementwise_affine=False, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.cross_attention = CrossAttention(model_dim, head_dim, operation_settings=operation_settings) self.feed_forward_norm = operations.LayerNorm(model_dim, elementwise_affine=False, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.feed_forward = FeedForward(model_dim, ff_dim, operation_settings=operation_settings) def forward(self, visual_embed, text_embed, time_embed, freqs, transformer_options={}): self_attn_params, cross_attn_params, ff_params = torch.chunk(self.visual_modulation(time_embed), 3, dim=-1) # self attention shift, scale, gate = get_shift_scale_gate(self_attn_params) visual_out = apply_scale_shift_norm(self.self_attention_norm, visual_embed, scale, shift) visual_out = self.self_attention(visual_out, freqs, transformer_options=transformer_options) visual_embed = apply_gate_sum(visual_embed, visual_out, gate) # cross attention shift, scale, gate = get_shift_scale_gate(cross_attn_params) visual_out = apply_scale_shift_norm(self.cross_attention_norm, visual_embed, scale, shift) visual_out = self.cross_attention(visual_out, text_embed, transformer_options=transformer_options) visual_embed = apply_gate_sum(visual_embed, visual_out, gate) # feed forward shift, scale, gate = 
get_shift_scale_gate(ff_params) visual_out = apply_scale_shift_norm(self.feed_forward_norm, visual_embed, scale, shift) visual_out = self.feed_forward(visual_out) visual_embed = apply_gate_sum(visual_embed, visual_out, gate) return visual_embed class Kandinsky5(nn.Module): def __init__( self, in_visual_dim=16, out_visual_dim=16, in_text_dim=3584, in_text_dim2=768, time_dim=512, model_dim=1792, ff_dim=7168, visual_embed_dim=132, patch_size=(1, 2, 2), num_text_blocks=2, num_visual_blocks=32, axes_dims=(16, 24, 24), rope_scale_factor=(1.0, 2.0, 2.0), dtype=None, device=None, operations=None, **kwargs ): super().__init__() head_dim = sum(axes_dims) self.rope_scale_factor = rope_scale_factor self.in_visual_dim = in_visual_dim self.model_dim = model_dim self.patch_size = patch_size self.visual_embed_dim = visual_embed_dim self.dtype = dtype self.device = device operation_settings = {"operations": operations, "device": device, "dtype": dtype} self.time_embeddings = TimeEmbeddings(model_dim, time_dim, operation_settings=operation_settings) self.text_embeddings = TextEmbeddings(in_text_dim, model_dim, operation_settings=operation_settings) self.pooled_text_embeddings = TextEmbeddings(in_text_dim2, time_dim, operation_settings=operation_settings) self.visual_embeddings = VisualEmbeddings(visual_embed_dim, model_dim, patch_size, operation_settings=operation_settings) self.text_transformer_blocks = nn.ModuleList( [TransformerEncoderBlock(model_dim, time_dim, ff_dim, head_dim, operation_settings=operation_settings) for _ in range(num_text_blocks)] ) self.visual_transformer_blocks = nn.ModuleList( [TransformerDecoderBlock(model_dim, time_dim, ff_dim, head_dim, operation_settings=operation_settings) for _ in range(num_visual_blocks)] ) self.out_layer = OutLayer(model_dim, time_dim, out_visual_dim, patch_size, operation_settings=operation_settings) self.rope_embedder_3d = EmbedND(dim=head_dim, theta=10000.0, axes_dim=axes_dims) self.rope_embedder_1d = EmbedND(dim=head_dim, 
theta=10000.0, axes_dim=[head_dim]) def rope_encode_1d(self, seq_len, seq_start=0, steps=None, device=None, dtype=None, transformer_options={}): steps = seq_len if steps is None else steps seq_ids = torch.linspace(seq_start, seq_start + (seq_len - 1), steps=steps, device=device, dtype=dtype) seq_ids = seq_ids.reshape(-1, 1).unsqueeze(0) # Shape: (1, steps, 1) freqs = self.rope_embedder_1d(seq_ids).movedim(1, 2) return freqs def rope_encode_3d(self, t, h, w, t_start=0, steps_t=None, steps_h=None, steps_w=None, device=None, dtype=None, transformer_options={}): patch_size = self.patch_size t_len = ((t + (patch_size[0] // 2)) // patch_size[0]) h_len = ((h + (patch_size[1] // 2)) // patch_size[1]) w_len = ((w + (patch_size[2] // 2)) // patch_size[2]) if steps_t is None: steps_t = t_len if steps_h is None: steps_h = h_len if steps_w is None: steps_w = w_len h_start = 0 w_start = 0 rope_options = transformer_options.get("rope_options", None) if rope_options is not None: t_len = (t_len - 1.0) * rope_options.get("scale_t", 1.0) + 1.0 h_len = (h_len - 1.0) * rope_options.get("scale_y", 1.0) + 1.0 w_len = (w_len - 1.0) * rope_options.get("scale_x", 1.0) + 1.0 t_start += rope_options.get("shift_t", 0.0) h_start += rope_options.get("shift_y", 0.0) w_start += rope_options.get("shift_x", 0.0) else: rope_scale_factor = self.rope_scale_factor if self.model_dim == 4096: # pro video model uses different rope scaling at higher resolutions if h * w >= 14080: rope_scale_factor = (1.0, 3.16, 3.16) t_len = (t_len - 1.0) / rope_scale_factor[0] + 1.0 h_len = (h_len - 1.0) / rope_scale_factor[1] + 1.0 w_len = (w_len - 1.0) / rope_scale_factor[2] + 1.0 img_ids = torch.zeros((steps_t, steps_h, steps_w, 3), device=device, dtype=dtype) img_ids[:, :, :, 0] = img_ids[:, :, :, 0] + torch.linspace(t_start, t_start + (t_len - 1), steps=steps_t, device=device, dtype=dtype).reshape(-1, 1, 1) img_ids[:, :, :, 1] = img_ids[:, :, :, 1] + torch.linspace(h_start, h_start + (h_len - 1), steps=steps_h, 
device=device, dtype=dtype).reshape(1, -1, 1) img_ids[:, :, :, 2] = img_ids[:, :, :, 2] + torch.linspace(w_start, w_start + (w_len - 1), steps=steps_w, device=device, dtype=dtype).reshape(1, 1, -1) img_ids = img_ids.reshape(1, -1, img_ids.shape[-1]) freqs = self.rope_embedder_3d(img_ids).movedim(1, 2) return freqs def forward_orig(self, x, timestep, context, y, freqs, freqs_text, transformer_options={}, **kwargs): patches_replace = transformer_options.get("patches_replace", {}) context = self.text_embeddings(context) time_embed = self.time_embeddings(timestep, x.dtype) + self.pooled_text_embeddings(y) for block in self.text_transformer_blocks: context = block(context, time_embed, freqs_text, transformer_options=transformer_options) visual_embed = self.visual_embeddings(x) visual_shape = visual_embed.shape[:-1] visual_embed = visual_embed.flatten(1, -2) blocks_replace = patches_replace.get("dit", {}) transformer_options["total_blocks"] = len(self.visual_transformer_blocks) transformer_options["block_type"] = "double" for i, block in enumerate(self.visual_transformer_blocks): transformer_options["block_index"] = i if ("double_block", i) in blocks_replace: def block_wrap(args): return block(x=args["x"], context=args["context"], time_embed=args["time_embed"], freqs=args["freqs"], transformer_options=args.get("transformer_options")) visual_embed = blocks_replace[("double_block", i)]({"x": visual_embed, "context": context, "time_embed": time_embed, "freqs": freqs, "transformer_options": transformer_options}, {"original_block": block_wrap})["x"] else: visual_embed = block(visual_embed, context, time_embed, freqs=freqs, transformer_options=transformer_options) visual_embed = visual_embed.reshape(*visual_shape, -1) return self.out_layer(visual_embed, time_embed) def _forward(self, x, timestep, context, y, time_dim_replace=None, transformer_options={}, **kwargs): original_dims = x.ndim if original_dims == 4: x = x.unsqueeze(2) bs, c, t_len, h, w = x.shape x = 
comfy.ldm.common_dit.pad_to_patch_size(x, self.patch_size) if time_dim_replace is not None: time_dim_replace = comfy.ldm.common_dit.pad_to_patch_size(time_dim_replace, self.patch_size) x[:, :time_dim_replace.shape[1], :time_dim_replace.shape[2]] = time_dim_replace freqs = self.rope_encode_3d(t_len, h, w, device=x.device, dtype=x.dtype, transformer_options=transformer_options) freqs_text = self.rope_encode_1d(context.shape[1], device=x.device, dtype=x.dtype, transformer_options=transformer_options) out = self.forward_orig(x, timestep, context, y, freqs, freqs_text, transformer_options=transformer_options, **kwargs) if original_dims == 4: out = out.squeeze(2) return out def forward(self, x, timestep, context, y, time_dim_replace=None, transformer_options={}, **kwargs): return comfy.patcher_extension.WrapperExecutor.new_class_executor( self._forward, self, comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, transformer_options) ).execute(x, timestep, context, y, time_dim_replace=time_dim_replace, transformer_options=transformer_options, **kwargs)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/ldm/kandinsky5/model.py", "license": "GNU General Public License v3.0", "lines": 334, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy/text_encoders/kandinsky5.py
from comfy import sd1_clip from .qwen_image import QwenImageTokenizer, QwenImageTEModel from .llama import Qwen25_7BVLI class Kandinsky5Tokenizer(QwenImageTokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) self.llama_template = "<|im_start|>system\nYou are a prompt engineer. Describe the video in detail.\nDescribe how the camera moves or shakes, describe the zoom and view angle, whether it follows the objects.\nDescribe the location of the video, main characters or objects and their action.\nDescribe the dynamism of the video and presented actions.\nName the visual style of the video: whether it is a professional footage, user generated content, some kind of animation, video game or screen content.\nDescribe the visual effects, postprocessing and transitions if they are presented in the video.\nPay attention to the order of key actions shown in the scene.<|im_end|>\n<|im_start|>user\n{}<|im_end|>" self.clip_l = sd1_clip.SDTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): out = super().tokenize_with_weights(text, return_word_ids, **kwargs) out["l"] = self.clip_l.tokenize_with_weights(text, return_word_ids, **kwargs) return out class Kandinsky5TokenizerImage(Kandinsky5Tokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) self.llama_template = "<|im_start|>system\nYou are a promt engineer. 
Describe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n{}<|im_end|>" class Qwen25_7BVLIModel(sd1_clip.SDClipModel): def __init__(self, device="cpu", layer="hidden", layer_idx=-1, dtype=None, attention_mask=True, model_options={}): llama_quantization_metadata = model_options.get("llama_quantization_metadata", None) if llama_quantization_metadata is not None: model_options = model_options.copy() model_options["quantization_metadata"] = llama_quantization_metadata super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=Qwen25_7BVLI, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options) class Kandinsky5TEModel(QwenImageTEModel): def __init__(self, device="cpu", dtype=None, model_options={}): super(QwenImageTEModel, self).__init__(device=device, dtype=dtype, name="qwen25_7b", clip_model=Qwen25_7BVLIModel, model_options=model_options) self.clip_l = sd1_clip.SDClipModel(device=device, dtype=dtype, return_projected_pooled=False, model_options=model_options) def encode_token_weights(self, token_weight_pairs): cond, p, extra = super().encode_token_weights(token_weight_pairs, template_end=-1) l_out, l_pooled = self.clip_l.encode_token_weights(token_weight_pairs["l"]) return cond, l_pooled, extra def set_clip_options(self, options): super().set_clip_options(options) self.clip_l.set_clip_options(options) def reset_clip_options(self): super().reset_clip_options() self.clip_l.reset_clip_options() def load_sd(self, sd): if "text_model.encoder.layers.1.mlp.fc1.weight" in sd: return self.clip_l.load_sd(sd) else: return super().load_sd(sd) def te(dtype_llama=None, llama_quantization_metadata=None): class Kandinsky5TEModel_(Kandinsky5TEModel): def __init__(self, device="cpu", dtype=None, 
model_options={}): if llama_quantization_metadata is not None: model_options = model_options.copy() model_options["llama_quantization_metadata"] = llama_quantization_metadata if dtype_llama is not None: dtype = dtype_llama super().__init__(device=device, dtype=dtype, model_options=model_options) return Kandinsky5TEModel_
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/text_encoders/kandinsky5.py", "license": "GNU General Public License v3.0", "lines": 52, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_extras/nodes_kandinsky5.py
import nodes import node_helpers import torch import comfy.model_management import comfy.utils from typing_extensions import override from comfy_api.latest import ComfyExtension, io class Kandinsky5ImageToVideo(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="Kandinsky5ImageToVideo", category="conditioning/video_models", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), io.Vae.Input("vae"), io.Int.Input("width", default=768, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("height", default=512, min=16, max=nodes.MAX_RESOLUTION, step=16), io.Int.Input("length", default=121, min=1, max=nodes.MAX_RESOLUTION, step=4), io.Int.Input("batch_size", default=1, min=1, max=4096), io.Image.Input("start_image", optional=True), ], outputs=[ io.Conditioning.Output(display_name="positive"), io.Conditioning.Output(display_name="negative"), io.Latent.Output(display_name="latent", tooltip="Empty video latent"), io.Latent.Output(display_name="cond_latent", tooltip="Clean encoded start images, used to replace the noisy start of the model output latents"), ], ) @classmethod def execute(cls, positive, negative, vae, width, height, length, batch_size, start_image=None) -> io.NodeOutput: latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device()) cond_latent_out = {} if start_image is not None: start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) encoded = vae.encode(start_image[:, :, :, :3]) cond_latent_out["samples"] = encoded mask = torch.ones((1, 1, latent.shape[2], latent.shape[-2], latent.shape[-1]), device=start_image.device, dtype=start_image.dtype) mask[:, :, :((start_image.shape[0] - 1) // 4) + 1] = 0.0 positive = node_helpers.conditioning_set_values(positive, {"time_dim_replace": encoded, "concat_mask": mask}) negative = 
node_helpers.conditioning_set_values(negative, {"time_dim_replace": encoded, "concat_mask": mask}) out_latent = {} out_latent["samples"] = latent return io.NodeOutput(positive, negative, out_latent, cond_latent_out) def adaptive_mean_std_normalization(source, reference, clump_mean_low=0.3, clump_mean_high=0.35, clump_std_low=0.35, clump_std_high=0.5): source_mean = source.mean(dim=(1, 3, 4), keepdim=True) # mean over C, H, W source_std = source.std(dim=(1, 3, 4), keepdim=True) # std over C, H, W reference_mean = torch.clamp(reference.mean(), source_mean - clump_mean_low, source_mean + clump_mean_high) reference_std = torch.clamp(reference.std(), source_std - clump_std_low, source_std + clump_std_high) # normalization normalized = (source - source_mean) / (source_std + 1e-8) normalized = normalized * reference_std + reference_mean return normalized class NormalizeVideoLatentStart(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="NormalizeVideoLatentStart", category="conditioning/video_models", description="Normalizes the initial frames of a video latent to match the mean and standard deviation of subsequent reference frames. 
Helps reduce differences between the starting frames and the rest of the video.", inputs=[ io.Latent.Input("latent"), io.Int.Input("start_frame_count", default=4, min=1, max=nodes.MAX_RESOLUTION, step=1, tooltip="Number of latent frames to normalize, counted from the start"), io.Int.Input("reference_frame_count", default=5, min=1, max=nodes.MAX_RESOLUTION, step=1, tooltip="Number of latent frames after the start frames to use as reference"), ], outputs=[ io.Latent.Output(display_name="latent"), ], ) @classmethod def execute(cls, latent, start_frame_count, reference_frame_count) -> io.NodeOutput: if latent["samples"].shape[2] <= 1: return io.NodeOutput(latent) s = latent.copy() samples = latent["samples"].clone() first_frames = samples[:, :, :start_frame_count] reference_frames_data = samples[:, :, start_frame_count:start_frame_count+min(reference_frame_count, samples.shape[2]-1)] normalized_first_frames = adaptive_mean_std_normalization(first_frames, reference_frames_data) samples[:, :, :start_frame_count] = normalized_first_frames s["samples"] = samples return io.NodeOutput(s) class CLIPTextEncodeKandinsky5(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="CLIPTextEncodeKandinsky5", search_aliases=["kandinsky prompt"], category="advanced/conditioning/kandinsky5", inputs=[ io.Clip.Input("clip"), io.String.Input("clip_l", multiline=True, dynamic_prompts=True), io.String.Input("qwen25_7b", multiline=True, dynamic_prompts=True), ], outputs=[ io.Conditioning.Output(), ], ) @classmethod def execute(cls, clip, clip_l, qwen25_7b) -> io.NodeOutput: tokens = clip.tokenize(clip_l) tokens["qwen25_7b"] = clip.tokenize(qwen25_7b)["qwen25_7b"] return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens)) class Kandinsky5Extension(ComfyExtension): @override async def get_node_list(self) -> list[type[io.ComfyNode]]: return [ Kandinsky5ImageToVideo, NormalizeVideoLatentStart, CLIPTextEncodeKandinsky5, ] async def comfy_entrypoint() -> 
Kandinsky5Extension: return Kandinsky5Extension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_kandinsky5.py", "license": "GNU General Public License v3.0", "lines": 113, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_extras/nodes_logic.py
from __future__ import annotations from typing import TypedDict from typing_extensions import override from comfy_api.latest import ComfyExtension, io from comfy_api.latest import _io # sentinel for missing inputs MISSING = object() class SwitchNode(io.ComfyNode): @classmethod def define_schema(cls): template = io.MatchType.Template("switch") return io.Schema( node_id="ComfySwitchNode", display_name="Switch", category="logic", is_experimental=True, inputs=[ io.Boolean.Input("switch"), io.MatchType.Input("on_false", template=template, lazy=True), io.MatchType.Input("on_true", template=template, lazy=True), ], outputs=[ io.MatchType.Output(template=template, display_name="output"), ], ) @classmethod def check_lazy_status(cls, switch, on_false=None, on_true=None): if switch and on_true is None: return ["on_true"] if not switch and on_false is None: return ["on_false"] @classmethod def execute(cls, switch, on_true, on_false) -> io.NodeOutput: return io.NodeOutput(on_true if switch else on_false) class SoftSwitchNode(io.ComfyNode): @classmethod def define_schema(cls): template = io.MatchType.Template("switch") return io.Schema( node_id="ComfySoftSwitchNode", display_name="Soft Switch", category="logic", is_experimental=True, inputs=[ io.Boolean.Input("switch"), io.MatchType.Input("on_false", template=template, lazy=True, optional=True), io.MatchType.Input("on_true", template=template, lazy=True, optional=True), ], outputs=[ io.MatchType.Output(template=template, display_name="output"), ], ) @classmethod def check_lazy_status(cls, switch, on_false=MISSING, on_true=MISSING): # We use MISSING instead of None, as None is passed for connected-but-unevaluated inputs. # This trick allows us to ignore the value of the switch and still be able to run execute(). 
# One of the inputs may be missing, in which case we need to evaluate the other input if on_false is MISSING: return ["on_true"] if on_true is MISSING: return ["on_false"] # Normal lazy switch operation if switch and on_true is None: return ["on_true"] if not switch and on_false is None: return ["on_false"] @classmethod def validate_inputs(cls, switch, on_false=MISSING, on_true=MISSING): # This check happens before check_lazy_status(), so we can eliminate the case where # both inputs are missing. if on_false is MISSING and on_true is MISSING: return "At least one of on_false or on_true must be connected to Switch node" return True @classmethod def execute(cls, switch, on_true=MISSING, on_false=MISSING) -> io.NodeOutput: if on_true is MISSING: return io.NodeOutput(on_false) if on_false is MISSING: return io.NodeOutput(on_true) return io.NodeOutput(on_true if switch else on_false) class CustomComboNode(io.ComfyNode): """ Frontend node that allows user to write their own options for a combo. This is here to make sure the node has a backend-representation to avoid some annoyances. """ @classmethod def define_schema(cls): return io.Schema( node_id="CustomCombo", display_name="Custom Combo", category="utils", is_experimental=True, inputs=[io.Combo.Input("choice", options=[])], outputs=[ io.String.Output(display_name="STRING"), io.Int.Output(display_name="INDEX"), ], accept_all_inputs=True, ) @classmethod def validate_inputs(cls, choice: io.Combo.Type, index: int = 0, **kwargs) -> bool: # NOTE: DO NOT DO THIS unless you want to skip validation entirely on the node's inputs. # I am doing that here because the widgets (besides the combo dropdown) on this node are fully frontend defined. # I need to skip checking that the chosen combo option is in the options list, since those are defined by the user. 
return True @classmethod def execute(cls, choice: io.Combo.Type, index: int = 0, **kwargs) -> io.NodeOutput: return io.NodeOutput(choice, index) class DCTestNode(io.ComfyNode): class DCValues(TypedDict): combo: str string: str integer: int image: io.Image.Type subcombo: dict[str] @classmethod def define_schema(cls): return io.Schema( node_id="DCTestNode", display_name="DCTest", category="logic", is_output_node=True, inputs=[io.DynamicCombo.Input("combo", options=[ io.DynamicCombo.Option("option1", [io.String.Input("string")]), io.DynamicCombo.Option("option2", [io.Int.Input("integer")]), io.DynamicCombo.Option("option3", [io.Image.Input("image")]), io.DynamicCombo.Option("option4", [ io.DynamicCombo.Input("subcombo", options=[ io.DynamicCombo.Option("opt1", [io.Float.Input("float_x"), io.Float.Input("float_y")]), io.DynamicCombo.Option("opt2", [io.Mask.Input("mask1", optional=True)]), ]) ])] )], outputs=[io.AnyType.Output()], ) @classmethod def execute(cls, combo: DCValues) -> io.NodeOutput: combo_val = combo["combo"] if combo_val == "option1": return io.NodeOutput(combo["string"]) elif combo_val == "option2": return io.NodeOutput(combo["integer"]) elif combo_val == "option3": return io.NodeOutput(combo["image"]) elif combo_val == "option4": return io.NodeOutput(f"{combo['subcombo']}") else: raise ValueError(f"Invalid combo: {combo_val}") class AutogrowNamesTestNode(io.ComfyNode): @classmethod def define_schema(cls): template = _io.Autogrow.TemplateNames(input=io.Float.Input("float"), names=["a", "b", "c"]) return io.Schema( node_id="AutogrowNamesTestNode", display_name="AutogrowNamesTest", category="logic", inputs=[ _io.Autogrow.Input("autogrow", template=template) ], outputs=[io.String.Output()], ) @classmethod def execute(cls, autogrow: _io.Autogrow.Type) -> io.NodeOutput: vals = list(autogrow.values()) combined = ",".join([str(x) for x in vals]) return io.NodeOutput(combined) class AutogrowPrefixTestNode(io.ComfyNode): @classmethod def define_schema(cls): 
template = _io.Autogrow.TemplatePrefix(input=io.Float.Input("float"), prefix="float", min=1, max=10) return io.Schema( node_id="AutogrowPrefixTestNode", display_name="AutogrowPrefixTest", category="logic", inputs=[ _io.Autogrow.Input("autogrow", template=template) ], outputs=[io.String.Output()], ) @classmethod def execute(cls, autogrow: _io.Autogrow.Type) -> io.NodeOutput: vals = list(autogrow.values()) combined = ",".join([str(x) for x in vals]) return io.NodeOutput(combined) class ComboOutputTestNode(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="ComboOptionTestNode", display_name="ComboOptionTest", category="logic", inputs=[io.Combo.Input("combo", options=["option1", "option2", "option3"]), io.Combo.Input("combo2", options=["option4", "option5", "option6"])], outputs=[io.Combo.Output(), io.Combo.Output()], ) @classmethod def execute(cls, combo: io.Combo.Type, combo2: io.Combo.Type) -> io.NodeOutput: return io.NodeOutput(combo, combo2) class ConvertStringToComboNode(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="ConvertStringToComboNode", search_aliases=["string to dropdown", "text to combo"], display_name="Convert String to Combo", category="logic", inputs=[io.String.Input("string")], outputs=[io.Combo.Output()], ) @classmethod def execute(cls, string: str) -> io.NodeOutput: return io.NodeOutput(string) class InvertBooleanNode(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="InvertBooleanNode", search_aliases=["not", "toggle", "negate", "flip boolean"], display_name="Invert Boolean", category="logic", inputs=[io.Boolean.Input("boolean")], outputs=[io.Boolean.Output()], ) @classmethod def execute(cls, boolean: bool) -> io.NodeOutput: return io.NodeOutput(not boolean) class LogicExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[io.ComfyNode]]: return [ SwitchNode, CustomComboNode, # SoftSwitchNode, # ConvertStringToComboNode, # DCTestNode, # 
AutogrowNamesTestNode, # AutogrowPrefixTestNode, # ComboOutputTestNode, # InvertBooleanNode, ] async def comfy_entrypoint() -> LogicExtension: return LogicExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_logic.py", "license": "GNU General Public License v3.0", "lines": 242, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy/ldm/lumina/controlnet.py
import torch from torch import nn from .model import JointTransformerBlock class ZImageControlTransformerBlock(JointTransformerBlock): def __init__( self, layer_id: int, dim: int, n_heads: int, n_kv_heads: int, multiple_of: int, ffn_dim_multiplier: float, norm_eps: float, qk_norm: bool, modulation=True, block_id=0, operation_settings=None, ): super().__init__(layer_id, dim, n_heads, n_kv_heads, multiple_of, ffn_dim_multiplier, norm_eps, qk_norm, modulation, z_image_modulation=True, operation_settings=operation_settings) self.block_id = block_id if block_id == 0: self.before_proj = operation_settings.get("operations").Linear(self.dim, self.dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.after_proj = operation_settings.get("operations").Linear(self.dim, self.dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) def forward(self, c, x, **kwargs): if self.block_id == 0: c = self.before_proj(c) + x c = super().forward(c, **kwargs) c_skip = self.after_proj(c) return c_skip, c class ZImage_Control(torch.nn.Module): def __init__( self, dim: int = 3840, n_heads: int = 30, n_kv_heads: int = 30, multiple_of: int = 256, ffn_dim_multiplier: float = (8.0 / 3.0), norm_eps: float = 1e-5, qk_norm: bool = True, n_control_layers=6, control_in_dim=16, additional_in_dim=0, broken=False, refiner_control=False, dtype=None, device=None, operations=None, **kwargs ): super().__init__() operation_settings = {"operations": operations, "device": device, "dtype": dtype} self.broken = broken self.additional_in_dim = additional_in_dim self.control_in_dim = control_in_dim n_refiner_layers = 2 self.n_control_layers = n_control_layers self.control_layers = nn.ModuleList( [ ZImageControlTransformerBlock( i, dim, n_heads, n_kv_heads, multiple_of, ffn_dim_multiplier, norm_eps, qk_norm, block_id=i, operation_settings=operation_settings, ) for i in range(self.n_control_layers) ] ) all_x_embedder = {} patch_size = 2 f_patch_size = 1 
x_embedder = operations.Linear(f_patch_size * patch_size * patch_size * (self.control_in_dim + self.additional_in_dim), dim, bias=True, device=device, dtype=dtype) all_x_embedder[f"{patch_size}-{f_patch_size}"] = x_embedder self.refiner_control = refiner_control self.control_all_x_embedder = nn.ModuleDict(all_x_embedder) if self.refiner_control: self.control_noise_refiner = nn.ModuleList( [ ZImageControlTransformerBlock( layer_id, dim, n_heads, n_kv_heads, multiple_of, ffn_dim_multiplier, norm_eps, qk_norm, block_id=layer_id, operation_settings=operation_settings, ) for layer_id in range(n_refiner_layers) ] ) else: self.control_noise_refiner = nn.ModuleList( [ JointTransformerBlock( layer_id, dim, n_heads, n_kv_heads, multiple_of, ffn_dim_multiplier, norm_eps, qk_norm, modulation=True, z_image_modulation=True, operation_settings=operation_settings, ) for layer_id in range(n_refiner_layers) ] ) def forward(self, cap_feats, control_context, x_freqs_cis, adaln_input): patch_size = 2 f_patch_size = 1 pH = pW = patch_size B, C, H, W = control_context.shape control_context = self.control_all_x_embedder[f"{patch_size}-{f_patch_size}"](control_context.view(B, C, H // pH, pH, W // pW, pW).permute(0, 2, 4, 3, 5, 1).flatten(3).flatten(1, 2)) x_attn_mask = None if not self.refiner_control: for layer in self.control_noise_refiner: control_context = layer(control_context, x_attn_mask, x_freqs_cis[:control_context.shape[0], :control_context.shape[1]], adaln_input) return control_context def forward_noise_refiner_block(self, layer_id, control_context, x, x_attn_mask, x_freqs_cis, adaln_input): if self.refiner_control: if self.broken: if layer_id == 0: return self.control_layers[layer_id](control_context, x, x_mask=x_attn_mask, freqs_cis=x_freqs_cis[:control_context.shape[0], :control_context.shape[1]], adaln_input=adaln_input) if layer_id > 0: out = None for i in range(1, len(self.control_layers)): o, control_context = self.control_layers[i](control_context, x, x_mask=x_attn_mask, 
freqs_cis=x_freqs_cis[:control_context.shape[0], :control_context.shape[1]], adaln_input=adaln_input) if out is None: out = o return (out, control_context) else: return self.control_noise_refiner[layer_id](control_context, x, x_mask=x_attn_mask, freqs_cis=x_freqs_cis[:control_context.shape[0], :control_context.shape[1]], adaln_input=adaln_input) else: return (None, control_context) def forward_control_block(self, layer_id, control_context, x, x_attn_mask, x_freqs_cis, adaln_input): return self.control_layers[layer_id](control_context, x, x_mask=x_attn_mask, freqs_cis=x_freqs_cis[:control_context.shape[0], :control_context.shape[1]], adaln_input=adaln_input)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/ldm/lumina/controlnet.py", "license": "GNU General Public License v3.0", "lines": 146, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy/text_encoders/ovis.py
from transformers import Qwen2Tokenizer import comfy.text_encoders.llama from comfy import sd1_clip import os import torch import numbers class Qwen3Tokenizer(sd1_clip.SDTokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "qwen25_tokenizer") super().__init__(tokenizer_path, pad_with_end=False, embedding_size=2048, embedding_key='qwen3_2b', tokenizer_class=Qwen2Tokenizer, has_start_token=False, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=284, pad_token=151643, tokenizer_data=tokenizer_data) class OvisTokenizer(sd1_clip.SD1Tokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name="qwen3_2b", tokenizer=Qwen3Tokenizer) self.llama_template = "<|im_start|>user\nDescribe the image by detailing the color, quantity, text, shape, size, texture, spatial relationships of the objects and background: {}<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n" def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None, **kwargs): if llama_template is None: llama_text = self.llama_template.format(text) else: llama_text = llama_template.format(text) tokens = super().tokenize_with_weights(llama_text, return_word_ids=return_word_ids, disable_weights=True, **kwargs) return tokens class Ovis25_2BModel(sd1_clip.SDClipModel): def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, attention_mask=True, model_options={}): super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Ovis25_2B, enable_attention_masks=attention_mask, return_attention_masks=False, zero_out_masked=True, model_options=model_options) class OvisTEModel(sd1_clip.SD1ClipModel): def 
__init__(self, device="cpu", dtype=None, model_options={}): super().__init__(device=device, dtype=dtype, name="qwen3_2b", clip_model=Ovis25_2BModel, model_options=model_options) def encode_token_weights(self, token_weight_pairs, template_end=-1): out, pooled = super().encode_token_weights(token_weight_pairs) tok_pairs = token_weight_pairs["qwen3_2b"][0] count_im_start = 0 if template_end == -1: for i, v in enumerate(tok_pairs): elem = v[0] if not torch.is_tensor(elem): if isinstance(elem, numbers.Integral): if elem == 4004 and count_im_start < 1: template_end = i count_im_start += 1 if out.shape[1] > (template_end + 1): if tok_pairs[template_end + 1][0] == 25: template_end += 1 out = out[:, template_end:] return out, pooled, {} def te(dtype_llama=None, llama_quantization_metadata=None): class OvisTEModel_(OvisTEModel): def __init__(self, device="cpu", dtype=None, model_options={}): if dtype_llama is not None: dtype = dtype_llama if llama_quantization_metadata is not None: model_options = model_options.copy() model_options["quantization_metadata"] = llama_quantization_metadata super().__init__(device=device, dtype=dtype, model_options=model_options) return OvisTEModel_
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/text_encoders/ovis.py", "license": "GNU General Public License v3.0", "lines": 54, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:tests-unit/app_test/user_manager_system_user_test.py
"""Tests for System User Protection in user_manager.py Tests cover: - get_request_user_id(): 1st defense layer - blocks System Users from HTTP headers - get_request_user_filepath(): 2nd defense layer - structural blocking via get_public_user_directory() - add_user(): 3rd defense layer - prevents creation of System User names - Defense layers integration tests """ import pytest from unittest.mock import MagicMock, patch import tempfile import folder_paths from app.user_manager import UserManager @pytest.fixture def mock_user_directory(): """Create a temporary user directory.""" with tempfile.TemporaryDirectory() as temp_dir: original_dir = folder_paths.get_user_directory() folder_paths.set_user_directory(temp_dir) yield temp_dir folder_paths.set_user_directory(original_dir) @pytest.fixture def user_manager(mock_user_directory): """Create a UserManager instance for testing.""" with patch('app.user_manager.args') as mock_args: mock_args.multi_user = True manager = UserManager() # Add a default user for testing manager.users = {"default": "default", "test_user_123": "Test User"} yield manager @pytest.fixture def mock_request(): """Create a mock request object.""" request = MagicMock() request.headers = {} return request class TestGetRequestUserId: """Tests for get_request_user_id() - 1st defense layer. 
Verifies: - System Users (__ prefix) in HTTP header are rejected with KeyError - Public Users pass through successfully """ def test_system_user_raises_error(self, user_manager, mock_request): """Test System User in header raises KeyError.""" mock_request.headers = {"comfy-user": "__system"} with patch('app.user_manager.args') as mock_args: mock_args.multi_user = True with pytest.raises(KeyError, match="Unknown user"): user_manager.get_request_user_id(mock_request) def test_system_user_cache_raises_error(self, user_manager, mock_request): """Test System User cache raises KeyError.""" mock_request.headers = {"comfy-user": "__cache"} with patch('app.user_manager.args') as mock_args: mock_args.multi_user = True with pytest.raises(KeyError, match="Unknown user"): user_manager.get_request_user_id(mock_request) def test_normal_user_works(self, user_manager, mock_request): """Test normal user access works.""" mock_request.headers = {"comfy-user": "default"} with patch('app.user_manager.args') as mock_args: mock_args.multi_user = True user_id = user_manager.get_request_user_id(mock_request) assert user_id == "default" def test_unknown_user_raises_error(self, user_manager, mock_request): """Test unknown user raises KeyError.""" mock_request.headers = {"comfy-user": "unknown_user"} with patch('app.user_manager.args') as mock_args: mock_args.multi_user = True with pytest.raises(KeyError, match="Unknown user"): user_manager.get_request_user_id(mock_request) class TestGetRequestUserFilepath: """Tests for get_request_user_filepath() - 2nd defense layer. 
Verifies: - Returns None when get_public_user_directory() returns None (System User) - Acts as backup defense if 1st layer is bypassed """ def test_system_user_returns_none(self, user_manager, mock_request, mock_user_directory): """Test System User returns None (structural blocking).""" # First, we need to mock get_request_user_id to return System User # But actually, get_request_user_id will raise KeyError first # So we test via get_public_user_directory returning None mock_request.headers = {"comfy-user": "default"} with patch('app.user_manager.args') as mock_args: mock_args.multi_user = True # Patch get_public_user_directory to return None for testing with patch.object(folder_paths, 'get_public_user_directory', return_value=None): result = user_manager.get_request_user_filepath(mock_request, "test.txt") assert result is None def test_normal_user_gets_path(self, user_manager, mock_request, mock_user_directory): """Test normal user gets valid filepath.""" mock_request.headers = {"comfy-user": "default"} with patch('app.user_manager.args') as mock_args: mock_args.multi_user = True path = user_manager.get_request_user_filepath(mock_request, "test.txt") assert path is not None assert "default" in path assert path.endswith("test.txt") class TestAddUser: """Tests for add_user() - 3rd defense layer (creation-time blocking). 
Verifies: - System User name (__ prefix) creation is rejected with ValueError - Sanitized usernames that become System User are also rejected """ def test_system_user_prefix_name_raises(self, user_manager): """Test System User prefix in name raises ValueError.""" with pytest.raises(ValueError, match="System User prefix not allowed"): user_manager.add_user("__system") def test_system_user_prefix_cache_raises(self, user_manager): """Test System User cache prefix raises ValueError.""" with pytest.raises(ValueError, match="System User prefix not allowed"): user_manager.add_user("__cache") def test_sanitized_system_user_prefix_raises(self, user_manager): """Test sanitized name becoming System User prefix raises ValueError (bypass prevention).""" # "__test" directly starts with System User prefix with pytest.raises(ValueError, match="System User prefix not allowed"): user_manager.add_user("__test") def test_normal_user_creation(self, user_manager, mock_user_directory): """Test normal user creation works.""" user_id = user_manager.add_user("Normal User") assert user_id is not None assert not user_id.startswith("__") assert "Normal-User" in user_id or "Normal_User" in user_id def test_empty_name_raises(self, user_manager): """Test empty name raises ValueError.""" with pytest.raises(ValueError, match="username not provided"): user_manager.add_user("") def test_whitespace_only_raises(self, user_manager): """Test whitespace-only name raises ValueError.""" with pytest.raises(ValueError, match="username not provided"): user_manager.add_user(" ") class TestDefenseLayers: """Integration tests for all three defense layers. 
Verifies: - Each defense layer blocks System Users independently - System User bypass is impossible through any layer """ def test_layer1_get_request_user_id(self, user_manager, mock_request): """Test 1st defense layer blocks System Users.""" mock_request.headers = {"comfy-user": "__system"} with patch('app.user_manager.args') as mock_args: mock_args.multi_user = True with pytest.raises(KeyError): user_manager.get_request_user_id(mock_request) def test_layer2_get_public_user_directory(self): """Test 2nd defense layer blocks System Users.""" result = folder_paths.get_public_user_directory("__system") assert result is None def test_layer3_add_user(self, user_manager): """Test 3rd defense layer blocks System User creation.""" with pytest.raises(ValueError): user_manager.add_user("__system")
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "tests-unit/app_test/user_manager_system_user_test.py", "license": "GNU General Public License v3.0", "lines": 150, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Comfy-Org/ComfyUI:tests-unit/folder_paths_test/system_user_test.py
"""Tests for System User Protection in folder_paths.py Tests cover: - get_system_user_directory(): Internal API for custom nodes to access System User directories - get_public_user_directory(): HTTP endpoint access with System User blocking - Backward compatibility: Existing APIs unchanged - Security: Path traversal and injection prevention """ import pytest import os import tempfile from folder_paths import ( get_system_user_directory, get_public_user_directory, get_user_directory, set_user_directory, ) @pytest.fixture(scope="module") def mock_user_directory(): """Create a temporary user directory for testing.""" with tempfile.TemporaryDirectory() as temp_dir: original_dir = get_user_directory() set_user_directory(temp_dir) yield temp_dir set_user_directory(original_dir) class TestGetSystemUserDirectory: """Tests for get_system_user_directory() - internal API for System User directories. Verifies: - Custom nodes can access System User directories via internal API - Input validation prevents path traversal attacks """ def test_default_name(self, mock_user_directory): """Test default 'system' name.""" path = get_system_user_directory() assert path.endswith("__system") assert mock_user_directory in path def test_custom_name(self, mock_user_directory): """Test custom system user name.""" path = get_system_user_directory("cache") assert path.endswith("__cache") assert "__cache" in path def test_name_with_underscore(self, mock_user_directory): """Test name with underscore in middle.""" path = get_system_user_directory("my_cache") assert "__my_cache" in path def test_empty_name_raises(self): """Test empty name raises ValueError.""" with pytest.raises(ValueError, match="cannot be empty"): get_system_user_directory("") def test_none_name_raises(self): """Test None name raises ValueError.""" with pytest.raises(ValueError, match="cannot be empty"): get_system_user_directory(None) def test_name_starting_with_underscore_raises(self): """Test name starting with underscore 
raises ValueError.""" with pytest.raises(ValueError, match="should not start with underscore"): get_system_user_directory("_system") def test_path_traversal_raises(self): """Test path traversal attempt raises ValueError (security).""" with pytest.raises(ValueError, match="Invalid system user name"): get_system_user_directory("../escape") def test_path_traversal_middle_raises(self): """Test path traversal in middle raises ValueError (security).""" with pytest.raises(ValueError, match="Invalid system user name"): get_system_user_directory("system/../other") def test_special_chars_raise(self): """Test special characters raise ValueError (security).""" with pytest.raises(ValueError, match="Invalid system user name"): get_system_user_directory("system!") def test_returns_absolute_path(self, mock_user_directory): """Test returned path is absolute.""" path = get_system_user_directory("test") assert os.path.isabs(path) class TestGetPublicUserDirectory: """Tests for get_public_user_directory() - HTTP endpoint access with System User blocking. 
Verifies: - System Users (__ prefix) return None, blocking HTTP access - Public Users get valid paths - New endpoints using this function are automatically protected """ def test_normal_user(self, mock_user_directory): """Test normal user returns valid path.""" path = get_public_user_directory("default") assert path is not None assert "default" in path assert mock_user_directory in path def test_system_user_returns_none(self): """Test System User (__ prefix) returns None - blocks HTTP access.""" assert get_public_user_directory("__system") is None def test_system_user_cache_returns_none(self): """Test System User cache returns None.""" assert get_public_user_directory("__cache") is None def test_empty_user_returns_none(self): """Test empty user returns None.""" assert get_public_user_directory("") is None def test_none_user_returns_none(self): """Test None user returns None.""" assert get_public_user_directory(None) is None def test_header_injection_returns_none(self): """Test header injection attempt returns None (security).""" assert get_public_user_directory("__system\r\nX-Injected: true") is None def test_null_byte_injection_returns_none(self): """Test null byte injection handling (security).""" # Note: startswith check happens before any path operations result = get_public_user_directory("user\x00__system") # This should return a path since it doesn't start with __ # The actual security comes from the path not being __* assert result is not None or result is None # Depends on validation def test_path_traversal_attempt(self, mock_user_directory): """Test path traversal attempt handling.""" # This function doesn't validate paths, only reserved prefix # Path traversal should be handled by the caller path = get_public_user_directory("../../../etc/passwd") # Returns path but doesn't start with __, so not None # Actual path validation happens in user_manager assert path is not None or "__" not in "../../../etc/passwd" def test_returns_absolute_path(self, 
mock_user_directory): """Test returned path is absolute.""" path = get_public_user_directory("testuser") assert path is not None assert os.path.isabs(path) class TestBackwardCompatibility: """Tests for backward compatibility with existing APIs. Verifies: - get_user_directory() API unchanged - Existing user data remains accessible """ def test_get_user_directory_unchanged(self, mock_user_directory): """Test get_user_directory() still works as before.""" user_dir = get_user_directory() assert user_dir is not None assert os.path.isabs(user_dir) assert user_dir == mock_user_directory def test_existing_user_accessible(self, mock_user_directory): """Test existing users can access their directories.""" path = get_public_user_directory("default") assert path is not None assert "default" in path class TestEdgeCases: """Tests for edge cases in System User detection. Verifies: - Only __ prefix is blocked (not _, not middle __) - Bypass attempts are prevented """ def test_prefix_only(self): """Test prefix-only string is blocked.""" assert get_public_user_directory("__") is None def test_single_underscore_allowed(self): """Test single underscore prefix is allowed (not System User).""" path = get_public_user_directory("_system") assert path is not None assert "_system" in path def test_triple_underscore_blocked(self): """Test triple underscore is blocked (starts with __).""" assert get_public_user_directory("___system") is None def test_underscore_in_middle_allowed(self): """Test underscore in middle is allowed.""" path = get_public_user_directory("my__system") assert path is not None assert "my__system" in path def test_leading_space_allowed(self): """Test leading space + prefix is allowed (doesn't start with __).""" path = get_public_user_directory(" __system") assert path is not None
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "tests-unit/folder_paths_test/system_user_test.py", "license": "GNU General Public License v3.0", "lines": 163, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Comfy-Org/ComfyUI:tests-unit/prompt_server_test/system_user_endpoint_test.py
"""E2E Tests for System User Protection HTTP Endpoints Tests cover: - HTTP endpoint blocking: System Users cannot access /userdata (GET, POST, DELETE, move) - User creation blocking: System User names cannot be created via POST /users - Backward compatibility: Public Users work as before - Custom node scenario: Internal API works while HTTP is blocked - Structural security: get_public_user_directory() provides automatic protection """ import pytest import os from aiohttp import web from app.user_manager import UserManager from unittest.mock import patch import folder_paths @pytest.fixture def mock_user_directory(tmp_path): """Create a temporary user directory.""" original_dir = folder_paths.get_user_directory() folder_paths.set_user_directory(str(tmp_path)) yield tmp_path folder_paths.set_user_directory(original_dir) @pytest.fixture def user_manager_multi_user(mock_user_directory): """Create UserManager in multi-user mode.""" with patch('app.user_manager.args') as mock_args: mock_args.multi_user = True um = UserManager() # Add test users um.users = {"default": "default", "test_user_123": "Test User"} yield um @pytest.fixture def app_multi_user(user_manager_multi_user): """Create app with multi-user mode enabled.""" app = web.Application() routes = web.RouteTableDef() user_manager_multi_user.add_routes(routes) app.add_routes(routes) return app class TestSystemUserEndpointBlocking: """E2E tests for System User blocking on all HTTP endpoints. Verifies: - GET /userdata blocked for System Users - POST /userdata blocked for System Users - DELETE /userdata blocked for System Users - POST /userdata/.../move/... blocked for System Users """ @pytest.mark.asyncio async def test_userdata_get_blocks_system_user( self, aiohttp_client, app_multi_user, mock_user_directory ): """ GET /userdata with System User header should be blocked. 
""" # Create test directory for System User (simulating internal creation) system_user_dir = mock_user_directory / "__system" system_user_dir.mkdir() (system_user_dir / "secret.txt").write_text("sensitive data") client = await aiohttp_client(app_multi_user) with patch('app.user_manager.args') as mock_args: mock_args.multi_user = True # Attempt to access System User's data via HTTP resp = await client.get( "/userdata?dir=.", headers={"comfy-user": "__system"} ) # Should be blocked (403 Forbidden or similar error) assert resp.status in [400, 403, 500], \ f"System User access should be blocked, got {resp.status}" @pytest.mark.asyncio async def test_userdata_post_blocks_system_user( self, aiohttp_client, app_multi_user, mock_user_directory ): """ POST /userdata with System User header should be blocked. """ client = await aiohttp_client(app_multi_user) with patch('app.user_manager.args') as mock_args: mock_args.multi_user = True resp = await client.post( "/userdata/test.txt", headers={"comfy-user": "__system"}, data=b"malicious content" ) assert resp.status in [400, 403, 500], \ f"System User write should be blocked, got {resp.status}" # Verify no file was created assert not (mock_user_directory / "__system" / "test.txt").exists() @pytest.mark.asyncio async def test_userdata_delete_blocks_system_user( self, aiohttp_client, app_multi_user, mock_user_directory ): """ DELETE /userdata with System User header should be blocked. 
""" # Create a file in System User directory system_user_dir = mock_user_directory / "__system" system_user_dir.mkdir() secret_file = system_user_dir / "secret.txt" secret_file.write_text("do not delete") client = await aiohttp_client(app_multi_user) with patch('app.user_manager.args') as mock_args: mock_args.multi_user = True resp = await client.delete( "/userdata/secret.txt", headers={"comfy-user": "__system"} ) assert resp.status in [400, 403, 500], \ f"System User delete should be blocked, got {resp.status}" # Verify file still exists assert secret_file.exists() @pytest.mark.asyncio async def test_v2_userdata_blocks_system_user( self, aiohttp_client, app_multi_user, mock_user_directory ): """ GET /v2/userdata with System User header should be blocked. """ client = await aiohttp_client(app_multi_user) with patch('app.user_manager.args') as mock_args: mock_args.multi_user = True resp = await client.get( "/v2/userdata", headers={"comfy-user": "__system"} ) assert resp.status in [400, 403, 500], \ f"System User v2 access should be blocked, got {resp.status}" @pytest.mark.asyncio async def test_move_userdata_blocks_system_user( self, aiohttp_client, app_multi_user, mock_user_directory ): """ POST /userdata/{file}/move/{dest} with System User header should be blocked. """ system_user_dir = mock_user_directory / "__system" system_user_dir.mkdir() (system_user_dir / "source.txt").write_text("sensitive data") client = await aiohttp_client(app_multi_user) with patch('app.user_manager.args') as mock_args: mock_args.multi_user = True resp = await client.post( "/userdata/source.txt/move/dest.txt", headers={"comfy-user": "__system"} ) assert resp.status in [400, 403, 500], \ f"System User move should be blocked, got {resp.status}" # Verify source file still exists (move was blocked) assert (system_user_dir / "source.txt").exists() class TestSystemUserCreationBlocking: """E2E tests for blocking System User name creation via POST /users. 
Verifies: - POST /users returns 400 for System User name (not 500) """ @pytest.mark.asyncio async def test_post_users_blocks_system_user_name( self, aiohttp_client, app_multi_user ): """POST /users with System User name should return 400 Bad Request.""" client = await aiohttp_client(app_multi_user) resp = await client.post( "/users", json={"username": "__system"} ) assert resp.status == 400, \ f"System User creation should return 400, got {resp.status}" @pytest.mark.asyncio async def test_post_users_blocks_system_user_prefix_variations( self, aiohttp_client, app_multi_user ): """POST /users with any System User prefix variation should return 400 Bad Request.""" client = await aiohttp_client(app_multi_user) system_user_names = ["__system", "__cache", "__config", "__anything"] for name in system_user_names: resp = await client.post("/users", json={"username": name}) assert resp.status == 400, \ f"System User name '{name}' should return 400, got {resp.status}" class TestPublicUserStillWorks: """E2E tests for backward compatibility - Public Users should work as before. Verifies: - Public Users can access their data via HTTP - Public Users can create files via HTTP """ @pytest.mark.asyncio async def test_public_user_can_access_userdata( self, aiohttp_client, app_multi_user, mock_user_directory ): """ Public Users should still be able to access their data. 
""" # Create test directory for Public User user_dir = mock_user_directory / "default" user_dir.mkdir() test_dir = user_dir / "workflows" test_dir.mkdir() (test_dir / "test.json").write_text('{"test": true}') client = await aiohttp_client(app_multi_user) with patch('app.user_manager.args') as mock_args: mock_args.multi_user = True resp = await client.get( "/userdata?dir=workflows", headers={"comfy-user": "default"} ) assert resp.status == 200 data = await resp.json() assert "test.json" in data @pytest.mark.asyncio async def test_public_user_can_create_files( self, aiohttp_client, app_multi_user, mock_user_directory ): """ Public Users should still be able to create files. """ # Create user directory user_dir = mock_user_directory / "default" user_dir.mkdir() client = await aiohttp_client(app_multi_user) with patch('app.user_manager.args') as mock_args: mock_args.multi_user = True resp = await client.post( "/userdata/newfile.txt", headers={"comfy-user": "default"}, data=b"user content" ) assert resp.status == 200 assert (user_dir / "newfile.txt").exists() class TestCustomNodeScenario: """Tests for custom node use case: internal API access vs HTTP blocking. Verifies: - Internal API (get_system_user_directory) works for custom nodes - HTTP endpoint cannot access data created via internal API """ def test_internal_api_can_access_system_user(self, mock_user_directory): """ Internal API (get_system_user_directory) should work for custom nodes. 
""" # Custom node uses internal API system_path = folder_paths.get_system_user_directory("mynode_config") assert system_path is not None assert "__mynode_config" in system_path # Can create and write to System User directory os.makedirs(system_path, exist_ok=True) config_file = os.path.join(system_path, "settings.json") with open(config_file, "w") as f: f.write('{"api_key": "secret"}') assert os.path.exists(config_file) @pytest.mark.asyncio async def test_http_cannot_access_internal_data( self, aiohttp_client, app_multi_user, mock_user_directory ): """ HTTP endpoint cannot access data created via internal API. """ # Custom node creates data via internal API system_path = folder_paths.get_system_user_directory("mynode_config") os.makedirs(system_path, exist_ok=True) with open(os.path.join(system_path, "secret.json"), "w") as f: f.write('{"api_key": "secret"}') client = await aiohttp_client(app_multi_user) # Attacker tries to access via HTTP with patch('app.user_manager.args') as mock_args: mock_args.multi_user = True resp = await client.get( "/userdata/secret.json", headers={"comfy-user": "__mynode_config"} ) # Should be blocked assert resp.status in [400, 403, 500] class TestStructuralSecurity: """Tests for structural security pattern. Verifies: - get_public_user_directory() automatically blocks System Users - New endpoints using this function are automatically protected """ def test_get_public_user_directory_blocks_system_user(self): """ Any code using get_public_user_directory() is automatically protected. 
""" # This is the structural security - any new endpoint using this function # will automatically block System Users assert folder_paths.get_public_user_directory("__system") is None assert folder_paths.get_public_user_directory("__cache") is None assert folder_paths.get_public_user_directory("__anything") is None # Public Users work assert folder_paths.get_public_user_directory("default") is not None assert folder_paths.get_public_user_directory("user123") is not None def test_structural_security_pattern(self, mock_user_directory): """ Demonstrate the structural security pattern for new endpoints. Any new endpoint should follow this pattern: 1. Get user from request 2. Use get_public_user_directory() - automatically blocks System Users 3. If None, return error """ def new_endpoint_handler(user_id: str) -> str | None: """Example of how new endpoints should be implemented.""" user_path = folder_paths.get_public_user_directory(user_id) if user_path is None: return None # Blocked return user_path # System Users are automatically blocked assert new_endpoint_handler("__system") is None assert new_endpoint_handler("__secret") is None # Public Users work assert new_endpoint_handler("default") is not None
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "tests-unit/prompt_server_test/system_user_endpoint_test.py", "license": "GNU General Public License v3.0", "lines": 303, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Comfy-Org/ComfyUI:comfy/taesd/taehv.py
# Tiny AutoEncoder for HunyuanVideo and WanVideo https://github.com/madebyollin/taehv import torch import torch.nn as nn import torch.nn.functional as F from tqdm.auto import tqdm from collections import namedtuple, deque import comfy.ops operations=comfy.ops.disable_weight_init DecoderResult = namedtuple("DecoderResult", ("frame", "memory")) TWorkItem = namedtuple("TWorkItem", ("input_tensor", "block_index")) def conv(n_in, n_out, **kwargs): return operations.Conv2d(n_in, n_out, 3, padding=1, **kwargs) class Clamp(nn.Module): def forward(self, x): return torch.tanh(x / 3) * 3 class MemBlock(nn.Module): def __init__(self, n_in, n_out, act_func): super().__init__() self.conv = nn.Sequential(conv(n_in * 2, n_out), act_func, conv(n_out, n_out), act_func, conv(n_out, n_out)) self.skip = operations.Conv2d(n_in, n_out, 1, bias=False) if n_in != n_out else nn.Identity() self.act = act_func def forward(self, x, past): return self.act(self.conv(torch.cat([x, past], 1)) + self.skip(x)) class TPool(nn.Module): def __init__(self, n_f, stride): super().__init__() self.stride = stride self.conv = operations.Conv2d(n_f*stride,n_f, 1, bias=False) def forward(self, x): _NT, C, H, W = x.shape return self.conv(x.reshape(-1, self.stride * C, H, W)) class TGrow(nn.Module): def __init__(self, n_f, stride): super().__init__() self.stride = stride self.conv = operations.Conv2d(n_f, n_f*stride, 1, bias=False) def forward(self, x): _NT, C, H, W = x.shape x = self.conv(x) return x.reshape(-1, C, H, W) def apply_model_with_memblocks(model, x, parallel, show_progress_bar): B, T, C, H, W = x.shape if parallel: x = x.reshape(B*T, C, H, W) # parallel over input timesteps, iterate over blocks for b in tqdm(model, disable=not show_progress_bar): if isinstance(b, MemBlock): BT, C, H, W = x.shape T = BT // B _x = x.reshape(B, T, C, H, W) mem = F.pad(_x, (0,0,0,0,0,0,1,0), value=0)[:,:T].reshape(x.shape) x = b(x, mem) else: x = b(x) BT, C, H, W = x.shape T = BT // B x = x.view(B, T, C, H, W) else: out 
= [] work_queue = deque([TWorkItem(xt, 0) for t, xt in enumerate(x.reshape(B, T * C, H, W).chunk(T, dim=1))]) progress_bar = tqdm(range(T), disable=not show_progress_bar) mem = [None] * len(model) while work_queue: xt, i = work_queue.popleft() if i == 0: progress_bar.update(1) if i == len(model): out.append(xt) del xt else: b = model[i] if isinstance(b, MemBlock): if mem[i] is None: xt_new = b(xt, xt * 0) mem[i] = xt.detach().clone() else: xt_new = b(xt, mem[i]) mem[i] = xt.detach().clone() del xt work_queue.appendleft(TWorkItem(xt_new, i+1)) elif isinstance(b, TPool): if mem[i] is None: mem[i] = [] mem[i].append(xt.detach().clone()) if len(mem[i]) == b.stride: B, C, H, W = xt.shape xt = b(torch.cat(mem[i], 1).view(B*b.stride, C, H, W)) mem[i] = [] work_queue.appendleft(TWorkItem(xt, i+1)) elif isinstance(b, TGrow): xt = b(xt) NT, C, H, W = xt.shape for xt_next in reversed(xt.view(B, b.stride*C, H, W).chunk(b.stride, 1)): work_queue.appendleft(TWorkItem(xt_next, i+1)) del xt else: xt = b(xt) work_queue.appendleft(TWorkItem(xt, i+1)) progress_bar.close() x = torch.stack(out, 1) return x class TAEHV(nn.Module): def __init__(self, latent_channels, parallel=False, encoder_time_downscale=(True, True, False), decoder_time_upscale=(False, True, True), decoder_space_upscale=(True, True, True), latent_format=None, show_progress_bar=False): super().__init__() self.image_channels = 3 self.patch_size = 1 self.latent_channels = latent_channels self.parallel = parallel self.latent_format = latent_format self.show_progress_bar = show_progress_bar self.process_in = latent_format().process_in if latent_format is not None else (lambda x: x) self.process_out = latent_format().process_out if latent_format is not None else (lambda x: x) if self.latent_channels in [48, 32]: # Wan 2.2 and HunyuanVideo1.5 self.patch_size = 2 elif self.latent_channels == 128: # LTX2 self.patch_size, self.latent_channels, encoder_time_downscale, decoder_time_upscale = 4, 128, (True, True, True), (True, 
True, True) if self.latent_channels == 32: # HunyuanVideo1.5 act_func = nn.LeakyReLU(0.2, inplace=True) else: # HunyuanVideo, Wan 2.1 act_func = nn.ReLU(inplace=True) self.encoder = nn.Sequential( conv(self.image_channels*self.patch_size**2, 64), act_func, TPool(64, 2 if encoder_time_downscale[0] else 1), conv(64, 64, stride=2, bias=False), MemBlock(64, 64, act_func), MemBlock(64, 64, act_func), MemBlock(64, 64, act_func), TPool(64, 2 if encoder_time_downscale[1] else 1), conv(64, 64, stride=2, bias=False), MemBlock(64, 64, act_func), MemBlock(64, 64, act_func), MemBlock(64, 64, act_func), TPool(64, 2 if encoder_time_downscale[2] else 1), conv(64, 64, stride=2, bias=False), MemBlock(64, 64, act_func), MemBlock(64, 64, act_func), MemBlock(64, 64, act_func), conv(64, self.latent_channels), ) n_f = [256, 128, 64, 64] self.decoder = nn.Sequential( Clamp(), conv(self.latent_channels, n_f[0]), act_func, MemBlock(n_f[0], n_f[0], act_func), MemBlock(n_f[0], n_f[0], act_func), MemBlock(n_f[0], n_f[0], act_func), nn.Upsample(scale_factor=2 if decoder_space_upscale[0] else 1), TGrow(n_f[0], 2 if decoder_time_upscale[0] else 1), conv(n_f[0], n_f[1], bias=False), MemBlock(n_f[1], n_f[1], act_func), MemBlock(n_f[1], n_f[1], act_func), MemBlock(n_f[1], n_f[1], act_func), nn.Upsample(scale_factor=2 if decoder_space_upscale[1] else 1), TGrow(n_f[1], 2 if decoder_time_upscale[1] else 1), conv(n_f[1], n_f[2], bias=False), MemBlock(n_f[2], n_f[2], act_func), MemBlock(n_f[2], n_f[2], act_func), MemBlock(n_f[2], n_f[2], act_func), nn.Upsample(scale_factor=2 if decoder_space_upscale[2] else 1), TGrow(n_f[2], 2 if decoder_time_upscale[2] else 1), conv(n_f[2], n_f[3], bias=False), act_func, conv(n_f[3], self.image_channels*self.patch_size**2), ) self.t_downscale = 2**sum(t.stride == 2 for t in self.encoder if isinstance(t, TPool)) self.t_upscale = 2**sum(t.stride == 2 for t in self.decoder if isinstance(t, TGrow)) self.frames_to_trim = self.t_upscale - 1 self._show_progress_bar = 
show_progress_bar @property def show_progress_bar(self): return self._show_progress_bar @show_progress_bar.setter def show_progress_bar(self, value): self._show_progress_bar = value def encode(self, x, **kwargs): x = x.movedim(2, 1) # [B, C, T, H, W] -> [B, T, C, H, W] if self.patch_size > 1: B, T, C, H, W = x.shape x = x.reshape(B * T, C, H, W) x = F.pixel_unshuffle(x, self.patch_size) x = x.reshape(B, T, C * self.patch_size ** 2, H // self.patch_size, W // self.patch_size) if x.shape[1] % self.t_downscale != 0: # pad at end to multiple of t_downscale n_pad = self.t_downscale - x.shape[1] % self.t_downscale padding = x[:, -1:].repeat_interleave(n_pad, dim=1) x = torch.cat([x, padding], 1) x = apply_model_with_memblocks(self.encoder, x, self.parallel, self.show_progress_bar).movedim(2, 1) return self.process_out(x) def decode(self, x, **kwargs): x = x.unsqueeze(0) if x.ndim == 4 else x # [T, C, H, W] -> [1, T, C, H, W] x = x.movedim(1, 2) if x.shape[1] != self.latent_channels else x # [B, T, C, H, W] or [B, C, T, H, W] x = self.process_in(x).movedim(2, 1) # [B, C, T, H, W] -> [B, T, C, H, W] x = apply_model_with_memblocks(self.decoder, x, self.parallel, self.show_progress_bar) if self.patch_size > 1: x = F.pixel_shuffle(x, self.patch_size) return x[:, self.frames_to_trim:].movedim(2, 1)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/taesd/taehv.py", "license": "GNU General Public License v3.0", "lines": 168, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy_extras/nodes_dataset.py
import logging import os import json import numpy as np import torch from PIL import Image from typing_extensions import override import folder_paths import node_helpers from comfy_api.latest import ComfyExtension, io def load_and_process_images(image_files, input_dir): """Utility function to load and process a list of images. Args: image_files: List of image filenames input_dir: Base directory containing the images resize_method: How to handle images of different sizes ("None", "Stretch", "Crop", "Pad") Returns: torch.Tensor: Batch of processed images """ if not image_files: raise ValueError("No valid images found in input") output_images = [] for file in image_files: image_path = os.path.join(input_dir, file) img = node_helpers.pillow(Image.open, image_path) if img.mode == "I": img = img.point(lambda i: i * (1 / 255)) img = img.convert("RGB") img_array = np.array(img).astype(np.float32) / 255.0 img_tensor = torch.from_numpy(img_array)[None,] output_images.append(img_tensor) return output_images class LoadImageDataSetFromFolderNode(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="LoadImageDataSetFromFolder", display_name="Load Image Dataset from Folder", category="dataset", is_experimental=True, inputs=[ io.Combo.Input( "folder", options=folder_paths.get_input_subfolders(), tooltip="The folder to load images from.", ) ], outputs=[ io.Image.Output( display_name="images", is_output_list=True, tooltip="List of loaded images", ) ], ) @classmethod def execute(cls, folder): sub_input_dir = os.path.join(folder_paths.get_input_directory(), folder) valid_extensions = [".png", ".jpg", ".jpeg", ".webp"] image_files = [ f for f in os.listdir(sub_input_dir) if any(f.lower().endswith(ext) for ext in valid_extensions) ] output_tensor = load_and_process_images(image_files, sub_input_dir) return io.NodeOutput(output_tensor) class LoadImageTextDataSetFromFolderNode(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( 
node_id="LoadImageTextDataSetFromFolder", display_name="Load Image and Text Dataset from Folder", category="dataset", is_experimental=True, inputs=[ io.Combo.Input( "folder", options=folder_paths.get_input_subfolders(), tooltip="The folder to load images from.", ) ], outputs=[ io.Image.Output( display_name="images", is_output_list=True, tooltip="List of loaded images", ), io.String.Output( display_name="texts", is_output_list=True, tooltip="List of text captions", ), ], ) @classmethod def execute(cls, folder): logging.info(f"Loading images from folder: {folder}") sub_input_dir = os.path.join(folder_paths.get_input_directory(), folder) valid_extensions = [".png", ".jpg", ".jpeg", ".webp"] image_files = [] for item in os.listdir(sub_input_dir): path = os.path.join(sub_input_dir, item) if any(item.lower().endswith(ext) for ext in valid_extensions): image_files.append(path) elif os.path.isdir(path): # Support kohya-ss/sd-scripts folder structure repeat = 1 if item.split("_")[0].isdigit(): repeat = int(item.split("_")[0]) image_files.extend( [ os.path.join(path, f) for f in os.listdir(path) if any(f.lower().endswith(ext) for ext in valid_extensions) ] * repeat ) caption_file_path = [ f.replace(os.path.splitext(f)[1], ".txt") for f in image_files ] captions = [] for caption_file in caption_file_path: caption_path = os.path.join(sub_input_dir, caption_file) if os.path.exists(caption_path): with open(caption_path, "r", encoding="utf-8") as f: caption = f.read().strip() captions.append(caption) else: captions.append("") output_tensor = load_and_process_images(image_files, sub_input_dir) logging.info(f"Loaded {len(output_tensor)} images from {sub_input_dir}.") return io.NodeOutput(output_tensor, captions) def save_images_to_folder(image_list, output_dir, prefix="image"): """Utility function to save a list of image tensors to disk. 
Args: image_list: List of image tensors (each [1, H, W, C] or [H, W, C] or [C, H, W]) output_dir: Directory to save images to prefix: Filename prefix Returns: List of saved filenames """ os.makedirs(output_dir, exist_ok=True) saved_files = [] for idx, img_tensor in enumerate(image_list): # Handle different tensor shapes if isinstance(img_tensor, torch.Tensor): # Remove batch dimension if present [1, H, W, C] -> [H, W, C] if img_tensor.dim() == 4 and img_tensor.shape[0] == 1: img_tensor = img_tensor.squeeze(0) # If tensor is [C, H, W], permute to [H, W, C] if img_tensor.dim() == 3 and img_tensor.shape[0] in [1, 3, 4]: if ( img_tensor.shape[0] <= 4 and img_tensor.shape[1] > 4 and img_tensor.shape[2] > 4 ): img_tensor = img_tensor.permute(1, 2, 0) # Convert to numpy and scale to 0-255 img_array = img_tensor.cpu().numpy() img_array = np.clip(img_array * 255.0, 0, 255).astype(np.uint8) # Convert to PIL Image img = Image.fromarray(img_array) else: raise ValueError(f"Expected torch.Tensor, got {type(img_tensor)}") # Save image filename = f"{prefix}_{idx:05d}.png" filepath = os.path.join(output_dir, filename) img.save(filepath) saved_files.append(filename) return saved_files class SaveImageDataSetToFolderNode(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="SaveImageDataSetToFolder", display_name="Save Image Dataset to Folder", category="dataset", is_experimental=True, is_output_node=True, is_input_list=True, # Receive images as list inputs=[ io.Image.Input("images", tooltip="List of images to save."), io.String.Input( "folder_name", default="dataset", tooltip="Name of the folder to save images to (inside output directory).", ), io.String.Input( "filename_prefix", default="image", tooltip="Prefix for saved image filenames.", advanced=True, ), ], outputs=[], ) @classmethod def execute(cls, images, folder_name, filename_prefix): # Extract scalar values folder_name = folder_name[0] filename_prefix = filename_prefix[0] output_dir = 
os.path.join(folder_paths.get_output_directory(), folder_name) saved_files = save_images_to_folder(images, output_dir, filename_prefix) logging.info(f"Saved {len(saved_files)} images to {output_dir}.") return io.NodeOutput() class SaveImageTextDataSetToFolderNode(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="SaveImageTextDataSetToFolder", display_name="Save Image and Text Dataset to Folder", category="dataset", is_experimental=True, is_output_node=True, is_input_list=True, # Receive both images and texts as lists inputs=[ io.Image.Input("images", tooltip="List of images to save."), io.String.Input("texts", tooltip="List of text captions to save."), io.String.Input( "folder_name", default="dataset", tooltip="Name of the folder to save images to (inside output directory).", ), io.String.Input( "filename_prefix", default="image", tooltip="Prefix for saved image filenames.", advanced=True, ), ], outputs=[], ) @classmethod def execute(cls, images, texts, folder_name, filename_prefix): # Extract scalar values folder_name = folder_name[0] filename_prefix = filename_prefix[0] output_dir = os.path.join(folder_paths.get_output_directory(), folder_name) saved_files = save_images_to_folder(images, output_dir, filename_prefix) # Save captions for idx, (filename, caption) in enumerate(zip(saved_files, texts)): caption_filename = filename.replace(".png", ".txt") caption_path = os.path.join(output_dir, caption_filename) with open(caption_path, "w", encoding="utf-8") as f: f.write(caption) logging.info(f"Saved {len(saved_files)} images and captions to {output_dir}.") return io.NodeOutput() # ========== Helper Functions for Transform Nodes ========== def tensor_to_pil(img_tensor): """Convert tensor to PIL Image.""" if img_tensor.dim() == 4 and img_tensor.shape[0] == 1: img_tensor = img_tensor.squeeze(0) img_array = (img_tensor.cpu().numpy() * 255).clip(0, 255).astype(np.uint8) return Image.fromarray(img_array) def pil_to_tensor(img): """Convert PIL 
Image to tensor.""" img_array = np.array(img).astype(np.float32) / 255.0 return torch.from_numpy(img_array)[None,] # ========== Base Classes for Transform Nodes ========== class ImageProcessingNode(io.ComfyNode): """Base class for image processing nodes that operate on images. Child classes should set: node_id: Unique node identifier (required) display_name: Display name (optional, defaults to node_id) description: Node description (optional) extra_inputs: List of additional io.Input objects beyond "images" (optional) is_group_process: None (auto-detect), True (group), or False (individual) (optional) is_output_list: True (list output) or False (single output) (optional, default True) Child classes must implement ONE of: _process(cls, image, **kwargs) -> tensor (for single-item processing) _group_process(cls, images, **kwargs) -> list[tensor] (for group processing) """ node_id = None display_name = None description = None extra_inputs = [] is_group_process = None # None = auto-detect, True/False = explicit is_output_list = None # None = auto-detect based on processing mode @classmethod def _detect_processing_mode(cls): """Detect whether this node uses group or individual processing. 
Returns: bool: True if group processing, False if individual processing """ # Explicit setting takes precedence if cls.is_group_process is not None: return cls.is_group_process # Check which method is overridden by looking at the defining class in MRO base_class = ImageProcessingNode # Find which class in MRO defines _process process_definer = None for klass in cls.__mro__: if "_process" in klass.__dict__: process_definer = klass break # Find which class in MRO defines _group_process group_definer = None for klass in cls.__mro__: if "_group_process" in klass.__dict__: group_definer = klass break # Check what was overridden (not defined in base class) has_process = process_definer is not None and process_definer is not base_class has_group = group_definer is not None and group_definer is not base_class if has_process and has_group: raise ValueError( f"{cls.__name__}: Cannot override both _process and _group_process. " "Override only one, or set is_group_process explicitly." ) if not has_process and not has_group: raise ValueError( f"{cls.__name__}: Must override either _process or _group_process" ) return has_group @classmethod def define_schema(cls): if cls.node_id is None: raise NotImplementedError(f"{cls.__name__} must set node_id class variable") is_group = cls._detect_processing_mode() # Auto-detect is_output_list if not explicitly set # Single processing: False (backend collects results into list) # Group processing: True by default (can be False for single-output nodes) output_is_list = ( cls.is_output_list if cls.is_output_list is not None else is_group ) inputs = [ io.Image.Input( "images", tooltip=( "List of images to process." if is_group else "Image to process." 
), ) ] inputs.extend(cls.extra_inputs) return io.Schema( node_id=cls.node_id, display_name=cls.display_name or cls.node_id, category="dataset/image", is_experimental=True, is_input_list=is_group, # True for group, False for individual inputs=inputs, outputs=[ io.Image.Output( display_name="images", is_output_list=output_is_list, tooltip="Processed images", ) ], ) @classmethod def execute(cls, images, **kwargs): """Execute the node. Routes to _process or _group_process based on mode.""" is_group = cls._detect_processing_mode() # Extract scalar values from lists for parameters params = {} for k, v in kwargs.items(): if isinstance(v, list) and len(v) == 1: params[k] = v[0] else: params[k] = v if is_group: # Group processing: images is list, call _group_process result = cls._group_process(images, **params) else: # Individual processing: images is single item, call _process result = cls._process(images, **params) return io.NodeOutput(result) @classmethod def _process(cls, image, **kwargs): """Override this method for single-item processing. Args: image: tensor - Single image tensor **kwargs: Additional parameters (already extracted from lists) Returns: tensor - Processed image """ raise NotImplementedError(f"{cls.__name__} must implement _process method") @classmethod def _group_process(cls, images, **kwargs): """Override this method for group processing. Args: images: list[tensor] - List of image tensors **kwargs: Additional parameters (already extracted from lists) Returns: list[tensor] - Processed images """ raise NotImplementedError( f"{cls.__name__} must implement _group_process method" ) class TextProcessingNode(io.ComfyNode): """Base class for text processing nodes that operate on texts. 
Child classes should set: node_id: Unique node identifier (required) display_name: Display name (optional, defaults to node_id) description: Node description (optional) extra_inputs: List of additional io.Input objects beyond "texts" (optional) is_group_process: None (auto-detect), True (group), or False (individual) (optional) is_output_list: True (list output) or False (single output) (optional, default True) Child classes must implement ONE of: _process(cls, text, **kwargs) -> str (for single-item processing) _group_process(cls, texts, **kwargs) -> list[str] (for group processing) """ node_id = None display_name = None description = None extra_inputs = [] is_group_process = None # None = auto-detect, True/False = explicit is_output_list = None # None = auto-detect based on processing mode @classmethod def _detect_processing_mode(cls): """Detect whether this node uses group or individual processing. Returns: bool: True if group processing, False if individual processing """ # Explicit setting takes precedence if cls.is_group_process is not None: return cls.is_group_process # Check which method is overridden by looking at the defining class in MRO base_class = TextProcessingNode # Find which class in MRO defines _process process_definer = None for klass in cls.__mro__: if "_process" in klass.__dict__: process_definer = klass break # Find which class in MRO defines _group_process group_definer = None for klass in cls.__mro__: if "_group_process" in klass.__dict__: group_definer = klass break # Check what was overridden (not defined in base class) has_process = process_definer is not None and process_definer is not base_class has_group = group_definer is not None and group_definer is not base_class if has_process and has_group: raise ValueError( f"{cls.__name__}: Cannot override both _process and _group_process. " "Override only one, or set is_group_process explicitly." 
) if not has_process and not has_group: raise ValueError( f"{cls.__name__}: Must override either _process or _group_process" ) return has_group @classmethod def define_schema(cls): if cls.node_id is None: raise NotImplementedError(f"{cls.__name__} must set node_id class variable") is_group = cls._detect_processing_mode() inputs = [ io.String.Input( "texts", tooltip="List of texts to process." if is_group else "Text to process.", ) ] inputs.extend(cls.extra_inputs) return io.Schema( node_id=cls.node_id, display_name=cls.display_name or cls.node_id, category="dataset/text", is_experimental=True, is_input_list=is_group, # True for group, False for individual inputs=inputs, outputs=[ io.String.Output( display_name="texts", is_output_list=cls.is_output_list, tooltip="Processed texts", ) ], ) @classmethod def execute(cls, texts, **kwargs): """Execute the node. Routes to _process or _group_process based on mode.""" is_group = cls._detect_processing_mode() # Extract scalar values from lists for parameters params = {} for k, v in kwargs.items(): if isinstance(v, list) and len(v) == 1: params[k] = v[0] else: params[k] = v if is_group: # Group processing: texts is list, call _group_process result = cls._group_process(texts, **params) else: # Individual processing: texts is single item, call _process result = cls._process(texts, **params) # Wrap result based on is_output_list if cls.is_output_list: # Result should already be a list (or will be for individual) return io.NodeOutput(result if is_group else [result]) else: # Single output - wrap in list for NodeOutput return io.NodeOutput([result]) @classmethod def _process(cls, text, **kwargs): """Override this method for single-item processing. 
Args: text: str - Single text string **kwargs: Additional parameters (already extracted from lists) Returns: str - Processed text """ raise NotImplementedError(f"{cls.__name__} must implement _process method") @classmethod def _group_process(cls, texts, **kwargs): """Override this method for group processing. Args: texts: list[str] - List of text strings **kwargs: Additional parameters (already extracted from lists) Returns: list[str] - Processed texts """ raise NotImplementedError( f"{cls.__name__} must implement _group_process method" ) # ========== Image Transform Nodes ========== class ResizeImagesByShorterEdgeNode(ImageProcessingNode): node_id = "ResizeImagesByShorterEdge" display_name = "Resize Images by Shorter Edge" description = "Resize images so that the shorter edge matches the specified length while preserving aspect ratio." extra_inputs = [ io.Int.Input( "shorter_edge", default=512, min=1, max=8192, tooltip="Target length for the shorter edge.", ), ] @classmethod def _process(cls, image, shorter_edge): img = tensor_to_pil(image) w, h = img.size if w < h: new_w = shorter_edge new_h = int(h * (shorter_edge / w)) else: new_h = shorter_edge new_w = int(w * (shorter_edge / h)) img = img.resize((new_w, new_h), Image.Resampling.LANCZOS) return pil_to_tensor(img) class ResizeImagesByLongerEdgeNode(ImageProcessingNode): node_id = "ResizeImagesByLongerEdge" display_name = "Resize Images by Longer Edge" description = "Resize images so that the longer edge matches the specified length while preserving aspect ratio." 
extra_inputs = [ io.Int.Input( "longer_edge", default=1024, min=1, max=8192, tooltip="Target length for the longer edge.", ), ] @classmethod def _process(cls, image, longer_edge): resized_images = [] for image_i in image: img = tensor_to_pil(image_i) w, h = img.size if w > h: new_w = longer_edge new_h = int(h * (longer_edge / w)) else: new_h = longer_edge new_w = int(w * (longer_edge / h)) img = img.resize((new_w, new_h), Image.Resampling.LANCZOS) resized_images.append(pil_to_tensor(img)) return torch.cat(resized_images, dim=0) class CenterCropImagesNode(ImageProcessingNode): node_id = "CenterCropImages" display_name = "Center Crop Images" description = "Center crop all images to the specified dimensions." extra_inputs = [ io.Int.Input("width", default=512, min=1, max=8192, tooltip="Crop width."), io.Int.Input("height", default=512, min=1, max=8192, tooltip="Crop height."), ] @classmethod def _process(cls, image, width, height): img = tensor_to_pil(image) left = max(0, (img.width - width) // 2) top = max(0, (img.height - height) // 2) right = min(img.width, left + width) bottom = min(img.height, top + height) img = img.crop((left, top, right, bottom)) return pil_to_tensor(img) class RandomCropImagesNode(ImageProcessingNode): node_id = "RandomCropImages" display_name = "Random Crop Images" description = ( "Randomly crop all images to the specified dimensions (for data augmentation)." ) extra_inputs = [ io.Int.Input("width", default=512, min=1, max=8192, tooltip="Crop width."), io.Int.Input("height", default=512, min=1, max=8192, tooltip="Crop height."), io.Int.Input( "seed", default=0, min=0, max=0xFFFFFFFFFFFFFFFF, tooltip="Random seed." 
), ] @classmethod def _process(cls, image, width, height, seed): np.random.seed(seed % (2**32 - 1)) img = tensor_to_pil(image) max_left = max(0, img.width - width) max_top = max(0, img.height - height) left = np.random.randint(0, max_left + 1) if max_left > 0 else 0 top = np.random.randint(0, max_top + 1) if max_top > 0 else 0 right = min(img.width, left + width) bottom = min(img.height, top + height) img = img.crop((left, top, right, bottom)) return pil_to_tensor(img) class NormalizeImagesNode(ImageProcessingNode): node_id = "NormalizeImages" display_name = "Normalize Images" description = "Normalize images using mean and standard deviation." extra_inputs = [ io.Float.Input( "mean", default=0.5, min=0.0, max=1.0, tooltip="Mean value for normalization.", advanced=True, ), io.Float.Input( "std", default=0.5, min=0.001, max=1.0, tooltip="Standard deviation for normalization.", advanced=True, ), ] @classmethod def _process(cls, image, mean, std): return (image - mean) / std class AdjustBrightnessNode(ImageProcessingNode): node_id = "AdjustBrightness" display_name = "Adjust Brightness" description = "Adjust brightness of all images." extra_inputs = [ io.Float.Input( "factor", default=1.0, min=0.0, max=2.0, tooltip="Brightness factor. 1.0 = no change, <1.0 = darker, >1.0 = brighter.", ), ] @classmethod def _process(cls, image, factor): return (image * factor).clamp(0.0, 1.0) class AdjustContrastNode(ImageProcessingNode): node_id = "AdjustContrast" display_name = "Adjust Contrast" description = "Adjust contrast of all images." extra_inputs = [ io.Float.Input( "factor", default=1.0, min=0.0, max=2.0, tooltip="Contrast factor. 1.0 = no change, <1.0 = less contrast, >1.0 = more contrast.", ), ] @classmethod def _process(cls, image, factor): return ((image - 0.5) * factor + 0.5).clamp(0.0, 1.0) class ShuffleDatasetNode(ImageProcessingNode): node_id = "ShuffleDataset" display_name = "Shuffle Image Dataset" description = "Randomly shuffle the order of images in the dataset." 
is_group_process = True # Requires full list to shuffle extra_inputs = [ io.Int.Input( "seed", default=0, min=0, max=0xFFFFFFFFFFFFFFFF, tooltip="Random seed." ), ] @classmethod def _group_process(cls, images, seed): np.random.seed(seed % (2**32 - 1)) indices = np.random.permutation(len(images)) return [images[i] for i in indices] class ShuffleImageTextDatasetNode(io.ComfyNode): """Special node that shuffles both images and texts together.""" @classmethod def define_schema(cls): return io.Schema( node_id="ShuffleImageTextDataset", display_name="Shuffle Image-Text Dataset", category="dataset/image", is_experimental=True, is_input_list=True, inputs=[ io.Image.Input("images", tooltip="List of images to shuffle."), io.String.Input("texts", tooltip="List of texts to shuffle."), io.Int.Input( "seed", default=0, min=0, max=0xFFFFFFFFFFFFFFFF, tooltip="Random seed.", ), ], outputs=[ io.Image.Output( display_name="images", is_output_list=True, tooltip="Shuffled images", ), io.String.Output( display_name="texts", is_output_list=True, tooltip="Shuffled texts" ), ], ) @classmethod def execute(cls, images, texts, seed): seed = seed[0] # Extract scalar np.random.seed(seed % (2**32 - 1)) indices = np.random.permutation(len(images)) shuffled_images = [images[i] for i in indices] shuffled_texts = [texts[i] for i in indices] return io.NodeOutput(shuffled_images, shuffled_texts) # ========== Text Transform Nodes ========== class TextToLowercaseNode(TextProcessingNode): node_id = "TextToLowercase" display_name = "Text to Lowercase" description = "Convert all texts to lowercase." @classmethod def _process(cls, text): return text.lower() class TextToUppercaseNode(TextProcessingNode): node_id = "TextToUppercase" display_name = "Text to Uppercase" description = "Convert all texts to uppercase." 
@classmethod def _process(cls, text): return text.upper() class TruncateTextNode(TextProcessingNode): node_id = "TruncateText" display_name = "Truncate Text" description = "Truncate all texts to a maximum length." extra_inputs = [ io.Int.Input( "max_length", default=77, min=1, max=10000, tooltip="Maximum text length." ), ] @classmethod def _process(cls, text, max_length): return text[:max_length] class AddTextPrefixNode(TextProcessingNode): node_id = "AddTextPrefix" display_name = "Add Text Prefix" description = "Add a prefix to all texts." extra_inputs = [ io.String.Input("prefix", default="", tooltip="Prefix to add."), ] @classmethod def _process(cls, text, prefix): return prefix + text class AddTextSuffixNode(TextProcessingNode): node_id = "AddTextSuffix" display_name = "Add Text Suffix" description = "Add a suffix to all texts." extra_inputs = [ io.String.Input("suffix", default="", tooltip="Suffix to add."), ] @classmethod def _process(cls, text, suffix): return text + suffix class ReplaceTextNode(TextProcessingNode): node_id = "ReplaceText" display_name = "Replace Text" description = "Replace text in all texts." extra_inputs = [ io.String.Input("find", default="", tooltip="Text to find."), io.String.Input("replace", default="", tooltip="Text to replace with."), ] @classmethod def _process(cls, text, find, replace): return text.replace(find, replace) class StripWhitespaceNode(TextProcessingNode): node_id = "StripWhitespace" display_name = "Strip Whitespace" description = "Strip leading and trailing whitespace from all texts." @classmethod def _process(cls, text): return text.strip() # ========== Group Processing Example Nodes ========== class ImageDeduplicationNode(ImageProcessingNode): """Remove duplicate or very similar images from the dataset using perceptual hashing.""" node_id = "ImageDeduplication" display_name = "Image Deduplication" description = "Remove duplicate or very similar images from the dataset." 
is_group_process = True # Requires full list to compare images extra_inputs = [ io.Float.Input( "similarity_threshold", default=0.95, min=0.0, max=1.0, tooltip="Similarity threshold (0-1). Higher means more similar. Images above this threshold are considered duplicates.", advanced=True, ), ] @classmethod def _group_process(cls, images, similarity_threshold): """Remove duplicate images using perceptual hashing.""" if len(images) == 0: return [] # Compute simple perceptual hash for each image def compute_hash(img_tensor): """Compute a simple perceptual hash by resizing to 8x8 and comparing to average.""" img = tensor_to_pil(img_tensor) # Resize to 8x8 img_small = img.resize((8, 8), Image.Resampling.LANCZOS).convert("L") # Get pixels pixels = list(img_small.getdata()) # Compute average avg = sum(pixels) / len(pixels) # Create hash (1 if above average, 0 otherwise) hash_bits = "".join("1" if p > avg else "0" for p in pixels) return hash_bits def hamming_distance(hash1, hash2): """Compute Hamming distance between two hash strings.""" return sum(c1 != c2 for c1, c2 in zip(hash1, hash2)) # Compute hashes for all images hashes = [compute_hash(img) for img in images] # Find duplicates keep_indices = [] for i in range(len(images)): is_duplicate = False for j in keep_indices: # Compare hashes distance = hamming_distance(hashes[i], hashes[j]) similarity = 1.0 - (distance / 64.0) # 64 bits total if similarity >= similarity_threshold: is_duplicate = True logging.info( f"Image {i} is similar to image {j} (similarity: {similarity:.3f}), skipping" ) break if not is_duplicate: keep_indices.append(i) # Return only unique images unique_images = [images[i] for i in keep_indices] logging.info( f"Deduplication: kept {len(unique_images)} out of {len(images)} images" ) return unique_images class ImageGridNode(ImageProcessingNode): """Combine multiple images into a single grid/collage.""" node_id = "ImageGrid" display_name = "Image Grid" description = "Arrange multiple images into a grid 
layout." is_group_process = True # Requires full list to create grid is_output_list = False # Outputs single grid image extra_inputs = [ io.Int.Input( "columns", default=4, min=1, max=20, tooltip="Number of columns in the grid.", ), io.Int.Input( "cell_width", default=256, min=32, max=2048, tooltip="Width of each cell in the grid.", advanced=True, ), io.Int.Input( "cell_height", default=256, min=32, max=2048, tooltip="Height of each cell in the grid.", advanced=True, ), io.Int.Input( "padding", default=4, min=0, max=50, tooltip="Padding between images.", advanced=True ), ] @classmethod def _group_process(cls, images, columns, cell_width, cell_height, padding): """Arrange images into a grid.""" if len(images) == 0: raise ValueError("Cannot create grid from empty image list") # Calculate grid dimensions num_images = len(images) rows = (num_images + columns - 1) // columns # Ceiling division # Calculate total grid size grid_width = columns * cell_width + (columns - 1) * padding grid_height = rows * cell_height + (rows - 1) * padding # Create blank grid grid = Image.new("RGB", (grid_width, grid_height), (0, 0, 0)) # Place images for idx, img_tensor in enumerate(images): row = idx // columns col = idx % columns # Convert to PIL and resize to cell size img = tensor_to_pil(img_tensor) img = img.resize((cell_width, cell_height), Image.Resampling.LANCZOS) # Calculate position x = col * (cell_width + padding) y = row * (cell_height + padding) # Paste into grid grid.paste(img, (x, y)) logging.info( f"Created {columns}x{rows} grid with {num_images} images ({grid_width}x{grid_height})" ) return pil_to_tensor(grid) class MergeImageListsNode(ImageProcessingNode): """Merge multiple image lists into a single list.""" node_id = "MergeImageLists" display_name = "Merge Image Lists" description = "Concatenate multiple image lists into one." 
is_group_process = True # Receives images as list @classmethod def _group_process(cls, images): """Simply return the images list (already merged by input handling).""" # When multiple list inputs are connected, they're concatenated # For now, this is a simple pass-through logging.info(f"Merged image list contains {len(images)} images") return images class MergeTextListsNode(TextProcessingNode): """Merge multiple text lists into a single list.""" node_id = "MergeTextLists" display_name = "Merge Text Lists" description = "Concatenate multiple text lists into one." is_group_process = True # Receives texts as list @classmethod def _group_process(cls, texts): """Simply return the texts list (already merged by input handling).""" # When multiple list inputs are connected, they're concatenated # For now, this is a simple pass-through logging.info(f"Merged text list contains {len(texts)} texts") return texts # ========== Training Dataset Nodes ========== class ResolutionBucket(io.ComfyNode): """Bucket latents and conditions by resolution for efficient batch training.""" @classmethod def define_schema(cls): return io.Schema( node_id="ResolutionBucket", display_name="Resolution Bucket", category="dataset", is_experimental=True, is_input_list=True, inputs=[ io.Latent.Input( "latents", tooltip="List of latent dicts to bucket by resolution.", ), io.Conditioning.Input( "conditioning", tooltip="List of conditioning lists (must match latents length).", ), ], outputs=[ io.Latent.Output( display_name="latents", is_output_list=True, tooltip="List of batched latent dicts, one per resolution bucket.", ), io.Conditioning.Output( display_name="conditioning", is_output_list=True, tooltip="List of condition lists, one per resolution bucket.", ), ], ) @classmethod def execute(cls, latents, conditioning): # latents: list[{"samples": tensor}] where tensor is (B, C, H, W), typically B=1 # conditioning: list[list[cond]] # Validate lengths match if len(latents) != len(conditioning): raise 
ValueError( f"Number of latents ({len(latents)}) does not match number of conditions ({len(conditioning)})." ) # Flatten latents and conditions to individual samples flat_latents = [] # list of (C, H, W) tensors flat_conditions = [] # list of condition lists for latent_dict, cond in zip(latents, conditioning): samples = latent_dict["samples"] # (B, C, H, W) batch_size = samples.shape[0] # cond is a list of conditions with length == batch_size for i in range(batch_size): flat_latents.append(samples[i]) # (C, H, W) flat_conditions.append(cond[i]) # single condition # Group by resolution (H, W) buckets = {} # (H, W) -> {"latents": list, "conditions": list} for latent, cond in zip(flat_latents, flat_conditions): # latent shape is (..., H, W) (B, C, H, W) or (B, T, C, H ,W) h, w = latent.shape[-2], latent.shape[-1] key = (h, w) if key not in buckets: buckets[key] = {"latents": [], "conditions": []} buckets[key]["latents"].append(latent) buckets[key]["conditions"].append(cond) # Convert buckets to output format output_latents = [] # list[{"samples": tensor}] where tensor is (Bi, ..., H, W) output_conditions = [] # list[list[cond]] where each inner list has Bi conditions for (h, w), bucket_data in buckets.items(): # Stack latents into batch: list of (..., H, W) -> (Bi, ..., H, W) stacked_latents = torch.stack(bucket_data["latents"], dim=0) output_latents.append({"samples": stacked_latents}) # Conditions stay as list of condition lists output_conditions.append(bucket_data["conditions"]) logging.info( f"Resolution bucket ({h}x{w}): {len(bucket_data['latents'])} samples" ) logging.info(f"Created {len(buckets)} resolution buckets from {len(flat_latents)} samples") return io.NodeOutput(output_latents, output_conditions) class MakeTrainingDataset(io.ComfyNode): """Encode images with VAE and texts with CLIP to create a training dataset.""" @classmethod def define_schema(cls): return io.Schema( node_id="MakeTrainingDataset", search_aliases=["encode dataset"], display_name="Make 
Training Dataset", category="dataset", is_experimental=True, is_input_list=True, # images and texts as lists inputs=[ io.Image.Input("images", tooltip="List of images to encode."), io.Vae.Input( "vae", tooltip="VAE model for encoding images to latents." ), io.Clip.Input( "clip", tooltip="CLIP model for encoding text to conditioning." ), io.String.Input( "texts", optional=True, tooltip="List of text captions. Can be length n (matching images), 1 (repeated for all), or omitted (uses empty string).", ), ], outputs=[ io.Latent.Output( display_name="latents", is_output_list=True, tooltip="List of latent dicts", ), io.Conditioning.Output( display_name="conditioning", is_output_list=True, tooltip="List of conditioning lists", ), ], ) @classmethod def execute(cls, images, vae, clip, texts=None): # Extract scalars (vae and clip are single values wrapped in lists) vae = vae[0] clip = clip[0] # Handle text list num_images = len(images) if texts is None or len(texts) == 0: # Treat as [""] for unconditional training texts = [""] if len(texts) == 1 and num_images > 1: # Repeat single text for all images texts = texts * num_images elif len(texts) != num_images: raise ValueError( f"Number of texts ({len(texts)}) does not match number of images ({num_images}). " f"Text list should have length {num_images}, 1, or 0." 
) # Encode images with VAE logging.info(f"Encoding {num_images} images with VAE...") latents_list = [] # list[{"samples": tensor}] for img_tensor in images: # img_tensor is [1, H, W, 3] latent_tensor = vae.encode(img_tensor[:, :, :, :3]) latents_list.append({"samples": latent_tensor}) # Encode texts with CLIP logging.info(f"Encoding {len(texts)} texts with CLIP...") conditioning_list = [] # list[list[cond]] for text in texts: if text == "": cond = clip.encode_from_tokens_scheduled(clip.tokenize("")) else: tokens = clip.tokenize(text) cond = clip.encode_from_tokens_scheduled(tokens) conditioning_list.append(cond) logging.info( f"Created dataset with {len(latents_list)} latents and {len(conditioning_list)} conditioning." ) return io.NodeOutput(latents_list, conditioning_list) class SaveTrainingDataset(io.ComfyNode): """Save encoded training dataset (latents + conditioning) to disk.""" @classmethod def define_schema(cls): return io.Schema( node_id="SaveTrainingDataset", search_aliases=["export training data"], display_name="Save Training Dataset", category="dataset", is_experimental=True, is_output_node=True, is_input_list=True, # Receive lists inputs=[ io.Latent.Input( "latents", tooltip="List of latent dicts from MakeTrainingDataset.", ), io.Conditioning.Input( "conditioning", tooltip="List of conditioning lists from MakeTrainingDataset.", ), io.String.Input( "folder_name", default="training_dataset", tooltip="Name of folder to save dataset (inside output directory).", ), io.Int.Input( "shard_size", default=1000, min=1, max=100000, tooltip="Number of samples per shard file.", advanced=True, ), ], outputs=[], ) @classmethod def execute(cls, latents, conditioning, folder_name, shard_size): # Extract scalars folder_name = folder_name[0] shard_size = shard_size[0] # latents: list[{"samples": tensor}] # conditioning: list[list[cond]] # Validate lengths match if len(latents) != len(conditioning): raise ValueError( f"Number of latents ({len(latents)}) does not match number 
of conditions ({len(conditioning)}). " f"Something went wrong in dataset preparation." ) # Create output directory output_dir = os.path.join(folder_paths.get_output_directory(), folder_name) os.makedirs(output_dir, exist_ok=True) # Prepare data pairs num_samples = len(latents) num_shards = (num_samples + shard_size - 1) // shard_size # Ceiling division logging.info( f"Saving {num_samples} samples to {num_shards} shards in {output_dir}..." ) # Save data in shards for shard_idx in range(num_shards): start_idx = shard_idx * shard_size end_idx = min(start_idx + shard_size, num_samples) # Get shard data (list of latent dicts and conditioning lists) shard_data = { "latents": latents[start_idx:end_idx], "conditioning": conditioning[start_idx:end_idx], } # Save shard shard_filename = f"shard_{shard_idx:04d}.pkl" shard_path = os.path.join(output_dir, shard_filename) with open(shard_path, "wb") as f: torch.save(shard_data, f) logging.info( f"Saved shard {shard_idx + 1}/{num_shards}: {shard_filename} ({end_idx - start_idx} samples)" ) # Save metadata metadata = { "num_samples": num_samples, "num_shards": num_shards, "shard_size": shard_size, } metadata_path = os.path.join(output_dir, "metadata.json") with open(metadata_path, "w") as f: json.dump(metadata, f, indent=2) logging.info(f"Successfully saved {num_samples} samples to {output_dir}.") return io.NodeOutput() class LoadTrainingDataset(io.ComfyNode): """Load encoded training dataset from disk.""" @classmethod def define_schema(cls): return io.Schema( node_id="LoadTrainingDataset", search_aliases=["import dataset", "training data"], display_name="Load Training Dataset", category="dataset", is_experimental=True, inputs=[ io.String.Input( "folder_name", default="training_dataset", tooltip="Name of folder containing the saved dataset (inside output directory).", ), ], outputs=[ io.Latent.Output( display_name="latents", is_output_list=True, tooltip="List of latent dicts", ), io.Conditioning.Output( display_name="conditioning", 
is_output_list=True, tooltip="List of conditioning lists", ), ], ) @classmethod def execute(cls, folder_name): # Get dataset directory dataset_dir = os.path.join(folder_paths.get_output_directory(), folder_name) if not os.path.exists(dataset_dir): raise ValueError(f"Dataset directory not found: {dataset_dir}") # Find all shard files shard_files = sorted( [ f for f in os.listdir(dataset_dir) if f.startswith("shard_") and f.endswith(".pkl") ] ) if not shard_files: raise ValueError(f"No shard files found in {dataset_dir}") logging.info(f"Loading {len(shard_files)} shards from {dataset_dir}...") # Load all shards all_latents = [] # list[{"samples": tensor}] all_conditioning = [] # list[list[cond]] for shard_file in shard_files: shard_path = os.path.join(dataset_dir, shard_file) with open(shard_path, "rb") as f: shard_data = torch.load(f) all_latents.extend(shard_data["latents"]) all_conditioning.extend(shard_data["conditioning"]) logging.info(f"Loaded {shard_file}: {len(shard_data['latents'])} samples") logging.info( f"Successfully loaded {len(all_latents)} samples from {dataset_dir}." 
) return io.NodeOutput(all_latents, all_conditioning) # ========== Extension Setup ========== class DatasetExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[io.ComfyNode]]: return [ # Data loading/saving nodes LoadImageDataSetFromFolderNode, LoadImageTextDataSetFromFolderNode, SaveImageDataSetToFolderNode, SaveImageTextDataSetToFolderNode, # Image transform nodes ResizeImagesByShorterEdgeNode, ResizeImagesByLongerEdgeNode, CenterCropImagesNode, RandomCropImagesNode, NormalizeImagesNode, AdjustBrightnessNode, AdjustContrastNode, ShuffleDatasetNode, ShuffleImageTextDatasetNode, # Text transform nodes TextToLowercaseNode, TextToUppercaseNode, TruncateTextNode, AddTextPrefixNode, AddTextSuffixNode, ReplaceTextNode, StripWhitespaceNode, # Group processing examples ImageDeduplicationNode, ImageGridNode, MergeImageListsNode, MergeTextListsNode, # Training dataset nodes MakeTrainingDataset, SaveTrainingDataset, LoadTrainingDataset, ResolutionBucket, ] async def comfy_entrypoint() -> DatasetExtension: return DatasetExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_dataset.py", "license": "GNU General Public License v3.0", "lines": 1285, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy/text_encoders/z_image.py
from transformers import Qwen2Tokenizer import comfy.text_encoders.llama from comfy import sd1_clip import os class Qwen3Tokenizer(sd1_clip.SDTokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "qwen25_tokenizer") super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory, embedding_size=2560, embedding_key='qwen3_4b', tokenizer_class=Qwen2Tokenizer, has_start_token=False, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, pad_token=151643, tokenizer_data=tokenizer_data) class ZImageTokenizer(sd1_clip.SD1Tokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name="qwen3_4b", tokenizer=Qwen3Tokenizer) self.llama_template = "<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n" def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None, **kwargs): if llama_template is None: llama_text = self.llama_template.format(text) else: llama_text = llama_template.format(text) tokens = super().tokenize_with_weights(llama_text, return_word_ids=return_word_ids, disable_weights=True, **kwargs) return tokens class Qwen3_4BModel(sd1_clip.SDClipModel): def __init__(self, device="cpu", layer="hidden", layer_idx=-2, dtype=None, attention_mask=True, model_options={}): super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Qwen3_4B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options) class ZImageTEModel(sd1_clip.SD1ClipModel): def __init__(self, device="cpu", dtype=None, model_options={}): super().__init__(device=device, dtype=dtype, name="qwen3_4b", clip_model=Qwen3_4BModel, 
model_options=model_options) def te(dtype_llama=None, llama_quantization_metadata=None): class ZImageTEModel_(ZImageTEModel): def __init__(self, device="cpu", dtype=None, model_options={}): if dtype_llama is not None: dtype = dtype_llama if llama_quantization_metadata is not None: model_options = model_options.copy() model_options["quantization_metadata"] = llama_quantization_metadata super().__init__(device=device, dtype=dtype, model_options=model_options) return ZImageTEModel_
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/text_encoders/z_image.py", "license": "GNU General Public License v3.0", "lines": 35, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:tests/execution/test_public_api.py
""" Tests for public ComfyAPI and ComfyAPISync functions. These tests verify that the public API methods work correctly in both sync and async contexts, ensuring that the sync wrapper generation (via get_type_hints() in async_to_sync.py) correctly handles string annotations from 'from __future__ import annotations'. """ import pytest import time import subprocess import torch from pytest import fixture from comfy_execution.graph_utils import GraphBuilder from tests.execution.test_execution import ComfyClient @pytest.mark.execution class TestPublicAPI: """Test suite for public ComfyAPI and ComfyAPISync methods.""" @fixture(scope="class", autouse=True) def _server(self, args_pytest): """Start ComfyUI server for testing.""" pargs = [ 'python', 'main.py', '--output-directory', args_pytest["output_dir"], '--listen', args_pytest["listen"], '--port', str(args_pytest["port"]), '--extra-model-paths-config', 'tests/execution/extra_model_paths.yaml', '--cpu', ] p = subprocess.Popen(pargs) yield p.kill() torch.cuda.empty_cache() @fixture(scope="class", autouse=True) def shared_client(self, args_pytest, _server): """Create shared client with connection retry.""" client = ComfyClient() n_tries = 5 for i in range(n_tries): time.sleep(4) try: client.connect(listen=args_pytest["listen"], port=args_pytest["port"]) break except ConnectionRefusedError: if i == n_tries - 1: raise yield client del client torch.cuda.empty_cache() @fixture def client(self, shared_client, request): """Set test name for each test.""" shared_client.set_test_name(f"public_api[{request.node.name}]") yield shared_client @fixture def builder(self, request): """Create GraphBuilder for each test.""" yield GraphBuilder(prefix=request.node.name) def test_sync_progress_update_executes(self, client: ComfyClient, builder: GraphBuilder): """Test that TestSyncProgressUpdate executes without errors. 
This test validates that api_sync.execution.set_progress() works correctly, which is the primary code path fixed by adding get_type_hints() to async_to_sync.py. """ g = builder image = g.node("StubImage", content="BLACK", height=256, width=256, batch_size=1) # Use TestSyncProgressUpdate with short sleep progress_node = g.node("TestSyncProgressUpdate", value=image.out(0), sleep_seconds=0.5) output = g.node("SaveImage", images=progress_node.out(0)) # Execute workflow result = client.run(g) # Verify execution assert result.did_run(progress_node), "Progress node should have executed" assert result.did_run(output), "Output node should have executed" # Verify output images = result.get_images(output) assert len(images) == 1, "Should have produced 1 image" def test_async_progress_update_executes(self, client: ComfyClient, builder: GraphBuilder): """Test that TestAsyncProgressUpdate executes without errors. This test validates that await api.execution.set_progress() works correctly in async contexts. """ g = builder image = g.node("StubImage", content="WHITE", height=256, width=256, batch_size=1) # Use TestAsyncProgressUpdate with short sleep progress_node = g.node("TestAsyncProgressUpdate", value=image.out(0), sleep_seconds=0.5) output = g.node("SaveImage", images=progress_node.out(0)) # Execute workflow result = client.run(g) # Verify execution assert result.did_run(progress_node), "Async progress node should have executed" assert result.did_run(output), "Output node should have executed" # Verify output images = result.get_images(output) assert len(images) == 1, "Should have produced 1 image" def test_sync_and_async_progress_together(self, client: ComfyClient, builder: GraphBuilder): """Test both sync and async progress updates in same workflow. This test ensures that both ComfyAPISync and ComfyAPI can coexist and work correctly in the same workflow execution. 
""" g = builder image1 = g.node("StubImage", content="BLACK", height=256, width=256, batch_size=1) image2 = g.node("StubImage", content="WHITE", height=256, width=256, batch_size=1) # Use both types of progress nodes sync_progress = g.node("TestSyncProgressUpdate", value=image1.out(0), sleep_seconds=0.3) async_progress = g.node("TestAsyncProgressUpdate", value=image2.out(0), sleep_seconds=0.3) # Create outputs output1 = g.node("SaveImage", images=sync_progress.out(0)) output2 = g.node("SaveImage", images=async_progress.out(0)) # Execute workflow result = client.run(g) # Both should execute successfully assert result.did_run(sync_progress), "Sync progress node should have executed" assert result.did_run(async_progress), "Async progress node should have executed" assert result.did_run(output1), "First output node should have executed" assert result.did_run(output2), "Second output node should have executed" # Verify outputs images1 = result.get_images(output1) images2 = result.get_images(output2) assert len(images1) == 1, "Should have produced 1 image from sync node" assert len(images2) == 1, "Should have produced 1 image from async node"
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "tests/execution/test_public_api.py", "license": "GNU General Public License v3.0", "lines": 126, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Comfy-Org/ComfyUI:comfy/ldm/hunyuan_video/upsampler.py
import torch import torch.nn as nn import torch.nn.functional as F from comfy.ldm.modules.diffusionmodules.model import ResnetBlock, VideoConv3d from comfy.ldm.hunyuan_video.vae_refiner import RMS_norm import comfy.model_management import comfy.model_patcher class SRResidualCausalBlock3D(nn.Module): def __init__(self, channels: int): super().__init__() self.block = nn.Sequential( VideoConv3d(channels, channels, kernel_size=3), nn.SiLU(inplace=True), VideoConv3d(channels, channels, kernel_size=3), nn.SiLU(inplace=True), VideoConv3d(channels, channels, kernel_size=3), ) def forward(self, x: torch.Tensor) -> torch.Tensor: return x + self.block(x) class SRModel3DV2(nn.Module): def __init__( self, in_channels: int, out_channels: int, hidden_channels: int = 64, num_blocks: int = 6, global_residual: bool = False, ): super().__init__() self.in_conv = VideoConv3d(in_channels, hidden_channels, kernel_size=3) self.blocks = nn.ModuleList([SRResidualCausalBlock3D(hidden_channels) for _ in range(num_blocks)]) self.out_conv = VideoConv3d(hidden_channels, out_channels, kernel_size=3) self.global_residual = bool(global_residual) def forward(self, x: torch.Tensor) -> torch.Tensor: residual = x y = self.in_conv(x) for blk in self.blocks: y = blk(y) y = self.out_conv(y) if self.global_residual and (y.shape == residual.shape): y = y + residual return y class Upsampler(nn.Module): def __init__( self, z_channels: int, out_channels: int, block_out_channels: tuple[int, ...], num_res_blocks: int = 2, ): super().__init__() self.num_res_blocks = num_res_blocks self.block_out_channels = block_out_channels self.z_channels = z_channels ch = block_out_channels[0] self.conv_in = VideoConv3d(z_channels, ch, kernel_size=3) self.up = nn.ModuleList() for i, tgt in enumerate(block_out_channels): stage = nn.Module() stage.block = nn.ModuleList([ResnetBlock(in_channels=ch if j == 0 else tgt, out_channels=tgt, temb_channels=0, conv_shortcut=False, conv_op=VideoConv3d, norm_op=RMS_norm) for j in 
range(num_res_blocks + 1)]) ch = tgt self.up.append(stage) self.norm_out = RMS_norm(ch) self.conv_out = VideoConv3d(ch, out_channels, kernel_size=3) def forward(self, z): """ Args: z: (B, C, T, H, W) target_shape: (H, W) """ # z to block_in repeats = self.block_out_channels[0] // (self.z_channels) x = self.conv_in(z) + z.repeat_interleave(repeats=repeats, dim=1) # upsampling for stage in self.up: for blk in stage.block: x = blk(x) out = self.conv_out(F.silu(self.norm_out(x))) return out UPSAMPLERS = { "720p": SRModel3DV2, "1080p": Upsampler, } class HunyuanVideo15SRModel(): def __init__(self, model_type, config): self.load_device = comfy.model_management.vae_device() offload_device = comfy.model_management.vae_offload_device() self.dtype = comfy.model_management.vae_dtype(self.load_device) self.model_class = UPSAMPLERS.get(model_type) self.model = self.model_class(**config).eval() self.patcher = comfy.model_patcher.CoreModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device) def load_sd(self, sd): return self.model.load_state_dict(sd, strict=True, assign=self.patcher.is_dynamic()) def get_sd(self): return self.model.state_dict() def resample_latent(self, latent): comfy.model_management.load_model_gpu(self.patcher) return self.model(latent.to(self.load_device))
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/ldm/hunyuan_video/upsampler.py", "license": "GNU General Public License v3.0", "lines": 103, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_api_nodes/nodes_topaz.py
import builtins
from io import BytesIO

import aiohttp
from typing_extensions import override

from comfy_api.latest import IO, ComfyExtension, Input
from comfy_api_nodes.apis.topaz import (
    CreateVideoRequest,
    CreateVideoRequestSource,
    CreateVideoResponse,
    ImageAsyncTaskResponse,
    ImageDownloadResponse,
    ImageEnhanceRequest,
    ImageStatusResponse,
    OutputInformationVideo,
    Resolution,
    VideoAcceptResponse,
    VideoCompleteUploadRequest,
    VideoCompleteUploadRequestPart,
    VideoCompleteUploadResponse,
    VideoEnhancementFilter,
    VideoFrameInterpolationFilter,
    VideoStatusResponse,
)
from comfy_api_nodes.util import (
    ApiEndpoint,
    download_url_to_image_tensor,
    download_url_to_video_output,
    get_fs_object_size,
    get_number_of_images,
    poll_op,
    sync_op,
    upload_images_to_comfyapi,
    validate_container_format_is_mp4,
)

# Maps the UI display name of an upscaler to the Topaz API model identifier.
UPSCALER_MODELS_MAP = {
    "Starlight (Astra) Fast": "slf-1",
    "Starlight (Astra) Creative": "slc-1",
}


class TopazImageEnhance(IO.ComfyNode):
    """API node wrapping Topaz's async image enhance/upscale endpoint."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="TopazImageEnhance",
            display_name="Topaz Image Enhance",
            category="api node/image/Topaz",
            description="Industry-standard upscaling and image enhancement.",
            inputs=[
                IO.Combo.Input("model", options=["Reimagine"]),
                IO.Image.Input("image"),
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                    tooltip="Optional text prompt for creative upscaling guidance.",
                    optional=True,
                ),
                IO.Combo.Input(
                    "subject_detection",
                    options=["All", "Foreground", "Background"],
                    optional=True,
                    advanced=True,
                ),
                IO.Boolean.Input(
                    "face_enhancement",
                    default=True,
                    optional=True,
                    tooltip="Enhance faces (if present) during processing.",
                    advanced=True,
                ),
                IO.Float.Input(
                    "face_enhancement_creativity",
                    default=0.0,
                    min=0.0,
                    max=1.0,
                    step=0.01,
                    display_mode=IO.NumberDisplay.number,
                    optional=True,
                    tooltip="Set the creativity level for face enhancement.",
                    advanced=True,
                ),
                IO.Float.Input(
                    "face_enhancement_strength",
                    default=1.0,
                    min=0.0,
                    max=1.0,
                    step=0.01,
                    display_mode=IO.NumberDisplay.number,
                    optional=True,
                    tooltip="Controls how sharp enhanced faces are relative to the background.",
                    advanced=True,
                ),
                IO.Boolean.Input(
                    "crop_to_fill",
                    default=False,
                    optional=True,
                    tooltip="By default, the image is letterboxed when the output aspect ratio differs. "
                    "Enable to crop the image to fill the output dimensions.",
                    advanced=True,
                ),
                IO.Int.Input(
                    "output_width",
                    default=0,
                    min=0,
                    max=32000,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    optional=True,
                    tooltip="Zero value means to calculate automatically (usually it will be original size or output_height if specified).",
                    advanced=True,
                ),
                IO.Int.Input(
                    "output_height",
                    default=0,
                    min=0,
                    max=32000,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    optional=True,
                    tooltip="Zero value means to output in the same height as original or output width.",
                    advanced=True,
                ),
                IO.Int.Input(
                    "creativity",
                    default=3,
                    min=1,
                    max=9,
                    step=1,
                    display_mode=IO.NumberDisplay.slider,
                    optional=True,
                ),
                IO.Boolean.Input(
                    "face_preservation",
                    default=True,
                    optional=True,
                    tooltip="Preserve subjects' facial identity.",
                    advanced=True,
                ),
                IO.Boolean.Input(
                    "color_preservation",
                    default=True,
                    optional=True,
                    tooltip="Preserve the original colors.",
                    advanced=True,
                ),
            ],
            outputs=[
                IO.Image.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        image: Input.Image,
        prompt: str = "",
        subject_detection: str = "All",
        face_enhancement: bool = True,
        # NOTE(review): these two signature defaults (1.0 / 0.8) differ from the
        # schema defaults above (0.0 / 1.0); the schema values are what the UI
        # sends in practice — confirm the discrepancy is intentional.
        face_enhancement_creativity: float = 1.0,
        face_enhancement_strength: float = 0.8,
        crop_to_fill: bool = False,
        output_width: int = 0,
        output_height: int = 0,
        creativity: int = 3,
        face_preservation: bool = True,
        color_preservation: bool = True,
    ) -> IO.NodeOutput:
        """Upload the image, start an async enhance task, poll, download result."""
        if get_number_of_images(image) != 1:
            raise ValueError("Only one input image is supported.")
        download_url = await upload_images_to_comfyapi(
            cls, image, max_images=1, mime_type="image/png", total_pixels=4096 * 4096
        )
        initial_response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/topaz/image/v1/enhance-gen/async", method="POST"),
            response_model=ImageAsyncTaskResponse,
            data=ImageEnhanceRequest(
                model=model,
                prompt=prompt,
                subject_detection=subject_detection,
                face_enhancement=face_enhancement,
                face_enhancement_creativity=face_enhancement_creativity,
                face_enhancement_strength=face_enhancement_strength,
                crop_to_fill=crop_to_fill,
                # Zero means "auto" in the UI; the API expects the field absent.
                output_width=output_width if output_width else None,
                output_height=output_height if output_height else None,
                creativity=creativity,
                # API expects lowercase string booleans for these two fields.
                face_preservation=str(face_preservation).lower(),
                color_preservation=str(color_preservation).lower(),
                source_url=download_url[0],
                output_format="png",
            ),
            content_type="multipart/form-data",
        )
        await poll_op(
            cls,
            poll_endpoint=ApiEndpoint(path=f"/proxy/topaz/image/v1/status/{initial_response.process_id}"),
            response_model=ImageStatusResponse,
            status_extractor=lambda x: x.status,
            progress_extractor=lambda x: getattr(x, "progress", 0),
            # Convert Topaz credits to USD for the price badge.
            price_extractor=lambda x: x.credits * 0.08,
            poll_interval=8.0,
            estimated_duration=60,
        )
        results = await sync_op(
            cls,
            ApiEndpoint(path=f"/proxy/topaz/image/v1/download/{initial_response.process_id}"),
            response_model=ImageDownloadResponse,
            monitor_progress=False,
        )
        return IO.NodeOutput(await download_url_to_image_tensor(results.download_url))


class TopazVideoEnhance(IO.ComfyNode):
    """API node wrapping Topaz's video upscaling / frame-interpolation pipeline."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="TopazVideoEnhance",
            display_name="Topaz Video Enhance",
            category="api node/video/Topaz",
            description="Breathe new life into video with powerful upscaling and recovery technology.",
            inputs=[
                IO.Video.Input("video"),
                IO.Boolean.Input("upscaler_enabled", default=True),
                IO.Combo.Input("upscaler_model", options=list(UPSCALER_MODELS_MAP.keys())),
                IO.Combo.Input("upscaler_resolution", options=["FullHD (1080p)", "4K (2160p)"]),
                IO.Combo.Input(
                    "upscaler_creativity",
                    options=["low", "middle", "high"],
                    default="low",
                    tooltip="Creativity level (applies only to Starlight (Astra) Creative).",
                    optional=True,
                    advanced=True,
                ),
                IO.Boolean.Input("interpolation_enabled", default=False, optional=True),
                IO.Combo.Input("interpolation_model", options=["apo-8"], default="apo-8", optional=True, advanced=True),
                IO.Int.Input(
                    "interpolation_slowmo",
                    default=1,
                    min=1,
                    max=16,
                    display_mode=IO.NumberDisplay.number,
                    tooltip="Slow-motion factor applied to the input video. "
                    "For example, 2 makes the output twice as slow and doubles the duration.",
                    optional=True,
                    advanced=True,
                ),
                IO.Int.Input(
                    "interpolation_frame_rate",
                    default=60,
                    min=15,
                    max=240,
                    display_mode=IO.NumberDisplay.number,
                    tooltip="Output frame rate.",
                    optional=True,
                ),
                IO.Boolean.Input(
                    "interpolation_duplicate",
                    default=False,
                    tooltip="Analyze the input for duplicate frames and remove them.",
                    optional=True,
                    advanced=True,
                ),
                IO.Float.Input(
                    "interpolation_duplicate_threshold",
                    default=0.01,
                    min=0.001,
                    max=0.1,
                    step=0.001,
                    display_mode=IO.NumberDisplay.number,
                    tooltip="Detection sensitivity for duplicate frames.",
                    optional=True,
                    advanced=True,
                ),
                IO.Combo.Input(
                    "dynamic_compression_level",
                    options=["Low", "Mid", "High"],
                    default="Low",
                    tooltip="CQP level.",
                    optional=True,
                    advanced=True,
                ),
            ],
            outputs=[
                IO.Video.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )

    @classmethod
    async def execute(
        cls,
        video: Input.Video,
        upscaler_enabled: bool,
        upscaler_model: str,
        upscaler_resolution: str,
        upscaler_creativity: str = "low",
        interpolation_enabled: bool = False,
        interpolation_model: str = "apo-8",
        interpolation_slowmo: int = 1,
        interpolation_frame_rate: int = 60,
        interpolation_duplicate: bool = False,
        interpolation_duplicate_threshold: float = 0.01,
        dynamic_compression_level: str = "Low",
    ) -> IO.NodeOutput:
        """Create a Topaz video task, upload the source, poll, download result.

        Flow: create task -> accept (get pre-signed upload URLs) -> PUT the
        mp4 -> complete-upload (send ETag) -> poll status -> download output.
        """
        if upscaler_enabled is False and interpolation_enabled is False:
            raise ValueError("There is nothing to do: both upscaling and interpolation are disabled.")
        validate_container_format_is_mp4(video)

        src_width, src_height = video.get_dimensions()
        src_frame_rate = int(video.get_frame_rate())
        duration_sec = video.get_duration()
        src_video_stream = video.get_stream_source()

        target_width = src_width
        target_height = src_height
        target_frame_rate = src_frame_rate
        filters = []
        if upscaler_enabled:
            if "1080p" in upscaler_resolution:
                target_pixel_p = 1080
                max_long_side = 1920
            else:
                target_pixel_p = 2160
                max_long_side = 3840
            ar = src_width / src_height
            if src_width >= src_height:
                # Landscape or Square; Attempt to set height to target (e.g., 2160), calculate width
                target_height = target_pixel_p
                target_width = int(target_height * ar)
                # Check if width exceeds standard bounds (for ultra-wide e.g., 21:9 ARs)
                if target_width > max_long_side:
                    target_width = max_long_side
                    target_height = int(target_width / ar)
            else:
                # Portrait; Attempt to set width to target (e.g., 2160), calculate height
                target_width = target_pixel_p
                target_height = int(target_width / ar)
                # Check if height exceeds standard bounds
                if target_height > max_long_side:
                    target_height = max_long_side
                    target_width = int(target_height * ar)
            # Video codecs require even dimensions.
            if target_width % 2 != 0:
                target_width += 1
            if target_height % 2 != 0:
                target_height += 1
            filters.append(
                VideoEnhancementFilter(
                    model=UPSCALER_MODELS_MAP[upscaler_model],
                    # creativity/isOptimizedMode only apply to the Creative ("slc-1") model.
                    creativity=(upscaler_creativity if UPSCALER_MODELS_MAP[upscaler_model] == "slc-1" else None),
                    isOptimizedMode=(True if UPSCALER_MODELS_MAP[upscaler_model] == "slc-1" else None),
                ),
            )
        if interpolation_enabled:
            target_frame_rate = interpolation_frame_rate
            filters.append(
                VideoFrameInterpolationFilter(
                    model=interpolation_model,
                    slowmo=interpolation_slowmo,
                    fps=interpolation_frame_rate,
                    duplicate=interpolation_duplicate,
                    duplicate_threshold=interpolation_duplicate_threshold,
                ),
            )
        initial_res = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/topaz/video/", method="POST"),
            response_model=CreateVideoResponse,
            data=CreateVideoRequest(
                source=CreateVideoRequestSource(
                    container="mp4",
                    size=get_fs_object_size(src_video_stream),
                    duration=int(duration_sec),
                    frameCount=video.get_frame_count(),
                    frameRate=src_frame_rate,
                    resolution=Resolution(width=src_width, height=src_height),
                ),
                filters=filters,
                output=OutputInformationVideo(
                    resolution=Resolution(width=target_width, height=target_height),
                    frameRate=target_frame_rate,
                    audioCodec="AAC",
                    audioTransfer="Copy",
                    dynamicCompressionLevel=dynamic_compression_level,
                ),
            ),
            wait_label="Creating task",
            final_label_on_success="Task created",
        )
        upload_res = await sync_op(
            cls,
            ApiEndpoint(
                path=f"/proxy/topaz/video/{initial_res.requestId}/accept",
                method="PATCH",
            ),
            response_model=VideoAcceptResponse,
            wait_label="Preparing upload",
            final_label_on_success="Upload started",
        )
        # Multiple URLs would mean a multipart upload, which is not implemented.
        if len(upload_res.urls) > 1:
            raise NotImplementedError(
                "Large files are not currently supported. Please open an issue in the ComfyUI repository."
            )
        async with aiohttp.ClientSession(headers={"Content-Type": "video/mp4"}) as session:
            if isinstance(src_video_stream, BytesIO):
                src_video_stream.seek(0)  # rewind in case the stream was read before
                async with session.put(upload_res.urls[0], data=src_video_stream, raise_for_status=True) as res:
                    upload_etag = res.headers["Etag"]
            else:
                with builtins.open(src_video_stream, "rb") as video_file:
                    async with session.put(upload_res.urls[0], data=video_file, raise_for_status=True) as res:
                        upload_etag = res.headers["Etag"]
        await sync_op(
            cls,
            ApiEndpoint(
                path=f"/proxy/topaz/video/{initial_res.requestId}/complete-upload",
                method="PATCH",
            ),
            response_model=VideoCompleteUploadResponse,
            data=VideoCompleteUploadRequest(
                uploadResults=[
                    VideoCompleteUploadRequestPart(
                        partNum=1,
                        eTag=upload_etag,
                    ),
                ],
            ),
            wait_label="Finalizing upload",
            final_label_on_success="Upload completed",
        )
        final_response = await poll_op(
            cls,
            ApiEndpoint(path=f"/proxy/topaz/video/{initial_res.requestId}/status"),
            response_model=VideoStatusResponse,
            status_extractor=lambda x: x.status,
            progress_extractor=lambda x: getattr(x, "progress", 0),
            # NOTE(review): `x.estimates.cost[0]` is evaluated inside the truthiness
            # check before the index is known to exist — an empty cost list would
            # raise IndexError here; confirm the API always returns at least one entry.
            price_extractor=lambda x: (x.estimates.cost[0] * 0.08 if x.estimates and x.estimates.cost[0] else None),
            poll_interval=10.0,
            max_poll_attempts=320,
        )
        return IO.NodeOutput(await download_url_to_video_output(final_response.download.url))


class TopazExtension(ComfyExtension):
    """Registers the Topaz API nodes with ComfyUI."""

    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [
            TopazImageEnhance,
            TopazVideoEnhance,
        ]


async def comfy_entrypoint() -> TopazExtension:
    return TopazExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/nodes_topaz.py", "license": "GNU General Public License v3.0", "lines": 455, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy_api/latest/_util/geometry_types.py
import shutil from io import BytesIO from pathlib import Path from typing import IO import torch class VOXEL: def __init__(self, data: torch.Tensor): self.data = data class MESH: def __init__(self, vertices: torch.Tensor, faces: torch.Tensor): self.vertices = vertices self.faces = faces class File3D: """Class representing a 3D file from a file path or binary stream. Supports both disk-backed (file path) and memory-backed (BytesIO) storage. """ def __init__(self, source: str | IO[bytes], file_format: str = ""): self._source = source self._format = file_format or self._infer_format() def _infer_format(self) -> str: if isinstance(self._source, str): return Path(self._source).suffix.lstrip(".").lower() return "" @property def format(self) -> str: return self._format @format.setter def format(self, value: str) -> None: self._format = value.lstrip(".").lower() if value else "" @property def is_disk_backed(self) -> bool: return isinstance(self._source, str) def get_source(self) -> str | IO[bytes]: if isinstance(self._source, str): return self._source if hasattr(self._source, "seek"): self._source.seek(0) return self._source def get_data(self) -> BytesIO: if isinstance(self._source, str): with open(self._source, "rb") as f: result = BytesIO(f.read()) return result if hasattr(self._source, "seek"): self._source.seek(0) if isinstance(self._source, BytesIO): return self._source return BytesIO(self._source.read()) def save_to(self, path: str) -> str: dest = Path(path) dest.parent.mkdir(parents=True, exist_ok=True) if isinstance(self._source, str): if Path(self._source).resolve() != dest.resolve(): shutil.copy2(self._source, dest) else: if hasattr(self._source, "seek"): self._source.seek(0) with open(dest, "wb") as f: f.write(self._source.read()) return str(dest) def get_bytes(self) -> bytes: if isinstance(self._source, str): return Path(self._source).read_bytes() if hasattr(self._source, "seek"): self._source.seek(0) return self._source.read() def __repr__(self) -> str: if 
isinstance(self._source, str): return f"File3D(source={self._source!r}, format={self._format!r})" return f"File3D(<stream>, format={self._format!r})"
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api/latest/_util/geometry_types.py", "license": "GNU General Public License v3.0", "lines": 70, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy_extras/nodes_nop.py
from comfy_api.latest import ComfyExtension, io from typing_extensions import override # If you write a node that is so useless that it breaks ComfyUI it will be featured in this exclusive list # "native" block swap nodes are placebo at best and break the ComfyUI memory management system. # They are also considered harmful because instead of users reporting issues with the built in # memory management they install these stupid nodes and complain even harder. Now it completely # breaks with some of the new ComfyUI memory optimizations so I have made the decision to NOP it # out of all workflows. class wanBlockSwap(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="wanBlockSwap", category="", description="NOP", inputs=[ io.Model.Input("model"), ], outputs=[ io.Model.Output(), ], is_deprecated=True, ) @classmethod def execute(cls, model) -> io.NodeOutput: return io.NodeOutput(model) class NopExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[io.ComfyNode]]: return [ wanBlockSwap ] async def comfy_entrypoint() -> NopExtension: return NopExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_nop.py", "license": "GNU General Public License v3.0", "lines": 34, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_extras/nodes_rope.py
from comfy_api.latest import ComfyExtension, io from typing_extensions import override class ScaleROPE(io.ComfyNode): @classmethod def define_schema(cls): return io.Schema( node_id="ScaleROPE", category="advanced/model_patches", description="Scale and shift the ROPE of the model.", is_experimental=True, inputs=[ io.Model.Input("model"), io.Float.Input("scale_x", default=1.0, min=0.0, max=100.0, step=0.1, advanced=True), io.Float.Input("shift_x", default=0.0, min=-256.0, max=256.0, step=0.1, advanced=True), io.Float.Input("scale_y", default=1.0, min=0.0, max=100.0, step=0.1, advanced=True), io.Float.Input("shift_y", default=0.0, min=-256.0, max=256.0, step=0.1, advanced=True), io.Float.Input("scale_t", default=1.0, min=0.0, max=100.0, step=0.1, advanced=True), io.Float.Input("shift_t", default=0.0, min=-256.0, max=256.0, step=0.1, advanced=True), ], outputs=[ io.Model.Output(), ], ) @classmethod def execute(cls, model, scale_x, shift_x, scale_y, shift_y, scale_t, shift_t) -> io.NodeOutput: m = model.clone() m.set_model_rope_options(scale_x, shift_x, scale_y, shift_y, scale_t, shift_t) return io.NodeOutput(m) class RopeExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[io.ComfyNode]]: return [ ScaleROPE ] async def comfy_entrypoint() -> RopeExtension: return RopeExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_extras/nodes_rope.py", "license": "GNU General Public License v3.0", "lines": 36, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy/quant_ops.py
"""Quantized tensor layouts (FP8 / NVFP4) built on top of comfy_kitchen.

Falls back to inert stubs when comfy_kitchen is not installed, in which case
fp8/fp4 support is unavailable but the module still imports.
"""
import torch
import logging

try:
    import comfy_kitchen as ck
    from comfy_kitchen.tensor import (
        QuantizedTensor,
        QuantizedLayout,
        TensorCoreFP8Layout as _CKFp8Layout,
        TensorCoreNVFP4Layout as _CKNvfp4Layout,
        register_layout_op,
        register_layout_class,
        get_layout_class,
    )
    _CK_AVAILABLE = True
    # The optimized CUDA backend requires a cu130+ PyTorch build.
    if torch.version.cuda is None:
        ck.registry.disable("cuda")
    else:
        cuda_version = tuple(map(int, str(torch.version.cuda).split('.')))
        if cuda_version < (13,):
            ck.registry.disable("cuda")
            logging.warning("WARNING: You need pytorch with cu130 or higher to use optimized CUDA operations.")
    ck.registry.disable("triton")
    for k, v in ck.list_backends().items():
        logging.info(f"Found comfy_kitchen backend {k}: {v}")
except ImportError as e:
    logging.error(f"Failed to import comfy_kitchen, Error: {e}, fp8 and fp4 support will not be available.")
    _CK_AVAILABLE = False

    # Inert stand-ins so the rest of the module (and isinstance checks
    # elsewhere) keep working without comfy_kitchen.
    # NOTE(review): QuantizedLayout and register_layout_op are not stubbed
    # here but appear in __all__ — `from comfy.quant_ops import *` would fail
    # when comfy_kitchen is missing; confirm whether stubs should be added.
    class QuantizedTensor:
        pass

    class _CKFp8Layout:
        pass

    class _CKNvfp4Layout:
        pass

    def register_layout_class(name, cls):
        pass

    def get_layout_class(name):
        return None

import comfy.float

# ==============================================================================
# FP8 Layouts with Comfy-Specific Extensions
# ==============================================================================

class _TensorCoreFP8LayoutBase(_CKFp8Layout):
    # Concrete subclasses set this to the torch fp8 dtype they store.
    FP8_DTYPE = None  # Must be overridden in subclass

    @classmethod
    def quantize(cls, tensor, scale=None, stochastic_rounding=0, inplace_ops=False):
        """Quantize `tensor` to FP8.

        scale may be None (unit scale), a number/tensor, or the string
        "recalculate" to derive it from the tensor's absolute maximum.
        stochastic_rounding > 0 enables stochastic rounding seeded with that
        value; inplace_ops scales the input tensor in place in that path.
        Returns (qdata, params).
        """
        if cls.FP8_DTYPE is None:
            raise NotImplementedError(f"{cls.__name__} must define FP8_DTYPE")

        orig_dtype = tensor.dtype
        orig_shape = tuple(tensor.shape)

        if isinstance(scale, str) and scale == "recalculate":
            scale = torch.amax(tensor.abs()).to(dtype=torch.float32) / torch.finfo(cls.FP8_DTYPE).max
            if tensor.dtype not in [torch.float32, torch.bfloat16]:
                # Prevent scale from being too small
                tensor_info = torch.finfo(tensor.dtype)
                scale = (1.0 / torch.clamp((1.0 / scale), min=tensor_info.min, max=tensor_info.max))

        if scale is None:
            scale = torch.ones((), device=tensor.device, dtype=torch.float32)

        if not isinstance(scale, torch.Tensor):
            scale = torch.tensor(scale, device=tensor.device, dtype=torch.float32)

        if stochastic_rounding > 0:
            # Pre-scale, then stochastically round to the fp8 dtype.
            if inplace_ops:
                tensor *= (1.0 / scale).to(tensor.dtype)
            else:
                tensor = tensor * (1.0 / scale).to(tensor.dtype)
            qdata = comfy.float.stochastic_rounding(tensor, dtype=cls.FP8_DTYPE, seed=stochastic_rounding)
        else:
            qdata = ck.quantize_per_tensor_fp8(tensor, scale, cls.FP8_DTYPE)

        params = cls.Params(scale=scale.float(), orig_dtype=orig_dtype, orig_shape=orig_shape)
        return qdata, params


class TensorCoreNVFP4Layout(_CKNvfp4Layout):
    @classmethod
    def quantize(cls, tensor, scale=None, stochastic_rounding=0, inplace_ops=False):
        """Quantize a 2D tensor to NVFP4 (block-scaled fp4).

        Returns (qdata, params) where params carries the global scale and the
        per-block scales.
        """
        if tensor.dim() != 2:
            raise ValueError(f"NVFP4 requires 2D tensor, got {tensor.dim()}D")

        orig_dtype = tensor.dtype
        orig_shape = tuple(tensor.shape)

        if scale is None or (isinstance(scale, str) and scale == "recalculate"):
            # Global scale chosen so block scales and fp4 values both fit range.
            scale = torch.amax(tensor.abs()) / (ck.float_utils.F8_E4M3_MAX * ck.float_utils.F4_E2M1_MAX)

        if not isinstance(scale, torch.Tensor):
            scale = torch.tensor(scale)
        scale = scale.to(device=tensor.device, dtype=torch.float32)

        # NVFP4 kernels require 16-aligned dims; pad when needed.
        padded_shape = cls.get_padded_shape(orig_shape)
        needs_padding = padded_shape != orig_shape

        if stochastic_rounding > 0:
            qdata, block_scale = comfy.float.stochastic_round_quantize_nvfp4_by_block(tensor, scale, pad_16x=needs_padding, seed=stochastic_rounding)
        else:
            qdata, block_scale = ck.quantize_nvfp4(tensor, scale, pad_16x=needs_padding)

        params = cls.Params(
            scale=scale,
            orig_dtype=orig_dtype,
            orig_shape=orig_shape,
            block_scale=block_scale,
        )
        return qdata, params


class TensorCoreFP8E4M3Layout(_TensorCoreFP8LayoutBase):
    FP8_DTYPE = torch.float8_e4m3fn


class TensorCoreFP8E5M2Layout(_TensorCoreFP8LayoutBase):
    FP8_DTYPE = torch.float8_e5m2


# Backward compatibility alias - default to E4M3
TensorCoreFP8Layout = TensorCoreFP8E4M3Layout

# ==============================================================================
# Registry
# ==============================================================================
register_layout_class("TensorCoreFP8Layout", TensorCoreFP8Layout)
register_layout_class("TensorCoreFP8E4M3Layout", TensorCoreFP8E4M3Layout)
register_layout_class("TensorCoreFP8E5M2Layout", TensorCoreFP8E5M2Layout)
register_layout_class("TensorCoreNVFP4Layout", TensorCoreNVFP4Layout)

# Maps checkpoint quantization format names to storage dtype, the extra
# parameter tensors expected in the state dict, and the layout class.
QUANT_ALGOS = {
    "float8_e4m3fn": {
        "storage_t": torch.float8_e4m3fn,
        "parameters": {"weight_scale", "input_scale"},
        "comfy_tensor_layout": "TensorCoreFP8E4M3Layout",
    },
    "float8_e5m2": {
        "storage_t": torch.float8_e5m2,
        "parameters": {"weight_scale", "input_scale"},
        "comfy_tensor_layout": "TensorCoreFP8E5M2Layout",
    },
    "nvfp4": {
        "storage_t": torch.uint8,
        "parameters": {"weight_scale", "weight_scale_2", "input_scale"},
        "comfy_tensor_layout": "TensorCoreNVFP4Layout",
        "group_size": 16,
    },
}

# ==============================================================================
# Re-exports for backward compatibility
# ==============================================================================
__all__ = [
    "QuantizedTensor",
    "QuantizedLayout",
    "TensorCoreFP8Layout",
    "TensorCoreFP8E4M3Layout",
    "TensorCoreFP8E5M2Layout",
    "TensorCoreNVFP4Layout",
    "QUANT_ALGOS",
    "register_layout_op",
]
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy/quant_ops.py", "license": "GNU General Public License v3.0", "lines": 137, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:tests-unit/comfy_quant/test_mixed_precision.py
"""Unit tests for mixed-precision (per-layer quantized) operations in comfy.ops."""
import unittest
import torch
import sys
import os
import json

# Add comfy to path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))

def has_gpu():
    return torch.cuda.is_available()

from comfy.cli_args import args

# Force CPU mode before importing ops so device selection doesn't touch CUDA.
if not has_gpu():
    args.cpu = True

from comfy import ops
from comfy.quant_ops import QuantizedTensor
import comfy.utils


class SimpleModel(torch.nn.Module):
    """Three-layer MLP used as the fixture for per-layer quantization tests."""

    def __init__(self, operations=ops.disable_weight_init):
        super().__init__()
        self.layer1 = operations.Linear(10, 20, device="cpu", dtype=torch.bfloat16)
        self.layer2 = operations.Linear(20, 30, device="cpu", dtype=torch.bfloat16)
        self.layer3 = operations.Linear(30, 40, device="cpu", dtype=torch.bfloat16)

    def forward(self, x):
        x = self.layer1(x)
        x = torch.nn.functional.relu(x)
        x = self.layer2(x)
        x = torch.nn.functional.relu(x)
        x = self.layer3(x)
        return x


class TestMixedPrecisionOps(unittest.TestCase):
    def test_all_layers_standard(self):
        """Test that model with no quantization works normally"""
        # Create model
        model = SimpleModel(operations=ops.mixed_precision_ops({}))

        # Initialize weights manually
        model.layer1.weight = torch.nn.Parameter(torch.randn(20, 10, dtype=torch.bfloat16))
        model.layer1.bias = torch.nn.Parameter(torch.randn(20, dtype=torch.bfloat16))
        model.layer2.weight = torch.nn.Parameter(torch.randn(30, 20, dtype=torch.bfloat16))
        model.layer2.bias = torch.nn.Parameter(torch.randn(30, dtype=torch.bfloat16))
        model.layer3.weight = torch.nn.Parameter(torch.randn(40, 30, dtype=torch.bfloat16))
        model.layer3.bias = torch.nn.Parameter(torch.randn(40, dtype=torch.bfloat16))

        # Initialize weight_function and bias_function
        for layer in [model.layer1, model.layer2, model.layer3]:
            layer.weight_function = []
            layer.bias_function = []

        # Forward pass
        input_tensor = torch.randn(5, 10, dtype=torch.bfloat16)
        output = model(input_tensor)

        self.assertEqual(output.shape, (5, 40))
        self.assertEqual(output.dtype, torch.bfloat16)

    def test_mixed_precision_load(self):
        """Test loading a mixed precision model from state dict"""
        # Configure mixed precision: layer1 is FP8, layer2 and layer3 are standard
        layer_quant_config = {
            "layer1": {
                "format": "float8_e4m3fn",
                "params": {}
            },
            "layer3": {
                "format": "float8_e4m3fn",
                "params": {}
            }
        }

        # Create state dict with mixed precision
        fp8_weight1 = torch.randn(20, 10, dtype=torch.float32).to(torch.float8_e4m3fn)
        fp8_weight3 = torch.randn(40, 30, dtype=torch.float32).to(torch.float8_e4m3fn)
        state_dict = {
            # Layer 1: FP8 E4M3FN
            "layer1.weight": fp8_weight1,
            "layer1.bias": torch.randn(20, dtype=torch.bfloat16),
            "layer1.weight_scale": torch.tensor(2.0, dtype=torch.float32),
            # Layer 2: Standard BF16
            "layer2.weight": torch.randn(30, 20, dtype=torch.bfloat16),
            "layer2.bias": torch.randn(30, dtype=torch.bfloat16),
            # Layer 3: FP8 E4M3FN
            "layer3.weight": fp8_weight3,
            "layer3.bias": torch.randn(40, dtype=torch.bfloat16),
            "layer3.weight_scale": torch.tensor(1.5, dtype=torch.float32),
        }
        state_dict, _ = comfy.utils.convert_old_quants(state_dict, metadata={"_quantization_metadata": json.dumps({"layers": layer_quant_config})})

        # Create model and load state dict (strict=False because custom loading pops keys)
        model = SimpleModel(operations=ops.mixed_precision_ops({}))
        model.load_state_dict(state_dict, strict=False)

        # Verify weights are wrapped in QuantizedTensor
        self.assertIsInstance(model.layer1.weight, QuantizedTensor)
        self.assertEqual(model.layer1.weight._layout_cls, "TensorCoreFP8E4M3Layout")

        # Layer 2 should NOT be quantized
        self.assertNotIsInstance(model.layer2.weight, QuantizedTensor)

        # Layer 3 should be quantized
        self.assertIsInstance(model.layer3.weight, QuantizedTensor)
        self.assertEqual(model.layer3.weight._layout_cls, "TensorCoreFP8E4M3Layout")

        # Verify scales were loaded
        self.assertEqual(model.layer1.weight._params.scale.item(), 2.0)
        self.assertEqual(model.layer3.weight._params.scale.item(), 1.5)

        # Forward pass
        input_tensor = torch.randn(5, 10, dtype=torch.bfloat16)
        with torch.inference_mode():
            output = model(input_tensor)
        self.assertEqual(output.shape, (5, 40))

    def test_state_dict_quantized_preserved(self):
        """Test that quantized weights are preserved in state_dict()"""
        # Configure mixed precision
        layer_quant_config = {
            "layer1": {
                "format": "float8_e4m3fn",
                "params": {}
            }
        }

        # Create and load model
        fp8_weight = torch.randn(20, 10, dtype=torch.float32).to(torch.float8_e4m3fn)
        state_dict1 = {
            "layer1.weight": fp8_weight,
            "layer1.bias": torch.randn(20, dtype=torch.bfloat16),
            "layer1.weight_scale": torch.tensor(3.0, dtype=torch.float32),
            "layer2.weight": torch.randn(30, 20, dtype=torch.bfloat16),
            "layer2.bias": torch.randn(30, dtype=torch.bfloat16),
            "layer3.weight": torch.randn(40, 30, dtype=torch.bfloat16),
            "layer3.bias": torch.randn(40, dtype=torch.bfloat16),
        }
        state_dict1, _ = comfy.utils.convert_old_quants(state_dict1, metadata={"_quantization_metadata": json.dumps({"layers": layer_quant_config})})

        model = SimpleModel(operations=ops.mixed_precision_ops({}))
        model.load_state_dict(state_dict1, strict=False)

        # Save state dict
        state_dict2 = model.state_dict()

        # Verify layer1.weight is a QuantizedTensor with scale preserved
        # (compare raw bytes via a uint8 view since fp8 has no equality kernel)
        self.assertTrue(torch.equal(state_dict2["layer1.weight"].view(torch.uint8), fp8_weight.view(torch.uint8)))
        self.assertEqual(state_dict2["layer1.weight_scale"].item(), 3.0)
        self.assertEqual(model.layer1.weight._layout_cls, "TensorCoreFP8E4M3Layout")

        # Verify non-quantized layers are standard tensors
        self.assertNotIsInstance(state_dict2["layer2.weight"], QuantizedTensor)
        self.assertNotIsInstance(state_dict2["layer3.weight"], QuantizedTensor)

    def test_weight_function_compatibility(self):
        """Test that weight_function (LoRA) works with quantized layers"""
        # Configure FP8 quantization
        layer_quant_config = {
            "layer1": {
                "format": "float8_e4m3fn",
                "params": {}
            }
        }

        # Create and load model
        fp8_weight = torch.randn(20, 10, dtype=torch.float32).to(torch.float8_e4m3fn)
        state_dict = {
            "layer1.weight": fp8_weight,
            "layer1.bias": torch.randn(20, dtype=torch.bfloat16),
            "layer1.weight_scale": torch.tensor(2.0, dtype=torch.float32),
            "layer2.weight": torch.randn(30, 20, dtype=torch.bfloat16),
            "layer2.bias": torch.randn(30, dtype=torch.bfloat16),
            "layer3.weight": torch.randn(40, 30, dtype=torch.bfloat16),
            "layer3.bias": torch.randn(40, dtype=torch.bfloat16),
        }
        state_dict, _ = comfy.utils.convert_old_quants(state_dict, metadata={"_quantization_metadata": json.dumps({"layers": layer_quant_config})})

        model = SimpleModel(operations=ops.mixed_precision_ops({}))
        model.load_state_dict(state_dict, strict=False)

        # Add a weight function (simulating LoRA)
        # This should trigger dequantization during forward pass
        def apply_lora(weight):
            lora_delta = torch.randn_like(weight) * 0.01
            return weight + lora_delta

        model.layer1.weight_function.append(apply_lora)

        # Forward pass should work with LoRA (triggers weight_function path)
        input_tensor = torch.randn(5, 10, dtype=torch.bfloat16)
        output = model(input_tensor)
        self.assertEqual(output.shape, (5, 40))

    def test_error_handling_unknown_format(self):
        """Test that unknown formats raise error"""
        # Configure with unknown format
        layer_quant_config = {
            "layer1": {
                "format": "unknown_format_xyz",
                "params": {}
            }
        }

        # Create state dict
        state_dict = {
            "layer1.weight": torch.randn(20, 10, dtype=torch.bfloat16),
            "layer1.bias": torch.randn(20, dtype=torch.bfloat16),
            "layer2.weight": torch.randn(30, 20, dtype=torch.bfloat16),
            "layer2.bias": torch.randn(30, dtype=torch.bfloat16),
            "layer3.weight": torch.randn(40, 30, dtype=torch.bfloat16),
            "layer3.bias": torch.randn(40, dtype=torch.bfloat16),
        }
        state_dict, _ = comfy.utils.convert_old_quants(state_dict, metadata={"_quantization_metadata": json.dumps({"layers": layer_quant_config})})

        # Load should raise KeyError for unknown format in QUANT_FORMAT_MIXINS
        model = SimpleModel(operations=ops.mixed_precision_ops({}))
        with self.assertRaises(KeyError):
            model.load_state_dict(state_dict, strict=False)


if __name__ == "__main__":
    unittest.main()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "tests-unit/comfy_quant/test_mixed_precision.py", "license": "GNU General Public License v3.0", "lines": 188, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
Comfy-Org/ComfyUI:comfy_api_nodes/nodes_ltxv.py
"""API nodes for the LTXV (LTX-2) text-to-video and image-to-video services."""

from io import BytesIO

from pydantic import BaseModel, Field
from typing_extensions import override

from comfy_api.latest import IO, ComfyExtension, Input, InputImpl
from comfy_api_nodes.util import (
    ApiEndpoint,
    get_number_of_images,
    sync_op_raw,
    upload_images_to_comfyapi,
    validate_string,
)

# Maps the human-readable model names shown in the UI to the API model identifiers.
MODELS_MAP = {
    "LTX-2 (Pro)": "ltx-2-pro",
    "LTX-2 (Fast)": "ltx-2-fast",
}


class ExecuteTaskRequest(BaseModel):
    """Request payload for the LTX proxy endpoints (shared by both nodes)."""

    prompt: str = Field(...)
    model: str = Field(...)
    duration: int = Field(...)
    resolution: str = Field(...)
    fps: int | None = Field(25)
    generate_audio: bool | None = Field(True)
    image_uri: str | None = Field(None)  # only populated by the image-to-video node


# Price badge shared by both nodes: the per-second price depends on the model
# and resolution; the total price scales linearly with the requested duration.
PRICE_BADGE = IO.PriceBadge(
    depends_on=IO.PriceBadgeDepends(widgets=["model", "duration", "resolution"]),
    expr="""
(
  $prices := {
    "ltx-2 (pro)": {"1920x1080":0.06,"2560x1440":0.12,"3840x2160":0.24},
    "ltx-2 (fast)": {"1920x1080":0.04,"2560x1440":0.08,"3840x2160":0.16}
  };
  $modelPrices := $lookup($prices, $lowercase(widgets.model));
  $pps := $lookup($modelPrices, widgets.resolution);
  {"type":"usd","usd": $pps * widgets.duration}
)
""",
)


def _validate_task_inputs(prompt: str, model: str, duration: int, resolution: str, fps: int) -> None:
    """Validate the inputs shared by both LTXV nodes.

    Raises:
        ValueError: if the prompt is empty or too long, or if a duration above
            10 seconds is requested with an unsupported combination (durations
            over 10s require the Fast model at 1920x1080 and 25 FPS).
    """
    validate_string(prompt, min_length=1, max_length=10000)
    if duration > 10 and (model != "LTX-2 (Fast)" or resolution != "1920x1080" or fps != 25):
        raise ValueError(
            "Durations over 10s are only available for the Fast model at 1920x1080 resolution and 25 FPS."
        )


class TextToVideoNode(IO.ComfyNode):
    """Generate a video from a text prompt via the LTX-2 API."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="LtxvApiTextToVideo",
            display_name="LTXV Text To Video",
            category="api node/video/LTXV",
            description="Professional-quality videos with customizable duration and resolution.",
            inputs=[
                IO.Combo.Input("model", options=list(MODELS_MAP.keys())),
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                ),
                IO.Combo.Input("duration", options=[6, 8, 10, 12, 14, 16, 18, 20], default=8),
                IO.Combo.Input(
                    "resolution",
                    options=[
                        "1920x1080",
                        "2560x1440",
                        "3840x2160",
                    ],
                ),
                IO.Combo.Input("fps", options=[25, 50], default=25),
                IO.Boolean.Input(
                    "generate_audio",
                    default=False,
                    optional=True,
                    tooltip="When true, the generated video will include AI-generated audio matching the scene.",
                    advanced=True,
                ),
            ],
            outputs=[
                IO.Video.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=PRICE_BADGE,
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        prompt: str,
        duration: int,
        resolution: str,
        fps: int = 25,
        generate_audio: bool = False,
    ) -> IO.NodeOutput:
        """Submit a text-to-video task and return the resulting video."""
        _validate_task_inputs(prompt, model, duration, resolution, fps)
        # The endpoint streams the finished video back as raw bytes.
        response = await sync_op_raw(
            cls,
            ApiEndpoint("/proxy/ltx/v1/text-to-video", "POST"),
            data=ExecuteTaskRequest(
                prompt=prompt,
                model=MODELS_MAP[model],
                duration=duration,
                resolution=resolution,
                fps=fps,
                generate_audio=generate_audio,
            ),
            as_binary=True,
            max_retries=1,
        )
        return IO.NodeOutput(InputImpl.VideoFromFile(BytesIO(response)))


class ImageToVideoNode(IO.ComfyNode):
    """Generate a video from a start frame plus a text prompt via the LTX-2 API."""

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="LtxvApiImageToVideo",
            display_name="LTXV Image To Video",
            category="api node/video/LTXV",
            description="Professional-quality videos with customizable duration and resolution based on start image.",
            inputs=[
                IO.Image.Input("image", tooltip="First frame to be used for the video."),
                IO.Combo.Input("model", options=list(MODELS_MAP.keys())),
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                ),
                IO.Combo.Input("duration", options=[6, 8, 10, 12, 14, 16, 18, 20], default=8),
                IO.Combo.Input(
                    "resolution",
                    options=[
                        "1920x1080",
                        "2560x1440",
                        "3840x2160",
                    ],
                ),
                IO.Combo.Input("fps", options=[25, 50], default=25),
                IO.Boolean.Input(
                    "generate_audio",
                    default=False,
                    optional=True,
                    tooltip="When true, the generated video will include AI-generated audio matching the scene.",
                    advanced=True,
                ),
            ],
            outputs=[
                IO.Video.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=PRICE_BADGE,
        )

    @classmethod
    async def execute(
        cls,
        image: Input.Image,
        model: str,
        prompt: str,
        duration: int,
        resolution: str,
        fps: int = 25,
        generate_audio: bool = False,
    ) -> IO.NodeOutput:
        """Upload the start frame, submit an image-to-video task, and return the video."""
        _validate_task_inputs(prompt, model, duration, resolution, fps)
        if get_number_of_images(image) != 1:
            raise ValueError("Currently only one input image is supported.")
        response = await sync_op_raw(
            cls,
            ApiEndpoint("/proxy/ltx/v1/image-to-video", "POST"),
            data=ExecuteTaskRequest(
                # The first frame is uploaded separately and referenced by URI.
                image_uri=(await upload_images_to_comfyapi(cls, image, max_images=1, mime_type="image/png"))[0],
                prompt=prompt,
                model=MODELS_MAP[model],
                duration=duration,
                resolution=resolution,
                fps=fps,
                generate_audio=generate_audio,
            ),
            as_binary=True,
            max_retries=1,
        )
        return IO.NodeOutput(InputImpl.VideoFromFile(BytesIO(response)))


class LtxvApiExtension(ComfyExtension):
    """Extension that registers the LTXV API nodes."""

    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [
            TextToVideoNode,
            ImageToVideoNode,
        ]


async def comfy_entrypoint() -> LtxvApiExtension:
    return LtxvApiExtension()
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/nodes_ltxv.py", "license": "GNU General Public License v3.0", "lines": 199, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
Comfy-Org/ComfyUI:comfy_api_nodes/util/_helpers.py
"""Small shared helpers for the Comfy API nodes: interruption checks, auth
headers, interrupt-aware sleeping, and URL/file utilities."""

import asyncio
import contextlib
import os
import re
import time
from collections.abc import Callable
from io import BytesIO

from yarl import URL

from comfy.cli_args import args
from comfy.model_management import processing_interrupted
from comfy_api.latest import IO

from .common_exceptions import ProcessingInterrupted

_HAS_PCT_ESC = re.compile(r"%[0-9A-Fa-f]{2}")  # any % followed by 2 hex digits
_HAS_BAD_PCT = re.compile(r"%(?![0-9A-Fa-f]{2})")  # any % not followed by 2 hex digits


def is_processing_interrupted() -> bool:
    """Return True if user/runtime requested interruption."""
    return processing_interrupted()


def get_node_id(node_cls: type[IO.ComfyNode]) -> str:
    """Return the unique id of the executing node (from hidden inputs)."""
    return node_cls.hidden.unique_id


def get_auth_header(node_cls: type[IO.ComfyNode]) -> dict[str, str]:
    """Build the auth header for comfy.org API calls.

    The OAuth bearer token takes precedence over the API key; an empty dict
    is returned when neither credential is present.
    """
    if node_cls.hidden.auth_token_comfy_org:
        return {"Authorization": f"Bearer {node_cls.hidden.auth_token_comfy_org}"}
    if node_cls.hidden.api_key_comfy_org:
        return {"X-API-KEY": node_cls.hidden.api_key_comfy_org}
    return {}


def default_base_url() -> str:
    """Return the configured API base URL, falling back to api.comfy.org."""
    return getattr(args, "comfy_api_base", "https://api.comfy.org")


async def sleep_with_interrupt(
    seconds: float,
    node_cls: type[IO.ComfyNode] | None,
    label: str | None = None,
    start_ts: float | None = None,
    estimated_total: int | None = None,
    *,
    display_callback: Callable[[type[IO.ComfyNode], str, int, int | None], None] | None = None,
):
    """
    Sleep in 1s slices while:
      - Checking for interruption (raises ProcessingInterrupted).
      - Optionally emitting time progress via display_callback (if provided).
    """
    end = time.monotonic() + seconds
    while True:
        if is_processing_interrupted():
            raise ProcessingInterrupted("Task cancelled")
        now = time.monotonic()
        if start_ts is not None and label and display_callback:
            # Progress display is best-effort; never let UI errors break the wait.
            with contextlib.suppress(Exception):
                display_callback(node_cls, label, int(now - start_ts), estimated_total)
        if now >= end:
            break
        await asyncio.sleep(min(1.0, end - now))


def mimetype_to_extension(mime_type: str) -> str:
    """Converts a MIME type to a file extension."""
    return mime_type.split("/")[-1].lower()


def get_fs_object_size(path_or_object: str | BytesIO) -> int:
    """Return the size in bytes of a file path or an in-memory BytesIO buffer."""
    if isinstance(path_or_object, str):
        return os.path.getsize(path_or_object)
    # getbuffer().nbytes measures the buffer without copying it, unlike
    # len(getvalue()) which duplicates the entire contents just to count them.
    return path_or_object.getbuffer().nbytes


def to_aiohttp_url(url: str) -> URL:
    """If `url` appears to be already percent-encoded (contains at least one valid %HH escape and
    no malformed '%' sequences) and contains no raw whitespace/control characters preserve
    the original encoding byte-for-byte (important for signed/presigned URLs).
    Otherwise, return `URL(url)` and allow yarl to normalize/quote as needed."""
    if any(c.isspace() for c in url) or any(ord(c) < 0x20 for c in url):
        # Avoid encoded=True if URL contains raw whitespace/control chars
        return URL(url)
    if _HAS_PCT_ESC.search(url) and not _HAS_BAD_PCT.search(url):
        # Preserve encoding only if it appears pre-encoded AND has no invalid % sequences
        return URL(url, encoded=True)
    return URL(url)
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/util/_helpers.py", "license": "GNU General Public License v3.0", "lines": 71, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
Comfy-Org/ComfyUI:comfy_api_nodes/util/client.py
import asyncio import contextlib import json import logging import time import uuid from collections.abc import Callable, Iterable from dataclasses import dataclass from enum import Enum from io import BytesIO from typing import Any, Literal, TypeVar from urllib.parse import urljoin, urlparse import aiohttp from aiohttp.client_exceptions import ClientError, ContentTypeError from pydantic import BaseModel from comfy import utils from comfy_api.latest import IO from server import PromptServer from . import request_logger from ._helpers import ( default_base_url, get_auth_header, get_node_id, is_processing_interrupted, sleep_with_interrupt, ) from .common_exceptions import ApiServerError, LocalNetworkError, ProcessingInterrupted M = TypeVar("M", bound=BaseModel) class ApiEndpoint: def __init__( self, path: str, method: Literal["GET", "POST", "PUT", "DELETE", "PATCH"] = "GET", *, query_params: dict[str, Any] | None = None, headers: dict[str, str] | None = None, ): self.path = path self.method = method self.query_params = query_params or {} self.headers = headers or {} @dataclass class _RequestConfig: node_cls: type[IO.ComfyNode] endpoint: ApiEndpoint timeout: float content_type: str data: dict[str, Any] | None files: dict[str, Any] | list[tuple[str, Any]] | None multipart_parser: Callable | None max_retries: int max_retries_on_rate_limit: int retry_delay: float retry_backoff: float wait_label: str = "Waiting" monitor_progress: bool = True estimated_total: int | None = None final_label_on_success: str | None = "Completed" progress_origin_ts: float | None = None price_extractor: Callable[[dict[str, Any]], float | None] | None = None is_rate_limited: Callable[[int, Any], bool] | None = None @dataclass class _PollUIState: started: float status_label: str = "Queued" is_queued: bool = True price: float | None = None estimated_duration: int | None = None base_processing_elapsed: float = 0.0 # sum of completed active intervals active_since: float | None = None # start time of 
current active interval (None if queued) _RETRY_STATUS = {408, 500, 502, 503, 504} # status 429 is handled separately COMPLETED_STATUSES = ["succeeded", "succeed", "success", "completed", "finished", "done", "complete"] FAILED_STATUSES = ["cancelled", "canceled", "canceling", "fail", "failed", "error"] QUEUED_STATUSES = ["created", "queued", "queueing", "submitted", "initializing"] async def sync_op( cls: type[IO.ComfyNode], endpoint: ApiEndpoint, *, response_model: type[M], price_extractor: Callable[[M | Any], float | None] | None = None, data: BaseModel | None = None, files: dict[str, Any] | list[tuple[str, Any]] | None = None, content_type: str = "application/json", timeout: float = 3600.0, multipart_parser: Callable | None = None, max_retries: int = 3, retry_delay: float = 1.0, retry_backoff: float = 2.0, wait_label: str = "Waiting for server", estimated_duration: int | None = None, final_label_on_success: str | None = "Completed", progress_origin_ts: float | None = None, monitor_progress: bool = True, max_retries_on_rate_limit: int = 16, is_rate_limited: Callable[[int, Any], bool] | None = None, ) -> M: raw = await sync_op_raw( cls, endpoint, price_extractor=_wrap_model_extractor(response_model, price_extractor), data=data, files=files, content_type=content_type, timeout=timeout, multipart_parser=multipart_parser, max_retries=max_retries, retry_delay=retry_delay, retry_backoff=retry_backoff, wait_label=wait_label, estimated_duration=estimated_duration, as_binary=False, final_label_on_success=final_label_on_success, progress_origin_ts=progress_origin_ts, monitor_progress=monitor_progress, max_retries_on_rate_limit=max_retries_on_rate_limit, is_rate_limited=is_rate_limited, ) if not isinstance(raw, dict): raise Exception("Expected JSON response to validate into a Pydantic model, got non-JSON (binary or text).") return _validate_or_raise(response_model, raw) async def poll_op( cls: type[IO.ComfyNode], poll_endpoint: ApiEndpoint, *, response_model: type[M], 
status_extractor: Callable[[M | Any], str | int | None], progress_extractor: Callable[[M | Any], int | None] | None = None, price_extractor: Callable[[M | Any], float | None] | None = None, completed_statuses: list[str | int] | None = None, failed_statuses: list[str | int] | None = None, queued_statuses: list[str | int] | None = None, data: BaseModel | None = None, poll_interval: float = 5.0, max_poll_attempts: int = 160, timeout_per_poll: float = 120.0, max_retries_per_poll: int = 10, retry_delay_per_poll: float = 1.0, retry_backoff_per_poll: float = 1.4, estimated_duration: int | None = None, cancel_endpoint: ApiEndpoint | None = None, cancel_timeout: float = 10.0, ) -> M: raw = await poll_op_raw( cls, poll_endpoint=poll_endpoint, status_extractor=_wrap_model_extractor(response_model, status_extractor), progress_extractor=_wrap_model_extractor(response_model, progress_extractor), price_extractor=_wrap_model_extractor(response_model, price_extractor), completed_statuses=completed_statuses, failed_statuses=failed_statuses, queued_statuses=queued_statuses, data=data, poll_interval=poll_interval, max_poll_attempts=max_poll_attempts, timeout_per_poll=timeout_per_poll, max_retries_per_poll=max_retries_per_poll, retry_delay_per_poll=retry_delay_per_poll, retry_backoff_per_poll=retry_backoff_per_poll, estimated_duration=estimated_duration, cancel_endpoint=cancel_endpoint, cancel_timeout=cancel_timeout, ) if not isinstance(raw, dict): raise Exception("Expected JSON response to validate into a Pydantic model, got non-JSON (binary or text).") return _validate_or_raise(response_model, raw) async def sync_op_raw( cls: type[IO.ComfyNode], endpoint: ApiEndpoint, *, price_extractor: Callable[[dict[str, Any]], float | None] | None = None, data: dict[str, Any] | BaseModel | None = None, files: dict[str, Any] | list[tuple[str, Any]] | None = None, content_type: str = "application/json", timeout: float = 3600.0, multipart_parser: Callable | None = None, max_retries: int = 3, 
retry_delay: float = 1.0, retry_backoff: float = 2.0, wait_label: str = "Waiting for server", estimated_duration: int | None = None, as_binary: bool = False, final_label_on_success: str | None = "Completed", progress_origin_ts: float | None = None, monitor_progress: bool = True, max_retries_on_rate_limit: int = 16, is_rate_limited: Callable[[int, Any], bool] | None = None, ) -> dict[str, Any] | bytes: """ Make a single network request. - If as_binary=False (default): returns JSON dict (or {'_raw': '<text>'} if non-JSON). - If as_binary=True: returns bytes. """ if isinstance(data, BaseModel): data = data.model_dump(exclude_none=True) for k, v in list(data.items()): if isinstance(v, Enum): data[k] = v.value cfg = _RequestConfig( node_cls=cls, endpoint=endpoint, timeout=timeout, content_type=content_type, data=data, files=files, multipart_parser=multipart_parser, max_retries=max_retries, retry_delay=retry_delay, retry_backoff=retry_backoff, wait_label=wait_label, monitor_progress=monitor_progress, estimated_total=estimated_duration, final_label_on_success=final_label_on_success, progress_origin_ts=progress_origin_ts, price_extractor=price_extractor, max_retries_on_rate_limit=max_retries_on_rate_limit, is_rate_limited=is_rate_limited, ) return await _request_base(cfg, expect_binary=as_binary) async def poll_op_raw( cls: type[IO.ComfyNode], poll_endpoint: ApiEndpoint, *, status_extractor: Callable[[dict[str, Any]], str | int | None], progress_extractor: Callable[[dict[str, Any]], int | None] | None = None, price_extractor: Callable[[dict[str, Any]], float | None] | None = None, completed_statuses: list[str | int] | None = None, failed_statuses: list[str | int] | None = None, queued_statuses: list[str | int] | None = None, data: dict[str, Any] | BaseModel | None = None, poll_interval: float = 5.0, max_poll_attempts: int = 160, timeout_per_poll: float = 120.0, max_retries_per_poll: int = 10, retry_delay_per_poll: float = 1.0, retry_backoff_per_poll: float = 1.4, 
estimated_duration: int | None = None, cancel_endpoint: ApiEndpoint | None = None, cancel_timeout: float = 10.0, ) -> dict[str, Any]: """ Polls an endpoint until the task reaches a terminal state. Displays time while queued/processing, checks interruption every second, and calls Cancel endpoint (if provided) on interruption. Uses default complete, failed and queued states assumption. Returns the final JSON response from the poll endpoint. """ completed_states = _normalize_statuses(COMPLETED_STATUSES if completed_statuses is None else completed_statuses) failed_states = _normalize_statuses(FAILED_STATUSES if failed_statuses is None else failed_statuses) queued_states = _normalize_statuses(QUEUED_STATUSES if queued_statuses is None else queued_statuses) started = time.monotonic() consumed_attempts = 0 # counts only non-queued polls progress_bar = utils.ProgressBar(100) if progress_extractor else None last_progress: int | None = None state = _PollUIState(started=started, estimated_duration=estimated_duration) stop_ticker = asyncio.Event() async def _ticker(): """Emit a UI update every second while polling is in progress.""" try: while not stop_ticker.is_set(): if is_processing_interrupted(): break now = time.monotonic() proc_elapsed = state.base_processing_elapsed + ( (now - state.active_since) if state.active_since is not None else 0.0 ) _display_time_progress( cls, status=state.status_label, elapsed_seconds=int(now - state.started), estimated_total=state.estimated_duration, price=state.price, is_queued=state.is_queued, processing_elapsed_seconds=int(proc_elapsed), ) await asyncio.sleep(1.0) except Exception as exc: logging.debug("Polling ticker exited: %s", exc) ticker_task = asyncio.create_task(_ticker()) try: while consumed_attempts < max_poll_attempts: try: resp_json = await sync_op_raw( cls, poll_endpoint, data=data, timeout=timeout_per_poll, max_retries=max_retries_per_poll, retry_delay=retry_delay_per_poll, retry_backoff=retry_backoff_per_poll, 
wait_label="Checking", estimated_duration=None, as_binary=False, final_label_on_success=None, monitor_progress=False, ) if not isinstance(resp_json, dict): raise Exception("Polling endpoint returned non-JSON response.") except ProcessingInterrupted: if cancel_endpoint: with contextlib.suppress(Exception): await sync_op_raw( cls, cancel_endpoint, timeout=cancel_timeout, max_retries=0, wait_label="Cancelling task", estimated_duration=None, as_binary=False, final_label_on_success=None, monitor_progress=False, ) raise try: status = _normalize_status_value(status_extractor(resp_json)) except Exception as e: logging.error("Status extraction failed: %s", e) status = None if price_extractor: new_price = price_extractor(resp_json) if new_price is not None: state.price = new_price if progress_extractor: new_progress = progress_extractor(resp_json) if new_progress is not None and last_progress != new_progress: progress_bar.update_absolute(new_progress, total=100) last_progress = new_progress now_ts = time.monotonic() is_queued = status in queued_states if is_queued: if state.active_since is not None: # If we just moved from active -> queued, close the active interval state.base_processing_elapsed += now_ts - state.active_since state.active_since = None else: if state.active_since is None: # If we just moved from queued -> active, open a new active interval state.active_since = now_ts state.is_queued = is_queued state.status_label = status or ("Queued" if is_queued else "Processing") if status in completed_states: if state.active_since is not None: state.base_processing_elapsed += now_ts - state.active_since state.active_since = None stop_ticker.set() with contextlib.suppress(Exception): await ticker_task if progress_bar and last_progress != 100: progress_bar.update_absolute(100, total=100) _display_time_progress( cls, status=status if status else "Completed", elapsed_seconds=int(now_ts - started), estimated_total=estimated_duration, price=state.price, is_queued=False, 
processing_elapsed_seconds=int(state.base_processing_elapsed), ) return resp_json if status in failed_states: msg = f"Task failed: {json.dumps(resp_json)}" logging.error(msg) raise Exception(msg) try: await sleep_with_interrupt(poll_interval, cls, None, None, None) except ProcessingInterrupted: if cancel_endpoint: with contextlib.suppress(Exception): await sync_op_raw( cls, cancel_endpoint, timeout=cancel_timeout, max_retries=0, wait_label="Cancelling task", estimated_duration=None, as_binary=False, final_label_on_success=None, monitor_progress=False, ) raise if not is_queued: consumed_attempts += 1 raise Exception( f"Polling timed out after {max_poll_attempts} non-queued attempts " f"(~{int(max_poll_attempts * poll_interval)}s of active polling)." ) except ProcessingInterrupted: raise except (LocalNetworkError, ApiServerError): raise except Exception as e: raise Exception(f"Polling aborted due to error: {e}") from e finally: stop_ticker.set() with contextlib.suppress(Exception): await ticker_task def _display_text( node_cls: type[IO.ComfyNode], text: str | None, *, status: str | int | None = None, price: float | None = None, ) -> None: display_lines: list[str] = [] if status: display_lines.append(f"Status: {status.capitalize() if isinstance(status, str) else status}") if price is not None: p = f"{float(price) * 211:,.1f}".rstrip("0").rstrip(".") if p != "0": display_lines.append(f"Price: {p} credits") if text is not None: display_lines.append(text) if display_lines: PromptServer.instance.send_progress_text("\n".join(display_lines), get_node_id(node_cls)) def _display_time_progress( node_cls: type[IO.ComfyNode], status: str | int | None, elapsed_seconds: int, estimated_total: int | None = None, *, price: float | None = None, is_queued: bool | None = None, processing_elapsed_seconds: int | None = None, ) -> None: if estimated_total is not None and estimated_total > 0 and is_queued is False: pe = processing_elapsed_seconds if processing_elapsed_seconds is not None 
else elapsed_seconds remaining = max(0, int(estimated_total) - int(pe)) time_line = f"Time elapsed: {int(elapsed_seconds)}s (~{remaining}s remaining)" else: time_line = f"Time elapsed: {int(elapsed_seconds)}s" _display_text(node_cls, time_line, status=status, price=price) async def _diagnose_connectivity() -> dict[str, bool]: """Best-effort connectivity diagnostics to distinguish local vs. server issues.""" results = { "internet_accessible": False, "api_accessible": False, } timeout = aiohttp.ClientTimeout(total=5.0) async with aiohttp.ClientSession(timeout=timeout) as session: with contextlib.suppress(ClientError, OSError): async with session.get("https://www.google.com") as resp: results["internet_accessible"] = resp.status < 500 if not results["internet_accessible"]: return results parsed = urlparse(default_base_url()) health_url = f"{parsed.scheme}://{parsed.netloc}/health" with contextlib.suppress(ClientError, OSError): async with session.get(health_url) as resp: results["api_accessible"] = resp.status < 500 return results def _unpack_tuple(t: tuple) -> tuple[str, Any, str]: """Normalize (filename, value, content_type).""" if len(t) == 2: return t[0], t[1], "application/octet-stream" if len(t) == 3: return t[0], t[1], t[2] raise ValueError("files tuple must be (filename, file[, content_type])") def _merge_params(endpoint_params: dict[str, Any], method: str, data: dict[str, Any] | None) -> dict[str, Any]: params = dict(endpoint_params or {}) if method.upper() == "GET" and data: for k, v in data.items(): if v is not None: params[k] = v return params def _friendly_http_message(status: int, body: Any) -> str: if status == 401: return "Unauthorized: Please login first to use this node." if status == 402: return "Payment Required: Please add credits to your account to use this node." if status == 409: return "There is a problem with your account. Please contact support@comfy.org." 
if status == 429: return "Rate Limit Exceeded: The server returned 429 after all retry attempts. Please wait and try again." try: if isinstance(body, dict): err = body.get("error") if isinstance(err, dict): msg = err.get("message") typ = err.get("type") if msg and typ: return f"API Error: {msg} (Type: {typ})" if msg: return f"API Error: {msg}" return f"API Error: {json.dumps(body)}" else: txt = str(body) if len(txt) <= 200: return f"API Error (raw): {txt}" return f"API Error (status {status})" except Exception: return f"HTTP {status}: Unknown error" def _generate_operation_id(method: str, path: str, attempt: int) -> str: slug = path.strip("/").replace("/", "_") or "op" return f"{method}_{slug}_try{attempt}_{uuid.uuid4().hex[:8]}" def _snapshot_request_body_for_logging( content_type: str, method: str, data: dict[str, Any] | None, files: dict[str, Any] | list[tuple[str, Any]] | None, ) -> dict[str, Any] | str | None: if method.upper() == "GET": return None if content_type == "multipart/form-data": form_fields = sorted([k for k, v in (data or {}).items() if v is not None]) file_fields: list[dict[str, str]] = [] if files: file_iter = files if isinstance(files, list) else list(files.items()) for field_name, file_obj in file_iter: if file_obj is None: continue if isinstance(file_obj, tuple): filename = file_obj[0] else: filename = getattr(file_obj, "name", field_name) file_fields.append({"field": field_name, "filename": str(filename or "")}) return {"_multipart": True, "form_fields": form_fields, "file_fields": file_fields} if content_type == "application/x-www-form-urlencoded": return data or {} return data or {} async def _request_base(cfg: _RequestConfig, expect_binary: bool): """Core request with retries, per-second interruption monitoring, true cancellation, and friendly errors.""" url = cfg.endpoint.path parsed_url = urlparse(url) if not parsed_url.scheme and not parsed_url.netloc: # is URL relative? 
url = urljoin(default_base_url().rstrip("/") + "/", url.lstrip("/")) method = cfg.endpoint.method params = _merge_params(cfg.endpoint.query_params, method, cfg.data if method == "GET" else None) async def _monitor(stop_evt: asyncio.Event, start_ts: float): """Every second: update elapsed time and signal interruption.""" try: while not stop_evt.is_set(): if is_processing_interrupted(): return if cfg.monitor_progress: _display_time_progress( cfg.node_cls, cfg.wait_label, int(time.monotonic() - start_ts), cfg.estimated_total ) await asyncio.sleep(1.0) except asyncio.CancelledError: return # normal shutdown start_time = cfg.progress_origin_ts if cfg.progress_origin_ts is not None else time.monotonic() attempt = 0 delay = cfg.retry_delay rate_limit_attempts = 0 rate_limit_delay = cfg.retry_delay operation_succeeded: bool = False final_elapsed_seconds: int | None = None extracted_price: float | None = None while True: attempt += 1 stop_event = asyncio.Event() monitor_task: asyncio.Task | None = None sess: aiohttp.ClientSession | None = None operation_id = _generate_operation_id(method, cfg.endpoint.path, attempt) logging.debug("[DEBUG] HTTP %s %s (attempt %d)", method, url, attempt) payload_headers = {"Accept": "*/*"} if expect_binary else {"Accept": "application/json"} if not parsed_url.scheme and not parsed_url.netloc: # is URL relative? 
payload_headers.update(get_auth_header(cfg.node_cls)) if cfg.endpoint.headers: payload_headers.update(cfg.endpoint.headers) payload_kw: dict[str, Any] = {"headers": payload_headers} if method == "GET": payload_headers.pop("Content-Type", None) request_body_log = _snapshot_request_body_for_logging(cfg.content_type, method, cfg.data, cfg.files) try: if cfg.monitor_progress: monitor_task = asyncio.create_task(_monitor(stop_event, start_time)) timeout = aiohttp.ClientTimeout(total=cfg.timeout) sess = aiohttp.ClientSession(timeout=timeout) if cfg.content_type == "multipart/form-data" and method != "GET": # aiohttp will set Content-Type boundary; remove any fixed Content-Type payload_headers.pop("Content-Type", None) if cfg.multipart_parser and cfg.data: form = cfg.multipart_parser(cfg.data) if not isinstance(form, aiohttp.FormData): raise ValueError("multipart_parser must return aiohttp.FormData") else: form = aiohttp.FormData(default_to_multipart=True) if cfg.data: for k, v in cfg.data.items(): if v is None: continue form.add_field(k, str(v) if not isinstance(v, (bytes, bytearray)) else v) if cfg.files: file_iter = cfg.files if isinstance(cfg.files, list) else cfg.files.items() for field_name, file_obj in file_iter: if file_obj is None: continue if isinstance(file_obj, tuple): filename, file_value, content_type = _unpack_tuple(file_obj) else: filename = getattr(file_obj, "name", field_name) file_value = file_obj content_type = "application/octet-stream" # Attempt to rewind BytesIO for retries if isinstance(file_value, BytesIO): with contextlib.suppress(Exception): file_value.seek(0) form.add_field(field_name, file_value, filename=filename, content_type=content_type) payload_kw["data"] = form elif cfg.content_type == "application/x-www-form-urlencoded" and method != "GET": payload_headers["Content-Type"] = "application/x-www-form-urlencoded" payload_kw["data"] = cfg.data or {} elif method != "GET": payload_headers["Content-Type"] = "application/json" payload_kw["json"] 
= cfg.data or {} request_logger.log_request_response( operation_id=operation_id, request_method=method, request_url=url, request_headers=dict(payload_headers) if payload_headers else None, request_params=dict(params) if params else None, request_data=request_body_log, ) req_coro = sess.request(method, url, params=params, **payload_kw) req_task = asyncio.create_task(req_coro) # Race: request vs. monitor (interruption) tasks = {req_task} if monitor_task: tasks.add(monitor_task) done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED) if monitor_task and monitor_task in done: # Interrupted – cancel the request and abort if req_task in pending: req_task.cancel() raise ProcessingInterrupted("Task cancelled") # Otherwise, request finished resp = await req_task async with resp: if resp.status >= 400: try: body = await resp.json() except (ContentTypeError, json.JSONDecodeError): body = await resp.text() should_retry = False wait_time = 0.0 retry_label = "" is_rl = resp.status == 429 or ( cfg.is_rate_limited is not None and cfg.is_rate_limited(resp.status, body) ) if is_rl and rate_limit_attempts < cfg.max_retries_on_rate_limit: rate_limit_attempts += 1 wait_time = min(rate_limit_delay, 30.0) rate_limit_delay *= cfg.retry_backoff retry_label = f"rate-limit retry {rate_limit_attempts} of {cfg.max_retries_on_rate_limit}" should_retry = True elif resp.status in _RETRY_STATUS and (attempt - rate_limit_attempts) <= cfg.max_retries: wait_time = delay delay *= cfg.retry_backoff retry_label = f"retry {attempt - rate_limit_attempts} of {cfg.max_retries}" should_retry = True if should_retry: logging.warning( "HTTP %s %s -> %s. 
Waiting %.2fs (%s).", method, url, resp.status, wait_time, retry_label, ) request_logger.log_request_response( operation_id=operation_id, request_method=method, request_url=url, response_status_code=resp.status, response_headers=dict(resp.headers), response_content=body, error_message=f"HTTP {resp.status} ({retry_label}, will retry in {wait_time:.1f}s)", ) await sleep_with_interrupt( wait_time, cfg.node_cls, cfg.wait_label if cfg.monitor_progress else None, start_time if cfg.monitor_progress else None, cfg.estimated_total, display_callback=_display_time_progress if cfg.monitor_progress else None, ) continue msg = _friendly_http_message(resp.status, body) request_logger.log_request_response( operation_id=operation_id, request_method=method, request_url=url, response_status_code=resp.status, response_headers=dict(resp.headers), response_content=body, error_message=msg, ) raise Exception(msg) if expect_binary: buff = bytearray() last_tick = time.monotonic() async for chunk in resp.content.iter_chunked(64 * 1024): buff.extend(chunk) now = time.monotonic() if now - last_tick >= 1.0: last_tick = now if is_processing_interrupted(): raise ProcessingInterrupted("Task cancelled") if cfg.monitor_progress: _display_time_progress( cfg.node_cls, cfg.wait_label, int(now - start_time), cfg.estimated_total ) bytes_payload = bytes(buff) operation_succeeded = True final_elapsed_seconds = int(time.monotonic() - start_time) request_logger.log_request_response( operation_id=operation_id, request_method=method, request_url=url, response_status_code=resp.status, response_headers=dict(resp.headers), response_content=bytes_payload, ) return bytes_payload else: try: payload = await resp.json() response_content_to_log: Any = payload except (ContentTypeError, json.JSONDecodeError): text = await resp.text() try: payload = json.loads(text) if text else {} except json.JSONDecodeError: payload = {"_raw": text} response_content_to_log = payload if isinstance(payload, dict) else text with 
contextlib.suppress(Exception): extracted_price = cfg.price_extractor(payload) if cfg.price_extractor else None operation_succeeded = True final_elapsed_seconds = int(time.monotonic() - start_time) request_logger.log_request_response( operation_id=operation_id, request_method=method, request_url=url, response_status_code=resp.status, response_headers=dict(resp.headers), response_content=response_content_to_log, ) return payload except ProcessingInterrupted: logging.debug("Polling was interrupted by user") raise except (ClientError, OSError) as e: if (attempt - rate_limit_attempts) <= cfg.max_retries: logging.warning( "Connection error calling %s %s. Retrying in %.2fs (%d/%d): %s", method, url, delay, attempt - rate_limit_attempts, cfg.max_retries, str(e), ) request_logger.log_request_response( operation_id=operation_id, request_method=method, request_url=url, request_headers=dict(payload_headers) if payload_headers else None, request_params=dict(params) if params else None, request_data=request_body_log, error_message=f"{type(e).__name__}: {str(e)} (will retry)", ) await sleep_with_interrupt( delay, cfg.node_cls, cfg.wait_label if cfg.monitor_progress else None, start_time if cfg.monitor_progress else None, cfg.estimated_total, display_callback=_display_time_progress if cfg.monitor_progress else None, ) delay *= cfg.retry_backoff continue diag = await _diagnose_connectivity() if not diag["internet_accessible"]: request_logger.log_request_response( operation_id=operation_id, request_method=method, request_url=url, request_headers=dict(payload_headers) if payload_headers else None, request_params=dict(params) if params else None, request_data=request_body_log, error_message=f"LocalNetworkError: {str(e)}", ) raise LocalNetworkError( "Unable to connect to the API server due to local network issues. " "Please check your internet connection and try again." 
                ) from e
                # Internet is reachable but the API host is not: report it as a
                # server-side outage rather than a local network problem.
                request_logger.log_request_response(
                    operation_id=operation_id,
                    request_method=method,
                    request_url=url,
                    request_headers=dict(payload_headers) if payload_headers else None,
                    request_params=dict(params) if params else None,
                    request_data=request_body_log,
                    error_message=f"ApiServerError: {str(e)}",
                )
                raise ApiServerError(
                    f"The API server at {default_base_url()} is currently unreachable. "
                    f"The service may be experiencing issues."
                ) from e
    finally:
        # Unconditional teardown: stop the interruption monitor, close the
        # HTTP session, and (only on success) render the final progress line.
        stop_event.set()
        if monitor_task:
            monitor_task.cancel()
            # The cancelled task may raise CancelledError when awaited; ignore it.
            with contextlib.suppress(Exception):
                await monitor_task
        if sess:
            with contextlib.suppress(Exception):
                await sess.close()
        if operation_succeeded and cfg.monitor_progress and cfg.final_label_on_success:
            _display_time_progress(
                cfg.node_cls,
                status=cfg.final_label_on_success,
                # Prefer the elapsed time captured at completion; fall back to
                # "now" if the success path did not record it.
                elapsed_seconds=(
                    final_elapsed_seconds
                    if final_elapsed_seconds is not None
                    else int(time.monotonic() - start_time)
                ),
                estimated_total=cfg.estimated_total,
                price=extracted_price,
                is_queued=False,
                processing_elapsed_seconds=final_elapsed_seconds,
            )


def _validate_or_raise(response_model: type[M], payload: Any) -> M:
    """Validate ``payload`` into ``response_model``, logging and re-raising on failure.

    Raises a plain ``Exception`` whose message names the model, chained from the
    original validation error.
    """
    try:
        return response_model.model_validate(payload)
    except Exception as e:
        logging.error(
            "Response validation failed for %s: %s",
            getattr(response_model, "__name__", response_model),
            e,
        )
        raise Exception(
            f"Response validation failed for {getattr(response_model, '__name__', response_model)}: {e}"
        ) from e


def _wrap_model_extractor(
    response_model: type[M],
    extractor: Callable[[M], Any] | None,
) -> Callable[[dict[str, Any]], Any] | None:
    """Wrap a typed extractor so it can be used by the dict-based poller.

    Validates the dict into ``response_model`` before invoking ``extractor``.
    Uses a small per-wrapper cache keyed by ``id(dict)`` to avoid re-validating
    the same response for multiple extractors in a single poll attempt.

    Returns ``None`` when ``extractor`` is ``None`` so callers can pass the
    result straight through to the poller.
    """
    if extractor is None:
        return None

    # NOTE: keyed by id(), so entries are only valid while the original dict
    # object is alive; the cache is scoped to this wrapper, not shared globally.
    _cache: dict[int, M] = {}

    def _wrapped(d: dict[str, Any]) -> Any:
        try:
            key = id(d)
            model = _cache.get(key)
            if model is None:
                model = response_model.model_validate(d)
                _cache[key] = model
            return extractor(model)
        except Exception as e:
            # Log for diagnosis but let the poller decide how to handle it.
            logging.error("Extractor failed (typed -> dict wrapper): %s", e)
            raise

    return _wrapped


def _normalize_statuses(values: Iterable[str | int] | None) -> set[str | int]:
    """Normalize an iterable of status values into a set, dropping ``None`` results.

    Strings are lower-cased and stripped (via ``_normalize_status_value``);
    ints pass through unchanged. ``None`` or empty input yields an empty set.
    """
    if not values:
        return set()
    out: set[str | int] = set()
    for v in values:
        nv = _normalize_status_value(v)
        if nv is not None:
            out.add(nv)
    return out


def _normalize_status_value(val: str | int | None) -> str | int | None:
    """Return ``val`` with strings stripped and lower-cased; non-strings unchanged."""
    if isinstance(val, str):
        return val.strip().lower()
    return val
{ "repo_id": "Comfy-Org/ComfyUI", "file_path": "comfy_api_nodes/util/client.py", "license": "GNU General Public License v3.0", "lines": 868, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex