"""ComfyUI node: resize image batches to the closest aspect-ratio bucket or proportionally."""

import logging

import torch

from comfy.utils import common_upscale

# Aspect-ratio buckets at a base resolution of 512.
# Values are [height, width], matching how resize() unpacks them.
ASPECT_RATIO_512 = {
    "1:1": [512, 512],
    "4:3": [512, 384],
    "3:4": [384, 512],
    "16:9": [512, 288],
    "9:16": [288, 512],
    "21:9": [512, 219],
    "9:21": [219, 512],
}
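
# Illustrative note: resize() rescales this table to other base resolutions, e.g. with
# base_resolution=1024 the [512, 288] bucket becomes [1024, 576] before snapping to multiples of 16.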


def get_closest_ratio(height, width, ratios):
    """Find the closest aspect-ratio bucket for the given dimensions."""
    input_ratio = height / width
    closest_ratio = None
    closest_diff = float('inf')
    closest_size = None

    for ratio_name, size in ratios.items():
        # Bucket sizes are [height, width], so this compares height/width ratios.
        target_ratio = size[0] / size[1]
        diff = abs(input_ratio - target_ratio)
        if diff < closest_diff:
            closest_diff = diff
            closest_ratio = ratio_name
            closest_size = size

    return closest_size, closest_ratio
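
# Illustrative example: with the 512-base table above, get_closest_ratio(720, 1280, ASPECT_RATIO_512)
# returns ([288, 512], "9:16"), which resize() unpacks as height=288, width=512.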


class ResizeToClosetBucket:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "images": ("IMAGE",),
                "base_resolution": ("INT", {
                    "min": 64,
                    "max": 1280,
                    "step": 64,
                    "default": 512,
                    "tooltip": "Base resolution: in bucket mode, the closest training bucket is chosen; in proportional mode, the longest side is scaled to this value."
                }),
                "mode": (["bucket", "proportional"], {
                    "default": "bucket",
                    "tooltip": "Bucket: snap to a predefined aspect-ratio bucket; Proportional: keep the original proportions."
                }),
                "upscale_method": (cls.upscale_methods, {
                    "default": "lanczos",
                    "tooltip": "Interpolation method used for resizing."
                }),
                "crop": (["disabled", "center"],),
            }
        }

    RETURN_TYPES = ("IMAGE", "INT", "INT")
    RETURN_NAMES = ("images", "width", "height")
    FUNCTION = "resize"
    CATEGORY = "ImageProcessing"

    def resize(self, images, base_resolution, mode, upscale_method, crop):
        """
        Resize images either to the closest bucket or proportionally.

        Args:
            images: Input tensor of shape (B, H, W, C).
            base_resolution: Target resolution.
            mode: "bucket" or "proportional".
            upscale_method: Interpolation method to use.
            crop: "disabled" or "center".

        Returns:
            tuple: (resized_images, width, height)
        """
        B, H, W, C = images.shape
        aspect_ratio = H / W

        if mode == "bucket":
            # Rescale the 512-base bucket table to the requested base resolution.
            aspect_ratio_sample_size = {
                key: [x / 512 * base_resolution for x in ASPECT_RATIO_512[key]]
                for key in ASPECT_RATIO_512.keys()
            }
            closest_size, closest_ratio = get_closest_ratio(H, W, ratios=aspect_ratio_sample_size)
            # Bucket size is [height, width]; snap both sides down to multiples of 16.
            height, width = [int(x / 16) * 16 for x in closest_size]
            logging.info(f"Bucket mode - Closest bucket size: {width}x{height}")
        else:
            # Proportional mode: scale the longest side to base_resolution.
            if H > W:
                target_height = base_resolution
                target_width = int(target_height / aspect_ratio)
            else:
                target_width = base_resolution
                target_height = int(target_width * aspect_ratio)

            # Round both sides to multiples of 16 and enforce a 64px minimum.
            height = max(64, int(round(target_height / 16) * 16))
            width = max(64, int(round(target_width / 16) * 16))
            logging.info(f"Proportional mode - Resized to: {width}x{height} (aspect ratio: {aspect_ratio:.2f})")

        # common_upscale expects (B, C, H, W); convert, resize, then convert back to (B, H, W, C).
        resized_images = images.clone().movedim(-1, 1)
        resized_images = common_upscale(resized_images, width, height, upscale_method, crop)
        resized_images = resized_images.movedim(1, -1)

        return (resized_images, width, height)


NODE_CLASS_MAPPINGS = {
    "ResizeToClosetBucket": ResizeToClosetBucket
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "ResizeToClosetBucket": "Resize to Closest Bucket or Proportional"
}
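

# Minimal smoke test (illustrative sketch, not part of node registration): it assumes this
# file is run in an environment where ComfyUI's `comfy` package is importable, since
# resize() relies on comfy.utils.common_upscale.
if __name__ == "__main__":
    dummy = torch.rand(1, 720, 1280, 3)  # (B, H, W, C) landscape batch
    node = ResizeToClosetBucket()
    out_images, out_width, out_height = node.resize(
        dummy, base_resolution=512, mode="bucket", upscale_method="lanczos", crop="disabled"
    )
    # Expected: torch.Size([1, 288, 512, 3]) 512 288
    print(out_images.shape, out_width, out_height)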