Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes.
- .gitattributes +29 -0
- comfyui_layerstyle/image/image_auto_crop_node.jpg +3 -0
- comfyui_layerstyle/image/image_blend_advance_example.jpg +3 -0
- comfyui_layerstyle/image/image_blend_advance_node.jpg +3 -0
- comfyui_layerstyle/image/image_blend_example.jpg +3 -0
- comfyui_layerstyle/image/image_mask_scale_as_v2_example.jpg +3 -0
- comfyui_layerstyle/image/image_mask_scale_as_v2_node.jpg +3 -0
- comfyui_layerstyle/image/image_reel_composit_node.jpg +3 -0
- comfyui_layerstyle/image/image_reel_example.jpg +3 -0
- comfyui_layerstyle/image/image_remove_alpha_example.jpg +3 -0
- comfyui_layerstyle/image/image_scale_by_aspect_ratio_example.jpg +3 -0
- comfyui_layerstyle/image/image_scale_restore_example.jpg +3 -0
- comfyui_layerstyle/image/image_shift_example.jpg +3 -0
- comfyui_layerstyle/image/image_shift_node.jpg +3 -0
- comfyui_layerstyle/image/image_tagger_save_example.jpg +3 -0
- comfyui_layerstyle/image/inner_glow_example.jpg +3 -0
- comfyui_layerstyle/image/inner_shadow_example.jpg +3 -0
- comfyui_layerstyle/image/layer_image_transform_example.jpg +3 -0
- comfyui_layerstyle/image/layer_image_transform_node.jpg +3 -0
- comfyui_layerstyle/image/layer_mask_transform_node.jpg +3 -0
- comfyui_layerstyle/image/layercolor_title.jpg +3 -0
- comfyui_layerstyle/image/layerfilter_nodes.jpg +3 -0
- comfyui_layerstyle/image/layermask_nodes.jpg +3 -0
- comfyui_layerstyle/image/layerstyle_nodes.jpg +3 -0
- comfyui_layerstyle/image/layerstyle_title.jpg +3 -0
- comfyui_layerstyle/image/layerutility_nodes.jpg +3 -0
- comfyui_layerstyle/image/levels_example.jpg +3 -0
- comfyui_layerstyle/image/light_leak_example.jpg +3 -0
- comfyui_layerstyle/py/image_hub.py +152 -0
- comfyui_layerstyle/py/image_mask_scale_as.py +189 -0
- comfyui_layerstyle/py/image_opacity.py +87 -0
- comfyui_layerstyle/py/image_reel.py +224 -0
- comfyui_layerstyle/py/image_remove_alpha.py +64 -0
- comfyui_layerstyle/py/image_scale_by_aspect_ratio.py +158 -0
- comfyui_layerstyle/py/image_scale_by_aspect_ratio_v2.py +186 -0
- comfyui_layerstyle/py/image_scale_restore.py +112 -0
- comfyui_layerstyle/py/image_scale_restore_v2.py +132 -0
- comfyui_layerstyle/py/image_shift.py +88 -0
- comfyui_layerstyle/py/image_tagger_save.py +142 -0
- comfyui_layerstyle/py/image_to_mask.py +111 -0
- comfyui_layerstyle/py/inner_glow.py +107 -0
- comfyui_layerstyle/py/inner_glow_v2.py +114 -0
- comfyui_layerstyle/py/inner_shadow.py +102 -0
- comfyui_layerstyle/py/inner_shadow_v2.py +103 -0
- comfyui_layerstyle/py/layer_image_transform.py +94 -0
- comfyui_layerstyle/py/layer_mask_transform.py +81 -0
- comfyui_layerstyle/py/light_leak.py +85 -0
- comfyui_layerstyle/py/mask_box_detect.py +78 -0
- comfyui_layerstyle/py/mask_by_color.py +84 -0
- comfyui_layerstyle/py/mask_edge_shrink.py +81 -0
.gitattributes CHANGED

@@ -153,3 +153,32 @@ comfyui_layerstyle/image/icmask_example.jpg filter=lfs diff=lfs merge=lfs -text
 comfyui_layerstyle/image/if_example.jpg filter=lfs diff=lfs merge=lfs -text
 comfyui_layerstyle/image/image_auto_crop_v2_node.jpg filter=lfs diff=lfs merge=lfs -text
 comfyui_layerstyle/image/image_auto_crop_example.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/image_auto_crop_v3_node.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/image_auto_crop_node.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/image_blend_advance_node.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/image_blend_example.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/image_blend_advance_example.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/image_shift_node.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/image_scale_restore_example.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/image_shift_example.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/image_tagger_save_example.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/image_to_mask_example.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/image_mask_scale_as_v2_example.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/image_mask_scale_as_v2_node.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/image_reel_composit_node.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/image_remove_alpha_example.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/image_reel_example.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/layerutility_nodes.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/image_scale_by_aspect_ratio_example.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/layer_image_transform_example.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/layerstyle_nodes.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/layerstyle_title.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/layer_image_transform_node.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/layer_mask_transform_node.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/levels_example.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/inner_glow_example.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/light_leak_example.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/inner_shadow_example.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/layercolor_title.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/layermask_nodes.jpg filter=lfs diff=lfs merge=lfs -text
+comfyui_layerstyle/image/layerfilter_nodes.jpg filter=lfs diff=lfs merge=lfs -text
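All of the entries added above do the same thing: they tell Git to store each new preview JPEG through Git LFS (`filter=lfs diff=lfs merge=lfs`) and to treat it as binary (`-text`). Lines of this form are usually generated with `git lfs track "<path>"` rather than written by hand, which is consistent with the commit coming from an automated upload tool; that command is standard Git LFS usage, not something shown in this diff.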
comfyui_layerstyle/image/image_auto_crop_node.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/image_blend_advance_example.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/image_blend_advance_node.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/image_blend_example.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/image_mask_scale_as_v2_example.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/image_mask_scale_as_v2_node.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/image_reel_composit_node.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/image_reel_example.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/image_remove_alpha_example.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/image_scale_by_aspect_ratio_example.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/image_scale_restore_example.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/image_shift_example.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/image_shift_node.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/image_tagger_save_example.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/inner_glow_example.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/inner_shadow_example.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/layer_image_transform_example.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/layer_image_transform_node.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/layer_mask_transform_node.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/layercolor_title.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/layerfilter_nodes.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/layermask_nodes.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/layerstyle_nodes.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/layerstyle_title.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/layerutility_nodes.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/levels_example.jpg ADDED (Git LFS Details)
comfyui_layerstyle/image/light_leak_example.jpg ADDED (Git LFS Details)
comfyui_layerstyle/py/image_hub.py ADDED

@@ -0,0 +1,152 @@

import torch
import random
from .imagefunc import log


class ImageHub:

    def __init__(self):
        self.NODE_NAME = 'ImageHub'

    @classmethod
    def INPUT_TYPES(self):

        return {
            "required": {
                "output": ("INT", {"default": 1, "min": 1, "max": 9, "step": 1}),
                "random_output": ("BOOLEAN", {"default": False}),
            },
            "optional": {
                "input1_image": ("IMAGE",),
                "input1_mask": ("MASK",),
                "input2_image": ("IMAGE",),
                "input2_mask": ("MASK",),
                "input3_image": ("IMAGE",),
                "input3_mask": ("MASK",),
                "input4_image": ("IMAGE",),
                "input4_mask": ("MASK",),
                "input5_image": ("IMAGE",),
                "input5_mask": ("MASK",),
                "input6_image": ("IMAGE",),
                "input6_mask": ("MASK",),
                "input7_image": ("IMAGE",),
                "input7_mask": ("MASK",),
                "input8_image": ("IMAGE",),
                "input8_mask": ("MASK",),
                "input9_image": ("IMAGE",),
                "input9_mask": ("MASK",),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK",)
    RETURN_NAMES = ("image", "mask")
    FUNCTION = 'image_hub'
    CATEGORY = '😺dzNodes/LayerUtility'

    def image_hub(self, output, random_output,
                  input1_image=None, input1_mask=None,
                  input2_image=None, input2_mask=None,
                  input3_image=None, input3_mask=None,
                  input4_image=None, input4_mask=None,
                  input5_image=None, input5_mask=None,
                  input6_image=None, input6_mask=None,
                  input7_image=None, input7_mask=None,
                  input8_image=None, input8_mask=None,
                  input9_image=None, input9_mask=None,
                  ):

        output_list = []
        if input1_image is not None or input1_mask is not None:
            output_list.append(1)
        if input2_image is not None or input2_mask is not None:
            output_list.append(2)
        if input3_image is not None or input3_mask is not None:
            output_list.append(3)
        if input4_image is not None or input4_mask is not None:
            output_list.append(4)
        if input5_image is not None or input5_mask is not None:
            output_list.append(5)
        if input6_image is not None or input6_mask is not None:
            output_list.append(6)
        if input7_image is not None or input7_mask is not None:
            output_list.append(7)
        if input8_image is not None or input8_mask is not None:
            output_list.append(8)
        if input9_image is not None or input9_mask is not None:
            output_list.append(9)

        log(f"output_list={output_list}")
        if len(output_list) == 0:
            log(f"{self.NODE_NAME} skipped, because there is no input.", message_type='error')
            return (None, None)

        if random_output:
            index = random.randint(1, len(output_list))
            output = output_list[index - 1]

        ret_image = None
        ret_mask = None
        if output == 1:
            if input1_image is not None:
                ret_image = input1_image
            if input1_mask is not None:
                ret_mask = input1_mask
        elif output == 2:
            if input2_image is not None:
                ret_image = input2_image
            if input2_mask is not None:
                ret_mask = input2_mask
        elif output == 3:
            if input3_image is not None:
                ret_image = input3_image
            if input3_mask is not None:
                ret_mask = input3_mask
        elif output == 4:
            if input4_image is not None:
                ret_image = input4_image
            if input4_mask is not None:
                ret_mask = input4_mask
        elif output == 5:
            if input5_image is not None:
                ret_image = input5_image
            if input5_mask is not None:
                ret_mask = input5_mask
        elif output == 6:
            if input6_image is not None:
                ret_image = input6_image
            if input6_mask is not None:
                ret_mask = input6_mask
        elif output == 7:
            if input7_image is not None:
                ret_image = input7_image
            if input7_mask is not None:
                ret_mask = input7_mask
        elif output == 8:
            if input8_image is not None:
                ret_image = input8_image
            if input8_mask is not None:
                ret_mask = input8_mask
        else:
            if input9_image is not None:
                ret_image = input9_image
            if input9_mask is not None:
                ret_mask = input9_mask

        if ret_image is None and ret_mask is None:
            log(f"{self.NODE_NAME} has inputs {output_list}, output is {output}, but there is no corresponding input.", message_type="error")
        elif ret_image is None:
            log(f"{self.NODE_NAME} has inputs {output_list}, output is {output}, but the image is None.", message_type='finish')
        elif ret_mask is None:
            log(f"{self.NODE_NAME} has inputs {output_list}, output is {output}, but the mask is None.", message_type='finish')
        else:
            log(f"{self.NODE_NAME} has inputs {output_list}, output is {output}.", message_type='finish')

        return (ret_image, ret_mask)


NODE_CLASS_MAPPINGS = {
    "LayerUtility: ImageHub": ImageHub
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerUtility: ImageHub": "LayerUtility: ImageHub"
}
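ImageHub's selection logic is nine copies of the same branch: a slot counts as connected if either its image or its mask is present, and `random_output` overrides `output` with a randomly chosen connected slot. For readers who want that rule in isolation, here is a minimal standalone sketch (a hypothetical helper written for illustration, not part of the commit):

import random

def pick_output(inputs, output, random_output):
    # inputs: dict mapping slot number -> (image, mask); missing slots may be omitted.
    connected = sorted(n for n, (img, msk) in inputs.items()
                       if img is not None or msk is not None)
    if not connected:
        return (None, None)          # the node logs an error and returns (None, None)
    if random_output:
        output = random.choice(connected)
    return inputs.get(output, (None, None))

# Example: slots 2 and 5 are connected; selecting slot 5 returns its image and no mask.
demo = {2: ("img2", "mask2"), 5: ("img5", None)}
print(pick_output(demo, output=5, random_output=False))  # ('img5', None)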
comfyui_layerstyle/py/image_mask_scale_as.py ADDED

@@ -0,0 +1,189 @@

import torch
from PIL import Image
from .imagefunc import AnyType, log, tensor2pil, pil2tensor, image2mask, fit_resize_image


any = AnyType("*")

class ImageMaskScaleAs:

    def __init__(self):
        self.NODE_NAME = 'ImageMaskScaleAs'

    @classmethod
    def INPUT_TYPES(self):

        fit_mode = ['letterbox', 'crop', 'fill']
        method_mode = ['lanczos', 'bicubic', 'hamming', 'bilinear', 'box', 'nearest']

        return {
            "required": {
                "scale_as": (any, {}),
                "fit": (fit_mode,),
                "method": (method_mode,),
            },
            "optional": {
                "image": ("IMAGE",),
                "mask": ("MASK",),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK", "BOX", "INT", "INT")
    RETURN_NAMES = ("image", "mask", "original_size", "width", "height",)
    FUNCTION = 'image_mask_scale_as'
    CATEGORY = '😺dzNodes/LayerUtility'

    def image_mask_scale_as(self, scale_as, fit, method,
                            image=None, mask=None,
                            ):
        if scale_as.shape[0] > 0:
            _asimage = tensor2pil(scale_as[0])
        else:
            _asimage = tensor2pil(scale_as)
        target_width, target_height = _asimage.size
        _mask = Image.new('L', size=_asimage.size, color='black')
        _image = Image.new('RGB', size=_asimage.size, color='black')
        orig_width = 4
        orig_height = 4
        resize_sampler = Image.LANCZOS
        if method == "bicubic":
            resize_sampler = Image.BICUBIC
        elif method == "hamming":
            resize_sampler = Image.HAMMING
        elif method == "bilinear":
            resize_sampler = Image.BILINEAR
        elif method == "box":
            resize_sampler = Image.BOX
        elif method == "nearest":
            resize_sampler = Image.NEAREST

        ret_images = []
        ret_masks = []

        if image is not None:
            for i in image:
                i = torch.unsqueeze(i, 0)
                _image = tensor2pil(i).convert('RGB')
                orig_width, orig_height = _image.size
                _image = fit_resize_image(_image, target_width, target_height, fit, resize_sampler)
                ret_images.append(pil2tensor(_image))
        if mask is not None:
            if mask.dim() == 2:
                mask = torch.unsqueeze(mask, 0)
            for m in mask:
                m = torch.unsqueeze(m, 0)
                _mask = tensor2pil(m).convert('L')
                orig_width, orig_height = _mask.size
                _mask = fit_resize_image(_mask, target_width, target_height, fit, resize_sampler).convert('L')
                ret_masks.append(image2mask(_mask))
        if len(ret_images) > 0 and len(ret_masks) > 0:
            log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
            return (torch.cat(ret_images, dim=0), torch.cat(ret_masks, dim=0), [orig_width, orig_height], target_width, target_height,)
        elif len(ret_images) > 0 and len(ret_masks) == 0:
            log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
            return (torch.cat(ret_images, dim=0), None, [orig_width, orig_height], target_width, target_height,)
        elif len(ret_images) == 0 and len(ret_masks) > 0:
            log(f"{self.NODE_NAME} Processed {len(ret_masks)} image(s).", message_type='finish')
            return (None, torch.cat(ret_masks, dim=0), [orig_width, orig_height], target_width, target_height,)
        else:
            log(f"Error: {self.NODE_NAME} skipped, because no available image or mask was found.", message_type='error')
            return (None, None, [orig_width, orig_height], 0, 0,)


class LS_ImageMaskScaleAsV2:

    def __init__(self):
        self.NODE_NAME = 'ImageMaskScaleAsV2'

    @classmethod
    def INPUT_TYPES(self):

        fit_mode = ['letterbox', 'crop', 'fill']
        method_mode = ['lanczos', 'bicubic', 'hamming', 'bilinear', 'box', 'nearest']

        return {
            "required": {
                "scale_as": (any, {}),
                "fit": (fit_mode,),
                "method": (method_mode,),
                "background_color": ("STRING", {"default": "#FFFFFF"},),
            },
            "optional": {
                "image": ("IMAGE",),
                "mask": ("MASK",),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK", "BOX", "INT", "INT")
    RETURN_NAMES = ("image", "mask", "original_size", "width", "height",)
    FUNCTION = 'image_mask_scale_as_v2'
    CATEGORY = '😺dzNodes/LayerUtility'

    def image_mask_scale_as_v2(self, scale_as, fit, method, background_color,
                               image=None, mask=None,
                               ):
        if scale_as.shape[0] > 0:
            _asimage = tensor2pil(scale_as[0])
        else:
            _asimage = tensor2pil(scale_as)
        target_width, target_height = _asimage.size
        _mask = Image.new('L', size=_asimage.size, color='black')
        _image = Image.new('RGB', size=_asimage.size, color=background_color)
        orig_width = 4
        orig_height = 4
        resize_sampler = Image.LANCZOS
        if method == "bicubic":
            resize_sampler = Image.BICUBIC
        elif method == "hamming":
            resize_sampler = Image.HAMMING
        elif method == "bilinear":
            resize_sampler = Image.BILINEAR
        elif method == "box":
            resize_sampler = Image.BOX
        elif method == "nearest":
            resize_sampler = Image.NEAREST

        ret_images = []
        ret_masks = []

        if image is not None:
            for i in image:
                i = torch.unsqueeze(i, 0)
                _image = tensor2pil(i).convert('RGB')
                orig_width, orig_height = _image.size
                _image = fit_resize_image(_image, target_width, target_height, fit, resize_sampler, background_color=background_color)
                ret_images.append(pil2tensor(_image))
        if mask is not None:
            if mask.dim() == 2:
                mask = torch.unsqueeze(mask, 0)
            for m in mask:
                m = torch.unsqueeze(m, 0)
                _mask = tensor2pil(m).convert('L')
                orig_width, orig_height = _mask.size
                _mask = fit_resize_image(_mask, target_width, target_height, fit, resize_sampler, background_color=background_color).convert('L')
                ret_masks.append(image2mask(_mask))
        if len(ret_images) > 0 and len(ret_masks) > 0:
            log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
            return (torch.cat(ret_images, dim=0), torch.cat(ret_masks, dim=0), [orig_width, orig_height], target_width,
                    target_height,)
        elif len(ret_images) > 0 and len(ret_masks) == 0:
            log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
            return (torch.cat(ret_images, dim=0), None, [orig_width, orig_height], target_width, target_height,)
        elif len(ret_images) == 0 and len(ret_masks) > 0:
            log(f"{self.NODE_NAME} Processed {len(ret_masks)} image(s).", message_type='finish')
            return (None, torch.cat(ret_masks, dim=0), [orig_width, orig_height], target_width, target_height,)
        else:
            log(f"Error: {self.NODE_NAME} skipped, because no available image or mask was found.",
                message_type='error')
            return (None, None, [orig_width, orig_height], 0, 0,)

NODE_CLASS_MAPPINGS = {
    "LayerUtility: ImageMaskScaleAs": ImageMaskScaleAs,
    "LayerUtility: ImageMaskScaleAsV2": LS_ImageMaskScaleAsV2,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerUtility: ImageMaskScaleAs": "LayerUtility: Image Mask Scale As",
    "LayerUtility: ImageMaskScaleAsV2": "LayerUtility: Image Mask Scale As V2",
}
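`fit_resize_image`, `tensor2pil`, `pil2tensor`, and `image2mask` all come from the package's `imagefunc` module, which lies outside this 50-file view. Based only on the three `fit` choices and how the nodes call it, a plausible sketch of what the resize helper does; this is an assumption about its behavior, not the actual implementation:

from PIL import Image

def fit_resize_image_sketch(img, target_w, target_h, fit, sampler, background_color='#000000'):
    # 'fill' stretches to the target size, ignoring aspect ratio.
    if fit == 'fill':
        return img.resize((target_w, target_h), sampler)
    scale_letterbox = min(target_w / img.width, target_h / img.height)
    scale_crop = max(target_w / img.width, target_h / img.height)
    scale = scale_letterbox if fit == 'letterbox' else scale_crop
    resized = img.resize((int(img.width * scale), int(img.height * scale)), sampler)
    canvas = Image.new(img.mode, (target_w, target_h), color=background_color)
    # Center the resized image: 'crop' overflows the canvas, 'letterbox' pads it
    # with the background color.
    canvas.paste(resized, ((target_w - resized.width) // 2, (target_h - resized.height) // 2))
    return canvas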
comfyui_layerstyle/py/image_opacity.py ADDED

@@ -0,0 +1,87 @@

import torch
from PIL import Image, ImageChops
from .imagefunc import log, tensor2pil, pil2tensor, image2mask


class ImageOpacity:

    def __init__(self):
        self.NODE_NAME = 'ImageOpacity'

    @classmethod
    def INPUT_TYPES(self):

        return {
            "required": {
                "image": ("IMAGE",),
                "opacity": ("INT", {"default": 100, "min": 0, "max": 100, "step": 1}),  # opacity (percent)
                "invert_mask": ("BOOLEAN", {"default": True}),  # invert the mask
            },
            "optional": {
                "mask": ("MASK",),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK",)
    RETURN_NAMES = ("image", "mask",)
    FUNCTION = 'image_opacity'
    CATEGORY = '😺dzNodes/LayerUtility'

    def image_opacity(self, image, opacity, invert_mask,
                      mask=None,
                      ):

        ret_images = []
        ret_masks = []
        l_images = []
        l_masks = []
        for l in image:
            l_images.append(torch.unsqueeze(l, 0))
            m = tensor2pil(l)
            if m.mode == 'RGBA':
                l_masks.append(m.split()[-1])
            else:
                l_masks.append(Image.new('L', size=m.size, color='white'))

        if mask is not None:
            if mask.dim() == 2:
                mask = torch.unsqueeze(mask, 0)
            l_masks = []
            for m in mask:
                if invert_mask:
                    m = 1 - m
                l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))

        max_batch = max(len(l_images), len(l_masks))

        for i in range(max_batch):
            _image = l_images[i] if i < len(l_images) else l_images[-1]
            _image = tensor2pil(_image)
            _mask = l_masks[i] if i < len(l_masks) else l_masks[-1]
            if invert_mask:
                _color = Image.new("L", _image.size, color=('white'))
                _mask = ImageChops.invert(_mask)
            else:
                _color = Image.new("L", _image.size, color=('black'))

            alpha = 1 - opacity / 100.0
            ret_mask = Image.blend(_mask, _color, alpha)
            R, G, B, = _image.convert('RGB').split()
            if invert_mask:
                ret_mask = ImageChops.invert(ret_mask)
            ret_image = Image.merge('RGBA', (R, G, B, ret_mask))

            ret_images.append(pil2tensor(ret_image))
            ret_masks.append(image2mask(ret_mask))

        log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
        return (torch.cat(ret_images, dim=0), torch.cat(ret_masks, dim=0),)

NODE_CLASS_MAPPINGS = {
    "LayerUtility: ImageOpacity": ImageOpacity
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerUtility: ImageOpacity": "LayerUtility: ImageOpacity"
}
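The opacity math in this node is compact: with `alpha = 1 - opacity/100`, `Image.blend(_mask, _color, alpha)` moves every mask pixel toward the solid `_color` by that fraction, so `opacity=100` leaves the mask untouched and `opacity=0` replaces it entirely. A small self-contained check of that behavior, written for illustration and not part of the commit:

from PIL import Image

mask = Image.new('L', (2, 2), color=200)   # a mask pixel value of 200
solid = Image.new('L', (2, 2), color=0)    # blend target: black
for opacity in (100, 50, 0):
    alpha = 1 - opacity / 100.0
    out = Image.blend(mask, solid, alpha)  # im1*(1-alpha) + im2*alpha
    print(opacity, out.getpixel((0, 0)))   # 100 -> 200, 50 -> 100, 0 -> 0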
comfyui_layerstyle/py/image_reel.py ADDED

@@ -0,0 +1,224 @@

import torch
from PIL import Image, ImageFont, ImageDraw
from .imagefunc import log, tensor2pil, pil2tensor, gaussian_blur, adjust_levels, get_resource_dir

class ImageReelPipeline:
    def __init__(self):
        self.image = None
        self.texts = {}
        self.reel_height = 0
        self.reel_border = 0

Reel = ImageReelPipeline()
class ImageReel:

    def __init__(self):
        self.NODE_NAME = 'ImageReel'

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "image1": ("IMAGE",),
                "image1_text": ("STRING", {"multiline": False, "default": "image1"}),
                "image2_text": ("STRING", {"multiline": False, "default": "image2"}),
                "image3_text": ("STRING", {"multiline": False, "default": "image3"}),
                "image4_text": ("STRING", {"multiline": False, "default": "image4"}),
                "reel_height": ("INT", {"default": 512, "min": 64, "max": 2048}),
                "border": ("INT", {"default": 32, "min": 8, "max": 512}),
            },
            "optional": {
                "image2": ("IMAGE",),
                "image3": ("IMAGE",),
                "image4": ("IMAGE",),
            }
        }

    RETURN_TYPES = ("Reel",)
    RETURN_NAMES = ("reel",)
    FUNCTION = 'image_reel'
    CATEGORY = '😺dzNodes/LayerUtility'

    def image_reel(self, image1, image1_text, image2_text, image3_text, image4_text,
                   reel_height, border,
                   image2=None, image3=None, image4=None,):

        image_list = []
        texts = []
        for img in image1:
            i = self.resize_image_to_height(tensor2pil(img.unsqueeze(0)), reel_height)
            image_list.append(i)
            texts.append([image1_text, i.width])
        if image2 is not None:
            for img in image2:
                i = self.resize_image_to_height(tensor2pil(img.unsqueeze(0)), reel_height)
                image_list.append(i)
                texts.append([image2_text, i.width])
        if image3 is not None:
            for img in image3:
                i = self.resize_image_to_height(tensor2pil(img.unsqueeze(0)), reel_height)
                image_list.append(i)
                texts.append([image3_text, i.width])
        if image4 is not None:
            for img in image4:
                i = self.resize_image_to_height(tensor2pil(img.unsqueeze(0)), reel_height)
                image_list.append(i)
                texts.append([image4_text, i.width])

        reel = ImageReelPipeline()  # package results in the pipeline container defined above
        reel.image = self.draw_reel_image(image_list, border, reel_height)
        reel.texts = texts
        reel.reel_height = reel_height
        reel.reel_border = border
        return (reel,)

    def resize_image_to_height(self, image, target_height) -> Image:
        w = int(target_height / image.height * image.width)
        return image.resize((w, target_height), Image.LANCZOS)

    def draw_reel_image(self, image_list, border, reel_height) -> Image:
        reel_width = 0
        for img in image_list:
            reel_width += img.width + border
        reel_img = Image.new('RGBA', (reel_width, reel_height + border), color=(0, 0, 0, 0))
        # paste images
        w = border // 2
        for img in image_list:
            reel_img.paste(img, (w, border // 2))
            w += img.width + border
        return reel_img


class ImageReelComposit:

    def __init__(self):
        self.NODE_NAME = 'ImageReelComposit'
        (_, self.FONT_DICT) = get_resource_dir()
        self.FONT_LIST = list(self.FONT_DICT.keys())

    @classmethod
    def INPUT_TYPES(self):
        (LUT_DICT, FONT_DICT) = get_resource_dir()
        FONT_LIST = list(FONT_DICT.keys())
        LUT_LIST = list(LUT_DICT.keys())

        color_theme_list = ['light', 'dark']
        return {
            "required": {
                "reel_1": ("Reel",),
                "font_file": (FONT_LIST,),
                "font_size": ("INT", {"default": 40, "min": 4, "max": 1024}),
                "border": ("INT", {"default": 32, "min": 8, "max": 512}),
                "color_theme": (color_theme_list,),
            },
            "optional": {
                "reel_2": ("Reel",),
                "reel_3": ("Reel",),
                "reel_4": ("Reel",),
            }
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image1",)
    FUNCTION = 'image_reel_composit'
    CATEGORY = '😺dzNodes/LayerUtility'

    def image_reel_composit(self, reel_1, font_file, font_size, border, color_theme, reel_2=None, reel_3=None, reel_4=None,):

        ret_images = []

        if color_theme == 'light':
            bg_color = "#E5E5E5"
            text_color = "#121212"
        else:
            bg_color = "#121212"
            text_color = "#E5E5E5"

        font_space = int(font_size * 1.5)
        width = reel_1.image.width
        height = reel_1.image.height + font_space + border
        if reel_2 is not None:
            width = max(width, reel_2.image.width)
            height += reel_2.image.height + font_space + border
        if reel_3 is not None:
            width = max(width, reel_3.image.width)
            height += reel_3.image.height + font_space + border
        if reel_4 is not None:
            width = max(width, reel_4.image.width)
            height += reel_4.image.height + font_space + border

        ret_image = Image.new('RGB', (width, height), color=bg_color)
        paste_y = 0
        reel1_text_image = self.draw_reel_text(reel_1, font_file, font_size, text_color)
        shadow_size = reel_1.image.height // 80
        ret_image = self.paste_drop_shadow(ret_image, reel_1.image, reel1_text_image, ((width - reel_1.image.width) // 2, paste_y),
                                           shadow_size, text_color)

        paste_y += reel_1.image.height + font_space + border
        if reel_2 is not None:
            reel2_text_image = self.draw_reel_text(reel_2, font_file, font_size, text_color)
            shadow_size = reel_2.image.height // 80
            ret_image = self.paste_drop_shadow(ret_image, reel_2.image, reel2_text_image, ((width - reel_2.image.width) // 2, paste_y),
                                               shadow_size, text_color)
            paste_y += reel_2.image.height + font_space + border
        if reel_3 is not None:
            reel3_text_image = self.draw_reel_text(reel_3, font_file, font_size, text_color)
            shadow_size = reel_3.image.height // 80
            ret_image = self.paste_drop_shadow(ret_image, reel_3.image, reel3_text_image, ((width - reel_3.image.width) // 2, paste_y),
                                               shadow_size, text_color)
            paste_y += reel_3.image.height + font_space + border
        if reel_4 is not None:
            reel4_text_image = self.draw_reel_text(reel_4, font_file, font_size, text_color)
            shadow_size = reel_4.image.height // 80
            ret_image = self.paste_drop_shadow(ret_image, reel_4.image, reel4_text_image, ((width - reel_4.image.width) // 2, paste_y),
                                               shadow_size, text_color)

        ret_images.append(pil2tensor(ret_image))

        log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
        return (torch.cat(ret_images, dim=0),)

    def paste_drop_shadow(self, background_image, image, text_image, box, shadow_size, text_color) -> Image:
        # drop shadow
        _mask = image.split()[3]
        _blured_mask = gaussian_blur(_mask, shadow_size//1.3)
        _blured_mask = adjust_levels(_blured_mask, 0, 255, 0.5, 0, output_white=54).convert('L')
        background_image.paste(Image.new('RGBA', image.size, color="black"), (box[0]+shadow_size, box[1]+shadow_size), mask=_blured_mask)
        background_image.paste(image, box, mask=_mask)
        background_image.paste(Image.new('RGB', text_image.size, color=text_color), (box[0], box[1] + image.height), mask=text_image.split()[3])
        return background_image

    def draw_reel_text(self, reel, font_file, font_size, text_color) -> Image:

        font_path = self.FONT_DICT.get(font_file)
        font = ImageFont.truetype(font_path, font_size)
        texts = reel.texts
        text_image = Image.new('RGBA', (reel.image.width, reel.reel_border + int(font_size * 1.5)), color=(0, 0, 0, 0))
        draw = ImageDraw.Draw(text_image)
        x = reel.reel_border
        for t in texts:
            text = t[0]
            width = t[1]
            text_width = font.getbbox(text)[2]
            draw.text(
                xy=(x + width // 2 - text_width//2, reel.reel_border//4),
                text=text,
                fill=text_color,
                font=font,
            )
            x += width + reel.reel_border
        return text_image


NODE_CLASS_MAPPINGS = {
    "LayerUtility: ImageReel": ImageReel,
    "LayerUtility: ImageReelComposit": ImageReelComposit
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerUtility: ImageReel": "LayerUtility: Image Reel",
    "LayerUtility: ImageReelComposit": "LayerUtility: Image Reel Composit"
}
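`gaussian_blur` and `adjust_levels` are again `imagefunc` helpers that fall outside this diff. The recipe in `paste_drop_shadow` (blur the layer's alpha, weaken it, stamp black through it at a small offset, then paste the layer itself on top) can be approximated with stock Pillow; a minimal sketch under those assumptions:

from PIL import Image, ImageFilter

def drop_shadow_sketch(background, layer, box, shadow_size):
    # layer is RGBA; its alpha channel is the silhouette of the content.
    alpha = layer.split()[3]
    blurred = alpha.filter(ImageFilter.GaussianBlur(radius=shadow_size))
    # Weaken the shadow to about 21% strength, a rough stand-in for
    # adjust_levels(..., output_white=54) in the node above.
    blurred = blurred.point(lambda v: v * 54 // 255)
    shadow = Image.new('RGBA', layer.size, color='black')
    background.paste(shadow, (box[0] + shadow_size, box[1] + shadow_size), mask=blurred)
    background.paste(layer, box, mask=alpha)
    return background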
comfyui_layerstyle/py/image_remove_alpha.py ADDED

@@ -0,0 +1,64 @@

import torch
from PIL import Image
from .imagefunc import log, tensor2pil, pil2tensor


class ImageRemoveAlpha:

    def __init__(self):
        self.NODE_NAME = 'ImageRemoveAlpha'

    @classmethod
    def INPUT_TYPES(self):

        return {
            "required": {
                "RGBA_image": ("IMAGE",),
                "fill_background": ("BOOLEAN", {"default": False}),
                "background_color": ("STRING", {"default": "#000000"}),
            },
            "optional": {
                "mask": ("MASK",),
            }
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("RGB_image",)
    FUNCTION = 'image_remove_alpha'
    CATEGORY = '😺dzNodes/LayerUtility'

    def image_remove_alpha(self, RGBA_image, fill_background, background_color, mask=None):

        ret_images = []

        for index, img in enumerate(RGBA_image):
            _image = tensor2pil(img)

            if fill_background:
                if mask is not None:
                    m = mask[index].unsqueeze(0) if index < len(mask) else mask[-1].unsqueeze(0)
                    alpha = tensor2pil(m).convert('L')
                elif _image.mode == "RGBA":
                    alpha = _image.split()[-1]
                else:
                    log(f"Error: {self.NODE_NAME} skipped, because the input image is not RGBA and mask is None.",
                        message_type='error')
                    return (RGBA_image,)
                ret_image = Image.new('RGB', size=_image.size, color=background_color)
                ret_image.paste(_image, mask=alpha)
                ret_images.append(pil2tensor(ret_image))

            else:
                ret_images.append(pil2tensor(tensor2pil(img).convert('RGB')))

        log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
        return (torch.cat(ret_images, dim=0),)

NODE_CLASS_MAPPINGS = {
    "LayerUtility: ImageRemoveAlpha": ImageRemoveAlpha
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerUtility: ImageRemoveAlpha": "LayerUtility: ImageRemoveAlpha"
}
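The flattening step is just `Image.new` plus a masked `paste`: the alpha channel (or the supplied mask) decides how much of the source shows over the solid background color. A quick standalone equivalent, illustrative only:

from PIL import Image

rgba = Image.new('RGBA', (64, 64), (255, 0, 0, 128))  # half-transparent red
flat = Image.new('RGB', rgba.size, '#000000')
flat.paste(rgba, mask=rgba.split()[-1])               # composites at ~50% over black
print(flat.getpixel((0, 0)))                          # roughly (128, 0, 0)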
comfyui_layerstyle/py/image_scale_by_aspect_ratio.py ADDED

@@ -0,0 +1,158 @@

import torch
from PIL import Image
import math
from .imagefunc import log, tensor2pil, pil2tensor, image2mask, num_round_up_to_multiple, fit_resize_image


class ImageScaleByAspectRatio:

    def __init__(self):
        self.NODE_NAME = 'ImageScaleByAspectRatio'

    @classmethod
    def INPUT_TYPES(self):
        ratio_list = ['original', 'custom', '1:1', '3:2', '4:3', '16:9', '2:3', '3:4', '9:16']
        fit_mode = ['letterbox', 'crop', 'fill']
        method_mode = ['lanczos', 'bicubic', 'hamming', 'bilinear', 'box', 'nearest']
        multiple_list = ['8', '16', '32', '64', '128', '256', '512', 'None']

        return {
            "required": {
                "aspect_ratio": (ratio_list,),
                "proportional_width": ("INT", {"default": 2, "min": 1, "max": 999, "step": 1}),
                "proportional_height": ("INT", {"default": 1, "min": 1, "max": 999, "step": 1}),
                "fit": (fit_mode,),
                "method": (method_mode,),
                "round_to_multiple": (multiple_list,),
                "scale_to_longest_side": ("BOOLEAN", {"default": False}),  # whether to scale by the longest side
                "longest_side": ("INT", {"default": 1024, "min": 4, "max": 999999, "step": 1}),
            },
            "optional": {
                "image": ("IMAGE",),
                "mask": ("MASK",),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK", "BOX", "INT", "INT",)
    RETURN_NAMES = ("image", "mask", "original_size", "width", "height",)
    FUNCTION = 'image_scale_by_aspect_ratio'
    CATEGORY = '😺dzNodes/LayerUtility'

    def image_scale_by_aspect_ratio(self, aspect_ratio, proportional_width, proportional_height,
                                    fit, method, round_to_multiple, scale_to_longest_side, longest_side,
                                    image=None, mask=None,
                                    ):
        orig_images = []
        orig_masks = []
        orig_width = 0
        orig_height = 0
        target_width = 0
        target_height = 0
        ratio = 1.0
        ret_images = []
        ret_masks = []
        if image is not None:
            for i in image:
                i = torch.unsqueeze(i, 0)
                orig_images.append(i)
            orig_width, orig_height = tensor2pil(orig_images[0]).size
        if mask is not None:
            if mask.dim() == 2:
                mask = torch.unsqueeze(mask, 0)
            for m in mask:
                m = torch.unsqueeze(m, 0)
                orig_masks.append(m)
            _width, _height = tensor2pil(orig_masks[0]).size
            if (orig_width > 0 and orig_width != _width) or (orig_height > 0 and orig_height != _height):
                log(f"Error: {self.NODE_NAME} skipped, because the mask doesn't match the image.", message_type='error')
                return (None, None, None, 0, 0,)
            elif orig_width + orig_height == 0:
                orig_width = _width
                orig_height = _height

        if orig_width + orig_height == 0:
            log(f"Error: {self.NODE_NAME} skipped, because at least one of image or mask must be input.", message_type='error')
            return (None, None, None, 0, 0,)

        if aspect_ratio == 'original':
            ratio = orig_width / orig_height
        elif aspect_ratio == 'custom':
            ratio = proportional_width / proportional_height
        else:
            s = aspect_ratio.split(":")
            ratio = int(s[0]) / int(s[1])

        # calculate target width and height
        if orig_width > orig_height:
            if scale_to_longest_side:
                target_width = longest_side
            else:
                target_width = orig_width
            target_height = int(target_width / ratio)
        else:
            if scale_to_longest_side:
                target_height = longest_side
            else:
                target_height = orig_height
            target_width = int(target_height * ratio)

        if ratio < 1:
            if scale_to_longest_side:
                _r = longest_side / target_height
                target_height = longest_side
            else:
                _r = orig_height / target_height
                target_height = orig_height
            target_width = int(target_width * _r)

        if round_to_multiple != 'None':
            multiple = int(round_to_multiple)
            target_width = num_round_up_to_multiple(target_width, multiple)
            target_height = num_round_up_to_multiple(target_height, multiple)

        _mask = Image.new('L', size=(target_width, target_height), color='black')
        _image = Image.new('RGB', size=(target_width, target_height), color='black')

        resize_sampler = Image.LANCZOS
        if method == "bicubic":
            resize_sampler = Image.BICUBIC
        elif method == "hamming":
            resize_sampler = Image.HAMMING
        elif method == "bilinear":
            resize_sampler = Image.BILINEAR
        elif method == "box":
            resize_sampler = Image.BOX
        elif method == "nearest":
            resize_sampler = Image.NEAREST

        if len(orig_images) > 0:
            for i in orig_images:
                _image = tensor2pil(i).convert('RGB')
                _image = fit_resize_image(_image, target_width, target_height, fit, resize_sampler)
                ret_images.append(pil2tensor(_image))
        if len(orig_masks) > 0:
            for m in orig_masks:
                _mask = tensor2pil(m).convert('L')
                _mask = fit_resize_image(_mask, target_width, target_height, fit, resize_sampler).convert('L')
                ret_masks.append(image2mask(_mask))
        if len(ret_images) > 0 and len(ret_masks) > 0:
            log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
            return (torch.cat(ret_images, dim=0), torch.cat(ret_masks, dim=0), [orig_width, orig_height], target_width, target_height,)
        elif len(ret_images) > 0 and len(ret_masks) == 0:
            log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
            return (torch.cat(ret_images, dim=0), None, [orig_width, orig_height], target_width, target_height,)
        elif len(ret_images) == 0 and len(ret_masks) > 0:
            log(f"{self.NODE_NAME} Processed {len(ret_masks)} image(s).", message_type='finish')
            return (None, torch.cat(ret_masks, dim=0), [orig_width, orig_height], target_width, target_height,)
        else:
            log(f"Error: {self.NODE_NAME} skipped, because no available image or mask was found.", message_type='error')
            return (None, None, None, 0, 0,)

NODE_CLASS_MAPPINGS = {
    "LayerUtility: ImageScaleByAspectRatio": ImageScaleByAspectRatio
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerUtility: ImageScaleByAspectRatio": "LayerUtility: ImageScaleByAspectRatio"
}
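`num_round_up_to_multiple` is another `imagefunc` helper not shown in the diff; its name and use imply rounding a dimension up to the next multiple, which keeps output sizes on SD-friendly boundaries such as multiples of 8. A plausible one-liner with a worked check (an assumption about the helper, not its actual source):

import math

def num_round_up_to_multiple_sketch(number, multiple):
    # Round up to the next multiple, e.g. 1023 -> 1024 for multiple=8.
    return math.ceil(number / multiple) * multiple

print(num_round_up_to_multiple_sketch(1023, 8))  # 1024
print(num_round_up_to_multiple_sketch(1024, 8))  # 1024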
comfyui_layerstyle/py/image_scale_by_aspect_ratio_v2.py ADDED

@@ -0,0 +1,186 @@

import torch
from PIL import Image
import math
from .imagefunc import log, tensor2pil, pil2tensor, image2mask, num_round_up_to_multiple, fit_resize_image, is_valid_mask


class ImageScaleByAspectRatioV2:

    def __init__(self):
        self.NODE_NAME = 'ImageScaleByAspectRatio V2'

    @classmethod
    def INPUT_TYPES(self):
        ratio_list = ['original', 'custom', '1:1', '3:2', '4:3', '16:9', '2:3', '3:4', '9:16']
        fit_mode = ['letterbox', 'crop', 'fill']
        method_mode = ['lanczos', 'bicubic', 'hamming', 'bilinear', 'box', 'nearest']
        multiple_list = ['8', '16', '32', '64', '128', '256', '512', 'None']
        scale_to_list = ['None', 'longest', 'shortest', 'width', 'height', 'total_pixel(kilo pixel)']
        return {
            "required": {
                "aspect_ratio": (ratio_list,),
                "proportional_width": ("INT", {"default": 1, "min": 1, "max": 1e8, "step": 1}),
                "proportional_height": ("INT", {"default": 1, "min": 1, "max": 1e8, "step": 1}),
                "fit": (fit_mode,),
                "method": (method_mode,),
                "round_to_multiple": (multiple_list,),
                "scale_to_side": (scale_to_list,),  # whether to scale by the longest side
                "scale_to_length": ("INT", {"default": 1024, "min": 4, "max": 1e8, "step": 1}),
                "background_color": ("STRING", {"default": "#000000"}),  # background color
            },
            "optional": {
                "image": ("IMAGE",),
                "mask": ("MASK",),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK", "BOX", "INT", "INT",)
    RETURN_NAMES = ("image", "mask", "original_size", "width", "height",)
    FUNCTION = 'image_scale_by_aspect_ratio'
    CATEGORY = '😺dzNodes/LayerUtility'

    def image_scale_by_aspect_ratio(self, aspect_ratio, proportional_width, proportional_height,
                                    fit, method, round_to_multiple, scale_to_side, scale_to_length,
                                    background_color,
                                    image=None, mask=None,
                                    ):
        orig_images = []
        orig_masks = []
        orig_width = 0
        orig_height = 0
        target_width = 0
        target_height = 0
        ratio = 1.0
        ret_images = []
        ret_masks = []
        if image is not None:
            for i in image:
                i = torch.unsqueeze(i, 0)
                orig_images.append(i)
            orig_width, orig_height = tensor2pil(orig_images[0]).size
        if mask is not None:
            if mask.dim() == 2:
                mask = torch.unsqueeze(mask, 0)
            for m in mask:
                m = torch.unsqueeze(m, 0)
                if not is_valid_mask(m) and m.shape == torch.Size([1, 64, 64]):
                    log(f"Warning: {self.NODE_NAME} input mask is empty, ignoring it.", message_type='warning')
                else:
                    orig_masks.append(m)

            if len(orig_masks) > 0:
                _width, _height = tensor2pil(orig_masks[0]).size
                if (orig_width > 0 and orig_width != _width) or (orig_height > 0 and orig_height != _height):
                    log(f"Error: {self.NODE_NAME} execute failed, because the mask doesn't match the image.", message_type='error')
                    return (None, None, None, 0, 0,)
                elif orig_width + orig_height == 0:
                    orig_width = _width
                    orig_height = _height

        if orig_width + orig_height == 0:
            log(f"Error: {self.NODE_NAME} execute failed, because at least one of image or mask must be input.", message_type='error')
            return (None, None, None, 0, 0,)

        if aspect_ratio == 'original':
            ratio = orig_width / orig_height
        elif aspect_ratio == 'custom':
            ratio = proportional_width / proportional_height
        else:
            s = aspect_ratio.split(":")
            ratio = int(s[0]) / int(s[1])

        # calculate target width and height
        if ratio > 1:
            if scale_to_side == 'longest':
                target_width = scale_to_length
                target_height = int(target_width / ratio)
            elif scale_to_side == 'shortest':
                target_height = scale_to_length
                target_width = int(target_height * ratio)
            elif scale_to_side == 'width':
                target_width = scale_to_length
                target_height = int(target_width / ratio)
            elif scale_to_side == 'height':
                target_height = scale_to_length
                target_width = int(target_height * ratio)
            elif scale_to_side == 'total_pixel(kilo pixel)':
                target_width = math.sqrt(ratio * scale_to_length * 1000)
                target_height = target_width / ratio
                target_width = int(target_width)
                target_height = int(target_height)
            else:
                target_width = orig_width
                target_height = int(target_width / ratio)
        else:
            if scale_to_side == 'longest':
                target_height = scale_to_length
                target_width = int(target_height * ratio)
            elif scale_to_side == 'shortest':
                target_width = scale_to_length
                target_height = int(target_width / ratio)
            elif scale_to_side == 'width':
                target_width = scale_to_length
                target_height = int(target_width / ratio)
            elif scale_to_side == 'height':
                target_height = scale_to_length
                target_width = int(target_height * ratio)
            elif scale_to_side == 'total_pixel(kilo pixel)':
                target_width = math.sqrt(ratio * scale_to_length * 1000)
                target_height = target_width / ratio
                target_width = int(target_width)
                target_height = int(target_height)
            else:
                target_height = orig_height
                target_width = int(target_height * ratio)

        if round_to_multiple != 'None':
            multiple = int(round_to_multiple)
            target_width = num_round_up_to_multiple(target_width, multiple)
            target_height = num_round_up_to_multiple(target_height, multiple)

        _mask = Image.new('L', size=(target_width, target_height), color='black')
        _image = Image.new('RGB', size=(target_width, target_height), color='black')

        resize_sampler = Image.LANCZOS
        if method == "bicubic":
            resize_sampler = Image.BICUBIC
        elif method == "hamming":
            resize_sampler = Image.HAMMING
        elif method == "bilinear":
            resize_sampler = Image.BILINEAR
        elif method == "box":
            resize_sampler = Image.BOX
        elif method == "nearest":
            resize_sampler = Image.NEAREST

        if len(orig_images) > 0:
            for i in orig_images:
                _image = tensor2pil(i).convert('RGB')
                _image = fit_resize_image(_image, target_width, target_height, fit, resize_sampler, background_color)
                ret_images.append(pil2tensor(_image))
        if len(orig_masks) > 0:
            for m in orig_masks:
                _mask = tensor2pil(m).convert('L')
                _mask = fit_resize_image(_mask, target_width, target_height, fit, resize_sampler).convert('L')
                ret_masks.append(image2mask(_mask))
        if len(ret_images) > 0 and len(ret_masks) > 0:
            log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
            return (torch.cat(ret_images, dim=0), torch.cat(ret_masks, dim=0), [orig_width, orig_height], target_width, target_height,)
        elif len(ret_images) > 0 and len(ret_masks) == 0:
            log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
            return (torch.cat(ret_images, dim=0), None, [orig_width, orig_height], target_width, target_height,)
        elif len(ret_images) == 0 and len(ret_masks) > 0:
            log(f"{self.NODE_NAME} Processed {len(ret_masks)} image(s).", message_type='finish')
            return (None, torch.cat(ret_masks, dim=0), [orig_width, orig_height], target_width, target_height,)
        else:
            log(f"Error: {self.NODE_NAME} skipped, because no available image or mask was found.", message_type='error')
            return (None, None, None, 0, 0,)

NODE_CLASS_MAPPINGS = {
|
| 181 |
+
"LayerUtility: ImageScaleByAspectRatio V2": ImageScaleByAspectRatioV2
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
NODE_DISPLAY_NAME_MAPPINGS = {
|
| 185 |
+
"LayerUtility: ImageScaleByAspectRatio V2": "LayerUtility: ImageScaleByAspectRatio V2"
|
| 186 |
+
}
|
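A note on the sizing logic above: each `scale_to_side` option first reduces the request to a single width/height ratio, then derives the missing dimension from `scale_to_length`. A minimal standalone sketch of the landscape (ratio > 1) branch, with hypothetical helper and variable names that are not part of the node:

import math

def target_size(orig_w, orig_h, ratio, scale_to_side, length):
    # Mirrors the ratio > 1 branch of the node above.
    if scale_to_side in ('longest', 'width'):
        w = length
        h = int(w / ratio)
    elif scale_to_side in ('shortest', 'height'):
        h = length
        w = int(h * ratio)
    elif scale_to_side == 'total_pixel(kilo pixel)':
        # length is read as kilopixels: w * h should land near length * 1000
        w = int(math.sqrt(ratio * length * 1000))
        h = int(w / ratio)
    else:
        w = orig_w
        h = int(w / ratio)
    return w, h

print(target_size(1920, 1080, 16 / 9, 'total_pixel(kilo pixel)', 1000))  # (1333, 749)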
comfyui_layerstyle/py/image_scale_restore.py
ADDED
@@ -0,0 +1,112 @@
+import torch
+from PIL import Image
+from .imagefunc import log, tensor2pil, pil2tensor, image2mask
+
+
+
+class ImageScaleRestore:
+
+    def __init__(self):
+        self.NODE_NAME = 'ImageScaleRestore'
+
+    @classmethod
+    def INPUT_TYPES(self):
+        method_mode = ['lanczos', 'bicubic', 'hamming', 'bilinear', 'box', 'nearest']
+        return {
+            "required": {
+                "image": ("IMAGE", ),  #
+                "scale": ("FLOAT", {"default": 1, "min": 0.01, "max": 100, "step": 0.01}),
+                "method": (method_mode,),
+                "scale_by_longest_side": ("BOOLEAN", {"default": False}),  # whether to scale by the longest side
+                "longest_side": ("INT", {"default": 1024, "min": 4, "max": 999999, "step": 1}),
+            },
+            "optional": {
+                "mask": ("MASK",),  #
+                "original_size": ("BOX",),
+            }
+        }
+
+    RETURN_TYPES = ("IMAGE", "MASK", "BOX", "INT", "INT")
+    RETURN_NAMES = ("image", "mask", "original_size", "width", "height",)
+    FUNCTION = 'image_scale_restore'
+    CATEGORY = '😺dzNodes/LayerUtility'
+
+    def image_scale_restore(self, image, scale, method,
+                            scale_by_longest_side, longest_side,
+                            mask=None, original_size=None
+                            ):
+
+        l_images = []
+        l_masks = []
+        ret_images = []
+        ret_masks = []
+        for l in image:
+            l_images.append(torch.unsqueeze(l, 0))
+            m = tensor2pil(l)
+            if m.mode == 'RGBA':
+                l_masks.append(m.split()[-1])
+
+        if mask is not None:
+            if mask.dim() == 2:
+                mask = torch.unsqueeze(mask, 0)
+            l_masks = []
+            for m in mask:
+                l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))
+
+        max_batch = max(len(l_images), len(l_masks))
+
+        orig_width, orig_height = tensor2pil(l_images[0]).size
+        if original_size is not None:
+            target_width = original_size[0]
+            target_height = original_size[1]
+        else:
+            target_width = int(orig_width * scale)
+            target_height = int(orig_height * scale)
+            if scale_by_longest_side:
+                if orig_width > orig_height:
+                    target_width = longest_side
+                    target_height = int(target_width * orig_height / orig_width)
+                else:
+                    target_height = longest_side
+                    target_width = int(target_height * orig_width / orig_height)
+        if target_width < 4:
+            target_width = 4
+        if target_height < 4:
+            target_height = 4
+        resize_sampler = Image.LANCZOS
+        if method == "bicubic":
+            resize_sampler = Image.BICUBIC
+        elif method == "hamming":
+            resize_sampler = Image.HAMMING
+        elif method == "bilinear":
+            resize_sampler = Image.BILINEAR
+        elif method == "box":
+            resize_sampler = Image.BOX
+        elif method == "nearest":
+            resize_sampler = Image.NEAREST
+
+        for i in range(max_batch):
+
+            _image = l_images[i] if i < len(l_images) else l_images[-1]
+
+            _canvas = tensor2pil(_image).convert('RGB')
+            ret_image = _canvas.resize((target_width, target_height), resize_sampler)
+            ret_mask = Image.new('L', size=ret_image.size, color='white')
+            if mask is not None:
+                _mask = l_masks[i] if i < len(l_masks) else l_masks[-1]
+                ret_mask = _mask.resize((target_width, target_height), resize_sampler)
+
+            ret_images.append(pil2tensor(ret_image))
+            ret_masks.append(image2mask(ret_mask))
+
+        log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
+        return (torch.cat(ret_images, dim=0), torch.cat(ret_masks, dim=0), [orig_width, orig_height], target_width, target_height,)
+
+
+NODE_CLASS_MAPPINGS = {
+    "LayerUtility: ImageScaleRestore": ImageScaleRestore
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "LayerUtility: ImageScaleRestore": "LayerUtility: ImageScaleRestore"
+}
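ImageScaleRestore is the counterpart of the scaling nodes above: the BOX value emitted as `original_size` can be wired back into this node to undo an earlier resize after processing. A rough sketch of that round trip in plain PIL (illustrative only; the actual nodes operate on torch tensors inside a ComfyUI graph):

from PIL import Image

img = Image.new('RGB', (1920, 1080))             # stand-in for the source image
original_size = [img.width, img.height]          # what the BOX output carries

small = img.resize((1024, 576), Image.LANCZOS)   # downscale for processing
# ... run some processing on `small` here ...
restored = small.resize(tuple(original_size), Image.LANCZOS)  # the restore step
assert restored.size == (1920, 1080)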
comfyui_layerstyle/py/image_scale_restore_v2.py
ADDED
@@ -0,0 +1,132 @@
+import math
+import torch
+from PIL import Image
+from .imagefunc import log, tensor2pil, pil2tensor, image2mask
+
+
+class ImageScaleRestoreV2:
+
+    def __init__(self):
+        self.NODE_NAME = 'ImageScaleRestore V2'
+
+    @classmethod
+    def INPUT_TYPES(self):
+        method_mode = ['lanczos', 'bicubic', 'hamming', 'bilinear', 'box', 'nearest']
+        scale_by_list = ['by_scale', 'longest', 'shortest', 'width', 'height', 'total_pixel(kilo pixel)']
+        return {
+            "required": {
+                "image": ("IMAGE", ),  #
+                "scale": ("FLOAT", {"default": 1, "min": 0.01, "max": 100, "step": 0.01}),
+                "method": (method_mode,),
+                "scale_by": (scale_by_list,),  # which dimension to scale by
+                "scale_by_length": ("INT", {"default": 1024, "min": 4, "max": 99999999, "step": 1}),
+            },
+            "optional": {
+                "mask": ("MASK",),  #
+                "original_size": ("BOX",),
+            }
+        }
+
+    RETURN_TYPES = ("IMAGE", "MASK", "BOX", "INT", "INT")
+    RETURN_NAMES = ("image", "mask", "original_size", "width", "height",)
+    FUNCTION = 'image_scale_restore'
+    CATEGORY = '😺dzNodes/LayerUtility'
+
+    def image_scale_restore(self, image, scale, method,
+                            scale_by, scale_by_length,
+                            mask=None, original_size=None
+                            ):
+
+        l_images = []
+        l_masks = []
+        ret_images = []
+        ret_masks = []
+        for l in image:
+            l_images.append(torch.unsqueeze(l, 0))
+            m = tensor2pil(l)
+            if m.mode == 'RGBA':
+                l_masks.append(m.split()[-1])
+
+        if mask is not None:
+            if mask.dim() == 2:
+                mask = torch.unsqueeze(mask, 0)
+            l_masks = []
+            for m in mask:
+                l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))
+
+        max_batch = max(len(l_images), len(l_masks))
+
+        orig_width, orig_height = tensor2pil(l_images[0]).size
+        if original_size is not None:
+            target_width = original_size[0]
+            target_height = original_size[1]
+        else:
+            target_width = int(orig_width * scale)
+            target_height = int(orig_height * scale)
+            if scale_by == 'longest':
+                if orig_width > orig_height:
+                    target_width = scale_by_length
+                    target_height = int(target_width * orig_height / orig_width)
+                else:
+                    target_height = scale_by_length
+                    target_width = int(target_height * orig_width / orig_height)
+            if scale_by == 'shortest':
+                if orig_width < orig_height:
+                    target_width = scale_by_length
+                    target_height = int(target_width * orig_height / orig_width)
+                else:
+                    target_height = scale_by_length
+                    target_width = int(target_height * orig_width / orig_height)
+            if scale_by == 'width':
+                target_width = scale_by_length
+                target_height = int(target_width * orig_height / orig_width)
+            if scale_by == 'height':
+                target_height = scale_by_length
+                target_width = int(target_height * orig_width / orig_height)
+            if scale_by == 'total_pixel(kilo pixel)':
+                r = orig_width / orig_height
+                target_width = math.sqrt(r * scale_by_length * 1000)
+                target_height = target_width / r
+                target_width = int(target_width)
+                target_height = int(target_height)
+        if target_width < 4:
+            target_width = 4
+        if target_height < 4:
+            target_height = 4
+        resize_sampler = Image.LANCZOS
+        if method == "bicubic":
+            resize_sampler = Image.BICUBIC
+        elif method == "hamming":
+            resize_sampler = Image.HAMMING
+        elif method == "bilinear":
+            resize_sampler = Image.BILINEAR
+        elif method == "box":
+            resize_sampler = Image.BOX
+        elif method == "nearest":
+            resize_sampler = Image.NEAREST
+
+        for i in range(max_batch):
+
+            _image = l_images[i] if i < len(l_images) else l_images[-1]
+
+            _canvas = tensor2pil(_image).convert('RGB')
+            ret_image = _canvas.resize((target_width, target_height), resize_sampler)
+            ret_mask = Image.new('L', size=ret_image.size, color='white')
+            if mask is not None:
+                _mask = l_masks[i] if i < len(l_masks) else l_masks[-1]
+                ret_mask = _mask.resize((target_width, target_height), resize_sampler)
+
+            ret_images.append(pil2tensor(ret_image))
+            ret_masks.append(image2mask(ret_mask))
+
+        log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
+        return (torch.cat(ret_images, dim=0), torch.cat(ret_masks, dim=0), [orig_width, orig_height], target_width, target_height,)
+
+
+NODE_CLASS_MAPPINGS = {
+    "LayerUtility: ImageScaleRestore V2": ImageScaleRestoreV2
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "LayerUtility: ImageScaleRestore V2": "LayerUtility: ImageScaleRestore V2"
+}
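The 'total_pixel(kilo pixel)' branch above sizes the output so that width x height lands near `scale_by_length` thousand pixels while keeping the aspect ratio: solving w * h = N together with w / h = r gives w = sqrt(r * N). A quick worked example:

import math

r = 1920 / 1080                      # aspect ratio to preserve
kilo = 500                           # scale_by_length: target of about 500k pixels
w = int(math.sqrt(r * kilo * 1000))
h = int(w / r)
print(w, h, w * h)                   # 942 529 498318, close to 500000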
comfyui_layerstyle/py/image_shift.py
ADDED
@@ -0,0 +1,88 @@
+import torch
+from PIL import Image
+from .imagefunc import log, tensor2pil, pil2tensor, image2mask, draw_border, gaussian_blur, shift_image
+
+
+class ImageShift:
+
+    def __init__(self):
+        self.NODE_NAME = 'ImageShift'
+
+    @classmethod
+    def INPUT_TYPES(self):
+
+        return {
+            "required": {
+                "image": ("IMAGE", ),  #
+                "shift_x": ("INT", {"default": 256, "min": -9999, "max": 9999, "step": 1}),
+                "shift_y": ("INT", {"default": 256, "min": -9999, "max": 9999, "step": 1}),
+                "cyclic": ("BOOLEAN", {"default": True}),  # whether to wrap around cyclically
+                "background_color": ("STRING", {"default": "#000000"}),
+                "border_mask_width": ("INT", {"default": 20, "min": 0, "max": 999, "step": 1}),
+                "border_mask_blur": ("INT", {"default": 12, "min": 0, "max": 999, "step": 1}),
+            },
+            "optional": {
+                "mask": ("MASK",),  #
+            }
+        }
+
+    RETURN_TYPES = ("IMAGE", "MASK", "MASK",)
+    RETURN_NAMES = ("image", "mask", "border_mask")
+    FUNCTION = 'image_shift'
+    CATEGORY = '😺dzNodes/LayerUtility'
+
+    def image_shift(self, image, shift_x, shift_y,
+                    cyclic, background_color,
+                    border_mask_width, border_mask_blur,
+                    mask=None
+                    ):
+
+        ret_images = []
+        ret_masks = []
+        ret_border_masks = []
+
+        l_images = []
+        l_masks = []
+
+        for l in image:
+            l_images.append(torch.unsqueeze(l, 0))
+            m = tensor2pil(l)
+            if m.mode == 'RGBA':
+                l_masks.append(m.split()[-1])
+            else:
+                l_masks.append(Image.new('L', size=m.size, color='white'))
+        if mask is not None:
+            if mask.dim() == 2:
+                mask = torch.unsqueeze(mask, 0)
+            l_masks = []
+            for m in mask:
+                l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))
+
+        shift_x, shift_y = -shift_x, -shift_y
+        for i in range(len(l_images)):
+            _image = l_images[i]
+            _canvas = tensor2pil(_image).convert('RGB')
+            _mask = l_masks[i] if i < len(l_masks) else l_masks[-1]
+            _border = Image.new('L', size=_canvas.size, color='black')
+            _border = draw_border(_border, border_width=border_mask_width, color='#FFFFFF')
+            _border = _border.resize(_canvas.size)
+            _canvas = shift_image(_canvas, shift_x, shift_y, background_color=background_color, cyclic=cyclic)
+            _mask = shift_image(_mask, shift_x, shift_y, background_color='#000000', cyclic=cyclic)
+            _border = shift_image(_border, shift_x, shift_y, background_color='#000000', cyclic=cyclic)
+            _border = gaussian_blur(_border, border_mask_blur)
+
+            ret_images.append(pil2tensor(_canvas))
+            ret_masks.append(image2mask(_mask))
+            ret_border_masks.append(image2mask(_border))
+
+        log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
+        return (torch.cat(ret_images, dim=0), torch.cat(ret_masks, dim=0), torch.cat(ret_border_masks, dim=0),)
+
+NODE_CLASS_MAPPINGS = {
+    "LayerUtility: ImageShift": ImageShift
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "LayerUtility: ImageShift": "LayerUtility: ImageShift"
+}
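When `cyclic` is enabled, `shift_image` wraps pixels that leave one edge back in on the opposite edge, which makes this node handy for inspecting seams in tileable textures. PIL ships an equivalent primitive that is a useful mental model (not necessarily how `imagefunc.shift_image` is implemented internally; file names below are placeholders):

from PIL import Image, ImageChops

img = Image.open('input.png').convert('RGB')  # placeholder file name
# ImageChops.offset wraps content around both edges, i.e. the cyclic case above:
shifted = ImageChops.offset(img, 256, 256)
shifted.save('shifted.png')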
comfyui_layerstyle/py/image_tagger_save.py
ADDED
@@ -0,0 +1,142 @@
+import os.path
+import shutil
+from PIL import Image
+from PIL.PngImagePlugin import PngInfo
+import datetime
+import torch
+import numpy as np
+import folder_paths
+from .imagefunc import log, generate_random_name, remove_empty_lines
+
+
+
+class LSImageTaggerSave:
+    def __init__(self):
+        self.output_dir = folder_paths.get_output_directory()
+        self.type = "output"
+        self.prefix_append = ""
+        self.compress_level = 4
+        self.NODE_NAME = 'ImageTaggerSave'
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required":
+                    {"image": ("IMAGE", ),
+                     "tag_text": ("STRING", {"default": "", "forceInput": True}),
+                     "custom_path": ("STRING", {"default": ""}),
+                     "filename_prefix": ("STRING", {"default": "comfyui"}),
+                     "timestamp": (["None", "second", "millisecond"],),
+                     "format": (["png", "jpg"],),
+                     "quality": ("INT", {"default": 80, "min": 10, "max": 100, "step": 1}),
+                     "preview": ("BOOLEAN", {"default": True}),
+                     },
+                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
+                }
+
+    RETURN_TYPES = ()
+    FUNCTION = "image_tagger_save"
+    OUTPUT_NODE = True
+    CATEGORY = '😺dzNodes/LayerUtility/SystemIO'
+
+    def image_tagger_save(self, image, tag_text, custom_path, filename_prefix, timestamp, format, quality,
+                          preview,
+                          prompt=None, extra_pnginfo=None):
+
+        now = datetime.datetime.now()
+        custom_path = custom_path.replace("%date", now.strftime("%Y-%m-%d"))
+        custom_path = custom_path.replace("%time", now.strftime("%H-%M-%S"))
+        filename_prefix = filename_prefix.replace("%date", now.strftime("%Y-%m-%d"))
+        filename_prefix = filename_prefix.replace("%time", now.strftime("%H-%M-%S"))
+        filename_prefix += self.prefix_append
+        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, image[0].shape[1], image[0].shape[0])
+        results = list()
+        temp_sub_dir = generate_random_name('_savepreview_', '_temp', 16)
+        temp_dir = os.path.join(folder_paths.get_temp_directory(), temp_sub_dir)
+        metadata = None
+        i = 255. * image[0].cpu().numpy()
+        img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
+
+        if timestamp == "millisecond":
+            file = f'{filename}_{now.strftime("%Y-%m-%d_%H-%M-%S-%f")[:-3]}'
+        elif timestamp == "second":
+            file = f'{filename}_{now.strftime("%Y-%m-%d_%H-%M-%S")}'
+        else:
+            file = f'{filename}_{counter:08}'
+
+        preview_filename = ""
+        if custom_path != "":
+            if not os.path.exists(custom_path):
+                try:
+                    os.makedirs(custom_path)
+                except Exception as e:
+                    log(f"Error: {self.NODE_NAME} skipped, because unable to create output folder.",
+                        message_type='warning')
+                    raise FileNotFoundError(f"cannot create custom_path {custom_path}, {e}")
+        else:
+            custom_path = folder_paths.get_output_directory()
+
+        full_output_folder = os.path.normpath(custom_path)
+        # save preview image to temp_dir
+        if os.path.isdir(temp_dir):
+            shutil.rmtree(temp_dir)
+        try:
+            os.makedirs(temp_dir)
+        except Exception as e:
+            print(e)
+            log(f"Error: {self.NODE_NAME} skipped, because unable to create temporary folder.",
+                message_type='warning')
+        try:
+            preview_filename = os.path.join(generate_random_name('saveimage_preview_', '_temp', 16) + '.png')
+            img.save(os.path.join(temp_dir, preview_filename))
+        except Exception as e:
+            print(e)
+            log(f"Error: {self.NODE_NAME} skipped, because unable to create temporary file.", message_type='warning')
+
+        # if the file already exists, bump the counter and rename
+        while os.path.isfile(os.path.join(full_output_folder, f"{file}.{format}")):
+            counter += 1
+            if timestamp == "millisecond":
+                file = f'{filename}_{now.strftime("%Y-%m-%d_%H-%M-%S-%f")[:-3]}_{counter:08}'
+            elif timestamp == "second":
+                file = f'{filename}_{now.strftime("%Y-%m-%d_%H-%M-%S")}_{counter:08}'
+            else:
+                file = f"{filename}_{counter:08}"
+
+        image_file_name = os.path.join(full_output_folder, f"{file}.{format}")
+        tag_file_name = os.path.join(full_output_folder, f"{file}.txt")
+
+        if format == "png":
+            img.save(image_file_name, pnginfo=metadata, compress_level=(100 - quality) // 10)
+        else:
+            if img.mode == "RGBA":
+                img = img.convert("RGB")
+            img.save(image_file_name, quality=quality)
+        with open(tag_file_name, "w", encoding="utf-8") as f:
+            f.write(remove_empty_lines(tag_text))
+        log(f"{self.NODE_NAME} -> Saving image to {image_file_name}")
+
+        if preview:
+            if custom_path == "":
+                results.append({
+                    "filename": f"{file}.{format}",
+                    "subfolder": subfolder,
+                    "type": self.type
+                })
+            else:
+                results.append({
+                    "filename": preview_filename,
+                    "subfolder": temp_sub_dir,
+                    "type": "temp"
+                })
+
+        counter += 1
+
+        return { "ui": { "images": results } }
+
+NODE_CLASS_MAPPINGS = {
+    "LayerUtility: ImageTaggerSave": LSImageTaggerSave
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "LayerUtility: ImageTaggerSave": "LayerUtility: Image Tagger Save"
+}
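The `%date` and `%time` tokens in `custom_path` and `filename_prefix` are plain string substitutions performed before the path is resolved, so a prefix such as `comfyui_%date_%time` expands like this:

import datetime

now = datetime.datetime.now()
prefix = "comfyui_%date_%time"
prefix = prefix.replace("%date", now.strftime("%Y-%m-%d"))
prefix = prefix.replace("%time", now.strftime("%H-%M-%S"))
print(prefix)  # e.g. comfyui_2024-05-01_13-45-02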
comfyui_layerstyle/py/image_to_mask.py
ADDED
@@ -0,0 +1,111 @@
+import torch
+from PIL import Image, ImageChops
+from .imagefunc import log, tensor2pil, image2mask, image_channel_split, normalize_gray, adjust_levels
+
+
+
+class ImageToMask:
+    def __init__(self):
+        self.NODE_NAME = 'ImageToMask'
+    @classmethod
+    def INPUT_TYPES(s):
+        channel_list = ["L(LAB)", "A(Lab)", "B(Lab)",
+                        "R(RGB)", "G(RGB)", "B(RGB)", "alpha",
+                        "Y(YUV)", "U(YUV)", "V(YUV)",
+                        "H(HSV)", "S(HSV)", "V(HSV)"]
+        return {
+            "required": {
+                "image": ("IMAGE", ),
+                "channel": (channel_list,),
+                "black_point": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1, "display": "slider"}),
+                "white_point": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1, "display": "slider"}),
+                "gray_point": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 9.99, "step": 0.01}),
+                "invert_output_mask": ("BOOLEAN", {"default": False}),  # invert the mask
+            },
+            "optional": {
+                "mask": ("MASK",),  #
+            }
+        }
+
+    RETURN_TYPES = ("MASK",)
+    RETURN_NAMES = ("mask",)
+    FUNCTION = "image_to_mask"
+    CATEGORY = '😺dzNodes/LayerMask'
+
+    def image_to_mask(self, image, channel,
+                      black_point, white_point, gray_point,
+                      invert_output_mask, mask=None
+                      ):
+
+        ret_masks = []
+        l_images = []
+        l_masks = []
+
+        for l in image:
+            l_images.append(torch.unsqueeze(l, 0))
+            m = tensor2pil(l)
+            if m.mode == 'RGBA':
+                l_masks.append(m.split()[-1])
+            else:
+                l_masks.append(Image.new('L', m.size, 'white'))
+        if mask is not None:
+            if mask.dim() == 2:
+                mask = torch.unsqueeze(mask, 0)
+            l_masks = []
+            for m in mask:
+                l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))
+
+        for i in range(len(l_images)):
+            orig_image = l_images[i] if i < len(l_images) else l_images[-1]
+            orig_image = tensor2pil(orig_image)
+            orig_mask = l_masks[i] if i < len(l_masks) else l_masks[-1]
+
+            mask = Image.new('L', orig_image.size, 'black')
+            if channel == "L(LAB)":
+                mask, _, _, _ = image_channel_split(orig_image, 'LAB')
+            elif channel == "A(Lab)":
+                _, mask, _, _ = image_channel_split(orig_image, 'LAB')
+            elif channel == "B(Lab)":
+                _, _, mask, _ = image_channel_split(orig_image, 'LAB')
+            elif channel == "R(RGB)":
+                mask, _, _, _ = image_channel_split(orig_image, 'RGB')
+            elif channel == "G(RGB)":
+                _, mask, _, _ = image_channel_split(orig_image, 'RGB')
+            elif channel == "B(RGB)":
+                _, _, mask, _ = image_channel_split(orig_image, 'RGB')
+            elif channel == "alpha":
+                _, _, _, mask = image_channel_split(orig_image, 'RGBA')
+            elif channel == "Y(YUV)":
+                mask, _, _, _ = image_channel_split(orig_image, 'YCbCr')
+            elif channel == "U(YUV)":
+                _, mask, _, _ = image_channel_split(orig_image, 'YCbCr')
+            elif channel == "V(YUV)":
+                _, _, mask, _ = image_channel_split(orig_image, 'YCbCr')
+            elif channel == "H(HSV)":
+                mask, _, _, _ = image_channel_split(orig_image, 'HSV')
+            elif channel == "S(HSV)":
+                _, mask, _, _ = image_channel_split(orig_image, 'HSV')
+            elif channel == "V(HSV)":
+                _, _, mask, _ = image_channel_split(orig_image, 'HSV')
+            mask = normalize_gray(mask)
+            mask = adjust_levels(mask, black_point, white_point, gray_point,
+                                 0, 255)
+            if invert_output_mask:
+                mask = ImageChops.invert(mask)
+            ret_mask = Image.new('L', mask.size, 'black')
+            ret_mask.paste(mask, mask=orig_mask)
+
+            ret_mask = image2mask(ret_mask)
+
+            ret_masks.append(ret_mask)
+
+        return (torch.cat(ret_masks, dim=0), )
+
+
+NODE_CLASS_MAPPINGS = {
+    "LayerMask: ImageToMask": ImageToMask
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "LayerMask: ImageToMask": "LayerMask: Image To Mask"
+}
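After the channel split, `black_point` and `white_point` act as a linear levels stretch on the selected channel, with `gray_point` as a gamma-style midtone control handled inside `adjust_levels`. A minimal sketch of just the black/white-point stretch using PIL's point() (the gamma part is omitted; the helper name is hypothetical):

from PIL import Image

def stretch_levels(gray, black, white):
    # Map [black, white] to [0, 255]; values outside the range clip.
    span = max(white - black, 1)
    return gray.point(lambda v: min(max((v - black) * 255 // span, 0), 255))

channel = Image.open('input.png').convert('L')  # placeholder input
mask = stretch_levels(channel, black=32, white=224)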
comfyui_layerstyle/py/inner_glow.py
ADDED
@@ -0,0 +1,107 @@
+import torch
+from PIL import Image, ImageChops
+from .imagefunc import log, tensor2pil, pil2tensor, image2mask, step_color, expand_mask, mask_invert, chop_mode, chop_image, step_value
+
+
+class InnerGlow:
+
+    def __init__(self):
+        self.NODE_NAME = 'InnerGlow'
+
+    @classmethod
+    def INPUT_TYPES(self):
+        chop_mode = ['screen', 'add', 'lighter', 'normal', 'multply', 'subtract', 'difference', 'darker',
+                     'color_burn', 'color_dodge', 'linear_burn', 'linear_dodge', 'overlay',
+                     'soft_light', 'hard_light', 'vivid_light', 'pin_light', 'linear_light', 'hard_mix']
+        return {
+            "required": {
+                "background_image": ("IMAGE", ),  #
+                "layer_image": ("IMAGE",),  #
+                "invert_mask": ("BOOLEAN", {"default": True}),  # invert the mask
+                "blend_mode": (chop_mode,),  # blend mode
+                "opacity": ("INT", {"default": 100, "min": 0, "max": 100, "step": 1}),  # opacity
+                "brightness": ("INT", {"default": 5, "min": 2, "max": 20, "step": 1}),  # iterations
+                "glow_range": ("INT", {"default": 48, "min": -9999, "max": 9999, "step": 1}),  # expansion
+                "blur": ("INT", {"default": 25, "min": 0, "max": 9999, "step": 1}),  # blur
+                "light_color": ("STRING", {"default": "#FFBF30"}),  # color at the light-source center
+                "glow_color": ("STRING", {"default": "#FE0000"}),  # color at the outer edge of the glow
+            },
+            "optional": {
+                "layer_mask": ("MASK",),  #
+            }
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    RETURN_NAMES = ("image",)
+    FUNCTION = 'inner_glow'
+    CATEGORY = '😺dzNodes/LayerStyle'
+
+    def inner_glow(self, background_image, layer_image,
+                   invert_mask, blend_mode, opacity,
+                   brightness, glow_range, blur, light_color, glow_color,
+                   layer_mask=None
+                   ):
+
+        b_images = []
+        l_images = []
+        l_masks = []
+        ret_images = []
+        for b in background_image:
+            b_images.append(torch.unsqueeze(b, 0))
+        for l in layer_image:
+            l_images.append(torch.unsqueeze(l, 0))
+            m = tensor2pil(l)
+            if m.mode == 'RGBA':
+                l_masks.append(m.split()[-1])
+        if layer_mask is not None:
+            if layer_mask.dim() == 2:
+                layer_mask = torch.unsqueeze(layer_mask, 0)
+            l_masks = []
+            for m in layer_mask:
+                if invert_mask:
+                    m = 1 - m
+                l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))
+        if len(l_masks) == 0:
+            log(f"Error: {self.NODE_NAME} skipped, because no available mask is found.", message_type='error')
+            return (background_image,)
+        max_batch = max(len(b_images), len(l_images), len(l_masks))
+        for i in range(max_batch):
+            background_image = b_images[i] if i < len(b_images) else b_images[-1]
+            layer_image = l_images[i] if i < len(l_images) else l_images[-1]
+            _mask = l_masks[i] if i < len(l_masks) else l_masks[-1]
+            # preprocess
+            _canvas = tensor2pil(background_image).convert('RGB')
+            _layer = tensor2pil(layer_image).convert('RGB')
+
+            if _mask.size != _layer.size:
+                _mask = Image.new('L', _layer.size, 'white')
+                log(f"Warning: {self.NODE_NAME} mask mismatch, dropped!", message_type='warning')
+
+            blur_factor = blur / 20.0
+            grow = glow_range
+            inner_mask = _mask
+            for x in range(brightness):
+                blur = int(grow * blur_factor)
+                _color = step_color(glow_color, light_color, brightness, x)
+                glow_mask = expand_mask(image2mask(inner_mask), -grow, blur)  # expand and blur
+                # composite the glow
+                color_image = Image.new("RGB", _layer.size, color=_color)
+                alpha = tensor2pil(mask_invert(glow_mask)).convert('L')
+                _glow = chop_image(_layer, color_image, blend_mode, int(step_value(1, opacity, brightness, x)))
+                _layer.paste(_glow, mask=alpha)
+                grow = grow - int(glow_range / brightness)
+            # composite the layer
+            _layer.paste(_canvas, mask=ImageChops.invert(_mask))
+            ret_images.append(pil2tensor(_layer))
+
+        log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
+        return (torch.cat(ret_images, dim=0),)
+
+
+NODE_CLASS_MAPPINGS = {
+    "LayerStyle: InnerGlow": InnerGlow
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "LayerStyle: InnerGlow": "LayerStyle: InnerGlow"
+}
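The glow loop above walks from the outer glow color toward the light-source color over `brightness` iterations, with `step_value` and `step_color` presumably doing simple linear interpolation between the endpoints (their real implementations live in imagefunc). A sketch of the scalar case:

def step_value(start, end, steps, step):
    # Linear interpolation from start to end across `steps` iterations.
    return start + (end - start) * step / max(steps - 1, 1)

# Opacity ramps up as the glow rings shrink toward the light source:
print([round(step_value(1, 100, 5, x)) for x in range(5)])  # [1, 26, 50, 75, 100]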
comfyui_layerstyle/py/inner_glow_v2.py
ADDED
@@ -0,0 +1,114 @@
+import copy
+import torch
+from PIL import Image, ImageChops
+from .imagefunc import log, tensor2pil, pil2tensor, image2mask, step_color, expand_mask, mask_invert, chop_mode_v2, chop_image_v2, BLEND_MODES, step_value
+
+
+
+
+class InnerGlowV2:
+
+    def __init__(self):
+        self.NODE_NAME = 'InnerGlowV2'
+
+    @classmethod
+    def INPUT_TYPES(self):
+
+        modes = copy.copy(BLEND_MODES)
+        chop_mode_list = ["screen", "linear dodge(add)", "color dodge", "lighten", "dodge", "hard light", "linear light"]
+        for i in chop_mode_list:
+            modes.pop(i)
+        chop_mode_list.extend(list(modes.keys()))
+
+        return {
+            "required": {
+                "background_image": ("IMAGE", ),  #
+                "layer_image": ("IMAGE",),  #
+                "invert_mask": ("BOOLEAN", {"default": True}),  # invert the mask
+                "blend_mode": (chop_mode_list,),  # blend mode
+                "opacity": ("INT", {"default": 100, "min": 0, "max": 100, "step": 1}),  # opacity
+                "brightness": ("INT", {"default": 5, "min": 2, "max": 20, "step": 1}),  # iterations
+                "glow_range": ("INT", {"default": 48, "min": -9999, "max": 9999, "step": 1}),  # expansion
+                "blur": ("INT", {"default": 25, "min": 0, "max": 9999, "step": 1}),  # blur
+                "light_color": ("STRING", {"default": "#FFBF30"}),  # color at the light-source center
+                "glow_color": ("STRING", {"default": "#FE0000"}),  # color at the outer edge of the glow
+            },
+            "optional": {
+                "layer_mask": ("MASK",),  #
+            }
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    RETURN_NAMES = ("image",)
+    FUNCTION = 'inner_glow_v2'
+    CATEGORY = '😺dzNodes/LayerStyle'
+
+    def inner_glow_v2(self, background_image, layer_image,
+                      invert_mask, blend_mode, opacity,
+                      brightness, glow_range, blur, light_color, glow_color,
+                      layer_mask=None
+                      ):
+
+        b_images = []
+        l_images = []
+        l_masks = []
+        ret_images = []
+        for b in background_image:
+            b_images.append(torch.unsqueeze(b, 0))
+        for l in layer_image:
+            l_images.append(torch.unsqueeze(l, 0))
+            m = tensor2pil(l)
+            if m.mode == 'RGBA':
+                l_masks.append(m.split()[-1])
+        if layer_mask is not None:
+            if layer_mask.dim() == 2:
+                layer_mask = torch.unsqueeze(layer_mask, 0)
+            l_masks = []
+            for m in layer_mask:
+                if invert_mask:
+                    m = 1 - m
+                l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))
+        if len(l_masks) == 0:
+            log(f"Error: {self.NODE_NAME} skipped, because no available mask is found.", message_type='error')
+            return (background_image,)
+        max_batch = max(len(b_images), len(l_images), len(l_masks))
+        for i in range(max_batch):
+            background_image = b_images[i] if i < len(b_images) else b_images[-1]
+            layer_image = l_images[i] if i < len(l_images) else l_images[-1]
+            _mask = l_masks[i] if i < len(l_masks) else l_masks[-1]
+            # preprocess
+            _canvas = tensor2pil(background_image).convert('RGB')
+            _layer = tensor2pil(layer_image).convert('RGB')
+
+            if _mask.size != _layer.size:
+                _mask = Image.new('L', _layer.size, 'white')
+                log(f"Warning: {self.NODE_NAME} mask mismatch, dropped!", message_type='warning')
+
+            blur_factor = blur / 20.0
+            grow = glow_range
+            inner_mask = _mask
+            for x in range(brightness):
+                blur = int(grow * blur_factor)
+                _color = step_color(glow_color, light_color, brightness, x)
+                glow_mask = expand_mask(image2mask(inner_mask), -grow, blur)  # expand and blur
+                # composite the glow
+                color_image = Image.new("RGB", _layer.size, color=_color)
+                alpha = tensor2pil(mask_invert(glow_mask)).convert('L')
+                _glow = chop_image_v2(_layer, color_image, blend_mode, int(step_value(1, opacity, brightness, x)))
+                _layer.paste(_glow, mask=alpha)
+                grow = grow - int(glow_range / brightness)
+            # composite the layer
+            _layer.paste(_canvas, mask=ImageChops.invert(_mask))
+            ret_images.append(pil2tensor(_layer))
+
+        log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
+        return (torch.cat(ret_images, dim=0),)
+
+
+NODE_CLASS_MAPPINGS = {
+    "LayerStyle: InnerGlow V2": InnerGlowV2
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "LayerStyle: InnerGlow V2": "LayerStyle: InnerGlow V2"
+}
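The main change from V1 above is the blend-mode plumbing: instead of a hard-coded list, V2 pulls its options from the shared BLEND_MODES dict and moves a handful of glow-friendly modes to the front of the dropdown. The reordering idiom in isolation, with a stand-in dict:

import copy

BLEND_MODES = {'normal': 0, 'screen': 1, 'multiply': 2, 'lighten': 3}  # stand-in

modes = copy.copy(BLEND_MODES)
favorites = ['screen', 'lighten']
for name in favorites:
    modes.pop(name)                    # remove favorites from their original spots
favorites.extend(list(modes.keys()))   # favorites first, the rest after
print(favorites)                       # ['screen', 'lighten', 'normal', 'multiply']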
comfyui_layerstyle/py/inner_shadow.py
ADDED
@@ -0,0 +1,102 @@
+import torch
+from PIL import Image, ImageChops
+from .imagefunc import log, tensor2pil, pil2tensor, image2mask, shift_image, expand_mask, chop_image, chop_mode
+
+
+class InnerShadow:
+
+    def __init__(self):
+        self.NODE_NAME = 'InnerShadow'
+
+    @classmethod
+    def INPUT_TYPES(self):
+
+        return {
+            "required": {
+                "background_image": ("IMAGE", ),  #
+                "layer_image": ("IMAGE",),  #
+                "invert_mask": ("BOOLEAN", {"default": True}),  # invert the mask
+                "blend_mode": (chop_mode,),  # blend mode
+                "opacity": ("INT", {"default": 50, "min": 0, "max": 100, "step": 1}),  # opacity
+                "distance_x": ("INT", {"default": 5, "min": -9999, "max": 9999, "step": 1}),  # x offset
+                "distance_y": ("INT", {"default": 5, "min": -9999, "max": 9999, "step": 1}),  # y offset
+                "grow": ("INT", {"default": 2, "min": -9999, "max": 9999, "step": 1}),  # expansion
+                "blur": ("INT", {"default": 15, "min": 0, "max": 100, "step": 1}),  # blur
+                "shadow_color": ("STRING", {"default": "#000000"}),  # shadow color
+            },
+            "optional": {
+                "layer_mask": ("MASK",),  #
+            }
+        }
+
+
+    RETURN_TYPES = ("IMAGE",)
+    RETURN_NAMES = ("image",)
+    FUNCTION = 'inner_shadow'
+    CATEGORY = '😺dzNodes/LayerStyle'
+
+    def inner_shadow(self, background_image, layer_image,
+                     invert_mask, blend_mode, opacity, distance_x, distance_y,
+                     grow, blur, shadow_color,
+                     layer_mask=None
+                     ):
+
+        b_images = []
+        l_images = []
+        l_masks = []
+        ret_images = []
+        for b in background_image:
+            b_images.append(torch.unsqueeze(b, 0))
+        for l in layer_image:
+            l_images.append(torch.unsqueeze(l, 0))
+            m = tensor2pil(l)
+            if m.mode == 'RGBA':
+                l_masks.append(m.split()[-1])
+        if layer_mask is not None:
+            if layer_mask.dim() == 2:
+                layer_mask = torch.unsqueeze(layer_mask, 0)
+            l_masks = []
+            for m in layer_mask:
+                if invert_mask:
+                    m = 1 - m
+                l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))
+        if len(l_masks) == 0:
+            log(f"Error: {self.NODE_NAME} skipped, because no available mask is found.", message_type='error')
+            return (background_image,)
+        max_batch = max(len(b_images), len(l_images), len(l_masks))
+        distance_x = -distance_x
+        distance_y = -distance_y
+        shadow_color = Image.new("RGB", tensor2pil(l_images[0]).size, color=shadow_color)
+        for i in range(max_batch):
+            background_image = b_images[i] if i < len(b_images) else b_images[-1]
+            layer_image = l_images[i] if i < len(l_images) else l_images[-1]
+            _mask = l_masks[i] if i < len(l_masks) else l_masks[-1]
+            # preprocess
+            _canvas = tensor2pil(background_image).convert('RGB')
+            _layer = tensor2pil(layer_image).convert('RGB')
+            if _mask.size != _layer.size:
+                _mask = Image.new('L', _layer.size, 'white')
+                log(f"Warning: {self.NODE_NAME} mask mismatch, dropped!", message_type='warning')
+
+            if distance_x != 0 or distance_y != 0:
+                __mask = shift_image(_mask, distance_x, distance_y)  # shift
+                shadow_mask = expand_mask(image2mask(__mask), grow, blur)  # expand and blur
+                # composite the shadow
+                alpha = tensor2pil(shadow_mask).convert('L')
+                _shadow = chop_image(_layer, shadow_color, blend_mode, opacity)
+                _layer.paste(_shadow, mask=ImageChops.invert(alpha))
+            # composite the layer
+            _canvas.paste(_layer, mask=_mask)
+
+            ret_images.append(pil2tensor(_canvas))
+
+        log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
+        return (torch.cat(ret_images, dim=0),)
+
+NODE_CLASS_MAPPINGS = {
+    "LayerStyle: InnerShadow": InnerShadow
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "LayerStyle: InnerShadow": "LayerStyle: InnerShadow"
+}
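Conceptually, the shadow mask above is just the layer's mask shifted against the light direction and blurred; wherever the shifted mask has receded inside the original silhouette, darkened pixels show through as the inner shadow. A rough PIL-only sketch of that mask construction (ignoring `grow`, blend modes and opacity; note that ImageChops.offset wraps at the edges, unlike the node's shift_image with a background color):

from PIL import Image, ImageChops, ImageFilter

layer_mask = Image.open('mask.png').convert('L')   # placeholder input

shifted = ImageChops.offset(layer_mask, -5, -5)    # move opposite the light
shifted = shifted.filter(ImageFilter.GaussianBlur(15))
# Dark where the shifted mask no longer covers the original silhouette:
inner_shadow_alpha = ImageChops.subtract(layer_mask, shifted)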
comfyui_layerstyle/py/inner_shadow_v2.py
ADDED
@@ -0,0 +1,103 @@
+import torch
+from PIL import Image, ImageChops
+from .imagefunc import log, tensor2pil, pil2tensor, image2mask, shift_image, expand_mask, chop_image_v2, chop_mode_v2
+
+
+
+class InnerShadowV2:
+
+    def __init__(self):
+        self.NODE_NAME = 'InnerShadowV2'
+
+    @classmethod
+    def INPUT_TYPES(self):
+
+        return {
+            "required": {
+                "background_image": ("IMAGE", ),  #
+                "layer_image": ("IMAGE",),  #
+                "invert_mask": ("BOOLEAN", {"default": True}),  # invert the mask
+                "blend_mode": (chop_mode_v2,),  # blend mode
+                "opacity": ("INT", {"default": 50, "min": 0, "max": 100, "step": 1}),  # opacity
+                "distance_x": ("INT", {"default": 5, "min": -9999, "max": 9999, "step": 1}),  # x offset
+                "distance_y": ("INT", {"default": 5, "min": -9999, "max": 9999, "step": 1}),  # y offset
+                "grow": ("INT", {"default": 2, "min": -9999, "max": 9999, "step": 1}),  # expansion
+                "blur": ("INT", {"default": 15, "min": 0, "max": 100, "step": 1}),  # blur
+                "shadow_color": ("STRING", {"default": "#000000"}),  # shadow color
+            },
+            "optional": {
+                "layer_mask": ("MASK",),  #
+            }
+        }
+
+
+    RETURN_TYPES = ("IMAGE",)
+    RETURN_NAMES = ("image",)
+    FUNCTION = 'inner_shadow_v2'
+    CATEGORY = '😺dzNodes/LayerStyle'
+
+    def inner_shadow_v2(self, background_image, layer_image,
+                        invert_mask, blend_mode, opacity, distance_x, distance_y,
+                        grow, blur, shadow_color,
+                        layer_mask=None
+                        ):
+
+        b_images = []
+        l_images = []
+        l_masks = []
+        ret_images = []
+        for b in background_image:
+            b_images.append(torch.unsqueeze(b, 0))
+        for l in layer_image:
+            l_images.append(torch.unsqueeze(l, 0))
+            m = tensor2pil(l)
+            if m.mode == 'RGBA':
+                l_masks.append(m.split()[-1])
+        if layer_mask is not None:
+            if layer_mask.dim() == 2:
+                layer_mask = torch.unsqueeze(layer_mask, 0)
+            l_masks = []
+            for m in layer_mask:
+                if invert_mask:
+                    m = 1 - m
+                l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))
+        if len(l_masks) == 0:
+            log(f"Error: {self.NODE_NAME} skipped, because no available mask is found.", message_type='error')
+            return (background_image,)
+        max_batch = max(len(b_images), len(l_images), len(l_masks))
+        distance_x = -distance_x
+        distance_y = -distance_y
+        shadow_color = Image.new("RGB", tensor2pil(l_images[0]).size, color=shadow_color)
+        for i in range(max_batch):
+            background_image = b_images[i] if i < len(b_images) else b_images[-1]
+            layer_image = l_images[i] if i < len(l_images) else l_images[-1]
+            _mask = l_masks[i] if i < len(l_masks) else l_masks[-1]
+            # preprocess
+            _canvas = tensor2pil(background_image).convert('RGB')
+            _layer = tensor2pil(layer_image).convert('RGB')
+            if _mask.size != _layer.size:
+                _mask = Image.new('L', _layer.size, 'white')
+                log(f"Warning: {self.NODE_NAME} mask mismatch, dropped!", message_type='warning')
+
+            if distance_x != 0 or distance_y != 0:
+                __mask = shift_image(_mask, distance_x, distance_y)  # shift
+                shadow_mask = expand_mask(image2mask(__mask), grow, blur)  # expand and blur
+                # composite the shadow
+                alpha = tensor2pil(shadow_mask).convert('L')
+                _shadow = chop_image_v2(_layer, shadow_color, blend_mode, opacity)
+                _layer.paste(_shadow, mask=ImageChops.invert(alpha))
+            # composite the layer
+            _canvas.paste(_layer, mask=_mask)
+
+            ret_images.append(pil2tensor(_canvas))
+
+        log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
+        return (torch.cat(ret_images, dim=0),)
+
+NODE_CLASS_MAPPINGS = {
+    "LayerStyle: InnerShadow V2": InnerShadowV2
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "LayerStyle: InnerShadow V2": "LayerStyle: InnerShadow V2"
+}
comfyui_layerstyle/py/layer_image_transform.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
from PIL import Image
from .imagefunc import log, tensor2pil, pil2tensor, image_rotate_extend_with_alpha, RGB2RGBA


class LayerImageTransform:

    def __init__(self):
        self.NODE_NAME = 'LayerImageTransform'

    @classmethod
    def INPUT_TYPES(self):
        mirror_mode = ['None', 'horizontal', 'vertical']
        method_mode = ['lanczos', 'bicubic', 'hamming', 'bilinear', 'box', 'nearest']
        return {
            "required": {
                "image": ("IMAGE",),
                "x": ("INT", {"default": 0, "min": -99999, "max": 99999, "step": 1}),
                "y": ("INT", {"default": 0, "min": -99999, "max": 99999, "step": 1}),
                "mirror": (mirror_mode,),  # mirror flip
                "scale": ("FLOAT", {"default": 1, "min": 0.01, "max": 100, "step": 0.01}),
                "aspect_ratio": ("FLOAT", {"default": 1, "min": 0.01, "max": 100, "step": 0.01}),
                "rotate": ("FLOAT", {"default": 0, "min": -999999, "max": 999999, "step": 0.01}),
                "transform_method": (method_mode,),
                "anti_aliasing": ("INT", {"default": 2, "min": 0, "max": 16, "step": 1}),
            },
            "optional": {
            }
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image",)
    FUNCTION = 'layer_image_transform'
    CATEGORY = '😺dzNodes/LayerUtility'

    def layer_image_transform(self, image, x, y, mirror, scale, aspect_ratio, rotate,
                              transform_method, anti_aliasing,
                              ):

        l_images = []
        l_masks = []
        ret_images = []

        # split the batch into single images; keep the alpha channel of RGBA inputs as a mask
        for l in image:
            l_images.append(torch.unsqueeze(l, 0))
            m = tensor2pil(l)
            if m.mode == 'RGBA':
                l_masks.append(m.split()[-1])

        for i in range(len(l_images)):
            layer_image = l_images[i] if i < len(l_images) else l_images[-1]
            _image = tensor2pil(layer_image).convert('RGB')
            if i < len(l_masks):
                _mask = l_masks[i]
            else:
                _mask = Image.new('L', size=_image.size, color='white')
            _image_canvas = Image.new('RGB', size=_image.size, color='black')
            _mask_canvas = Image.new('L', size=_mask.size, color='black')
            orig_layer_width = _image.width
            orig_layer_height = _image.height
            target_layer_width = int(orig_layer_width * scale)
            target_layer_height = int(orig_layer_height * scale * aspect_ratio)
            # mirror
            if mirror == 'horizontal':
                _image = _image.transpose(Image.FLIP_LEFT_RIGHT)
                _mask = _mask.transpose(Image.FLIP_LEFT_RIGHT)
            elif mirror == 'vertical':
                _image = _image.transpose(Image.FLIP_TOP_BOTTOM)
                _mask = _mask.transpose(Image.FLIP_TOP_BOTTOM)
            # scale
            _image = _image.resize((target_layer_width, target_layer_height))
            _mask = _mask.resize((target_layer_width, target_layer_height))
            # rotate
            _image, _mask, _ = image_rotate_extend_with_alpha(_image, rotate, _mask, transform_method, anti_aliasing)
            # composite the transformed layer back onto a canvas of the original size
            paste_x = (orig_layer_width - _image.width) // 2 + x
            paste_y = (orig_layer_height - _image.height) // 2 + y
            _image_canvas.paste(_image, (paste_x, paste_y))
            _mask_canvas.paste(_mask, (paste_x, paste_y))
            if tensor2pil(layer_image).mode == 'RGBA':
                _image_canvas = RGB2RGBA(_image_canvas, _mask_canvas)

            ret_images.append(pil2tensor(_image_canvas))

        log(f"{self.NODE_NAME} Processed {len(l_images)} image(s).", message_type='finish')
        return (torch.cat(ret_images, dim=0),)


NODE_CLASS_MAPPINGS = {
    "LayerUtility: LayerImageTransform": LayerImageTransform
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerUtility: LayerImageTransform": "LayerUtility: LayerImageTransform"
}
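
A minimal standalone sketch of exercising this node outside a ComfyUI graph (the import path, tensor sizes, and parameter values here are illustrative assumptions; ComfyUI IMAGE tensors are [B, H, W, C] floats in 0..1):

import torch
from comfyui_layerstyle.py.layer_image_transform import LayerImageTransform  # hypothetical import path

node = LayerImageTransform()
image = torch.rand(1, 512, 512, 3)  # dummy one-image batch in ComfyUI IMAGE layout
(result,) = node.layer_image_transform(
    image, x=0, y=0, mirror='None', scale=0.5, aspect_ratio=1.0,
    rotate=15.0, transform_method='lanczos', anti_aliasing=2,
)
print(result.shape)  # still [1, 512, 512, 3]: the layer is transformed inside its original canvas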
comfyui_layerstyle/py/layer_mask_transform.py
ADDED
@@ -0,0 +1,81 @@
import torch
from PIL import Image
from .imagefunc import log, tensor2pil, pil2tensor, image2mask, image_rotate_extend_with_alpha, RGB2RGBA


class LayerMaskTransform:

    def __init__(self):
        self.NODE_NAME = 'LayerMaskTransform'

    @classmethod
    def INPUT_TYPES(self):
        mirror_mode = ['None', 'horizontal', 'vertical']
        method_mode = ['lanczos', 'bicubic', 'hamming', 'bilinear', 'box', 'nearest']
        return {
            "required": {
                "mask": ("MASK",),
                "x": ("INT", {"default": 0, "min": -99999, "max": 99999, "step": 1}),
                "y": ("INT", {"default": 0, "min": -99999, "max": 99999, "step": 1}),
                "mirror": (mirror_mode,),  # mirror flip
                "scale": ("FLOAT", {"default": 1, "min": 0.01, "max": 100, "step": 0.01}),
                "aspect_ratio": ("FLOAT", {"default": 1, "min": 0.01, "max": 100, "step": 0.01}),
                "rotate": ("FLOAT", {"default": 0, "min": -999999, "max": 999999, "step": 0.01}),
                "transform_method": (method_mode,),
                "anti_aliasing": ("INT", {"default": 2, "min": 0, "max": 16, "step": 1}),
            },
            "optional": {
            }
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("mask",)
    FUNCTION = 'layer_mask_transform'
    CATEGORY = '😺dzNodes/LayerUtility'

    def layer_mask_transform(self, mask, x, y, mirror, scale, aspect_ratio, rotate,
                             transform_method, anti_aliasing,
                             ):

        l_masks = []
        ret_masks = []

        if mask.dim() == 2:
            mask = torch.unsqueeze(mask, 0)
        for m in mask:
            l_masks.append(torch.unsqueeze(m, 0))
        for i in range(len(l_masks)):
            _mask = l_masks[i] if i < len(l_masks) else l_masks[-1]
            _mask = tensor2pil(_mask).convert('L')
            _mask_canvas = Image.new('L', size=_mask.size, color='black')
            orig_width = _mask.width
            orig_height = _mask.height
            target_layer_width = int(orig_width * scale)
            target_layer_height = int(orig_height * scale * aspect_ratio)
            # mirror
            if mirror == 'horizontal':
                _mask = _mask.transpose(Image.FLIP_LEFT_RIGHT)
            elif mirror == 'vertical':
                _mask = _mask.transpose(Image.FLIP_TOP_BOTTOM)
            # scale
            _mask = _mask.resize((target_layer_width, target_layer_height))
            # rotate
            _, _mask, _ = image_rotate_extend_with_alpha(_mask.convert('RGB'), rotate, _mask, transform_method, anti_aliasing)
            # composite the transformed mask back onto a canvas of the original size
            paste_x = (orig_width - _mask.width) // 2 + x
            paste_y = (orig_height - _mask.height) // 2 + y
            _mask_canvas.paste(_mask, (paste_x, paste_y))

            ret_masks.append(image2mask(_mask_canvas))

        log(f"{self.NODE_NAME} Processed {len(l_masks)} mask(s).", message_type='finish')
        return (torch.cat(ret_masks, dim=0),)


NODE_CLASS_MAPPINGS = {
    "LayerUtility: LayerMaskTransform": LayerMaskTransform
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerUtility: LayerMaskTransform": "LayerUtility: LayerMaskTransform"
}
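
Analogous usage for the mask variant (import path and sizes assumed for illustration; MASK tensors are [B, H, W]):

import torch
from comfyui_layerstyle.py.layer_mask_transform import LayerMaskTransform  # hypothetical import path

node = LayerMaskTransform()
mask = torch.zeros(1, 256, 256)
mask[:, 64:192, 64:192] = 1.0  # a centered square
(out,) = node.layer_mask_transform(
    mask, x=10, y=-10, mirror='None', scale=1.2, aspect_ratio=1.0,
    rotate=45.0, transform_method='bicubic', anti_aliasing=2,
)
print(out.shape)  # [1, 256, 256]: canvas size is preserved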
comfyui_layerstyle/py/light_leak.py
ADDED
@@ -0,0 +1,85 @@
import os.path
import random
import time
import torch
from PIL import Image
from .imagefunc import log, tensor2pil, pil2tensor, load_light_leak_images, image_hue_offset, image_gray_offset, image_channel_merge, fit_resize_image, chop_image


blend_mode = 'screen'


class LightLeak:

    def __init__(self):
        self.NODE_NAME = 'LightLeak'

    @classmethod
    def INPUT_TYPES(self):
        light_list = ['random', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10',
                      '11', '12', '13', '14', '15', '16', '17', '18', '19', '20',
                      '21', '22', '23', '24', '25', '26', '27', '28', '29', '30',
                      '31', '32']
        corner_list = ['left_top', 'right_top', 'left_bottom', 'right_bottom']
        return {
            "required": {
                "image": ("IMAGE",),
                "light": (light_list,),
                "corner": (corner_list,),
                "hue": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}),
                "saturation": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}),
                "opacity": ("INT", {"default": 100, "min": 0, "max": 100, "step": 1})
            },
            "optional": {
            }
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image",)
    FUNCTION = 'light_leak'
    CATEGORY = '😺dzNodes/LayerFilter'

    def light_leak(self, image, light, corner, hue, saturation, opacity):

        ret_images = []
        light_leak_images = load_light_leak_images()
        if light == 'random':
            random.seed(time.time())
            light_index = random.randint(0, 31)
        else:
            light_index = int(light) - 1

        for i in image:
            i = torch.unsqueeze(i, 0)
            _canvas = tensor2pil(i).convert('RGB')
            _light = light_leak_images[light_index]
            if _canvas.width < _canvas.height:
                _light = _light.transpose(Image.ROTATE_90).transpose(Image.FLIP_TOP_BOTTOM)
            if corner == 'right_top':
                _light = _light.transpose(Image.FLIP_LEFT_RIGHT)
            elif corner == 'left_bottom':
                _light = _light.transpose(Image.FLIP_TOP_BOTTOM)
            elif corner == 'right_bottom':
                _light = _light.transpose(Image.ROTATE_180)
            if hue != 0 or saturation != 0:
                _h, _s, _v = _light.convert('HSV').split()
                if hue != 0:
                    _h = image_hue_offset(_h, hue)
                if saturation != 0:
                    _s = image_gray_offset(_s, saturation)
                _light = image_channel_merge((_h, _s, _v), 'HSV')
            resize_sampler = Image.BILINEAR
            _light = fit_resize_image(_light, _canvas.width, _canvas.height, fit='crop', resize_sampler=resize_sampler)
            ret_image = chop_image(_canvas, _light, blend_mode=blend_mode, opacity=opacity)
            ret_images.append(pil2tensor(ret_image))

        log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
        return (torch.cat(ret_images, dim=0),)


NODE_CLASS_MAPPINGS = {
    "LayerFilter: LightLeak": LightLeak
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerFilter: LightLeak": "LayerFilter: LightLeak"
}
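
The filter screen-blends a pre-rendered light-leak texture over each frame, so a quick check only needs an image batch. A sketch under the assumption that the package is importable and the bundled light-leak resource images are present for load_light_leak_images():

import torch
from comfyui_layerstyle.py.light_leak import LightLeak  # hypothetical import path

node = LightLeak()
image = torch.rand(2, 512, 768, 3)  # two dummy frames
(out,) = node.light_leak(image, light='5', corner='left_top', hue=20, saturation=-30, opacity=80)
print(out.shape)  # [2, 512, 768, 3]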
comfyui_layerstyle/py/mask_box_detect.py
ADDED
@@ -0,0 +1,78 @@
import torch
from PIL import Image
from .imagefunc import log, tensor2pil, pil2tensor, gaussian_blur, mask2image
from .imagefunc import min_bounding_rect, max_inscribed_rect, mask_area, draw_rect


class MaskBoxDetect:

    def __init__(self):
        self.NODE_NAME = 'MaskBoxDetect'

    @classmethod
    def INPUT_TYPES(self):
        detect_mode = ['min_bounding_rect', 'max_inscribed_rect', 'mask_area']
        return {
            "required": {
                "mask": ("MASK",),
                "detect": (detect_mode,),  # detection type: min bounding rect / max inscribed rect / mask area
                "x_adjust": ("INT", {"default": 0, "min": -9999, "max": 9999, "step": 1}),  # x-axis correction
                "y_adjust": ("INT", {"default": 0, "min": -9999, "max": 9999, "step": 1}),  # y-axis correction
                "scale_adjust": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 100, "step": 0.01}),  # scale correction
            },
            "optional": {
            }
        }

    RETURN_TYPES = ("IMAGE", "FLOAT", "FLOAT", "INT", "INT", "INT", "INT",)
    RETURN_NAMES = ("box_preview", "x_percent", "y_percent", "width", "height", "x", "y",)
    FUNCTION = 'mask_box_detect'
    CATEGORY = '😺dzNodes/LayerMask'

    def mask_box_detect(self, mask, detect, x_adjust, y_adjust, scale_adjust):

        if mask.dim() == 2:
            mask = torch.unsqueeze(mask, 0)

        # only the first mask of the batch is analyzed
        if mask.shape[0] > 0:
            mask = torch.unsqueeze(mask[0], 0)

        _mask = mask2image(mask).convert('RGB')
        _mask = gaussian_blur(_mask, 20).convert('L')
        x = 0
        y = 0
        width = 0
        height = 0

        if detect == "min_bounding_rect":
            (x, y, width, height) = min_bounding_rect(_mask)
        elif detect == "max_inscribed_rect":
            (x, y, width, height) = max_inscribed_rect(_mask)
        else:
            (x, y, width, height) = mask_area(_mask)
        log(f"{self.NODE_NAME}: Box detected. x={x},y={y},width={width},height={height}")
        _width = width
        _height = height
        if scale_adjust != 1.0:
            _width = int(width * scale_adjust)
            _height = int(height * scale_adjust)
            x = x - int((_width - width) / 2)
            y = y - int((_height - height) / 2)
        x += x_adjust
        y += y_adjust
        x_percent = (x + _width / 2) / _mask.width * 100
        y_percent = (y + _height / 2) / _mask.height * 100
        preview_image = tensor2pil(mask).convert('RGB')
        # red: box before x/y adjustment; green: box after adjustment
        preview_image = draw_rect(preview_image, x - x_adjust, y - y_adjust, width, height,
                                  line_color="#F00000", line_width=int(preview_image.height / 60))
        preview_image = draw_rect(preview_image, x, y, width, height,
                                  line_color="#00F000", line_width=int(preview_image.height / 40))
        log(f"{self.NODE_NAME} Processed.", message_type='finish')
        return (pil2tensor(preview_image), round(x_percent, 2), round(y_percent, 2), _width, _height, x, y,)


NODE_CLASS_MAPPINGS = {
    "LayerMask: MaskBoxDetect": MaskBoxDetect
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerMask: MaskBoxDetect": "LayerMask: MaskBoxDetect"
}
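
Because the node returns plain numbers alongside the preview image, it is easy to sanity-check. A sketch with an assumed import path and a synthetic mask (note the 20px gaussian blur widens the detected box slightly, so results are approximate):

import torch
from comfyui_layerstyle.py.mask_box_detect import MaskBoxDetect  # hypothetical import path

node = MaskBoxDetect()
mask = torch.zeros(1, 256, 256)
mask[:, 50:150, 80:200] = 1.0  # 120x100 rectangle with its top-left corner at (80, 50)
preview, x_pct, y_pct, w, h, x, y = node.mask_box_detect(
    mask, detect='mask_area', x_adjust=0, y_adjust=0, scale_adjust=1.0)
print(w, h, x, y)  # roughly 120, 100, 80, 50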
comfyui_layerstyle/py/mask_by_color.py
ADDED
@@ -0,0 +1,84 @@
import torch
from PIL import Image
from .imagefunc import log, tensor2pil, pil2tensor, image2mask, create_mask_from_color_tensor, mask_fix


class MaskByColor:

    def __init__(self):
        self.NODE_NAME = 'MaskByColor'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "color": ("COLOR", {"default": "#FFFFFF"},),
                "color_in_HEX": ("STRING", {"default": ""}),
                "threshold": ("INT", {"default": 50, "min": 0, "max": 100, "step": 1}),
                "fix_gap": ("INT", {"default": 2, "min": 0, "max": 32, "step": 1}),
                "fix_threshold": ("FLOAT", {"default": 0.75, "min": 0.01, "max": 0.99, "step": 0.01}),
                "invert_mask": ("BOOLEAN", {"default": False}),  # invert the mask
            },
            "optional": {
                "mask": ("MASK",),
            }
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("mask",)
    FUNCTION = "mask_by_color"
    CATEGORY = '😺dzNodes/LayerMask'

    def mask_by_color(self, image, color, color_in_HEX, threshold,
                      fix_gap, fix_threshold, invert_mask, mask=None):

        # a valid HEX string overrides the color picker value
        if color_in_HEX != "" and color_in_HEX.startswith('#') and len(color_in_HEX) == 7:
            color = color_in_HEX

        ret_masks = []
        l_images = []
        l_masks = []

        for l in image:
            l_images.append(torch.unsqueeze(l, 0))
            m = tensor2pil(l)
            if m.mode == 'RGBA':
                l_masks.append(m.split()[-1])
            else:
                l_masks.append(Image.new('L', m.size, 'white'))
        if mask is not None:
            if mask.dim() == 2:
                mask = torch.unsqueeze(mask, 0)
            l_masks = []
            for m in mask:
                if invert_mask:
                    m = 1 - m
                l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))

        for i in range(len(l_images)):
            img = l_images[i] if i < len(l_images) else l_images[-1]
            img = tensor2pil(img)
            _mask = l_masks[i] if i < len(l_masks) else l_masks[-1]

            mask = Image.new('L', _mask.size, 'black')
            mask.paste(create_mask_from_color_tensor(img, color, threshold), mask=_mask)
            mask = image2mask(mask)
            if invert_mask:
                mask = 1 - mask
            if fix_gap:
                mask = mask_fix(mask, 1, fix_gap, fix_threshold, fix_threshold)
            ret_masks.append(mask)

        return (torch.cat(ret_masks, dim=0),)


NODE_CLASS_MAPPINGS = {
    "LayerMask: MaskByColor": MaskByColor
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerMask: MaskByColor": "LayerMask: Mask by Color"
}
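
A sketch of pulling a mask from a solid-color region (import path and image contents are assumptions; fix_gap=0 skips the gap-fixing pass):

import torch
from comfyui_layerstyle.py.mask_by_color import MaskByColor  # hypothetical import path

node = MaskByColor()
image = torch.zeros(1, 128, 128, 3)
image[:, :, 64:, :] = 1.0  # right half pure white
(mask,) = node.mask_by_color(
    image, color="#FFFFFF", color_in_HEX="", threshold=50,
    fix_gap=0, fix_threshold=0.75, invert_mask=False, mask=None,
)
print(mask.shape, mask.mean().item())  # [1, 128, 128], ~0.5 since half the pixels match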
comfyui_layerstyle/py/mask_edge_shrink.py
ADDED
@@ -0,0 +1,81 @@
import torch
from PIL import Image, ImageChops
from .imagefunc import log, tensor2pil, pil2tensor, image2mask, mask_invert, step_color, expand_mask, step_value, chop_image


class MaskEdgeShrink:

    def __init__(self):
        self.NODE_NAME = 'MaskEdgeShrink'

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "mask": ("MASK",),
                "invert_mask": ("BOOLEAN", {"default": True}),  # invert the mask
                "shrink_level": ("INT", {"default": 4, "min": 0, "max": 16, "step": 1}),
                "soft": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),
                "edge_shrink": ("INT", {"default": 1, "min": 0, "max": 999, "step": 1}),
                "edge_reserve": ("INT", {"default": 25, "min": 0, "max": 100, "step": 1}),  # opacity of the reserved edge
            },
            "optional": {
            }
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("mask",)
    FUNCTION = 'mask_edge_shrink'
    CATEGORY = '😺dzNodes/LayerMask'

    def mask_edge_shrink(self, mask, invert_mask, shrink_level, soft, edge_shrink, edge_reserve):

        l_masks = []
        ret_masks = []

        if mask.dim() == 2:
            mask = torch.unsqueeze(mask, 0)

        for m in mask:
            if invert_mask:
                m = 1 - m
            l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))

        glow_range = shrink_level * soft
        blur = 12

        for i in range(len(l_masks)):
            _mask = l_masks[i]
            _canvas = Image.new('RGB', size=_mask.size, color='black')
            _layer = Image.new('RGB', size=_mask.size, color='white')
            loop_grow = glow_range
            inner_mask = _mask
            for x in range(shrink_level):
                _color = step_color('#FFFFFF', '#000000', shrink_level, x)
                glow_mask = expand_mask(image2mask(inner_mask), -loop_grow, blur / (x + 0.1))  # negative grow shrinks, then blur
                # composite the step
                color_image = Image.new("RGB", _layer.size, color=_color)
                alpha = tensor2pil(mask_invert(glow_mask)).convert('L')
                _glow = chop_image(_layer, color_image, 'subtract', int(step_value(1, 100, shrink_level, x)))
                _layer.paste(_glow, mask=alpha)
                loop_grow = loop_grow - int(glow_range / shrink_level)
            # composite the layer
            _edge = tensor2pil(expand_mask(image2mask(_mask), -edge_shrink, 0)).convert('RGB')
            _layer = chop_image(_layer, _edge, 'normal', edge_reserve)
            _layer.paste(_canvas, mask=ImageChops.invert(_mask))

            ret_masks.append(image2mask(_layer))

        log(f"{self.NODE_NAME} Processed {len(ret_masks)} mask(s).", message_type='finish')
        return (torch.cat(ret_masks, dim=0),)


NODE_CLASS_MAPPINGS = {
    "LayerMask: MaskEdgeShrink": MaskEdgeShrink
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerMask: MaskEdgeShrink": "LayerMask: MaskEdgeShrink"
}
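
And a sketch for the edge-shrink node, which builds a stepped falloff toward the mask edge (import path and parameter values are assumptions; invert_mask defaults to True, so it is disabled explicitly here):

import torch
from comfyui_layerstyle.py.mask_edge_shrink import MaskEdgeShrink  # hypothetical import path

node = MaskEdgeShrink()
mask = torch.zeros(1, 256, 256)
mask[:, 32:224, 32:224] = 1.0
(out,) = node.mask_edge_shrink(
    mask, invert_mask=False, shrink_level=4, soft=6, edge_shrink=8, edge_reserve=25)
print(out.shape)  # [1, 256, 256]; the band near the edge fades in discrete steps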