surena26 committed
Commit e6fc40b · verified · 1 Parent(s): 614e660

Upload ComfyUI/nodes.py with huggingface_hub

Files changed (1): ComfyUI/nodes.py (+1982, -0)
ComfyUI/nodes.py ADDED
@@ -0,0 +1,1982 @@
import torch

import os
import sys
import json
import hashlib
import traceback
import math
import time
import random
import logging

from PIL import Image, ImageOps, ImageSequence, ImageFile
from PIL.PngImagePlugin import PngInfo

import numpy as np
import safetensors.torch

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))

import comfy.diffusers_load
import comfy.samplers
import comfy.sample
import comfy.sd
import comfy.utils
import comfy.controlnet

import comfy.clip_vision

import comfy.model_management
from comfy.cli_args import args

import importlib

import folder_paths
import latent_preview
import node_helpers

def before_node_execution():
    comfy.model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    comfy.model_management.interrupt_current_processing(value)

MAX_RESOLUTION=16384

class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True, "dynamicPrompts": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
        tokens = clip.tokenize(text)
        cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
        return ([[cond, {"pooled_output": pooled}]], )

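# A CONDITIONING value, as produced above, is a list of [tensor, options]
# pairs: the cross-attention embedding plus a dict of extras such as
# "pooled_output". The conditioning nodes below all read and extend that
# structure.
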
class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningAverage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ), "conditioning_from": ("CONDITIONING", ),
                             "conditioning_to_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "addWeighted"

    CATEGORY = "conditioning"

    def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength):
        out = []

        if len(conditioning_from) > 1:
            logging.warning("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]
        pooled_output_from = conditioning_from[0][1].get("pooled_output", None)

        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            pooled_output_to = conditioning_to[i][1].get("pooled_output", pooled_output_from)
            t0 = cond_from[:,:t1.shape[1]]
            if t0.shape[1] < t1.shape[1]:
                t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1)

            tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength))
            t_to = conditioning_to[i][1].copy()
            if pooled_output_from is not None and pooled_output_to is not None:
                t_to["pooled_output"] = torch.mul(pooled_output_to, conditioning_to_strength) + torch.mul(pooled_output_from, (1.0 - conditioning_to_strength))
            elif pooled_output_from is not None:
                t_to["pooled_output"] = pooled_output_from

            n = [tw, t_to]
            out.append(n)
        return (out, )

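# The blend above is a plain linear interpolation:
#   tw = strength * cond_to + (1 - strength) * cond_from
# with cond_from zero-padded (or truncated) to cond_to's token length first.
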
class ConditioningConcat:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "conditioning_to": ("CONDITIONING",),
            "conditioning_from": ("CONDITIONING",),
            }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "concat"

    CATEGORY = "conditioning"

    def concat(self, conditioning_to, conditioning_from):
        out = []

        if len(conditioning_from) > 1:
            logging.warning("Warning: ConditioningConcat conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]

        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            tw = torch.cat((t1, cond_from),1)
            n = [tw, conditioning_to[i][1].copy()]
            out.append(n)

        return (out, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                             "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                             "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
        c = node_helpers.conditioning_set_values(conditioning, {"area": (height // 8, width // 8, y // 8, x // 8),
                                                                "strength": strength,
                                                                "set_area_to_bounds": False})
        return (c, )

class ConditioningSetAreaPercentage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "width": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
                             "height": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
                             "x": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
                             "y": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
        c = node_helpers.conditioning_set_values(conditioning, {"area": ("percentage", height, width, y, x),
                                                                "strength": strength,
                                                                "set_area_to_bounds": False})
        return (c, )

class ConditioningSetAreaStrength:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, strength):
        c = node_helpers.conditioning_set_values(conditioning, {"strength": strength})
        return (c, )


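# Pixel coordinates above are stored divided by 8, i.e. in latent units;
# the percentage variant instead passes fractions through unchanged.
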
class ConditioningSetMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "mask": ("MASK", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             "set_cond_area": (["default", "mask bounds"],),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, mask, set_cond_area, strength):
        set_area_to_bounds = False
        if set_cond_area != "default":
            set_area_to_bounds = True
        if len(mask.shape) < 3:
            mask = mask.unsqueeze(0)

        c = node_helpers.conditioning_set_values(conditioning, {"mask": mask,
                                                                "set_area_to_bounds": set_area_to_bounds,
                                                                "mask_strength": strength})
        return (c, )

class ConditioningZeroOut:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "zero_out"

    CATEGORY = "advanced/conditioning"

    def zero_out(self, conditioning):
        c = []
        for t in conditioning:
            d = t[1].copy()
            if "pooled_output" in d:
                d["pooled_output"] = torch.zeros_like(d["pooled_output"])
            n = [torch.zeros_like(t[0]), d]
            c.append(n)
        return (c, )

class ConditioningSetTimestepRange:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "start": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "set_range"

    CATEGORY = "advanced/conditioning"

    def set_range(self, conditioning, start, end):
        c = node_helpers.conditioning_set_values(conditioning, {"start_percent": start,
                                                                "end_percent": end})
        return (c, )

class VAEDecode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT", ), "vae": ("VAE", ),
                             "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64})
                             }}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples, tile_size):
        return (vae.decode_tiled(samples["samples"], tile_x=tile_size // 8, tile_y=tile_size // 8, ), )

class VAEEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    def encode(self, vae, pixels):
        t = vae.encode(pixels[:,:,:,:3])
        return ({"samples":t}, )

class VAEEncodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"pixels": ("IMAGE", ), "vae": ("VAE", ),
                             "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64})
                             }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels, tile_size):
        t = vae.encode_tiled(pixels[:,:,:,:3], tile_x=tile_size, tile_y=tile_size, )
        return ({"samples":t}, )

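# Round-trip sketch using the two nodes above (illustrative only):
#   latent = VAEEncode().encode(vae, pixels)[0]    # IMAGE -> {"samples": tensor}
#   pixels2 = VAEDecode().decode(vae, latent)[0]   # LATENT -> IMAGE
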
class VAEEncodeForInpaint:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", ), "grow_mask_by": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask, grow_mask_by=6):
        x = (pixels.shape[1] // vae.downscale_ratio) * vae.downscale_ratio
        y = (pixels.shape[2] // vae.downscale_ratio) * vae.downscale_ratio
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % vae.downscale_ratio) // 2
            y_offset = (pixels.shape[2] % vae.downscale_ratio) // 2
            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]

        #grow mask by a few pixels to keep things seamless in latent space
        if grow_mask_by == 0:
            mask_erosion = mask
        else:
            kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by))
            padding = math.ceil((grow_mask_by - 1) / 2)

            mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=padding), 0, 1)

        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )


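# The loop above remaps masked pixels to 0.5 (neutral gray) before encoding,
# presumably so the region being inpainted encodes as neutral content rather
# than the original pixels; the dilated mask is returned as "noise_mask" for
# the sampler.
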
class InpaintModelConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING", ),
                             "negative": ("CONDITIONING", ),
                             "vae": ("VAE", ),
                             "pixels": ("IMAGE", ),
                             "mask": ("MASK", ),
                             }}

    RETURN_TYPES = ("CONDITIONING","CONDITIONING","LATENT")
    RETURN_NAMES = ("positive", "negative", "latent")
    FUNCTION = "encode"

    CATEGORY = "conditioning/inpaint"

    def encode(self, positive, negative, pixels, vae, mask):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        orig_pixels = pixels
        pixels = orig_pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]

        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        concat_latent = vae.encode(pixels)
        orig_latent = vae.encode(orig_pixels)

        out_latent = {}

        out_latent["samples"] = orig_latent
        out_latent["noise_mask"] = mask

        out = []
        for conditioning in [positive, negative]:
            c = node_helpers.conditioning_set_values(conditioning, {"concat_latent_image": concat_latent,
                                                                    "concat_mask": mask})
            out.append(c)
        return (out[0], out[1], out_latent)


class SaveLatent:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ),
                              "filename_prefix": ("STRING", {"default": "latents/ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }
    RETURN_TYPES = ()
    FUNCTION = "save"

    OUTPUT_NODE = True

    CATEGORY = "_for_testing"

    def save(self, samples, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)

        # save metadata to support latent sharing
        prompt_info = ""
        if prompt is not None:
            prompt_info = json.dumps(prompt)

        metadata = None
        if not args.disable_metadata:
            metadata = {"prompt": prompt_info}
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata[x] = json.dumps(extra_pnginfo[x])

        file = f"{filename}_{counter:05}_.latent"

        results = list()
        results.append({
            "filename": file,
            "subfolder": subfolder,
            "type": "output"
        })

        file = os.path.join(full_output_folder, file)

        output = {}
        output["latent_tensor"] = samples["samples"]
        output["latent_format_version_0"] = torch.tensor([])

        comfy.utils.save_torch_file(output, file, metadata=metadata)
        return { "ui": { "latents": results } }


class LoadLatent:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and f.endswith(".latent")]
        return {"required": {"latent": [sorted(files), ]}, }

    CATEGORY = "_for_testing"

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "load"

    def load(self, latent):
        latent_path = folder_paths.get_annotated_filepath(latent)
        latent = safetensors.torch.load_file(latent_path, device="cpu")
        multiplier = 1.0
        if "latent_format_version_0" not in latent:
            multiplier = 1.0 / 0.18215
        samples = {"samples": latent["latent_tensor"].float() * multiplier}
        return (samples, )

    @classmethod
    def IS_CHANGED(s, latent):
        image_path = folder_paths.get_annotated_filepath(latent)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, latent):
        if not folder_paths.exists_annotated_filepath(latent):
            return "Invalid latent file: {}".format(latent)
        return True


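# Legacy .latent files (those without the "latent_format_version_0" marker
# written by SaveLatent above) were presumably stored with the SD1 scale
# factor 0.18215 already applied, hence the 1 / 0.18215 multiplier on load.
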
class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out[:3]

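# Usage sketch (the checkpoint filename is a placeholder, not part of this
# file; it must exist under models/checkpoints):
#   model, clip, vae = CheckpointLoaderSimple().load_checkpoint("sd15.safetensors")
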
class DiffusersLoader:
    @classmethod
    def INPUT_TYPES(cls):
        paths = []
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                for root, subdir, files in os.walk(search_path, followlinks=True):
                    if "model_index.json" in files:
                        paths.append(os.path.relpath(root, start=search_path))

        return {"required": {"model_path": (paths,), }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders/deprecated"

    def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                path = os.path.join(search_path, model_path)
                if os.path.exists(path):
                    model_path = path
                    break

        return comfy.diffusers_load.load_diffusers(model_path, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))


class unCLIPCheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class LoraLoader:
    def __init__(self):
        self.loaded_lora = None

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        if strength_model == 0 and strength_clip == 0:
            return (model, clip)

        lora_path = folder_paths.get_full_path("loras", lora_name)
        lora = None
        if self.loaded_lora is not None:
            if self.loaded_lora[0] == lora_path:
                lora = self.loaded_lora[1]
            else:
                temp = self.loaded_lora
                self.loaded_lora = None
                del temp

        if lora is None:
            lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
            self.loaded_lora = (lora_path, lora)

        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
        return (model_lora, clip_lora)

class LoraLoaderModelOnly(LoraLoader):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "load_lora_model_only"

    def load_lora_model_only(self, model, lora_name, strength_model):
        return (self.load_lora(model, None, lora_name, strength_model, 0)[0],)

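# self.loaded_lora caches the most recently read LoRA file per node instance,
# so re-running a workflow that only changes strengths avoids a disk reload.
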
class VAELoader:
    @staticmethod
    def vae_list():
        vaes = folder_paths.get_filename_list("vae")
        approx_vaes = folder_paths.get_filename_list("vae_approx")
        sdxl_taesd_enc = False
        sdxl_taesd_dec = False
        sd1_taesd_enc = False
        sd1_taesd_dec = False

        for v in approx_vaes:
            if v.startswith("taesd_decoder."):
                sd1_taesd_dec = True
            elif v.startswith("taesd_encoder."):
                sd1_taesd_enc = True
            elif v.startswith("taesdxl_decoder."):
                sdxl_taesd_dec = True
            elif v.startswith("taesdxl_encoder."):
                sdxl_taesd_enc = True
        if sd1_taesd_dec and sd1_taesd_enc:
            vaes.append("taesd")
        if sdxl_taesd_dec and sdxl_taesd_enc:
            vaes.append("taesdxl")
        return vaes

    @staticmethod
    def load_taesd(name):
        sd = {}
        approx_vaes = folder_paths.get_filename_list("vae_approx")

        encoder = next(filter(lambda a: a.startswith("{}_encoder.".format(name)), approx_vaes))
        decoder = next(filter(lambda a: a.startswith("{}_decoder.".format(name)), approx_vaes))

        enc = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", encoder))
        for k in enc:
            sd["taesd_encoder.{}".format(k)] = enc[k]

        dec = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", decoder))
        for k in dec:
            sd["taesd_decoder.{}".format(k)] = dec[k]

        if name == "taesd":
            sd["vae_scale"] = torch.tensor(0.18215)
        elif name == "taesdxl":
            sd["vae_scale"] = torch.tensor(0.13025)
        return sd

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (s.vae_list(), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        if vae_name in ["taesd", "taesdxl"]:
            sd = self.load_taesd(vae_name)
        else:
            vae_path = folder_paths.get_full_path("vae", vae_name)
            sd = comfy.utils.load_torch_file(vae_path)
        vae = comfy.sd.VAE(sd=sd)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.controlnet.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.controlnet.load_controlnet(controlnet_path, model)
        return (controlnet,)


class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        if strength == 0:
            return (conditioning, )

        c = []
        control_hint = image.movedim(-1,1)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            n[1]['control_apply_to_uncond'] = True
            c.append(n)
        return (c, )


class ControlNetApplyAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING", ),
                             "negative": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}

    RETURN_TYPES = ("CONDITIONING","CONDITIONING")
    RETURN_NAMES = ("positive", "negative")
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent):
        if strength == 0:
            return (positive, negative)

        control_hint = image.movedim(-1,1)
        cnets = {}

        out = []
        for conditioning in [positive, negative]:
            c = []
            for t in conditioning:
                d = t[1].copy()

                prev_cnet = d.get('control', None)
                if prev_cnet in cnets:
                    c_net = cnets[prev_cnet]
                else:
                    c_net = control_net.copy().set_cond_hint(control_hint, strength, (start_percent, end_percent))
                    c_net.set_previous_controlnet(prev_cnet)
                    cnets[prev_cnet] = c_net

                d['control'] = c_net
                d['control_apply_to_uncond'] = False
                n = [t[0], d]
                c.append(n)
            out.append(c)
        return (out[0], out[1])


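# Stacking either node chains ControlNets: the previously applied network is
# preserved via set_previous_controlnet, so several hints can act on a single
# sampling pass. The advanced variant additionally limits the effect to a
# timestep window (start_percent .. end_percent).
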
class UNETLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "unet_name": (folder_paths.get_filename_list("unet"), ),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "load_unet"

    CATEGORY = "advanced/loaders"

    def load_unet(self, unet_name):
        unet_path = folder_paths.get_full_path("unet", unet_name)
        model = comfy.sd.load_unet(unet_path)
        return (model,)

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                              "type": (["stable_diffusion", "stable_cascade"], ),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    def load_clip(self, clip_name, type="stable_diffusion"):
        clip_type = comfy.sd.CLIPType.STABLE_DIFFUSION
        if type == "stable_cascade":
            clip_type = comfy.sd.CLIPType.STABLE_CASCADE

        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_paths=[clip_path], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type)
        return (clip,)

class DualCLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name1": (folder_paths.get_filename_list("clip"), ), "clip_name2": (folder_paths.get_filename_list("clip"), ),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    def load_clip(self, clip_name1, clip_name2):
        clip_path1 = folder_paths.get_full_path("clip", clip_name1)
        clip_path2 = folder_paths.get_full_path("clip", clip_name2)
        clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2], embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                              }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                              }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output).flatten(start_dim=0, end_dim=1).unsqueeze(dim=0)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class unCLIPConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_adm"

    CATEGORY = "conditioning"

    def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
        if strength == 0:
            return (conditioning, )

        c = []
        for t in conditioning:
            o = t[1].copy()
            x = {"clip_vision_output": clip_vision_output, "strength": strength, "noise_augmentation": noise_augmentation}
            if "unclip_conditioning" in o:
                o["unclip_conditioning"] = o["unclip_conditioning"][:] + [x]
            else:
                o["unclip_conditioning"] = [x]
            n = [t[0], o]
            c.append(n)
        return (c, )

class GLIGENLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), )}}

    RETURN_TYPES = ("GLIGEN",)
    FUNCTION = "load_gligen"

    CATEGORY = "loaders"

    def load_gligen(self, gligen_name):
        gligen_path = folder_paths.get_full_path("gligen", gligen_name)
        gligen = comfy.sd.load_gligen(gligen_path)
        return (gligen,)

class GLIGENTextBoxApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ),
                             "clip": ("CLIP", ),
                             "gligen_textbox_model": ("GLIGEN", ),
                             "text": ("STRING", {"multiline": True, "dynamicPrompts": True}),
                             "width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                             "height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                             "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning/gligen"

    def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
        c = []
        cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled="unprojected")
        for t in conditioning_to:
            n = [t[0], t[1].copy()]
            position_params = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
            prev = []
            if "gligen" in n[1]:
                prev = n[1]['gligen'][2]

            n[1]['gligen'] = ("position", gligen_textbox_model, prev + position_params)
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self):
        self.device = comfy.model_management.intermediate_device()

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
        latent = torch.zeros([batch_size, 4, height // 8, width // 8], device=self.device)
        return ({"samples":latent}, )


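# The empty latent is [batch, 4, height/8, width/8]: four channels and an 8x
# spatial downscale, matching the SD-style VAEs used throughout this file.
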
class LatentFromBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
                              "length": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "frombatch"

    CATEGORY = "latent/batch"

    def frombatch(self, samples, batch_index, length):
        s = samples.copy()
        s_in = samples["samples"]
        batch_index = min(s_in.shape[0] - 1, batch_index)
        length = min(s_in.shape[0] - batch_index, length)
        s["samples"] = s_in[batch_index:batch_index + length].clone()
        if "noise_mask" in samples:
            masks = samples["noise_mask"]
            if masks.shape[0] == 1:
                s["noise_mask"] = masks.clone()
            else:
                if masks.shape[0] < s_in.shape[0]:
                    masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
                s["noise_mask"] = masks[batch_index:batch_index + length].clone()
        if "batch_index" not in s:
            s["batch_index"] = [x for x in range(batch_index, batch_index+length)]
        else:
            s["batch_index"] = samples["batch_index"][batch_index:batch_index + length]
        return (s,)

class RepeatLatentBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "amount": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "repeat"

    CATEGORY = "latent/batch"

    def repeat(self, samples, amount):
        s = samples.copy()
        s_in = samples["samples"]

        s["samples"] = s_in.repeat((amount, 1,1,1))
        if "noise_mask" in samples and samples["noise_mask"].shape[0] > 1:
            masks = samples["noise_mask"]
            if masks.shape[0] < s_in.shape[0]:
                masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
            s["noise_mask"] = samples["noise_mask"].repeat((amount, 1,1,1))
        if "batch_index" in s:
            offset = max(s["batch_index"]) - min(s["batch_index"]) + 1
            s["batch_index"] = s["batch_index"] + [x + (i * offset) for i in range(1, amount) for x in s["batch_index"]]
        return (s,)

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        if width == 0 and height == 0:
            s = samples
        else:
            s = samples.copy()

            if width == 0:
                height = max(64, height)
                width = max(64, round(samples["samples"].shape[3] * height / samples["samples"].shape[2]))
            elif height == 0:
                width = max(64, width)
                height = max(64, round(samples["samples"].shape[2] * width / samples["samples"].shape[3]))
            else:
                width = max(64, width)
                height = max(64, height)

            s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentUpscaleBy:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "scale_by": ("FLOAT", {"default": 1.5, "min": 0.01, "max": 8.0, "step": 0.01}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, scale_by):
        s = samples.copy()
        width = round(samples["samples"].shape[3] * scale_by)
        height = round(samples["samples"].shape[2] * scale_by)
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, "disabled")
        return (s,)

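# LatentUpscale takes pixel-space width/height and divides by 8 internally;
# LatentUpscaleBy multiplies the latent's own height/width directly, so both
# arrive at the same latent size for equivalent settings.
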
class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x = x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)

class LatentBlend:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "samples1": ("LATENT",),
            "samples2": ("LATENT",),
            "blend_factor": ("FLOAT", {
                "default": 0.5,
                "min": 0,
                "max": 1,
                "step": 0.01
            }),
            }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "blend"

    CATEGORY = "_for_testing"

    def blend(self, samples1, samples2, blend_factor:float, blend_mode: str="normal"):

        samples_out = samples1.copy()
        samples1 = samples1["samples"]
        samples2 = samples2["samples"]

        if samples1.shape != samples2.shape:
            # latents are already NCHW, which is what common_upscale expects
            samples2 = comfy.utils.common_upscale(samples2, samples1.shape[3], samples1.shape[2], 'bicubic', crop='center')

        samples_blended = self.blend_mode(samples1, samples2, blend_mode)
        samples_blended = samples1 * blend_factor + samples_blended * (1 - blend_factor)
        samples_out["samples"] = samples_blended
        return (samples_out,)

    def blend_mode(self, img1, img2, mode):
        if mode == "normal":
            return img2
        else:
            raise ValueError(f"Unsupported blend mode: {mode}")

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x = x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
        return (s,)

def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    latent_image = latent["samples"]
    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        batch_inds = latent["batch_index"] if "batch_index" in latent else None
        noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)

    noise_mask = None
    if "noise_mask" in latent:
        noise_mask = latent["noise_mask"]

    callback = latent_preview.prepare_callback(model, steps)
    disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
    out = latent.copy()
    out["samples"] = samples
    return (out, )

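# The KSampler and KSamplerAdvanced nodes below are thin wrappers over
# common_ksampler; the advanced variant exposes the start/end step window and
# the add-noise / leftover-noise toggles as node inputs.
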
class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                     "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                     "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                     "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
                     "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                     "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                     "positive": ("CONDITIONING", ),
                     "negative": ("CONDITIONING", ),
                     "latent_image": ("LATENT", ),
                     "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                     "add_noise": (["enable", "disable"], ),
                     "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                     "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                     "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
                     "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                     "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                     "positive": ("CONDITIONING", ),
                     "negative": ("CONDITIONING", ),
                     "latent_image": ("LATENT", ),
                     "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                     "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                     "return_with_leftover_noise": (["disable", "enable"], ),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"
        self.prefix_append = ""
        self.compress_level = 4

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        filename_prefix += self.prefix_append
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
        results = list()
        for (batch_number, image) in enumerate(images):
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = None
            if not args.disable_metadata:
                metadata = PngInfo()
                if prompt is not None:
                    metadata.add_text("prompt", json.dumps(prompt))
                if extra_pnginfo is not None:
                    for x in extra_pnginfo:
                        metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
            file = f"{filename_with_batch_num}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=self.compress_level)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }

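# Unless metadata is disabled via args.disable_metadata, the full prompt graph
# is embedded as PNG text chunks, which is what lets a saved image be dropped
# back into the UI to restore its workflow. "%batch_num%" in the filename
# prefix is replaced per image in the batch.
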
class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"
        self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstuvwxyz") for x in range(5))
        self.compress_level = 1

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

+
+ class LoadImage:
+     @classmethod
+     def INPUT_TYPES(s):
+         input_dir = folder_paths.get_input_directory()
+         files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
+         return {"required":
+                     {"image": (sorted(files), {"image_upload": True})},
+                 }
+
+     CATEGORY = "image"
+
+     RETURN_TYPES = ("IMAGE", "MASK")
+     FUNCTION = "load_image"
+     def load_image(self, image):
+         image_path = folder_paths.get_annotated_filepath(image)
+
+         img = node_helpers.pillow(Image.open, image_path)
+
+         output_images = []
+         output_masks = []
+         w, h = None, None
+
+         excluded_formats = ['MPO']
+
+         for i in ImageSequence.Iterator(img):
+             i = node_helpers.pillow(ImageOps.exif_transpose, i)
+
+             if i.mode == 'I':
+                 i = i.point(lambda i: i * (1 / 255))
+             image = i.convert("RGB")
+
+             if len(output_images) == 0:
+                 w = image.size[0]
+                 h = image.size[1]
+
+             if image.size[0] != w or image.size[1] != h:
+                 continue
+
+             image = np.array(image).astype(np.float32) / 255.0
+             image = torch.from_numpy(image)[None,]
+             if 'A' in i.getbands():
+                 mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
+                 mask = 1. - torch.from_numpy(mask)
+             else:
+                 mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
+             output_images.append(image)
+             output_masks.append(mask.unsqueeze(0))
+
+         if len(output_images) > 1 and img.format not in excluded_formats:
+             output_image = torch.cat(output_images, dim=0)
+             output_mask = torch.cat(output_masks, dim=0)
+         else:
+             output_image = output_images[0]
+             output_mask = output_masks[0]
+
+         return (output_image, output_mask)
+
+     @classmethod
+     def IS_CHANGED(s, image):
+         image_path = folder_paths.get_annotated_filepath(image)
+         m = hashlib.sha256()
+         with open(image_path, 'rb') as f:
+             m.update(f.read())
+         return m.digest().hex()
+
+     @classmethod
+     def VALIDATE_INPUTS(s, image):
+         if not folder_paths.exists_annotated_filepath(image):
+             return "Invalid image file: {}".format(image)
+
+         return True
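+
+ # Images come out as [batch, height, width, channel] float tensors in 0..1,
+ # and the mask is the inverted alpha channel (1.0 where the image is fully
+ # transparent), falling back to a 64x64 zero tensor when no alpha exists.
+ # IS_CHANGED hashes the file contents, so a re-queued prompt re-runs this
+ # node only when the file on disk actually changes.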
+
+ class LoadImageMask:
+     _color_channels = ["alpha", "red", "green", "blue"]
+     @classmethod
+     def INPUT_TYPES(s):
+         input_dir = folder_paths.get_input_directory()
+         files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
+         return {"required":
+                     {"image": (sorted(files), {"image_upload": True}),
+                      "channel": (s._color_channels, ), }
+                 }
+
+     CATEGORY = "mask"
+
+     RETURN_TYPES = ("MASK",)
+     FUNCTION = "load_image"
+     def load_image(self, image, channel):
+         image_path = folder_paths.get_annotated_filepath(image)
+         i = node_helpers.pillow(Image.open, image_path)
+         i = node_helpers.pillow(ImageOps.exif_transpose, i)
+         if i.getbands() != ("R", "G", "B", "A"):
+             if i.mode == 'I':
+                 i = i.point(lambda i: i * (1 / 255))
+             i = i.convert("RGBA")
+         mask = None
+         c = channel[0].upper()
+         if c in i.getbands():
+             mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
+             mask = torch.from_numpy(mask)
+             if c == 'A':
+                 mask = 1. - mask
+         else:
+             mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
+         return (mask.unsqueeze(0),)
+
+     @classmethod
+     def IS_CHANGED(s, image, channel):
+         image_path = folder_paths.get_annotated_filepath(image)
+         m = hashlib.sha256()
+         with open(image_path, 'rb') as f:
+             m.update(f.read())
+         return m.digest().hex()
+
+     @classmethod
+     def VALIDATE_INPUTS(s, image):
+         if not folder_paths.exists_annotated_filepath(image):
+             return "Invalid image file: {}".format(image)
+
+         return True
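+
+ # The selected channel is mapped to a PIL band by its first letter
+ # ("alpha" -> 'A', "red" -> 'R', ...); only the alpha channel is inverted,
+ # matching the mask convention in LoadImage above.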
+
+ class ImageScale:
+     upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
+     crop_methods = ["disabled", "center"]
+
+     @classmethod
+     def INPUT_TYPES(s):
+         return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
+                               "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
+                               "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
+                               "crop": (s.crop_methods,)}}
+     RETURN_TYPES = ("IMAGE",)
+     FUNCTION = "upscale"
+
+     CATEGORY = "image/upscaling"
+
+     def upscale(self, image, upscale_method, width, height, crop):
+         if width == 0 and height == 0:
+             s = image
+         else:
+             samples = image.movedim(-1,1)
+
+             if width == 0:
+                 width = max(1, round(samples.shape[3] * height / samples.shape[2]))
+             elif height == 0:
+                 height = max(1, round(samples.shape[2] * width / samples.shape[3]))
+
+             s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
+             s = s.movedim(1,-1)
+         return (s,)
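+
+ # Setting exactly one of width/height to 0 keeps the aspect ratio: e.g. a
+ # 1024x768 input with width=512, height=0 resolves to 512x384. The movedim
+ # calls convert between this file's [B,H,W,C] image layout and the
+ # [B,C,H,W] layout that common_upscale operates on.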
+
+ class ImageScaleBy:
+     upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
+
+     @classmethod
+     def INPUT_TYPES(s):
+         return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
+                               "scale_by": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}),}}
+     RETURN_TYPES = ("IMAGE",)
+     FUNCTION = "upscale"
+
+     CATEGORY = "image/upscaling"
+
+     def upscale(self, image, upscale_method, scale_by):
+         samples = image.movedim(-1,1)
+         width = round(samples.shape[3] * scale_by)
+         height = round(samples.shape[2] * scale_by)
+         s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
+         s = s.movedim(1,-1)
+         return (s,)
+
+ class ImageInvert:
+
+     @classmethod
+     def INPUT_TYPES(s):
+         return {"required": { "image": ("IMAGE",)}}
+
+     RETURN_TYPES = ("IMAGE",)
+     FUNCTION = "invert"
+
+     CATEGORY = "image"
+
+     def invert(self, image):
+         s = 1.0 - image
+         return (s,)
+
+ class ImageBatch:
+
+     @classmethod
+     def INPUT_TYPES(s):
+         return {"required": { "image1": ("IMAGE",), "image2": ("IMAGE",)}}
+
+     RETURN_TYPES = ("IMAGE",)
+     FUNCTION = "batch"
+
+     CATEGORY = "image"
+
+     def batch(self, image1, image2):
+         if image1.shape[1:] != image2.shape[1:]:
+             image2 = comfy.utils.common_upscale(image2.movedim(-1,1), image1.shape[2], image1.shape[1], "bilinear", "center").movedim(1,-1)
+         s = torch.cat((image1, image2), dim=0)
+         return (s,)
+
+ class EmptyImage:
+     def __init__(self, device="cpu"):
+         self.device = device
+
+     @classmethod
+     def INPUT_TYPES(s):
+         return {"required": { "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
+                               "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
+                               "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
+                               "color": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFF, "step": 1, "display": "color"}),
+                               }}
+     RETURN_TYPES = ("IMAGE",)
+     FUNCTION = "generate"
+
+     CATEGORY = "image"
+
+     def generate(self, width, height, batch_size=1, color=0):
+         r = torch.full([batch_size, height, width, 1], ((color >> 16) & 0xFF) / 0xFF)
+         g = torch.full([batch_size, height, width, 1], ((color >> 8) & 0xFF) / 0xFF)
+         b = torch.full([batch_size, height, width, 1], ((color) & 0xFF) / 0xFF)
+         return (torch.cat((r, g, b), dim=-1), )
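+
+ # color packs RGB into one 24-bit int, decoded by shifting and masking:
+ # e.g. color=0xFF8000 gives r=(0xFF8000>>16)&0xFF=255, g=(0xFF8000>>8)&0xFF=128,
+ # b=0, i.e. a solid orange image after dividing by 0xFF.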
+
+ class ImagePadForOutpaint:
+
+     @classmethod
+     def INPUT_TYPES(s):
+         return {
+             "required": {
+                 "image": ("IMAGE",),
+                 "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
+                 "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
+                 "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
+                 "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
+                 "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
+             }
+         }
+
+     RETURN_TYPES = ("IMAGE", "MASK")
+     FUNCTION = "expand_image"
+
+     CATEGORY = "image"
+
+     def expand_image(self, image, left, top, right, bottom, feathering):
+         d1, d2, d3, d4 = image.size()
+
+         new_image = torch.ones(
+             (d1, d2 + top + bottom, d3 + left + right, d4),
+             dtype=torch.float32,
+         ) * 0.5
+
+         new_image[:, top:top + d2, left:left + d3, :] = image
+
+         mask = torch.ones(
+             (d2 + top + bottom, d3 + left + right),
+             dtype=torch.float32,
+         )
+
+         t = torch.zeros(
+             (d2, d3),
+             dtype=torch.float32
+         )
+
+         if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:
+
+             for i in range(d2):
+                 for j in range(d3):
+                     dt = i if top != 0 else d2
+                     db = d2 - i if bottom != 0 else d2
+
+                     dl = j if left != 0 else d3
+                     dr = d3 - j if right != 0 else d3
+
+                     d = min(dt, db, dl, dr)
+
+                     if d >= feathering:
+                         continue
+
+                     v = (feathering - d) / feathering
+
+                     t[i, j] = v * v
+
+         mask[top:top + d2, left:left + d3] = t
+
+         return (new_image, mask)
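+
+ # The feather ramps the mask quadratically from 1 at a padded edge down to
+ # 0 at feathering pixels inside the kept image: for a pixel at distance d
+ # from the nearest padded edge, t = ((feathering - d) / feathering) ** 2,
+ # so the inpainted border blends smoothly into the original content.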
+
+
+ NODE_CLASS_MAPPINGS = {
+     "KSampler": KSampler,
+     "CheckpointLoaderSimple": CheckpointLoaderSimple,
+     "CLIPTextEncode": CLIPTextEncode,
+     "CLIPSetLastLayer": CLIPSetLastLayer,
+     "VAEDecode": VAEDecode,
+     "VAEEncode": VAEEncode,
+     "VAEEncodeForInpaint": VAEEncodeForInpaint,
+     "VAELoader": VAELoader,
+     "EmptyLatentImage": EmptyLatentImage,
+     "LatentUpscale": LatentUpscale,
+     "LatentUpscaleBy": LatentUpscaleBy,
+     "LatentFromBatch": LatentFromBatch,
+     "RepeatLatentBatch": RepeatLatentBatch,
+     "SaveImage": SaveImage,
+     "PreviewImage": PreviewImage,
+     "LoadImage": LoadImage,
+     "LoadImageMask": LoadImageMask,
+     "ImageScale": ImageScale,
+     "ImageScaleBy": ImageScaleBy,
+     "ImageInvert": ImageInvert,
+     "ImageBatch": ImageBatch,
+     "ImagePadForOutpaint": ImagePadForOutpaint,
+     "EmptyImage": EmptyImage,
+     "ConditioningAverage": ConditioningAverage,
+     "ConditioningCombine": ConditioningCombine,
+     "ConditioningConcat": ConditioningConcat,
+     "ConditioningSetArea": ConditioningSetArea,
+     "ConditioningSetAreaPercentage": ConditioningSetAreaPercentage,
+     "ConditioningSetAreaStrength": ConditioningSetAreaStrength,
+     "ConditioningSetMask": ConditioningSetMask,
+     "KSamplerAdvanced": KSamplerAdvanced,
+     "SetLatentNoiseMask": SetLatentNoiseMask,
+     "LatentComposite": LatentComposite,
+     "LatentBlend": LatentBlend,
+     "LatentRotate": LatentRotate,
+     "LatentFlip": LatentFlip,
+     "LatentCrop": LatentCrop,
+     "LoraLoader": LoraLoader,
+     "CLIPLoader": CLIPLoader,
+     "UNETLoader": UNETLoader,
+     "DualCLIPLoader": DualCLIPLoader,
+     "CLIPVisionEncode": CLIPVisionEncode,
+     "StyleModelApply": StyleModelApply,
+     "unCLIPConditioning": unCLIPConditioning,
+     "ControlNetApply": ControlNetApply,
+     "ControlNetApplyAdvanced": ControlNetApplyAdvanced,
+     "ControlNetLoader": ControlNetLoader,
+     "DiffControlNetLoader": DiffControlNetLoader,
+     "StyleModelLoader": StyleModelLoader,
+     "CLIPVisionLoader": CLIPVisionLoader,
+     "VAEDecodeTiled": VAEDecodeTiled,
+     "VAEEncodeTiled": VAEEncodeTiled,
+     "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
+     "GLIGENLoader": GLIGENLoader,
+     "GLIGENTextBoxApply": GLIGENTextBoxApply,
+     "InpaintModelConditioning": InpaintModelConditioning,
+
+     "CheckpointLoader": CheckpointLoader,
+     "DiffusersLoader": DiffusersLoader,
+
+     "LoadLatent": LoadLatent,
+     "SaveLatent": SaveLatent,
+
+     "ConditioningZeroOut": ConditioningZeroOut,
+     "ConditioningSetTimestepRange": ConditioningSetTimestepRange,
+     "LoraLoaderModelOnly": LoraLoaderModelOnly,
+ }
+
+ NODE_DISPLAY_NAME_MAPPINGS = {
+     # Sampling
+     "KSampler": "KSampler",
+     "KSamplerAdvanced": "KSampler (Advanced)",
+     # Loaders
+     "CheckpointLoader": "Load Checkpoint With Config (DEPRECATED)",
+     "CheckpointLoaderSimple": "Load Checkpoint",
+     "VAELoader": "Load VAE",
+     "LoraLoader": "Load LoRA",
+     "CLIPLoader": "Load CLIP",
+     "ControlNetLoader": "Load ControlNet Model",
+     "DiffControlNetLoader": "Load ControlNet Model (diff)",
+     "StyleModelLoader": "Load Style Model",
+     "CLIPVisionLoader": "Load CLIP Vision",
+     "UpscaleModelLoader": "Load Upscale Model",
+     # Conditioning
+     "CLIPVisionEncode": "CLIP Vision Encode",
+     "StyleModelApply": "Apply Style Model",
+     "CLIPTextEncode": "CLIP Text Encode (Prompt)",
+     "CLIPSetLastLayer": "CLIP Set Last Layer",
+     "ConditioningCombine": "Conditioning (Combine)",
+     "ConditioningAverage": "Conditioning (Average)",
+     "ConditioningConcat": "Conditioning (Concat)",
+     "ConditioningSetArea": "Conditioning (Set Area)",
+     "ConditioningSetAreaPercentage": "Conditioning (Set Area with Percentage)",
+     "ConditioningSetMask": "Conditioning (Set Mask)",
+     "ControlNetApply": "Apply ControlNet",
+     "ControlNetApplyAdvanced": "Apply ControlNet (Advanced)",
+     # Latent
+     "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
+     "SetLatentNoiseMask": "Set Latent Noise Mask",
+     "VAEDecode": "VAE Decode",
+     "VAEEncode": "VAE Encode",
+     "LatentRotate": "Rotate Latent",
+     "LatentFlip": "Flip Latent",
+     "LatentCrop": "Crop Latent",
+     "EmptyLatentImage": "Empty Latent Image",
+     "LatentUpscale": "Upscale Latent",
+     "LatentUpscaleBy": "Upscale Latent By",
+     "LatentComposite": "Latent Composite",
+     "LatentBlend": "Latent Blend",
+     "LatentFromBatch": "Latent From Batch",
+     "RepeatLatentBatch": "Repeat Latent Batch",
+     # Image
+     "SaveImage": "Save Image",
+     "PreviewImage": "Preview Image",
+     "LoadImage": "Load Image",
+     "LoadImageMask": "Load Image (as Mask)",
+     "ImageScale": "Upscale Image",
+     "ImageScaleBy": "Upscale Image By",
+     "ImageUpscaleWithModel": "Upscale Image (using Model)",
+     "ImageInvert": "Invert Image",
+     "ImagePadForOutpaint": "Pad Image for Outpainting",
+     "ImageBatch": "Batch Images",
+     # _for_testing
+     "VAEDecodeTiled": "VAE Decode (Tiled)",
+     "VAEEncodeTiled": "VAE Encode (Tiled)",
+ }
+
+ EXTENSION_WEB_DIRS = {}
+
+ def load_custom_node(module_path, ignore=set()):
+     module_name = os.path.basename(module_path)
+     if os.path.isfile(module_path):
+         sp = os.path.splitext(module_path)
+         module_name = sp[0]
+     try:
+         logging.debug("Trying to load custom node {}".format(module_path))
+         if os.path.isfile(module_path):
+             module_spec = importlib.util.spec_from_file_location(module_name, module_path)
+             module_dir = os.path.split(module_path)[0]
+         else:
+             module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
+             module_dir = module_path
+
+         module = importlib.util.module_from_spec(module_spec)
+         sys.modules[module_name] = module
+         module_spec.loader.exec_module(module)
+
+         if hasattr(module, "WEB_DIRECTORY") and getattr(module, "WEB_DIRECTORY") is not None:
+             web_dir = os.path.abspath(os.path.join(module_dir, getattr(module, "WEB_DIRECTORY")))
+             if os.path.isdir(web_dir):
+                 EXTENSION_WEB_DIRS[module_name] = web_dir
+
+         if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
+             for name in module.NODE_CLASS_MAPPINGS:
+                 if name not in ignore:
+                     NODE_CLASS_MAPPINGS[name] = module.NODE_CLASS_MAPPINGS[name]
+             if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
+                 NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
+             return True
+         else:
+             logging.warning(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
+             return False
+     except Exception as e:
+         logging.warning(traceback.format_exc())
+         logging.warning(f"Cannot import {module_path} module for custom nodes: {e}")
+         return False
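+
+ # A custom node package only needs to export NODE_CLASS_MAPPINGS for the
+ # loader above to pick it up. A minimal sketch of such a module (all names
+ # here are illustrative, not part of this file):
+ #
+ #   # custom_nodes/my_nodes/__init__.py
+ #   class MyPassthrough:
+ #       @classmethod
+ #       def INPUT_TYPES(s):
+ #           return {"required": {"image": ("IMAGE",)}}
+ #       RETURN_TYPES = ("IMAGE",)
+ #       FUNCTION = "run"
+ #       CATEGORY = "example"
+ #       def run(self, image):
+ #           return (image,)
+ #
+ #   NODE_CLASS_MAPPINGS = {"MyPassthrough": MyPassthrough}
+ #   NODE_DISPLAY_NAME_MAPPINGS = {"MyPassthrough": "My Passthrough"}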
+
+ def load_custom_nodes():
+     base_node_names = set(NODE_CLASS_MAPPINGS.keys())
+     node_paths = folder_paths.get_folder_paths("custom_nodes")
+     node_import_times = []
+     for custom_node_path in node_paths:
+         possible_modules = os.listdir(os.path.realpath(custom_node_path))
+         if "__pycache__" in possible_modules:
+             possible_modules.remove("__pycache__")
+
+         for possible_module in possible_modules:
+             module_path = os.path.join(custom_node_path, possible_module)
+             if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
+             if module_path.endswith(".disabled"): continue
+             time_before = time.perf_counter()
+             success = load_custom_node(module_path, base_node_names)
+             node_import_times.append((time.perf_counter() - time_before, module_path, success))
+
+     if len(node_import_times) > 0:
+         logging.info("\nImport times for custom nodes:")
+         for n in sorted(node_import_times):
+             if n[2]:
+                 import_message = ""
+             else:
+                 import_message = " (IMPORT FAILED)"
+             logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1]))
+         logging.info("")
+
+ def init_custom_nodes():
+     extras_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras")
+     extras_files = [
+         "nodes_latent.py",
+         "nodes_hypernetwork.py",
+         "nodes_upscale_model.py",
+         "nodes_post_processing.py",
+         "nodes_mask.py",
+         "nodes_compositing.py",
+         "nodes_rebatch.py",
+         "nodes_model_merging.py",
+         "nodes_tomesd.py",
+         "nodes_clip_sdxl.py",
+         "nodes_canny.py",
+         "nodes_freelunch.py",
+         "nodes_custom_sampler.py",
+         "nodes_hypertile.py",
+         "nodes_model_advanced.py",
+         "nodes_model_downscale.py",
+         "nodes_images.py",
+         "nodes_video_model.py",
+         "nodes_sag.py",
+         "nodes_perpneg.py",
+         "nodes_stable3d.py",
+         "nodes_sdupscale.py",
+         "nodes_photomaker.py",
+         "nodes_cond.py",
+         "nodes_morphology.py",
+         "nodes_stable_cascade.py",
+         "nodes_differential_diffusion.py",
+         "nodes_ip2p.py",
+         "nodes_model_merging_model_specific.py",
+         "nodes_pag.py",
+         "nodes_align_your_steps.py",
+         "nodes_attention_multiply.py",
+         "nodes_advanced_samplers.py",
+     ]
+
+     import_failed = []
+     for node_file in extras_files:
+         if not load_custom_node(os.path.join(extras_dir, node_file)):
+             import_failed.append(node_file)
+
+     load_custom_nodes()
+
+     if len(import_failed) > 0:
+         logging.warning("WARNING: some comfy_extras/ nodes did not import correctly. This may be because they are missing some dependencies.\n")
+         for node in import_failed:
+             logging.warning("IMPORT FAILED: {}".format(node))
+         logging.warning("\nThis issue might be caused by missing dependencies added the last time you updated ComfyUI.")
+         if args.windows_standalone_build:
+             logging.warning("Please run the update script: update/update_comfyui.bat")
+         else:
+             logging.warning("Please run: pip install -r requirements.txt")
+         logging.warning("")