WhiteAiZ commited on
Commit
72561cd
·
verified ·
1 Parent(s): 5de532a

update reforge

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitignore +6 -0
  2. README.md +3 -10
  3. extensions-builtin/reForge-advanced_model_sampling/advanced_model_sampling/nodes_model_advanced.py +6 -0
  4. extensions-builtin/sd_forge_controlnet/scripts/controlnet.py +2 -2
  5. extensions-builtin/sd_forge_freeu/scripts/forge_freeu.py +1 -1
  6. extensions-builtin/sd_forge_hypertile/scripts/forge_hypertile.py +1 -1
  7. extensions-builtin/sd_forge_kohya_hrfix/scripts/kohya_hrfix.py +1 -1
  8. extensions-builtin/sd_forge_multidiffusion/lib_multidiffusion/tiled_diffusion.py +1 -1
  9. extensions-builtin/sd_forge_photomaker/scripts/forge_photomaker.py +1 -1
  10. extensions-builtin/sd_forge_sag/scripts/forge_sag.py +1 -1
  11. extensions-builtin/sd_forge_svd/scripts/forge_svd.py +2 -2
  12. extensions-builtin/sd_forge_z123/scripts/forge_z123.py +2 -2
  13. extensions-builtin/soft-inpainting/scripts/soft_inpainting.py +137 -91
  14. ldm_patched/contrib/nodes.py +0 -0
  15. ldm_patched/contrib/nodes_ace.py +50 -0
  16. ldm_patched/contrib/nodes_advanced_samplers.py +112 -0
  17. ldm_patched/contrib/nodes_align_your_steps.py +54 -0
  18. ldm_patched/contrib/nodes_apg.py +77 -0
  19. ldm_patched/contrib/nodes_attention_multiply.py +121 -0
  20. ldm_patched/contrib/nodes_audio.py +371 -0
  21. ldm_patched/contrib/nodes_camera_trajectory.py +219 -0
  22. ldm_patched/contrib/nodes_canny.py +304 -0
  23. ldm_patched/contrib/nodes_cfg.py +73 -0
  24. ldm_patched/contrib/nodes_clip_sdxl.py +61 -0
  25. ldm_patched/contrib/nodes_compositing.py +207 -0
  26. ldm_patched/contrib/nodes_cond.py +50 -0
  27. ldm_patched/contrib/nodes_controlnet.py +61 -0
  28. ldm_patched/contrib/nodes_cosmos.py +129 -0
  29. ldm_patched/contrib/nodes_custom_sampler.py +3 -2
  30. ldm_patched/contrib/nodes_differential_diffusion.py +43 -0
  31. ldm_patched/contrib/nodes_edit_model.py +27 -0
  32. ldm_patched/contrib/nodes_flux.py +109 -0
  33. ldm_patched/contrib/nodes_freelunch.py +116 -0
  34. ldm_patched/contrib/nodes_fresca.py +104 -0
  35. ldm_patched/contrib/nodes_gits.py +370 -0
  36. ldm_patched/contrib/nodes_hidream.py +56 -0
  37. ldm_patched/contrib/nodes_hooks.py +746 -0
  38. ldm_patched/contrib/nodes_hunyuan.py +124 -0
  39. ldm_patched/contrib/nodes_hunyuan3d.py +635 -0
  40. ldm_patched/contrib/nodes_hypernetwork.py +124 -0
  41. ldm_patched/contrib/nodes_hypertile.py +85 -0
  42. ldm_patched/contrib/nodes_images.py +180 -0
  43. ldm_patched/contrib/nodes_ip2p.py +46 -0
  44. ldm_patched/contrib/nodes_latent.py +266 -0
  45. ldm_patched/contrib/nodes_load_3d.py +183 -0
  46. ldm_patched/contrib/nodes_lotus.py +30 -0
  47. ldm_patched/contrib/nodes_lt.py +475 -0
  48. ldm_patched/contrib/nodes_lumina2.py +105 -0
  49. ldm_patched/contrib/nodes_mahiro.py +42 -0
  50. ldm_patched/contrib/nodes_mask.py +373 -0
.gitignore CHANGED
@@ -79,3 +79,9 @@ webui-user-7980-gpu1_main.bat
79
  main.obj
80
  main.lib
81
  main.exp
 
 
 
 
 
 
 
79
  main.obj
80
  main.lib
81
  main.exp
82
+ /extensions/*
83
+ !/extensions/put extensions here.txt
84
+ webui-user-8160-gpu3-nopass.sh
85
+ webui-user-8160-gpu3-nopass_fp8.sh
86
+ /models/VAE/*
87
+ !/models/VAE/Put VAE here.txt
README.md CHANGED
@@ -1,7 +1,3 @@
1
- # reForge development has stopped.
2
-
3
- For more info, see https://github.com/Panchovix/stable-diffusion-webui-reForge/discussions/354
4
-
5
  # Stable Diffusion WebUI Forge/reForge
6
 
7
  Stable Diffusion WebUI Forge/reForge is a platform on top of [Stable Diffusion WebUI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) (based on [Gradio](https://www.gradio.app/)) to make development easier, optimize resource management, speed up inference, and study experimental features.
@@ -11,13 +7,10 @@ The name "Forge" is inspired from "Minecraft Forge". This project is aimed at be
11
  # Important: Branches
12
 
13
  * main: Has all the possible upstream changes from A1111, new samplers/schedulers/sd options/etc and now, comfy backend updated to stream, so this deprecated the old forge backend.
14
- * dev: At this point (2025-03-13), it is the same as main branch.
15
- * dev2: Same as dev branch, but it uses gradio 4.0 instead of 3.42. This is to test some extensions and changes, then it will be moved to dev, and then to main branch.
16
- * experimental: This branch will have some experimental changes that may have major new features, but they may be incomplete or have major bugs, based on the dev2 branch. This branch will be mostly inactive until I want to test things.
17
  * main-old: Branch with old forge backend. Kept as backup in any case, but it won't receive updates.
18
- * dev_upstream: Deprecated, see more https://github.com/Panchovix/stable-diffusion-webui-reForge/discussions/175
19
- * dev_upsteam_experimental: Deprecated, see more https://github.com/Panchovix/stable-diffusion-webui-reForge/discussions/175
20
- * main_new_forge: Deprecated, see more https://github.com/lllyasviel/stable-diffusion-webui-forge/discussions/981.
21
 
22
  # Installing Forge/reForge
23
 
 
 
 
 
 
1
  # Stable Diffusion WebUI Forge/reForge
2
 
3
  Stable Diffusion WebUI Forge/reForge is a platform on top of [Stable Diffusion WebUI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) (based on [Gradio](https://www.gradio.app/)) to make development easier, optimize resource management, speed up inference, and study experimental features.
 
7
  # Important: Branches
8
 
9
  * main: Has all the possible upstream changes from A1111, new samplers/schedulers/sd options/etc and now, comfy backend updated to stream, so this deprecated the old forge backend.
10
+ * dev: At this point (2025-07-20), it is the same as main branch.
11
+ * dev2 and experimental: More unstable than dev, for now same as dev.
12
+ * experimental: same as dev2 but with gradio 4.
13
  * main-old: Branch with old forge backend. Kept as backup in any case, but it won't receive updates.
 
 
 
14
 
15
  # Installing Forge/reForge
16
 
extensions-builtin/reForge-advanced_model_sampling/advanced_model_sampling/nodes_model_advanced.py CHANGED
@@ -18,6 +18,10 @@ class LCM(model_sampling.EPS):
18
  class X0(model_sampling.EPS):
19
  def calculate_denoised(self, sigma, model_output, model_input):
20
  return model_output
 
 
 
 
21
 
22
  class ModelSamplingDiscreteDistilled(model_sampling.ModelSamplingDiscrete):
23
  original_timesteps = 50
@@ -79,6 +83,8 @@ class ModelSamplingDiscrete:
79
  sampling_base = ModelSamplingDiscreteDistilled
80
  elif sampling == "x0":
81
  sampling_type = X0
 
 
82
 
83
  class ModelSamplingAdvanced(sampling_base, sampling_type):
84
  pass
 
18
  class X0(model_sampling.EPS):
19
  def calculate_denoised(self, sigma, model_output, model_input):
20
  return model_output
21
+
22
+ class Lotus(X0):
23
+ def calculate_input(self, sigma, noise):
24
+ return noise
25
 
26
  class ModelSamplingDiscreteDistilled(model_sampling.ModelSamplingDiscrete):
27
  original_timesteps = 50
 
83
  sampling_base = ModelSamplingDiscreteDistilled
84
  elif sampling == "x0":
85
  sampling_type = X0
86
+ elif sampling == "lotus":
87
+ sampling_type = Lotus
88
 
89
  class ModelSamplingAdvanced(sampling_base, sampling_type):
90
  pass
extensions-builtin/sd_forge_controlnet/scripts/controlnet.py CHANGED
@@ -472,10 +472,10 @@ class ControlNetForForgeOfficial(scripts.Script):
472
  return
473
 
474
  if is_hr_pass:
475
- cond = torch.split(params.control_cond_for_hr_fix, p.batch_size)[p.iteration]
476
  mask = params.control_mask_for_hr_fix
477
  else:
478
- cond = torch.split(params.control_cond, p.batch_size)[p.iteration]
479
  mask = params.control_mask
480
 
481
  kwargs.update(dict(
 
472
  return
473
 
474
  if is_hr_pass:
475
+ cond = torch.split(params.control_cond_for_hr_fix, p.batch_size)[p.iteration] if isinstance(params.control_cond_for_hr_fix, torch.Tensor) else params.control_cond_for_hr_fix
476
  mask = params.control_mask_for_hr_fix
477
  else:
478
+ cond = torch.split(params.control_cond, p.batch_size)[p.iteration] if isinstance(params.control_cond, torch.Tensor) else params.control_cond
479
  mask = params.control_mask
480
 
481
  kwargs.update(dict(
extensions-builtin/sd_forge_freeu/scripts/forge_freeu.py CHANGED
@@ -7,7 +7,7 @@ from typing import Any
7
  from functools import partial
8
 
9
  from modules import script_callbacks, scripts
10
- from ldm_patched.contrib.external_freelunch import FreeU_V2
11
 
12
 
13
  opFreeU_V2 = FreeU_V2()
 
7
  from functools import partial
8
 
9
  from modules import script_callbacks, scripts
10
+ from ldm_patched.contrib.nodes_freelunch import FreeU_V2
11
 
12
 
13
  opFreeU_V2 = FreeU_V2()
extensions-builtin/sd_forge_hypertile/scripts/forge_hypertile.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
 
3
  from modules import scripts
4
- from ldm_patched.contrib.external_hypertile import HyperTile
5
 
6
 
7
  opHyperTile = HyperTile()
 
1
  import gradio as gr
2
 
3
  from modules import scripts
4
+ from ldm_patched.contrib.nodes_hypertile import HyperTile
5
 
6
 
7
  opHyperTile = HyperTile()
extensions-builtin/sd_forge_kohya_hrfix/scripts/kohya_hrfix.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
 
3
  from modules import scripts
4
- from ldm_patched.contrib.external_model_downscale import PatchModelAddDownscale
5
 
6
 
7
  opPatchModelAddDownscale = PatchModelAddDownscale()
 
1
  import gradio as gr
2
 
3
  from modules import scripts
4
+ from ldm_patched.contrib.nodes_model_downscale import PatchModelAddDownscale
5
 
6
 
7
  opPatchModelAddDownscale = PatchModelAddDownscale()
extensions-builtin/sd_forge_multidiffusion/lib_multidiffusion/tiled_diffusion.py CHANGED
@@ -12,7 +12,7 @@ from weakref import WeakSet
12
  import ldm_patched.modules.utils
13
  import ldm_patched.modules.model_patcher
14
  import ldm_patched.modules.model_management
15
- from ldm_patched.contrib.external import ImageScale
16
  from ldm_patched.modules.model_base import BaseModel
17
  from ldm_patched.modules.model_patcher import ModelPatcher
18
  from ldm_patched.modules.controlnet import ControlNet, T2IAdapter
 
12
  import ldm_patched.modules.utils
13
  import ldm_patched.modules.model_patcher
14
  import ldm_patched.modules.model_management
15
+ from ldm_patched.contrib.nodes import ImageScale
16
  from ldm_patched.modules.model_base import BaseModel
17
  from ldm_patched.modules.model_patcher import ModelPatcher
18
  from ldm_patched.modules.controlnet import ControlNet, T2IAdapter
extensions-builtin/sd_forge_photomaker/scripts/forge_photomaker.py CHANGED
@@ -2,7 +2,7 @@ from modules_forge.supported_preprocessor import Preprocessor, PreprocessorParam
2
  from modules_forge.shared import add_supported_preprocessor
3
  from modules_forge.shared import add_supported_control_model
4
  from modules_forge.supported_controlnet import ControlModelPatcher
5
- from ldm_patched.contrib.external_photomaker import PhotoMakerEncode, PhotoMakerIDEncoder
6
 
7
 
8
  opPhotoMakerEncode = PhotoMakerEncode().apply_photomaker
 
2
  from modules_forge.shared import add_supported_preprocessor
3
  from modules_forge.shared import add_supported_control_model
4
  from modules_forge.supported_controlnet import ControlModelPatcher
5
+ from ldm_patched.contrib.nodes_photomaker import PhotoMakerEncode, PhotoMakerIDEncoder
6
 
7
 
8
  opPhotoMakerEncode = PhotoMakerEncode().apply_photomaker
extensions-builtin/sd_forge_sag/scripts/forge_sag.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
 
3
  from modules import scripts
4
- from ldm_patched.contrib.external_sag import SelfAttentionGuidance
5
 
6
 
7
  opSelfAttentionGuidance = SelfAttentionGuidance()
 
1
  import gradio as gr
2
 
3
  from modules import scripts
4
+ from ldm_patched.contrib.nodes_sag import SelfAttentionGuidance
5
 
6
 
7
  opSelfAttentionGuidance = SelfAttentionGuidance()
extensions-builtin/sd_forge_svd/scripts/forge_svd.py CHANGED
@@ -12,8 +12,8 @@ from modules import shared
12
 
13
  from modules_forge.forge_util import numpy_to_pytorch, pytorch_to_numpy, write_images_to_mp4
14
  from ldm_patched.modules.sd import load_checkpoint_guess_config
15
- from ldm_patched.contrib.external_video_model import VideoLinearCFGGuidance, SVD_img2vid_Conditioning
16
- from ldm_patched.contrib.external import KSampler, VAEDecode
17
 
18
 
19
  opVideoLinearCFGGuidance = VideoLinearCFGGuidance()
 
12
 
13
  from modules_forge.forge_util import numpy_to_pytorch, pytorch_to_numpy, write_images_to_mp4
14
  from ldm_patched.modules.sd import load_checkpoint_guess_config
15
+ from ldm_patched.contrib.nodes_video_model import VideoLinearCFGGuidance, SVD_img2vid_Conditioning
16
+ from ldm_patched.contrib.nodes import KSampler, VAEDecode
17
 
18
 
19
  opVideoLinearCFGGuidance = VideoLinearCFGGuidance()
extensions-builtin/sd_forge_z123/scripts/forge_z123.py CHANGED
@@ -11,8 +11,8 @@ from modules import shared
11
 
12
  from modules_forge.forge_util import numpy_to_pytorch, pytorch_to_numpy
13
  from ldm_patched.modules.sd import load_checkpoint_guess_config
14
- from ldm_patched.contrib.external_stable3d import StableZero123_Conditioning
15
- from ldm_patched.contrib.external import KSampler, VAEDecode
16
 
17
 
18
  opStableZero123_Conditioning = StableZero123_Conditioning()
 
11
 
12
  from modules_forge.forge_util import numpy_to_pytorch, pytorch_to_numpy
13
  from ldm_patched.modules.sd import load_checkpoint_guess_config
14
+ from ldm_patched.contrib.nodes_stable3d import StableZero123_Conditioning
15
+ from ldm_patched.contrib.nodes import KSampler, VAEDecode
16
 
17
 
18
  opStableZero123_Conditioning = StableZero123_Conditioning()
extensions-builtin/soft-inpainting/scripts/soft_inpainting.py CHANGED
@@ -4,6 +4,10 @@ import math
4
  from modules.ui_components import InputAccordion
5
  import modules.scripts as scripts
6
 
 
 
 
 
7
 
8
  class SoftInpaintingSettings:
9
  def __init__(self,
@@ -231,7 +235,76 @@ def apply_masks(
231
  return masks_for_overlay
232
 
233
 
234
- def weighted_histogram_filter(img, kernel, kernel_center, percentile_min=0.0, percentile_max=1.0, min_width=1.0):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
235
  """
236
  Generalization convolution filter capable of applying
237
  weighted mean, median, maximum, and minimum filters
@@ -258,101 +331,74 @@ def weighted_histogram_filter(img, kernel, kernel_center, percentile_min=0.0, pe
258
  (nparray): A filtered copy of the input image "img", a 2-D array of floats.
259
  """
260
 
261
- # Converts an index tuple into a vector.
262
- def vec(x):
263
- return np.array(x)
264
-
265
- kernel_min = -kernel_center
266
- kernel_max = vec(kernel.shape) - kernel_center
 
 
 
 
267
 
268
  def weighted_histogram_filter_single(idx):
269
- idx = vec(idx)
270
- min_index = np.maximum(0, idx + kernel_min)
271
- max_index = np.minimum(vec(img.shape), idx + kernel_max)
272
- window_shape = max_index - min_index
273
-
274
- class WeightedElement:
275
- """
276
- An element of the histogram, its weight
277
- and bounds.
278
- """
279
-
280
- def __init__(self, value, weight):
281
- self.value: float = value
282
- self.weight: float = weight
283
- self.window_min: float = 0.0
284
- self.window_max: float = 1.0
285
-
286
- # Collect the values in the image as WeightedElements,
287
- # weighted by their corresponding kernel values.
288
- values = []
289
- for window_tup in np.ndindex(tuple(window_shape)):
290
- window_index = vec(window_tup)
291
- image_index = window_index + min_index
292
- centered_kernel_index = image_index - idx
293
- kernel_index = centered_kernel_index + kernel_center
294
- element = WeightedElement(img[tuple(image_index)], kernel[tuple(kernel_index)])
295
- values.append(element)
296
-
297
- def sort_key(x: WeightedElement):
298
- return x.value
299
-
300
- values.sort(key=sort_key)
301
-
302
- # Calculate the height of the stack (sum)
303
- # and each sample's range they occupy in the stack
304
- sum = 0
305
- for i in range(len(values)):
306
- values[i].window_min = sum
307
- sum += values[i].weight
308
- values[i].window_max = sum
309
-
310
- # Calculate what range of this stack ("window")
311
- # we want to get the weighted average across.
312
- window_min = sum * percentile_min
313
- window_max = sum * percentile_max
314
- window_width = window_max - window_min
315
 
316
- # Ensure the window is within the stack and at least a certain size.
317
  if window_width < min_width:
318
  window_center = (window_min + window_max) / 2
319
- window_min = window_center - min_width / 2
320
- window_max = window_center + min_width / 2
321
-
322
- if window_max > sum:
323
- window_max = sum
324
- window_min = sum - min_width
325
-
326
- if window_min < 0:
327
- window_min = 0
328
- window_max = min_width
329
-
330
- value = 0
331
- value_weight = 0
332
-
333
- # Get the weighted average of all the samples
334
- # that overlap with the window, weighted
335
- # by the size of their overlap.
336
- for i in range(len(values)):
337
- if window_min >= values[i].window_max:
338
- continue
339
- if window_max <= values[i].window_min:
340
- break
341
-
342
- s = max(window_min, values[i].window_min)
343
- e = min(window_max, values[i].window_max)
344
- w = e - s
345
-
346
- value += values[i].value * w
347
- value_weight += w
348
-
349
- return value / value_weight if value_weight != 0 else 0
350
-
351
- img_out = img.copy()
352
-
353
- # Apply the kernel operation over each pixel.
354
- for index in np.ndindex(img.shape):
355
- img_out[index] = weighted_histogram_filter_single(index)
356
 
357
  return img_out
358
 
 
4
  from modules.ui_components import InputAccordion
5
  import modules.scripts as scripts
6
 
7
+ from concurrent.futures import ThreadPoolExecutor
8
+ from scipy.ndimage import convolve
9
+ from joblib import Parallel, delayed, cpu_count
10
+
11
 
12
  class SoftInpaintingSettings:
13
  def __init__(self,
 
235
  return masks_for_overlay
236
 
237
 
238
+
239
+
240
+ def weighted_histogram_filter_single_pixel(idx, img, kernel, kernel_center, percentile_min, percentile_max, min_width):
241
+ """
242
+ Apply the weighted histogram filter to a single pixel.
243
+ This function is now refactored to be accessible for parallelization.
244
+ """
245
+ idx = np.array(idx)
246
+ kernel_min = -kernel_center
247
+ kernel_max = np.array(kernel.shape) - kernel_center
248
+
249
+ # Precompute the minimum and maximum valid indices for the kernel
250
+ min_index = np.maximum(0, idx + kernel_min)
251
+ max_index = np.minimum(np.array(img.shape), idx + kernel_max)
252
+ window_shape = max_index - min_index
253
+
254
+ # Initialize values and weights arrays
255
+ values = []
256
+ weights = []
257
+
258
+ for window_tup in np.ndindex(*window_shape):
259
+ window_index = np.array(window_tup)
260
+ image_index = window_index + min_index
261
+ centered_kernel_index = image_index - idx
262
+ kernel_index = centered_kernel_index + kernel_center
263
+ values.append(img[tuple(image_index)])
264
+ weights.append(kernel[tuple(kernel_index)])
265
+
266
+ # Convert to NumPy arrays
267
+ values = np.array(values)
268
+ weights = np.array(weights)
269
+
270
+ # Sort values and weights by values
271
+ sorted_indices = np.argsort(values)
272
+ values = values[sorted_indices]
273
+ weights = weights[sorted_indices]
274
+
275
+ # Calculate cumulative weights
276
+ cumulative_weights = np.cumsum(weights)
277
+
278
+ # Define window boundaries
279
+ sum_weights = cumulative_weights[-1]
280
+ window_min = sum_weights * percentile_min
281
+ window_max = sum_weights * percentile_max
282
+ window_width = window_max - window_min
283
+
284
+ # Ensure window is at least `min_width` wide
285
+ if window_width < min_width:
286
+ window_center = (window_min + window_max) / 2
287
+ window_min = window_center - min_width / 2
288
+ window_max = window_center + min_width / 2
289
+
290
+ if window_max > sum_weights:
291
+ window_max = sum_weights
292
+ window_min = sum_weights - min_width
293
+
294
+ if window_min < 0:
295
+ window_min = 0
296
+ window_max = min_width
297
+
298
+ # Calculate overlap for each value
299
+ overlap_start = np.maximum(window_min, np.concatenate(([0], cumulative_weights[:-1])))
300
+ overlap_end = np.minimum(window_max, cumulative_weights)
301
+ overlap = np.maximum(0, overlap_end - overlap_start)
302
+
303
+ # Weighted average calculation
304
+ result = np.sum(values * overlap) / np.sum(overlap) if np.sum(overlap) > 0 else 0
305
+ return result
306
+
307
+ def weighted_histogram_filter(img, kernel, kernel_center, percentile_min=0.0, percentile_max=1.0, min_width=1.0, n_jobs=-1):
308
  """
309
  Generalization convolution filter capable of applying
310
  weighted mean, median, maximum, and minimum filters
 
331
  (nparray): A filtered copy of the input image "img", a 2-D array of floats.
332
  """
333
 
334
+ # Ensure kernel_center is a 1D array
335
+ if isinstance(kernel_center, int):
336
+ kernel_center = np.array([kernel_center, kernel_center])
337
+ elif len(kernel_center) == 1:
338
+ kernel_center = np.array([kernel_center[0], kernel_center[0]])
339
+ kernel_radius = max(kernel_center)
340
+ padded_img = np.pad(img, kernel_radius, mode='constant', constant_values=0)
341
+ img_out = np.zeros_like(img)
342
+ img_shape = img.shape
343
+ pixel_coords = [(i, j) for i in range(img_shape[0]) for j in range(img_shape[1])]
344
 
345
  def weighted_histogram_filter_single(idx):
346
+ """
347
+ Single-pixel weighted histogram calculation.
348
+ """
349
+ row, col = idx
350
+ idx = (row + kernel_radius, col + kernel_radius)
351
+ min_index = np.array(idx) - kernel_center
352
+ max_index = min_index + kernel.shape
353
+
354
+ window = padded_img[min_index[0]:max_index[0], min_index[1]:max_index[1]]
355
+ window_values = window.flatten()
356
+ window_weights = kernel.flatten()
357
+
358
+ sorted_indices = np.argsort(window_values)
359
+ values = window_values[sorted_indices]
360
+ weights = window_weights[sorted_indices]
361
+
362
+ cumulative_weights = np.cumsum(weights)
363
+ sum_weights = cumulative_weights[-1]
364
+ window_min = max(0, sum_weights * percentile_min)
365
+ window_max = min(sum_weights, sum_weights * percentile_max)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
366
 
367
+ window_width = window_max - window_min
368
  if window_width < min_width:
369
  window_center = (window_min + window_max) / 2
370
+ window_min = max(0, window_center - min_width / 2)
371
+ window_max = min(sum_weights, window_center + min_width / 2)
372
+
373
+ overlap_start = np.maximum(window_min, np.concatenate(([0], cumulative_weights[:-1])))
374
+ overlap_end = np.minimum(window_max, cumulative_weights)
375
+ overlap = np.maximum(0, overlap_end - overlap_start)
376
+
377
+ return np.sum(values * overlap) / np.sum(overlap) if np.sum(overlap) > 0 else 0
378
+
379
+ # Split pixel_coords into equal chunks based on n_jobs
380
+ n_jobs = -1
381
+ if cpu_count() > 6:
382
+ n_jobs = 6 # More than 6 isn't worth unless it's more than 3000x3000px
383
+
384
+ chunk_size = len(pixel_coords) // n_jobs
385
+ pixel_chunks = [pixel_coords[i:i + chunk_size] for i in range(0, len(pixel_coords), chunk_size)]
386
+
387
+ # joblib to process chunks in parallel
388
+ def process_chunk(chunk):
389
+ chunk_result = {}
390
+ for idx in chunk:
391
+ chunk_result[idx] = weighted_histogram_filter_single(idx)
392
+ return chunk_result
393
+
394
+ results = Parallel(n_jobs=n_jobs, backend="loky")( # loky is fastest in my configuration
395
+ delayed(process_chunk)(chunk) for chunk in pixel_chunks
396
+ )
397
+
398
+ # Combine results into the output image
399
+ for chunk_result in results:
400
+ for (row, col), value in chunk_result.items():
401
+ img_out[row, col] = value
 
 
 
 
 
402
 
403
  return img_out
404
 
ldm_patched/contrib/nodes.py ADDED
The diff for this file is too large to render. See raw diff
 
ldm_patched/contrib/nodes_ace.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import ldm_patched.modules.model_management
3
+ import ldm_patched.contrib.node_helpers
4
+
5
+ class TextEncodeAceStepAudio:
6
+ @classmethod
7
+ def INPUT_TYPES(s):
8
+ return {"required": {
9
+ "clip": ("CLIP", ),
10
+ "tags": ("STRING", {"multiline": True, "dynamicPrompts": True}),
11
+ "lyrics": ("STRING", {"multiline": True, "dynamicPrompts": True}),
12
+ "lyrics_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
13
+ }}
14
+ RETURN_TYPES = ("CONDITIONING",)
15
+ FUNCTION = "encode"
16
+
17
+ CATEGORY = "conditioning"
18
+
19
+ def encode(self, clip, tags, lyrics, lyrics_strength):
20
+ tokens = clip.tokenize(tags, lyrics=lyrics)
21
+ conditioning = clip.encode_from_tokens_scheduled(tokens)
22
+ conditioning = ldm_patched.contrib.node_helpers.conditioning_set_values(conditioning, {"lyrics_strength": lyrics_strength})
23
+ return (conditioning, )
24
+
25
+
26
+ class EmptyAceStepLatentAudio:
27
+ def __init__(self):
28
+ self.device = ldm_patched.modules.model_management.intermediate_device()
29
+
30
+ @classmethod
31
+ def INPUT_TYPES(s):
32
+ return {"required": {"seconds": ("FLOAT", {"default": 120.0, "min": 1.0, "max": 1000.0, "step": 0.1}),
33
+ "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."}),
34
+ }}
35
+ RETURN_TYPES = ("LATENT",)
36
+ FUNCTION = "generate"
37
+
38
+ CATEGORY = "latent/audio"
39
+
40
+ def generate(self, seconds, batch_size):
41
+ length = int(seconds * 44100 / 512 / 8)
42
+ latent = torch.zeros([batch_size, 8, 16, length], device=self.device)
43
+ return ({"samples": latent, "type": "audio"}, )
44
+
45
+
46
+ # Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
47
+ NODE_CLASS_MAPPINGS = {
48
+ "TextEncodeAceStepAudio": TextEncodeAceStepAudio,
49
+ "EmptyAceStepLatentAudio": EmptyAceStepLatentAudio,
50
+ }
ldm_patched/contrib/nodes_advanced_samplers.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ldm_patched.modules.samplers
2
+ import ldm_patched.modules.utils
3
+ import torch
4
+ import numpy as np
5
+ from tqdm.auto import trange
6
+
7
+
8
+ @torch.no_grad()
9
+ def sample_lcm_upscale(model, x, sigmas, extra_args=None, callback=None, disable=None, total_upscale=2.0, upscale_method="bislerp", upscale_steps=None):
10
+ extra_args = {} if extra_args is None else extra_args
11
+
12
+ if upscale_steps is None:
13
+ upscale_steps = max(len(sigmas) // 2 + 1, 2)
14
+ else:
15
+ upscale_steps += 1
16
+ upscale_steps = min(upscale_steps, len(sigmas) + 1)
17
+
18
+ upscales = np.linspace(1.0, total_upscale, upscale_steps)[1:]
19
+
20
+ orig_shape = x.size()
21
+ s_in = x.new_ones([x.shape[0]])
22
+ for i in trange(len(sigmas) - 1, disable=disable):
23
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
24
+ if callback is not None:
25
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
26
+
27
+ x = denoised
28
+ if i < len(upscales):
29
+ x = ldm_patched.modules.utils.common_upscale(x, round(orig_shape[-1] * upscales[i]), round(orig_shape[-2] * upscales[i]), upscale_method, "disabled")
30
+
31
+ if sigmas[i + 1] > 0:
32
+ x += sigmas[i + 1] * torch.randn_like(x)
33
+ return x
34
+
35
+
36
+ class SamplerLCMUpscale:
37
+ upscale_methods = ["bislerp", "nearest-exact", "bilinear", "area", "bicubic"]
38
+
39
+ @classmethod
40
+ def INPUT_TYPES(s):
41
+ return {"required":
42
+ {"scale_ratio": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 20.0, "step": 0.01}),
43
+ "scale_steps": ("INT", {"default": -1, "min": -1, "max": 1000, "step": 1}),
44
+ "upscale_method": (s.upscale_methods,),
45
+ }
46
+ }
47
+ RETURN_TYPES = ("SAMPLER",)
48
+ CATEGORY = "sampling/custom_sampling/samplers"
49
+
50
+ FUNCTION = "get_sampler"
51
+
52
+ def get_sampler(self, scale_ratio, scale_steps, upscale_method):
53
+ if scale_steps < 0:
54
+ scale_steps = None
55
+ sampler = ldm_patched.modules.samplers.KSAMPLER(sample_lcm_upscale, extra_options={"total_upscale": scale_ratio, "upscale_steps": scale_steps, "upscale_method": upscale_method})
56
+ return (sampler, )
57
+
58
+ from comfy.k_diffusion.sampling import to_d
59
+ import ldm_patched.modules.model_patcher
60
+
61
+ @torch.no_grad()
62
+ def sample_euler_pp(model, x, sigmas, extra_args=None, callback=None, disable=None):
63
+ extra_args = {} if extra_args is None else extra_args
64
+
65
+ temp = [0]
66
+ def post_cfg_function(args):
67
+ temp[0] = args["uncond_denoised"]
68
+ return args["denoised"]
69
+
70
+ model_options = extra_args.get("model_options", {}).copy()
71
+ extra_args["model_options"] = ldm_patched.modules.model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)
72
+
73
+ s_in = x.new_ones([x.shape[0]])
74
+ for i in trange(len(sigmas) - 1, disable=disable):
75
+ sigma_hat = sigmas[i]
76
+ denoised = model(x, sigma_hat * s_in, **extra_args)
77
+ d = to_d(x - denoised + temp[0], sigmas[i], denoised)
78
+ if callback is not None:
79
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
80
+ dt = sigmas[i + 1] - sigma_hat
81
+ x = x + d * dt
82
+ return x
83
+
84
+
85
+ class SamplerEulerCFGpp:
86
+ @classmethod
87
+ def INPUT_TYPES(s):
88
+ return {"required":
89
+ {"version": (["regular", "alternative"],),}
90
+ }
91
+ RETURN_TYPES = ("SAMPLER",)
92
+ # CATEGORY = "sampling/custom_sampling/samplers"
93
+ CATEGORY = "_for_testing"
94
+
95
+ FUNCTION = "get_sampler"
96
+
97
+ def get_sampler(self, version):
98
+ if version == "alternative":
99
+ sampler = ldm_patched.modules.samplers.KSAMPLER(sample_euler_pp)
100
+ else:
101
+ sampler = ldm_patched.modules.samplers.ksampler("euler_cfg_pp")
102
+ return (sampler, )
103
+
104
+ # Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
105
+ NODE_CLASS_MAPPINGS = {
106
+ "SamplerLCMUpscale": SamplerLCMUpscale,
107
+ "SamplerEulerCFGpp": SamplerEulerCFGpp,
108
+ }
109
+
110
+ NODE_DISPLAY_NAME_MAPPINGS = {
111
+ "SamplerEulerCFGpp": "SamplerEulerCFG++",
112
+ }
ldm_patched/contrib/nodes_align_your_steps.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #from: https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html
2
+ import numpy as np
3
+ import torch
4
+
5
+ def loglinear_interp(t_steps, num_steps):
6
+ """
7
+ Performs log-linear interpolation of a given array of decreasing numbers.
8
+ """
9
+ xs = np.linspace(0, 1, len(t_steps))
10
+ ys = np.log(t_steps[::-1])
11
+
12
+ new_xs = np.linspace(0, 1, num_steps)
13
+ new_ys = np.interp(new_xs, xs, ys)
14
+
15
+ interped_ys = np.exp(new_ys)[::-1].copy()
16
+ return interped_ys
17
+
18
+ NOISE_LEVELS = {"SD1": [14.6146412293, 6.4745760956, 3.8636745985, 2.6946151520, 1.8841921177, 1.3943805092, 0.9642583904, 0.6523686016, 0.3977456272, 0.1515232662, 0.0291671582],
19
+ "SDXL":[14.6146412293, 6.3184485287, 3.7681790315, 2.1811480769, 1.3405244945, 0.8620721141, 0.5550693289, 0.3798540708, 0.2332364134, 0.1114188177, 0.0291671582],
20
+ "SVD": [700.00, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.002]}
21
+
22
+ class AlignYourStepsScheduler:
23
+ @classmethod
24
+ def INPUT_TYPES(s):
25
+ return {"required":
26
+ {"model_type": (["SD1", "SDXL", "SVD"], ),
27
+ "steps": ("INT", {"default": 10, "min": 1, "max": 10000}),
28
+ "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
29
+ }
30
+ }
31
+ RETURN_TYPES = ("SIGMAS",)
32
+ CATEGORY = "sampling/custom_sampling/schedulers"
33
+
34
+ FUNCTION = "get_sigmas"
35
+
36
+ def get_sigmas(self, model_type, steps, denoise):
37
+ total_steps = steps
38
+ if denoise < 1.0:
39
+ if denoise <= 0.0:
40
+ return (torch.FloatTensor([]),)
41
+ total_steps = round(steps * denoise)
42
+
43
+ sigmas = NOISE_LEVELS[model_type][:]
44
+ if (steps + 1) != len(sigmas):
45
+ sigmas = loglinear_interp(sigmas, steps + 1)
46
+
47
+ sigmas = sigmas[-(total_steps + 1):]
48
+ sigmas[-1] = 0
49
+ return (torch.FloatTensor(sigmas), )
50
+
51
+ # Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
52
+ NODE_CLASS_MAPPINGS = {
53
+ "AlignYourStepsScheduler": AlignYourStepsScheduler,
54
+ }
ldm_patched/contrib/nodes_apg.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
def project(v0, v1):
    """Split ``v0`` into components parallel and orthogonal to ``v1``.

    The decomposition is taken over the last three dimensions (per-sample
    for NCHW latents). Returns ``(parallel, orthogonal)``.
    """
    unit = torch.nn.functional.normalize(v1, dim=[-1, -2, -3])
    parallel = (v0 * unit).sum(dim=[-1, -2, -3], keepdim=True) * unit
    return parallel, v0 - parallel
8
+
9
class APG:
    """Adaptive Projected Guidance node.

    Patches the model's pre-CFG step so the guidance vector (cond - uncond)
    is split into components parallel and orthogonal to the conditional
    prediction; the parallel part is rescaled by ``eta``. Optional momentum
    smoothing and guidance-norm clamping are applied first.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "model": ("MODEL",),
                "eta": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01, "tooltip": "Controls the scale of the parallel guidance vector. Default CFG behavior at a setting of 1."}),
                "norm_threshold": ("FLOAT", {"default": 5.0, "min": 0.0, "max": 50.0, "step": 0.1, "tooltip": "Normalize guidance vector to this value, normalization disable at a setting of 0."}),
                "momentum": ("FLOAT", {"default": 0.0, "min": -5.0, "max": 1.0, "step": 0.01, "tooltip":"Controls a running average of guidance during diffusion, disabled at a setting of 0."}),
            }
        }
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"
    CATEGORY = "sampling/custom_sampling"

    def patch(self, model, eta, norm_threshold, momentum):
        # State carried across sampler steps via the closure below.
        running_avg = 0
        prev_sigma = None

        def pre_cfg_function(args):
            nonlocal running_avg, prev_sigma

            # Only one cond output -> no uncond branch, nothing to modify.
            if len(args["conds_out"]) == 1: return args["conds_out"]

            cond = args["conds_out"][0]
            uncond = args["conds_out"][1]
            sigma = args["sigma"][0]
            cond_scale = args["cond_scale"]

            # Sigma increased -> a new sampling run started; reset momentum.
            if prev_sigma is not None and sigma > prev_sigma:
                running_avg = 0
            prev_sigma = sigma

            guidance = cond - uncond

            # Momentum: exponential running average of the guidance vector.
            # First step stores the raw guidance (running_avg starts as int 0).
            if momentum != 0:
                if not torch.is_tensor(running_avg):
                    running_avg = guidance
                else:
                    running_avg = momentum * running_avg + guidance
                guidance = running_avg

            # Clamp the per-sample L2 norm of the guidance to norm_threshold
            # (a threshold of 0 disables clamping).
            if norm_threshold > 0:
                guidance_norm = guidance.norm(p=2, dim=[-1, -2, -3], keepdim=True)
                scale = torch.minimum(
                    torch.ones_like(guidance_norm),
                    norm_threshold / guidance_norm
                )
                guidance = guidance * scale

            # Rescale only the component of the guidance parallel to cond.
            guidance_parallel, guidance_orthogonal = project(guidance, cond)
            modified_guidance = guidance_orthogonal + eta * guidance_parallel

            # NOTE(review): the (cond - uncond) / cond_scale term appears to
            # pre-compensate for the sampler's subsequent CFG multiply —
            # verify against the sampler's cfg combination code.
            modified_cond = (uncond + modified_guidance) + (cond - uncond) / cond_scale

            return [modified_cond, uncond] + args["conds_out"][2:]

        m = model.clone()
        m.set_model_sampler_pre_cfg_function(pre_cfg_function)
        return (m,)
69
+
70
+ # Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
71
# Node registration: internal node name -> implementing class.
NODE_CLASS_MAPPINGS = {
    "APG": APG,
}

# Human-readable node titles shown in the UI.
NODE_DISPLAY_NAME_MAPPINGS = {
    "APG": "Adaptive Projected Guidance",
}
ldm_patched/contrib/nodes_attention_multiply.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
def attention_multiply(attn, model, q, k, v, out):
    """Return a clone of ``model`` with q/k/v/out projection weights of every
    ``attn`` ("attn1"/"attn2") block patched by the given multipliers.

    Patching uses add_patches with strength_model equal to the multiplier,
    so a value of 1.0 leaves the weights unchanged.
    """
    patched = model.clone()
    # Projection-name suffix -> multiplier to apply to its weight and bias.
    strengths = {
        "{}.to_q".format(attn): q,
        "{}.to_k".format(attn): k,
        "{}.to_v".format(attn): v,
        "{}.to_out.0".format(attn): out,
    }
    for key in model.model_state_dict():
        for suffix, strength in strengths.items():
            if key.endswith(suffix + ".bias") or key.endswith(suffix + ".weight"):
                patched.add_patches({key: (None,)}, 0.0, strength)

    return patched
17
+
18
+
19
class UNetSelfAttentionMultiply:
    """Node: scale the self-attention (attn1) q/k/v/out projections in the UNet."""

    @classmethod
    def INPUT_TYPES(s):
        def fopt():
            # Fresh options dict per input so widgets don't share state.
            return ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
        return {"required": {"model": ("MODEL",),
                             "q": fopt(),
                             "k": fopt(),
                             "v": fopt(),
                             "out": fopt(),
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "_for_testing/attention_experiments"

    def patch(self, model, q, k, v, out):
        return (attention_multiply("attn1", model, q, k, v, out), )
36
+
37
class UNetCrossAttentionMultiply:
    """Node: scale the cross-attention (attn2) q/k/v/out projections in the UNet."""

    @classmethod
    def INPUT_TYPES(s):
        def fopt():
            # Fresh options dict per input so widgets don't share state.
            return ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
        return {"required": {"model": ("MODEL",),
                             "q": fopt(),
                             "k": fopt(),
                             "v": fopt(),
                             "out": fopt(),
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "_for_testing/attention_experiments"

    def patch(self, model, q, k, v, out):
        return (attention_multiply("attn2", model, q, k, v, out), )
54
+
55
class CLIPAttentionMultiply:
    """Node: scale q/k/v/out self-attention projections of the CLIP text encoder."""

    @classmethod
    def INPUT_TYPES(s):
        def fopt():
            # Fresh options dict per input so widgets don't share state.
            return ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
        return {"required": {"clip": ("CLIP",),
                             "q": fopt(),
                             "k": fopt(),
                             "v": fopt(),
                             "out": fopt(),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "patch"

    CATEGORY = "_for_testing/attention_experiments"

    def patch(self, clip, q, k, v, out):
        patched = clip.clone()
        # Projection-name prefix -> multiplier for its weight and bias.
        strengths = {
            "self_attn.q_proj": q,
            "self_attn.k_proj": k,
            "self_attn.v_proj": v,
            "self_attn.out_proj": out,
        }
        for key in patched.patcher.model_state_dict():
            for suffix, strength in strengths.items():
                if key.endswith(suffix + ".weight") or key.endswith(suffix + ".bias"):
                    patched.add_patches({key: (None,)}, 0.0, strength)
        return (patched, )
83
+
84
class UNetTemporalAttentionMultiply:
    """Node: scale attention output projections in a video UNet, with separate
    multipliers for structural (spatial) vs temporal (.time_stack.) blocks and
    for self (attn1) vs cross (attn2) attention."""

    @classmethod
    def INPUT_TYPES(s):
        def fopt():
            # Fresh options dict per input so widgets don't share state.
            return ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
        return {"required": {"model": ("MODEL",),
                             "self_structural": fopt(),
                             "self_temporal": fopt(),
                             "cross_structural": fopt(),
                             "cross_temporal": fopt(),
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "_for_testing/attention_experiments"

    def patch(self, model, self_structural, self_temporal, cross_structural, cross_temporal):
        patched = model.clone()
        for key in model.model_state_dict():
            # Temporal layers live under a ".time_stack." scope.
            is_temporal = '.time_stack.' in key
            if key.endswith("attn1.to_out.0.bias") or key.endswith("attn1.to_out.0.weight"):
                patched.add_patches({key: (None,)}, 0.0,
                                    self_temporal if is_temporal else self_structural)
            elif key.endswith("attn2.to_out.0.bias") or key.endswith("attn2.to_out.0.weight"):
                patched.add_patches({key: (None,)}, 0.0,
                                    cross_temporal if is_temporal else cross_structural)
        return (patched, )
114
+
115
+ # Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
116
# Node registration: internal node name -> implementing class.
NODE_CLASS_MAPPINGS = {
    "UNetSelfAttentionMultiply": UNetSelfAttentionMultiply,
    "UNetCrossAttentionMultiply": UNetCrossAttentionMultiply,
    "CLIPAttentionMultiply": CLIPAttentionMultiply,
    "UNetTemporalAttentionMultiply": UNetTemporalAttentionMultiply,
}
ldm_patched/contrib/nodes_audio.py ADDED
@@ -0,0 +1,371 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # from __future__ import annotations
2
+
3
+ # import av
4
+ # import torchaudio
5
+ # import torch
6
+ # import ldm_patched.modules.model_management
7
+ # import folder_paths
8
+ # import os
9
+ # import io
10
+ # import json
11
+ # import random
12
+ # import hashlib
13
+ # import ldm_patched.contrib.node_helpers
14
+ # # from ldm_patched.modules.args_parser import args
15
+ # from ldm_patched.modules.ldmpatched_types.node_typing import FileLocator
16
+
17
+ # class EmptyLatentAudio:
18
+ # def __init__(self):
19
+ # self.device = ldm_patched.modules.model_management.intermediate_device()
20
+
21
+ # @classmethod
22
+ # def INPUT_TYPES(s):
23
+ # return {"required": {"seconds": ("FLOAT", {"default": 47.6, "min": 1.0, "max": 1000.0, "step": 0.1}),
24
+ # "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."}),
25
+ # }}
26
+ # RETURN_TYPES = ("LATENT",)
27
+ # FUNCTION = "generate"
28
+
29
+ # CATEGORY = "latent/audio"
30
+
31
+ # def generate(self, seconds, batch_size):
32
+ # length = round((seconds * 44100 / 2048) / 2) * 2
33
+ # latent = torch.zeros([batch_size, 64, length], device=self.device)
34
+ # return ({"samples":latent, "type": "audio"}, )
35
+
36
+ # class ConditioningStableAudio:
37
+ # @classmethod
38
+ # def INPUT_TYPES(s):
39
+ # return {"required": {"positive": ("CONDITIONING", ),
40
+ # "negative": ("CONDITIONING", ),
41
+ # "seconds_start": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.1}),
42
+ # "seconds_total": ("FLOAT", {"default": 47.0, "min": 0.0, "max": 1000.0, "step": 0.1}),
43
+ # }}
44
+
45
+ # RETURN_TYPES = ("CONDITIONING","CONDITIONING")
46
+ # RETURN_NAMES = ("positive", "negative")
47
+
48
+ # FUNCTION = "append"
49
+
50
+ # CATEGORY = "conditioning"
51
+
52
+ # def append(self, positive, negative, seconds_start, seconds_total):
53
+ # positive = ldm_patched.contrib.node_helpers.conditioning_set_values(positive, {"seconds_start": seconds_start, "seconds_total": seconds_total})
54
+ # negative = ldm_patched.contrib.node_helpers.conditioning_set_values(negative, {"seconds_start": seconds_start, "seconds_total": seconds_total})
55
+ # return (positive, negative)
56
+
57
+ # class VAEEncodeAudio:
58
+ # @classmethod
59
+ # def INPUT_TYPES(s):
60
+ # return {"required": { "audio": ("AUDIO", ), "vae": ("VAE", )}}
61
+ # RETURN_TYPES = ("LATENT",)
62
+ # FUNCTION = "encode"
63
+
64
+ # CATEGORY = "latent/audio"
65
+
66
+ # def encode(self, vae, audio):
67
+ # sample_rate = audio["sample_rate"]
68
+ # if 44100 != sample_rate:
69
+ # waveform = torchaudio.functional.resample(audio["waveform"], sample_rate, 44100)
70
+ # else:
71
+ # waveform = audio["waveform"]
72
+
73
+ # t = vae.encode(waveform.movedim(1, -1))
74
+ # return ({"samples":t}, )
75
+
76
+ # class VAEDecodeAudio:
77
+ # @classmethod
78
+ # def INPUT_TYPES(s):
79
+ # return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
80
+ # RETURN_TYPES = ("AUDIO",)
81
+ # FUNCTION = "decode"
82
+
83
+ # CATEGORY = "latent/audio"
84
+
85
+ # def decode(self, vae, samples):
86
+ # audio = vae.decode(samples["samples"]).movedim(-1, 1)
87
+ # std = torch.std(audio, dim=[1,2], keepdim=True) * 5.0
88
+ # std[std < 1.0] = 1.0
89
+ # audio /= std
90
+ # return ({"waveform": audio, "sample_rate": 44100}, )
91
+
92
+
93
+ # def save_audio(self, audio, filename_prefix="ComfyUI", format="flac", prompt=None, extra_pnginfo=None, quality="128k"):
94
+
95
+ # filename_prefix += self.prefix_append
96
+ # full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
97
+ # results: list[FileLocator] = []
98
+
99
+ # # # Prepare metadata dictionary
100
+ # # metadata = {}
101
+ # # if not args.disable_metadata:
102
+ # # if prompt is not None:
103
+ # # metadata["prompt"] = json.dumps(prompt)
104
+ # # if extra_pnginfo is not None:
105
+ # # for x in extra_pnginfo:
106
+ # # metadata[x] = json.dumps(extra_pnginfo[x])
107
+
108
+ # # Opus supported sample rates
109
+ # OPUS_RATES = [8000, 12000, 16000, 24000, 48000]
110
+
111
+ # for (batch_number, waveform) in enumerate(audio["waveform"].cpu()):
112
+ # filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
113
+ # file = f"{filename_with_batch_num}_{counter:05}_.{format}"
114
+ # output_path = os.path.join(full_output_folder, file)
115
+
116
+ # # Use original sample rate initially
117
+ # sample_rate = audio["sample_rate"]
118
+
119
+ # # Handle Opus sample rate requirements
120
+ # if format == "opus":
121
+ # if sample_rate > 48000:
122
+ # sample_rate = 48000
123
+ # elif sample_rate not in OPUS_RATES:
124
+ # # Find the next highest supported rate
125
+ # for rate in sorted(OPUS_RATES):
126
+ # if rate > sample_rate:
127
+ # sample_rate = rate
128
+ # break
129
+ # if sample_rate not in OPUS_RATES: # Fallback if still not supported
130
+ # sample_rate = 48000
131
+
132
+ # # Resample if necessary
133
+ # if sample_rate != audio["sample_rate"]:
134
+ # waveform = torchaudio.functional.resample(waveform, audio["sample_rate"], sample_rate)
135
+
136
+ # # Create output with specified format
137
+ # output_buffer = io.BytesIO()
138
+ # output_container = av.open(output_buffer, mode='w', format=format)
139
+
140
+ # # # Set metadata on the container
141
+ # # for key, value in metadata.items():
142
+ # # output_container.metadata[key] = value
143
+
144
+ # # Set up the output stream with appropriate properties
145
+ # if format == "opus":
146
+ # out_stream = output_container.add_stream("libopus", rate=sample_rate)
147
+ # if quality == "64k":
148
+ # out_stream.bit_rate = 64000
149
+ # elif quality == "96k":
150
+ # out_stream.bit_rate = 96000
151
+ # elif quality == "128k":
152
+ # out_stream.bit_rate = 128000
153
+ # elif quality == "192k":
154
+ # out_stream.bit_rate = 192000
155
+ # elif quality == "320k":
156
+ # out_stream.bit_rate = 320000
157
+ # elif format == "mp3":
158
+ # out_stream = output_container.add_stream("libmp3lame", rate=sample_rate)
159
+ # if quality == "V0":
160
+ # #TODO i would really love to support V3 and V5 but there doesn't seem to be a way to set the qscale level, the property below is a bool
161
+ # out_stream.codec_context.qscale = 1
162
+ # elif quality == "128k":
163
+ # out_stream.bit_rate = 128000
164
+ # elif quality == "320k":
165
+ # out_stream.bit_rate = 320000
166
+ # else: #format == "flac":
167
+ # out_stream = output_container.add_stream("flac", rate=sample_rate)
168
+
169
+ # frame = av.AudioFrame.from_ndarray(waveform.movedim(0, 1).reshape(1, -1).float().numpy(), format='flt', layout='mono' if waveform.shape[0] == 1 else 'stereo')
170
+ # frame.sample_rate = sample_rate
171
+ # frame.pts = 0
172
+ # output_container.mux(out_stream.encode(frame))
173
+
174
+ # # Flush encoder
175
+ # output_container.mux(out_stream.encode(None))
176
+
177
+ # # Close containers
178
+ # output_container.close()
179
+
180
+ # # Write the output to file
181
+ # output_buffer.seek(0)
182
+ # with open(output_path, 'wb') as f:
183
+ # f.write(output_buffer.getbuffer())
184
+
185
+ # results.append({
186
+ # "filename": file,
187
+ # "subfolder": subfolder,
188
+ # "type": self.type
189
+ # })
190
+ # counter += 1
191
+
192
+ # return { "ui": { "audio": results } }
193
+
194
+ # class SaveAudio:
195
+ # def __init__(self):
196
+ # self.output_dir = folder_paths.get_output_directory()
197
+ # self.type = "output"
198
+ # self.prefix_append = ""
199
+
200
+ # @classmethod
201
+ # def INPUT_TYPES(s):
202
+ # return {"required": { "audio": ("AUDIO", ),
203
+ # "filename_prefix": ("STRING", {"default": "audio/ComfyUI"}),
204
+ # },
205
+ # "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
206
+ # }
207
+
208
+ # RETURN_TYPES = ()
209
+ # FUNCTION = "save_flac"
210
+
211
+ # OUTPUT_NODE = True
212
+
213
+ # CATEGORY = "audio"
214
+
215
+ # def save_flac(self, audio, filename_prefix="ComfyUI", format="flac", prompt=None, extra_pnginfo=None):
216
+ # return save_audio(self, audio, filename_prefix, format, prompt, extra_pnginfo)
217
+
218
+ # class SaveAudioMP3:
219
+ # def __init__(self):
220
+ # self.output_dir = folder_paths.get_output_directory()
221
+ # self.type = "output"
222
+ # self.prefix_append = ""
223
+
224
+ # @classmethod
225
+ # def INPUT_TYPES(s):
226
+ # return {"required": { "audio": ("AUDIO", ),
227
+ # "filename_prefix": ("STRING", {"default": "audio/ComfyUI"}),
228
+ # "quality": (["V0", "128k", "320k"], {"default": "V0"}),
229
+ # },
230
+ # "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
231
+ # }
232
+
233
+ # RETURN_TYPES = ()
234
+ # FUNCTION = "save_mp3"
235
+
236
+ # OUTPUT_NODE = True
237
+
238
+ # CATEGORY = "audio"
239
+
240
+ # def save_mp3(self, audio, filename_prefix="ComfyUI", format="mp3", prompt=None, extra_pnginfo=None, quality="128k"):
241
+ # return save_audio(self, audio, filename_prefix, format, prompt, extra_pnginfo, quality)
242
+
243
+ # class SaveAudioOpus:
244
+ # def __init__(self):
245
+ # self.output_dir = folder_paths.get_output_directory()
246
+ # self.type = "output"
247
+ # self.prefix_append = ""
248
+
249
+ # @classmethod
250
+ # def INPUT_TYPES(s):
251
+ # return {"required": { "audio": ("AUDIO", ),
252
+ # "filename_prefix": ("STRING", {"default": "audio/ComfyUI"}),
253
+ # "quality": (["64k", "96k", "128k", "192k", "320k"], {"default": "128k"}),
254
+ # },
255
+ # "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
256
+ # }
257
+
258
+ # RETURN_TYPES = ()
259
+ # FUNCTION = "save_opus"
260
+
261
+ # OUTPUT_NODE = True
262
+
263
+ # CATEGORY = "audio"
264
+
265
+ # def save_opus(self, audio, filename_prefix="ComfyUI", format="opus", prompt=None, extra_pnginfo=None, quality="V3"):
266
+ # return save_audio(self, audio, filename_prefix, format, prompt, extra_pnginfo, quality)
267
+
268
+ # class PreviewAudio(SaveAudio):
269
+ # def __init__(self):
270
+ # self.output_dir = folder_paths.get_temp_directory()
271
+ # self.type = "temp"
272
+ # self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5))
273
+
274
+ # @classmethod
275
+ # def INPUT_TYPES(s):
276
+ # return {"required":
277
+ # {"audio": ("AUDIO", ), },
278
+ # "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
279
+ # }
280
+
281
+ # def f32_pcm(wav: torch.Tensor) -> torch.Tensor:
282
+ # """Convert audio to float 32 bits PCM format."""
283
+ # if wav.dtype.is_floating_point:
284
+ # return wav
285
+ # elif wav.dtype == torch.int16:
286
+ # return wav.float() / (2 ** 15)
287
+ # elif wav.dtype == torch.int32:
288
+ # return wav.float() / (2 ** 31)
289
+ # raise ValueError(f"Unsupported wav dtype: {wav.dtype}")
290
+
291
+ # def load(filepath: str) -> tuple[torch.Tensor, int]:
292
+ # with av.open(filepath) as af:
293
+ # if not af.streams.audio:
294
+ # raise ValueError("No audio stream found in the file.")
295
+
296
+ # stream = af.streams.audio[0]
297
+ # sr = stream.codec_context.sample_rate
298
+ # n_channels = stream.channels
299
+
300
+ # frames = []
301
+ # length = 0
302
+ # for frame in af.decode(streams=stream.index):
303
+ # buf = torch.from_numpy(frame.to_ndarray())
304
+ # if buf.shape[0] != n_channels:
305
+ # buf = buf.view(-1, n_channels).t()
306
+
307
+ # frames.append(buf)
308
+ # length += buf.shape[1]
309
+
310
+ # if not frames:
311
+ # raise ValueError("No audio frames decoded.")
312
+
313
+ # wav = torch.cat(frames, dim=1)
314
+ # wav = f32_pcm(wav)
315
+ # return wav, sr
316
+
317
+ # class LoadAudio:
318
+ # @classmethod
319
+ # def INPUT_TYPES(s):
320
+ # input_dir = folder_paths.get_input_directory()
321
+ # files = folder_paths.filter_files_content_types(os.listdir(input_dir), ["audio", "video"])
322
+ # return {"required": {"audio": (sorted(files), {"audio_upload": True})}}
323
+
324
+ # CATEGORY = "audio"
325
+
326
+ # RETURN_TYPES = ("AUDIO", )
327
+ # FUNCTION = "load"
328
+
329
+ # def load(self, audio):
330
+ # audio_path = folder_paths.get_annotated_filepath(audio)
331
+ # waveform, sample_rate = load(audio_path)
332
+ # audio = {"waveform": waveform.unsqueeze(0), "sample_rate": sample_rate}
333
+ # return (audio, )
334
+
335
+ # @classmethod
336
+ # def IS_CHANGED(s, audio):
337
+ # image_path = folder_paths.get_annotated_filepath(audio)
338
+ # m = hashlib.sha256()
339
+ # with open(image_path, 'rb') as f:
340
+ # m.update(f.read())
341
+ # return m.digest().hex()
342
+
343
+ # @classmethod
344
+ # def VALIDATE_INPUTS(s, audio):
345
+ # if not folder_paths.exists_annotated_filepath(audio):
346
+ # return "Invalid audio file: {}".format(audio)
347
+ # return True
348
+
349
+ # # Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
350
+ # NODE_CLASS_MAPPINGS = {
351
+ # "EmptyLatentAudio": EmptyLatentAudio,
352
+ # "VAEEncodeAudio": VAEEncodeAudio,
353
+ # "VAEDecodeAudio": VAEDecodeAudio,
354
+ # "SaveAudio": SaveAudio,
355
+ # "SaveAudioMP3": SaveAudioMP3,
356
+ # "SaveAudioOpus": SaveAudioOpus,
357
+ # "LoadAudio": LoadAudio,
358
+ # "PreviewAudio": PreviewAudio,
359
+ # "ConditioningStableAudio": ConditioningStableAudio,
360
+ # }
361
+
362
+ # NODE_DISPLAY_NAME_MAPPINGS = {
363
+ # "EmptyLatentAudio": "Empty Latent Audio",
364
+ # "VAEEncodeAudio": "VAE Encode Audio",
365
+ # "VAEDecodeAudio": "VAE Decode Audio",
366
+ # "PreviewAudio": "Preview Audio",
367
+ # "LoadAudio": "Load Audio",
368
+ # "SaveAudio": "Save Audio (FLAC)",
369
+ # "SaveAudioMP3": "Save Audio (MP3)",
370
+ # "SaveAudioOpus": "Save Audio (Opus)",
371
+ # }
ldm_patched/contrib/nodes_camera_trajectory.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import nodes
2
+ import torch
3
+ import numpy as np
4
+ from einops import rearrange
5
+ import ldm_patched.modules.model_management
6
+
7
+
8
+
9
+ MAX_RESOLUTION = nodes.MAX_RESOLUTION
10
+
11
# Preset camera trajectories: per-axis rotation direction ("angle") and
# translation direction ("T") for each named motion. The directions are
# scaled at runtime by base_angle / base_T_norm and the user's speed.
CAMERA_DICT = {
    "base_T_norm": 1.5,
    "base_angle": np.pi/3,
    "Static": { "angle":[0., 0., 0.], "T":[0., 0., 0.]},
    "Pan Up": { "angle":[0., 0., 0.], "T":[0., -1., 0.]},
    "Pan Down": { "angle":[0., 0., 0.], "T":[0.,1.,0.]},
    "Pan Left": { "angle":[0., 0., 0.], "T":[-1.,0.,0.]},
    "Pan Right": { "angle":[0., 0., 0.], "T": [1.,0.,0.]},
    "Zoom In": { "angle":[0., 0., 0.], "T": [0.,0.,2.]},
    "Zoom Out": { "angle":[0., 0., 0.], "T": [0.,0.,-2.]},
    "Anti Clockwise (ACW)": { "angle": [0., 0., -1.], "T":[0., 0., 0.]},
    "ClockWise (CW)": { "angle": [0., 0., 1.], "T":[0., 0., 0.]},
}
24
+
25
+
26
def process_pose_params(cam_params, width=672, height=384, original_pose_width=1280, original_pose_height=720, device='cpu'):
    """Convert raw camera pose rows into a per-frame Plücker embedding.

    Modified from https://github.com/hehao13/CameraCtrl/blob/main/inference.py

    Args:
        cam_params: sequence of pose rows, each parsed by ``Camera``
            (fx, fy, cx, cy at indices 1:5, flattened 4x4 c2w matrix at 7:).
        width, height: target sample resolution in pixels.
        original_pose_width, original_pose_height: resolution the poses were
            recorded at, used to rescale focal lengths for the new aspect ratio.
        device: device for the generated pixel grid.

    Returns:
        Tensor of shape (n_frames, height, width, 6) with per-pixel
        Plücker coordinates.
    """

    def get_relative_pose(cam_params):
        """Copied from https://github.com/hehao13/CameraCtrl/blob/main/inference.py
        """
        # Re-express every camera pose relative to the first frame, which is
        # replaced by the canonical target pose (identity, at the origin).
        abs_w2cs = [cam_param.w2c_mat for cam_param in cam_params]
        abs_c2ws = [cam_param.c2w_mat for cam_param in cam_params]
        cam_to_origin = 0
        target_cam_c2w = np.array([
            [1, 0, 0, 0],
            [0, 1, 0, -cam_to_origin],
            [0, 0, 1, 0],
            [0, 0, 0, 1]
        ])
        abs2rel = target_cam_c2w @ abs_w2cs[0]
        ret_poses = [target_cam_c2w, ] + [abs2rel @ abs_c2w for abs_c2w in abs_c2ws[1:]]
        ret_poses = np.array(ret_poses, dtype=np.float32)
        return ret_poses

    """Modified from https://github.com/hehao13/CameraCtrl/blob/main/inference.py
    """
    cam_params = [Camera(cam_param) for cam_param in cam_params]

    # Rescale normalized focal lengths so the original poses match the
    # sample's aspect ratio (letterbox along the narrower axis).
    sample_wh_ratio = width / height
    pose_wh_ratio = original_pose_width / original_pose_height  # Assuming placeholder ratios, change as needed

    if pose_wh_ratio > sample_wh_ratio:
        resized_ori_w = height * pose_wh_ratio
        for cam_param in cam_params:
            cam_param.fx = resized_ori_w * cam_param.fx / width
    else:
        resized_ori_h = width / pose_wh_ratio
        for cam_param in cam_params:
            cam_param.fy = resized_ori_h * cam_param.fy / height

    # Intrinsics in pixel units: [fx, fy, cx, cy] per frame.
    intrinsic = np.asarray([[cam_param.fx * width,
                             cam_param.fy * height,
                             cam_param.cx * width,
                             cam_param.cy * height]
                            for cam_param in cam_params], dtype=np.float32)

    K = torch.as_tensor(intrinsic)[None]  # [1, n_frame, 4]
    c2ws = get_relative_pose(cam_params)  # poses relative to the first frame
    c2ws = torch.as_tensor(c2ws)[None]  # [1, n_frame, 4, 4]
    plucker_embedding = ray_condition(K, c2ws, height, width, device=device)[0].permute(0, 3, 1, 2).contiguous()  # V, 6, H, W
    plucker_embedding = plucker_embedding[None]
    plucker_embedding = rearrange(plucker_embedding, "b f c h w -> b f h w c")[0]
    return plucker_embedding
74
+
75
class Camera(object):
    """Pinhole camera parsed from one CameraCtrl pose row.

    Copied from https://github.com/hehao13/CameraCtrl/blob/main/inference.py

    A row carries fx, fy, cx, cy at indices 1:5 and a flattened 4x4
    camera-to-world matrix from index 7 onward; the world-to-camera matrix
    is derived by inversion.
    """
    def __init__(self, entry):
        self.fx, self.fy, self.cx, self.cy = entry[1:5]
        self.c2w_mat = np.array(entry[7:]).reshape(4, 4)
        self.w2c_mat = np.linalg.inv(self.c2w_mat)
87
+
88
def ray_condition(K, c2w, H, W, device):
    """Compute per-pixel Plücker ray coordinates for a batch of cameras.

    Copied from https://github.com/hehao13/CameraCtrl/blob/main/inference.py

    Args:
        K: (B, V, 4) intrinsics per view as [fx, fy, cx, cy], pixel units.
        c2w: (B, V, 4, 4) camera-to-world extrinsic matrices.
        H, W: image height and width in pixels.
        device: torch device for the generated pixel grid.

    Returns:
        (B, V, H, W, 6) tensor holding [o x d, d] per pixel, where o is the
        ray origin (camera centre) and d the unit ray direction in world space.
    """
    B = K.shape[0]

    # Pixel-centre grid (+0.5 puts rays through pixel centres).
    j, i = torch.meshgrid(
        torch.linspace(0, H - 1, H, device=device, dtype=c2w.dtype),
        torch.linspace(0, W - 1, W, device=device, dtype=c2w.dtype),
        indexing='ij'
    )
    i = i.reshape([1, 1, H * W]).expand([B, 1, H * W]) + 0.5  # [B, 1, HxW]
    j = j.reshape([1, 1, H * W]).expand([B, 1, H * W]) + 0.5  # [B, 1, HxW]

    fx, fy, cx, cy = K.chunk(4, dim=-1)  # each [B, V, 1]

    # Unproject pixel coordinates to camera-space directions at z = 1.
    zs = torch.ones_like(i)  # [B, 1, HxW]
    xs = (i - cx) / fx * zs
    ys = (j - cy) / fy * zs
    zs = zs.expand_as(ys)

    directions = torch.stack((xs, ys, zs), dim=-1)  # [B, V, HW, 3]
    directions = directions / directions.norm(dim=-1, keepdim=True)  # unit length

    # Rotate directions into world space; ray origins are the camera centres.
    rays_d = directions @ c2w[..., :3, :3].transpose(-1, -2)  # [B, V, HW, 3]
    rays_o = c2w[..., :3, 3]  # [B, V, 3]
    rays_o = rays_o[:, :, None].expand_as(rays_d)  # [B, V, HW, 3]
    # Moment of the ray: o x d. dim=-1 is required: the deprecated implicit
    # default picks the *first* dimension of size 3, which silently selects
    # the wrong axis whenever B, V or H*W happens to equal 3.
    rays_dxo = torch.cross(rays_o, rays_d, dim=-1)
    plucker = torch.cat([rays_dxo, rays_d], dim=-1)
    plucker = plucker.reshape(B, c2w.shape[1], H, W, 6)  # [B, V, H, W, 6]
    return plucker
123
+
124
def get_camera_motion(angle, T, speed, n=81):
    """Build ``n`` interpolated [R|t] extrinsics (shape (n, 3, 4)) ramping
    linearly from identity toward the pose given by ``angle`` (radians
    direction, scaled by base_angle) and ``T`` (translation direction,
    scaled by base_T_norm), both multiplied by ``speed``."""

    def rotation_from_angles(angles):
        # Compose per-axis rotations as R = Rz @ Ry @ Rx.
        tx, ty, tz = angles
        rx = np.array([[1, 0, 0],
                       [0, np.cos(tx), -np.sin(tx)],
                       [0, np.sin(tx), np.cos(tx)]])

        ry = np.array([[np.cos(ty), 0, np.sin(ty)],
                       [0, 1, 0],
                       [-np.sin(ty), 0, np.cos(ty)]])

        rz = np.array([[np.cos(tz), -np.sin(tz), 0],
                       [np.sin(tz), np.cos(tz), 0],
                       [0, 0, 1]])

        return rz @ ry @ rx

    poses = []
    for step in range(n):
        frac = step / n
        R = rotation_from_angles(frac * speed * CAMERA_DICT["base_angle"] * angle)
        t = frac * speed * CAMERA_DICT["base_T_norm"] * T.reshape(3, 1)
        poses.append(np.concatenate([R, t], axis=1))
    return np.stack(poses)
150
+
151
class WanCameraEmbedding:
    """Node that turns a named camera trajectory into a Plücker camera
    embedding tensor for Wan video models."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "camera_pose":(["Static","Pan Up","Pan Down","Pan Left","Pan Right","Zoom In","Zoom Out","Anti Clockwise (ACW)", "ClockWise (CW)"],{"default":"Static"}),
                "width": ("INT", {"default": 832, "min": 16, "max": MAX_RESOLUTION, "step": 16}),
                "height": ("INT", {"default": 480, "min": 16, "max": MAX_RESOLUTION, "step": 16}),
                "length": ("INT", {"default": 81, "min": 1, "max": MAX_RESOLUTION, "step": 4}),
            },
            "optional":{
                # Normalized intrinsics (fractions of width/height).
                "speed":("FLOAT",{"default":1.0, "min": 0, "max": 10.0, "step": 0.1}),
                "fx":("FLOAT",{"default":0.5, "min": 0, "max": 1, "step": 0.000000001}),
                "fy":("FLOAT",{"default":0.5, "min": 0, "max": 1, "step": 0.000000001}),
                "cx":("FLOAT",{"default":0.5, "min": 0, "max": 1, "step": 0.01}),
                "cy":("FLOAT",{"default":0.5, "min": 0, "max": 1, "step": 0.01}),
            }

        }

    RETURN_TYPES = ("WAN_CAMERA_EMBEDDING","INT","INT","INT")
    RETURN_NAMES = ("camera_embedding","width","height","length")
    FUNCTION = "run"
    CATEGORY = "camera"

    def run(self, camera_pose, width, height, length, speed=1.0, fx=0.5, fy=0.5, cx=0.5, cy=0.5):
        """
        Use Camera trajectory as extrinsic parameters to calculate Plücker embeddings (Sitzmannet al., 2021)
        Adapted from https://github.com/aigc-apps/VideoX-Fun/blob/main/comfyui/comfyui_nodes.py
        """
        # Look up the preset motion and build per-frame [R|t] extrinsics.
        motion_list = [camera_pose]
        speed = speed  # NOTE(review): redundant self-assignment kept as-is
        angle = np.array(CAMERA_DICT[motion_list[0]]["angle"])
        T = np.array(CAMERA_DICT[motion_list[0]]["T"])
        RT = get_camera_motion(angle, T, speed, length)

        # Pack each pose row as [fx, fy, cx, cy, 0, 0, <flattened 4x4 c2w>]
        # — the layout the Camera class expects (after one leading pad column).
        trajs=[]
        for cp in RT.tolist():
            traj=[fx,fy,cx,cy,0,0]
            traj.extend(cp[0])
            traj.extend(cp[1])
            traj.extend(cp[2])
            traj.extend([0,0,0,1])
            trajs.append(traj)

        cam_params = np.array([[float(x) for x in pose] for pose in trajs])
        # Prepend a zero column so field indices match Camera's entry[1:5]/entry[7:].
        cam_params = np.concatenate([np.zeros_like(cam_params[:, :1]), cam_params], 1)
        control_camera_video = process_pose_params(cam_params, width=width, height=height)
        control_camera_video = control_camera_video.permute([3, 0, 1, 2]).unsqueeze(0).to(device=ldm_patched.modules.model_management.intermediate_device())

        # Repeat the first frame 4x so the frame count is divisible by the
        # 4-frame temporal packing below.
        control_camera_video = torch.concat(
            [
                torch.repeat_interleave(control_camera_video[:, :, 0:1], repeats=4, dim=2),
                control_camera_video[:, :, 1:]
            ], dim=2
        ).transpose(1, 2)

        # Reshape, transpose, and view into desired shape: fold groups of 4
        # consecutive frames into the channel dimension.
        b, f, c, h, w = control_camera_video.shape
        control_camera_video = control_camera_video.contiguous().view(b, f // 4, 4, c, h, w).transpose(2, 3)
        control_camera_video = control_camera_video.contiguous().view(b, f // 4, c * 4, h, w).transpose(1, 2)

        return (control_camera_video, width, height, length)
214
+
215
+
216
+ # Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
217
# Node registration: internal node name -> implementing class.
NODE_CLASS_MAPPINGS = {
    "WanCameraEmbedding": WanCameraEmbedding,
}
ldm_patched/contrib/nodes_canny.py ADDED
@@ -0,0 +1,304 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Original code from Comfy, https://github.com/comfyanonymous/ComfyUI
2
+
3
+
4
+
5
+ #From https://github.com/kornia/kornia
6
+ import math
7
+
8
+ import torch
9
+ import torch.nn.functional as F
10
+ import ldm_patched.modules.model_management
11
+
12
def get_canny_nms_kernel(device=None, dtype=None):
    """Build the eight 3x3 directional kernels for Canny non-maximal suppression.

    Kernel i has weight +1 at the center and -1 at one of the eight
    neighbours, ordered to match the gradient-angle indexing used by the
    NMS step (0°, 45°, ..., 315°). Returned shape is (8, 1, 3, 3).
    """
    # (row, col) offsets of the eight neighbours, in angle order.
    neighbour_offsets = [
        (1, 2), (2, 2), (2, 1), (2, 0),
        (1, 0), (0, 0), (0, 1), (0, 2),
    ]
    kernels = torch.zeros((8, 1, 3, 3), device=device, dtype=dtype)
    kernels[:, 0, 1, 1] = 1.0
    for idx, (row, col) in enumerate(neighbour_offsets):
        kernels[idx, 0, row, col] = -1.0
    return kernels
28
+
29
+
30
def get_hysteresis_kernel(device=None, dtype=None):
    """Build the eight 3x3 single-neighbour kernels for Canny hysteresis.

    Kernel i picks out exactly one of the eight neighbours of the center
    pixel (weight 1.0, all other entries 0), in the same angle order used
    by the NMS kernels. Returned shape is (8, 1, 3, 3).
    """
    neighbour_offsets = [
        (1, 2), (2, 2), (2, 1), (2, 0),
        (1, 0), (0, 0), (0, 1), (0, 2),
    ]
    kernels = torch.zeros((8, 1, 3, 3), device=device, dtype=dtype)
    for idx, (row, col) in enumerate(neighbour_offsets):
        kernels[idx, 0, row, col] = 1.0
    return kernels
46
+
47
def gaussian_blur_2d(img, kernel_size, sigma):
    """Apply a depthwise Gaussian blur to a (B, C, H, W) image tensor.

    The 2-D kernel is the outer product of a normalized 1-D Gaussian, and
    the input is reflect-padded so the output keeps the input's spatial size.
    """
    half_width = (kernel_size - 1) * 0.5
    coords = torch.linspace(-half_width, half_width, steps=kernel_size)

    # Unnormalized Gaussian profile, then normalize so weights sum to 1.
    profile = torch.exp(-0.5 * (coords / sigma) ** 2)
    profile = (profile / profile.sum()).to(device=img.device, dtype=img.dtype)

    # Separable kernel: outer product of the 1-D profile with itself,
    # expanded to one kernel per input channel.
    kernel = profile[:, None] @ profile[None, :]
    channels = img.shape[-3]
    kernel = kernel.expand(channels, 1, kernel.shape[0], kernel.shape[1])

    pad = kernel_size // 2
    padded = torch.nn.functional.pad(img, [pad, pad, pad, pad], mode="reflect")
    # groups=channels -> one independent blur per channel (depthwise conv).
    return torch.nn.functional.conv2d(padded, kernel, groups=channels)
66
+
67
def get_sobel_kernel2d(device=None, dtype=None):
    """Return the pair of 3x3 Sobel kernels stacked as (2, 3, 3): x first, then y."""
    gx = torch.tensor(
        [[-1.0, 0.0, 1.0],
         [-2.0, 0.0, 2.0],
         [-1.0, 0.0, 1.0]],
        device=device,
        dtype=dtype,
    )
    # The y kernel is simply the transpose of the x kernel.
    return torch.stack([gx, gx.transpose(0, 1)])
71
+
72
def spatial_gradient(input, normalized: bool = True):
    r"""Compute the first order image derivative in both x and y using a Sobel operator.

    Args:
        input: input image tensor with shape :math:`(B, C, H, W)`.
        normalized: whether the Sobel kernels are L1-normalized before use, so
            the response magnitude is independent of the kernel weight sum.

    Return:
        the derivatives of the input feature map with shape :math:`(B, C, 2, H, W)`
        (index 0 of dim 2 is the x-gradient, index 1 the y-gradient).

    Examples:
        >>> input = torch.rand(1, 3, 4, 4)
        >>> output = spatial_gradient(input)  # 1x3x2x4x4
        >>> output.shape
        torch.Size([1, 3, 2, 4, 4])
    """
    # allocate kernel
    kernel = get_sobel_kernel2d(device=input.device, dtype=input.dtype)
    if normalized:
        # Bug fix: the original called normalize_kernel2d(), which is not
        # defined anywhere in this file, so the default normalized=True path
        # raised NameError. Inline the L1 normalization that kornia's
        # normalize_kernel2d performs: divide each kernel by the sum of the
        # absolute values of its entries.
        norm = kernel.abs().sum(dim=-1).sum(dim=-1)
        kernel = kernel / norm[..., None, None]

    # prepare kernel: (2, 3, 3) -> (2, 1, 3, 3) so conv2d treats it as two
    # output channels over a single input channel.
    b, c, h, w = input.shape
    tmp_kernel = kernel[:, None, ...]

    # Pad with "replicate" for the spatial dims so the output keeps the input size.
    spatial_pad = [kernel.size(1) // 2, kernel.size(1) // 2, kernel.size(2) // 2, kernel.size(2) // 2]
    out_channels: int = 2
    # Fold channels into the batch dim so every channel gets both kernels.
    padded_inp = torch.nn.functional.pad(input.reshape(b * c, 1, h, w), spatial_pad, 'replicate')
    out = F.conv2d(padded_inp, tmp_kernel, groups=1, padding=0, stride=1)
    return out.reshape(b, c, out_channels, h, w)
109
+
110
def rgb_to_grayscale(image, rgb_weights=None):
    r"""Convert a RGB image to its grayscale version.

    Args:
        image: RGB image of shape :math:`(*, 3, H, W)`; floating-point data is
            assumed to be in the range (0, 1).
        rgb_weights: optional per-channel (R, G, B) weights; the sum of the
            weights should add up to one. Defaults depend on the image dtype.

    Returns:
        grayscale version of the image with shape :math:`(*, 1, H, W)`.

    Example:
        >>> input = torch.rand(2, 3, 4, 5)
        >>> gray = rgb_to_grayscale(input)  # 2x1x4x5
    """
    if image.ndim < 3 or image.shape[-3] != 3:
        raise ValueError(f"Input size must have a shape of (*, 3, H, W). Got {image.shape}")

    if rgb_weights is None:
        if image.dtype == torch.uint8:
            # Integer-scaled luma weights for 8-bit images.
            rgb_weights = torch.tensor([76, 150, 29], device=image.device, dtype=torch.uint8)
        elif image.dtype in (torch.float16, torch.float32, torch.float64):
            # Standard ITU-R 601 luma weights for floating-point images.
            rgb_weights = torch.tensor([0.299, 0.587, 0.114], device=image.device, dtype=image.dtype)
        else:
            raise TypeError(f"Unknown data type: {image.dtype}")
    else:
        # Move caller-supplied weights onto the image's device/dtype.
        rgb_weights = rgb_weights.to(image)

    # Unpack the color channels (RGB order) and blend with the weights.
    red, green, blue = (image[..., ch:ch + 1, :, :] for ch in range(3))
    w_r, w_g, w_b = rgb_weights.unbind()
    return w_r * red + w_g * green + w_b * blue
156
+
157
def canny(
    input,
    low_threshold = 0.1,
    high_threshold = 0.2,
    kernel_size = 5,
    sigma = 1,
    hysteresis = True,
    eps = 1e-6,
):
    r"""Find edges of the input image and filters them using the Canny algorithm.

    Pipeline: grayscale -> Gaussian blur -> Sobel gradients -> non-maximal
    suppression along the gradient direction -> double threshold -> optional
    iterative hysteresis edge tracking.

    Args:
        input: input image tensor with shape :math:`(B,C,H,W)`.
        low_threshold: lower threshold for the hysteresis procedure.
        high_threshold: upper threshold for the hysteresis procedure.
        kernel_size: the size of the kernel for the gaussian blur.
        sigma: the standard deviation of the kernel for the gaussian blur.
        hysteresis: if True, applies the hysteresis edge tracking.
            Otherwise, the edges are divided between weak (0.5) and strong (1) edges.
        eps: regularization number to avoid NaN during backprop.
    Returns:
        - the canny edge magnitudes map, shape of :math:`(B,1,H,W)`.
        - the canny edge detection filtered by thresholds and hysteresis, shape of :math:`(B,1,H,W)`.
    Example:
        >>> input = torch.rand(5, 3, 4, 4)
        >>> magnitude, edges = canny(input)  # 5x3x4x4
        >>> magnitude.shape
        torch.Size([5, 1, 4, 4])
        >>> edges.shape
        torch.Size([5, 1, 4, 4])
    """
    # Upstream kornia validation, kept disabled here:
    # KORNIA_CHECK_IS_TENSOR(input)
    # KORNIA_CHECK_SHAPE(input, ['B', 'C', 'H', 'W'])
    # KORNIA_CHECK(
    #     low_threshold <= high_threshold,
    #     "Invalid input thresholds. low_threshold should be smaller than the high_threshold. Got: "
    #     f"{low_threshold}>{high_threshold}",
    # )
    # KORNIA_CHECK(0 < low_threshold < 1, f'Invalid low threshold. Should be in range (0, 1). Got: {low_threshold}')
    # KORNIA_CHECK(0 < high_threshold < 1, f'Invalid high threshold. Should be in range (0, 1). Got: {high_threshold}')

    device = input.device
    dtype = input.dtype

    # To Grayscale (only when the input actually has 3 channels)
    if input.shape[1] == 3:
        input = rgb_to_grayscale(input)

    # Gaussian filter
    blurred: Tensor = gaussian_blur_2d(input, kernel_size, sigma)

    # Compute the gradients (normalized=False avoids the kernel normalization path)
    gradients: Tensor = spatial_gradient(blurred, normalized=False)

    # Unpack the edges
    gx: Tensor = gradients[:, :, 0]
    gy: Tensor = gradients[:, :, 1]

    # Compute gradient magnitude and angle (eps keeps sqrt differentiable at 0)
    magnitude: Tensor = torch.sqrt(gx * gx + gy * gy + eps)
    angle: Tensor = torch.atan2(gy, gx)

    # Radians to Degrees
    angle = 180.0 * angle / math.pi

    # Round angle to the nearest 45 degree
    angle = torch.round(angle / 45) * 45

    # Non-maximal suppression: each of the 8 kernels compares a pixel
    # against one neighbour (center minus neighbour).
    nms_kernels: Tensor = get_canny_nms_kernel(device, dtype)
    nms_magnitude: Tensor = F.conv2d(magnitude, nms_kernels, padding=nms_kernels.shape[-1] // 2)

    # Get the indices for both directions; the rounded angle selects which
    # of the 8 difference maps applies at each pixel.
    positive_idx: Tensor = (angle / 45) % 8
    positive_idx = positive_idx.long()

    negative_idx: Tensor = ((angle / 45) + 4) % 8
    negative_idx = negative_idx.long()

    # Apply the non-maximum suppression to the different directions
    channel_select_filtered_positive: Tensor = torch.gather(nms_magnitude, 1, positive_idx)
    channel_select_filtered_negative: Tensor = torch.gather(nms_magnitude, 1, negative_idx)

    channel_select_filtered: Tensor = torch.stack(
        [channel_select_filtered_positive, channel_select_filtered_negative], 1
    )

    # A pixel survives only if it beats both neighbours along its gradient direction.
    is_max: Tensor = channel_select_filtered.min(dim=1)[0] > 0.0

    magnitude = magnitude * is_max

    # Threshold
    # NOTE(review): this value is overwritten two statements below; kept
    # as-is from upstream.
    edges: Tensor = F.threshold(magnitude, low_threshold, 0.0)

    low: Tensor = magnitude > low_threshold
    high: Tensor = magnitude > high_threshold

    # Weak edges -> 0.5, strong edges -> 1.0 (strong pixels satisfy both tests).
    edges = low * 0.5 + high * 0.5
    edges = edges.to(dtype)

    # Hysteresis: iteratively promote weak edges touching a strong edge.
    if hysteresis:
        # Initialized to -1 so the loop below always runs at least once.
        edges_old: Tensor = -torch.ones(edges.shape, device=edges.device, dtype=dtype)
        hysteresis_kernels: Tensor = get_hysteresis_kernel(device, dtype)

        # Iterate until the edge map reaches a fixed point.
        while ((edges_old - edges).abs() != 0).any():
            weak: Tensor = (edges == 0.5).float()
            strong: Tensor = (edges == 1).float()

            hysteresis_magnitude: Tensor = F.conv2d(
                edges, hysteresis_kernels, padding=hysteresis_kernels.shape[-1] // 2
            )
            # A weak pixel with at least one strong neighbour becomes strong.
            hysteresis_magnitude = (hysteresis_magnitude == 1).any(1, keepdim=True).to(dtype)
            hysteresis_magnitude = hysteresis_magnitude * weak + strong

            # Keep remaining weak pixels at 0.5 for the next iteration.
            edges_old = edges.clone()
            edges = hysteresis_magnitude + (hysteresis_magnitude == 0) * weak * 0.5

        # Final map: only strong (possibly promoted) edges survive.
        edges = hysteresis_magnitude

    return magnitude, edges
281
+
282
+
283
class Canny:
    """Node wrapper: run the Canny edge detector over a batch of IMAGE tensors."""

    @classmethod
    def INPUT_TYPES(s):
        threshold_opts = {"min": 0.01, "max": 0.99, "step": 0.01}
        return {
            "required": {
                "image": ("IMAGE",),
                "low_threshold": ("FLOAT", dict(default=0.4, **threshold_opts)),
                "high_threshold": ("FLOAT", dict(default=0.8, **threshold_opts)),
            }
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "detect_edge"

    CATEGORY = "image/preprocessors"

    def detect_edge(self, image, low_threshold, high_threshold):
        # Run the detector on the compute device in channels-first layout.
        compute_device = ldm_patched.modules.model_management.get_torch_device()
        _, edge_map = canny(image.to(compute_device).movedim(-1, 1), low_threshold, high_threshold)
        # Move back to the intermediate device, replicate the single channel to
        # RGB, and restore channels-last IMAGE layout.
        out_device = ldm_patched.modules.model_management.intermediate_device()
        edge_image = edge_map.to(out_device).repeat(1, 3, 1, 1).movedim(1, -1)
        return (edge_image,)
300
+
301
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
# Registry consumed by the node loader: node name -> implementing class.
NODE_CLASS_MAPPINGS = {
    "Canny": Canny,
}
ldm_patched/contrib/nodes_cfg.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
def optimized_scale(positive, negative):
    """Per-sample projection coefficient from CFG-Zero*.

    Computes st_star = <v_cond, v_uncond> / ||v_uncond||^2 for each batch
    element, i.e. the scale that best projects the conditional prediction
    onto the unconditional one.

    Reference: https://github.com/WeichenFan/CFG-Zero-star

    Returns a tensor of shape (B, 1, ..., 1) broadcastable against `positive`.
    """
    batch = positive.shape[0]
    cond_flat = positive.reshape(batch, -1)
    uncond_flat = negative.reshape(batch, -1)

    # Dot product <v_cond, v_uncond> per sample.
    dot = (cond_flat * uncond_flat).sum(dim=1, keepdim=True)

    # Squared norm of the unconditional branch, with epsilon for safety.
    uncond_sq_norm = (uncond_flat * uncond_flat).sum(dim=1, keepdim=True) + 1e-8

    # st_star = v_cond^T * v_uncond / ||v_uncond||^2
    st_star = dot / uncond_sq_norm

    # Reshape so the coefficient broadcasts over all non-batch dimensions.
    return st_star.reshape([batch] + [1] * (positive.ndim - 1))
18
+
19
class CFGZeroStar:
    """Patch a model with the CFG-Zero* post-CFG correction.

    See https://github.com/WeichenFan/CFG-Zero-star
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model": ("MODEL",)}}

    RETURN_TYPES = ("MODEL",)
    RETURN_NAMES = ("patched_model",)
    FUNCTION = "patch"
    CATEGORY = "advanced/guidance"

    def patch(self, model):
        patched = model.clone()

        def cfg_zero_star(args):
            # Noise predictions: eps = x - denoised, for cond and uncond.
            guidance_scale = args['cond_scale']
            x = args['input']
            cond_p = args['cond_denoised']
            uncond_p = args['uncond_denoised']
            denoised = args["denoised"]
            # Optimal per-sample projection of cond eps onto uncond eps.
            alpha = optimized_scale(x - cond_p, x - uncond_p)
            # Rescale the unconditional contribution of the CFG result by alpha.
            return denoised + uncond_p * (alpha - 1.0) + guidance_scale * uncond_p * (1.0 - alpha)

        patched.set_model_sampler_post_cfg_function(cfg_zero_star)
        return (patched, )
42
+
43
class CFGNorm:
    """Rescale the CFG output so its per-channel norm never exceeds the
    conditional prediction's norm, then scale by `strength`."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "model": ("MODEL",),
            "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}),
        }}

    RETURN_TYPES = ("MODEL",)
    RETURN_NAMES = ("patched_model",)
    FUNCTION = "patch"
    CATEGORY = "advanced/guidance"
    EXPERIMENTAL = True

    def patch(self, model, strength):
        patched = model.clone()

        def cfg_norm(args):
            cond_pred = args['cond_denoised']
            cfg_pred = args["denoised"]

            # Per-channel norms of the conditional and post-CFG predictions.
            cond_norm = torch.norm(cond_pred, dim=1, keepdim=True)
            cfg_pred_norm = torch.norm(cfg_pred, dim=1, keepdim=True)
            # Shrink (never grow) the CFG output toward the conditional norm.
            ratio = (cond_norm / (cfg_pred_norm + 1e-8)).clamp(min=0.0, max=1.0)
            return cfg_pred * ratio * strength

        patched.set_model_sampler_post_cfg_function(cfg_norm)
        return (patched, )
68
+
69
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
# Registry consumed by the node loader: node name -> implementing class.
NODE_CLASS_MAPPINGS = {
    "CFGZeroStar": CFGZeroStar,
    "CFGNorm": CFGNorm,
}
ldm_patched/contrib/nodes_clip_sdxl.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Original code from Comfy, https://github.com/comfyanonymous/ComfyUI
2
+
3
+
4
+
5
+ import torch
6
+ from ldm_patched.contrib.nodes import MAX_RESOLUTION
7
+
8
class CLIPTextEncodeSDXLRefiner:
    """Encode text for the SDXL refiner, attaching aesthetic-score and size
    conditioning alongside the pooled CLIP output."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "ascore": ("FLOAT", {"default": 6.0, "min": 0.0, "max": 1000.0, "step": 0.01}),
            "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
            "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
            "text": ("STRING", {"multiline": True}), "clip": ("CLIP", ),
        }}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "advanced/conditioning"

    def encode(self, clip, ascore, width, height, text):
        # Tokenize and encode, keeping the pooled output for the SDXL extras.
        tokens = clip.tokenize(text)
        cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
        extras = {
            "pooled_output": pooled,
            "aesthetic_score": ascore,
            "width": width,
            "height": height,
        }
        return ([[cond, extras]], )
26
+
27
class CLIPTextEncodeSDXL:
    """Encode separate CLIP-G / CLIP-L prompts for SDXL with size/crop conditioning.

    The "l" token stream is padded (or "g" extended) with empty-prompt tokens
    so both streams have the same length before encoding.
    """

    @classmethod
    def INPUT_TYPES(s):
        # Bug fix: the original dict literal listed the "clip" key twice (once
        # after text_g and again after text_l); duplicate keys in a dict
        # literal silently overwrite. The single entry below keeps the key in
        # its original effective position (between text_g and text_l).
        return {"required": {
            "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
            "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
            "crop_w": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}),
            "crop_h": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}),
            "target_width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
            "target_height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
            "text_g": ("STRING", {"multiline": True, "default": "CLIP_G"}),
            "clip": ("CLIP", ),
            "text_l": ("STRING", {"multiline": True, "default": "CLIP_L"}),
        }}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "advanced/conditioning"

    def encode(self, clip, width, height, crop_w, crop_h, target_width, target_height, text_g, text_l):
        tokens = clip.tokenize(text_g)
        tokens["l"] = clip.tokenize(text_l)["l"]
        # Pad the shorter stream with empty-prompt tokens until lengths match.
        if len(tokens["l"]) != len(tokens["g"]):
            empty = clip.tokenize("")
            while len(tokens["l"]) < len(tokens["g"]):
                tokens["l"] += empty["l"]
            while len(tokens["l"]) > len(tokens["g"]):
                tokens["g"] += empty["g"]
        cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
        return ([[cond, {"pooled_output": pooled, "width": width, "height": height, "crop_w": crop_w, "crop_h": crop_h, "target_width": target_width, "target_height": target_height}]], )
56
+
57
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
# Registry consumed by the node loader: node name -> implementing class.
NODE_CLASS_MAPPINGS = {
    "CLIPTextEncodeSDXLRefiner": CLIPTextEncodeSDXLRefiner,
    "CLIPTextEncodeSDXL": CLIPTextEncodeSDXL,
}
ldm_patched/contrib/nodes_compositing.py ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Original code from Comfy, https://github.com/comfyanonymous/ComfyUI
2
+
3
+
4
+
5
+ import numpy as np
6
+ import torch
7
+ import ldm_patched.modules.utils
8
+ from enum import Enum
9
+
10
def resize_mask(mask, shape):
    """Bilinearly resize a (..., H, W) mask batch to shape = (new_H, new_W)."""
    # Flatten leading dims into the batch and add a channel dim for interpolate.
    flat = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
    resized = torch.nn.functional.interpolate(flat, size=(shape[0], shape[1]), mode="bilinear")
    return resized.squeeze(1)
12
+
13
class PorterDuffMode(Enum):
    # Porter-Duff compositing operators. The member names are exposed as the
    # node's "mode" choices and each maps to a branch in porter_duff_composite.
    ADD = 0
    CLEAR = 1
    DARKEN = 2
    DST = 3
    DST_ATOP = 4
    DST_IN = 5
    DST_OUT = 6
    DST_OVER = 7
    LIGHTEN = 8
    MULTIPLY = 9
    OVERLAY = 10
    SCREEN = 11
    SRC = 12
    SRC_ATOP = 13
    SRC_IN = 14
    SRC_OUT = 15
    SRC_OVER = 16
    XOR = 17
32
+
33
+
34
def porter_duff_composite(src_image: torch.Tensor, src_alpha: torch.Tensor, dst_image: torch.Tensor, dst_alpha: torch.Tensor, mode: PorterDuffMode):
    """Composite a source image/alpha pair over a destination pair with a Porter-Duff operator.

    The branch formulas follow the premultiplied-alpha form of the operators
    (e.g. SRC_OVER is `src + (1 - src_alpha) * dst`). All four tensors are
    expected to be broadcast-compatible; values are assumed in [0, 1].

    Returns:
        (out_image, out_alpha); both are None for an unrecognized mode.
    """
    if mode == PorterDuffMode.ADD:
        out_alpha = torch.clamp(src_alpha + dst_alpha, 0, 1)
        out_image = torch.clamp(src_image + dst_image, 0, 1)
    elif mode == PorterDuffMode.CLEAR:
        out_alpha = torch.zeros_like(dst_alpha)
        out_image = torch.zeros_like(dst_image)
    elif mode == PorterDuffMode.DARKEN:
        out_alpha = src_alpha + dst_alpha - src_alpha * dst_alpha
        out_image = (1 - dst_alpha) * src_image + (1 - src_alpha) * dst_image + torch.min(src_image, dst_image)
    elif mode == PorterDuffMode.DST:
        out_alpha = dst_alpha
        out_image = dst_image
    elif mode == PorterDuffMode.DST_ATOP:
        out_alpha = src_alpha
        out_image = src_alpha * dst_image + (1 - dst_alpha) * src_image
    elif mode == PorterDuffMode.DST_IN:
        out_alpha = src_alpha * dst_alpha
        out_image = dst_image * src_alpha
    elif mode == PorterDuffMode.DST_OUT:
        out_alpha = (1 - src_alpha) * dst_alpha
        out_image = (1 - src_alpha) * dst_image
    elif mode == PorterDuffMode.DST_OVER:
        out_alpha = dst_alpha + (1 - dst_alpha) * src_alpha
        out_image = dst_image + (1 - dst_alpha) * src_image
    elif mode == PorterDuffMode.LIGHTEN:
        out_alpha = src_alpha + dst_alpha - src_alpha * dst_alpha
        out_image = (1 - dst_alpha) * src_image + (1 - src_alpha) * dst_image + torch.max(src_image, dst_image)
    elif mode == PorterDuffMode.MULTIPLY:
        out_alpha = src_alpha * dst_alpha
        out_image = src_image * dst_image
    elif mode == PorterDuffMode.OVERLAY:
        out_alpha = src_alpha + dst_alpha - src_alpha * dst_alpha
        out_image = torch.where(2 * dst_image < dst_alpha, 2 * src_image * dst_image,
                                src_alpha * dst_alpha - 2 * (dst_alpha - src_image) * (src_alpha - dst_image))
    elif mode == PorterDuffMode.SCREEN:
        out_alpha = src_alpha + dst_alpha - src_alpha * dst_alpha
        out_image = src_image + dst_image - src_image * dst_image
    elif mode == PorterDuffMode.SRC:
        out_alpha = src_alpha
        out_image = src_image
    elif mode == PorterDuffMode.SRC_ATOP:
        out_alpha = dst_alpha
        out_image = dst_alpha * src_image + (1 - src_alpha) * dst_image
    elif mode == PorterDuffMode.SRC_IN:
        out_alpha = src_alpha * dst_alpha
        out_image = src_image * dst_alpha
    elif mode == PorterDuffMode.SRC_OUT:
        out_alpha = (1 - dst_alpha) * src_alpha
        out_image = (1 - dst_alpha) * src_image
    elif mode == PorterDuffMode.SRC_OVER:
        out_alpha = src_alpha + (1 - src_alpha) * dst_alpha
        out_image = src_image + (1 - src_alpha) * dst_image
    elif mode == PorterDuffMode.XOR:
        out_alpha = (1 - dst_alpha) * src_alpha + (1 - src_alpha) * dst_alpha
        out_image = (1 - dst_alpha) * src_image + (1 - src_alpha) * dst_image
    else:
        # Unknown mode: signal to the caller with None outputs.
        out_alpha = None
        out_image = None
    return out_image, out_alpha
94
+
95
+
96
class PorterDuffImageComposite:
    """Node: composite two IMAGE/MASK pairs using a Porter-Duff blend mode."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "source": ("IMAGE",),
                "source_alpha": ("MASK",),
                "destination": ("IMAGE",),
                "destination_alpha": ("MASK",),
                "mode": ([mode.name for mode in PorterDuffMode], {"default": PorterDuffMode.DST.name}),
            },
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "composite"
    CATEGORY = "mask/compositing"

    def composite(self, source: torch.Tensor, source_alpha: torch.Tensor, destination: torch.Tensor, destination_alpha: torch.Tensor, mode):
        # Process min(batch sizes) so mismatched batch lengths never index
        # out of range.
        batch_size = min(len(source), len(source_alpha), len(destination), len(destination_alpha))
        out_images = []
        out_alphas = []

        for i in range(batch_size):
            src_image = source[i]
            dst_image = destination[i]

            assert src_image.shape[2] == dst_image.shape[2] # inputs need to have same number of channels

            # Masks are (H, W); add a trailing channel dim to match images.
            src_alpha = source_alpha[i].unsqueeze(2)
            dst_alpha = destination_alpha[i].unsqueeze(2)

            # Resize everything to the destination image's spatial size:
            # first the destination alpha, then the source image/alpha.
            if dst_alpha.shape[:2] != dst_image.shape[:2]:
                upscale_input = dst_alpha.unsqueeze(0).permute(0, 3, 1, 2)
                upscale_output = ldm_patched.modules.utils.common_upscale(upscale_input, dst_image.shape[1], dst_image.shape[0], upscale_method='bicubic', crop='center')
                dst_alpha = upscale_output.permute(0, 2, 3, 1).squeeze(0)
            if src_image.shape != dst_image.shape:
                upscale_input = src_image.unsqueeze(0).permute(0, 3, 1, 2)
                upscale_output = ldm_patched.modules.utils.common_upscale(upscale_input, dst_image.shape[1], dst_image.shape[0], upscale_method='bicubic', crop='center')
                src_image = upscale_output.permute(0, 2, 3, 1).squeeze(0)
            if src_alpha.shape != dst_alpha.shape:
                upscale_input = src_alpha.unsqueeze(0).permute(0, 3, 1, 2)
                upscale_output = ldm_patched.modules.utils.common_upscale(upscale_input, dst_alpha.shape[1], dst_alpha.shape[0], upscale_method='bicubic', crop='center')
                src_alpha = upscale_output.permute(0, 2, 3, 1).squeeze(0)

            out_image, out_alpha = porter_duff_composite(src_image, src_alpha, dst_image, dst_alpha, PorterDuffMode[mode])

            out_images.append(out_image)
            # Drop the channel dim again so the output is a (H, W) MASK.
            out_alphas.append(out_alpha.squeeze(2))

        result = (torch.stack(out_images), torch.stack(out_alphas))
        return result
147
+
148
+
149
class SplitImageWithAlpha:
    """Split an RGBA (or RGB) IMAGE batch into color channels and an inverted-alpha MASK."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
            }
        }

    CATEGORY = "mask/compositing"
    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "split_image_with_alpha"

    def split_image_with_alpha(self, image: torch.Tensor):
        color_parts = []
        alpha_parts = []
        for frame in image:
            color_parts.append(frame[:, :, :3])
            # Images without an alpha channel are treated as fully opaque.
            if frame.shape[2] > 3:
                alpha_parts.append(frame[:, :, 3])
            else:
                alpha_parts.append(torch.ones_like(frame[:, :, 0]))
        # MASK convention is inverted relative to alpha (1 = fully masked).
        return (torch.stack(color_parts), 1.0 - torch.stack(alpha_parts))
167
+
168
+
169
class JoinImageWithAlpha:
    """Merge an RGB IMAGE batch with an (inverted) MASK into RGBA images."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "alpha": ("MASK",),
            }
        }

    CATEGORY = "mask/compositing"
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "join_image_with_alpha"

    def join_image_with_alpha(self, image: torch.Tensor, alpha: torch.Tensor):
        count = min(len(image), len(alpha))

        # MASK is inverted relative to alpha; resize it to the image's H/W first.
        alpha = 1.0 - resize_mask(alpha, image.shape[1:])
        joined = [
            torch.cat((image[idx][:, :, :3], alpha[idx].unsqueeze(2)), dim=2)
            for idx in range(count)
        ]
        return (torch.stack(joined),)
193
+
194
+
195
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
# Registries consumed by the node loader: node name -> class / display name.
NODE_CLASS_MAPPINGS = {
    "PorterDuffImageComposite": PorterDuffImageComposite,
    "SplitImageWithAlpha": SplitImageWithAlpha,
    "JoinImageWithAlpha": JoinImageWithAlpha,
}


NODE_DISPLAY_NAME_MAPPINGS = {
    "PorterDuffImageComposite": "Porter-Duff Image Composite",
    "SplitImageWithAlpha": "Split Image with Alpha",
    "JoinImageWithAlpha": "Join Image with Alpha",
}
ldm_patched/contrib/nodes_cond.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
class CLIPTextEncodeControlnet:
    """Encode text and attach it to conditioning as controlnet-specific
    cross-attention / pooled outputs, without touching the base embedding."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "clip": ("CLIP", ),
            "conditioning": ("CONDITIONING", ),
            "text": ("STRING", {"multiline": True, "dynamicPrompts": True}),
        }}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing/conditioning"

    def encode(self, clip, conditioning, text):
        tokens = clip.tokenize(text)
        cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
        out = []
        for entry in conditioning:
            # Copy the options dict so the input conditioning is not mutated.
            new_entry = [entry[0], entry[1].copy()]
            new_entry[1]['cross_attn_controlnet'] = cond
            new_entry[1]['pooled_output_controlnet'] = pooled
            out.append(new_entry)
        return (out, )
+
23
class T5TokenizerOptions:
    """Set minimum padding/length tokenizer options for every known T5
    variant on a cloned CLIP object."""

    @classmethod
    def INPUT_TYPES(s):
        int_opts = {"default": 0, "min": 0, "max": 10000, "step": 1}
        return {
            "required": {
                "clip": ("CLIP", ),
                "min_padding": ("INT", dict(int_opts)),
                "min_length": ("INT", dict(int_opts)),
            }
        }

    CATEGORY = "_for_testing/conditioning"
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_options"

    def set_options(self, clip, min_padding, min_length):
        clip = clip.clone()
        # Apply the same options to every T5 tokenizer family the loader may use.
        for variant in ("t5xxl", "pile_t5xl", "t5base", "mt5xl", "umt5xxl"):
            clip.set_tokenizer_option("{}_min_padding".format(variant), min_padding)
            clip.set_tokenizer_option("{}_min_length".format(variant), min_length)

        return (clip, )
+
46
+ # Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
47
+ NODE_CLASS_MAPPINGS = {
48
+ "CLIPTextEncodeControlnet": CLIPTextEncodeControlnet,
49
+ "T5TokenizerOptions": T5TokenizerOptions,
50
+ }
ldm_patched/contrib/nodes_controlnet.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ldm_patched.controlnet.control_types import UNION_CONTROLNET_TYPES
2
+ import nodes
3
+ import ldm_patched.modules.utils
4
+
5
class SetUnionControlNetType:
    """Select which control type a Union ControlNet should act as."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "control_net": ("CONTROL_NET", ),
            "type": (["auto"] + list(UNION_CONTROLNET_TYPES.keys()),)
        }}

    CATEGORY = "conditioning/controlnet"
    RETURN_TYPES = ("CONTROL_NET",)

    FUNCTION = "set_controlnet_type"

    def set_controlnet_type(self, control_net, type):
        # Work on a copy so the incoming control net object is not mutated.
        control_net = control_net.copy()
        # "auto" (and any unknown name) maps to -1 -> pass an empty type list.
        type_number = UNION_CONTROLNET_TYPES.get(type, -1)
        control_type = [type_number] if type_number >= 0 else []
        control_net.set_extra_arg("control_type", control_type)

        return (control_net,)
+
27
class ControlNetInpaintingAliMamaApply(nodes.ControlNetApplyAdvanced):
    """Apply an inpainting ControlNet (AliMama-style): blanks the masked region
    of the control image and, when the control net expects it, concatenates the
    inverted mask as an extra input."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING", ),
                             "negative": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "vae": ("VAE", ),
                             "image": ("IMAGE", ),
                             "mask": ("MASK", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}

    FUNCTION = "apply_inpaint_controlnet"

    CATEGORY = "conditioning/controlnet"

    def apply_inpaint_controlnet(self, positive, negative, control_net, vae, image, mask, strength, start_percent, end_percent):
        extra_concat = []
        if control_net.concat_mask:
            # Invert the mask and reshape to (B, 1, H, W) for concatenation.
            mask = 1.0 - mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
            # Resize the mask to the image size and zero out the masked pixels
            # of the control image (round() makes the mask binary).
            mask_apply = ldm_patched.modules.utils.common_upscale(mask, image.shape[2], image.shape[1], "bilinear", "center").round()
            image = image * mask_apply.movedim(1, -1).repeat(1, 1, 1, image.shape[3])
            extra_concat = [mask]

        # Delegate to the base advanced-apply with the mask as extra concat input.
        return self.apply_controlnet(positive, negative, control_net, image, strength, start_percent, end_percent, vae=vae, extra_concat=extra_concat)
+
55
+
56
+
57
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
# Registry consumed by the node loader: node name -> implementing class.
NODE_CLASS_MAPPINGS = {
    "SetUnionControlNetType": SetUnionControlNetType,
    "ControlNetInpaintingAliMamaApply": ControlNetInpaintingAliMamaApply,
}
ldm_patched/contrib/nodes_cosmos.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import nodes
2
+ import torch
3
+ import ldm_patched.modules.model_management
4
+ import ldm_patched.modules.utils
5
+ import ldm_patched.utils.latent_visualization
6
+
7
+
8
class EmptyCosmosLatentVideo:
    """Produce an all-zero Cosmos video latent of the requested size."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 1280, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                              "height": ("INT", {"default": 704, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                              "length": ("INT", {"default": 121, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 8}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent/video"

    def generate(self, width, height, length, batch_size=1):
        # Cosmos latents use 16 channels with 8x temporal and 8x spatial compression.
        frames = ((length - 1) // 8) + 1
        latent = torch.zeros(
            [batch_size, 16, frames, height // 8, width // 8],
            device=ldm_patched.modules.model_management.intermediate_device())
        return ({"samples": latent}, )
23
+
24
+
25
def vae_encode_with_padding(vae, image, width, height, length, padding=0):
    """Encode up to `length` frames with the VAE, padding with mid-gray frames.

    The frame count is padded to be compatible with the 8x temporal
    compression (plus `padding` extra latent frames); only the latent frames
    covering the real pixels are returned.
    """
    pixels = ldm_patched.modules.utils.common_upscale(image[..., :3].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
    pixel_len = min(pixels.shape[0], length)
    padded_length = min(length, (((pixel_len - 1) // 8) + 1 + padding) * 8 - 7)
    # 0.5 = neutral gray in the VAE's 0..1 pixel space.
    padded_pixels = torch.ones((padded_length, height, width, 3)) * 0.5
    padded_pixels[:pixel_len] = pixels[:pixel_len]
    latent_len = ((pixel_len - 1) // 8) + 1
    return vae.encode(padded_pixels)[:, :, :latent_len]
34
+
35
+
36
class CosmosImageToVideoLatent:
    """Build a Cosmos video latent conditioned on optional first/last frames.

    Encoded start/end frames are written into the latent and their time steps
    are cleared in the noise mask so the sampler keeps them fixed.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"vae": ("VAE", ),
                             "width": ("INT", {"default": 1280, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                             "height": ("INT", {"default": 704, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                             "length": ("INT", {"default": 121, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 8}),
                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
                             },
                "optional": {"start_image": ("IMAGE", ),
                             "end_image": ("IMAGE", ),
                             }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning/inpaint"

    def encode(self, vae, width, height, length, batch_size, start_image=None, end_image=None):
        device = ldm_patched.modules.model_management.intermediate_device()
        t = ((length - 1) // 8) + 1
        latent = torch.zeros([1, 16, t, height // 8, width // 8], device=device)

        # No conditioning frames: plain empty latent, no noise mask needed.
        if start_image is None and end_image is None:
            return ({"samples": latent},)

        mask = torch.ones([latent.shape[0], 1, t, latent.shape[-2], latent.shape[-1]], device=device)

        if start_image is not None:
            latent_temp = vae_encode_with_padding(vae, start_image, width, height, length, padding=1)
            latent[:, :, :latent_temp.shape[-3]] = latent_temp
            mask[:, :, :latent_temp.shape[-3]] *= 0.0

        if end_image is not None:
            latent_temp = vae_encode_with_padding(vae, end_image, width, height, length, padding=0)
            latent[:, :, -latent_temp.shape[-3]:] = latent_temp
            mask[:, :, -latent_temp.shape[-3]:] *= 0.0

        out_latent = {
            "samples": latent.repeat((batch_size,) + (1,) * (latent.ndim - 1)),
            "noise_mask": mask.repeat((batch_size,) + (1,) * (mask.ndim - 1)),
        }
        return (out_latent,)
78
+
79
class CosmosPredict2ImageToVideoLatent:
    """Build a Cosmos Predict2 video latent conditioned on optional first/last frames.

    Same idea as CosmosImageToVideoLatent but with 4x temporal compression and
    a latent-format shift applied to the unconditioned region.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"vae": ("VAE", ),
                             "width": ("INT", {"default": 848, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                             "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                             "length": ("INT", {"default": 93, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}),
                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
                             },
                "optional": {"start_image": ("IMAGE", ),
                             "end_image": ("IMAGE", ),
                             }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning/inpaint"

    def encode(self, vae, width, height, length, batch_size, start_image=None, end_image=None):
        device = ldm_patched.modules.model_management.intermediate_device()
        t = ((length - 1) // 4) + 1
        latent = torch.zeros([1, 16, t, height // 8, width // 8], device=device)

        if start_image is None and end_image is None:
            return ({"samples": latent},)

        mask = torch.ones([latent.shape[0], 1, t, latent.shape[-2], latent.shape[-1]], device=device)

        if start_image is not None:
            latent_temp = vae_encode_with_padding(vae, start_image, width, height, length, padding=1)
            latent[:, :, :latent_temp.shape[-3]] = latent_temp
            mask[:, :, :latent_temp.shape[-3]] *= 0.0

        if end_image is not None:
            latent_temp = vae_encode_with_padding(vae, end_image, width, height, length, padding=0)
            latent[:, :, -latent_temp.shape[-3]:] = latent_temp
            mask[:, :, -latent_temp.shape[-3]:] *= 0.0

        out_latent = {}
        # Unconditioned (mask==1) positions get the latent format's processed
        # zero value; encoded frames (mask==0) are kept as-is.
        # NOTE(review): this uses the Wan21 latent format for a Cosmos Predict2
        # node — presumably Predict2 shares Wan's VAE latent space; verify.
        latent_format = ldm_patched.utils.latent_visualization.Wan21()
        latent = latent_format.process_out(latent) * mask + latent * (1.0 - mask)
        out_latent["samples"] = latent.repeat((batch_size,) + (1,) * (latent.ndim - 1))
        out_latent["noise_mask"] = mask.repeat((batch_size,) + (1,) * (mask.ndim - 1))
        return (out_latent,)
123
+
124
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
# Keys intentionally match the class names.
NODE_CLASS_MAPPINGS = {cls.__name__: cls for cls in (
    EmptyCosmosLatentVideo,
    CosmosImageToVideoLatent,
    CosmosPredict2ImageToVideoLatent,
)}
ldm_patched/contrib/nodes_custom_sampler.py CHANGED
@@ -5,7 +5,7 @@ from ldm_patched.k_diffusion import sampling as k_diffusion_sampling
5
  import ldm_patched.utils.latent_visualization
6
  import torch
7
  import ldm_patched.modules.utils
8
- import node_helpers
9
 
10
 
11
  class BasicScheduler:
@@ -536,7 +536,7 @@ class Guider_DualCFG(ldm_patched.modules.samplers.CFGGuider):
536
  self.cfg2 = cfg2
537
 
538
  def set_conds(self, positive, middle, negative):
539
- middle = node_helpers.conditioning_set_values(middle, {"prompt_type": "negative"})
540
  self.inner_set_conds({"positive": positive, "middle": middle, "negative": negative})
541
 
542
  def predict_noise(self, x, timestep, model_options={}, seed=None):
@@ -689,6 +689,7 @@ class AddNoise:
689
  return (out,)
690
 
691
 
 
692
  NODE_CLASS_MAPPINGS = {
693
  "SamplerCustom": SamplerCustom,
694
  "BasicScheduler": BasicScheduler,
 
5
  import ldm_patched.utils.latent_visualization
6
  import torch
7
  import ldm_patched.modules.utils
8
+ import ldm_patched.contrib.node_helpers
9
 
10
 
11
  class BasicScheduler:
 
536
  self.cfg2 = cfg2
537
 
538
  def set_conds(self, positive, middle, negative):
539
+ middle = ldm_patched.contrib.node_helpers.conditioning_set_values(middle, {"prompt_type": "negative"})
540
  self.inner_set_conds({"positive": positive, "middle": middle, "negative": negative})
541
 
542
  def predict_noise(self, x, timestep, model_options={}, seed=None):
 
689
  return (out,)
690
 
691
 
692
+ # Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
693
  NODE_CLASS_MAPPINGS = {
694
  "SamplerCustom": SamplerCustom,
695
  "BasicScheduler": BasicScheduler,
ldm_patched/contrib/nodes_differential_diffusion.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # code adapted from https://github.com/exx8/differential-diffusion
2
+
3
+ import torch
4
+
5
class DifferentialDiffusion():
    """Differential Diffusion (https://github.com/exx8/differential-diffusion).

    Turns a soft denoise mask into a per-step binary mask so that each region
    stops being re-denoised at the timestep encoded by its mask value.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model": ("MODEL", ),
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "apply"
    CATEGORY = "_for_testing"
    INIT = False

    def apply(self, model):
        patched = model.clone()
        patched.set_model_denoise_mask_function(self.forward)
        return (patched,)

    def forward(self, sigma: torch.Tensor, denoise_mask: torch.Tensor, extra_options: dict):
        """Binarize denoise_mask: 1 where the current timestep is still above the
        region's cut-off, 0 where denoising should stop."""
        model = extra_options["model"]
        step_sigmas = extra_options["sigmas"]
        sampling = model.inner_model.model_sampling

        # Never interpolate below the schedule's final sigma (or the model's sigma_min).
        sigma_to = sampling.sigma_min
        if step_sigmas[-1] > sigma_to:
            sigma_to = step_sigmas[-1]
        sigma_from = step_sigmas[0]

        ts_from = sampling.timestep(sigma_from)
        ts_to = sampling.timestep(sigma_to)
        current_ts = sampling.timestep(sigma)

        # Fraction of the schedule still remaining, in timestep space.
        threshold = (current_ts - ts_to) / (ts_from - ts_to)
        return (denoise_mask >= threshold).to(denoise_mask.dtype)
35
+
36
+
37
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
NODE_CLASS_MAPPINGS = {cls.__name__: cls for cls in (DifferentialDiffusion,)}
NODE_DISPLAY_NAME_MAPPINGS = {
    "DifferentialDiffusion": "Differential Diffusion",
}
ldm_patched/contrib/nodes_edit_model.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ldm_patched.contrib.node_helpers
2
+
3
+
4
class ReferenceLatent:
    """Attach a guiding latent to edit-model conditioning."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             },
                "optional": {"latent": ("LATENT", ),}
                }

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "advanced/conditioning/edit_models"
    DESCRIPTION = "This node sets the guiding latent for an edit model. If the model supports it you can chain multiple to set multiple reference images."

    def append(self, conditioning, latent=None):
        # No latent supplied: pass the conditioning through untouched.
        if latent is None:
            return (conditioning, )
        updated = ldm_patched.contrib.node_helpers.conditioning_set_values(
            conditioning, {"reference_latents": [latent["samples"]]}, append=True)
        return (updated, )
22
+
23
+
24
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
NODE_CLASS_MAPPINGS = {cls.__name__: cls for cls in (ReferenceLatent,)}
ldm_patched/contrib/nodes_flux.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ldm_patched.contrib.node_helpers
2
+ import ldm_patched.modules.utils
3
+
4
class CLIPTextEncodeFlux:
    """Encode separate CLIP-L and T5-XXL prompts for Flux, with a guidance value."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "clip": ("CLIP", ),
            "clip_l": ("STRING", {"multiline": True, "dynamicPrompts": True}),
            "t5xxl": ("STRING", {"multiline": True, "dynamicPrompts": True}),
            "guidance": ("FLOAT", {"default": 3.5, "min": 0.0, "max": 100.0, "step": 0.1}),
            }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "advanced/conditioning/flux"

    def encode(self, clip, clip_l, t5xxl, guidance):
        # Tokenize each prompt separately, then merge the t5 tokens into the
        # clip-l token dict so both encoders see their own text.
        tokens = clip.tokenize(clip_l)
        tokens["t5xxl"] = clip.tokenize(t5xxl)["t5xxl"]
        return (clip.encode_from_tokens_scheduled(tokens, add_dict={"guidance": guidance}), )
23
+
24
class FluxGuidance:
    """Write a guidance strength value into Flux conditioning."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "conditioning": ("CONDITIONING", ),
            "guidance": ("FLOAT", {"default": 3.5, "min": 0.0, "max": 100.0, "step": 0.1}),
            }}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "advanced/conditioning/flux"

    def append(self, conditioning, guidance):
        updated = ldm_patched.contrib.node_helpers.conditioning_set_values(conditioning, {"guidance": guidance})
        return (updated, )
40
+
41
+
42
class FluxDisableGuidance:
    """Clear the guidance embed on Flux conditioning (sets it to None)."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "conditioning": ("CONDITIONING", ),
            }}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "advanced/conditioning/flux"
    DESCRIPTION = "This node completely disables the guidance embed on Flux and Flux like models"

    def append(self, conditioning):
        updated = ldm_patched.contrib.node_helpers.conditioning_set_values(conditioning, {"guidance": None})
        return (updated, )
58
+
59
+
60
# Preferred (width, height) pairs for Flux Kontext, ordered from portrait to
# landscape. The list is symmetric: the landscape half mirrors the portrait
# half with width/height swapped, around the square 1024x1024 midpoint.
_PORTRAIT_KONTEXT_RESOLUTIONS = [
    (672, 1568),
    (688, 1504),
    (720, 1456),
    (752, 1392),
    (800, 1328),
    (832, 1248),
    (880, 1184),
    (944, 1104),
]
PREFERED_KONTEXT_RESOLUTIONS = (
    _PORTRAIT_KONTEXT_RESOLUTIONS
    + [(1024, 1024)]
    + [(h, w) for (w, h) in reversed(_PORTRAIT_KONTEXT_RESOLUTIONS)]
)
79
+
80
+
81
class FluxKontextImageScale:
    """Resize an image to the closest-aspect-ratio Flux Kontext resolution."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"image": ("IMAGE", ),
                             },
               }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "scale"

    CATEGORY = "advanced/conditioning/flux"
    DESCRIPTION = "This node resizes the image to one that is more optimal for flux kontext."

    def scale(self, image):
        # image is NHWC; pick the preferred resolution whose aspect ratio is
        # closest to the input's, then lanczos-resize to it.
        height, width = image.shape[1], image.shape[2]
        aspect_ratio = width / height
        _, width, height = min((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS)
        image = ldm_patched.modules.utils.common_upscale(image.movedim(-1, 1), width, height, "lanczos", "center").movedim(1, -1)
        return (image, )
101
+
102
+
103
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
NODE_CLASS_MAPPINGS = {cls.__name__: cls for cls in (
    CLIPTextEncodeFlux,
    FluxGuidance,
    FluxDisableGuidance,
    FluxKontextImageScale,
)}
ldm_patched/contrib/nodes_freelunch.py ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py
2
+
3
+ #code originally taken from: https://github.com/ChenyangSi/FreeU (under MIT License)
4
+
5
+ import torch
6
+
7
+
8
def Fourier_filter(x, threshold, scale):
    """Scale the low-frequency band of `x` (B, C, H, W) by `scale` in Fourier space.

    A (2*threshold)^2 box around the centred spectrum is multiplied by `scale`;
    all other frequencies pass through unchanged. The result keeps x's dtype.
    """
    # Forward FFT with the zero frequency shifted to the centre.
    spectrum = torch.fft.fftshift(torch.fft.fftn(x.float(), dim=(-2, -1)), dim=(-2, -1))

    B, C, H, W = spectrum.shape
    mask = torch.ones((B, C, H, W), device=x.device)
    crow, ccol = H // 2, W // 2
    mask[..., crow - threshold:crow + threshold, ccol - threshold:ccol + threshold] = scale
    spectrum = spectrum * mask

    # Inverse transform; imaginary residue is numerical noise, keep the real part.
    filtered = torch.fft.ifftn(torch.fft.ifftshift(spectrum, dim=(-2, -1)), dim=(-2, -1)).real
    return filtered.to(x.dtype)
25
+
26
+
27
class FreeU:
    """FreeU (https://github.com/ChenyangSi/FreeU): re-weights UNet features
    at inference time without retraining.

    b1/b2 scale the first half of the backbone channels at two resolutions;
    s1/s2 damp the low-frequency band of the matching skip connections.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "b1": ("FLOAT", {"default": 1.1, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "b2": ("FLOAT", {"default": 1.2, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "s1": ("FLOAT", {"default": 0.9, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "s2": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "model_patches"

    def patch(self, model, b1, b2, s1, s2):
        model_channels = model.model.model_config.unet_config["model_channels"]
        # Map feature-channel count -> (backbone scale, skip scale).
        scale_dict = {model_channels * 4: (b1, s1), model_channels * 2: (b2, s2)}
        on_cpu_devices = {}

        def output_block_patch(h, hsp, transformer_options):
            scale = scale_dict.get(int(h.shape[1]), None)
            if scale is not None:
                h[:, :h.shape[1] // 2] = h[:, :h.shape[1] // 2] * scale[0]
                if hsp.device not in on_cpu_devices:
                    # Fix: was a bare `except:` which also swallowed
                    # KeyboardInterrupt/SystemExit; only FFT failures should
                    # trigger the CPU fallback.
                    try:
                        hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
                    except Exception:
                        print("Device", hsp.device, "does not support the torch.fft functions used in the FreeU node, switching to CPU.")
                        on_cpu_devices[hsp.device] = True
                        hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
                else:
                    # Device previously failed: go straight to the CPU path.
                    hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)

            return h, hsp

        m = model.clone()
        m.set_model_output_block_patch(output_block_patch)
        return (m, )
+
66
class FreeU_V2:
    """FreeU v2: like FreeU but modulates the backbone scale per-sample by the
    normalized channel-mean activation instead of applying it uniformly.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "b1": ("FLOAT", {"default": 1.3, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "b2": ("FLOAT", {"default": 1.4, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "s1": ("FLOAT", {"default": 0.9, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "s2": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "model_patches"

    def patch(self, model, b1, b2, s1, s2):
        model_channels = model.model.model_config.unet_config["model_channels"]
        # Map feature-channel count -> (backbone scale, skip scale).
        scale_dict = {model_channels * 4: (b1, s1), model_channels * 2: (b2, s2)}
        on_cpu_devices = {}

        def output_block_patch(h, hsp, transformer_options):
            scale = scale_dict.get(int(h.shape[1]), None)
            if scale is not None:
                # Per-sample min-max normalized channel mean in [0, 1] modulates
                # how strongly the backbone boost is applied.
                hidden_mean = h.mean(1).unsqueeze(1)
                B = hidden_mean.shape[0]
                hidden_max, _ = torch.max(hidden_mean.view(B, -1), dim=-1, keepdim=True)
                hidden_min, _ = torch.min(hidden_mean.view(B, -1), dim=-1, keepdim=True)
                hidden_mean = (hidden_mean - hidden_min.unsqueeze(2).unsqueeze(3)) / (hidden_max - hidden_min).unsqueeze(2).unsqueeze(3)

                h[:, :h.shape[1] // 2] = h[:, :h.shape[1] // 2] * ((scale[0] - 1 ) * hidden_mean + 1)

                if hsp.device not in on_cpu_devices:
                    # Fix: was a bare `except:` which also swallowed
                    # KeyboardInterrupt/SystemExit; only FFT failures should
                    # trigger the CPU fallback.
                    try:
                        hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
                    except Exception:
                        print("Device", hsp.device, "does not support the torch.fft functions used in the FreeU node, switching to CPU.")
                        on_cpu_devices[hsp.device] = True
                        hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
                else:
                    # Device previously failed: go straight to the CPU path.
                    hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)

            return h, hsp

        m = model.clone()
        m.set_model_output_block_patch(output_block_patch)
        return (m, )
+
112
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
NODE_CLASS_MAPPINGS = {cls.__name__: cls for cls in (FreeU, FreeU_V2)}
ldm_patched/contrib/nodes_fresca.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Code based on https://github.com/WikiChao/FreSca (MIT License)
2
+ import torch
3
+ import torch.fft as fft
4
+
5
+
6
def Fourier_filter(x, scale_low=1.0, scale_high=1.5, freq_cutoff=20):
    """
    Apply frequency-dependent scaling to an image tensor using Fourier transforms.

    Parameters:
        x: Input tensor of shape (B, C, H, W)
        scale_low: Scaling factor for low-frequency components (default: 1.0)
        scale_high: Scaling factor for high-frequency components (default: 1.5)
        freq_cutoff: Number of frequency indices around center to consider as low-frequency (default: 20)

    Returns:
        x_filtered: Filtered version of x in spatial domain with frequency-specific scaling applied.
    """
    dtype, device = x.dtype, x.device

    # FFT in float32, zero frequency shifted to the centre.
    spectrum = fft.fftshift(fft.fftn(x.to(torch.float32), dim=(-2, -1)), dim=(-2, -1))

    # Start from the high-frequency scale everywhere, then overwrite the
    # centred low-frequency box via a narrowed view into the same storage.
    mask = torch.ones(spectrum.shape, device=device) * scale_high
    center_view = mask
    for dim in range(2, spectrum.ndim):
        half = spectrum.shape[dim] // 2
        cut = min(freq_cutoff, half)
        center_view = center_view.narrow(dim, half - cut, cut * 2)
    center_view[:] = scale_low

    spectrum = spectrum * mask

    # Back to the spatial domain; drop numerical imaginary residue.
    filtered = fft.ifftn(fft.ifftshift(spectrum, dim=(-2, -1)), dim=(-2, -1)).real
    return filtered.to(dtype)
52
+
53
+
54
class FreSca:
    """FreSca (https://github.com/WikiChao/FreSca): frequency-dependent scaling
    of the CFG guidance term before the CFG combination step."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "model": ("MODEL",),
                "scale_low": ("FLOAT", {"default": 1.0, "min": 0, "max": 10, "step": 0.01,
                                        "tooltip": "Scaling factor for low-frequency components"}),
                "scale_high": ("FLOAT", {"default": 1.25, "min": 0, "max": 10, "step": 0.01,
                                         "tooltip": "Scaling factor for high-frequency components"}),
                "freq_cutoff": ("INT", {"default": 20, "min": 1, "max": 10000, "step": 1,
                                        "tooltip": "Number of frequency indices around center to consider as low-frequency"}),
            }
        }
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"
    CATEGORY = "_for_testing"
    DESCRIPTION = "Applies frequency-dependent scaling to the guidance"

    def patch(self, model, scale_low, scale_high, freq_cutoff):
        def pre_cfg(args):
            conds_out = args["conds_out"]
            # Both cond and uncond predictions are needed to form a guidance vector.
            if len(conds_out) <= 1 or None in args["conds"][:2]:
                return conds_out
            cond, uncond = conds_out[0], conds_out[1]

            shaped_guidance = Fourier_filter(
                cond - uncond,
                scale_low=scale_low,
                scale_high=scale_high,
                freq_cutoff=freq_cutoff,
            )
            # Recombine so only the guidance direction is reshaped.
            return [shaped_guidance + uncond, uncond] + conds_out[2:]

        patched = model.clone()
        patched.set_model_sampler_pre_cfg_function(pre_cfg)
        return (patched,)
95
+
96
+
97
+ # Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
98
+ NODE_CLASS_MAPPINGS = {
99
+ "FreSca": FreSca,
100
+ }
101
+
102
+ NODE_DISPLAY_NAME_MAPPINGS = {
103
+ "FreSca": "FreSca",
104
+ }
ldm_patched/contrib/nodes_gits.py ADDED
@@ -0,0 +1,370 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # from https://github.com/zju-pi/diff-sampler/tree/main/gits-main
2
+ import numpy as np
3
+ import torch
4
+
5
def loglinear_interp(t_steps, num_steps):
    """
    Performs log-linear interpolation of a given array of decreasing numbers.

    The sequence is reversed to ascending order, interpolated linearly in log
    space onto `num_steps` evenly spaced points, then mapped back.
    """
    old_xs = np.linspace(0, 1, len(t_steps))
    old_ys = np.log(t_steps[::-1])

    resampled = np.interp(np.linspace(0, 1, num_steps), old_xs, old_ys)

    # Undo the log and restore the original (descending) order.
    return np.exp(resampled)[::-1].copy()
17
+
18
+ NOISE_LEVELS = {
19
+ 0.80: [
20
+ [14.61464119, 7.49001646, 0.02916753],
21
+ [14.61464119, 11.54541874, 6.77309084, 0.02916753],
22
+ [14.61464119, 11.54541874, 7.49001646, 3.07277966, 0.02916753],
23
+ [14.61464119, 11.54541874, 7.49001646, 5.85520077, 2.05039096, 0.02916753],
24
+ [14.61464119, 12.2308979, 8.75849152, 7.49001646, 5.85520077, 2.05039096, 0.02916753],
25
+ [14.61464119, 12.2308979, 8.75849152, 7.49001646, 5.85520077, 3.07277966, 1.56271636, 0.02916753],
26
+ [14.61464119, 12.96784878, 11.54541874, 8.75849152, 7.49001646, 5.85520077, 3.07277966, 1.56271636, 0.02916753],
27
+ [14.61464119, 13.76078796, 12.2308979, 10.90732002, 8.75849152, 7.49001646, 5.85520077, 3.07277966, 1.56271636, 0.02916753],
28
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 10.90732002, 8.75849152, 7.49001646, 5.85520077, 3.07277966, 1.56271636, 0.02916753],
29
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 5.85520077, 3.07277966, 1.56271636, 0.02916753],
30
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 4.86714602, 3.07277966, 1.56271636, 0.02916753],
31
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.31284904, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 4.86714602, 3.07277966, 1.56271636, 0.02916753],
32
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 4.86714602, 3.07277966, 1.56271636, 0.02916753],
33
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.24142551, 8.75849152, 8.30717278, 7.49001646, 6.14220476, 4.86714602, 3.07277966, 1.56271636, 0.02916753],
34
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.24142551, 8.75849152, 8.30717278, 7.49001646, 6.14220476, 4.86714602, 3.1956799, 1.98035145, 0.86115354, 0.02916753],
35
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.75859547, 9.24142551, 8.75849152, 8.30717278, 7.49001646, 6.14220476, 4.86714602, 3.1956799, 1.98035145, 0.86115354, 0.02916753],
36
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.75859547, 9.24142551, 8.75849152, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 4.65472794, 3.07277966, 1.84880662, 0.83188516, 0.02916753],
37
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.75859547, 9.24142551, 8.75849152, 8.30717278, 7.88507891, 7.49001646, 6.77309084, 5.85520077, 4.65472794, 3.07277966, 1.84880662, 0.83188516, 0.02916753],
38
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.75859547, 9.24142551, 8.75849152, 8.30717278, 7.88507891, 7.49001646, 6.77309084, 5.85520077, 4.86714602, 3.75677586, 2.84484982, 1.78698075, 0.803307, 0.02916753],
39
+ ],
40
+ 0.85: [
41
+ [14.61464119, 7.49001646, 0.02916753],
42
+ [14.61464119, 7.49001646, 1.84880662, 0.02916753],
43
+ [14.61464119, 11.54541874, 6.77309084, 1.56271636, 0.02916753],
44
+ [14.61464119, 11.54541874, 7.11996698, 3.07277966, 1.24153244, 0.02916753],
45
+ [14.61464119, 11.54541874, 7.49001646, 5.09240818, 2.84484982, 0.95350921, 0.02916753],
46
+ [14.61464119, 12.2308979, 8.75849152, 7.49001646, 5.09240818, 2.84484982, 0.95350921, 0.02916753],
47
+ [14.61464119, 12.2308979, 8.75849152, 7.49001646, 5.58536053, 3.1956799, 1.84880662, 0.803307, 0.02916753],
48
+ [14.61464119, 12.96784878, 11.54541874, 8.75849152, 7.49001646, 5.58536053, 3.1956799, 1.84880662, 0.803307, 0.02916753],
49
+ [14.61464119, 12.96784878, 11.54541874, 8.75849152, 7.49001646, 6.14220476, 4.65472794, 3.07277966, 1.84880662, 0.803307, 0.02916753],
50
+ [14.61464119, 13.76078796, 12.2308979, 10.90732002, 8.75849152, 7.49001646, 6.14220476, 4.65472794, 3.07277966, 1.84880662, 0.803307, 0.02916753],
51
+ [14.61464119, 13.76078796, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 4.65472794, 3.07277966, 1.84880662, 0.803307, 0.02916753],
52
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 4.65472794, 3.07277966, 1.84880662, 0.803307, 0.02916753],
53
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.31284904, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 4.65472794, 3.07277966, 1.84880662, 0.803307, 0.02916753],
54
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.31284904, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 4.86714602, 3.60512662, 2.6383388, 1.56271636, 0.72133851, 0.02916753],
55
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.31284904, 9.24142551, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 4.65472794, 3.46139455, 2.45070267, 1.56271636, 0.72133851, 0.02916753],
56
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.31284904, 9.24142551, 8.75849152, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 4.65472794, 3.46139455, 2.45070267, 1.56271636, 0.72133851, 0.02916753],
57
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.24142551, 8.75849152, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 4.65472794, 3.46139455, 2.45070267, 1.56271636, 0.72133851, 0.02916753],
58
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.75859547, 9.24142551, 8.75849152, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 4.65472794, 3.46139455, 2.45070267, 1.56271636, 0.72133851, 0.02916753],
59
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.75859547, 9.24142551, 8.75849152, 8.30717278, 7.88507891, 7.49001646, 6.77309084, 5.85520077, 4.65472794, 3.46139455, 2.45070267, 1.56271636, 0.72133851, 0.02916753],
60
+ ],
61
+ 0.90: [
62
+ [14.61464119, 6.77309084, 0.02916753],
63
+ [14.61464119, 7.49001646, 1.56271636, 0.02916753],
64
+ [14.61464119, 7.49001646, 3.07277966, 0.95350921, 0.02916753],
65
+ [14.61464119, 7.49001646, 4.86714602, 2.54230714, 0.89115214, 0.02916753],
66
+ [14.61464119, 11.54541874, 7.49001646, 4.86714602, 2.54230714, 0.89115214, 0.02916753],
67
+ [14.61464119, 11.54541874, 7.49001646, 5.09240818, 3.07277966, 1.61558151, 0.69515091, 0.02916753],
68
+ [14.61464119, 12.2308979, 8.75849152, 7.11996698, 4.86714602, 3.07277966, 1.61558151, 0.69515091, 0.02916753],
69
+ [14.61464119, 12.2308979, 8.75849152, 7.49001646, 5.85520077, 4.45427561, 2.95596409, 1.61558151, 0.69515091, 0.02916753],
70
+ [14.61464119, 12.2308979, 8.75849152, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.19988537, 1.24153244, 0.57119018, 0.02916753],
71
+ [14.61464119, 12.96784878, 10.90732002, 8.75849152, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.19988537, 1.24153244, 0.57119018, 0.02916753],
72
+ [14.61464119, 12.96784878, 11.54541874, 9.24142551, 8.30717278, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.19988537, 1.24153244, 0.57119018, 0.02916753],
73
+ [14.61464119, 12.96784878, 11.54541874, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 4.86714602, 3.75677586, 2.84484982, 1.84880662, 1.08895338, 0.52423614, 0.02916753],
74
+ [14.61464119, 13.76078796, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 4.86714602, 3.75677586, 2.84484982, 1.84880662, 1.08895338, 0.52423614, 0.02916753],
75
+ [14.61464119, 13.76078796, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 6.44769001, 5.58536053, 4.45427561, 3.32507086, 2.45070267, 1.61558151, 0.95350921, 0.45573691, 0.02916753],
76
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 6.44769001, 5.58536053, 4.45427561, 3.32507086, 2.45070267, 1.61558151, 0.95350921, 0.45573691, 0.02916753],
77
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 4.86714602, 3.91689563, 3.07277966, 2.27973175, 1.56271636, 0.95350921, 0.45573691, 0.02916753],
78
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.31284904, 9.24142551, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 4.86714602, 3.91689563, 3.07277966, 2.27973175, 1.56271636, 0.95350921, 0.45573691, 0.02916753],
79
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.31284904, 9.24142551, 8.75849152, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 4.86714602, 3.91689563, 3.07277966, 2.27973175, 1.56271636, 0.95350921, 0.45573691, 0.02916753],
80
+ [14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.31284904, 9.24142551, 8.75849152, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 5.09240818, 4.45427561, 3.60512662, 2.95596409, 2.19988537, 1.51179266, 0.89115214, 0.43325692, 0.02916753],
81
+ ],
82
+ 0.95: [
83
+ [14.61464119, 6.77309084, 0.02916753],
84
+ [14.61464119, 6.77309084, 1.56271636, 0.02916753],
85
+ [14.61464119, 7.49001646, 2.84484982, 0.89115214, 0.02916753],
86
+ [14.61464119, 7.49001646, 4.86714602, 2.36326075, 0.803307, 0.02916753],
87
+ [14.61464119, 7.49001646, 4.86714602, 2.95596409, 1.56271636, 0.64427125, 0.02916753],
88
+ [14.61464119, 11.54541874, 7.49001646, 4.86714602, 2.95596409, 1.56271636, 0.64427125, 0.02916753],
89
+ [14.61464119, 11.54541874, 7.49001646, 4.86714602, 3.07277966, 1.91321158, 1.08895338, 0.50118381, 0.02916753],
90
+ [14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.07277966, 1.91321158, 1.08895338, 0.50118381, 0.02916753],
91
+ [14.61464119, 12.2308979, 8.75849152, 7.49001646, 5.85520077, 4.45427561, 3.07277966, 1.91321158, 1.08895338, 0.50118381, 0.02916753],
92
+ [14.61464119, 12.2308979, 8.75849152, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.19988537, 1.41535246, 0.803307, 0.38853383, 0.02916753],
93
+ [14.61464119, 12.2308979, 8.75849152, 7.49001646, 5.85520077, 4.65472794, 3.46139455, 2.6383388, 1.84880662, 1.24153244, 0.72133851, 0.34370604, 0.02916753],
94
+ [14.61464119, 12.96784878, 10.90732002, 8.75849152, 7.49001646, 5.85520077, 4.65472794, 3.46139455, 2.6383388, 1.84880662, 1.24153244, 0.72133851, 0.34370604, 0.02916753],
95
+ [14.61464119, 12.96784878, 10.90732002, 8.75849152, 7.49001646, 6.14220476, 4.86714602, 3.75677586, 2.95596409, 2.19988537, 1.56271636, 1.05362725, 0.64427125, 0.32104823, 0.02916753],
96
+ [14.61464119, 12.96784878, 10.90732002, 8.75849152, 7.49001646, 6.44769001, 5.58536053, 4.65472794, 3.60512662, 2.95596409, 2.19988537, 1.56271636, 1.05362725, 0.64427125, 0.32104823, 0.02916753],
97
+ [14.61464119, 12.96784878, 11.54541874, 9.24142551, 8.30717278, 7.49001646, 6.44769001, 5.58536053, 4.65472794, 3.60512662, 2.95596409, 2.19988537, 1.56271636, 1.05362725, 0.64427125, 0.32104823, 0.02916753],
98
+ [14.61464119, 12.96784878, 11.54541874, 9.24142551, 8.30717278, 7.49001646, 6.44769001, 5.58536053, 4.65472794, 3.75677586, 3.07277966, 2.45070267, 1.78698075, 1.24153244, 0.83188516, 0.50118381, 0.22545385, 0.02916753],
99
+ [14.61464119, 12.96784878, 11.54541874, 9.24142551, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 5.09240818, 4.45427561, 3.60512662, 2.95596409, 2.36326075, 1.72759056, 1.24153244, 0.83188516, 0.50118381, 0.22545385, 0.02916753],
100
+ [14.61464119, 13.76078796, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 5.09240818, 4.45427561, 3.60512662, 2.95596409, 2.36326075, 1.72759056, 1.24153244, 0.83188516, 0.50118381, 0.22545385, 0.02916753],
101
+ [14.61464119, 13.76078796, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 5.09240818, 4.45427561, 3.75677586, 3.07277966, 2.45070267, 1.91321158, 1.46270394, 1.05362725, 0.72133851, 0.43325692, 0.19894916, 0.02916753],
102
+ ],
103
+ 1.00: [
104
+ [14.61464119, 1.56271636, 0.02916753],
105
+ [14.61464119, 6.77309084, 0.95350921, 0.02916753],
106
+ [14.61464119, 6.77309084, 2.36326075, 0.803307, 0.02916753],
107
+ [14.61464119, 7.11996698, 3.07277966, 1.56271636, 0.59516323, 0.02916753],
108
+ [14.61464119, 7.49001646, 4.86714602, 2.84484982, 1.41535246, 0.57119018, 0.02916753],
109
+ [14.61464119, 7.49001646, 4.86714602, 2.84484982, 1.61558151, 0.86115354, 0.38853383, 0.02916753],
110
+ [14.61464119, 11.54541874, 7.49001646, 4.86714602, 2.84484982, 1.61558151, 0.86115354, 0.38853383, 0.02916753],
111
+ [14.61464119, 11.54541874, 7.49001646, 4.86714602, 3.07277966, 1.98035145, 1.24153244, 0.72133851, 0.34370604, 0.02916753],
112
+ [14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.07277966, 1.98035145, 1.24153244, 0.72133851, 0.34370604, 0.02916753],
113
+ [14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.27973175, 1.51179266, 0.95350921, 0.54755926, 0.25053367, 0.02916753],
114
+ [14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.36326075, 1.61558151, 1.08895338, 0.72133851, 0.41087446, 0.17026083, 0.02916753],
115
+ [14.61464119, 11.54541874, 8.75849152, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.36326075, 1.61558151, 1.08895338, 0.72133851, 0.41087446, 0.17026083, 0.02916753],
116
+ [14.61464119, 11.54541874, 8.75849152, 7.49001646, 5.85520077, 4.65472794, 3.60512662, 2.84484982, 2.12350607, 1.56271636, 1.08895338, 0.72133851, 0.41087446, 0.17026083, 0.02916753],
117
+ [14.61464119, 11.54541874, 8.75849152, 7.49001646, 5.85520077, 4.65472794, 3.60512662, 2.84484982, 2.19988537, 1.61558151, 1.162866, 0.803307, 0.50118381, 0.27464288, 0.09824532, 0.02916753],
118
+ [14.61464119, 11.54541874, 8.75849152, 7.49001646, 5.85520077, 4.65472794, 3.75677586, 3.07277966, 2.45070267, 1.84880662, 1.36964464, 1.01931262, 0.72133851, 0.45573691, 0.25053367, 0.09824532, 0.02916753],
119
+ [14.61464119, 11.54541874, 8.75849152, 7.49001646, 6.14220476, 5.09240818, 4.26497746, 3.46139455, 2.84484982, 2.19988537, 1.67050016, 1.24153244, 0.92192322, 0.64427125, 0.43325692, 0.25053367, 0.09824532, 0.02916753],
120
+ [14.61464119, 11.54541874, 8.75849152, 7.49001646, 6.14220476, 5.09240818, 4.26497746, 3.60512662, 2.95596409, 2.45070267, 1.91321158, 1.51179266, 1.12534678, 0.83188516, 0.59516323, 0.38853383, 0.22545385, 0.09824532, 0.02916753],
121
+ [14.61464119, 12.2308979, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 5.09240818, 4.26497746, 3.60512662, 2.95596409, 2.45070267, 1.91321158, 1.51179266, 1.12534678, 0.83188516, 0.59516323, 0.38853383, 0.22545385, 0.09824532, 0.02916753],
122
+ [14.61464119, 12.2308979, 9.24142551, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 5.09240818, 4.26497746, 3.60512662, 2.95596409, 2.45070267, 1.91321158, 1.51179266, 1.12534678, 0.83188516, 0.59516323, 0.38853383, 0.22545385, 0.09824532, 0.02916753],
123
+ ],
124
+ 1.05: [
125
+ [14.61464119, 0.95350921, 0.02916753],
126
+ [14.61464119, 6.77309084, 0.89115214, 0.02916753],
127
+ [14.61464119, 6.77309084, 2.05039096, 0.72133851, 0.02916753],
128
+ [14.61464119, 6.77309084, 2.84484982, 1.28281462, 0.52423614, 0.02916753],
129
+ [14.61464119, 6.77309084, 3.07277966, 1.61558151, 0.803307, 0.34370604, 0.02916753],
130
+ [14.61464119, 7.49001646, 4.86714602, 2.84484982, 1.56271636, 0.803307, 0.34370604, 0.02916753],
131
+ [14.61464119, 7.49001646, 4.86714602, 2.84484982, 1.61558151, 0.95350921, 0.52423614, 0.22545385, 0.02916753],
132
+ [14.61464119, 7.49001646, 4.86714602, 3.07277966, 1.98035145, 1.24153244, 0.74807048, 0.41087446, 0.17026083, 0.02916753],
133
+ [14.61464119, 7.49001646, 4.86714602, 3.1956799, 2.27973175, 1.51179266, 0.95350921, 0.59516323, 0.34370604, 0.13792117, 0.02916753],
134
+ [14.61464119, 7.49001646, 5.09240818, 3.46139455, 2.45070267, 1.61558151, 1.08895338, 0.72133851, 0.45573691, 0.25053367, 0.09824532, 0.02916753],
135
+ [14.61464119, 11.54541874, 7.49001646, 5.09240818, 3.46139455, 2.45070267, 1.61558151, 1.08895338, 0.72133851, 0.45573691, 0.25053367, 0.09824532, 0.02916753],
136
+ [14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.36326075, 1.61558151, 1.08895338, 0.72133851, 0.45573691, 0.25053367, 0.09824532, 0.02916753],
137
+ [14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.45070267, 1.72759056, 1.24153244, 0.86115354, 0.59516323, 0.38853383, 0.22545385, 0.09824532, 0.02916753],
138
+ [14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.65472794, 3.60512662, 2.84484982, 2.19988537, 1.61558151, 1.162866, 0.83188516, 0.59516323, 0.38853383, 0.22545385, 0.09824532, 0.02916753],
139
+ [14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.65472794, 3.60512662, 2.84484982, 2.19988537, 1.67050016, 1.28281462, 0.95350921, 0.72133851, 0.52423614, 0.34370604, 0.19894916, 0.09824532, 0.02916753],
140
+ [14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.65472794, 3.60512662, 2.95596409, 2.36326075, 1.84880662, 1.41535246, 1.08895338, 0.83188516, 0.61951244, 0.45573691, 0.32104823, 0.19894916, 0.09824532, 0.02916753],
141
+ [14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.65472794, 3.60512662, 2.95596409, 2.45070267, 1.91321158, 1.51179266, 1.20157266, 0.95350921, 0.74807048, 0.57119018, 0.43325692, 0.29807833, 0.19894916, 0.09824532, 0.02916753],
142
+ [14.61464119, 11.54541874, 8.30717278, 7.11996698, 5.85520077, 4.65472794, 3.60512662, 2.95596409, 2.45070267, 1.91321158, 1.51179266, 1.20157266, 0.95350921, 0.74807048, 0.57119018, 0.43325692, 0.29807833, 0.19894916, 0.09824532, 0.02916753],
143
+ [14.61464119, 11.54541874, 8.30717278, 7.11996698, 5.85520077, 4.65472794, 3.60512662, 2.95596409, 2.45070267, 1.98035145, 1.61558151, 1.32549286, 1.08895338, 0.86115354, 0.69515091, 0.54755926, 0.41087446, 0.29807833, 0.19894916, 0.09824532, 0.02916753],
144
+ ],
145
+ 1.10: [
146
+ [14.61464119, 0.89115214, 0.02916753],
147
+ [14.61464119, 2.36326075, 0.72133851, 0.02916753],
148
+ [14.61464119, 5.85520077, 1.61558151, 0.57119018, 0.02916753],
149
+ [14.61464119, 6.77309084, 2.45070267, 1.08895338, 0.45573691, 0.02916753],
150
+ [14.61464119, 6.77309084, 2.95596409, 1.56271636, 0.803307, 0.34370604, 0.02916753],
151
+ [14.61464119, 6.77309084, 3.07277966, 1.61558151, 0.89115214, 0.4783645, 0.19894916, 0.02916753],
152
+ [14.61464119, 6.77309084, 3.07277966, 1.84880662, 1.08895338, 0.64427125, 0.34370604, 0.13792117, 0.02916753],
153
+ [14.61464119, 7.49001646, 4.86714602, 2.84484982, 1.61558151, 0.95350921, 0.54755926, 0.27464288, 0.09824532, 0.02916753],
154
+ [14.61464119, 7.49001646, 4.86714602, 2.95596409, 1.91321158, 1.24153244, 0.803307, 0.4783645, 0.25053367, 0.09824532, 0.02916753],
155
+ [14.61464119, 7.49001646, 4.86714602, 3.07277966, 2.05039096, 1.41535246, 0.95350921, 0.64427125, 0.41087446, 0.22545385, 0.09824532, 0.02916753],
156
+ [14.61464119, 7.49001646, 4.86714602, 3.1956799, 2.27973175, 1.61558151, 1.12534678, 0.803307, 0.54755926, 0.36617002, 0.22545385, 0.09824532, 0.02916753],
157
+ [14.61464119, 7.49001646, 4.86714602, 3.32507086, 2.45070267, 1.72759056, 1.24153244, 0.89115214, 0.64427125, 0.45573691, 0.32104823, 0.19894916, 0.09824532, 0.02916753],
158
+ [14.61464119, 7.49001646, 5.09240818, 3.60512662, 2.84484982, 2.05039096, 1.51179266, 1.08895338, 0.803307, 0.59516323, 0.43325692, 0.29807833, 0.19894916, 0.09824532, 0.02916753],
159
+ [14.61464119, 7.49001646, 5.09240818, 3.60512662, 2.84484982, 2.12350607, 1.61558151, 1.24153244, 0.95350921, 0.72133851, 0.54755926, 0.41087446, 0.29807833, 0.19894916, 0.09824532, 0.02916753],
160
+ [14.61464119, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.45070267, 1.84880662, 1.41535246, 1.08895338, 0.83188516, 0.64427125, 0.50118381, 0.36617002, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
161
+ [14.61464119, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.45070267, 1.91321158, 1.51179266, 1.20157266, 0.95350921, 0.74807048, 0.59516323, 0.45573691, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
162
+ [14.61464119, 7.49001646, 5.85520077, 4.45427561, 3.46139455, 2.84484982, 2.19988537, 1.72759056, 1.36964464, 1.08895338, 0.86115354, 0.69515091, 0.54755926, 0.43325692, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
163
+ [14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.46139455, 2.84484982, 2.19988537, 1.72759056, 1.36964464, 1.08895338, 0.86115354, 0.69515091, 0.54755926, 0.43325692, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
164
+ [14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.46139455, 2.84484982, 2.19988537, 1.72759056, 1.36964464, 1.08895338, 0.89115214, 0.72133851, 0.59516323, 0.4783645, 0.38853383, 0.29807833, 0.22545385, 0.17026083, 0.09824532, 0.02916753],
165
+ ],
166
+ 1.15: [
167
+ [14.61464119, 0.83188516, 0.02916753],
168
+ [14.61464119, 1.84880662, 0.59516323, 0.02916753],
169
+ [14.61464119, 5.85520077, 1.56271636, 0.52423614, 0.02916753],
170
+ [14.61464119, 5.85520077, 1.91321158, 0.83188516, 0.34370604, 0.02916753],
171
+ [14.61464119, 5.85520077, 2.45070267, 1.24153244, 0.59516323, 0.25053367, 0.02916753],
172
+ [14.61464119, 5.85520077, 2.84484982, 1.51179266, 0.803307, 0.41087446, 0.17026083, 0.02916753],
173
+ [14.61464119, 5.85520077, 2.84484982, 1.56271636, 0.89115214, 0.50118381, 0.25053367, 0.09824532, 0.02916753],
174
+ [14.61464119, 6.77309084, 3.07277966, 1.84880662, 1.12534678, 0.72133851, 0.43325692, 0.22545385, 0.09824532, 0.02916753],
175
+ [14.61464119, 6.77309084, 3.07277966, 1.91321158, 1.24153244, 0.803307, 0.52423614, 0.34370604, 0.19894916, 0.09824532, 0.02916753],
176
+ [14.61464119, 7.49001646, 4.86714602, 2.95596409, 1.91321158, 1.24153244, 0.803307, 0.52423614, 0.34370604, 0.19894916, 0.09824532, 0.02916753],
177
+ [14.61464119, 7.49001646, 4.86714602, 3.07277966, 2.05039096, 1.36964464, 0.95350921, 0.69515091, 0.4783645, 0.32104823, 0.19894916, 0.09824532, 0.02916753],
178
+ [14.61464119, 7.49001646, 4.86714602, 3.07277966, 2.12350607, 1.51179266, 1.08895338, 0.803307, 0.59516323, 0.43325692, 0.29807833, 0.19894916, 0.09824532, 0.02916753],
179
+ [14.61464119, 7.49001646, 4.86714602, 3.07277966, 2.12350607, 1.51179266, 1.08895338, 0.803307, 0.59516323, 0.45573691, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
180
+ [14.61464119, 7.49001646, 4.86714602, 3.07277966, 2.19988537, 1.61558151, 1.24153244, 0.95350921, 0.74807048, 0.59516323, 0.45573691, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
181
+ [14.61464119, 7.49001646, 4.86714602, 3.1956799, 2.45070267, 1.78698075, 1.32549286, 1.01931262, 0.803307, 0.64427125, 0.50118381, 0.38853383, 0.29807833, 0.22545385, 0.17026083, 0.09824532, 0.02916753],
182
+ [14.61464119, 7.49001646, 4.86714602, 3.1956799, 2.45070267, 1.78698075, 1.32549286, 1.01931262, 0.803307, 0.64427125, 0.52423614, 0.41087446, 0.32104823, 0.25053367, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
183
+ [14.61464119, 7.49001646, 4.86714602, 3.1956799, 2.45070267, 1.84880662, 1.41535246, 1.12534678, 0.89115214, 0.72133851, 0.59516323, 0.4783645, 0.38853383, 0.32104823, 0.25053367, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
184
+ [14.61464119, 7.49001646, 4.86714602, 3.1956799, 2.45070267, 1.84880662, 1.41535246, 1.12534678, 0.89115214, 0.72133851, 0.59516323, 0.50118381, 0.41087446, 0.34370604, 0.27464288, 0.22545385, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
185
+ [14.61464119, 7.49001646, 4.86714602, 3.1956799, 2.45070267, 1.84880662, 1.41535246, 1.12534678, 0.89115214, 0.72133851, 0.59516323, 0.50118381, 0.41087446, 0.34370604, 0.29807833, 0.25053367, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
186
+ ],
187
+ 1.20: [
188
+ [14.61464119, 0.803307, 0.02916753],
189
+ [14.61464119, 1.56271636, 0.52423614, 0.02916753],
190
+ [14.61464119, 2.36326075, 0.92192322, 0.36617002, 0.02916753],
191
+ [14.61464119, 2.84484982, 1.24153244, 0.59516323, 0.25053367, 0.02916753],
192
+ [14.61464119, 5.85520077, 2.05039096, 0.95350921, 0.45573691, 0.17026083, 0.02916753],
193
+ [14.61464119, 5.85520077, 2.45070267, 1.24153244, 0.64427125, 0.29807833, 0.09824532, 0.02916753],
194
+ [14.61464119, 5.85520077, 2.45070267, 1.36964464, 0.803307, 0.45573691, 0.25053367, 0.09824532, 0.02916753],
195
+ [14.61464119, 5.85520077, 2.84484982, 1.61558151, 0.95350921, 0.59516323, 0.36617002, 0.19894916, 0.09824532, 0.02916753],
196
+ [14.61464119, 5.85520077, 2.84484982, 1.67050016, 1.08895338, 0.74807048, 0.50118381, 0.32104823, 0.19894916, 0.09824532, 0.02916753],
197
+ [14.61464119, 5.85520077, 2.95596409, 1.84880662, 1.24153244, 0.83188516, 0.59516323, 0.41087446, 0.27464288, 0.17026083, 0.09824532, 0.02916753],
198
+ [14.61464119, 5.85520077, 3.07277966, 1.98035145, 1.36964464, 0.95350921, 0.69515091, 0.50118381, 0.36617002, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
199
+ [14.61464119, 6.77309084, 3.46139455, 2.36326075, 1.56271636, 1.08895338, 0.803307, 0.59516323, 0.45573691, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
200
+ [14.61464119, 6.77309084, 3.46139455, 2.45070267, 1.61558151, 1.162866, 0.86115354, 0.64427125, 0.50118381, 0.38853383, 0.29807833, 0.22545385, 0.17026083, 0.09824532, 0.02916753],
201
+ [14.61464119, 7.49001646, 4.65472794, 3.07277966, 2.12350607, 1.51179266, 1.08895338, 0.83188516, 0.64427125, 0.50118381, 0.38853383, 0.29807833, 0.22545385, 0.17026083, 0.09824532, 0.02916753],
202
+ [14.61464119, 7.49001646, 4.65472794, 3.07277966, 2.12350607, 1.51179266, 1.08895338, 0.83188516, 0.64427125, 0.50118381, 0.41087446, 0.32104823, 0.25053367, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
203
+ [14.61464119, 7.49001646, 4.65472794, 3.07277966, 2.12350607, 1.51179266, 1.08895338, 0.83188516, 0.64427125, 0.50118381, 0.41087446, 0.34370604, 0.27464288, 0.22545385, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
204
+ [14.61464119, 7.49001646, 4.65472794, 3.07277966, 2.19988537, 1.61558151, 1.20157266, 0.92192322, 0.72133851, 0.57119018, 0.45573691, 0.36617002, 0.29807833, 0.25053367, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
205
+ [14.61464119, 7.49001646, 4.65472794, 3.07277966, 2.19988537, 1.61558151, 1.24153244, 0.95350921, 0.74807048, 0.59516323, 0.4783645, 0.38853383, 0.32104823, 0.27464288, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
206
+ [14.61464119, 7.49001646, 4.65472794, 3.07277966, 2.19988537, 1.61558151, 1.24153244, 0.95350921, 0.74807048, 0.59516323, 0.50118381, 0.41087446, 0.34370604, 0.29807833, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
207
+ ],
208
+ 1.25: [
209
+ [14.61464119, 0.72133851, 0.02916753],
210
+ [14.61464119, 1.56271636, 0.50118381, 0.02916753],
211
+ [14.61464119, 2.05039096, 0.803307, 0.32104823, 0.02916753],
212
+ [14.61464119, 2.36326075, 0.95350921, 0.43325692, 0.17026083, 0.02916753],
213
+ [14.61464119, 2.84484982, 1.24153244, 0.59516323, 0.27464288, 0.09824532, 0.02916753],
214
+ [14.61464119, 3.07277966, 1.51179266, 0.803307, 0.43325692, 0.22545385, 0.09824532, 0.02916753],
215
+ [14.61464119, 5.85520077, 2.36326075, 1.24153244, 0.72133851, 0.41087446, 0.22545385, 0.09824532, 0.02916753],
216
+ [14.61464119, 5.85520077, 2.45070267, 1.36964464, 0.83188516, 0.52423614, 0.34370604, 0.19894916, 0.09824532, 0.02916753],
217
+ [14.61464119, 5.85520077, 2.84484982, 1.61558151, 0.98595673, 0.64427125, 0.43325692, 0.27464288, 0.17026083, 0.09824532, 0.02916753],
218
+ [14.61464119, 5.85520077, 2.84484982, 1.67050016, 1.08895338, 0.74807048, 0.52423614, 0.36617002, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
219
+ [14.61464119, 5.85520077, 2.84484982, 1.72759056, 1.162866, 0.803307, 0.59516323, 0.45573691, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
220
+ [14.61464119, 5.85520077, 2.95596409, 1.84880662, 1.24153244, 0.86115354, 0.64427125, 0.4783645, 0.36617002, 0.27464288, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
221
+ [14.61464119, 5.85520077, 2.95596409, 1.84880662, 1.28281462, 0.92192322, 0.69515091, 0.52423614, 0.41087446, 0.32104823, 0.25053367, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
222
+ [14.61464119, 5.85520077, 2.95596409, 1.91321158, 1.32549286, 0.95350921, 0.72133851, 0.54755926, 0.43325692, 0.34370604, 0.27464288, 0.22545385, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
223
+ [14.61464119, 5.85520077, 2.95596409, 1.91321158, 1.32549286, 0.95350921, 0.72133851, 0.57119018, 0.45573691, 0.36617002, 0.29807833, 0.25053367, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
224
+ [14.61464119, 5.85520077, 2.95596409, 1.91321158, 1.32549286, 0.95350921, 0.74807048, 0.59516323, 0.4783645, 0.38853383, 0.32104823, 0.27464288, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
225
+ [14.61464119, 5.85520077, 3.07277966, 2.05039096, 1.41535246, 1.05362725, 0.803307, 0.61951244, 0.50118381, 0.41087446, 0.34370604, 0.29807833, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
226
+ [14.61464119, 5.85520077, 3.07277966, 2.05039096, 1.41535246, 1.05362725, 0.803307, 0.64427125, 0.52423614, 0.43325692, 0.36617002, 0.32104823, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
227
+ [14.61464119, 5.85520077, 3.07277966, 2.05039096, 1.46270394, 1.08895338, 0.83188516, 0.66947293, 0.54755926, 0.45573691, 0.38853383, 0.34370604, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
228
+ ],
229
+ 1.30: [
230
+ [14.61464119, 0.72133851, 0.02916753],
231
+ [14.61464119, 1.24153244, 0.43325692, 0.02916753],
232
+ [14.61464119, 1.56271636, 0.59516323, 0.22545385, 0.02916753],
233
+ [14.61464119, 1.84880662, 0.803307, 0.36617002, 0.13792117, 0.02916753],
234
+ [14.61464119, 2.36326075, 1.01931262, 0.52423614, 0.25053367, 0.09824532, 0.02916753],
235
+ [14.61464119, 2.84484982, 1.36964464, 0.74807048, 0.41087446, 0.22545385, 0.09824532, 0.02916753],
236
+ [14.61464119, 3.07277966, 1.56271636, 0.89115214, 0.54755926, 0.34370604, 0.19894916, 0.09824532, 0.02916753],
237
+ [14.61464119, 3.07277966, 1.61558151, 0.95350921, 0.61951244, 0.41087446, 0.27464288, 0.17026083, 0.09824532, 0.02916753],
238
+ [14.61464119, 5.85520077, 2.45070267, 1.36964464, 0.83188516, 0.54755926, 0.36617002, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
239
+ [14.61464119, 5.85520077, 2.45070267, 1.41535246, 0.92192322, 0.64427125, 0.45573691, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
240
+ [14.61464119, 5.85520077, 2.6383388, 1.56271636, 1.01931262, 0.72133851, 0.50118381, 0.36617002, 0.27464288, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
241
+ [14.61464119, 5.85520077, 2.84484982, 1.61558151, 1.05362725, 0.74807048, 0.54755926, 0.41087446, 0.32104823, 0.25053367, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
242
+ [14.61464119, 5.85520077, 2.84484982, 1.61558151, 1.08895338, 0.77538133, 0.57119018, 0.43325692, 0.34370604, 0.27464288, 0.22545385, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
243
+ [14.61464119, 5.85520077, 2.84484982, 1.61558151, 1.08895338, 0.803307, 0.59516323, 0.45573691, 0.36617002, 0.29807833, 0.25053367, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
244
+ [14.61464119, 5.85520077, 2.84484982, 1.61558151, 1.08895338, 0.803307, 0.59516323, 0.4783645, 0.38853383, 0.32104823, 0.27464288, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
245
+ [14.61464119, 5.85520077, 2.84484982, 1.72759056, 1.162866, 0.83188516, 0.64427125, 0.50118381, 0.41087446, 0.34370604, 0.29807833, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
246
+ [14.61464119, 5.85520077, 2.84484982, 1.72759056, 1.162866, 0.83188516, 0.64427125, 0.52423614, 0.43325692, 0.36617002, 0.32104823, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
247
+ [14.61464119, 5.85520077, 2.84484982, 1.78698075, 1.24153244, 0.92192322, 0.72133851, 0.57119018, 0.45573691, 0.38853383, 0.34370604, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
248
+ [14.61464119, 5.85520077, 2.84484982, 1.78698075, 1.24153244, 0.92192322, 0.72133851, 0.57119018, 0.4783645, 0.41087446, 0.36617002, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
249
+ ],
250
+ 1.35: [
251
+ [14.61464119, 0.69515091, 0.02916753],
252
+ [14.61464119, 0.95350921, 0.34370604, 0.02916753],
253
+ [14.61464119, 1.56271636, 0.57119018, 0.19894916, 0.02916753],
254
+ [14.61464119, 1.61558151, 0.69515091, 0.29807833, 0.09824532, 0.02916753],
255
+ [14.61464119, 1.84880662, 0.83188516, 0.43325692, 0.22545385, 0.09824532, 0.02916753],
256
+ [14.61464119, 2.45070267, 1.162866, 0.64427125, 0.36617002, 0.19894916, 0.09824532, 0.02916753],
257
+ [14.61464119, 2.84484982, 1.36964464, 0.803307, 0.50118381, 0.32104823, 0.19894916, 0.09824532, 0.02916753],
258
+ [14.61464119, 2.84484982, 1.41535246, 0.83188516, 0.54755926, 0.36617002, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
259
+ [14.61464119, 2.84484982, 1.56271636, 0.95350921, 0.64427125, 0.45573691, 0.32104823, 0.22545385, 0.17026083, 0.09824532, 0.02916753],
260
+ [14.61464119, 2.84484982, 1.56271636, 0.95350921, 0.64427125, 0.45573691, 0.34370604, 0.25053367, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
261
+ [14.61464119, 3.07277966, 1.61558151, 1.01931262, 0.72133851, 0.52423614, 0.38853383, 0.29807833, 0.22545385, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
262
+ [14.61464119, 3.07277966, 1.61558151, 1.01931262, 0.72133851, 0.52423614, 0.41087446, 0.32104823, 0.25053367, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
263
+ [14.61464119, 3.07277966, 1.61558151, 1.05362725, 0.74807048, 0.54755926, 0.43325692, 0.34370604, 0.27464288, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
264
+ [14.61464119, 3.07277966, 1.72759056, 1.12534678, 0.803307, 0.59516323, 0.45573691, 0.36617002, 0.29807833, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
265
+ [14.61464119, 3.07277966, 1.72759056, 1.12534678, 0.803307, 0.59516323, 0.4783645, 0.38853383, 0.32104823, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
266
+ [14.61464119, 5.85520077, 2.45070267, 1.51179266, 1.01931262, 0.74807048, 0.57119018, 0.45573691, 0.36617002, 0.32104823, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
267
+ [14.61464119, 5.85520077, 2.6383388, 1.61558151, 1.08895338, 0.803307, 0.61951244, 0.50118381, 0.41087446, 0.34370604, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
268
+ [14.61464119, 5.85520077, 2.6383388, 1.61558151, 1.08895338, 0.803307, 0.64427125, 0.52423614, 0.43325692, 0.36617002, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
269
+ [14.61464119, 5.85520077, 2.6383388, 1.61558151, 1.08895338, 0.803307, 0.64427125, 0.52423614, 0.45573691, 0.38853383, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
270
+ ],
271
+ 1.40: [
272
+ [14.61464119, 0.59516323, 0.02916753],
273
+ [14.61464119, 0.95350921, 0.34370604, 0.02916753],
274
+ [14.61464119, 1.08895338, 0.43325692, 0.13792117, 0.02916753],
275
+ [14.61464119, 1.56271636, 0.64427125, 0.27464288, 0.09824532, 0.02916753],
276
+ [14.61464119, 1.61558151, 0.803307, 0.43325692, 0.22545385, 0.09824532, 0.02916753],
277
+ [14.61464119, 2.05039096, 0.95350921, 0.54755926, 0.34370604, 0.19894916, 0.09824532, 0.02916753],
278
+ [14.61464119, 2.45070267, 1.24153244, 0.72133851, 0.43325692, 0.27464288, 0.17026083, 0.09824532, 0.02916753],
279
+ [14.61464119, 2.45070267, 1.24153244, 0.74807048, 0.50118381, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
280
+ [14.61464119, 2.45070267, 1.28281462, 0.803307, 0.52423614, 0.36617002, 0.27464288, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
281
+ [14.61464119, 2.45070267, 1.28281462, 0.803307, 0.54755926, 0.38853383, 0.29807833, 0.22545385, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
282
+ [14.61464119, 2.84484982, 1.41535246, 0.86115354, 0.59516323, 0.43325692, 0.32104823, 0.25053367, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
283
+ [14.61464119, 2.84484982, 1.51179266, 0.95350921, 0.64427125, 0.45573691, 0.34370604, 0.27464288, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
284
+ [14.61464119, 2.84484982, 1.51179266, 0.95350921, 0.64427125, 0.4783645, 0.36617002, 0.29807833, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
285
+ [14.61464119, 2.84484982, 1.56271636, 0.98595673, 0.69515091, 0.52423614, 0.41087446, 0.34370604, 0.29807833, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
286
+ [14.61464119, 2.84484982, 1.56271636, 1.01931262, 0.72133851, 0.54755926, 0.43325692, 0.36617002, 0.32104823, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
287
+ [14.61464119, 2.84484982, 1.61558151, 1.05362725, 0.74807048, 0.57119018, 0.45573691, 0.38853383, 0.34370604, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
288
+ [14.61464119, 2.84484982, 1.61558151, 1.08895338, 0.803307, 0.61951244, 0.50118381, 0.41087446, 0.36617002, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
289
+ [14.61464119, 2.84484982, 1.61558151, 1.08895338, 0.803307, 0.61951244, 0.50118381, 0.43325692, 0.38853383, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
290
+ [14.61464119, 2.84484982, 1.61558151, 1.08895338, 0.803307, 0.64427125, 0.52423614, 0.45573691, 0.41087446, 0.36617002, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
291
+ ],
292
+ 1.45: [
293
+ [14.61464119, 0.59516323, 0.02916753],
294
+ [14.61464119, 0.803307, 0.25053367, 0.02916753],
295
+ [14.61464119, 0.95350921, 0.34370604, 0.09824532, 0.02916753],
296
+ [14.61464119, 1.24153244, 0.54755926, 0.25053367, 0.09824532, 0.02916753],
297
+ [14.61464119, 1.56271636, 0.72133851, 0.36617002, 0.19894916, 0.09824532, 0.02916753],
298
+ [14.61464119, 1.61558151, 0.803307, 0.45573691, 0.27464288, 0.17026083, 0.09824532, 0.02916753],
299
+ [14.61464119, 1.91321158, 0.95350921, 0.57119018, 0.36617002, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
300
+ [14.61464119, 2.19988537, 1.08895338, 0.64427125, 0.41087446, 0.27464288, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
301
+ [14.61464119, 2.45070267, 1.24153244, 0.74807048, 0.50118381, 0.34370604, 0.25053367, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
302
+ [14.61464119, 2.45070267, 1.24153244, 0.74807048, 0.50118381, 0.36617002, 0.27464288, 0.22545385, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
303
+ [14.61464119, 2.45070267, 1.28281462, 0.803307, 0.54755926, 0.41087446, 0.32104823, 0.25053367, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
304
+ [14.61464119, 2.45070267, 1.28281462, 0.803307, 0.57119018, 0.43325692, 0.34370604, 0.27464288, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
305
+ [14.61464119, 2.45070267, 1.28281462, 0.83188516, 0.59516323, 0.45573691, 0.36617002, 0.29807833, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
306
+ [14.61464119, 2.45070267, 1.28281462, 0.83188516, 0.59516323, 0.45573691, 0.36617002, 0.32104823, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
307
+ [14.61464119, 2.84484982, 1.51179266, 0.95350921, 0.69515091, 0.52423614, 0.41087446, 0.34370604, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
308
+ [14.61464119, 2.84484982, 1.51179266, 0.95350921, 0.69515091, 0.52423614, 0.43325692, 0.36617002, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
309
+ [14.61464119, 2.84484982, 1.56271636, 0.98595673, 0.72133851, 0.54755926, 0.45573691, 0.38853383, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
310
+ [14.61464119, 2.84484982, 1.56271636, 1.01931262, 0.74807048, 0.57119018, 0.4783645, 0.41087446, 0.36617002, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
311
+ [14.61464119, 2.84484982, 1.56271636, 1.01931262, 0.74807048, 0.59516323, 0.50118381, 0.43325692, 0.38853383, 0.36617002, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
312
+ ],
313
+ 1.50: [
314
+ [14.61464119, 0.54755926, 0.02916753],
315
+ [14.61464119, 0.803307, 0.25053367, 0.02916753],
316
+ [14.61464119, 0.86115354, 0.32104823, 0.09824532, 0.02916753],
317
+ [14.61464119, 1.24153244, 0.54755926, 0.25053367, 0.09824532, 0.02916753],
318
+ [14.61464119, 1.56271636, 0.72133851, 0.36617002, 0.19894916, 0.09824532, 0.02916753],
319
+ [14.61464119, 1.61558151, 0.803307, 0.45573691, 0.27464288, 0.17026083, 0.09824532, 0.02916753],
320
+ [14.61464119, 1.61558151, 0.83188516, 0.52423614, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
321
+ [14.61464119, 1.84880662, 0.95350921, 0.59516323, 0.38853383, 0.27464288, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
322
+ [14.61464119, 1.84880662, 0.95350921, 0.59516323, 0.41087446, 0.29807833, 0.22545385, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
323
+ [14.61464119, 1.84880662, 0.95350921, 0.61951244, 0.43325692, 0.32104823, 0.25053367, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
324
+ [14.61464119, 2.19988537, 1.12534678, 0.72133851, 0.50118381, 0.36617002, 0.27464288, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
325
+ [14.61464119, 2.19988537, 1.12534678, 0.72133851, 0.50118381, 0.36617002, 0.29807833, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
326
+ [14.61464119, 2.36326075, 1.24153244, 0.803307, 0.57119018, 0.43325692, 0.34370604, 0.29807833, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
327
+ [14.61464119, 2.36326075, 1.24153244, 0.803307, 0.57119018, 0.43325692, 0.34370604, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
328
+ [14.61464119, 2.36326075, 1.24153244, 0.803307, 0.59516323, 0.45573691, 0.36617002, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
329
+ [14.61464119, 2.36326075, 1.24153244, 0.803307, 0.59516323, 0.45573691, 0.38853383, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
330
+ [14.61464119, 2.45070267, 1.32549286, 0.86115354, 0.64427125, 0.50118381, 0.41087446, 0.36617002, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
331
+ [14.61464119, 2.45070267, 1.36964464, 0.92192322, 0.69515091, 0.54755926, 0.45573691, 0.41087446, 0.36617002, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
332
+ [14.61464119, 2.45070267, 1.41535246, 0.95350921, 0.72133851, 0.57119018, 0.4783645, 0.43325692, 0.38853383, 0.36617002, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
333
+ ],
334
+ }
335
+
336
class GITSScheduler:
    """Scheduler node producing GITS sigma schedules from the precomputed NOISE_LEVELS tables."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "coeff": ("FLOAT", {"default": 1.20, "min": 0.80, "max": 1.50, "step": 0.05}),
                "steps": ("INT", {"default": 10, "min": 2, "max": 1000}),
                "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
            }
        }

    RETURN_TYPES = ("SIGMAS",)
    CATEGORY = "sampling/custom_sampling/schedulers"

    FUNCTION = "get_sigmas"

    def get_sigmas(self, coeff, steps, denoise):
        """Look up (or log-linearly extend) the sigma table for *coeff*, trimmed for partial denoise."""
        effective_steps = steps
        if denoise < 1.0:
            if denoise <= 0.0:
                # Nothing to denoise: empty schedule.
                return (torch.FloatTensor([]),)
            effective_steps = round(steps * denoise)

        table = NOISE_LEVELS[round(coeff, 2)]
        if steps <= 20:
            # Tables are indexed by step count, starting at 2 steps.
            sigmas = table[steps - 2][:]
        else:
            # Beyond the precomputed range: interpolate the longest table.
            sigmas = table[-1][:]
            sigmas = loglinear_interp(sigmas, steps + 1)

        # Keep only the tail needed for a partial denoise; final sigma is always 0.
        sigmas = sigmas[-(effective_steps + 1):]
        sigmas[-1] = 0
        return (torch.FloatTensor(sigmas),)
366
+
367
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
# Registry mapping node identifiers to their implementing classes.
NODE_CLASS_MAPPINGS = {
    "GITSScheduler": GITSScheduler,
}
ldm_patched/contrib/nodes_hidream.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ldm_patched.utils.path_utils
2
+ import ldm_patched.modules.sd
3
+ import ldm_patched.modules.model_management
4
+
5
+
6
class QuadrupleCLIPLoader:
    """Load four text-encoder checkpoints into one combined CLIP object (e.g. HiDream)."""

    @classmethod
    def INPUT_TYPES(s):
        encoders = ldm_patched.utils.path_utils.get_filename_list("text_encoders")
        return {"required": {
            "clip_name1": (encoders,),
            "clip_name2": (encoders,),
            "clip_name3": (encoders,),
            "clip_name4": (encoders,),
        }}

    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    DESCRIPTION = "[Recipes]\n\nhidream: long clip-l, long clip-g, t5xxl, llama_8b_3.1_instruct"

    def load_clip(self, clip_name1, clip_name2, clip_name3, clip_name4):
        """Resolve all four encoder paths and load them as a single CLIP stack."""
        names = (clip_name1, clip_name2, clip_name3, clip_name4)
        paths = [ldm_patched.utils.path_utils.get_full_path_or_raise("text_encoders", n) for n in names]
        clip = ldm_patched.modules.sd.load_clip(
            ckpt_paths=paths,
            embedding_directory=ldm_patched.utils.path_utils.get_folder_paths("embeddings"),
        )
        return (clip,)
28
+
29
class CLIPTextEncodeHiDream:
    """Encode four separate prompts (clip_l, clip_g, t5xxl, llama) with a HiDream CLIP stack."""

    @classmethod
    def INPUT_TYPES(s):
        prompt = ("STRING", {"multiline": True, "dynamicPrompts": True})
        return {"required": {
            "clip": ("CLIP",),
            "clip_l": prompt,
            "clip_g": prompt,
            "t5xxl": prompt,
            "llama": prompt,
        }}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "advanced/conditioning"

    def encode(self, clip, clip_l, clip_g, t5xxl, llama):
        """Tokenize each prompt with its own encoder stream and merge into one token dict."""
        tokens = clip.tokenize(clip_g)
        # Each remaining prompt contributes only its own encoder's token stream.
        tokens["l"] = clip.tokenize(clip_l)["l"]
        tokens["t5xxl"] = clip.tokenize(t5xxl)["t5xxl"]
        tokens["llama"] = clip.tokenize(llama)["llama"]
        return (clip.encode_from_tokens_scheduled(tokens),)
51
+
52
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
# Registry mapping node identifiers to their implementing classes.
NODE_CLASS_MAPPINGS = {
    "QuadrupleCLIPLoader": QuadrupleCLIPLoader,
    "CLIPTextEncodeHiDream": CLIPTextEncodeHiDream,
}
ldm_patched/contrib/nodes_hooks.py ADDED
@@ -0,0 +1,746 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ from typing import TYPE_CHECKING, Union
3
+ import logging
4
+ import torch
5
+ from collections.abc import Iterable
6
+
7
+ if TYPE_CHECKING:
8
+ from ldm_patched.modules.sd import CLIP
9
+
10
+ import ldm_patched.hooks
11
+ import ldm_patched.modules.sd
12
+ import ldm_patched.modules.utils
13
+ import ldm_patched.utils.path_utils
14
+
15
+ ###########################################
16
+ # Mask, Combine, and Hook Conditioning
17
+ #------------------------------------------
18
class PairConditioningSetProperties:
    """Apply strength/area/mask/hook/timestep properties to a new positive+negative cond pair."""
    NodeId = 'PairConditioningSetProperties'
    NodeName = 'Cond Pair Set Props'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "positive_NEW": ("CONDITIONING",),
                "negative_NEW": ("CONDITIONING",),
                "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                "set_cond_area": (["default", "mask bounds"],),
            },
            "optional": {
                "mask": ("MASK",),
                "hooks": ("HOOKS",),
                "timesteps": ("TIMESTEPS_RANGE",),
            },
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
    RETURN_NAMES = ("positive", "negative")
    CATEGORY = "advanced/hooks/cond pair"
    FUNCTION = "set_properties"

    def set_properties(self, positive_NEW, negative_NEW,
                       strength: float, set_cond_area: str,
                       mask: torch.Tensor=None, hooks: ldm_patched.hooks.HookGroup=None, timesteps: tuple=None):
        """Delegate to ldm_patched.hooks.set_conds_props for both conds at once."""
        pos, neg = ldm_patched.hooks.set_conds_props(
            conds=[positive_NEW, negative_NEW],
            strength=strength, set_cond_area=set_cond_area,
            mask=mask, hooks=hooks, timesteps_range=timesteps)
        return (pos, neg)
50
+
51
class PairConditioningSetPropertiesAndCombine:
    """Set properties on a new cond pair and combine it with an existing positive/negative pair."""
    NodeId = 'PairConditioningSetPropertiesAndCombine'
    NodeName = 'Cond Pair Set Props Combine'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "positive": ("CONDITIONING",),
                "negative": ("CONDITIONING",),
                "positive_NEW": ("CONDITIONING",),
                "negative_NEW": ("CONDITIONING",),
                "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                "set_cond_area": (["default", "mask bounds"],),
            },
            "optional": {
                "mask": ("MASK",),
                "hooks": ("HOOKS",),
                "timesteps": ("TIMESTEPS_RANGE",),
            },
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
    RETURN_NAMES = ("positive", "negative")
    CATEGORY = "advanced/hooks/cond pair"
    FUNCTION = "set_properties"

    def set_properties(self, positive, negative, positive_NEW, negative_NEW,
                       strength: float, set_cond_area: str,
                       mask: torch.Tensor=None, hooks: ldm_patched.hooks.HookGroup=None, timesteps: tuple=None):
        """Delegate to set_conds_props_and_combine for the pair."""
        pos, neg = ldm_patched.hooks.set_conds_props_and_combine(
            conds=[positive, negative], new_conds=[positive_NEW, negative_NEW],
            strength=strength, set_cond_area=set_cond_area,
            mask=mask, hooks=hooks, timesteps_range=timesteps)
        return (pos, neg)
85
+
86
class ConditioningSetProperties:
    """Apply strength/area/mask/hook/timestep properties to a single new conditioning."""
    NodeId = 'ConditioningSetProperties'
    NodeName = 'Cond Set Props'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "cond_NEW": ("CONDITIONING",),
                "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                "set_cond_area": (["default", "mask bounds"],),
            },
            "optional": {
                "mask": ("MASK",),
                "hooks": ("HOOKS",),
                "timesteps": ("TIMESTEPS_RANGE",),
            },
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("CONDITIONING",)
    CATEGORY = "advanced/hooks/cond single"
    FUNCTION = "set_properties"

    def set_properties(self, cond_NEW,
                       strength: float, set_cond_area: str,
                       mask: torch.Tensor=None, hooks: ldm_patched.hooks.HookGroup=None, timesteps: tuple=None):
        """Delegate to set_conds_props for the single cond."""
        (result,) = ldm_patched.hooks.set_conds_props(
            conds=[cond_NEW],
            strength=strength, set_cond_area=set_cond_area,
            mask=mask, hooks=hooks, timesteps_range=timesteps)
        return (result,)
116
+
117
class ConditioningSetPropertiesAndCombine:
    """Set properties on a new conditioning and combine it with an existing one."""
    NodeId = 'ConditioningSetPropertiesAndCombine'
    NodeName = 'Cond Set Props Combine'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "cond": ("CONDITIONING",),
                "cond_NEW": ("CONDITIONING",),
                "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                "set_cond_area": (["default", "mask bounds"],),
            },
            "optional": {
                "mask": ("MASK",),
                "hooks": ("HOOKS",),
                "timesteps": ("TIMESTEPS_RANGE",),
            },
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("CONDITIONING",)
    CATEGORY = "advanced/hooks/cond single"
    FUNCTION = "set_properties"

    def set_properties(self, cond, cond_NEW,
                       strength: float, set_cond_area: str,
                       mask: torch.Tensor=None, hooks: ldm_patched.hooks.HookGroup=None, timesteps: tuple=None):
        """Delegate to set_conds_props_and_combine for the single cond."""
        (result,) = ldm_patched.hooks.set_conds_props_and_combine(
            conds=[cond], new_conds=[cond_NEW],
            strength=strength, set_cond_area=set_cond_area,
            mask=mask, hooks=hooks, timesteps_range=timesteps)
        return (result,)
148
+
149
class PairConditioningCombine:
    """Combine two positive/negative conditioning pairs into one pair."""
    NodeId = 'PairConditioningCombine'
    NodeName = 'Cond Pair Combine'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "positive_A": ("CONDITIONING",),
                "negative_A": ("CONDITIONING",),
                "positive_B": ("CONDITIONING",),
                "negative_B": ("CONDITIONING",),
            },
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
    RETURN_NAMES = ("positive", "negative")
    CATEGORY = "advanced/hooks/cond pair"
    FUNCTION = "combine"

    def combine(self, positive_A, negative_A, positive_B, negative_B):
        """Combine pair B into pair A with default properties."""
        pos, neg = ldm_patched.hooks.set_conds_props_and_combine(
            conds=[positive_A, negative_A], new_conds=[positive_B, negative_B])
        return (pos, neg,)
172
+
173
class PairConditioningSetDefaultAndCombine:
    """Attach default conds (used where no other cond applies) to a positive/negative pair."""
    NodeId = 'PairConditioningSetDefaultCombine'
    NodeName = 'Cond Pair Set Default Combine'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "positive": ("CONDITIONING",),
                "negative": ("CONDITIONING",),
                "positive_DEFAULT": ("CONDITIONING",),
                "negative_DEFAULT": ("CONDITIONING",),
            },
            "optional": {
                "hooks": ("HOOKS",),
            },
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
    RETURN_NAMES = ("positive", "negative")
    CATEGORY = "advanced/hooks/cond pair"
    FUNCTION = "set_default_and_combine"

    def set_default_and_combine(self, positive, negative, positive_DEFAULT, negative_DEFAULT,
                                hooks: ldm_patched.hooks.HookGroup=None):
        """Delegate to set_default_conds_and_combine for the pair."""
        pos, neg = ldm_patched.hooks.set_default_conds_and_combine(
            conds=[positive, negative], new_conds=[positive_DEFAULT, negative_DEFAULT],
            hooks=hooks)
        return (pos, neg)
201
+
202
class ConditioningSetDefaultAndCombine:
    """Attach a default cond (used where no other cond applies) to a single conditioning."""
    NodeId = 'ConditioningSetDefaultCombine'
    NodeName = 'Cond Set Default Combine'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "cond": ("CONDITIONING",),
                "cond_DEFAULT": ("CONDITIONING",),
            },
            "optional": {
                "hooks": ("HOOKS",),
            },
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("CONDITIONING",)
    CATEGORY = "advanced/hooks/cond single"
    FUNCTION = "set_default_and_combine"

    def set_default_and_combine(self, cond, cond_DEFAULT,
                                hooks: ldm_patched.hooks.HookGroup=None):
        """Delegate to set_default_conds_and_combine for the single cond."""
        (result,) = ldm_patched.hooks.set_default_conds_and_combine(
            conds=[cond], new_conds=[cond_DEFAULT],
            hooks=hooks)
        return (result,)
227
+
228
class SetClipHooks:
    """Attach a hook group to a CLIP model, optionally propagating it onto produced conds."""
    NodeId = 'SetClipHooks'
    NodeName = 'Set CLIP Hooks'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "clip": ("CLIP",),
                "apply_to_conds": ("BOOLEAN", {"default": True}),
                "schedule_clip": ("BOOLEAN", {"default": False})
            },
            "optional": {
                "hooks": ("HOOKS",)
            }
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("CLIP",)
    CATEGORY = "advanced/hooks/clip"
    FUNCTION = "apply_hooks"

    def apply_hooks(self, clip: CLIP, schedule_clip: bool, apply_to_conds: bool, hooks: ldm_patched.hooks.HookGroup=None):
        """Clone *clip*, force *hooks* onto its patcher, and register their weight patches."""
        # Guard clause: without hooks there is nothing to attach.
        if hooks is None:
            return (clip,)
        clip = clip.clone()
        if apply_to_conds:
            clip.apply_hooks_to_conds = hooks
        clip.patcher.forced_hooks = hooks.clone()
        clip.use_clip_schedule = schedule_clip
        if not clip.use_clip_schedule:
            # Scheduling disabled: strip keyframes so hooks apply at constant strength.
            clip.patcher.forced_hooks.set_keyframes_on_hooks(None)
        clip.patcher.register_all_hook_patches(
            hooks, ldm_patched.hooks.create_target_dict(ldm_patched.hooks.EnumWeightTarget.Clip))
        return (clip,)
260
+
261
class ConditioningTimestepsRange:
    """Build a (start, end) percent range plus its complements before and after."""
    NodeId = 'ConditioningTimestepsRange'
    NodeName = 'Timesteps Range'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
            },
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("TIMESTEPS_RANGE", "TIMESTEPS_RANGE", "TIMESTEPS_RANGE")
    RETURN_NAMES = ("TIMESTEPS_RANGE", "BEFORE_RANGE", "AFTER_RANGE")
    CATEGORY = "advanced/hooks"
    FUNCTION = "create_range"

    def create_range(self, start_percent: float, end_percent: float):
        """Return the selected range, the range before it, and the range after it."""
        selected = (start_percent, end_percent)
        before = (0.0, start_percent)
        after = (end_percent, 1.0)
        return (selected, before, after)
281
+ #------------------------------------------
282
+ ###########################################
283
+
284
+
285
+ ###########################################
286
+ # Create Hooks
287
+ #------------------------------------------
288
class CreateHookLora:
    """Create a LoRA hook group from a LoRA file, caching the most recently loaded file."""
    NodeId = 'CreateHookLora'
    NodeName = 'Create Hook LoRA'

    def __init__(self):
        # (lora_path, state_dict) of the most recently loaded LoRA, or None.
        self.loaded_lora = None

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "lora_name": (ldm_patched.utils.path_utils.get_filename_list("loras"), ),
                "strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
                "strength_clip": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
            },
            "optional": {
                "prev_hooks": ("HOOKS",)
            }
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOKS",)
    CATEGORY = "advanced/hooks/create"
    FUNCTION = "create_hook"

    def create_hook(self, lora_name: str, strength_model: float, strength_clip: float,
                    prev_hooks: ldm_patched.hooks.HookGroup=None):
        """Load (or reuse the cached) LoRA and return prev_hooks combined with a new LoRA hook.

        Returns a 1-tuple containing a HookGroup; the caller's prev_hooks is never mutated.
        """
        if prev_hooks is None:
            prev_hooks = ldm_patched.hooks.HookGroup()
        # BUGFIX: the clone result was previously discarded (bare `prev_hooks.clone()`);
        # rebind it, matching the pattern used by CreateHookKeyframe.
        prev_hooks = prev_hooks.clone()

        if strength_model == 0 and strength_clip == 0:
            # Nothing to apply; pass through unchanged.
            return (prev_hooks,)

        lora_path = ldm_patched.utils.path_utils.get_full_path("loras", lora_name)
        lora = None
        if self.loaded_lora is not None:
            if self.loaded_lora[0] == lora_path:
                # Cache hit: reuse the already-loaded state dict.
                lora = self.loaded_lora[1]
            else:
                # Different file requested: drop the stale cache entry.
                temp = self.loaded_lora
                self.loaded_lora = None
                del temp

        if lora is None:
            lora = ldm_patched.modules.utils.load_torch_file(lora_path, safe_load=True)
            self.loaded_lora = (lora_path, lora)

        hooks = ldm_patched.hooks.create_hook_lora(lora=lora, strength_model=strength_model, strength_clip=strength_clip)
        return (prev_hooks.clone_and_combine(hooks),)
336
+
337
class CreateHookLoraModelOnly(CreateHookLora):
    """CreateHookLora variant that applies the LoRA to the model only (clip strength 0)."""
    NodeId = 'CreateHookLoraModelOnly'
    NodeName = 'Create Hook LoRA (MO)'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "lora_name": (ldm_patched.utils.path_utils.get_filename_list("loras"), ),
                "strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
            },
            "optional": {
                "prev_hooks": ("HOOKS",)
            }
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOKS",)
    CATEGORY = "advanced/hooks/create"
    FUNCTION = "create_hook_model_only"

    def create_hook_model_only(self, lora_name: str, strength_model: float, prev_hooks: ldm_patched.hooks.HookGroup=None):
        """Forward to the parent implementation with strength_clip fixed at 0."""
        return self.create_hook(lora_name=lora_name, strength_model=strength_model,
                                strength_clip=0, prev_hooks=prev_hooks)
359
+
360
class CreateHookModelAsLora:
    """Create hooks that apply a full checkpoint's weights as if it were a LoRA."""
    NodeId = 'CreateHookModelAsLora'
    NodeName = 'Create Hook Model as LoRA'

    def __init__(self):
        # When not None, cached as:
        # (ckpt_path: str, weights_model: dict, weights_clip: dict)
        self.loaded_weights = None

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "ckpt_name": (ldm_patched.utils.path_utils.get_filename_list("checkpoints"), ),
                "strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
                "strength_clip": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
            },
            "optional": {
                "prev_hooks": ("HOOKS",)
            }
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOKS",)
    CATEGORY = "advanced/hooks/create"
    FUNCTION = "create_hook"

    def create_hook(self, ckpt_name: str, strength_model: float, strength_clip: float,
                    prev_hooks: ldm_patched.hooks.HookGroup=None):
        """Load (or reuse cached) checkpoint weights and return prev_hooks plus a model-as-LoRA hook.

        Returns a 1-tuple containing a HookGroup; the caller's prev_hooks is never mutated.
        """
        if prev_hooks is None:
            prev_hooks = ldm_patched.hooks.HookGroup()
        # BUGFIX: the clone result was previously discarded (bare `prev_hooks.clone()`);
        # rebind it, matching the pattern used by CreateHookKeyframe.
        prev_hooks = prev_hooks.clone()

        ckpt_path = ldm_patched.utils.path_utils.get_full_path("checkpoints", ckpt_name)
        weights_model = None
        weights_clip = None
        if self.loaded_weights is not None:
            if self.loaded_weights[0] == ckpt_path:
                # Cache hit: reuse the previously extracted patch weights.
                weights_model = self.loaded_weights[1]
                weights_clip = self.loaded_weights[2]
            else:
                # Different checkpoint requested: drop the stale cache entry.
                temp = self.loaded_weights
                self.loaded_weights = None
                del temp

        if weights_model is None:
            out = ldm_patched.modules.sd.load_checkpoint_guess_config(
                ckpt_path, output_vae=True, output_clip=True,
                embedding_directory=ldm_patched.utils.path_utils.get_folder_paths("embeddings"))
            weights_model = ldm_patched.hooks.get_patch_weights_from_model(out[0])
            # out[1] may be None when the checkpoint carries no CLIP; pass it through as-is.
            weights_clip = ldm_patched.hooks.get_patch_weights_from_model(out[1].patcher if out[1] else out[1])
            self.loaded_weights = (ckpt_path, weights_model, weights_clip)

        hooks = ldm_patched.hooks.create_hook_model_as_lora(
            weights_model=weights_model, weights_clip=weights_clip,
            strength_model=strength_model, strength_clip=strength_clip)
        return (prev_hooks.clone_and_combine(hooks),)
414
+
415
class CreateHookModelAsLoraModelOnly(CreateHookModelAsLora):
    """CreateHookModelAsLora variant that targets the model only (clip strength 0)."""
    NodeId = 'CreateHookModelAsLoraModelOnly'
    NodeName = 'Create Hook Model as LoRA (MO)'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "ckpt_name": (ldm_patched.utils.path_utils.get_filename_list("checkpoints"), ),
                "strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
            },
            "optional": {
                "prev_hooks": ("HOOKS",)
            }
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOKS",)
    CATEGORY = "advanced/hooks/create"
    FUNCTION = "create_hook_model_only"

    def create_hook_model_only(self, ckpt_name: str, strength_model: float,
                               prev_hooks: ldm_patched.hooks.HookGroup=None):
        """Forward to the parent implementation with strength_clip fixed at 0.0."""
        return self.create_hook(ckpt_name=ckpt_name, strength_model=strength_model,
                                strength_clip=0.0, prev_hooks=prev_hooks)
438
+ #------------------------------------------
439
+ ###########################################
440
+
441
+
442
+ ###########################################
443
+ # Schedule Hooks
444
+ #------------------------------------------
445
class SetHookKeyframes:
    """Attach a keyframe schedule to every hook in a hook group."""
    NodeId = 'SetHookKeyframes'
    NodeName = 'Set Hook Keyframes'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "hooks": ("HOOKS",),
            },
            "optional": {
                "hook_kf": ("HOOK_KEYFRAMES",),
            }
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOKS",)
    CATEGORY = "advanced/hooks/scheduling"
    FUNCTION = "set_hook_keyframes"

    def set_hook_keyframes(self, hooks: ldm_patched.hooks.HookGroup, hook_kf: ldm_patched.hooks.HookKeyframeGroup=None):
        """Clone the group and apply the keyframes; with no keyframes, pass hooks through unchanged."""
        if hook_kf is None:
            return (hooks,)
        hooks = hooks.clone()
        hooks.set_keyframes_on_hooks(hook_kf=hook_kf)
        return (hooks,)
469
+
470
class CreateHookKeyframe:
    """Append one strength keyframe to a (possibly new) hook keyframe group."""
    NodeId = 'CreateHookKeyframe'
    NodeName = 'Create Hook Keyframe'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "strength_mult": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
                "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
            },
            "optional": {
                "prev_hook_kf": ("HOOK_KEYFRAMES",),
            }
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOK_KEYFRAMES",)
    RETURN_NAMES = ("HOOK_KF",)
    CATEGORY = "advanced/hooks/scheduling"
    FUNCTION = "create_hook_keyframe"

    def create_hook_keyframe(self, strength_mult: float, start_percent: float, prev_hook_kf: ldm_patched.hooks.HookKeyframeGroup=None):
        """Clone (or create) the keyframe group and add one keyframe at start_percent."""
        group = prev_hook_kf if prev_hook_kf is not None else ldm_patched.hooks.HookKeyframeGroup()
        group = group.clone()
        group.add(ldm_patched.hooks.HookKeyframe(strength=strength_mult, start_percent=start_percent))
        return (group,)
498
+
499
class CreateHookKeyframesInterpolated:
    """Append a series of keyframes whose strengths are interpolated across a percent range."""
    NodeId = 'CreateHookKeyframesInterpolated'
    NodeName = 'Create Hook Keyframes Interp.'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "strength_start": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ),
                "strength_end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ),
                "interpolation": (ldm_patched.hooks.InterpolationMethod._LIST, ),
                "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                "keyframes_count": ("INT", {"default": 5, "min": 2, "max": 100, "step": 1}),
                "print_keyframes": ("BOOLEAN", {"default": False}),
            },
            "optional": {
                "prev_hook_kf": ("HOOK_KEYFRAMES",),
            },
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOK_KEYFRAMES",)
    RETURN_NAMES = ("HOOK_KF",)
    CATEGORY = "advanced/hooks/scheduling"
    FUNCTION = "create_hook_keyframes"

    def create_hook_keyframes(self, strength_start: float, strength_end: float, interpolation: str,
                              start_percent: float, end_percent: float, keyframes_count: int,
                              print_keyframes=False, prev_hook_kf: ldm_patched.hooks.HookKeyframeGroup=None):
        """Clone (or create) the group, then add keyframes_count interpolated keyframes."""
        group = prev_hook_kf if prev_hook_kf is not None else ldm_patched.hooks.HookKeyframeGroup()
        group = group.clone()
        # Percents are always spaced linearly; only strengths use the chosen interpolation.
        percents = ldm_patched.hooks.InterpolationMethod.get_weights(
            num_from=start_percent, num_to=end_percent, length=keyframes_count,
            method=ldm_patched.hooks.InterpolationMethod.LINEAR)
        strengths = ldm_patched.hooks.InterpolationMethod.get_weights(
            num_from=strength_start, num_to=strength_end, length=keyframes_count, method=interpolation)

        for idx, (percent, strength) in enumerate(zip(percents, strengths)):
            # The first keyframe is guaranteed to run for at least one step.
            guarantee_steps = 1 if idx == 0 else 0
            group.add(ldm_patched.hooks.HookKeyframe(
                strength=strength, start_percent=percent, guarantee_steps=guarantee_steps))
            if print_keyframes:
                logging.info(f"Hook Keyframe - start_percent:{percent} = {strength}")
        return (group,)
545
+
546
class CreateHookKeyframesFromFloats:
    """Append keyframes whose strengths come from an explicit list of floats."""
    NodeId = 'CreateHookKeyframesFromFloats'
    NodeName = 'Create Hook Keyframes From Floats'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "floats_strength": ("FLOATS", {"default": -1, "min": -1, "step": 0.001, "forceInput": True}),
                "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                "print_keyframes": ("BOOLEAN", {"default": False}),
            },
            "optional": {
                "prev_hook_kf": ("HOOK_KEYFRAMES",),
            }
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOK_KEYFRAMES",)
    RETURN_NAMES = ("HOOK_KF",)
    CATEGORY = "advanced/hooks/scheduling"
    FUNCTION = "create_hook_keyframes"

    def create_hook_keyframes(self, floats_strength: Union[float, list[float]],
                              start_percent: float, end_percent: float,
                              prev_hook_kf: ldm_patched.hooks.HookKeyframeGroup=None, print_keyframes=False):
        """Clone (or create) the group and add one keyframe per strength value.

        Percents are spaced linearly from start_percent to end_percent, one per strength.
        Raises Exception when floats_strength is neither a float/int nor an iterable.
        """
        if prev_hook_kf is None:
            prev_hook_kf = ldm_patched.hooks.HookKeyframeGroup()
        prev_hook_kf = prev_hook_kf.clone()
        if type(floats_strength) in (float, int):
            floats_strength = [float(floats_strength)]
        elif isinstance(floats_strength, Iterable):
            pass
        else:
            # BUGFIX: the original message interpolated the unbound `__repr__` slot wrapper
            # (and lacked a space); report the actual type name instead.
            raise Exception(f"floats_strength must be either an iterable input or a float, but was {type(floats_strength).__name__}.")
        percents = ldm_patched.hooks.InterpolationMethod.get_weights(
            num_from=start_percent, num_to=end_percent, length=len(floats_strength),
            method=ldm_patched.hooks.InterpolationMethod.LINEAR)

        is_first = True
        for percent, strength in zip(percents, floats_strength):
            # The first keyframe is guaranteed to run for at least one step.
            guarantee_steps = 0
            if is_first:
                guarantee_steps = 1
                is_first = False
            prev_hook_kf.add(ldm_patched.hooks.HookKeyframe(strength=strength, start_percent=percent, guarantee_steps=guarantee_steps))
            if print_keyframes:
                logging.info(f"Hook Keyframe - start_percent:{percent} = {strength}")
        return (prev_hook_kf,)
594
+ #------------------------------------------
595
+ ###########################################
596
+
597
+
598
class SetModelHooksOnCond:
    """Manually attach a hook group directly to a conditioning."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "conditioning": ("CONDITIONING",),
                "hooks": ("HOOKS",),
            },
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("CONDITIONING",)
    CATEGORY = "advanced/hooks/manual"
    FUNCTION = "attach_hook"

    def attach_hook(self, conditioning, hooks: ldm_patched.hooks.HookGroup):
        """Delegate to set_hooks_for_conditioning."""
        result = ldm_patched.hooks.set_hooks_for_conditioning(conditioning, hooks)
        return (result,)
615
+
616
+
617
+ ###########################################
618
+ # Combine Hooks
619
+ #------------------------------------------
620
class CombineHooks:
    """Merge up to two optional HookGroups into one combined group."""
    NodeId = 'CombineHooks2'
    NodeName = 'Combine Hooks [2]'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
            },
            "optional": {
                "hooks_A": ("HOOKS",),
                "hooks_B": ("HOOKS",),
            }
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOKS",)
    CATEGORY = "advanced/hooks/combine"
    FUNCTION = "combine_hooks"

    def combine_hooks(self,
                      hooks_A: ldm_patched.hooks.HookGroup=None,
                      hooks_B: ldm_patched.hooks.HookGroup=None):
        # None entries are tolerated by combine_all_hooks.
        groups = [hooks_A, hooks_B]
        return (ldm_patched.hooks.HookGroup.combine_all_hooks(groups),)
644
+
645
class CombineHooksFour:
    """Merge up to four optional HookGroups into one combined group."""
    NodeId = 'CombineHooks4'
    NodeName = 'Combine Hooks [4]'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
            },
            "optional": {
                "hooks_A": ("HOOKS",),
                "hooks_B": ("HOOKS",),
                "hooks_C": ("HOOKS",),
                "hooks_D": ("HOOKS",),
            }
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOKS",)
    CATEGORY = "advanced/hooks/combine"
    FUNCTION = "combine_hooks"

    def combine_hooks(self,
                      hooks_A: ldm_patched.hooks.HookGroup=None,
                      hooks_B: ldm_patched.hooks.HookGroup=None,
                      hooks_C: ldm_patched.hooks.HookGroup=None,
                      hooks_D: ldm_patched.hooks.HookGroup=None):
        # None entries are tolerated by combine_all_hooks.
        groups = [hooks_A, hooks_B, hooks_C, hooks_D]
        return (ldm_patched.hooks.HookGroup.combine_all_hooks(groups),)
673
+
674
class CombineHooksEight:
    """Merge up to eight optional HookGroups into one combined group."""
    NodeId = 'CombineHooks8'
    NodeName = 'Combine Hooks [8]'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
            },
            "optional": {
                "hooks_A": ("HOOKS",),
                "hooks_B": ("HOOKS",),
                "hooks_C": ("HOOKS",),
                "hooks_D": ("HOOKS",),
                "hooks_E": ("HOOKS",),
                "hooks_F": ("HOOKS",),
                "hooks_G": ("HOOKS",),
                "hooks_H": ("HOOKS",),
            }
        }

    EXPERIMENTAL = True
    RETURN_TYPES = ("HOOKS",)
    CATEGORY = "advanced/hooks/combine"
    FUNCTION = "combine_hooks"

    def combine_hooks(self,
                      hooks_A: ldm_patched.hooks.HookGroup=None,
                      hooks_B: ldm_patched.hooks.HookGroup=None,
                      hooks_C: ldm_patched.hooks.HookGroup=None,
                      hooks_D: ldm_patched.hooks.HookGroup=None,
                      hooks_E: ldm_patched.hooks.HookGroup=None,
                      hooks_F: ldm_patched.hooks.HookGroup=None,
                      hooks_G: ldm_patched.hooks.HookGroup=None,
                      hooks_H: ldm_patched.hooks.HookGroup=None):
        # None entries are tolerated by combine_all_hooks.
        groups = [hooks_A, hooks_B, hooks_C, hooks_D, hooks_E, hooks_F, hooks_G, hooks_H]
        return (ldm_patched.hooks.HookGroup.combine_all_hooks(groups),)
710
+ #------------------------------------------
711
+ ###########################################
712
+
713
node_list = [
    # Create
    CreateHookLora,
    CreateHookLoraModelOnly,
    CreateHookModelAsLora,
    CreateHookModelAsLoraModelOnly,
    # Scheduling
    SetHookKeyframes,
    CreateHookKeyframe,
    CreateHookKeyframesInterpolated,
    CreateHookKeyframesFromFloats,
    # Combine
    CombineHooks,
    CombineHooksFour,
    CombineHooksEight,
    # Attach
    ConditioningSetProperties,
    ConditioningSetPropertiesAndCombine,
    PairConditioningSetProperties,
    PairConditioningSetPropertiesAndCombine,
    ConditioningSetDefaultAndCombine,
    PairConditioningSetDefaultAndCombine,
    PairConditioningCombine,
    SetClipHooks,
    # Other
    ConditioningTimestepsRange,
]
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
# Registries consumed by the node loader: NodeId -> class, NodeId -> display name.
NODE_CLASS_MAPPINGS = {node_cls.NodeId: node_cls for node_cls in node_list}
NODE_DISPLAY_NAME_MAPPINGS = {node_cls.NodeId: node_cls.NodeName for node_cls in node_list}
ldm_patched/contrib/nodes_hunyuan.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import nodes
2
+ import ldm_patched.contrib.node_helpers
3
+ import torch
4
+ import ldm_patched.modules.model_management
5
+
6
+
7
class CLIPTextEncodeHunyuanDiT:
    """Encode separate BERT and mT5-XL prompts into HunyuanDiT conditioning."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "clip": ("CLIP", ),
            "bert": ("STRING", {"multiline": True, "dynamicPrompts": True}),
            "mt5xl": ("STRING", {"multiline": True, "dynamicPrompts": True}),
            }}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"
    CATEGORY = "advanced/conditioning"

    def encode(self, clip, bert, mt5xl):
        # Tokenize both prompts, then graft the mT5-XL token stream onto the
        # BERT tokens so one scheduled encode covers both text encoders.
        merged_tokens = clip.tokenize(bert)
        merged_tokens["mt5xl"] = clip.tokenize(mt5xl)["mt5xl"]
        return (clip.encode_from_tokens_scheduled(merged_tokens), )
25
+
26
class EmptyHunyuanLatentVideo:
    """Produce an all-zero HunyuanVideo latent of the requested size."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 848, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                              "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                              "length": ("INT", {"default": 25, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"
    CATEGORY = "latent/video"

    def generate(self, width, height, length, batch_size=1):
        # 16 latent channels; the time axis is compressed 4x (plus the first frame),
        # spatial axes 8x — mirrors the VAE's compression factors used elsewhere here.
        time_steps = ((length - 1) // 4) + 1
        shape = [batch_size, 16, time_steps, height // 8, width // 8]
        samples = torch.zeros(shape, device=ldm_patched.modules.model_management.intermediate_device())
        return ({"samples": samples}, )
41
+
42
# Llama-chat-style template used when encoding HunyuanVideo image-to-video prompts.
# "{}" is filled with the user prompt by the tokenizer; the "<image>" marker
# presumably indicates where reference-image embeddings are interleaved —
# confirm against the clip.tokenize(llama_template=...) implementation.
PROMPT_TEMPLATE_ENCODE_VIDEO_I2V = (
    "<|start_header_id|>system<|end_header_id|>\n\n<image>\nDescribe the video by detailing the following aspects according to the reference image: "
    "1. The main content and theme of the video."
    "2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects."
    "3. Actions, events, behaviors temporal relationships, physical movement changes of the objects."
    "4. background environment, light, style and atmosphere."
    "5. camera angles, movements, and transitions used in the video:<|eot_id|>\n\n"
    "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>"
    "<|start_header_id|>assistant<|end_header_id|>\n\n"
)
52
+
53
class TextEncodeHunyuanVideo_ImageToVideo:
    """Encode an I2V text prompt together with CLIP-vision image embeddings."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "clip": ("CLIP", ),
            "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
            "prompt": ("STRING", {"multiline": True, "dynamicPrompts": True}),
            "image_interleave": ("INT", {"default": 2, "min": 1, "max": 512, "tooltip": "How much the image influences things vs the text prompt. Higher number means more influence from the text prompt."}),
            }}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"
    CATEGORY = "advanced/conditioning"

    def encode(self, clip, clip_vision_output, prompt, image_interleave):
        # Tokenize with the shared I2V chat template, injecting the projected
        # image embeddings at the tokenizer level.
        tokens = clip.tokenize(
            prompt,
            llama_template=PROMPT_TEMPLATE_ENCODE_VIDEO_I2V,
            image_embeds=clip_vision_output.mm_projected,
            image_interleave=image_interleave,
        )
        return (clip.encode_from_tokens_scheduled(tokens), )
70
+
71
class HunyuanImageToVideo:
    """Build the positive conditioning and initial latent for HunyuanVideo
    image-to-video generation, optionally guided by a start image."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING", ),
                             "vae": ("VAE", ),
                             "width": ("INT", {"default": 848, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                             "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                             "length": ("INT", {"default": 53, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}),
                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
                             "guidance_type": (["v1 (concat)", "v2 (replace)", "custom"], )
                             },
                "optional": {"start_image": ("IMAGE", ),
                             }}

    RETURN_TYPES = ("CONDITIONING", "LATENT")
    RETURN_NAMES = ("positive", "latent")
    FUNCTION = "encode"

    CATEGORY = "conditioning/video_models"

    def encode(self, positive, vae, width, height, length, batch_size, guidance_type, start_image=None):
        # Zero latent: 16 channels, time compressed 4x (plus first frame), space 8x.
        latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=ldm_patched.modules.model_management.intermediate_device())
        out_latent = {}

        if start_image is not None:
            # Resize the start image(s) to the target resolution, dropping alpha.
            # NOTE(review): relies on ldm_patched.modules.utils, which this module
            # does not visibly import — verify it is imported transitively.
            start_image = ldm_patched.modules.utils.common_upscale(start_image[:length, :, :, :3].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)

            concat_latent_image = vae.encode(start_image)
            # Mask is 0 over frames covered by the start image, 1 elsewhere.
            mask = torch.ones((1, 1, latent.shape[2], concat_latent_image.shape[-2], concat_latent_image.shape[-1]), device=start_image.device, dtype=start_image.dtype)
            mask[:, :, :((start_image.shape[0] - 1) // 4) + 1] = 0.0

            if guidance_type == "v1 (concat)":
                # v1: image guidance is concatenated onto the model input.
                cond = {"concat_latent_image": concat_latent_image, "concat_mask": mask}
            elif guidance_type == "v2 (replace)":
                # v2: first latent frames are replaced in place; mask gates the noise.
                cond = {'guiding_frame_index': 0}
                latent[:, :, :concat_latent_image.shape[2]] = concat_latent_image
                out_latent["noise_mask"] = mask
            elif guidance_type == "custom":
                cond = {"ref_latent": concat_latent_image}

            positive = ldm_patched.contrib.node_helpers.conditioning_set_values(positive, cond)

        out_latent["samples"] = latent
        return (positive, out_latent)
115
+
116
+
117
+
118
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
# Node registry consumed by the loader: exported node name -> node class.
NODE_CLASS_MAPPINGS = {
    "CLIPTextEncodeHunyuanDiT": CLIPTextEncodeHunyuanDiT,
    "TextEncodeHunyuanVideo_ImageToVideo": TextEncodeHunyuanVideo_ImageToVideo,
    "EmptyHunyuanLatentVideo": EmptyHunyuanLatentVideo,
    "HunyuanImageToVideo": HunyuanImageToVideo,
}
ldm_patched/contrib/nodes_hunyuan3d.py ADDED
@@ -0,0 +1,635 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import os
import struct

import numpy as np
import torch

import folder_paths
import ldm_patched.modules.model_management
import ldm_patched.modules.utils
from ldm_patched.ldm.modules.diffusionmodules.mmdit import get_1d_sincos_pos_embed_from_grid_torch
from ldm_patched.modules.args_parser import args
10
+
11
+
12
class EmptyLatentHunyuan3Dv2:
    """Zero-initialized Hunyuan3D v2 latent: 64 channels by `resolution` tokens."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"resolution": ("INT", {"default": 3072, "min": 1, "max": 8192}),
                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."}),
                             }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"
    CATEGORY = "latent/3d"

    def generate(self, resolution, batch_size):
        # Tag the latent dict with its type so downstream nodes can dispatch on it.
        samples = torch.zeros([batch_size, 64, resolution], device=ldm_patched.modules.model_management.intermediate_device())
        return ({"samples": samples, "type": "hunyuan3dv2"}, )
26
+
27
+
28
class Hunyuan3Dv2Conditioning:
    """Turn a CLIP-vision output into a positive/negative conditioning pair."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"clip_vision_output": ("CLIP_VISION_OUTPUT",),
                             }}

    RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
    RETURN_NAMES = ("positive", "negative")

    FUNCTION = "encode"

    CATEGORY = "conditioning/video_models"

    def encode(self, clip_vision_output):
        # Positive carries the hidden states; negative is a same-shape zero tensor.
        hidden = clip_vision_output.last_hidden_state
        positive = [[hidden, {}]]
        negative = [[torch.zeros_like(hidden), {}]]
        return (positive, negative)
46
+
47
+
48
class Hunyuan3Dv2ConditioningMultiView:
    """Build conditioning from up to four view embeddings (front/left/back/right),
    tagging each with a sin-cos positional embedding for its view slot."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {},
                "optional": {"front": ("CLIP_VISION_OUTPUT",),
                             "left": ("CLIP_VISION_OUTPUT",),
                             "back": ("CLIP_VISION_OUTPUT",),
                             "right": ("CLIP_VISION_OUTPUT",), }}

    RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
    RETURN_NAMES = ("positive", "negative")

    FUNCTION = "encode"

    CATEGORY = "conditioning/video_models"

    def encode(self, front=None, left=None, back=None, right=None):
        views = [front, left, back, right]
        tagged = []
        view_pos = None
        for slot, view in enumerate(views):
            if view is None:
                continue
            hidden = view.last_hidden_state
            if view_pos is None:
                # One positional row per possible view slot (4 total), sized to
                # the embedding width of the first provided view.
                view_pos = get_1d_sincos_pos_embed_from_grid_torch(hidden.shape[-1], torch.arange(4))
            tagged.append(hidden + view_pos[slot].reshape(1, 1, -1))

        embeds = torch.cat(tagged, dim=1)
        positive = [[embeds, {}]]
        negative = [[torch.zeros_like(embeds), {}]]
        return (positive, negative)
78
+
79
+
80
class VOXEL:
    """Thin wrapper carrying a batch of decoded voxel grids between nodes."""
    def __init__(self, data):
        # data: iterable of per-sample 3D occupancy grids (see VAEDecodeHunyuan3D).
        self.data = data
83
+
84
+
85
class VAEDecodeHunyuan3D:
    """Decode Hunyuan3D latents into a VOXEL occupancy grid via the VAE."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT", ),
                             "vae": ("VAE", ),
                             "num_chunks": ("INT", {"default": 8000, "min": 1000, "max": 500000}),
                             "octree_resolution": ("INT", {"default": 256, "min": 16, "max": 512}),
                             }}

    RETURN_TYPES = ("VOXEL",)
    FUNCTION = "decode"
    CATEGORY = "latent/3d"

    def decode(self, vae, samples, num_chunks, octree_resolution):
        # Forward the chunking / octree options straight to the VAE decoder.
        options = {"num_chunks": num_chunks, "octree_resolution": octree_resolution}
        decoded = vae.decode(samples["samples"], vae_options=options)
        return (VOXEL(decoded), )
101
+
102
+
103
def voxel_to_mesh(voxels, threshold=0.5, device=None):
    """Convert a 3D occupancy grid into a blocky triangle mesh.

    Every solid voxel contributes one quad (two triangles) for each of its six
    faces that borders an empty cell. Vertices are centered, scaled into
    roughly [-1, 1] by the largest grid dimension, and column-flipped to match
    the viewer's axis convention.

    Returns (vertices, faces) tensors; a degenerate single-vertex mesh when no
    voxel exceeds the threshold.
    """
    if device is None:
        device = torch.device("cpu")
    voxels = voxels.to(device)

    occupancy = (voxels > threshold).float()
    # Zero-pad one cell on every side so boundary voxels see "empty" neighbors.
    padded = torch.nn.functional.pad(occupancy, (1, 1, 1, 1, 1, 1), 'constant', 0)

    depth, height, width = occupancy.shape

    # Outward normal direction for each of the six cube faces.
    face_dirs = torch.tensor([
        [0, 0, 1],
        [0, 0, -1],
        [0, 1, 0],
        [0, -1, 0],
        [1, 0, 0],
        [-1, 0, 0]
    ], device=device)

    zz, yy, xx = torch.meshgrid(
        torch.arange(depth, device=device),
        torch.arange(height, device=device),
        torch.arange(width, device=device),
        indexing='ij'
    )
    all_coords = torch.stack([zz.flatten(), yy.flatten(), xx.flatten()], dim=1)

    solid_coords = all_coords[occupancy.flatten() > 0]

    # Quad corner offsets per face, wound consistently for outward-facing triangles.
    quad_corners = [
        torch.tensor([
            [0, 0, 1], [0, 1, 1], [1, 1, 1], [1, 0, 1]
        ], device=device),
        torch.tensor([
            [0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]
        ], device=device),
        torch.tensor([
            [0, 1, 0], [1, 1, 0], [1, 1, 1], [0, 1, 1]
        ], device=device),
        torch.tensor([
            [0, 0, 0], [0, 0, 1], [1, 0, 1], [1, 0, 0]
        ], device=device),
        torch.tensor([
            [1, 0, 1], [1, 1, 1], [1, 1, 0], [1, 0, 0]
        ], device=device),
        torch.tensor([
            [0, 1, 0], [0, 1, 1], [0, 0, 1], [0, 0, 0]
        ], device=device)
    ]

    vert_chunks = []
    tri_chunks = []
    next_vertex = 0

    for face_idx, direction in enumerate(face_dirs):
        # +1 compensates for the one-cell padding offset.
        neighbor_in_padded = solid_coords + direction + 1

        exposed = padded[
            neighbor_in_padded[:, 0],
            neighbor_in_padded[:, 1],
            neighbor_in_padded[:, 2]
        ] == 0

        if not exposed.any():
            continue

        exposed_coords = solid_coords[exposed]

        # One quad (4 vertices) per exposed face.
        quads = exposed_coords.unsqueeze(1) + quad_corners[face_idx].unsqueeze(0)
        vert_chunks.append(quads.reshape(-1, 3))

        n_quads = exposed_coords.shape[0]
        idx = torch.arange(
            next_vertex,
            next_vertex + 4 * n_quads,
            device=device
        ).reshape(-1, 4)

        # Split each quad into two triangles: (0,1,2) and (0,2,3).
        tri_chunks.append(torch.stack([idx[:, 0], idx[:, 1], idx[:, 2]], dim=1))
        tri_chunks.append(torch.stack([idx[:, 0], idx[:, 2], idx[:, 3]], dim=1))

        next_vertex += 4 * n_quads

    if len(vert_chunks) > 0:
        vertices = torch.cat(vert_chunks, dim=0)
        faces = torch.cat(tri_chunks, dim=0)
    else:
        vertices = torch.zeros((1, 3))
        faces = torch.zeros((1, 3))

    lo = 0
    hi = max(voxels.shape)

    # Center on the grid midpoint, then scale into roughly [-1, 1].
    vertices = vertices - (lo + hi) / 2

    half_span = (hi - lo) / 2
    if half_span > 0:
        vertices = vertices / half_span

    vertices = torch.fliplr(vertices)
    return vertices, faces
211
+
212
def voxel_to_mesh_surfnet(voxels, threshold=0.5, device=None):
    """Convert a 3D occupancy grid into a smooth mesh, SurfaceNets-style.

    For every grid cell whose 8 corners straddle the threshold, place one
    vertex at the mean of the edge/isosurface intersection points, then stitch
    neighboring cell vertices into quads (two triangles each), oriented by the
    local inside/outside gradient. Output vertices are centered, scaled into
    roughly [-1, 1], and column-flipped to match the viewer's axis convention.

    Returns (vertices, faces); empty tensors when no surface crosses the threshold.
    """
    if device is None:
        device = torch.device("cpu")
    voxels = voxels.to(device)

    D, H, W = voxels.shape

    # Zero-pad so boundary cells have well-defined corner samples.
    padded = torch.nn.functional.pad(voxels, (1, 1, 1, 1, 1, 1), 'constant', 0)
    z, y, x = torch.meshgrid(
        torch.arange(D, device=device),
        torch.arange(H, device=device),
        torch.arange(W, device=device),
        indexing='ij'
    )
    cell_positions = torch.stack([z.flatten(), y.flatten(), x.flatten()], dim=1)

    # The 8 corners of a unit cell, in (z, y, x) order.
    corner_offsets = torch.tensor([
        [0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0],
        [0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]
    ], device=device)

    # Sample the padded volume at every cell corner.
    corner_values = torch.zeros((cell_positions.shape[0], 8), device=device)
    for c, (dz, dy, dx) in enumerate(corner_offsets):
        corner_values[:, c] = padded[
            cell_positions[:, 0] + dz,
            cell_positions[:, 1] + dy,
            cell_positions[:, 2] + dx
        ]

    # A cell contains surface iff its corners are not all on one side.
    corner_signs = corner_values > threshold
    has_inside = torch.any(corner_signs, dim=1)
    has_outside = torch.any(~corner_signs, dim=1)
    contains_surface = has_inside & has_outside

    active_cells = cell_positions[contains_surface]
    active_signs = corner_signs[contains_surface]
    active_values = corner_values[contains_surface]

    if active_cells.shape[0] == 0:
        return torch.zeros((0, 3), device=device), torch.zeros((0, 3), dtype=torch.long, device=device)

    # The 12 cube edges as pairs of corner indices.
    edges = torch.tensor([
        [0, 1], [0, 2], [0, 4], [1, 3],
        [1, 5], [2, 3], [2, 6], [3, 7],
        [4, 5], [4, 6], [5, 7], [6, 7]
    ], device=device)

    cell_vertices = {}
    # NOTE(review): relies on ldm_patched.modules.utils being importable;
    # this module does not import it directly (see import block).
    progress = ldm_patched.modules.utils.ProgressBar(100)

    for edge_idx, (e1, e2) in enumerate(edges):
        progress.update(1)
        crossing = active_signs[:, e1] != active_signs[:, e2]
        if not crossing.any():
            continue

        cell_indices = torch.nonzero(crossing, as_tuple=True)[0]

        v1 = active_values[cell_indices, e1]
        v2 = active_values[cell_indices, e2]

        # Linear interpolation parameter along the edge; 0.5 when degenerate.
        t = torch.zeros_like(v1, device=device)
        denom = v2 - v1
        valid = denom != 0
        t[valid] = (threshold - v1[valid]) / denom[valid]
        t[~valid] = 0.5

        p1 = corner_offsets[e1].float()
        p2 = corner_offsets[e2].float()

        intersection = p1.unsqueeze(0) + t.unsqueeze(1) * (p2.unsqueeze(0) - p1.unsqueeze(0))

        for i, point in zip(cell_indices.tolist(), intersection):
            cell_vertices.setdefault(i, []).append(point)

    # Final vertex per cell = mean of that cell's intersection points.
    vertices = []
    vertex_lookup = {}

    # FIX: round(len/50) can be 0 for small meshes, which made
    # `i % vert_progress_mod` raise ZeroDivisionError; clamp to >= 1.
    vert_progress_mod = max(1, round(len(cell_vertices) / 50))

    for i, points in cell_vertices.items():
        if not i % vert_progress_mod:
            progress.update(1)

        if points:
            vertex = torch.stack(points).mean(dim=0)
            vertex = vertex + active_cells[i].float()
            vertex_lookup[tuple(active_cells[i].tolist())] = len(vertices)
            vertices.append(vertex)

    if not vertices:
        return torch.zeros((0, 3), device=device), torch.zeros((0, 3), dtype=torch.long, device=device)

    final_vertices = torch.stack(vertices)

    # Estimate an inside->outside gradient per cell from corner occupancy.
    inside_corners_mask = active_signs
    outside_corners_mask = ~active_signs

    inside_counts = inside_corners_mask.sum(dim=1, keepdim=True).float()
    outside_counts = outside_corners_mask.sum(dim=1, keepdim=True).float()

    inside_pos = torch.zeros((active_cells.shape[0], 3), device=device)
    outside_pos = torch.zeros((active_cells.shape[0], 3), device=device)

    for i in range(8):
        mask_inside = inside_corners_mask[:, i].unsqueeze(1)
        mask_outside = outside_corners_mask[:, i].unsqueeze(1)
        inside_pos += corner_offsets[i].float().unsqueeze(0) * mask_inside
        outside_pos += corner_offsets[i].float().unsqueeze(0) * mask_outside

    inside_pos /= inside_counts
    outside_pos /= outside_counts
    gradients = inside_pos - outside_pos

    pos_dirs = torch.tensor([
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1]
    ], device=device)

    cross_products = [
        torch.linalg.cross(pos_dirs[i].float(), pos_dirs[j].float())
        for i in range(3) for j in range(i + 1, 3)
    ]

    faces = []
    all_keys = set(vertex_lookup.keys())

    # FIX: same ZeroDivisionError clamp as above for the face progress counter.
    face_progress_mod = max(1, round(len(active_cells) / 38 * 3))

    # Stitch quads between each cell and its +i/+j/diagonal neighbors, for the
    # three axis pairs; triangle winding follows the gradient/cross alignment.
    for pair_idx, (i, j) in enumerate([(0, 1), (0, 2), (1, 2)]):
        dir_i = pos_dirs[i]
        dir_j = pos_dirs[j]
        cross_product = cross_products[pair_idx]

        ni_positions = active_cells + dir_i
        nj_positions = active_cells + dir_j
        diag_positions = active_cells + dir_i + dir_j

        alignments = torch.matmul(gradients, cross_product)

        valid_quads = []
        quad_indices = []

        for idx, active_cell in enumerate(active_cells):
            if not idx % face_progress_mod:
                progress.update(1)
            cell_key = tuple(active_cell.tolist())
            ni_key = tuple(ni_positions[idx].tolist())
            nj_key = tuple(nj_positions[idx].tolist())
            diag_key = tuple(diag_positions[idx].tolist())

            if cell_key in all_keys and ni_key in all_keys and nj_key in all_keys and diag_key in all_keys:
                v0 = vertex_lookup[cell_key]
                v1 = vertex_lookup[ni_key]
                v2 = vertex_lookup[nj_key]
                v3 = vertex_lookup[diag_key]

                valid_quads.append((v0, v1, v2, v3))
                quad_indices.append(idx)

        for q_idx, (v0, v1, v2, v3) in enumerate(valid_quads):
            cell_idx = quad_indices[q_idx]
            if alignments[cell_idx] > 0:
                faces.append(torch.tensor([v0, v1, v3], device=device, dtype=torch.long))
                faces.append(torch.tensor([v0, v3, v2], device=device, dtype=torch.long))
            else:
                faces.append(torch.tensor([v0, v3, v1], device=device, dtype=torch.long))
                faces.append(torch.tensor([v0, v2, v3], device=device, dtype=torch.long))

    if faces:
        faces = torch.stack(faces)
    else:
        faces = torch.zeros((0, 3), dtype=torch.long, device=device)

    v_min = 0
    v_max = max(D, H, W)

    # Center on the grid midpoint, then scale into roughly [-1, 1].
    final_vertices = final_vertices - (v_min + v_max) / 2

    scale = (v_max - v_min) / 2
    if scale > 0:
        final_vertices = final_vertices / scale

    final_vertices = torch.fliplr(final_vertices)

    return final_vertices, faces
402
+
403
class MESH:
    """Batched triangle mesh container produced by the VoxelToMesh nodes."""
    def __init__(self, vertices, faces):
        # vertices / faces: stacked per-sample tensors (see VoxelToMeshBasic) —
        # presumably (B, N, 3) vertex coords and (B, M, 3) index triples; confirm at callers.
        self.vertices = vertices
        self.faces = faces
407
+
408
+
409
class VoxelToMeshBasic:
    """Mesh each grid in a VOXEL batch with the blocky face-extraction algorithm."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"voxel": ("VOXEL", ),
                             "threshold": ("FLOAT", {"default": 0.6, "min": -1.0, "max": 1.0, "step": 0.01}),
                             }}

    RETURN_TYPES = ("MESH",)
    FUNCTION = "decode"
    CATEGORY = "3d"

    def decode(self, voxel, threshold):
        vert_batches = []
        face_batches = []
        for grid in voxel.data:
            verts, tris = voxel_to_mesh(grid, threshold=threshold, device=None)
            vert_batches.append(verts)
            face_batches.append(tris)

        return (MESH(torch.stack(vert_batches), torch.stack(face_batches)), )
429
+
430
class VoxelToMesh:
    """Mesh each grid in a VOXEL batch with a selectable extraction algorithm."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"voxel": ("VOXEL", ),
                             "algorithm": (["surface net", "basic"], ),
                             "threshold": ("FLOAT", {"default": 0.6, "min": -1.0, "max": 1.0, "step": 0.01}),
                             }}

    RETURN_TYPES = ("MESH",)
    FUNCTION = "decode"
    CATEGORY = "3d"

    def decode(self, voxel, algorithm, threshold):
        # Dispatch table keyed by the dropdown values declared in INPUT_TYPES.
        mesh_function = {
            "basic": voxel_to_mesh,
            "surface net": voxel_to_mesh_surfnet,
        }[algorithm]

        vert_batches = []
        face_batches = []
        for grid in voxel.data:
            verts, tris = mesh_function(grid, threshold=threshold, device=None)
            vert_batches.append(verts)
            face_batches.append(tris)

        return (MESH(torch.stack(vert_batches), torch.stack(face_batches)), )
457
+
458
+
459
def save_glb(vertices, faces, filepath, metadata=None):
    """
    Write a triangle mesh to a binary glTF (.glb) file using only the stdlib.

    Parameters:
        vertices: torch.Tensor of shape (N, 3) - vertex coordinates
        faces: torch.Tensor of shape (M, 3) - triangle vertex indices
        filepath: str - output path (should end with .glb)
        metadata: optional dict stored under asset.extras

    Returns the filepath that was written.
    """

    # Move to CPU and the component types glTF expects.
    verts_np = vertices.cpu().numpy().astype(np.float32)
    tris_np = faces.cpu().numpy().astype(np.uint32)

    vert_bytes = verts_np.tobytes()
    index_bytes = tris_np.tobytes()

    def _pad4(buf, fill):
        # glTF requires 4-byte alignment for chunks and buffer views.
        return buf + fill * ((4 - (len(buf) % 4)) % 4)

    vert_padded = _pad4(vert_bytes, b'\x00')
    index_padded = _pad4(index_bytes, b'\x00')
    bin_payload = vert_padded + index_padded

    gltf = {
        "asset": {"version": "2.0", "generator": "ComfyUI"},
        "buffers": [
            {
                "byteLength": len(bin_payload)
            }
        ],
        "bufferViews": [
            {
                "buffer": 0,
                "byteOffset": 0,
                "byteLength": len(vert_bytes),
                "target": 34962  # ARRAY_BUFFER
            },
            {
                "buffer": 0,
                "byteOffset": len(vert_padded),
                "byteLength": len(index_bytes),
                "target": 34963  # ELEMENT_ARRAY_BUFFER
            }
        ],
        "accessors": [
            {
                "bufferView": 0,
                "byteOffset": 0,
                "componentType": 5126,  # FLOAT
                "count": len(verts_np),
                "type": "VEC3",
                "max": verts_np.max(axis=0).tolist(),
                "min": verts_np.min(axis=0).tolist()
            },
            {
                "bufferView": 1,
                "byteOffset": 0,
                "componentType": 5125,  # UNSIGNED_INT
                "count": tris_np.size,
                "type": "SCALAR"
            }
        ],
        "meshes": [
            {
                "primitives": [
                    {
                        "attributes": {
                            "POSITION": 0
                        },
                        "indices": 1,
                        "mode": 4  # TRIANGLES
                    }
                ]
            }
        ],
        "nodes": [
            {
                "mesh": 0
            }
        ],
        "scenes": [
            {
                "nodes": [0]
            }
        ],
        "scene": 0
    }

    if metadata is not None:
        gltf["asset"]["extras"] = metadata

    # JSON chunk is padded with spaces (not NUL) per the GLB spec.
    json_chunk = _pad4(json.dumps(gltf).encode('utf8'), b' ')

    # GLB layout: 12-byte header, then 8-byte chunk header + payload per chunk.
    total_length = 12 + 8 + len(json_chunk) + 8 + len(bin_payload)
    glb_header = struct.pack('<4sII', b'glTF', 2, total_length)
    json_chunk_header = struct.pack('<II', len(json_chunk), 0x4E4F534A)  # "JSON"
    bin_chunk_header = struct.pack('<II', len(bin_payload), 0x004E4942)  # "BIN\0"

    with open(filepath, 'wb') as out:
        out.write(glb_header)
        out.write(json_chunk_header)
        out.write(json_chunk)
        out.write(bin_chunk_header)
        out.write(bin_payload)

    return filepath
586
+
587
+
588
class SaveGLB:
    """Output node for saving a MESH batch as .glb files.

    NOTE(review): the actual file-writing loop below is commented out in this
    port, so `save` currently only resolves the output path and returns None
    (no files are written and no UI payload is produced) — confirm intent.
    """
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"mesh": ("MESH", ),
                             "filename_prefix": ("STRING", {"default": "mesh/ComfyUI"}), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, }

    RETURN_TYPES = ()
    FUNCTION = "save"

    OUTPUT_NODE = True

    CATEGORY = "3d"

    def save(self, mesh, filename_prefix, prompt=None, extra_pnginfo=None):
        # Resolve the counter-based output path via the shared helper.
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, folder_paths.get_output_directory())
        results = []

        # metadata = {}
        # if not args.disable_metadata:
        #     if prompt is not None:
        #         metadata["prompt"] = json.dumps(prompt)
        #     if extra_pnginfo is not None:
        #         for x in extra_pnginfo:
        #             metadata[x] = json.dumps(extra_pnginfo[x])

        # for i in range(mesh.vertices.shape[0]):
        #     f = f"(unknown)_{counter:05}_.glb"
        #     save_glb(mesh.vertices[i], mesh.faces[i], os.path.join(full_output_folder, f), metadata)
        #     results.append({
        #         "filename": f,
        #         "subfolder": subfolder,
        #         "type": "output"
        #     })
        #     counter += 1
        # return {"ui": {"3d": results}}
624
+
625
+
626
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
# Node registry consumed by the loader: exported node name -> node class.
NODE_CLASS_MAPPINGS = {
    "EmptyLatentHunyuan3Dv2": EmptyLatentHunyuan3Dv2,
    "Hunyuan3Dv2Conditioning": Hunyuan3Dv2Conditioning,
    "Hunyuan3Dv2ConditioningMultiView": Hunyuan3Dv2ConditioningMultiView,
    "VAEDecodeHunyuan3D": VAEDecodeHunyuan3D,
    "VoxelToMeshBasic": VoxelToMeshBasic,
    "VoxelToMesh": VoxelToMesh,
    "SaveGLB": SaveGLB,
}
ldm_patched/contrib/nodes_hypernetwork.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Original code from Comfy, https://github.com/comfyanonymous/ComfyUI
2
+
3
+
4
+
5
+ import ldm_patched.modules.utils
6
+ import ldm_patched.utils.path_utils
7
+ import torch
8
+
9
def load_hypernetwork_patch(path, strength):
    """Load an A1111-style hypernetwork checkpoint and build an attention patch.

    path: checkpoint file readable by load_torch_file.
    strength: multiplier applied to the hypernetwork's k/v residuals.

    Returns a callable patch object (usable as an attn1/attn2 patch), or None
    when the checkpoint uses an unsupported activation function.
    """
    sd = ldm_patched.modules.utils.load_torch_file(path, safe_load=True)
    activation_func = sd.get('activation_func', 'linear')
    is_layer_norm = sd.get('is_layer_norm', False)
    use_dropout = sd.get('use_dropout', False)
    activate_output = sd.get('activate_output', False)
    last_layer_dropout = sd.get('last_layer_dropout', False)

    valid_activation = {
        "linear": torch.nn.Identity,
        "relu": torch.nn.ReLU,
        "leakyrelu": torch.nn.LeakyReLU,
        "elu": torch.nn.ELU,
        "swish": torch.nn.Hardswish,
        "tanh": torch.nn.Tanh,
        "sigmoid": torch.nn.Sigmoid,
        "softsign": torch.nn.Softsign,
        "mish": torch.nn.Mish,
    }

    if activation_func not in valid_activation:
        print("Unsupported Hypernetwork format, if you report it I might implement it.", path, " ", activation_func, is_layer_norm, use_dropout, activate_output, last_layer_dropout)
        return None

    out = {}

    for d in sd:
        try:
            dim = int(d)
        except ValueError:
            # FIX: was a bare `except:` (would also swallow KeyboardInterrupt
            # etc.); only non-numeric keys — the hyperparameters read above —
            # should be skipped here.
            continue

        output = []
        for index in [0, 1]:  # index 0 -> k-projection MLP, 1 -> v-projection MLP
            attn_weights = sd[dim][index]
            keys = attn_weights.keys()

            # Layer names, in order, derived from the ".weight" entries.
            linears = [k[:-len(".weight")] for k in keys if k.endswith(".weight")]
            layers = []

            i = 0
            while i < len(linears):
                lin_name = linears[i]
                last_layer = (i == (len(linears) - 1))
                penultimate_layer = (i == (len(linears) - 2))

                lin_weight = attn_weights['{}.weight'.format(lin_name)]
                lin_bias = attn_weights['{}.bias'.format(lin_name)]
                layer = torch.nn.Linear(lin_weight.shape[1], lin_weight.shape[0])
                layer.load_state_dict({"weight": lin_weight, "bias": lin_bias})
                layers.append(layer)
                if activation_func != "linear":
                    if (not last_layer) or (activate_output):
                        layers.append(valid_activation[activation_func]())
                if is_layer_norm:
                    # LayerNorm parameters are stored as the next entry in the
                    # "linear" name list; consume it here.
                    i += 1
                    ln_name = linears[i]
                    ln_weight = attn_weights['{}.weight'.format(ln_name)]
                    ln_bias = attn_weights['{}.bias'.format(ln_name)]
                    ln = torch.nn.LayerNorm(ln_weight.shape[0])
                    ln.load_state_dict({"weight": ln_weight, "bias": ln_bias})
                    layers.append(ln)
                if use_dropout:
                    # Dropout is inert at inference but kept for state-dict parity.
                    if (not last_layer) and (not penultimate_layer or last_layer_dropout):
                        layers.append(torch.nn.Dropout(p=0.3))
                i += 1

            output.append(torch.nn.Sequential(*layers))
        out[dim] = torch.nn.ModuleList(output)

    class hypernetwork_patch:
        """Attention patch: adds scaled hypernetwork residuals to k and v."""
        def __init__(self, hypernet, strength):
            self.hypernet = hypernet
            self.strength = strength

        def __call__(self, q, k, v, extra_options):
            # Dispatch on the attention head dimension; layers without a
            # matching hypernetwork entry pass through unchanged.
            dim = k.shape[-1]
            if dim in self.hypernet:
                hn = self.hypernet[dim]
                k = k + hn[0](k) * self.strength
                v = v + hn[1](v) * self.strength

            return q, k, v

        def to(self, device):
            for d in self.hypernet.keys():
                self.hypernet[d] = self.hypernet[d].to(device)
            return self

    return hypernetwork_patch(out, strength)
+
100
class HypernetworkLoader:
    """Node that clones a model and attaches a hypernetwork attention patch."""
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "hypernetwork_name": (ldm_patched.utils.path_utils.get_filename_list("hypernetworks"), ),
                              "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "load_hypernetwork"

    CATEGORY = "loaders"

    def load_hypernetwork(self, model, hypernetwork_name, strength):
        # Resolve the checkpoint, then patch both self- and cross-attention
        # on a clone so the input model stays untouched.
        checkpoint_path = ldm_patched.utils.path_utils.get_full_path("hypernetworks", hypernetwork_name)
        patched_model = model.clone()
        attn_patch = load_hypernetwork_patch(checkpoint_path, strength)
        if attn_patch is not None:
            patched_model.set_model_attn1_patch(attn_patch)
            patched_model.set_model_attn2_patch(attn_patch)
        return (patched_model,)
+
121
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
# Node registry consumed by the loader: API name -> implementing class.
NODE_CLASS_MAPPINGS = {
    "HypernetworkLoader": HypernetworkLoader
}
ldm_patched/contrib/nodes_hypertile.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py
2
+
3
+ #Taken from: https://github.com/tfernd/HyperTile/
4
+
5
+ import math
6
+ from einops import rearrange
7
+ # Use torch rng for consistency across generations
8
+ from torch import randint
9
+
10
def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int:
    """Return value // d for a divisor d of *value* with d >= min_value.

    Up to *max_options* of the smallest qualifying divisors are considered;
    when more than one is available, one is picked with torch's RNG so the
    choice is reproducible under a fixed torch seed.
    """
    floor = min(min_value, value)

    # All divisors of `value` in [floor, value], ascending.
    candidates = [d for d in range(floor, value + 1) if value % d == 0]

    # Complementary factors of the first few candidates; never empty since
    # `value` itself always divides evenly.
    options = [value // d for d in candidates[:max_options]]

    if len(options) > 1:
        # NOTE(review): torch.randint's `high` is exclusive, so the last
        # option is never selected — preserved as-is from upstream.
        pick = randint(low=0, high=len(options) - 1, size=(1,)).item()
    else:
        pick = 0

    return options[pick]
+
25
class HyperTile:
    # Model patch implementing HyperTile (https://github.com/tfernd/HyperTile/):
    # self-attention cost grows quadratically with token count, so the latent
    # grid is regrouped into nh x nw tiles that attend independently, and the
    # attention output is scattered back into the original token order.
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "tile_size": ("INT", {"default": 256, "min": 1, "max": 2048}),
                              "swap_size": ("INT", {"default": 2, "min": 1, "max": 128}),
                              "max_depth": ("INT", {"default": 0, "min": 0, "max": 10}),
                              "scale_depth": ("BOOLEAN", {"default": False}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "model_patches"

    def patch(self, model, tile_size, swap_size, max_depth, scale_depth):
        # Tile size in latent cells: pixel sizes are 8x the latent resolution;
        # clamp so a tile is never smaller than 32 px (4 latent cells).
        latent_tile_size = max(32, tile_size) // 8
        # Tiling layout handed from hypertile_in to hypertile_out within a
        # single attention call; None means "no tiling was applied".
        self.temp = None

        def hypertile_in(q, k, v, extra_options):
            # Token count at this attention layer.
            model_chans = q.shape[-2]
            orig_shape = extra_options['original_shape']
            # Expected token counts for each UNet depth to be tiled (each
            # depth halves both spatial dims, shrinking the count 4x).
            apply_to = []
            for i in range(max_depth + 1):
                apply_to.append((orig_shape[-2] / (2 ** i)) * (orig_shape[-1] / (2 ** i)))

            if model_chans in apply_to:
                shape = extra_options["original_shape"]
                aspect_ratio = shape[-1] / shape[-2]

                # Recover the 2D (h, w) grid from the flattened token axis
                # using the latent aspect ratio.
                hw = q.size(1)
                h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio))

                # Optionally require larger tiles at deeper levels.
                factor = (2 ** apply_to.index(model_chans)) if scale_depth else 1
                nh = random_divisor(h, latent_tile_size * factor, swap_size)
                nw = random_divisor(w, latent_tile_size * factor, swap_size)

                if nh * nw > 1:
                    # Fold tiles into the batch dim so each tile attends alone.
                    q = rearrange(q, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw)
                    self.temp = (nh, nw, h, w)
                return q, k, v

            return q, k, v
        def hypertile_out(out, extra_options):
            if self.temp is not None:
                nh, nw, h, w = self.temp
                self.temp = None
                # Undo the tiling: unfold tiles from the batch dim, then
                # interleave them back into the original token layout.
                out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw)
                out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw)
            return out


        m = model.clone()
        m.set_model_attn1_patch(hypertile_in)
        m.set_model_attn1_output_patch(hypertile_out)
        return (m, )
+
82
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
# Node registry consumed by the loader: API name -> implementing class.
NODE_CLASS_MAPPINGS = {
    "HyperTile": HyperTile,
}
ldm_patched/contrib/nodes_images.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Original code from Comfy, https://github.com/comfyanonymous/ComfyUI
2
+
3
+
4
+
5
+ import ldm_patched.contrib.nodes
6
+ import ldm_patched.utils.path_utils
7
+ from ldm_patched.modules.args_parser import args
8
+
9
+ from PIL import Image
10
+ from PIL.PngImagePlugin import PngInfo
11
+
12
+ import numpy as np
13
+ import json
14
+ import os
15
+
16
+ MAX_RESOLUTION = ldm_patched.contrib.nodes.MAX_RESOLUTION
17
+
18
class ImageCrop:
    """Crop a rectangular region from a batch of images.

    Images are (batch, height, width, channels) tensors.  The crop origin is
    clamped so the slice always contains at least one pixel, and the region is
    truncated at the image border rather than padded.
    """
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",),
                              "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                              }}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "crop"

    CATEGORY = "image/transform"

    def crop(self, image, width, height, x, y):
        # Clamp the origin so at least one row/column stays inside the image.
        left = min(x, image.shape[2] - 1)
        top = min(y, image.shape[1] - 1)
        return (image[:, top:top + height, left:left + width, :],)
+
40
class RepeatImageBatch:
    """Tile an image batch `amount` times along the batch dimension."""
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",),
                              "amount": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "repeat"

    CATEGORY = "image/batch"

    def repeat(self, image, amount):
        # Repeat only dim 0; spatial and channel dims stay untouched.
        tiled = image.repeat((amount, 1, 1, 1))
        return (tiled,)
+
55
class SaveAnimatedWEBP:
    """Save a batch of images as one or more animated WEBP files."""
    def __init__(self):
        self.output_dir = ldm_patched.utils.path_utils.get_output_directory()
        self.type = "output"
        self.prefix_append = ""

    # UI choice -> Pillow `method` speed/quality trade-off (0=fast, 6=slow).
    methods = {"default": 4, "fastest": 0, "slowest": 6}
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ldm_patched"}),
                     "fps": ("FLOAT", {"default": 6.0, "min": 0.01, "max": 1000.0, "step": 0.01}),
                     "lossless": ("BOOLEAN", {"default": True}),
                     "quality": ("INT", {"default": 80, "min": 0, "max": 100}),
                     "method": (list(s.methods.keys()),),
                     # "num_frames": ("INT", {"default": 0, "min": 0, "max": 8192}),
                     },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image/animation"

    def save_images(self, images, fps, filename_prefix, lossless, quality, method, num_frames=0, prompt=None, extra_pnginfo=None):
        """Convert the (B, H, W, C) float batch to 8-bit PIL frames and write
        animated WEBP file(s) of `num_frames` frames each (0 = all in one).
        Returns the UI payload describing the saved files."""
        method = self.methods.get(method)
        filename_prefix += self.prefix_append
        full_output_folder, filename, counter, subfolder, filename_prefix = ldm_patched.utils.path_utils.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
        results = list()
        pil_images = []
        for image in images:
            # Floats in [0, 1] -> uint8 pixels.
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            pil_images.append(img)

        metadata = pil_images[0].getexif()
        if not args.disable_server_info:
            if prompt is not None:
                metadata[0x0110] = "prompt:{}".format(json.dumps(prompt))
            if extra_pnginfo is not None:
                # Pack extra info into descending Exif tag IDs starting at 0x010f.
                initial_exif = 0x010f
                for x in extra_pnginfo:
                    metadata[initial_exif] = "{}:{}".format(x, json.dumps(extra_pnginfo[x]))
                    initial_exif -= 1

        if num_frames == 0:
            num_frames = len(pil_images)

        c = len(pil_images)
        for i in range(0, c, num_frames):
            # BUG FIX: the filename stem was the literal placeholder "(unknown)"
            # and the computed `filename` went unused; interpolate it so saved
            # files follow the requested prefix pattern.
            file = f"{filename}_{counter:05}_.webp"
            pil_images[i].save(os.path.join(full_output_folder, file), save_all=True, duration=int(1000.0/fps), append_images=pil_images[i + 1:i + num_frames], exif=metadata, lossless=lossless, quality=quality, method=method)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        animated = num_frames != 1
        return { "ui": { "images": results, "animated": (animated,) } }
+
121
class SaveAnimatedPNG:
    """Save a batch of images as a single animated PNG (APNG)."""
    def __init__(self):
        self.output_dir = ldm_patched.utils.path_utils.get_output_directory()
        self.type = "output"
        self.prefix_append = ""

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ldm_patched"}),
                     "fps": ("FLOAT", {"default": 6.0, "min": 0.01, "max": 1000.0, "step": 0.01}),
                     "compress_level": ("INT", {"default": 4, "min": 0, "max": 9})
                     },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image/animation"

    def save_images(self, images, fps, compress_level, filename_prefix="ldm_patched", prompt=None, extra_pnginfo=None):
        """Convert the (B, H, W, C) float batch to 8-bit PIL frames and write
        one APNG; returns the UI payload describing the saved file."""
        filename_prefix += self.prefix_append
        full_output_folder, filename, counter, subfolder, filename_prefix = ldm_patched.utils.path_utils.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
        results = list()
        pil_images = []
        for image in images:
            # Floats in [0, 1] -> uint8 pixels.
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            pil_images.append(img)

        metadata = None
        if not args.disable_server_info:
            # Embed prompt/workflow info as private PNG chunks after IDAT.
            metadata = PngInfo()
            if prompt is not None:
                metadata.add(b"ldm_patched", "prompt".encode("latin-1", "strict") + b"\0" + json.dumps(prompt).encode("latin-1", "strict"), after_idat=True)
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add(b"ldm_patched", x.encode("latin-1", "strict") + b"\0" + json.dumps(extra_pnginfo[x]).encode("latin-1", "strict"), after_idat=True)

        # BUG FIX: the filename stem was the literal placeholder "(unknown)"
        # and the computed `filename` went unused; interpolate it so the saved
        # file follows the requested prefix pattern.
        file = f"{filename}_{counter:05}_.png"
        pil_images[0].save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=compress_level, save_all=True, duration=int(1000.0/fps), append_images=pil_images[1:])
        results.append({
            "filename": file,
            "subfolder": subfolder,
            "type": self.type
        })

        return { "ui": { "images": results, "animated": (True,)} }
+
174
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
# Node registry consumed by the loader: API name -> implementing class.
NODE_CLASS_MAPPINGS = {
    "ImageCrop": ImageCrop,
    "RepeatImageBatch": RepeatImageBatch,
    "SaveAnimatedWEBP": SaveAnimatedWEBP,
    "SaveAnimatedPNG": SaveAnimatedPNG,
}
ldm_patched/contrib/nodes_ip2p.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
class InstructPixToPixConditioning:
    """Prepare conditioning for InstructPix2Pix models: the VAE-encoded source
    image is attached to both positive and negative conditioning as a concat
    latent, and a zero latent of matching shape is returned for sampling."""
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING", ),
                             "negative": ("CONDITIONING", ),
                             "vae": ("VAE", ),
                             "pixels": ("IMAGE", ),
                             }}

    RETURN_TYPES = ("CONDITIONING","CONDITIONING","LATENT")
    RETURN_NAMES = ("positive", "negative", "latent")
    FUNCTION = "encode"

    CATEGORY = "conditioning/instructpix2pix"

    def encode(self, positive, negative, pixels, vae):
        # The VAE needs dimensions divisible by 8; center-crop the excess.
        crop_h = (pixels.shape[1] // 8) * 8
        crop_w = (pixels.shape[2] // 8) * 8

        if pixels.shape[1] != crop_h or pixels.shape[2] != crop_w:
            off_h = (pixels.shape[1] % 8) // 2
            off_w = (pixels.shape[2] % 8) // 2
            pixels = pixels[:, off_h:crop_h + off_h, off_w:crop_w + off_w, :]

        concat_latent = vae.encode(pixels)

        # Sampling starts from a zero latent; the image guidance enters
        # through the conditioning's concat_latent_image instead.
        out_latent = {"samples": torch.zeros_like(concat_latent)}

        updated = []
        for conditioning in (positive, negative):
            rebuilt = []
            for entry in conditioning:
                opts = entry[1].copy()
                opts["concat_latent_image"] = concat_latent
                rebuilt.append([entry[0], opts])
            updated.append(rebuilt)
        return (updated[0], updated[1], out_latent)
+
43
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
# Node registry consumed by the loader: API name -> implementing class.
NODE_CLASS_MAPPINGS = {
    "InstructPixToPixConditioning": InstructPixToPixConditioning,
}
ldm_patched/contrib/nodes_latent.py ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Original code from Comfy, https://github.com/comfyanonymous/ComfyUI
2
+
3
+
4
+
5
+ import ldm_patched.modules.utils
6
+ import ldm_patched.contrib.nodes_post_processing
7
+ import torch
8
+
9
def reshape_latent_to(target_shape, latent):
    # Make `latent` broadcast-compatible with `target_shape`: bilinearly
    # rescale the spatial dims when they differ, then repeat along the batch
    # dim until the batch sizes match.
    if latent.shape[1:] != target_shape[1:]:
        latent = ldm_patched.modules.utils.common_upscale(latent, target_shape[3], target_shape[2], "bilinear", "center")
    return ldm_patched.modules.utils.repeat_to_batch_size(latent, target_shape[0])
+
14
+
15
class LatentAdd:
    """Element-wise sum of two latent batches; samples2 is rescaled/repeated
    to match samples1's shape first."""
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples1": ("LATENT",), "samples2": ("LATENT",)}}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced"

    def op(self, samples1, samples2):
        result = samples1.copy()
        base = samples1["samples"]
        other = reshape_latent_to(base.shape, samples2["samples"])
        result["samples"] = base + other
        return (result,)
+
35
class LatentSubtract:
    """Element-wise difference of two latent batches (samples1 - samples2);
    samples2 is rescaled/repeated to match samples1's shape first."""
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples1": ("LATENT",), "samples2": ("LATENT",)}}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced"

    def op(self, samples1, samples2):
        result = samples1.copy()
        base = samples1["samples"]
        other = reshape_latent_to(base.shape, samples2["samples"])
        result["samples"] = base - other
        return (result,)
+
55
class LatentMultiply:
    """Scale a latent batch by a scalar multiplier."""
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "multiplier": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced"

    def op(self, samples, multiplier):
        # Shallow-copy the dict so auxiliary keys (masks, batch_index) are kept
        # while the input latent tensor itself is left unmodified.
        result = samples.copy()
        result["samples"] = samples["samples"] * multiplier
        return (result,)
+
74
class LatentInterpolate:
    """Blend two latent batches, interpolating direction and magnitude
    separately (slerp-like): ratio=1 keeps samples1, ratio=0 keeps samples2."""
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples1": ("LATENT",),
                              "samples2": ("LATENT",),
                              "ratio": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                              }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced"

    def op(self, samples1, samples2, ratio):
        result = samples1.copy()
        a = samples1["samples"]
        b = reshape_latent_to(a.shape, samples2["samples"])

        # Per-pixel magnitudes over the channel dim.
        norm_a = torch.linalg.vector_norm(a, dim=(1))
        norm_b = torch.linalg.vector_norm(b, dim=(1))

        # Unit directions; nan_to_num guards against zero-magnitude pixels.
        dir_a = torch.nan_to_num(a / norm_a)
        dir_b = torch.nan_to_num(b / norm_b)

        # Blend directions, renormalize, then restore a blended magnitude.
        blended = dir_a * ratio + dir_b * (1.0 - ratio)
        blended_norm = torch.linalg.vector_norm(blended, dim=(1))
        unit = torch.nan_to_num(blended / blended_norm)

        result["samples"] = unit * (norm_a * ratio + norm_b * (1.0 - ratio))
        return (result,)
+
108
class LatentBatch:
    """Concatenate two latent batches along the batch dim, rescaling samples2
    spatially if its H/W differ from samples1's, and merge batch indices."""
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples1": ("LATENT",), "samples2": ("LATENT",)}}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "batch"

    CATEGORY = "latent/batch"

    def batch(self, samples1, samples2):
        result = samples1.copy()
        first = samples1["samples"]
        second = samples2["samples"]

        if first.shape[1:] != second.shape[1:]:
            second = ldm_patched.modules.utils.common_upscale(second, first.shape[3], first.shape[2], "bilinear", "center")
        result["samples"] = torch.cat((first, second), dim=0)
        # Preserve explicit batch indices; default to each input's 0..B-1.
        result["batch_index"] = samples1.get("batch_index", list(range(first.shape[0]))) + samples2.get("batch_index", list(range(second.shape[0])))
        return (result,)
+
130
class LatentBatchSeedBehavior:
    """Control noise seeding across a latent batch: "fixed" makes every sample
    reuse the first batch index (same noise), "random" drops the indices so
    the sampler assigns fresh per-sample noise."""
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "seed_behavior": (["random", "fixed"],{"default": "fixed"}),}}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced"

    def op(self, samples, seed_behavior):
        result = samples.copy()
        batch = samples["samples"]
        if seed_behavior == "random":
            # Absent batch_index means per-sample noise downstream.
            result.pop('batch_index', None)
        elif seed_behavior == "fixed":
            first_index = result.get("batch_index", [0])[0]
            result["batch_index"] = [first_index] * batch.shape[0]

        return (result,)
+
153
class LatentApplyOperation:
    """Apply a LATENT_OPERATION callable to a latent batch and return the
    transformed copy."""
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "operation": ("LATENT_OPERATION",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "op"
    CATEGORY = "latent/advanced/operations"
    EXPERIMENTAL = True

    def op(self, samples, operation):
        result = samples.copy()
        # Operations take the tensor via the `latent` keyword.
        result["samples"] = operation(latent=samples["samples"])
        return (result,)
class LatentApplyOperationCFG:
    """Patch a model so a LATENT_OPERATION runs inside the sampler's pre-CFG
    step, shaping the guidance signal instead of the final latent."""
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "operation": ("LATENT_OPERATION",),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"
    CATEGORY = "latent/advanced/operations"
    EXPERIMENTAL = True

    def patch(self, model, operation):
        patched = model.clone()

        def pre_cfg_function(args):
            conds = args["conds_out"]
            if len(conds) == 2:
                # With cond+uncond present, operate on their difference (the
                # CFG direction) and add the uncond baseline back.
                conds[0] = operation(latent=(conds[0] - conds[1])) + conds[1]
            else:
                conds[0] = operation(latent=conds[0])
            return conds

        patched.set_model_sampler_pre_cfg_function(pre_cfg_function)
        return (patched, )
class LatentOperationTonemapReinhard:
    """Build a LATENT_OPERATION that applies Reinhard tonemapping to each
    latent pixel's channel-magnitude while keeping its direction."""
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("LATENT_OPERATION",)
    FUNCTION = "op"
    CATEGORY = "latent/advanced/operations"
    EXPERIMENTAL = True

    def op(self, multiplier):
        def tonemap_reinhard(latent, **kwargs):
            # Split each latent pixel into direction and magnitude (epsilon
            # avoids division by zero).
            magnitude = (torch.linalg.vector_norm(latent, dim=(1)) + 0.0000000001)[:, None]
            direction = latent / magnitude

            # Normalization scale: mean + 5*std of the magnitudes per batch.
            mean = torch.mean(magnitude, dim=(1, 2, 3), keepdim=True)
            std = torch.std(magnitude, dim=(1, 2, 3), keepdim=True)
            top = (std * 5 + mean) * multiplier

            # Reinhard curve x / (1 + x) on the normalized magnitude, then
            # rescale back.
            scaled = magnitude * (1.0 / top)
            remapped = scaled / (scaled + 1.0) * top
            return direction * remapped
        return (tonemap_reinhard,)
+
212
class LatentOperationSharpen:
    # Builds a LATENT_OPERATION performing unsharp-mask style sharpening on the
    # direction component of the latent, leaving per-pixel magnitude intact.
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "sharpen_radius": ("INT", {
                "default": 9,
                "min": 1,
                "max": 31,
                "step": 1
            }),
            "sigma": ("FLOAT", {
                "default": 1.0,
                "min": 0.1,
                "max": 10.0,
                "step": 0.1
            }),
            "alpha": ("FLOAT", {
                "default": 0.1,
                "min": 0.0,
                "max": 5.0,
                "step": 0.01
            }),
        }}
    RETURN_TYPES = ("LATENT_OPERATION",)
    FUNCTION = "op"
    CATEGORY = "latent/advanced/operations"
    EXPERIMENTAL = True
    def op(self, sharpen_radius, sigma, alpha):
        def sharpen(latent, **kwargs):
            # Separate magnitude (over channels) from direction so sharpening
            # only reshapes structure, not overall intensity.
            luminance = (torch.linalg.vector_norm(latent, dim=(1)) + 1e-6)[:,None]
            normalized_latent = latent / luminance
            channels = latent.shape[1]
            kernel_size = sharpen_radius * 2 + 1
            kernel = ldm_patched.contrib.nodes_post_processing.gaussian_kernel(kernel_size, sigma, device=luminance.device)
            center = kernel_size // 2
            # Negative Gaussian surround with a compensating center tap so the
            # kernel sums to 1 (identity plus high-pass boost scaled by alpha).
            kernel *= alpha * -10
            kernel[center, center] = kernel[center, center] - kernel.sum() + 1.0
            # Reflect-pad, run a depthwise conv, then crop back to size.
            padded_image = torch.nn.functional.pad(normalized_latent, (sharpen_radius,sharpen_radius,sharpen_radius,sharpen_radius), 'reflect')
            sharpened = torch.nn.functional.conv2d(padded_image, kernel.repeat(channels, 1, 1).unsqueeze(1), padding=kernel_size // 2, groups=channels)[:,:,sharpen_radius:-sharpen_radius, sharpen_radius:-sharpen_radius]
            return luminance * sharpened
        return (sharpen,)
+
254
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
# Node registry consumed by the loader: API name -> implementing class.
NODE_CLASS_MAPPINGS = {
    "LatentAdd": LatentAdd,
    "LatentSubtract": LatentSubtract,
    "LatentMultiply": LatentMultiply,
    "LatentInterpolate": LatentInterpolate,
    "LatentBatch": LatentBatch,
    "LatentBatchSeedBehavior": LatentBatchSeedBehavior,
    "LatentApplyOperation": LatentApplyOperation,
    "LatentApplyOperationCFG": LatentApplyOperationCFG,
    "LatentOperationTonemapReinhard": LatentOperationTonemapReinhard,
    "LatentOperationSharpen": LatentOperationSharpen,
}
ldm_patched/contrib/nodes_load_3d.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import nodes
2
+ # import folder_paths
3
+ # import os
4
+
5
+ # from comfy.comfy_types import IO
6
+ # from comfy_api.input_impl import VideoFromFile
7
+
8
+ # from pathlib import Path
9
+
10
+
11
+ # def normalize_path(path):
12
+ # return path.replace('\\', '/')
13
+
14
+ # class Load3D():
15
+ # @classmethod
16
+ # def INPUT_TYPES(s):
17
+ # input_dir = os.path.join(folder_paths.get_input_directory(), "3d")
18
+
19
+ # os.makedirs(input_dir, exist_ok=True)
20
+
21
+ # input_path = Path(input_dir)
22
+ # base_path = Path(folder_paths.get_input_directory())
23
+
24
+ # files = [
25
+ # normalize_path(str(file_path.relative_to(base_path)))
26
+ # for file_path in input_path.rglob("*")
27
+ # if file_path.suffix.lower() in {'.gltf', '.glb', '.obj', '.fbx', '.stl'}
28
+ # ]
29
+
30
+ # return {"required": {
31
+ # "model_file": (sorted(files), {"file_upload": True}),
32
+ # "image": ("LOAD_3D", {}),
33
+ # "width": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
34
+ # "height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
35
+ # }}
36
+
37
+ # RETURN_TYPES = ("IMAGE", "MASK", "STRING", "IMAGE", "IMAGE", "LOAD3D_CAMERA", IO.VIDEO)
38
+ # RETURN_NAMES = ("image", "mask", "mesh_path", "normal", "lineart", "camera_info", "recording_video")
39
+
40
+ # FUNCTION = "process"
41
+ # EXPERIMENTAL = True
42
+
43
+ # CATEGORY = "3d"
44
+
45
+ # def process(self, model_file, image, **kwargs):
46
+ # image_path = folder_paths.get_annotated_filepath(image['image'])
47
+ # mask_path = folder_paths.get_annotated_filepath(image['mask'])
48
+ # normal_path = folder_paths.get_annotated_filepath(image['normal'])
49
+ # lineart_path = folder_paths.get_annotated_filepath(image['lineart'])
50
+
51
+ # load_image_node = nodes.LoadImage()
52
+ # output_image, ignore_mask = load_image_node.load_image(image=image_path)
53
+ # ignore_image, output_mask = load_image_node.load_image(image=mask_path)
54
+ # normal_image, ignore_mask2 = load_image_node.load_image(image=normal_path)
55
+ # lineart_image, ignore_mask3 = load_image_node.load_image(image=lineart_path)
56
+
57
+ # video = None
58
+
59
+ # if image['recording'] != "":
60
+ # recording_video_path = folder_paths.get_annotated_filepath(image['recording'])
61
+
62
+ # video = VideoFromFile(recording_video_path)
63
+
64
+ # return output_image, output_mask, model_file, normal_image, lineart_image, image['camera_info'], video
65
+
66
+ # class Load3DAnimation():
67
+ # @classmethod
68
+ # def INPUT_TYPES(s):
69
+ # input_dir = os.path.join(folder_paths.get_input_directory(), "3d")
70
+
71
+ # os.makedirs(input_dir, exist_ok=True)
72
+
73
+ # input_path = Path(input_dir)
74
+ # base_path = Path(folder_paths.get_input_directory())
75
+
76
+ # files = [
77
+ # normalize_path(str(file_path.relative_to(base_path)))
78
+ # for file_path in input_path.rglob("*")
79
+ # if file_path.suffix.lower() in {'.gltf', '.glb', '.fbx'}
80
+ # ]
81
+
82
+ # return {"required": {
83
+ # "model_file": (sorted(files), {"file_upload": True}),
84
+ # "image": ("LOAD_3D_ANIMATION", {}),
85
+ # "width": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
86
+ # "height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
87
+ # }}
88
+
89
+ # RETURN_TYPES = ("IMAGE", "MASK", "STRING", "IMAGE", "LOAD3D_CAMERA", IO.VIDEO)
90
+ # RETURN_NAMES = ("image", "mask", "mesh_path", "normal", "camera_info", "recording_video")
91
+
92
+ # FUNCTION = "process"
93
+ # EXPERIMENTAL = True
94
+
95
+ # CATEGORY = "3d"
96
+
97
+ # def process(self, model_file, image, **kwargs):
98
+ # image_path = folder_paths.get_annotated_filepath(image['image'])
99
+ # mask_path = folder_paths.get_annotated_filepath(image['mask'])
100
+ # normal_path = folder_paths.get_annotated_filepath(image['normal'])
101
+
102
+ # load_image_node = nodes.LoadImage()
103
+ # output_image, ignore_mask = load_image_node.load_image(image=image_path)
104
+ # ignore_image, output_mask = load_image_node.load_image(image=mask_path)
105
+ # normal_image, ignore_mask2 = load_image_node.load_image(image=normal_path)
106
+
107
+ # video = None
108
+
109
+ # if image['recording'] != "":
110
+ # recording_video_path = folder_paths.get_annotated_filepath(image['recording'])
111
+
112
+ # video = VideoFromFile(recording_video_path)
113
+
114
+ # return output_image, output_mask, model_file, normal_image, image['camera_info'], video
115
+
116
+ # class Preview3D():
117
+ # @classmethod
118
+ # def INPUT_TYPES(s):
119
+ # return {"required": {
120
+ # "model_file": ("STRING", {"default": "", "multiline": False}),
121
+ # },
122
+ # "optional": {
123
+ # "camera_info": ("LOAD3D_CAMERA", {})
124
+ # }}
125
+
126
+ # OUTPUT_NODE = True
127
+ # RETURN_TYPES = ()
128
+
129
+ # CATEGORY = "3d"
130
+
131
+ # FUNCTION = "process"
132
+ # EXPERIMENTAL = True
133
+
134
+ # def process(self, model_file, **kwargs):
135
+ # camera_info = kwargs.get("camera_info", None)
136
+
137
+ # return {
138
+ # "ui": {
139
+ # "result": [model_file, camera_info]
140
+ # }
141
+ # }
142
+
143
+ # class Preview3DAnimation():
144
+ # @classmethod
145
+ # def INPUT_TYPES(s):
146
+ # return {"required": {
147
+ # "model_file": ("STRING", {"default": "", "multiline": False}),
148
+ # },
149
+ # "optional": {
150
+ # "camera_info": ("LOAD3D_CAMERA", {})
151
+ # }}
152
+
153
+ # OUTPUT_NODE = True
154
+ # RETURN_TYPES = ()
155
+
156
+ # CATEGORY = "3d"
157
+
158
+ # FUNCTION = "process"
159
+ # EXPERIMENTAL = True
160
+
161
+ # def process(self, model_file, **kwargs):
162
+ # camera_info = kwargs.get("camera_info", None)
163
+
164
+ # return {
165
+ # "ui": {
166
+ # "result": [model_file, camera_info]
167
+ # }
168
+ # }
169
+
170
+ # # Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
171
+ #NODE_CLASS_MAPPINGS = {
172
+ # "Load3D": Load3D,
173
+ # "Load3DAnimation": Load3DAnimation,
174
+ # "Preview3D": Preview3D,
175
+ # "Preview3DAnimation": Preview3DAnimation
176
+ # }
177
+
178
+ # NODE_DISPLAY_NAME_MAPPINGS = {
179
+ # "Load3D": "Load 3D",
180
+ # "Load3DAnimation": "Load 3D - Animation",
181
+ # "Preview3D": "Preview 3D",
182
+ # "Preview3DAnimation": "Preview 3D - Animation"
183
+ # }
ldm_patched/contrib/nodes_lotus.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import ldm_patched.modules.model_management as mm
3
+
4
+ class LotusConditioning:
5
+ @classmethod
6
+ def INPUT_TYPES(s):
7
+ return {
8
+ "required": {
9
+ },
10
+ }
11
+
12
+ RETURN_TYPES = ("CONDITIONING",)
13
+ RETURN_NAMES = ("conditioning",)
14
+ FUNCTION = "conditioning"
15
+ CATEGORY = "conditioning/lotus"
16
+
17
+ def conditioning(self):
18
+ device = mm.get_torch_device()
19
+ #lotus uses a frozen encoder and null conditioning, i'm just inlining the results of that operation since it doesn't change
20
+ #and getting parity with the reference implementation would otherwise require inference and 800mb of tensors
21
+ prompt_embeds = torch.tensor([[[-0.3134765625, -0.447509765625, -0.00823974609375, -0.22802734375, 0.1785888671875, -0.2342529296875, -0.2188720703125, -0.0089111328125, -0.31396484375, 0.196533203125, -0.055877685546875, -0.3828125, -0.0965576171875, 0.0073394775390625, -0.284423828125, 0.07470703125, -0.086181640625, -0.211181640625, 0.0599365234375, 0.10693359375, 0.0007929801940917969, -0.78076171875, -0.382568359375, -0.1851806640625, -0.140625, -0.0936279296875, -0.1229248046875, -0.152099609375, -0.203857421875, -0.2349853515625, -0.2437744140625, -0.10858154296875, -0.08990478515625, 0.08892822265625, -0.2391357421875, -0.1611328125, -0.427978515625, -0.1336669921875, -0.27685546875, -0.1781005859375, -0.3857421875, 0.251953125, -0.055999755859375, -0.0712890625, -0.00130462646484375, 0.033477783203125, -0.26416015625, 0.07171630859375, -0.0090789794921875, -0.2025146484375, -0.2763671875, -0.09869384765625, -0.45751953125, -0.23095703125, 0.004528045654296875, -0.369140625, -0.366943359375, -0.205322265625, -0.1505126953125, -0.45166015625, -0.2059326171875, 0.0168609619140625, -0.305419921875, -0.150634765625, 0.02685546875, -0.609375, -0.019012451171875, 0.050445556640625, -0.0084381103515625, -0.31005859375, -0.184326171875, -0.15185546875, 0.06732177734375, 0.150390625, -0.10919189453125, -0.08837890625, -0.50537109375, -0.389892578125, -0.0294342041015625, -0.10491943359375, -0.187255859375, -0.43212890625, -0.328125, -1.060546875, 0.011871337890625, 0.04730224609375, -0.09521484375, -0.07452392578125, -0.29296875, -0.109130859375, -0.250244140625, -0.3828125, -0.171875, -0.03399658203125, -0.15478515625, -0.1861572265625, -0.2398681640625, 0.1053466796875, -0.22314453125, -0.1932373046875, -0.18798828125, -0.430419921875, -0.05364990234375, -0.474609375, -0.261474609375, -0.1077880859375, -0.439208984375, 0.08966064453125, -0.185302734375, -0.338134765625, -0.297119140625, -0.298583984375, -0.175537109375, -0.373291015625, -0.1397705078125, 
-0.260498046875, -0.383544921875, -0.09979248046875, -0.319580078125, -0.06884765625, -0.4365234375, -0.183837890625, -0.393310546875, -0.002277374267578125, 0.11236572265625, -0.260498046875, -0.2242431640625, -0.19384765625, -0.51123046875, 0.03216552734375, -0.048004150390625, -0.279052734375, -0.2978515625, -0.255615234375, 0.115478515625, -4.08984375, -0.1668701171875, -0.278076171875, -0.5712890625, -0.1385498046875, -0.244384765625, -0.41455078125, -0.244140625, -0.0677490234375, -0.141357421875, -0.11590576171875, -0.1439208984375, -0.0185394287109375, -2.490234375, -0.1549072265625, -0.2305908203125, -0.3828125, -0.1173095703125, -0.08258056640625, -0.1719970703125, -0.325439453125, -0.292724609375, -0.08154296875, -0.412353515625, -0.3115234375, -0.00832366943359375, 0.00489044189453125, -0.2236328125, -0.151123046875, -0.457275390625, -0.135009765625, -0.163330078125, -0.0819091796875, 0.06689453125, 0.0209197998046875, -0.11907958984375, -0.10369873046875, -0.2998046875, -0.478759765625, -0.07940673828125, -0.01517486572265625, -0.3017578125, -0.343994140625, -0.258544921875, -0.44775390625, -0.392822265625, -0.0255584716796875, -0.2998046875, 0.10833740234375, -0.271728515625, -0.36181640625, -0.255859375, -0.2056884765625, -0.055450439453125, 0.060516357421875, -0.45751953125, -0.2322998046875, -0.1737060546875, -0.40576171875, -0.2286376953125, -0.053070068359375, -0.0283660888671875, -0.1898193359375, -4.291534423828125e-05, -0.6591796875, -0.1717529296875, -0.479736328125, -0.1400146484375, -0.40771484375, 0.154296875, 0.003101348876953125, 0.00661468505859375, -0.2073974609375, -0.493408203125, 2.171875, -0.45361328125, -0.283935546875, -0.302001953125, -0.25146484375, -0.207275390625, -0.1524658203125, -0.72998046875, -0.08203125, 0.053192138671875, -0.2685546875, 0.1834716796875, -0.270263671875, -0.091552734375, -0.08319091796875, -0.1297607421875, -0.453857421875, 0.0687255859375, 0.0268096923828125, -0.16552734375, -0.4208984375, 
-0.1552734375, -0.057373046875, -0.300537109375, -0.04541015625, -0.486083984375, -0.2205810546875, -0.39013671875, 0.007488250732421875, -0.005329132080078125, -0.09759521484375, -0.1448974609375, -0.21923828125, -0.429443359375, -0.40087890625, -0.19384765625, -0.064453125, -0.0306243896484375, -0.045806884765625, -0.056793212890625, 0.119384765625, -0.2073974609375, -0.356201171875, -0.168212890625, -0.291748046875, -0.289794921875, -0.205322265625, -0.419677734375, -0.478271484375, -0.2037353515625, -0.368408203125, -0.186279296875, -0.427734375, -0.1756591796875, 0.07501220703125, -0.2457275390625, -0.03692626953125, 0.003997802734375, -5.7578125, -0.01052093505859375, -0.2305908203125, -0.2252197265625, -0.197509765625, -0.1566162109375, -0.1668701171875, -0.383056640625, -0.05413818359375, 0.12188720703125, -0.369873046875, -0.0184478759765625, -0.150146484375, -0.51123046875, -0.45947265625, -0.1561279296875, 0.060455322265625, 0.043487548828125, -0.1370849609375, -0.069091796875, -0.285888671875, -0.44482421875, -0.2374267578125, -0.2191162109375, -0.434814453125, -0.0360107421875, 0.1298828125, 0.0217742919921875, -0.51220703125, -0.13525390625, -0.09381103515625, -0.276611328125, -0.171875, -0.17138671875, -0.4443359375, -0.2178955078125, -0.269775390625, -0.38623046875, -0.31591796875, -0.42333984375, -0.280029296875, -0.255615234375, -0.17041015625, 0.06268310546875, -0.1878662109375, -0.00677490234375, -0.23583984375, -0.08795166015625, -0.2232666015625, -0.1719970703125, -0.484130859375, -0.328857421875, 0.04669189453125, -0.0419921875, -0.11114501953125, 0.02313232421875, -0.0033130645751953125, -0.6005859375, 0.09051513671875, -0.1884765625, -0.262939453125, -0.375732421875, -0.525390625, -0.1170654296875, -0.3779296875, -0.242919921875, -0.419921875, 0.0665283203125, -0.343017578125, 0.06658935546875, -0.346435546875, -0.1363525390625, -0.2000732421875, -0.3837890625, 0.028167724609375, 0.043853759765625, -0.0171051025390625, -0.477294921875, 
-0.107421875, -0.129150390625, -0.319580078125, -0.32177734375, -0.4951171875, -0.010589599609375, -0.1778564453125, -0.40234375, -0.0810546875, 0.03314208984375, -0.13720703125, -0.31591796875, -0.048248291015625, -0.274658203125, -0.0689697265625, -0.027130126953125, -0.0953369140625, 0.146728515625, -0.38671875, -0.025390625, -0.42333984375, -0.41748046875, -0.379638671875, -0.1978759765625, -0.533203125, -0.33544921875, 0.0694580078125, -0.322998046875, -0.1876220703125, 0.0094451904296875, 0.1839599609375, -0.254150390625, -0.30078125, -0.09228515625, -0.0885009765625, 0.12371826171875, 0.1500244140625, -0.12152099609375, -0.29833984375, 0.03924560546875, -0.1470947265625, -0.1610107421875, -0.2049560546875, -0.01708984375, -0.2470703125, -0.1522216796875, -0.25830078125, 0.10870361328125, -0.302490234375, -0.2376708984375, -0.360107421875, -0.443359375, -0.0784912109375, -0.63623046875, -0.0980224609375, -0.332275390625, -0.1749267578125, -0.30859375, -0.1968994140625, -0.250244140625, -0.447021484375, -0.18408203125, -0.006908416748046875, -0.2044677734375, -0.2548828125, -0.369140625, -0.11328125, -0.1103515625, -0.27783203125, -0.325439453125, 0.01381683349609375, 0.036773681640625, -0.1458740234375, -0.34619140625, -0.232177734375, -0.0562744140625, -0.4482421875, -0.21875, -0.0855712890625, -0.276123046875, -0.1544189453125, -0.223388671875, -0.259521484375, 0.0865478515625, -0.0038013458251953125, -0.340087890625, -0.076171875, -0.25341796875, -0.0007548332214355469, -0.060455322265625, -0.352294921875, 0.035736083984375, -0.2181396484375, -0.2318115234375, -0.1707763671875, 0.018646240234375, 0.093505859375, -0.197021484375, 0.033477783203125, -0.035247802734375, 0.0440673828125, -0.2056884765625, -0.040924072265625, -0.05865478515625, 0.056884765625, -0.08807373046875, -0.10845947265625, 0.09564208984375, -0.10888671875, -0.332275390625, -0.1119384765625, -0.115478515625, 13.0234375, 0.0030040740966796875, -0.53662109375, -0.1856689453125, 
-0.068115234375, -0.143798828125, -0.177978515625, -0.32666015625, -0.353515625, -0.1563720703125, -0.3203125, 0.0085906982421875, -0.1043701171875, -0.365478515625, -0.303466796875, -0.34326171875, -0.410888671875, -0.03790283203125, -0.11419677734375, -0.2939453125, 0.074462890625, -0.21826171875, 0.0242767333984375, -0.226318359375, -0.353515625, -0.177734375, -0.169189453125, -0.2423095703125, -0.12115478515625, -0.07843017578125, -0.341064453125, -0.2117919921875, -0.505859375, -0.544921875, -0.3935546875, -0.10772705078125, -0.2054443359375, -0.136474609375, -0.1796875, -0.396240234375, -0.1971435546875, -0.68408203125, -0.032684326171875, -0.03863525390625, -0.0709228515625, -0.1005859375, -0.156005859375, -0.3837890625, -0.319580078125, 0.11102294921875, -0.394287109375, 0.0799560546875, -0.50341796875, -0.1572265625, 0.004131317138671875, -0.12286376953125, -0.2347412109375, -0.29150390625, -0.10321044921875, -0.286376953125, 0.018798828125, -0.152099609375, -0.321044921875, 0.0191650390625, -0.11376953125, -0.54736328125, 0.15869140625, -0.257568359375, -0.2490234375, -0.3115234375, -0.09765625, -0.350830078125, -0.36376953125, -0.0771484375, -0.2298583984375, -0.30615234375, -0.052154541015625, -0.12091064453125, -0.40283203125, -0.1649169921875, 0.0206451416015625, -0.312744140625, -0.10308837890625, -0.50341796875, -0.1754150390625, -0.2003173828125, -0.173583984375, -0.204833984375, -0.1876220703125, -0.12176513671875, -0.06201171875, -0.03485107421875, -0.20068359375, -0.21484375, -0.246337890625, -0.006587982177734375, -0.09674072265625, -0.4658203125, -0.3994140625, -0.2210693359375, -0.09588623046875, -0.126220703125, -0.09222412109375, -0.145751953125, -0.217529296875, -0.289306640625, -0.28271484375, -0.1787109375, -0.169189453125, -0.359375, -0.21826171875, -0.043792724609375, -0.205322265625, -0.2900390625, -0.055419921875, -0.1490478515625, -0.340576171875, -0.045928955078125, -0.30517578125, -0.51123046875, -0.1046142578125, -0.349853515625, 
-0.10882568359375, -0.16748046875, -0.267333984375, -0.122314453125, -0.0985107421875, -0.3076171875, -0.1766357421875, -0.251708984375, 0.1964111328125, -0.2220458984375, -0.2349853515625, -0.035980224609375, -0.1749267578125, -0.237060546875, -0.480224609375, -0.240234375, -0.09539794921875, -0.2481689453125, -0.389404296875, -0.1748046875, -0.370849609375, -0.010650634765625, -0.147705078125, -0.0035457611083984375, -0.32568359375, -0.29931640625, -0.1395263671875, -0.28173828125, -0.09820556640625, -0.0176239013671875, -0.05926513671875, -0.0755615234375, -0.1746826171875, -0.283203125, -0.1617431640625, -0.4404296875, 0.046234130859375, -0.183837890625, -0.052032470703125, -0.24658203125, -0.11224365234375, -0.100830078125, -0.162841796875, -0.29736328125, -0.396484375, 0.11798095703125, -0.006496429443359375, -0.32568359375, -0.347900390625, -0.04595947265625, -0.09637451171875, -0.344970703125, -0.01166534423828125, -0.346435546875, -0.2861328125, -0.1845703125, -0.276611328125, -0.01312255859375, -0.395263671875, -0.50927734375, -0.1114501953125, -0.1861572265625, -0.2158203125, -0.1812744140625, 0.055419921875, -0.294189453125, 0.06500244140625, -0.1444091796875, -0.06365966796875, -0.18408203125, -0.0091705322265625, -0.1640625, -0.1856689453125, 0.090087890625, 0.024566650390625, -0.0195159912109375, -0.5546875, -0.301025390625, -0.438232421875, -0.072021484375, 0.030517578125, -0.1490478515625, 0.04888916015625, -0.23681640625, -0.1553955078125, -0.018096923828125, -0.229736328125, -0.2919921875, -0.355712890625, -0.285400390625, -0.1756591796875, -0.08355712890625, -0.416259765625, 0.022674560546875, -0.417236328125, 0.410400390625, -0.249755859375, 0.015625, -0.033599853515625, -0.040313720703125, -0.51708984375, -0.0518798828125, -0.08843994140625, -0.2022705078125, -0.3740234375, -0.285888671875, -0.176025390625, -0.292724609375, -0.369140625, -0.08367919921875, -0.356689453125, -0.38623046875, 0.06549072265625, 0.1669921875, -0.2099609375, 
-0.007434844970703125, 0.12890625, -0.0040740966796875, -0.2174072265625, -0.025115966796875, -0.2364501953125, -0.1695556640625, -0.0469970703125, -0.03924560546875, -0.36181640625, -0.047515869140625, -0.3154296875, -0.275634765625, -0.25634765625, -0.061920166015625, -0.12164306640625, -0.47314453125, -0.10784912109375, -0.74755859375, -0.13232421875, -0.32421875, -0.04998779296875, -0.286376953125, 0.10345458984375, -0.1710205078125, -0.388916015625, 0.12744140625, -0.3359375, -0.302490234375, -0.238525390625, -0.1455078125, -0.15869140625, -0.2427978515625, -0.0355224609375, -0.11944580078125, -0.31298828125, 0.11456298828125, -0.287841796875, -0.5439453125, -0.3076171875, -0.08642578125, -0.2408447265625, -0.283447265625, -0.428466796875, -0.085693359375, -0.1683349609375, 0.255126953125, 0.07635498046875, -0.38623046875, -0.2025146484375, -0.1331787109375, -0.10821533203125, -0.49951171875, 0.09130859375, -0.19677734375, -0.01904296875, -0.151123046875, -0.344482421875, -0.316650390625, -0.03900146484375, 0.1397705078125, 0.1334228515625, -0.037200927734375, -0.01861572265625, -0.1351318359375, -0.07037353515625, -0.380615234375, -0.34033203125, -0.06903076171875, 0.219970703125, 0.0132598876953125, -0.15869140625, -0.6376953125, 0.158935546875, -0.5283203125, -0.2320556640625, -0.185791015625, -0.2132568359375, -0.436767578125, -0.430908203125, -0.1763916015625, -0.0007672309875488281, -0.424072265625, -0.06719970703125, -0.347900390625, -0.14453125, -0.3056640625, -0.36474609375, -0.35986328125, -0.46240234375, -0.446044921875, -0.1905517578125, -0.1114501953125, -0.42919921875, -0.0643310546875, -0.3662109375, -0.4296875, -0.10968017578125, -0.2998046875, -0.1756591796875, -0.4052734375, -0.0841064453125, -0.252197265625, -0.047393798828125, 0.00434112548828125, -0.10040283203125, -0.271484375, -0.185302734375, -0.1910400390625, 0.10260009765625, 0.01393890380859375, -0.03350830078125, -0.33935546875, -0.329345703125, 0.0574951171875, -0.18896484375, 
-0.17724609375, -0.42919921875, -0.26708984375, -0.4189453125, -0.149169921875, -0.265625, -0.198974609375, -0.1722412109375, 0.1563720703125, -0.20947265625, -0.267822265625, -0.06353759765625, -0.365478515625, -0.340087890625, -0.3095703125, -0.320068359375, -0.0880126953125, -0.353759765625, -0.0005812644958496094, -0.1617431640625, -0.1866455078125, -0.201416015625, -0.181396484375, -0.2349853515625, -0.384765625, -0.5244140625, 0.01227569580078125, -0.21337890625, -0.30810546875, -0.17578125, -0.3037109375, -0.52978515625, -0.1561279296875, -0.296142578125, 0.057342529296875, -0.369384765625, -0.107666015625, -0.338623046875, -0.2060546875, -0.0213775634765625, -0.394775390625, -0.219482421875, -0.125732421875, -0.03997802734375, -0.42431640625, -0.134521484375, -0.2418212890625, -0.10504150390625, 0.1552734375, 0.1126708984375, -0.1427001953125, -0.133544921875, -0.111083984375, -0.375732421875, -0.2783203125, -0.036834716796875, -0.11053466796875, 0.2471923828125, -0.2529296875, -0.56494140625, -0.374755859375, -0.326416015625, 0.2137451171875, -0.09454345703125, -0.337158203125, -0.3359375, -0.34375, -0.0999755859375, -0.388671875, 0.0103302001953125, 0.14990234375, -0.2041015625, -0.39501953125, -0.39013671875, -0.1258544921875, 0.1453857421875, -0.250732421875, -0.06732177734375, -0.10638427734375, -0.032379150390625, -0.35888671875, -0.098876953125, -0.172607421875, 0.05126953125, -0.1956787109375, -0.183837890625, -0.37060546875, 0.1556396484375, -0.34375, -0.28662109375, -0.06982421875, -0.302490234375, -0.281005859375, -0.1640625, -0.5302734375, -0.1368408203125, -0.1268310546875, -0.35302734375, -0.1473388671875, -0.45556640625, -0.35986328125, -0.273681640625, -0.2249755859375, -0.1893310546875, 0.09356689453125, -0.248291015625, -0.197998046875, -0.3525390625, -0.30126953125, -0.228271484375, -0.2421875, -0.0906982421875, 0.227783203125, -0.296875, -0.009796142578125, -0.2939453125, -0.1021728515625, -0.215576171875, -0.267822265625, 
-0.052642822265625, 0.203369140625, -0.1417236328125, 0.18505859375, 0.12347412109375, -0.0972900390625, -0.54052734375, -0.430419921875, -0.0906982421875, -0.5419921875, -0.22900390625, -0.0625, -0.12152099609375, -0.495849609375, -0.206787109375, -0.025848388671875, 0.039031982421875, -0.453857421875, -0.318359375, -0.426025390625, -0.3701171875, -0.2169189453125, 0.0845947265625, -0.045654296875, 0.11090087890625, 0.0012454986572265625, 0.2066650390625, -0.046356201171875, -0.2337646484375, -0.295654296875, 0.057891845703125, -0.1639404296875, -0.0535888671875, -0.2607421875, -0.1488037109375, -0.16015625, -0.54345703125, -0.2305908203125, -0.55029296875, -0.178955078125, -0.222412109375, -0.0711669921875, -0.12298583984375, -0.119140625, -0.253662109375, -0.33984375, -0.11322021484375, -0.10723876953125, -0.205078125, -0.360595703125, 0.085205078125, -0.252197265625, -0.365966796875, -0.26953125, 0.2000732421875, -0.50634765625, 0.05706787109375, -0.3115234375, 0.0242919921875, -0.1689453125, -0.2401123046875, -0.3759765625, -0.2125244140625, 0.076416015625, -0.489013671875, -0.11749267578125, -0.55908203125, -0.313232421875, -0.572265625, -0.1387939453125, -0.037078857421875, -0.385498046875, 0.0323486328125, -0.39404296875, -0.05072021484375, -0.10430908203125, -0.10919189453125, -0.28759765625, -0.37451171875, -0.016937255859375, -0.2200927734375, -0.296875, -0.0286712646484375, -0.213134765625, 0.052001953125, -0.052337646484375, -0.253662109375, 0.07269287109375, -0.2498779296875, -0.150146484375, -0.09930419921875, -0.343505859375, 0.254150390625, -0.032440185546875, -0.296142578125], [1.4111328125, 0.00757598876953125, -0.428955078125, 0.089599609375, 0.0227813720703125, -0.0350341796875, -1.0986328125, 0.194091796875, 2.115234375, -0.75439453125, 0.269287109375, -0.73486328125, -1.1025390625, -0.050262451171875, -0.5830078125, 0.0268707275390625, -0.603515625, -0.6025390625, -1.1689453125, 0.25048828125, -0.4189453125, -0.5517578125, -0.30322265625, 
0.7724609375, 0.931640625, -0.1422119140625, 2.27734375, -0.56591796875, 1.013671875, -0.9638671875, -0.66796875, -0.8125, 1.3740234375, -1.060546875, -1.029296875, -1.6796875, 0.62890625, 0.49365234375, 0.671875, 0.99755859375, -1.0185546875, -0.047027587890625, -0.374267578125, 0.2354736328125, 1.4970703125, -1.5673828125, 0.448974609375, 0.2078857421875, -1.060546875, -0.171875, -0.6201171875, -0.1607666015625, 0.7548828125, -0.58935546875, -0.2052001953125, 0.060791015625, 0.200439453125, 3.154296875, -3.87890625, 2.03515625, 1.126953125, 0.1640625, -1.8447265625, 0.002620697021484375, 0.7998046875, -0.337158203125, 0.47216796875, -0.5849609375, 0.9970703125, 0.3935546875, 1.22265625, -1.5048828125, -0.65673828125, 1.1474609375, -1.73046875, -1.8701171875, 1.529296875, -0.6787109375, -1.4453125, 1.556640625, -0.327392578125, 2.986328125, -0.146240234375, -2.83984375, 0.303466796875, -0.71728515625, -0.09698486328125, -0.2423095703125, 0.6767578125, -2.197265625, -0.86279296875, -0.53857421875, -1.2236328125, 1.669921875, -1.1689453125, -0.291259765625, -0.54736328125, -0.036346435546875, 1.041015625, -1.7265625, -0.6064453125, -0.1634521484375, 0.2381591796875, 0.65087890625, -1.169921875, 1.9208984375, 0.5634765625, 0.37841796875, 0.798828125, -1.021484375, -0.4091796875, 2.275390625, -0.302734375, -1.7783203125, 1.0458984375, 1.478515625, 0.708984375, -1.541015625, -0.0006041526794433594, 1.1884765625, 2.041015625, 0.560546875, -0.1131591796875, 1.0341796875, 0.06121826171875, 2.6796875, -0.53369140625, -1.2490234375, -0.7333984375, -1.017578125, -1.0078125, 1.3212890625, -0.47607421875, -1.4189453125, 0.54052734375, -0.796875, -0.73095703125, -1.412109375, -0.94873046875, -2.2734375, -1.1220703125, -1.3837890625, -0.5087890625, -1.0380859375, -0.93603515625, -0.58349609375, -1.0703125, -1.10546875, -2.60546875, 0.062225341796875, 0.38232421875, -0.411376953125, -0.369140625, -0.9833984375, -0.7294921875, -0.181396484375, -0.47216796875, -0.56884765625, 
-0.11041259765625, -2.673828125, 0.27783203125, -0.857421875, 0.9296875, 1.9580078125, 0.1385498046875, -1.91796875, -1.529296875, 0.53857421875, 0.509765625, -0.90380859375, -0.0947265625, -2.083984375, 0.9228515625, -0.28564453125, -0.80859375, -0.093505859375, -0.6015625, -1.255859375, 0.6533203125, 0.327880859375, -0.07598876953125, -0.22705078125, -0.30078125, -0.5185546875, -1.6044921875, 1.5927734375, 1.416015625, -0.91796875, -0.276611328125, -0.75830078125, -1.1689453125, -1.7421875, 1.0546875, -0.26513671875, -0.03314208984375, 0.278076171875, -1.337890625, 0.055023193359375, 0.10546875, -1.064453125, 1.048828125, -1.4052734375, -1.1240234375, -0.51416015625, -1.05859375, -1.7265625, -1.1328125, 0.43310546875, -2.576171875, -2.140625, -0.79345703125, 0.50146484375, 1.96484375, 0.98583984375, 0.337646484375, -0.77978515625, 0.85498046875, -0.65185546875, -0.484375, 2.708984375, 0.55810546875, -0.147216796875, -0.5537109375, -0.75439453125, -1.736328125, 1.1259765625, -1.095703125, -0.2587890625, 2.978515625, 0.335205078125, 0.357666015625, -0.09356689453125, 0.295654296875, -0.23779296875, 1.5751953125, 0.10400390625, 1.7001953125, -0.72900390625, -1.466796875, -0.2012939453125, 0.634765625, -0.1556396484375, -2.01171875, 0.32666015625, 0.047454833984375, -0.1671142578125, -0.78369140625, -0.994140625, 0.7802734375, -0.1429443359375, -0.115234375, 0.53271484375, -0.96142578125, -0.064208984375, 1.396484375, 1.654296875, -1.6015625, -0.77392578125, 0.276123046875, -0.42236328125, 0.8642578125, 0.533203125, 0.397216796875, -1.21484375, 0.392578125, -0.501953125, -0.231689453125, 1.474609375, 1.6669921875, 1.8662109375, -1.2998046875, 0.223876953125, -0.51318359375, -0.437744140625, -1.16796875, -0.7724609375, 1.6826171875, 0.62255859375, 2.189453125, -0.599609375, -0.65576171875, -1.1005859375, -0.45263671875, -0.292236328125, 2.58203125, -1.3779296875, 0.23486328125, -1.708984375, -1.4111328125, -0.5078125, -0.8525390625, -0.90771484375, 0.861328125, 
-2.22265625, -1.380859375, 0.7275390625, 0.85595703125, -0.77978515625, 2.044921875, -0.430908203125, 0.78857421875, -1.21484375, -0.09130859375, 0.5146484375, -1.92578125, -0.1396484375, 0.289306640625, 0.60498046875, 0.93896484375, -0.09295654296875, -0.45751953125, -0.986328125, -0.66259765625, 1.48046875, 0.274169921875, -0.267333984375, -1.3017578125, -1.3623046875, -1.982421875, -0.86083984375, -0.41259765625, -0.2939453125, -1.91015625, 1.6826171875, 0.437255859375, 1.0029296875, 0.376220703125, -0.010467529296875, -0.82861328125, -0.513671875, -3.134765625, 1.0205078125, -1.26171875, -1.009765625, 1.0869140625, -0.95703125, 0.0103759765625, 1.642578125, 0.78564453125, 1.029296875, 0.496826171875, 1.2880859375, 0.5234375, 0.05322265625, -0.206787109375, -0.79443359375, -1.1669921875, 0.049530029296875, -0.27978515625, 0.0237884521484375, -0.74169921875, -1.068359375, 0.86083984375, 1.1787109375, 0.91064453125, -0.453857421875, -1.822265625, -0.9228515625, -0.50048828125, 0.359130859375, 0.802734375, -1.3564453125, -0.322509765625, -1.1123046875, -1.0390625, -0.52685546875, -1.291015625, -0.343017578125, -1.2109375, -0.19091796875, 2.146484375, -0.04315185546875, -0.3701171875, -2.044921875, -0.429931640625, -0.56103515625, -0.166015625, -0.4658203125, -2.29296875, -1.078125, -1.0927734375, -0.1033935546875, -0.56103515625, -0.05743408203125, -1.986328125, -0.513671875, 0.70361328125, -2.484375, -1.3037109375, -1.6650390625, 0.4814453125, -0.84912109375, -2.697265625, -0.197998046875, 0.0869140625, -0.172607421875, -1.326171875, -1.197265625, 1.23828125, -0.38720703125, -0.075927734375, 0.02569580078125, -1.2119140625, 0.09027099609375, -2.12890625, -1.640625, -0.1524658203125, 0.2373046875, 1.37109375, 2.248046875, 1.4619140625, 0.3134765625, 0.50244140625, -0.1383056640625, -1.2705078125, 0.7353515625, 0.65771484375, -0.431396484375, -1.341796875, 0.10089111328125, 0.208984375, -0.0099945068359375, 0.83203125, 1.314453125, -0.422607421875, -1.58984375, 
-0.6044921875, 0.23681640625, -1.60546875, -0.61083984375, -1.5615234375, 1.62890625, -0.6728515625, -0.68212890625, -0.5224609375, -0.9150390625, -0.468994140625, 0.268310546875, 0.287353515625, -0.025543212890625, 0.443603515625, 1.62109375, -1.08984375, -0.5556640625, 1.03515625, -0.31298828125, -0.041778564453125, 0.260986328125, 0.34716796875, -2.326171875, 0.228271484375, -0.85107421875, -2.255859375, 0.3486328125, -0.25830078125, -0.3671875, -0.796875, -1.115234375, 1.8369140625, -0.19775390625, -1.236328125, -0.0447998046875, 0.69921875, 1.37890625, 1.11328125, 0.0928955078125, 0.6318359375, -0.62353515625, 0.55859375, -0.286865234375, 1.5361328125, -0.391357421875, -0.052215576171875, -1.12890625, 0.55517578125, -0.28515625, -0.3603515625, 0.68896484375, 0.67626953125, 0.003070831298828125, 1.2236328125, 0.1597900390625, -1.3076171875, 0.99951171875, -2.5078125, -1.2119140625, 0.1749267578125, -1.1865234375, -1.234375, -0.1180419921875, -1.751953125, 0.033050537109375, 0.234130859375, -3.107421875, -1.0380859375, 0.61181640625, -0.87548828125, 0.3154296875, -1.103515625, 0.261474609375, -1.130859375, -0.7470703125, -0.43408203125, 1.3828125, -0.41259765625, -1.7587890625, 0.765625, 0.004852294921875, 0.135498046875, -0.76953125, -0.1314697265625, 0.400390625, 1.43359375, 0.07135009765625, 0.0645751953125, -0.5869140625, -0.5810546875, -0.2900390625, -1.3037109375, 0.1287841796875, -0.27490234375, 0.59228515625, 2.333984375, -0.54541015625, -0.556640625, 0.447265625, -0.806640625, 0.09149169921875, -0.70654296875, -0.357177734375, -1.099609375, -0.5576171875, -0.44189453125, 0.400390625, -0.666015625, -1.4619140625, 0.728515625, -1.5986328125, 0.153076171875, -0.126708984375, -2.83984375, -1.84375, -0.2469482421875, 0.677734375, 0.43701171875, 3.298828125, 1.1591796875, -0.7158203125, -0.8251953125, 0.451171875, -2.376953125, -0.58642578125, -0.86767578125, 0.0789794921875, 0.1351318359375, -0.325439453125, 0.484375, 1.166015625, -0.1610107421875, 
-0.15234375, -0.54638671875, -0.806640625, 0.285400390625, 0.1661376953125, -0.50146484375, -1.0478515625, 1.5751953125, 0.0313720703125, 0.2396240234375, -0.6572265625, -0.1258544921875, -1.060546875, 1.3076171875, -0.301513671875, -1.2412109375, 0.6376953125, -1.5693359375, 0.354248046875, 0.2427978515625, -0.392333984375, 0.61962890625, -0.58837890625, -1.71484375, -0.2098388671875, -0.828125, 0.330810546875, 0.16357421875, -0.2259521484375, 0.0972900390625, -0.451416015625, 1.79296875, -1.673828125, -1.58203125, -2.099609375, -0.487548828125, -0.87060546875, 0.62646484375, -1.470703125, -0.1558837890625, 0.4609375, 1.3369140625, 0.2322998046875, 0.1632080078125, 0.65966796875, 1.0810546875, 0.1041259765625, 0.63232421875, -0.32421875, -1.04296875, -1.046875, -1.3720703125, -0.8486328125, 0.1290283203125, 0.137939453125, 0.1549072265625, -1.0908203125, 0.0167694091796875, -0.31689453125, 1.390625, 0.07269287109375, 1.0390625, 1.1162109375, -0.455810546875, -0.06689453125, -0.053741455078125, 0.5048828125, -0.8408203125, -1.19921875, 0.87841796875, 0.7421875, 0.2030029296875, 0.109619140625, -0.59912109375, -1.337890625, -0.74169921875, -0.64453125, -1.326171875, 0.21044921875, -1.3583984375, -1.685546875, -0.472900390625, -0.270263671875, 0.99365234375, -0.96240234375, 1.1279296875, -0.45947265625, -0.45654296875, -0.99169921875, -3.515625, -1.9853515625, 0.73681640625, 0.92333984375, -0.56201171875, -1.4453125, -2.078125, 0.94189453125, -1.333984375, 0.0982666015625, 0.60693359375, 0.367431640625, 3.015625, -1.1357421875, -1.5634765625, 0.90234375, -0.1783447265625, 0.1802978515625, -0.317138671875, -0.513671875, 1.2353515625, -0.033203125, 1.4482421875, 1.0087890625, 0.9248046875, 0.10418701171875, 0.7626953125, -1.3798828125, 0.276123046875, 0.55224609375, 1.1005859375, -0.62158203125, -0.806640625, 0.65087890625, 0.270263671875, -0.339111328125, -0.9384765625, -0.09381103515625, -0.7216796875, 1.37890625, -0.398193359375, -0.3095703125, -1.4912109375, 
0.96630859375, 0.43798828125, 0.62255859375, 0.0213470458984375, 0.235595703125, -1.2958984375, 0.0157318115234375, -0.810546875, 1.9736328125, -0.2462158203125, 0.720703125, 0.822265625, -0.755859375, -0.658203125, 0.344482421875, -2.892578125, -0.282470703125, 1.2529296875, -0.294189453125, 0.6748046875, -0.80859375, 0.9287109375, 1.27734375, -1.71875, -0.166015625, 0.47412109375, -0.41259765625, -1.3681640625, -0.978515625, -0.77978515625, -1.044921875, -0.90380859375, -0.08184814453125, -0.86181640625, -0.10772705078125, -0.299560546875, -0.4306640625, -0.47119140625, 0.95703125, 1.107421875, 0.91796875, 0.76025390625, 0.7392578125, -0.09161376953125, -0.7392578125, 0.9716796875, -0.395751953125, -0.75390625, -0.164306640625, -0.087646484375, 0.028564453125, -0.91943359375, -0.66796875, 2.486328125, 0.427734375, 0.626953125, 0.474853515625, 0.0926513671875, 0.830078125, -0.6923828125, 0.7841796875, -0.89208984375, -2.482421875, 0.034912109375, -1.3447265625, -0.475341796875, -0.286376953125, -0.732421875, 0.190673828125, -0.491455078125, -3.091796875, -1.2783203125, -0.66015625, -0.1507568359375, 0.042236328125, -1.025390625, 0.12744140625, -1.984375, -0.393798828125, -1.25, -1.140625, 1.77734375, 0.2457275390625, -0.8017578125, 0.7763671875, -0.387939453125, -0.3662109375, 1.1572265625, 0.123291015625, -0.07135009765625, 1.412109375, -0.685546875, -3.078125, 0.031524658203125, -0.70458984375, 0.78759765625, 0.433837890625, -1.861328125, -1.33203125, 2.119140625, -1.3544921875, -0.6591796875, -1.4970703125, 0.40625, -2.078125, -1.30859375, 0.050262451171875, -0.60107421875, 1.0078125, 0.05657958984375, -0.96826171875, 0.0264892578125, 0.159912109375, 0.84033203125, -1.1494140625, -0.0433349609375, -0.2034912109375, 1.09765625, -1.142578125, -0.283203125, -0.427978515625, 1.0927734375, -0.67529296875, -0.61572265625, 2.517578125, 0.84130859375, 1.8662109375, 0.1748046875, -0.407958984375, -0.029449462890625, -0.27587890625, -0.958984375, -0.10028076171875, 
1.248046875, -0.0792236328125, -0.45556640625, 0.7685546875, 1.5556640625, -1.8759765625, -0.131591796875, -1.3583984375, 0.7890625, 0.80810546875, -1.0322265625, -0.53076171875, -0.1484375, -1.7841796875, -1.2470703125, 0.17138671875, -0.04864501953125, -0.80322265625, -0.0933837890625, 0.984375, 0.7001953125, 0.5380859375, 0.2022705078125, -1.1865234375, 0.5439453125, 1.1318359375, 0.79931640625, 0.32666015625, -1.26171875, 0.457763671875, 1.1591796875, -0.34423828125, 0.65771484375, 0.216552734375, 1.19140625, -0.2744140625, -0.020416259765625, -0.86376953125, 0.93017578125, 1.0556640625, 0.69873046875, -0.15087890625, -0.33056640625, 0.8505859375, 0.06890869140625, 0.359375, -0.262939453125, 0.12493896484375, 0.017059326171875, -0.98974609375, 0.5107421875, 0.2408447265625, 0.615234375, -0.62890625, 0.86962890625, -0.07427978515625, 0.85595703125, 0.300537109375, -1.072265625, -1.6064453125, -0.353515625, -0.484130859375, -0.6044921875, -0.455810546875, 0.95849609375, 1.3671875, 0.544921875, 0.560546875, 0.34521484375, -0.6513671875, -0.410400390625, -0.2021484375, -0.1656494140625, 0.073486328125, 0.84716796875, -1.7998046875, -1.0126953125, -0.1324462890625, 0.95849609375, -0.669921875, -0.79052734375, -2.193359375, -0.42529296875, -1.7275390625, -1.04296875, 0.716796875, -0.4423828125, -1.193359375, 0.61572265625, -1.5224609375, 0.62890625, -0.705078125, 0.677734375, -0.213134765625, -1.6748046875, -1.087890625, -0.65185546875, -1.1337890625, 2.314453125, -0.352783203125, -0.27001953125, -2.01953125, -1.2685546875, 0.308837890625, -0.280517578125, -1.3798828125, -1.595703125, 0.642578125, 1.693359375, -0.82470703125, -1.255859375, 0.57373046875, 1.5859375, 1.068359375, -0.876953125, 0.370849609375, 1.220703125, 0.59765625, 0.007602691650390625, 0.09326171875, -0.9521484375, -0.024932861328125, -0.94775390625, -0.299560546875, -0.002536773681640625, 1.41796875, -0.06903076171875, -1.5927734375, 0.353515625, 3.63671875, -0.765625, -1.1142578125, 0.4287109375, 
-0.86865234375, -0.9267578125, -0.21826171875, -1.10546875, 0.29296875, -0.225830078125, 0.5400390625, -0.45556640625, -0.68701171875, -0.79150390625, -1.0810546875, 0.25439453125, -1.2998046875, -0.494140625, -0.1510009765625, 1.5615234375, -0.4248046875, -0.486572265625, 0.45458984375, 0.047637939453125, -0.11639404296875, 0.057403564453125, 0.130126953125, -0.10125732421875, -0.56201171875, 1.4765625, -1.7451171875, 1.34765625, -0.45703125, 0.873046875, -0.056121826171875, -0.8876953125, -0.986328125, 1.5654296875, 0.49853515625, 0.55859375, -0.2198486328125, 0.62548828125, 0.2734375, -0.63671875, -0.41259765625, -1.2705078125, 0.0665283203125, 1.3369140625, 0.90283203125, -0.77685546875, -1.5, -1.8525390625, -1.314453125, -0.86767578125, -0.331787109375, 0.1590576171875, 0.94775390625, -0.1771240234375, 1.638671875, -2.17578125, 0.58740234375, 0.424560546875, -0.3466796875, 0.642578125, 0.473388671875, 0.96435546875, 1.38671875, -0.91357421875, 1.0361328125, -0.67333984375, 1.5009765625]]]).to(device)
22
+
23
+ cond = [[prompt_embeds, {}]]
24
+
25
+ return (cond,)
26
+
27
+ # Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
28
+ NODE_CLASS_MAPPINGS = {
29
+ "LotusConditioning" : LotusConditioning,
30
+ }
ldm_patched/contrib/nodes_lt.py ADDED
@@ -0,0 +1,475 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import io
2
+ # import nodes
3
+ # import ldm_patched.contrib.node_helpers
4
+ # import torch
5
+ # import ldm_patched.modules.model_management
6
+ # import ldm_patched.modules.model_sampling
7
+ # import ldm_patched.modules.utils
8
+ # import math
9
+ # import numpy as np
10
+ # import av
11
+ # from ldm_patched.ldm.lightricks.symmetric_patchifier import SymmetricPatchifier, latent_to_pixel_coords
12
+
13
+ # class EmptyLTXVLatentVideo:
14
+ # @classmethod
15
+ # def INPUT_TYPES(s):
16
+ # return {"required": { "width": ("INT", {"default": 768, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 32}),
17
+ # "height": ("INT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 32}),
18
+ # "length": ("INT", {"default": 97, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 8}),
19
+ # "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}}
20
+ # RETURN_TYPES = ("LATENT",)
21
+ # FUNCTION = "generate"
22
+
23
+ # CATEGORY = "latent/video/ltxv"
24
+
25
+ # def generate(self, width, height, length, batch_size=1):
26
+ # latent = torch.zeros([batch_size, 128, ((length - 1) // 8) + 1, height // 32, width // 32], device=ldm_patched.modules.model_management.intermediate_device())
27
+ # return ({"samples": latent}, )
28
+
29
+
30
+ # class LTXVImgToVideo:
31
+ # @classmethod
32
+ # def INPUT_TYPES(s):
33
+ # return {"required": {"positive": ("CONDITIONING", ),
34
+ # "negative": ("CONDITIONING", ),
35
+ # "vae": ("VAE",),
36
+ # "image": ("IMAGE",),
37
+ # "width": ("INT", {"default": 768, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 32}),
38
+ # "height": ("INT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 32}),
39
+ # "length": ("INT", {"default": 97, "min": 9, "max": nodes.MAX_RESOLUTION, "step": 8}),
40
+ # "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
41
+ # "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0}),
42
+ # }}
43
+
44
+ # RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
45
+ # RETURN_NAMES = ("positive", "negative", "latent")
46
+
47
+ # CATEGORY = "conditioning/video_models"
48
+ # FUNCTION = "generate"
49
+
50
+ # def generate(self, positive, negative, image, vae, width, height, length, batch_size, strength):
51
+ # pixels = ldm_patched.modules.utils.common_upscale(image.movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
52
+ # encode_pixels = pixels[:, :, :, :3]
53
+ # t = vae.encode(encode_pixels)
54
+
55
+ # latent = torch.zeros([batch_size, 128, ((length - 1) // 8) + 1, height // 32, width // 32], device=ldm_patched.modules.model_management.intermediate_device())
56
+ # latent[:, :, :t.shape[2]] = t
57
+
58
+ # conditioning_latent_frames_mask = torch.ones(
59
+ # (batch_size, 1, latent.shape[2], 1, 1),
60
+ # dtype=torch.float32,
61
+ # device=latent.device,
62
+ # )
63
+ # conditioning_latent_frames_mask[:, :, :t.shape[2]] = 1.0 - strength
64
+
65
+ # return (positive, negative, {"samples": latent, "noise_mask": conditioning_latent_frames_mask}, )
66
+
67
+
68
+ # def conditioning_get_any_value(conditioning, key, default=None):
69
+ # for t in conditioning:
70
+ # if key in t[1]:
71
+ # return t[1][key]
72
+ # return default
73
+
74
+
75
+ # def get_noise_mask(latent):
76
+ # noise_mask = latent.get("noise_mask", None)
77
+ # latent_image = latent["samples"]
78
+ # if noise_mask is None:
79
+ # batch_size, _, latent_length, _, _ = latent_image.shape
80
+ # noise_mask = torch.ones(
81
+ # (batch_size, 1, latent_length, 1, 1),
82
+ # dtype=torch.float32,
83
+ # device=latent_image.device,
84
+ # )
85
+ # else:
86
+ # noise_mask = noise_mask.clone()
87
+ # return noise_mask
88
+
89
+ # def get_keyframe_idxs(cond):
90
+ # keyframe_idxs = conditioning_get_any_value(cond, "keyframe_idxs", None)
91
+ # if keyframe_idxs is None:
92
+ # return None, 0
93
+ # num_keyframes = torch.unique(keyframe_idxs[:, 0]).shape[0]
94
+ # return keyframe_idxs, num_keyframes
95
+
96
+ # class LTXVAddGuide:
97
+ # @classmethod
98
+ # def INPUT_TYPES(s):
99
+ # return {"required": {"positive": ("CONDITIONING", ),
100
+ # "negative": ("CONDITIONING", ),
101
+ # "vae": ("VAE",),
102
+ # "latent": ("LATENT",),
103
+ # "image": ("IMAGE", {"tooltip": "Image or video to condition the latent video on. Must be 8*n + 1 frames."
104
+ # "If the video is not 8*n + 1 frames, it will be cropped to the nearest 8*n + 1 frames."}),
105
+ # "frame_idx": ("INT", {"default": 0, "min": -9999, "max": 9999,
106
+ # "tooltip": "Frame index to start the conditioning at. For single-frame images or "
107
+ # "videos with 1-8 frames, any frame_idx value is acceptable. For videos with 9+ "
108
+ # "frames, frame_idx must be divisible by 8, otherwise it will be rounded down to "
109
+ # "the nearest multiple of 8. Negative values are counted from the end of the video."}),
110
+ # "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
111
+ # }
112
+ # }
113
+
114
+ # RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
115
+ # RETURN_NAMES = ("positive", "negative", "latent")
116
+
117
+ # CATEGORY = "conditioning/video_models"
118
+ # FUNCTION = "generate"
119
+
120
+ # def __init__(self):
121
+ # self._num_prefix_frames = 2
122
+ # self._patchifier = SymmetricPatchifier(1)
123
+
124
+ # def encode(self, vae, latent_width, latent_height, images, scale_factors):
125
+ # time_scale_factor, width_scale_factor, height_scale_factor = scale_factors
126
+ # images = images[:(images.shape[0] - 1) // time_scale_factor * time_scale_factor + 1]
127
+ # pixels = ldm_patched.modules.utils.common_upscale(images.movedim(-1, 1), latent_width * width_scale_factor, latent_height * height_scale_factor, "bilinear", crop="disabled").movedim(1, -1)
128
+ # encode_pixels = pixels[:, :, :, :3]
129
+ # t = vae.encode(encode_pixels)
130
+ # return encode_pixels, t
131
+
132
+ # def get_latent_index(self, cond, latent_length, guide_length, frame_idx, scale_factors):
133
+ # time_scale_factor, _, _ = scale_factors
134
+ # _, num_keyframes = get_keyframe_idxs(cond)
135
+ # latent_count = latent_length - num_keyframes
136
+ # frame_idx = frame_idx if frame_idx >= 0 else max((latent_count - 1) * time_scale_factor + 1 + frame_idx, 0)
137
+ # if guide_length > 1 and frame_idx != 0:
138
+ # frame_idx = (frame_idx - 1) // time_scale_factor * time_scale_factor + 1 # frame index - 1 must be divisible by 8 or frame_idx == 0
139
+
140
+ # latent_idx = (frame_idx + time_scale_factor - 1) // time_scale_factor
141
+
142
+ # return frame_idx, latent_idx
143
+
144
+ # def add_keyframe_index(self, cond, frame_idx, guiding_latent, scale_factors):
145
+ # keyframe_idxs, _ = get_keyframe_idxs(cond)
146
+ # _, latent_coords = self._patchifier.patchify(guiding_latent)
147
+ # pixel_coords = latent_to_pixel_coords(latent_coords, scale_factors, causal_fix=frame_idx == 0) # we need the causal fix only if we're placing the new latents at index 0
148
+ # pixel_coords[:, 0] += frame_idx
149
+ # if keyframe_idxs is None:
150
+ # keyframe_idxs = pixel_coords
151
+ # else:
152
+ # keyframe_idxs = torch.cat([keyframe_idxs, pixel_coords], dim=2)
153
+ # return ldm_patched.contrib.node_helpers.conditioning_set_values(cond, {"keyframe_idxs": keyframe_idxs})
154
+
155
+ # def append_keyframe(self, positive, negative, frame_idx, latent_image, noise_mask, guiding_latent, strength, scale_factors):
156
+ # _, latent_idx = self.get_latent_index(
157
+ # cond=positive,
158
+ # latent_length=latent_image.shape[2],
159
+ # guide_length=guiding_latent.shape[2],
160
+ # frame_idx=frame_idx,
161
+ # scale_factors=scale_factors,
162
+ # )
163
+ # noise_mask[:, :, latent_idx:latent_idx + guiding_latent.shape[2]] = 1.0
164
+
165
+ # positive = self.add_keyframe_index(positive, frame_idx, guiding_latent, scale_factors)
166
+ # negative = self.add_keyframe_index(negative, frame_idx, guiding_latent, scale_factors)
167
+
168
+ # mask = torch.full(
169
+ # (noise_mask.shape[0], 1, guiding_latent.shape[2], 1, 1),
170
+ # 1.0 - strength,
171
+ # dtype=noise_mask.dtype,
172
+ # device=noise_mask.device,
173
+ # )
174
+
175
+ # latent_image = torch.cat([latent_image, guiding_latent], dim=2)
176
+ # noise_mask = torch.cat([noise_mask, mask], dim=2)
177
+ # return positive, negative, latent_image, noise_mask
178
+
179
+ # def replace_latent_frames(self, latent_image, noise_mask, guiding_latent, latent_idx, strength):
180
+ # cond_length = guiding_latent.shape[2]
181
+ # assert latent_image.shape[2] >= latent_idx + cond_length, "Conditioning frames exceed the length of the latent sequence."
182
+
183
+ # mask = torch.full(
184
+ # (noise_mask.shape[0], 1, cond_length, 1, 1),
185
+ # 1.0 - strength,
186
+ # dtype=noise_mask.dtype,
187
+ # device=noise_mask.device,
188
+ # )
189
+
190
+ # latent_image = latent_image.clone()
191
+ # noise_mask = noise_mask.clone()
192
+
193
+ # latent_image[:, :, latent_idx : latent_idx + cond_length] = guiding_latent
194
+ # noise_mask[:, :, latent_idx : latent_idx + cond_length] = mask
195
+
196
+ # return latent_image, noise_mask
197
+
198
+ # def generate(self, positive, negative, vae, latent, image, frame_idx, strength):
199
+ # scale_factors = vae.downscale_index_formula
200
+ # latent_image = latent["samples"]
201
+ # noise_mask = get_noise_mask(latent)
202
+
203
+ # _, _, latent_length, latent_height, latent_width = latent_image.shape
204
+ # image, t = self.encode(vae, latent_width, latent_height, image, scale_factors)
205
+
206
+ # frame_idx, latent_idx = self.get_latent_index(positive, latent_length, len(image), frame_idx, scale_factors)
207
+ # assert latent_idx + t.shape[2] <= latent_length, "Conditioning frames exceed the length of the latent sequence."
208
+
209
+ # num_prefix_frames = min(self._num_prefix_frames, t.shape[2])
210
+
211
+ # positive, negative, latent_image, noise_mask = self.append_keyframe(
212
+ # positive,
213
+ # negative,
214
+ # frame_idx,
215
+ # latent_image,
216
+ # noise_mask,
217
+ # t[:, :, :num_prefix_frames],
218
+ # strength,
219
+ # scale_factors,
220
+ # )
221
+
222
+ # latent_idx += num_prefix_frames
223
+
224
+ # t = t[:, :, num_prefix_frames:]
225
+ # if t.shape[2] == 0:
226
+ # return (positive, negative, {"samples": latent_image, "noise_mask": noise_mask},)
227
+
228
+ # latent_image, noise_mask = self.replace_latent_frames(
229
+ # latent_image,
230
+ # noise_mask,
231
+ # t,
232
+ # latent_idx,
233
+ # strength,
234
+ # )
235
+
236
+ # return (positive, negative, {"samples": latent_image, "noise_mask": noise_mask},)
237
+
238
+
239
+ # class LTXVCropGuides:
240
+ # @classmethod
241
+ # def INPUT_TYPES(s):
242
+ # return {"required": {"positive": ("CONDITIONING", ),
243
+ # "negative": ("CONDITIONING", ),
244
+ # "latent": ("LATENT",),
245
+ # }
246
+ # }
247
+
248
+ # RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
249
+ # RETURN_NAMES = ("positive", "negative", "latent")
250
+
251
+ # CATEGORY = "conditioning/video_models"
252
+ # FUNCTION = "crop"
253
+
254
+ # def __init__(self):
255
+ # self._patchifier = SymmetricPatchifier(1)
256
+
257
+ # def crop(self, positive, negative, latent):
258
+ # latent_image = latent["samples"].clone()
259
+ # noise_mask = get_noise_mask(latent)
260
+
261
+ # _, num_keyframes = get_keyframe_idxs(positive)
262
+ # if num_keyframes == 0:
263
+ # return (positive, negative, {"samples": latent_image, "noise_mask": noise_mask},)
264
+
265
+ # latent_image = latent_image[:, :, :-num_keyframes]
266
+ # noise_mask = noise_mask[:, :, :-num_keyframes]
267
+
268
+ # positive = ldm_patched.contrib.node_helpers.conditioning_set_values(positive, {"keyframe_idxs": None})
269
+ # negative = ldm_patched.contrib.node_helpers.conditioning_set_values(negative, {"keyframe_idxs": None})
270
+
271
+ # return (positive, negative, {"samples": latent_image, "noise_mask": noise_mask},)
272
+
273
+
274
+ # class LTXVConditioning:
275
+ # @classmethod
276
+ # def INPUT_TYPES(s):
277
+ # return {"required": {"positive": ("CONDITIONING", ),
278
+ # "negative": ("CONDITIONING", ),
279
+ # "frame_rate": ("FLOAT", {"default": 25.0, "min": 0.0, "max": 1000.0, "step": 0.01}),
280
+ # }}
281
+ # RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
282
+ # RETURN_NAMES = ("positive", "negative")
283
+ # FUNCTION = "append"
284
+
285
+ # CATEGORY = "conditioning/video_models"
286
+
287
+ # def append(self, positive, negative, frame_rate):
288
+ # positive = ldm_patched.contrib.node_helpers.conditioning_set_values(positive, {"frame_rate": frame_rate})
289
+ # negative = ldm_patched.contrib.node_helpers.conditioning_set_values(negative, {"frame_rate": frame_rate})
290
+ # return (positive, negative)
291
+
292
+
293
+ # class ModelSamplingLTXV:
294
+ # @classmethod
295
+ # def INPUT_TYPES(s):
296
+ # return {"required": { "model": ("MODEL",),
297
+ # "max_shift": ("FLOAT", {"default": 2.05, "min": 0.0, "max": 100.0, "step":0.01}),
298
+ # "base_shift": ("FLOAT", {"default": 0.95, "min": 0.0, "max": 100.0, "step":0.01}),
299
+ # },
300
+ # "optional": {"latent": ("LATENT",), }
301
+ # }
302
+
303
+ # RETURN_TYPES = ("MODEL",)
304
+ # FUNCTION = "patch"
305
+
306
+ # CATEGORY = "advanced/model"
307
+
308
+ # def patch(self, model, max_shift, base_shift, latent=None):
309
+ # m = model.clone()
310
+
311
+ # if latent is None:
312
+ # tokens = 4096
313
+ # else:
314
+ # tokens = math.prod(latent["samples"].shape[2:])
315
+
316
+ # x1 = 1024
317
+ # x2 = 4096
318
+ # mm = (max_shift - base_shift) / (x2 - x1)
319
+ # b = base_shift - mm * x1
320
+ # shift = (tokens) * mm + b
321
+
322
+ # sampling_base = ldm_patched.modules.model_sampling.ModelSamplingFlux
323
+ # sampling_type = ldm_patched.modules.model_sampling.CONST
324
+
325
+ # class ModelSamplingAdvanced(sampling_base, sampling_type):
326
+ # pass
327
+
328
+ # model_sampling = ModelSamplingAdvanced(model.model.model_config)
329
+ # model_sampling.set_parameters(shift=shift)
330
+ # m.add_object_patch("model_sampling", model_sampling)
331
+
332
+ # return (m, )
333
+
334
+
335
+ # class LTXVScheduler:
336
+ # @classmethod
337
+ # def INPUT_TYPES(s):
338
+ # return {"required":
339
+ # {"steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
340
+ # "max_shift": ("FLOAT", {"default": 2.05, "min": 0.0, "max": 100.0, "step":0.01}),
341
+ # "base_shift": ("FLOAT", {"default": 0.95, "min": 0.0, "max": 100.0, "step":0.01}),
342
+ # "stretch": ("BOOLEAN", {
343
+ # "default": True,
344
+ # "tooltip": "Stretch the sigmas to be in the range [terminal, 1]."
345
+ # }),
346
+ # "terminal": (
347
+ # "FLOAT",
348
+ # {
349
+ # "default": 0.1, "min": 0.0, "max": 0.99, "step": 0.01,
350
+ # "tooltip": "The terminal value of the sigmas after stretching."
351
+ # },
352
+ # ),
353
+ # },
354
+ # "optional": {"latent": ("LATENT",), }
355
+ # }
356
+
357
+ # RETURN_TYPES = ("SIGMAS",)
358
+ # CATEGORY = "sampling/custom_sampling/schedulers"
359
+
360
+ # FUNCTION = "get_sigmas"
361
+
362
+ # def get_sigmas(self, steps, max_shift, base_shift, stretch, terminal, latent=None):
363
+ # if latent is None:
364
+ # tokens = 4096
365
+ # else:
366
+ # tokens = math.prod(latent["samples"].shape[2:])
367
+
368
+ # sigmas = torch.linspace(1.0, 0.0, steps + 1)
369
+
370
+ # x1 = 1024
371
+ # x2 = 4096
372
+ # mm = (max_shift - base_shift) / (x2 - x1)
373
+ # b = base_shift - mm * x1
374
+ # sigma_shift = (tokens) * mm + b
375
+
376
+ # power = 1
377
+ # sigmas = torch.where(
378
+ # sigmas != 0,
379
+ # math.exp(sigma_shift) / (math.exp(sigma_shift) + (1 / sigmas - 1) ** power),
380
+ # 0,
381
+ # )
382
+
383
+ # # Stretch sigmas so that its final value matches the given terminal value.
384
+ # if stretch:
385
+ # non_zero_mask = sigmas != 0
386
+ # non_zero_sigmas = sigmas[non_zero_mask]
387
+ # one_minus_z = 1.0 - non_zero_sigmas
388
+ # scale_factor = one_minus_z[-1] / (1.0 - terminal)
389
+ # stretched = 1.0 - (one_minus_z / scale_factor)
390
+ # sigmas[non_zero_mask] = stretched
391
+
392
+ # return (sigmas,)
393
+
394
+ # def encode_single_frame(output_file, image_array: np.ndarray, crf):
395
+ # container = av.open(output_file, "w", format="mp4")
396
+ # try:
397
+ # stream = container.add_stream(
398
+ # "libx264", rate=1, options={"crf": str(crf), "preset": "veryfast"}
399
+ # )
400
+ # stream.height = image_array.shape[0]
401
+ # stream.width = image_array.shape[1]
402
+ # av_frame = av.VideoFrame.from_ndarray(image_array, format="rgb24").reformat(
403
+ # format="yuv420p"
404
+ # )
405
+ # container.mux(stream.encode(av_frame))
406
+ # container.mux(stream.encode())
407
+ # finally:
408
+ # container.close()
409
+
410
+
411
+ # def decode_single_frame(video_file):
412
+ # container = av.open(video_file)
413
+ # try:
414
+ # stream = next(s for s in container.streams if s.type == "video")
415
+ # frame = next(container.decode(stream))
416
+ # finally:
417
+ # container.close()
418
+ # return frame.to_ndarray(format="rgb24")
419
+
420
+
421
+ # def preprocess(image: torch.Tensor, crf=29):
422
+ # if crf == 0:
423
+ # return image
424
+
425
+ # image_array = (image[:(image.shape[0] // 2) * 2, :(image.shape[1] // 2) * 2] * 255.0).byte().cpu().numpy()
426
+ # with io.BytesIO() as output_file:
427
+ # encode_single_frame(output_file, image_array, crf)
428
+ # video_bytes = output_file.getvalue()
429
+ # with io.BytesIO(video_bytes) as video_file:
430
+ # image_array = decode_single_frame(video_file)
431
+ # tensor = torch.tensor(image_array, dtype=image.dtype, device=image.device) / 255.0
432
+ # return tensor
433
+
434
+
435
+ # class LTXVPreprocess:
436
+ # @classmethod
437
+ # def INPUT_TYPES(s):
438
+ # return {
439
+ # "required": {
440
+ # "image": ("IMAGE",),
441
+ # "img_compression": (
442
+ # "INT",
443
+ # {
444
+ # "default": 35,
445
+ # "min": 0,
446
+ # "max": 100,
447
+ # "tooltip": "Amount of compression to apply on image.",
448
+ # },
449
+ # ),
450
+ # }
451
+ # }
452
+
453
+ # FUNCTION = "preprocess"
454
+ # RETURN_TYPES = ("IMAGE",)
455
+ # RETURN_NAMES = ("output_image",)
456
+ # CATEGORY = "image"
457
+
458
+ # def preprocess(self, image, img_compression):
459
+ # output_images = []
460
+ # for i in range(image.shape[0]):
461
+ # output_images.append(preprocess(image[i], img_compression))
462
+ # return (torch.stack(output_images),)
463
+
464
+
465
+ # # Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
466
+ #NODE_CLASS_MAPPINGS = {
467
+ # "EmptyLTXVLatentVideo": EmptyLTXVLatentVideo,
468
+ # "LTXVImgToVideo": LTXVImgToVideo,
469
+ # "ModelSamplingLTXV": ModelSamplingLTXV,
470
+ # "LTXVConditioning": LTXVConditioning,
471
+ # "LTXVScheduler": LTXVScheduler,
472
+ # "LTXVAddGuide": LTXVAddGuide,
473
+ # "LTXVPreprocess": LTXVPreprocess,
474
+ # "LTXVCropGuides": LTXVCropGuides,
475
+ # }
ldm_patched/contrib/nodes_lumina2.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ldm_patched.modules.ldmpatched_types.node_typing import IO, ComfyNodeABC, InputTypeDict
2
+ import torch
3
+
4
+
5
+ class RenormCFG:
6
+ @classmethod
7
+ def INPUT_TYPES(s):
8
+ return {"required": { "model": ("MODEL",),
9
+ "cfg_trunc": ("FLOAT", {"default": 100, "min": 0.0, "max": 100.0, "step": 0.01}),
10
+ "renorm_cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}),
11
+ }}
12
+ RETURN_TYPES = ("MODEL",)
13
+ FUNCTION = "patch"
14
+
15
+ CATEGORY = "advanced/model"
16
+
17
+ def patch(self, model, cfg_trunc, renorm_cfg):
18
+ def renorm_cfg_func(args):
19
+ cond_denoised = args["cond_denoised"]
20
+ uncond_denoised = args["uncond_denoised"]
21
+ cond_scale = args["cond_scale"]
22
+ timestep = args["timestep"]
23
+ x_orig = args["input"]
24
+ in_channels = model.model.diffusion_model.in_channels
25
+
26
+ if timestep[0] < cfg_trunc:
27
+ cond_eps, uncond_eps = cond_denoised[:, :in_channels], uncond_denoised[:, :in_channels]
28
+ cond_rest, _ = cond_denoised[:, in_channels:], uncond_denoised[:, in_channels:]
29
+ half_eps = uncond_eps + cond_scale * (cond_eps - uncond_eps)
30
+ half_rest = cond_rest
31
+
32
+ if float(renorm_cfg) > 0.0:
33
+ ori_pos_norm = torch.linalg.vector_norm(cond_eps
34
+ , dim=tuple(range(1, len(cond_eps.shape))), keepdim=True
35
+ )
36
+ max_new_norm = ori_pos_norm * float(renorm_cfg)
37
+ new_pos_norm = torch.linalg.vector_norm(
38
+ half_eps, dim=tuple(range(1, len(half_eps.shape))), keepdim=True
39
+ )
40
+ if new_pos_norm >= max_new_norm:
41
+ half_eps = half_eps * (max_new_norm / new_pos_norm)
42
+ else:
43
+ cond_eps, uncond_eps = cond_denoised[:, :in_channels], uncond_denoised[:, :in_channels]
44
+ cond_rest, _ = cond_denoised[:, in_channels:], uncond_denoised[:, in_channels:]
45
+ half_eps = cond_eps
46
+ half_rest = cond_rest
47
+
48
+ cfg_result = torch.cat([half_eps, half_rest], dim=1)
49
+
50
+ # cfg_result = uncond_denoised + (cond_denoised - uncond_denoised) * cond_scale
51
+
52
+ return x_orig - cfg_result
53
+
54
+ m = model.clone()
55
+ m.set_model_sampler_cfg_function(renorm_cfg_func)
56
+ return (m, )
57
+
58
+
59
+ class CLIPTextEncodeLumina2(ComfyNodeABC):
60
+ SYSTEM_PROMPT = {
61
+ "superior": "You are an assistant designed to generate superior images with the superior "\
62
+ "degree of image-text alignment based on textual prompts or user prompts.",
63
+ "alignment": "You are an assistant designed to generate high-quality images with the "\
64
+ "highest degree of image-text alignment based on textual prompts."
65
+ }
66
+ SYSTEM_PROMPT_TIP = "Lumina2 provide two types of system prompts:" \
67
+ "Superior: You are an assistant designed to generate superior images with the superior "\
68
+ "degree of image-text alignment based on textual prompts or user prompts. "\
69
+ "Alignment: You are an assistant designed to generate high-quality images with the highest "\
70
+ "degree of image-text alignment based on textual prompts."
71
+ @classmethod
72
+ def INPUT_TYPES(s) -> InputTypeDict:
73
+ return {
74
+ "required": {
75
+ "system_prompt": (list(CLIPTextEncodeLumina2.SYSTEM_PROMPT.keys()), {"tooltip": CLIPTextEncodeLumina2.SYSTEM_PROMPT_TIP}),
76
+ "user_prompt": (IO.STRING, {"multiline": True, "dynamicPrompts": True, "tooltip": "The text to be encoded."}),
77
+ "clip": (IO.CLIP, {"tooltip": "The CLIP model used for encoding the text."})
78
+ }
79
+ }
80
+ RETURN_TYPES = (IO.CONDITIONING,)
81
+ OUTPUT_TOOLTIPS = ("A conditioning containing the embedded text used to guide the diffusion model.",)
82
+ FUNCTION = "encode"
83
+
84
+ CATEGORY = "conditioning"
85
+ DESCRIPTION = "Encodes a system prompt and a user prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images."
86
+
87
+ def encode(self, clip, user_prompt, system_prompt):
88
+ if clip is None:
89
+ raise RuntimeError("ERROR: clip input is invalid: None\n\nIf the clip is from a checkpoint loader node your checkpoint does not contain a valid clip or text encoder model.")
90
+ system_prompt = CLIPTextEncodeLumina2.SYSTEM_PROMPT[system_prompt]
91
+ prompt = f'{system_prompt} <Prompt Start> {user_prompt}'
92
+ tokens = clip.tokenize(prompt)
93
+ return (clip.encode_from_tokens_scheduled(tokens), )
94
+
95
+
96
+ # Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
97
+ NODE_CLASS_MAPPINGS = {
98
+ "CLIPTextEncodeLumina2": CLIPTextEncodeLumina2,
99
+ "RenormCFG": RenormCFG
100
+ }
101
+
102
+
103
+ NODE_DISPLAY_NAME_MAPPINGS = {
104
+ "CLIPTextEncodeLumina2": "CLIP Text Encode for Lumina2",
105
+ }
ldm_patched/contrib/nodes_mahiro.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn.functional as F
3
+
4
+ class Mahiro:
5
+ @classmethod
6
+ def INPUT_TYPES(s):
7
+ return {"required": {"model": ("MODEL",),
8
+ }}
9
+ RETURN_TYPES = ("MODEL",)
10
+ RETURN_NAMES = ("patched_model",)
11
+ FUNCTION = "patch"
12
+ CATEGORY = "_for_testing"
13
+ DESCRIPTION = "Modify the guidance to scale more on the 'direction' of the positive prompt rather than the difference between the negative prompt."
14
+ def patch(self, model):
15
+ m = model.clone()
16
+ def mahiro_normd(args):
17
+ scale: float = args['cond_scale']
18
+ cond_p: torch.Tensor = args['cond_denoised']
19
+ uncond_p: torch.Tensor = args['uncond_denoised']
20
+ #naive leap
21
+ leap = cond_p * scale
22
+ #sim with uncond leap
23
+ u_leap = uncond_p * scale
24
+ cfg = args["denoised"]
25
+ merge = (leap + cfg) / 2
26
+ normu = torch.sqrt(u_leap.abs()) * u_leap.sign()
27
+ normm = torch.sqrt(merge.abs()) * merge.sign()
28
+ sim = F.cosine_similarity(normu, normm).mean()
29
+ simsc = 2 * (sim+1)
30
+ wm = (simsc*cfg + (4-simsc)*leap) / 4
31
+ return wm
32
+ m.set_model_sampler_post_cfg_function(mahiro_normd)
33
+ return (m, )
34
+
35
+ # Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
36
+ NODE_CLASS_MAPPINGS = {
37
+ "Mahiro": Mahiro
38
+ }
39
+
40
+ NODE_DISPLAY_NAME_MAPPINGS = {
41
+ "Mahiro": "Mahiro is so cute that she deserves a better guidance function!! (。・ω・。)",
42
+ }
ldm_patched/contrib/nodes_mask.py ADDED
@@ -0,0 +1,373 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Original code from Comfy, https://github.com/comfyanonymous/ComfyUI
2
+
3
+
4
+
5
+ import numpy as np
6
+ import scipy.ndimage
7
+ import torch
8
+ import ldm_patched.modules.utils
9
+
10
+ from ldm_patched.contrib.nodes import MAX_RESOLUTION
11
+
12
+ def composite(destination, source, x, y, mask = None, multiplier = 8, resize_source = False):
13
+ source = source.to(destination.device)
14
+ if resize_source:
15
+ source = torch.nn.functional.interpolate(source, size=(destination.shape[2], destination.shape[3]), mode="bilinear")
16
+
17
+ source = ldm_patched.modules.utils.repeat_to_batch_size(source, destination.shape[0])
18
+
19
+ x = max(-source.shape[3] * multiplier, min(x, destination.shape[3] * multiplier))
20
+ y = max(-source.shape[2] * multiplier, min(y, destination.shape[2] * multiplier))
21
+
22
+ left, top = (x // multiplier, y // multiplier)
23
+ right, bottom = (left + source.shape[3], top + source.shape[2],)
24
+
25
+ if mask is None:
26
+ mask = torch.ones_like(source)
27
+ else:
28
+ mask = mask.to(destination.device, copy=True)
29
+ mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(source.shape[2], source.shape[3]), mode="bilinear")
30
+ mask = ldm_patched.modules.utils.repeat_to_batch_size(mask, source.shape[0])
31
+
32
+ # calculate the bounds of the source that will be overlapping the destination
33
+ # this prevents the source trying to overwrite latent pixels that are out of bounds
34
+ # of the destination
35
+ visible_width, visible_height = (destination.shape[3] - left + min(0, x), destination.shape[2] - top + min(0, y),)
36
+
37
+ mask = mask[:, :, :visible_height, :visible_width]
38
+ inverse_mask = torch.ones_like(mask) - mask
39
+
40
+ source_portion = mask * source[:, :, :visible_height, :visible_width]
41
+ destination_portion = inverse_mask * destination[:, :, top:bottom, left:right]
42
+
43
+ destination[:, :, top:bottom, left:right] = source_portion + destination_portion
44
+ return destination
45
+
46
+ class LatentCompositeMasked:
47
+ @classmethod
48
+ def INPUT_TYPES(s):
49
+ return {
50
+ "required": {
51
+ "destination": ("LATENT",),
52
+ "source": ("LATENT",),
53
+ "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
54
+ "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
55
+ "resize_source": ("BOOLEAN", {"default": False}),
56
+ },
57
+ "optional": {
58
+ "mask": ("MASK",),
59
+ }
60
+ }
61
+ RETURN_TYPES = ("LATENT",)
62
+ FUNCTION = "composite"
63
+
64
+ CATEGORY = "latent"
65
+
66
+ def composite(self, destination, source, x, y, resize_source, mask = None):
67
+ output = destination.copy()
68
+ destination = destination["samples"].clone()
69
+ source = source["samples"]
70
+ output["samples"] = composite(destination, source, x, y, mask, 8, resize_source)
71
+ return (output,)
72
+
73
+ class ImageCompositeMasked:
74
+ @classmethod
75
+ def INPUT_TYPES(s):
76
+ return {
77
+ "required": {
78
+ "destination": ("IMAGE",),
79
+ "source": ("IMAGE",),
80
+ "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
81
+ "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
82
+ "resize_source": ("BOOLEAN", {"default": False}),
83
+ },
84
+ "optional": {
85
+ "mask": ("MASK",),
86
+ }
87
+ }
88
+ RETURN_TYPES = ("IMAGE",)
89
+ FUNCTION = "composite"
90
+
91
+ CATEGORY = "image"
92
+
93
+ def composite(self, destination, source, x, y, resize_source, mask = None):
94
+ if destination.shape[-1] < source.shape[-1]:
95
+ source = source[...,:destination.shape[-1]]
96
+ elif destination.shape[-1] > source.shape[-1]:
97
+ destination = torch.nn.functional.pad(destination, (0, 1))
98
+ destination[..., -1] = source[..., -1]
99
+ destination = destination.clone().movedim(-1, 1)
100
+ output = composite(destination, source.movedim(-1, 1), x, y, mask, 1, resize_source).movedim(1, -1)
101
+ return (output,)
102
+
103
+ class MaskToImage:
104
+ @classmethod
105
+ def INPUT_TYPES(s):
106
+ return {
107
+ "required": {
108
+ "mask": ("MASK",),
109
+ }
110
+ }
111
+
112
+ CATEGORY = "mask"
113
+
114
+ RETURN_TYPES = ("IMAGE",)
115
+ FUNCTION = "mask_to_image"
116
+
117
+ def mask_to_image(self, mask):
118
+ result = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3)
119
+ return (result,)
120
+
121
+ class ImageToMask:
122
+ @classmethod
123
+ def INPUT_TYPES(s):
124
+ return {
125
+ "required": {
126
+ "image": ("IMAGE",),
127
+ "channel": (["red", "green", "blue", "alpha"],),
128
+ }
129
+ }
130
+
131
+ CATEGORY = "mask"
132
+
133
+ RETURN_TYPES = ("MASK",)
134
+ FUNCTION = "image_to_mask"
135
+
136
+ def image_to_mask(self, image, channel):
137
+ channels = ["red", "green", "blue", "alpha"]
138
+ mask = image[:, :, :, channels.index(channel)]
139
+ return (mask,)
140
+
141
class ImageColorToMask:
    """Node that builds a mask selecting pixels matching an exact RGB color."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "color": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFF, "step": 1, "display": "color"}),
            }
        }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "image_to_mask"

    def image_to_mask(self, image, color):
        """Mark pixels of a [B, H, W, C] image whose RGB equals `color` (0xRRGGBB)."""
        # Quantize to 8-bit, then pack R/G/B into one 24-bit integer per pixel.
        rgb = (torch.clamp(image, 0, 1.0) * 255.0).round().to(torch.int)
        packed = (rgb[:, :, :, 0] << 16) + (rgb[:, :, :, 1] << 8) + rgb[:, :, :, 2]
        # NOTE(review): matches produce 255.0, not 1.0 — inconsistent with the
        # 0-1 scale of the other mask nodes here; confirm consumers expect it.
        mask = torch.where(packed == color, 255, 0).float()
        return (mask,)
161
+
162
class SolidMask:
    """Node that produces a constant-valued mask of a given size."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "value": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)

    FUNCTION = "solid"

    def solid(self, value, width, height):
        """Return a [1, height, width] float32 CPU mask filled with `value`."""
        mask = torch.full((1, height, width), value, dtype=torch.float32, device="cpu")
        return (mask,)
182
+
183
class InvertMask:
    """Node that inverts a mask (1 becomes 0 and vice versa)."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "mask": ("MASK",),
            }
        }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)

    FUNCTION = "invert"

    def invert(self, mask):
        """Return the complement of `mask` with respect to 1.0."""
        inverted = 1.0 - mask
        return (inverted,)
201
+
202
class CropMask:
    """Node that crops a rectangular region out of a mask."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "mask": ("MASK",),
                "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)

    FUNCTION = "crop"

    def crop(self, mask, x, y, width, height):
        """Return the [y:y+height, x:x+width] window of each mask in the batch.

        Slicing clamps naturally at the tensor edges, so an out-of-range
        rectangle yields a smaller (possibly empty) result rather than an error.
        """
        batch = mask.reshape((-1, mask.shape[-2], mask.shape[-1]))
        cropped = batch[:, y:y + height, x:x + width]
        return (cropped,)
225
+
226
class MaskComposite:
    """Node that combines two masks with an arithmetic or boolean operation."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "destination": ("MASK",),
                "source": ("MASK",),
                "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "operation": (["multiply", "add", "subtract", "and", "or", "xor"],),
            }
        }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)

    FUNCTION = "combine"

    def combine(self, destination, source, x, y, operation):
        """Apply `source` onto `destination` at offset (x, y) using `operation`.

        The source is clipped so it never writes outside the destination.
        Boolean ops ("and"/"or"/"xor") round both operands to bool first;
        the result is clamped to [0, 1].
        """
        output = destination.reshape((-1, destination.shape[-2], destination.shape[-1])).clone()
        source = source.reshape((-1, source.shape[-2], source.shape[-1]))

        left, top = (x, y,)
        right, bottom = (min(left + source.shape[-1], output.shape[-1]), min(top + source.shape[-2], output.shape[-2]))
        visible_width, visible_height = (right - left, bottom - top,)

        source_portion = source[:, :visible_height, :visible_width]
        # Bug fix: read from the reshaped `output`, not the raw `destination`,
        # so inputs with extra leading dimensions are indexed consistently
        # with where the result is written below.
        destination_portion = output[:, top:bottom, left:right]

        if operation == "multiply":
            output[:, top:bottom, left:right] = destination_portion * source_portion
        elif operation == "add":
            output[:, top:bottom, left:right] = destination_portion + source_portion
        elif operation == "subtract":
            output[:, top:bottom, left:right] = destination_portion - source_portion
        elif operation == "and":
            output[:, top:bottom, left:right] = torch.bitwise_and(destination_portion.round().bool(), source_portion.round().bool()).float()
        elif operation == "or":
            output[:, top:bottom, left:right] = torch.bitwise_or(destination_portion.round().bool(), source_portion.round().bool()).float()
        elif operation == "xor":
            output[:, top:bottom, left:right] = torch.bitwise_xor(destination_portion.round().bool(), source_portion.round().bool()).float()

        output = torch.clamp(output, 0.0, 1.0)

        return (output,)
272
+
273
class FeatherMask:
    """Node that linearly fades a mask toward its border on each edge."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "mask": ("MASK",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)

    FUNCTION = "feather"

    def feather(self, mask, left, top, right, bottom):
        """Return a copy of `mask` whose edges ramp linearly.

        Each of `left`/`top`/`right`/`bottom` is the feather width in pixels
        for that edge; 0 disables feathering there. The outermost pixel of a
        feathered edge is scaled by 1/width, growing linearly inward to 1.
        """
        output = mask.reshape((-1, mask.shape[-2], mask.shape[-1])).clone()

        # Clamp widths so the ramps never exceed the mask dimensions.
        left = min(left, output.shape[-1])
        right = min(right, output.shape[-1])
        top = min(top, output.shape[-2])
        bottom = min(bottom, output.shape[-2])

        for x in range(left):
            feather_rate = (x + 1.0) / left
            output[:, :, x] *= feather_rate

        for x in range(right):
            feather_rate = (x + 1.0) / right
            # Bug fix: the original indexed `-x`, which is 0 when x == 0, so
            # it darkened the LEFT edge and never touched the rightmost
            # column. `-x - 1` walks inward from the right edge.
            output[:, :, -x - 1] *= feather_rate

        for y in range(top):
            feather_rate = (y + 1.0) / top
            output[:, y, :] *= feather_rate

        for y in range(bottom):
            feather_rate = (y + 1.0) / bottom
            # Same off-by-one fix as the `right` loop, for the bottom edge.
            output[:, -y - 1, :] *= feather_rate

        return (output,)
317
+
318
class GrowMask:
    """Node that dilates (expand > 0) or erodes (expand < 0) a mask."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "mask": ("MASK",),
                "expand": ("INT", {"default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 1}),
                "tapered_corners": ("BOOLEAN", {"default": True}),
            },
        }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)

    FUNCTION = "expand_mask"

    def expand_mask(self, mask, expand, tapered_corners):
        """Grow or shrink every mask in the batch by |expand| morphology steps.

        With `tapered_corners` the 3x3 structuring element has zeroed
        corners, producing a diamond-shaped (tapered) growth per step.
        """
        corner = 0 if tapered_corners else 1
        footprint = np.array([[corner, 1, corner],
                              [1, 1, 1],
                              [corner, 1, corner]])
        # Negative expand erodes; positive dilates. Choose the op once.
        morph = scipy.ndimage.grey_erosion if expand < 0 else scipy.ndimage.grey_dilation
        batch = mask.reshape((-1, mask.shape[-2], mask.shape[-1]))
        results = []
        for single in batch:
            grown = single.numpy()
            for _ in range(abs(expand)):
                grown = morph(grown, footprint=footprint)
            results.append(torch.from_numpy(grown))
        return (torch.stack(results, dim=0),)
352
+
353
+
354
+
355
# Original code and file from ComfyUI, https://github.com/comfyanonymous/ComfyUI
# Registry consumed by the node loader: maps the node's string identifier to
# the class implementing it. LatentCompositeMasked / ImageCompositeMasked are
# defined earlier in this file.
NODE_CLASS_MAPPINGS = {
    "LatentCompositeMasked": LatentCompositeMasked,
    "ImageCompositeMasked": ImageCompositeMasked,
    "MaskToImage": MaskToImage,
    "ImageToMask": ImageToMask,
    "ImageColorToMask": ImageColorToMask,
    "SolidMask": SolidMask,
    "InvertMask": InvertMask,
    "CropMask": CropMask,
    "MaskComposite": MaskComposite,
    "FeatherMask": FeatherMask,
    "GrowMask": GrowMask,
}

# Human-readable UI labels for nodes whose display name differs from their
# identifier; nodes absent here fall back to the identifier itself.
NODE_DISPLAY_NAME_MAPPINGS = {
    "ImageToMask": "Convert Image to Mask",
    "MaskToImage": "Convert Mask to Image",
}