jaxmetaverse committed on
Commit
80bb2a3
·
verified ·
1 Parent(s): cf22903

Delete custom_nodes

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. custom_nodes/Comfy-WaveSpeed/.github/workflows/publish_action.yml +0 -20
  2. custom_nodes/Comfy-WaveSpeed/.gitignore +0 -162
  3. custom_nodes/Comfy-WaveSpeed/LICENSE +0 -21
  4. custom_nodes/Comfy-WaveSpeed/README.md +0 -104
  5. custom_nodes/Comfy-WaveSpeed/__init__.py +0 -53
  6. custom_nodes/Comfy-WaveSpeed/assets/flux_optimized.png +0 -3
  7. custom_nodes/Comfy-WaveSpeed/assets/flux_original.png +0 -3
  8. custom_nodes/Comfy-WaveSpeed/assets/usage_compile.png +0 -3
  9. custom_nodes/Comfy-WaveSpeed/assets/usage_fbcache.png +0 -0
  10. custom_nodes/Comfy-WaveSpeed/fbcache_nodes.py +0 -294
  11. custom_nodes/Comfy-WaveSpeed/first_block_cache.py +0 -841
  12. custom_nodes/Comfy-WaveSpeed/misc_nodes.py +0 -152
  13. custom_nodes/Comfy-WaveSpeed/patchers.py +0 -139
  14. custom_nodes/Comfy-WaveSpeed/pyproject.toml +0 -14
  15. custom_nodes/Comfy-WaveSpeed/utils.py +0 -127
  16. custom_nodes/Comfy-WaveSpeed/velocator_nodes.py +0 -413
  17. custom_nodes/Comfy-WaveSpeed/workflows/flux.json +0 -994
  18. custom_nodes/Comfy-WaveSpeed/workflows/flux_controlnet.json +0 -888
  19. custom_nodes/Comfy-WaveSpeed/workflows/hunyuan_video.json +0 -851
  20. custom_nodes/Comfy-WaveSpeed/workflows/ltxv.json +0 -723
  21. custom_nodes/Comfy-WaveSpeed/workflows/sd3.5.json +0 -657
  22. custom_nodes/Comfy-WaveSpeed/workflows/sdxl.json +0 -706
  23. custom_nodes/ComfyUI-3D-Pack/.gitattributes +0 -2
  24. custom_nodes/ComfyUI-3D-Pack/.github/FUNDING.yml +0 -13
  25. custom_nodes/ComfyUI-3D-Pack/.github/workflows/publish.yml +0 -21
  26. custom_nodes/ComfyUI-3D-Pack/.gitignore +0 -172
  27. custom_nodes/ComfyUI-3D-Pack/.vscode/settings.json +0 -8
  28. custom_nodes/ComfyUI-3D-Pack/Checkpoints/CRM/Put Convolutional Reconstruction Model here.txt +0 -0
  29. custom_nodes/ComfyUI-3D-Pack/Checkpoints/CRM_T2I_V3/Put CRM_T2I_V3 model here.txt +0 -0
  30. custom_nodes/ComfyUI-3D-Pack/Checkpoints/CharacterGen/.gitattributes +0 -35
  31. custom_nodes/ComfyUI-3D-Pack/Checkpoints/CharacterGen/2D_Stage/models/image_encoder/config.json +0 -23
  32. custom_nodes/ComfyUI-3D-Pack/Checkpoints/CharacterGen/3D_Stage/models/base/README.md +0 -60
  33. custom_nodes/ComfyUI-3D-Pack/Checkpoints/CharacterGen/3D_Stage/models/base/config.json +0 -24
  34. custom_nodes/ComfyUI-3D-Pack/Checkpoints/CharacterGen/3D_Stage/models/base/preprocessor_config.json +0 -27
  35. custom_nodes/ComfyUI-3D-Pack/Checkpoints/CharacterGen/Put CharacterGen Model here.txt +0 -0
  36. custom_nodes/ComfyUI-3D-Pack/Checkpoints/CharacterGen/README.md +0 -22
  37. custom_nodes/ComfyUI-3D-Pack/Checkpoints/Craftsman/image-to-shape-diffusion/clip-mvrgb-modln-l256-e64-ne8-nd16-nl6-aligned-vae/Put Craftsman Model here.txt +0 -0
  38. custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/JeffreyXiang/TRELLIS-image-large/.gitattributes +0 -35
  39. custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/JeffreyXiang/TRELLIS-image-large/README.md +0 -16
  40. custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/.gitattributes +0 -35
  41. custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/README.md +0 -5
  42. custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/image2mvimage/feature_extractor/preprocessor_config.json +0 -44
  43. custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/image2mvimage/image_encoder/config.json +0 -23
  44. custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/image2mvimage/model_index.json +0 -31
  45. custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/image2mvimage/scheduler/scheduler_config.json +0 -20
  46. custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/image2mvimage/unet/config.json +0 -68
  47. custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/image2mvimage/vae/config.json +0 -34
  48. custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/image2normal/feature_extractor/preprocessor_config.json +0 -44
  49. custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/image2normal/image_encoder/config.json +0 -23
  50. custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/image2normal/model_index.json +0 -31
custom_nodes/Comfy-WaveSpeed/.github/workflows/publish_action.yml DELETED
@@ -1,20 +0,0 @@
1
- name: Publish to Comfy registry
2
- on:
3
- workflow_dispatch:
4
- push:
5
- branches:
6
- - main
7
- paths:
8
- - "pyproject.toml"
9
-
10
- jobs:
11
- publish-node:
12
- name: Publish Custom Node to registry
13
- runs-on: ubuntu-latest
14
- steps:
15
- - name: Check out code
16
- uses: actions/checkout@v4
17
- - name: Publish Custom Node
18
- uses: Comfy-Org/publish-node-action@main
19
- with:
20
- personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }} ## Add your own personal access token to your Github Repository secrets and reference it here.
 
custom_nodes/Comfy-WaveSpeed/.gitignore DELETED
@@ -1,162 +0,0 @@
1
- # Byte-compiled / optimized / DLL files
2
- __pycache__/
3
- *.py[cod]
4
- *$py.class
5
-
6
- # C extensions
7
- *.so
8
-
9
- # Distribution / packaging
10
- .Python
11
- build/
12
- develop-eggs/
13
- dist/
14
- downloads/
15
- eggs/
16
- .eggs/
17
- lib/
18
- lib64/
19
- parts/
20
- sdist/
21
- var/
22
- wheels/
23
- share/python-wheels/
24
- *.egg-info/
25
- .installed.cfg
26
- *.egg
27
- MANIFEST
28
-
29
- # PyInstaller
30
- # Usually these files are written by a python script from a template
31
- # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
- *.manifest
33
- *.spec
34
-
35
- # Installer logs
36
- pip-log.txt
37
- pip-delete-this-directory.txt
38
-
39
- # Unit test / coverage reports
40
- htmlcov/
41
- .tox/
42
- .nox/
43
- .coverage
44
- .coverage.*
45
- .cache
46
- nosetests.xml
47
- coverage.xml
48
- *.cover
49
- *.py,cover
50
- .hypothesis/
51
- .pytest_cache/
52
- cover/
53
-
54
- # Translations
55
- *.mo
56
- *.pot
57
-
58
- # Django stuff:
59
- *.log
60
- local_settings.py
61
- db.sqlite3
62
- db.sqlite3-journal
63
-
64
- # Flask stuff:
65
- instance/
66
- .webassets-cache
67
-
68
- # Scrapy stuff:
69
- .scrapy
70
-
71
- # Sphinx documentation
72
- docs/_build/
73
-
74
- # PyBuilder
75
- .pybuilder/
76
- target/
77
-
78
- # Jupyter Notebook
79
- .ipynb_checkpoints
80
-
81
- # IPython
82
- profile_default/
83
- ipython_config.py
84
-
85
- # pyenv
86
- # For a library or package, you might want to ignore these files since the code is
87
- # intended to run in multiple environments; otherwise, check them in:
88
- # .python-version
89
-
90
- # pipenv
91
- # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
- # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
- # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
- # install all needed dependencies.
95
- #Pipfile.lock
96
-
97
- # poetry
98
- # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99
- # This is especially recommended for binary packages to ensure reproducibility, and is more
100
- # commonly ignored for libraries.
101
- # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102
- #poetry.lock
103
-
104
- # pdm
105
- # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106
- #pdm.lock
107
- # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108
- # in version control.
109
- # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
110
- .pdm.toml
111
- .pdm-python
112
- .pdm-build/
113
-
114
- # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115
- __pypackages__/
116
-
117
- # Celery stuff
118
- celerybeat-schedule
119
- celerybeat.pid
120
-
121
- # SageMath parsed files
122
- *.sage.py
123
-
124
- # Environments
125
- .env
126
- .venv
127
- env/
128
- venv/
129
- ENV/
130
- env.bak/
131
- venv.bak/
132
-
133
- # Spyder project settings
134
- .spyderproject
135
- .spyproject
136
-
137
- # Rope project settings
138
- .ropeproject
139
-
140
- # mkdocs documentation
141
- /site
142
-
143
- # mypy
144
- .mypy_cache/
145
- .dmypy.json
146
- dmypy.json
147
-
148
- # Pyre type checker
149
- .pyre/
150
-
151
- # pytype static type analyzer
152
- .pytype/
153
-
154
- # Cython debug symbols
155
- cython_debug/
156
-
157
- # PyCharm
158
- # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159
- # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160
- # and can be added to the global gitignore or merged into this file. For a more nuclear
161
- # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162
- #.idea/
 
custom_nodes/Comfy-WaveSpeed/LICENSE DELETED
@@ -1,21 +0,0 @@
1
- MIT License
2
-
3
- Copyright (c) 2024 C
4
-
5
- Permission is hereby granted, free of charge, to any person obtaining a copy
6
- of this software and associated documentation files (the "Software"), to deal
7
- in the Software without restriction, including without limitation the rights
8
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
- copies of the Software, and to permit persons to whom the Software is
10
- furnished to do so, subject to the following conditions:
11
-
12
- The above copyright notice and this permission notice shall be included in all
13
- copies or substantial portions of the Software.
14
-
15
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
- SOFTWARE.
 
custom_nodes/Comfy-WaveSpeed/README.md DELETED
@@ -1,104 +0,0 @@
1
- # Comfy-WaveSpeed
2
-
3
- [WIP] The all-in-one inference optimization solution for ComfyUI: universal, flexible, and fast.
4
-
5
- - [x] [Dynamic Caching (First Block Cache)](https://github.com/chengzeyi/ParaAttention?tab=readme-ov-file#first-block-cache-our-dynamic-caching)
6
- - [x] Enhanced `torch.compile`
7
-
8
- More to come...
9
-
10
- - [ ] Multi-GPU Inference (ComfyUI version of [ParaAttention's Context Parallelism](https://github.com/chengzeyi/ParaAttention?tab=readme-ov-file#context-parallelism))
11
-
12
- | FLUX.1-dev Original | FLUX.1-dev with First Block Cache and Compilation |
13
- | - | - |
14
- | ![FLUX.1-dev Original](./assets/flux_original.png) | ![FLUX.1-dev with First Block Cache and Compilation](./assets/flux_optimized.png) |
15
-
16
- This project has just launched and we are actively working on it. Please stay tuned.
17
- For any request or question, please join the Discord server.
18
-
19
- [Discord Server](https://discord.gg/xtk6jUtYtr)
20
-
21
- [Comfy Registry](https://registry.comfy.org/nodes/wavespeed)
22
-
23
- # Installation
24
-
25
- ```bash
26
- cd custom_nodes
27
- git clone https://github.com/chengzeyi/Comfy-WaveSpeed.git
28
- ```
29
-
30
- # Usage
31
-
32
- ## Demo Workflows
33
-
34
- You can find demo workflows in the `workflows` folder.
35
-
36
- | Workflow | Path |
37
- | - | - |
38
- | FLUX.1-dev with First Block Cache and Compilation | [workflows/flux.json](./workflows/flux.json)
39
- | FLUX.1-dev ControlNet with First Block Cache and Compilation | [workflows/flux_controlnet.json](./workflows/flux_controlnet.json)
40
- | LTXV with First Block Cache and Compilation | [workflows/ltxv.json](./workflows/ltxv.json)
41
- | HunyuanVideo with First Block Cache | [workflows/hunyuan_video.json](./workflows/hunyuan_video.json)
42
- | SD3.5 with First Block Cache and Compilation | [workflows/sd3.5.json](./workflows/sd3.5.json)
43
- | SDXL with First Block Cache | [workflows/sdxl.json](./workflows/sdxl.json)
44
-
45
- **NOTE**: The `Compile Model+` node requires your environment to meet certain software and hardware requirements; please refer to the [Enhanced `torch.compile`](#enhanced-torchcompile) section for more information.
46
- If you have problems with the compilation node, you can remove it from the workflow and only use the `Apply First Block Cache` node.
47
- The `Apply First Block Cache` node can still bring you a significant speedup.
48
-
49
- ## Dynamic Caching ([First Block Cache](https://github.com/chengzeyi/ParaAttention?tab=readme-ov-file#first-block-cache-our-dynamic-caching))
50
-
51
- Inspired by TeaCache and other denoising caching algorithms, we introduce [First Block Cache (FBCache)](https://github.com/chengzeyi/ParaAttention?tab=readme-ov-file#first-block-cache-our-dynamic-caching) to use the residual output of the first transformer block as the cache indicator.
52
- If the difference between the current and the previous residual output of the first transformer block is small enough, we can reuse the previous final residual output and skip the computation of all the following transformer blocks.
53
- This can significantly reduce the computation cost of the model, achieving a speedup of up to 2x while maintaining high accuracy.
54
-
55
- To use first block cache, simply add the `wavespeed->Apply First Block Cache` node to your workflow after your `Load Diffusion Model` node and adjust the `residual_diff_threshold` value to a suitable value for your model, for example: `0.12` for `flux-dev.safetensors` with `fp8_e4m3fn_fast` and 28 steps.
56
- It is expected to see a speedup of 1.5x to 3.0x with acceptable accuracy loss.
57
-
58
- It supports many models such as `FLUX`, `LTXV (native and non-native)`, `HunyuanVideo (native)`, `SD3.5` and `SDXL`. Feel free to try it out and let us know if you have any issues!
59
-
60
- Some configurations for different models that you can try:
61
-
62
- | Model | Steps | `residual_diff_threshold` |
63
- | - | - | - |
64
- | `flux-dev.safetensors` with `fp8_e4m3fn_fast` | 28 | 0.12 |
65
- | `ltx-video-2b-v0.9.1.safetensors` | 30 | 0.1 |
66
- | `hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors` | 20 | 0.1 |
67
- | `sd3.5_large_fp8_scaled.safetensors` | 30 | 0.12 |
68
- | `sd_xl_base_1.0.safetensors` | 25 | 0.2 |
69
-
70
- **NOTE**: SDXL First Block Cache is incompatible with the [FreeU Advanced](https://github.com/WASasquatch/FreeU_Advanced) node pack and will not function properly if it is installed and enabled.
71
-
72
- See [Apply First Block Cache on FLUX.1-dev](https://github.com/chengzeyi/ParaAttention/blob/main/doc/fastest_flux.md#apply-first-block-cache-on-flux1-dev) for more information and detailed comparison on quality and speed.
73
-
74
- ![Usage of First Block Cache](./assets/usage_fbcache.png)
75
-
76
- ## Enhanced `torch.compile`
77
-
78
- To use the Enhanced `torch.compile`, simply add the `wavespeed->Compile Model+` node to your workflow after your `Load Diffusion Model` node or `Apply First Block Cache` node.
79
- The compilation process happens the first time you run the workflow, and it takes quite a long time, but it will be cached for future runs.
80
- You can pass different `mode` values to make it run faster, for example `max-autotune` or `max-autotune-no-cudagraphs`.
81
- One of the advantages of this node over the original `TorchCompileModel` node is that it works with LoRA.
82
-
83
- It is suggested to pass `--gpu-only` when launching `ComfyUI` if you are using this node. For example, with `comfy-cli`:
84
-
85
- ```bash
86
- comfy launch -- --gpu-only
87
- ```
88
-
89
- If you encounter any problems with frequent compilation after changing the resolution or text prompt, you could try enabling the `dynamic` option of the `Compile Model+` node.
90
- Alternatively, you can launch `ComfyUI` with the environment variable `TORCH_LOGS=recompiles_verbose` to debug the reason for recompilation.
91
-
92
- **NOTE**: `torch.compile` may not work well with model offloading; you can try passing `--gpu-only` when launching `ComfyUI` to disable model offloading.
93
-
94
- **NOTE**: `torch.compile` is not officially supported on Windows; if you are affected by this, either remove this node or search for workarounds to make it work.
95
-
96
- **NOTE**: Compiling a model with FP8 quantization does not work on pre-Ada GPUs such as the RTX 3090; try using FP16/BF16 models or removing the compilation node.
97
-
98
- ![Usage of Enhanced `torch.compile`](./assets/usage_compile.png)
99
-
100
- # Others
101
-
102
- ## Use with `diffusers`
103
-
104
- Please refer to [ParaAttention](https://github.com/chengzeyi/ParaAttention) for more information.
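
For orientation, the caching decision described in the README above reduces to a mean relative difference test on the first transformer block's residual, as implemented by `are_two_tensors_similar` in the deleted `first_block_cache.py` further down in this diff. The following is a minimal, self-contained sketch of that test; the tensor shapes and example values are illustrative only, and `0.12` is simply the README's suggested threshold for `flux-dev`:

```python
import torch

def should_use_cache(prev_residual, curr_residual, threshold):
    # Mirrors are_two_tensors_similar(): mean relative L1 difference between
    # the current and previous first-block residuals, compared to a threshold.
    if prev_residual is None or prev_residual.shape != curr_residual.shape:
        return False  # shape changed (new resolution/batch): always recompute
    mean_diff = (prev_residual - curr_residual).abs().mean()
    rel_diff = mean_diff / prev_residual.abs().mean()
    return rel_diff.item() < threshold

# Illustrative tensors; 0.12 is the README's suggested threshold for flux-dev.
prev = torch.randn(1, 4096, 3072)
curr = prev + 0.05 * torch.randn_like(prev)
print(should_use_cache(prev, curr, threshold=0.12))  # likely True: blocks skipped
```

When the check fails, the node stores the fresh residuals with `set_buffer` and runs the remaining transformer blocks as usual, as shown in the deleted implementation below.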
 
custom_nodes/Comfy-WaveSpeed/__init__.py DELETED
@@ -1,53 +0,0 @@
1
- from .fbcache_nodes import ApplyFBCacheOnModel
2
- from .misc_nodes import (
3
- EnhancedLoadDiffusionModel,
4
- EnhancedCompileModel,
5
- )
6
- from .velocator_nodes import (
7
- VelocatorCompileModel,
8
- VelocatorLoadAndQuantizeClip,
9
- VelocatorLoadAndQuantizeDiffusionModel,
10
- VelocatorQuantizeModel,
11
- )
12
-
13
-
14
- def patch_cast_to():
15
- def cast_to(weight, dtype=None, device=None, non_blocking=False, copy=False):
16
- if device is None or weight.device == device:
17
- if not copy:
18
- if dtype is None or weight.dtype == dtype:
19
- return weight
20
- return weight.to(dtype=dtype, copy=copy)
21
-
22
- # torch.empty_like does not work with tensor subclasses well
23
- # r = torch.empty_like(weight, dtype=dtype, device=device)
24
- # r.copy_(weight, non_blocking=non_blocking)
25
- r = weight.to(device=device, dtype=dtype, non_blocking=non_blocking, copy=copy)
26
- return r
27
-
28
- from comfy import model_management
29
-
30
- model_management.cast_to = cast_to
31
-
32
-
33
- patch_cast_to()
34
-
35
- NODE_CLASS_MAPPINGS = {
36
- "ApplyFBCacheOnModel": ApplyFBCacheOnModel,
37
- "EnhancedLoadDiffusionModel": EnhancedLoadDiffusionModel,
38
- "EnhancedCompileModel": EnhancedCompileModel,
39
- "VelocatorLoadAndQuantizeDiffusionModel": VelocatorLoadAndQuantizeDiffusionModel,
40
- "VelocatorLoadAndQuantizeClip": VelocatorLoadAndQuantizeClip,
41
- "VelocatorQuantizeModel": VelocatorQuantizeModel,
42
- "VelocatorCompileModel": VelocatorCompileModel,
43
- }
44
-
45
- NODE_DISPLAY_NAME_MAPPINGS = {
46
- "ApplyFBCacheOnModel": "Apply First Block Cache",
47
- "EnhancedLoadDiffusionModel": "Load Diffusion Model+",
48
- "EnhancedCompileModel": "Compile Model+",
49
- "VelocatorLoadAndQuantizeDiffusionModel": "🚀Load & Quantize Diffusion Model",
50
- "VelocatorLoadAndQuantizeClip": "🚀Load & Quantize CLIP",
51
- "VelocatorQuantizeModel": "🚀Quantize Model",
52
- "VelocatorCompileModel": "🚀Compile Model",
53
- }
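
The `patch_cast_to` override in the deleted `__init__.py` above exists to avoid the `torch.empty_like` + `copy_` path (which, per the inline comment, interacts poorly with tensor subclasses) and to return weights untouched when no move is needed. A self-contained restatement of that fast path, assuming only illustrative example tensors:

```python
import torch

def cast_to(weight, dtype=None, device=None, non_blocking=False, copy=False):
    # Same control flow as the patched function above.
    if device is None or weight.device == device:
        if not copy and (dtype is None or weight.dtype == dtype):
            return weight  # fast path: no device move, no dtype change, no copy
        return weight.to(dtype=dtype, copy=copy)
    return weight.to(device=device, dtype=dtype, non_blocking=non_blocking, copy=copy)

w = torch.randn(4, 4)
print(cast_to(w) is w)                       # True: returned untouched
print(cast_to(w, dtype=torch.float16) is w)  # False: dtype change allocates
```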
 
custom_nodes/Comfy-WaveSpeed/assets/flux_optimized.png DELETED

Git LFS Details

  • SHA256: 8d57901bf60abda7ac005e95ca1d7177226a4f752241b644e9917cfab110978f
  • Pointer size: 132 Bytes
  • Size of remote file: 1.75 MB
custom_nodes/Comfy-WaveSpeed/assets/flux_original.png DELETED

Git LFS Details

  • SHA256: 39fa61ea7b1c5482ed3cee7184c4f12040516f3f9dc131e1554921e76ecfe0bd
  • Pointer size: 132 Bytes
  • Size of remote file: 1.82 MB
custom_nodes/Comfy-WaveSpeed/assets/usage_compile.png DELETED

Git LFS Details

  • SHA256: 590808a5429b79740b9232041a1a588a851a6c1d496d45ad7f8695768093af8e
  • Pointer size: 131 Bytes
  • Size of remote file: 107 kB
custom_nodes/Comfy-WaveSpeed/assets/usage_fbcache.png DELETED
Binary file (40 kB)
 
custom_nodes/Comfy-WaveSpeed/fbcache_nodes.py DELETED
@@ -1,294 +0,0 @@
1
- import contextlib
2
- import unittest
3
- import torch
4
-
5
- from comfy import model_management
6
-
7
- from . import first_block_cache
8
-
9
-
10
- class ApplyFBCacheOnModel:
11
-
12
- @classmethod
13
- def INPUT_TYPES(s):
14
- return {
15
- "required": {
16
- "model": ("MODEL", ),
17
- "object_to_patch": (
18
- "STRING",
19
- {
20
- "default": "diffusion_model",
21
- },
22
- ),
23
- "residual_diff_threshold": (
24
- "FLOAT",
25
- {
26
- "default":
27
- 0.0,
28
- "min":
29
- 0.0,
30
- "max":
31
- 1.0,
32
- "step":
33
- 0.001,
34
- "tooltip":
35
- "Controls the tolerance for caching with lower values being more strict. Setting this to 0 disables the FBCache effect.",
36
- },
37
- ),
38
- "start": (
39
- "FLOAT",
40
- {
41
- "default":
42
- 0.0,
43
- "step":
44
- 0.01,
45
- "max":
46
- 1.0,
47
- "min":
48
- 0.0,
49
- "tooltip":
50
- "Start time as a percentage of sampling where the FBCache effect can apply. Example: 0.0 would signify 0% (the beginning of sampling), 0.5 would signify 50%.",
51
- },
52
- ),
53
- "end": ("FLOAT", {
54
- "default":
55
- 1.0,
56
- "step":
57
- 0.01,
58
- "max":
59
- 1.0,
60
- "min":
61
- 0.0,
62
- "tooltip":
63
- "End time as a percentage of sampling where the FBCache effect can apply. Example: 1.0 would signify 100% (the end of sampling), 0.5 would signify 50%.",
64
- }),
65
- "max_consecutive_cache_hits": (
66
- "INT",
67
- {
68
- "default":
69
- -1,
70
- "min":
71
- -1,
72
- "tooltip":
73
- "Allows limiting how many cached results can be used in a row. For example, setting this to 1 will mean there will be at least one full model call after each cached result. Set to 0 to disable FBCache effect, or -1 to allow unlimited consecutive cache hits.",
74
- },
75
- ),
76
- }
77
- }
78
-
79
- RETURN_TYPES = ("MODEL", )
80
- FUNCTION = "patch"
81
-
82
- CATEGORY = "wavespeed"
83
-
84
- def patch(
85
- self,
86
- model,
87
- object_to_patch,
88
- residual_diff_threshold,
89
- max_consecutive_cache_hits=-1,
90
- start=0.0,
91
- end=1.0,
92
- ):
93
- if residual_diff_threshold <= 0.0 or max_consecutive_cache_hits == 0:
94
- return (model, )
95
-
96
- first_block_cache.patch_get_output_data()
97
-
98
- using_validation = max_consecutive_cache_hits >= 0 or start > 0 or end < 1
99
- if using_validation:
100
- model_sampling = model.get_model_object("model_sampling")
101
- start_sigma, end_sigma = (float(
102
- model_sampling.percent_to_sigma(pct)) for pct in (start, end))
103
- del model_sampling
104
-
105
- @torch.compiler.disable()
106
- def validate_use_cache(use_cached):
107
- nonlocal consecutive_cache_hits
108
- use_cached = use_cached and end_sigma <= current_timestep <= start_sigma
109
- use_cached = use_cached and (max_consecutive_cache_hits < 0
110
- or consecutive_cache_hits
111
- < max_consecutive_cache_hits)
112
- consecutive_cache_hits = consecutive_cache_hits + 1 if use_cached else 0
113
- return use_cached
114
- else:
115
- validate_use_cache = None
116
-
117
- prev_timestep = None
118
- prev_input_state = None
119
- current_timestep = None
120
- consecutive_cache_hits = 0
121
-
122
- def reset_cache_state():
123
- # Resets the cache state and hits/time tracking variables.
124
- nonlocal prev_input_state, prev_timestep, consecutive_cache_hits
125
- prev_input_state = prev_timestep = None
126
- consecutive_cache_hits = 0
127
- first_block_cache.set_current_cache_context(
128
- first_block_cache.create_cache_context())
129
-
130
- def ensure_cache_state(model_input: torch.Tensor, timestep: float):
131
- # Validates the current cache state and hits/time tracking variables
132
- # and triggers a reset if necessary. Also updates current_timestep and
133
- # maintains the cache context sequence number.
134
- nonlocal current_timestep
135
- input_state = (model_input.shape, model_input.dtype, model_input.device)
136
- cache_context = first_block_cache.get_current_cache_context()
137
- # We reset when:
138
- need_reset = (
139
- # The previous timestep or input state is not set
140
- prev_timestep is None or
141
- prev_input_state is None or
142
- # Or dtype/device have changed
143
- prev_input_state[1:] != input_state[1:] or
144
- # Or the input state after the batch dimension has changed
145
- prev_input_state[0][1:] != input_state[0][1:] or
146
- # Or there is no cache context (in this case reset is just making a context)
147
- cache_context is None or
148
- # Or the current timestep is higher than the previous one
149
- timestep > prev_timestep
150
- )
151
- if need_reset:
152
- reset_cache_state()
153
- elif timestep == prev_timestep:
154
- # When the current timestep is the same as the previous, we assume ComfyUI has split up
155
- # the model evaluation into multiple chunks. In this case, we increment the sequence number.
156
- # Note: No need to check if cache_context is None for these branches as need_reset would be True
157
- # if so.
158
- cache_context.sequence_num += 1
159
- elif timestep < prev_timestep:
160
- # When the timestep is less than the previous one, we can reset the context sequence number
161
- cache_context.sequence_num = 0
162
- current_timestep = timestep
163
-
164
- def update_cache_state(model_input: torch.Tensor, timestep: float):
165
- # Updates the previous timestep and input state validation variables.
166
- nonlocal prev_timestep, prev_input_state
167
- prev_timestep = timestep
168
- prev_input_state = (model_input.shape, model_input.dtype, model_input.device)
169
-
170
- model = model.clone()
171
- diffusion_model = model.get_model_object(object_to_patch)
172
-
173
- if diffusion_model.__class__.__name__ in ("UNetModel", "Flux"):
174
-
175
- if diffusion_model.__class__.__name__ == "UNetModel":
176
- create_patch_function = first_block_cache.create_patch_unet_model__forward
177
- elif diffusion_model.__class__.__name__ == "Flux":
178
- create_patch_function = first_block_cache.create_patch_flux_forward_orig
179
- else:
180
- raise ValueError(
181
- f"Unsupported model {diffusion_model.__class__.__name__}")
182
-
183
- patch_forward = create_patch_function(
184
- diffusion_model,
185
- residual_diff_threshold=residual_diff_threshold,
186
- validate_can_use_cache_function=validate_use_cache,
187
- )
188
-
189
- def model_unet_function_wrapper(model_function, kwargs):
190
- try:
191
- input = kwargs["input"]
192
- timestep = kwargs["timestep"]
193
- c = kwargs["c"]
194
- t = timestep[0].item()
195
-
196
- ensure_cache_state(input, t)
197
-
198
- with patch_forward():
199
- result = model_function(input, timestep, **c)
200
- update_cache_state(input, t)
201
- return result
202
- except Exception as exc:
203
- reset_cache_state()
204
- raise exc from None
205
- else:
206
- is_non_native_ltxv = False
207
- if diffusion_model.__class__.__name__ == "LTXVTransformer3D":
208
- is_non_native_ltxv = True
209
- diffusion_model = diffusion_model.transformer
210
-
211
- double_blocks_name = None
212
- single_blocks_name = None
213
- if hasattr(diffusion_model, "transformer_blocks"):
214
- double_blocks_name = "transformer_blocks"
215
- elif hasattr(diffusion_model, "double_blocks"):
216
- double_blocks_name = "double_blocks"
217
- elif hasattr(diffusion_model, "joint_blocks"):
218
- double_blocks_name = "joint_blocks"
219
- else:
220
- raise ValueError(
221
- f"No double blocks found for {diffusion_model.__class__.__name__}"
222
- )
223
-
224
- if hasattr(diffusion_model, "single_blocks"):
225
- single_blocks_name = "single_blocks"
226
-
227
- if is_non_native_ltxv:
228
- original_create_skip_layer_mask = getattr(
229
- diffusion_model, "create_skip_layer_mask", None)
230
- if original_create_skip_layer_mask is not None:
231
- # original_double_blocks = getattr(diffusion_model,
232
- # double_blocks_name)
233
-
234
- def new_create_skip_layer_mask(self, *args, **kwargs):
235
- # with unittest.mock.patch.object(self, double_blocks_name,
236
- # original_double_blocks):
237
- # return original_create_skip_layer_mask(*args, **kwargs)
238
- # return original_create_skip_layer_mask(*args, **kwargs)
239
- raise RuntimeError(
240
- "STG is not supported with FBCache yet")
241
-
242
- diffusion_model.create_skip_layer_mask = new_create_skip_layer_mask.__get__(
243
- diffusion_model)
244
-
245
- cached_transformer_blocks = torch.nn.ModuleList([
246
- first_block_cache.CachedTransformerBlocks(
247
- None if double_blocks_name is None else getattr(
248
- diffusion_model, double_blocks_name),
249
- None if single_blocks_name is None else getattr(
250
- diffusion_model, single_blocks_name),
251
- residual_diff_threshold=residual_diff_threshold,
252
- validate_can_use_cache_function=validate_use_cache,
253
- cat_hidden_states_first=diffusion_model.__class__.__name__
254
- == "HunyuanVideo",
255
- return_hidden_states_only=diffusion_model.__class__.
256
- __name__ == "LTXVModel" or is_non_native_ltxv,
257
- clone_original_hidden_states=diffusion_model.__class__.
258
- __name__ == "LTXVModel",
259
- return_hidden_states_first=diffusion_model.__class__.
260
- __name__ != "OpenAISignatureMMDITWrapper",
261
- accept_hidden_states_first=diffusion_model.__class__.
262
- __name__ != "OpenAISignatureMMDITWrapper",
263
- )
264
- ])
265
- dummy_single_transformer_blocks = torch.nn.ModuleList()
266
-
267
- def model_unet_function_wrapper(model_function, kwargs):
268
- try:
269
- input = kwargs["input"]
270
- timestep = kwargs["timestep"]
271
- c = kwargs["c"]
272
- t = timestep[0].item()
273
-
274
- ensure_cache_state(input, t)
275
-
276
- with unittest.mock.patch.object(
277
- diffusion_model,
278
- double_blocks_name,
279
- cached_transformer_blocks,
280
- ), unittest.mock.patch.object(
281
- diffusion_model,
282
- single_blocks_name,
283
- dummy_single_transformer_blocks,
284
- ) if single_blocks_name is not None else contextlib.nullcontext(
285
- ):
286
- result = model_function(input, timestep, **c)
287
- update_cache_state(input, t)
288
- return result
289
- except Exception as exc:
290
- reset_cache_state()
291
- raise exc from None
292
-
293
- model.set_model_unet_function_wrapper(model_unet_function_wrapper)
294
- return (model, )
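
The cache-invalidation rules in `ensure_cache_state` in the deleted `fbcache_nodes.py` above can be hard to follow inline; the snippet below is a toy, runnable restatement of the same decision order (the function name, labels and timestep values are illustrative, not part of the node):

```python
def cache_decision(prev_timestep, timestep, input_state_changed):
    # Condensed from the need_reset logic above.
    if prev_timestep is None or input_state_changed or timestep > prev_timestep:
        return "reset"       # new sampling run (timesteps/sigmas normally decrease)
    if timestep == prev_timestep:
        return "next_chunk"  # ComfyUI split the evaluation: bump sequence_num
    return "continue"        # normal next step within the same run

print(cache_decision(None, 14.6, False))  # reset
print(cache_decision(14.6, 14.6, False))  # next_chunk
print(cache_decision(14.6, 13.2, False))  # continue
print(cache_decision(0.7, 14.6, False))   # reset: timestep jumped back up
```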
 
custom_nodes/Comfy-WaveSpeed/first_block_cache.py DELETED
@@ -1,841 +0,0 @@
1
- import contextlib
2
- import dataclasses
3
- import unittest
4
- from collections import defaultdict
5
- from typing import DefaultDict, Dict
6
-
7
- import torch
8
-
9
-
10
- @dataclasses.dataclass
11
- class CacheContext:
12
- buffers: Dict[str, list] = dataclasses.field(default_factory=dict)
13
- incremental_name_counters: DefaultDict[str, int] = dataclasses.field(
14
- default_factory=lambda: defaultdict(int))
15
- sequence_num: int = 0
16
- use_cache: bool = False
17
-
18
- def get_incremental_name(self, name=None):
19
- if name is None:
20
- name = "default"
21
- idx = self.incremental_name_counters[name]
22
- self.incremental_name_counters[name] += 1
23
- return f"{name}_{idx}"
24
-
25
- def reset_incremental_names(self):
26
- self.incremental_name_counters.clear()
27
-
28
- @torch.compiler.disable()
29
- def get_buffer(self, name):
30
- item = self.buffers.get(name)
31
- if item is None or self.sequence_num >= len(item):
32
- return None
33
- return item[self.sequence_num]
34
-
35
- @torch.compiler.disable()
36
- def set_buffer(self, name, buffer):
37
- curr_item = self.buffers.get(name)
38
- if curr_item is None:
39
- curr_item = []
40
- self.buffers[name] = curr_item
41
- curr_item += [None] * (self.sequence_num - len(curr_item) + 1)
42
- curr_item[self.sequence_num] = buffer
43
-
44
- def clear_buffers(self):
45
- self.sequence_num = 0
46
- self.buffers.clear()
47
-
48
-
49
- @torch.compiler.disable()
50
- def get_buffer(name):
51
- cache_context = get_current_cache_context()
52
- assert cache_context is not None, "cache_context must be set before"
53
- return cache_context.get_buffer(name)
54
-
55
-
56
- @torch.compiler.disable()
57
- def set_buffer(name, buffer):
58
- cache_context = get_current_cache_context()
59
- assert cache_context is not None, "cache_context must be set before"
60
- cache_context.set_buffer(name, buffer)
61
-
62
-
63
- _current_cache_context = None
64
-
65
-
66
- def create_cache_context():
67
- return CacheContext()
68
-
69
-
70
- def get_current_cache_context():
71
- return _current_cache_context
72
-
73
-
74
- def set_current_cache_context(cache_context=None):
75
- global _current_cache_context
76
- _current_cache_context = cache_context
77
-
78
-
79
- @contextlib.contextmanager
80
- def cache_context(cache_context):
81
- global _current_cache_context
82
- old_cache_context = _current_cache_context
83
- _current_cache_context = cache_context
84
- try:
85
- yield
86
- finally:
87
- _current_cache_context = old_cache_context
88
-
89
-
90
- def patch_get_output_data():
91
- import execution
92
-
93
- get_output_data = getattr(execution, "get_output_data", None)
94
- if get_output_data is None:
95
- return
96
-
97
- if getattr(get_output_data, "_patched", False):
98
- return
99
-
100
- def new_get_output_data(*args, **kwargs):
101
- out = get_output_data(*args, **kwargs)
102
- cache_context = get_current_cache_context()
103
- if cache_context is not None:
104
- cache_context.clear_buffers()
105
- set_current_cache_context(None)
106
- return out
107
-
108
- new_get_output_data._patched = True
109
- execution.get_output_data = new_get_output_data
110
-
111
-
112
- @torch.compiler.disable()
113
- def are_two_tensors_similar(t1, t2, *, threshold, only_shape=False):
114
- if t1.shape != t2.shape:
115
- return False
116
- elif only_shape:
117
- return True
118
- mean_diff = (t1 - t2).abs().mean()
119
- mean_t1 = t1.abs().mean()
120
- diff = mean_diff / mean_t1
121
- return diff.item() < threshold
122
-
123
-
124
- @torch.compiler.disable()
125
- def apply_prev_hidden_states_residual(hidden_states,
126
- encoder_hidden_states=None):
127
- hidden_states_residual = get_buffer("hidden_states_residual")
128
- assert hidden_states_residual is not None, "hidden_states_residual must be set before"
129
- hidden_states = hidden_states_residual + hidden_states
130
- hidden_states = hidden_states.contiguous()
131
-
132
- if encoder_hidden_states is None:
133
- return hidden_states
134
-
135
- encoder_hidden_states_residual = get_buffer(
136
- "encoder_hidden_states_residual")
137
- if encoder_hidden_states_residual is None:
138
- encoder_hidden_states = None
139
- else:
140
- encoder_hidden_states = encoder_hidden_states_residual + encoder_hidden_states
141
- encoder_hidden_states = encoder_hidden_states.contiguous()
142
-
143
- return hidden_states, encoder_hidden_states
144
-
145
-
146
- @torch.compiler.disable()
147
- def get_can_use_cache(first_hidden_states_residual,
148
- threshold,
149
- parallelized=False,
150
- validation_function=None):
151
- prev_first_hidden_states_residual = get_buffer(
152
- "first_hidden_states_residual")
153
- cache_context = get_current_cache_context()
154
- if cache_context is None or prev_first_hidden_states_residual is None:
155
- return False
156
- can_use_cache = are_two_tensors_similar(
157
- prev_first_hidden_states_residual,
158
- first_hidden_states_residual,
159
- threshold=threshold,
160
- only_shape=cache_context.sequence_num > 0,
161
- )
162
- if cache_context.sequence_num > 0:
163
- cache_context.use_cache &= can_use_cache
164
- else:
165
- if validation_function is not None:
166
- can_use_cache = validation_function(can_use_cache)
167
- cache_context.use_cache = can_use_cache
168
- return cache_context.use_cache
169
-
170
-
171
- class CachedTransformerBlocks(torch.nn.Module):
172
-
173
- def __init__(
174
- self,
175
- transformer_blocks,
176
- single_transformer_blocks=None,
177
- *,
178
- residual_diff_threshold,
179
- validate_can_use_cache_function=None,
180
- return_hidden_states_first=True,
181
- accept_hidden_states_first=True,
182
- cat_hidden_states_first=False,
183
- return_hidden_states_only=False,
184
- clone_original_hidden_states=False,
185
- ):
186
- super().__init__()
187
- self.transformer_blocks = transformer_blocks
188
- self.single_transformer_blocks = single_transformer_blocks
189
- self.residual_diff_threshold = residual_diff_threshold
190
- self.validate_can_use_cache_function = validate_can_use_cache_function
191
- self.return_hidden_states_first = return_hidden_states_first
192
- self.accept_hidden_states_first = accept_hidden_states_first
193
- self.cat_hidden_states_first = cat_hidden_states_first
194
- self.return_hidden_states_only = return_hidden_states_only
195
- self.clone_original_hidden_states = clone_original_hidden_states
196
-
197
- def forward(self, *args, **kwargs):
198
- img_arg_name = None
199
- if "img" in kwargs:
200
- img_arg_name = "img"
201
- elif "hidden_states" in kwargs:
202
- img_arg_name = "hidden_states"
203
- txt_arg_name = None
204
- if "txt" in kwargs:
205
- txt_arg_name = "txt"
206
- elif "context" in kwargs:
207
- txt_arg_name = "context"
208
- elif "encoder_hidden_states" in kwargs:
209
- txt_arg_name = "encoder_hidden_states"
210
- if self.accept_hidden_states_first:
211
- if args:
212
- img = args[0]
213
- args = args[1:]
214
- else:
215
- img = kwargs.pop(img_arg_name)
216
- if args:
217
- txt = args[0]
218
- args = args[1:]
219
- else:
220
- txt = kwargs.pop(txt_arg_name)
221
- else:
222
- if args:
223
- txt = args[0]
224
- args = args[1:]
225
- else:
226
- txt = kwargs.pop(txt_arg_name)
227
- if args:
228
- img = args[0]
229
- args = args[1:]
230
- else:
231
- img = kwargs.pop(img_arg_name)
232
- hidden_states = img
233
- encoder_hidden_states = txt
234
- if self.residual_diff_threshold <= 0.0:
235
- for block in self.transformer_blocks:
236
- if txt_arg_name == "encoder_hidden_states":
237
- hidden_states = block(
238
- hidden_states,
239
- *args,
240
- encoder_hidden_states=encoder_hidden_states,
241
- **kwargs)
242
- else:
243
- if self.accept_hidden_states_first:
244
- hidden_states = block(hidden_states,
245
- encoder_hidden_states, *args,
246
- **kwargs)
247
- else:
248
- hidden_states = block(encoder_hidden_states,
249
- hidden_states, *args, **kwargs)
250
- if not self.return_hidden_states_only:
251
- hidden_states, encoder_hidden_states = hidden_states
252
- if not self.return_hidden_states_first:
253
- hidden_states, encoder_hidden_states = encoder_hidden_states, hidden_states
254
- if self.single_transformer_blocks is not None:
255
- hidden_states = torch.cat(
256
- [hidden_states, encoder_hidden_states]
257
- if self.cat_hidden_states_first else
258
- [encoder_hidden_states, hidden_states],
259
- dim=1)
260
- for block in self.single_transformer_blocks:
261
- hidden_states = block(hidden_states, *args, **kwargs)
262
- hidden_states = hidden_states[:,
263
- encoder_hidden_states.shape[1]:]
264
- if self.return_hidden_states_only:
265
- return hidden_states
266
- else:
267
- return ((hidden_states, encoder_hidden_states)
268
- if self.return_hidden_states_first else
269
- (encoder_hidden_states, hidden_states))
270
-
271
- original_hidden_states = hidden_states
272
- if self.clone_original_hidden_states:
273
- original_hidden_states = original_hidden_states.clone()
274
- first_transformer_block = self.transformer_blocks[0]
275
- if txt_arg_name == "encoder_hidden_states":
276
- hidden_states = first_transformer_block(
277
- hidden_states,
278
- *args,
279
- encoder_hidden_states=encoder_hidden_states,
280
- **kwargs)
281
- else:
282
- if self.accept_hidden_states_first:
283
- hidden_states = first_transformer_block(
284
- hidden_states, encoder_hidden_states, *args, **kwargs)
285
- else:
286
- hidden_states = first_transformer_block(
287
- encoder_hidden_states, hidden_states, *args, **kwargs)
288
- if not self.return_hidden_states_only:
289
- hidden_states, encoder_hidden_states = hidden_states
290
- if not self.return_hidden_states_first:
291
- hidden_states, encoder_hidden_states = encoder_hidden_states, hidden_states
292
- first_hidden_states_residual = hidden_states - original_hidden_states
293
- del original_hidden_states
294
-
295
- can_use_cache = get_can_use_cache(
296
- first_hidden_states_residual,
297
- threshold=self.residual_diff_threshold,
298
- validation_function=self.validate_can_use_cache_function,
299
- )
300
-
301
- torch._dynamo.graph_break()
302
- if can_use_cache:
303
- del first_hidden_states_residual
304
- hidden_states, encoder_hidden_states = apply_prev_hidden_states_residual(
305
- hidden_states, encoder_hidden_states)
306
- else:
307
- set_buffer("first_hidden_states_residual",
308
- first_hidden_states_residual)
309
- del first_hidden_states_residual
310
- (
311
- hidden_states,
312
- encoder_hidden_states,
313
- hidden_states_residual,
314
- encoder_hidden_states_residual,
315
- ) = self.call_remaining_transformer_blocks(
316
- hidden_states,
317
- encoder_hidden_states,
318
- *args,
319
- txt_arg_name=txt_arg_name,
320
- **kwargs)
321
- set_buffer("hidden_states_residual", hidden_states_residual)
322
- if encoder_hidden_states_residual is not None:
323
- set_buffer("encoder_hidden_states_residual",
324
- encoder_hidden_states_residual)
325
- torch._dynamo.graph_break()
326
-
327
- if self.return_hidden_states_only:
328
- return hidden_states
329
- else:
330
- return ((hidden_states, encoder_hidden_states)
331
- if self.return_hidden_states_first else
332
- (encoder_hidden_states, hidden_states))
333
-
334
- def call_remaining_transformer_blocks(self,
335
- hidden_states,
336
- encoder_hidden_states,
337
- *args,
338
- txt_arg_name=None,
339
- **kwargs):
340
- original_hidden_states = hidden_states
341
- original_encoder_hidden_states = encoder_hidden_states
342
- if self.clone_original_hidden_states:
343
- original_hidden_states = original_hidden_states.clone()
344
- original_encoder_hidden_states = original_encoder_hidden_states.clone(
345
- )
346
- for block in self.transformer_blocks[1:]:
347
- if txt_arg_name == "encoder_hidden_states":
348
- hidden_states = block(
349
- hidden_states,
350
- *args,
351
- encoder_hidden_states=encoder_hidden_states,
352
- **kwargs)
353
- else:
354
- if self.accept_hidden_states_first:
355
- hidden_states = block(hidden_states, encoder_hidden_states,
356
- *args, **kwargs)
357
- else:
358
- hidden_states = block(encoder_hidden_states, hidden_states,
359
- *args, **kwargs)
360
- if not self.return_hidden_states_only:
361
- hidden_states, encoder_hidden_states = hidden_states
362
- if not self.return_hidden_states_first:
363
- hidden_states, encoder_hidden_states = encoder_hidden_states, hidden_states
364
- if self.single_transformer_blocks is not None:
365
- hidden_states = torch.cat([hidden_states, encoder_hidden_states]
366
- if self.cat_hidden_states_first else
367
- [encoder_hidden_states, hidden_states],
368
- dim=1)
369
- for block in self.single_transformer_blocks:
370
- hidden_states = block(hidden_states, *args, **kwargs)
371
- if self.cat_hidden_states_first:
372
- hidden_states, encoder_hidden_states = hidden_states.split(
373
- [
374
- hidden_states.shape[1] -
375
- encoder_hidden_states.shape[1],
376
- encoder_hidden_states.shape[1]
377
- ],
378
- dim=1)
379
- else:
380
- encoder_hidden_states, hidden_states = hidden_states.split(
381
- [
382
- encoder_hidden_states.shape[1],
383
- hidden_states.shape[1] - encoder_hidden_states.shape[1]
384
- ],
385
- dim=1)
386
-
387
- hidden_states_shape = hidden_states.shape
388
- hidden_states = hidden_states.flatten().contiguous().reshape(
389
- hidden_states_shape)
390
-
391
- if encoder_hidden_states is not None:
392
- encoder_hidden_states_shape = encoder_hidden_states.shape
393
- encoder_hidden_states = encoder_hidden_states.flatten().contiguous(
394
- ).reshape(encoder_hidden_states_shape)
395
-
396
- hidden_states_residual = hidden_states - original_hidden_states
397
- if encoder_hidden_states is None:
398
- encoder_hidden_states_residual = None
399
- else:
400
- encoder_hidden_states_residual = encoder_hidden_states - original_encoder_hidden_states
401
- return hidden_states, encoder_hidden_states, hidden_states_residual, encoder_hidden_states_residual
402
-
403
-
404
- # Based on 90f349f93df3083a507854d7fc7c3e1bb9014e24
405
- def create_patch_unet_model__forward(model,
406
- *,
407
- residual_diff_threshold,
408
- validate_can_use_cache_function=None):
409
- from comfy.ldm.modules.diffusionmodules.openaimodel import timestep_embedding, forward_timestep_embed, apply_control
410
-
411
- def call_remaining_blocks(self, transformer_options, control,
412
- transformer_patches, hs, h, *args, **kwargs):
413
- original_hidden_states = h
414
-
415
- for id, module in enumerate(self.input_blocks):
416
- if id < 2:
417
- continue
418
- transformer_options["block"] = ("input", id)
419
- h = forward_timestep_embed(module, h, *args, **kwargs)
420
- h = apply_control(h, control, 'input')
421
- if "input_block_patch" in transformer_patches:
422
- patch = transformer_patches["input_block_patch"]
423
- for p in patch:
424
- h = p(h, transformer_options)
425
-
426
- hs.append(h)
427
- if "input_block_patch_after_skip" in transformer_patches:
428
- patch = transformer_patches["input_block_patch_after_skip"]
429
- for p in patch:
430
- h = p(h, transformer_options)
431
-
432
- transformer_options["block"] = ("middle", 0)
433
- if self.middle_block is not None:
434
- h = forward_timestep_embed(self.middle_block, h, *args, **kwargs)
435
- h = apply_control(h, control, 'middle')
436
-
437
- for id, module in enumerate(self.output_blocks):
438
- transformer_options["block"] = ("output", id)
439
- hsp = hs.pop()
440
- hsp = apply_control(hsp, control, 'output')
441
-
442
- if "output_block_patch" in transformer_patches:
443
- patch = transformer_patches["output_block_patch"]
444
- for p in patch:
445
- h, hsp = p(h, hsp, transformer_options)
446
-
447
- h = torch.cat([h, hsp], dim=1)
448
- del hsp
449
- if len(hs) > 0:
450
- output_shape = hs[-1].shape
451
- else:
452
- output_shape = None
453
- h = forward_timestep_embed(module, h, *args, output_shape,
454
- **kwargs)
455
- hidden_states_residual = h - original_hidden_states
456
- return h, hidden_states_residual
457
-
458
- def unet_model__forward(self,
459
- x,
460
- timesteps=None,
461
- context=None,
462
- y=None,
463
- control=None,
464
- transformer_options={},
465
- **kwargs):
466
- """
467
- Apply the model to an input batch.
468
- :param x: an [N x C x ...] Tensor of inputs.
469
- :param timesteps: a 1-D batch of timesteps.
470
- :param context: conditioning plugged in via crossattn
471
- :param y: an [N] Tensor of labels, if class-conditional.
472
- :return: an [N x C x ...] Tensor of outputs.
473
- """
474
- transformer_options["original_shape"] = list(x.shape)
475
- transformer_options["transformer_index"] = 0
476
- transformer_patches = transformer_options.get("patches", {})
477
-
478
- num_video_frames = kwargs.get("num_video_frames",
479
- self.default_num_video_frames)
480
- image_only_indicator = kwargs.get("image_only_indicator", None)
481
- time_context = kwargs.get("time_context", None)
482
-
483
- assert (y is not None) == (
484
- self.num_classes is not None
485
- ), "must specify y if and only if the model is class-conditional"
486
- hs = []
487
- t_emb = timestep_embedding(timesteps,
488
- self.model_channels,
489
- repeat_only=False).to(x.dtype)
490
- emb = self.time_embed(t_emb)
491
-
492
- if "emb_patch" in transformer_patches:
493
- patch = transformer_patches["emb_patch"]
494
- for p in patch:
495
- emb = p(emb, self.model_channels, transformer_options)
496
-
497
- if self.num_classes is not None:
498
- assert y.shape[0] == x.shape[0]
499
- emb = emb + self.label_emb(y)
500
-
501
- can_use_cache = False
502
-
503
- h = x
504
- for id, module in enumerate(self.input_blocks):
505
- if id >= 2:
506
- break
507
- transformer_options["block"] = ("input", id)
508
- if id == 1:
509
- original_h = h
510
- h = forward_timestep_embed(
511
- module,
512
- h,
513
- emb,
514
- context,
515
- transformer_options,
516
- time_context=time_context,
517
- num_video_frames=num_video_frames,
518
- image_only_indicator=image_only_indicator)
519
- h = apply_control(h, control, 'input')
520
- if "input_block_patch" in transformer_patches:
521
- patch = transformer_patches["input_block_patch"]
522
- for p in patch:
523
- h = p(h, transformer_options)
524
-
525
- hs.append(h)
526
- if "input_block_patch_after_skip" in transformer_patches:
527
- patch = transformer_patches["input_block_patch_after_skip"]
528
- for p in patch:
529
- h = p(h, transformer_options)
530
-
531
- if id == 1:
532
- first_hidden_states_residual = h - original_h
533
- can_use_cache = get_can_use_cache(
534
- first_hidden_states_residual,
535
- threshold=residual_diff_threshold,
536
- validation_function=validate_can_use_cache_function,
537
- )
538
- if not can_use_cache:
539
- set_buffer("first_hidden_states_residual",
540
- first_hidden_states_residual)
541
- del first_hidden_states_residual
542
-
543
- torch._dynamo.graph_break()
544
- if can_use_cache:
545
- h = apply_prev_hidden_states_residual(h)
546
- else:
547
- h, hidden_states_residual = call_remaining_blocks(
548
- self,
549
- transformer_options,
550
- control,
551
- transformer_patches,
552
- hs,
553
- h,
554
- emb,
555
- context,
556
- transformer_options,
557
- time_context=time_context,
558
- num_video_frames=num_video_frames,
559
- image_only_indicator=image_only_indicator)
560
- set_buffer("hidden_states_residual", hidden_states_residual)
561
- torch._dynamo.graph_break()
562
-
563
- h = h.type(x.dtype)
564
-
565
- if self.predict_codebook_ids:
566
- return self.id_predictor(h)
567
- else:
568
- return self.out(h)
569
-
570
- new__forward = unet_model__forward.__get__(model)
571
-
572
- @contextlib.contextmanager
573
- def patch__forward():
574
- with unittest.mock.patch.object(model, "_forward", new__forward):
575
- yield
576
-
577
- return patch__forward
578
-
579
-
580
- # Based on 90f349f93df3083a507854d7fc7c3e1bb9014e24
581
- def create_patch_flux_forward_orig(model,
582
- *,
583
- residual_diff_threshold,
584
- validate_can_use_cache_function=None):
585
- from torch import Tensor
586
- from comfy.ldm.flux.model import timestep_embedding
587
-
588
- def call_remaining_blocks(self, blocks_replace, control, img, txt, vec, pe,
589
- attn_mask, ca_idx, timesteps, transformer_options):
590
- original_hidden_states = img
591
-
592
- extra_block_forward_kwargs = {}
593
- if attn_mask is not None:
594
- extra_block_forward_kwargs["attn_mask"] = attn_mask
595
-
596
- for i, block in enumerate(self.double_blocks):
597
- if i < 1:
598
- continue
599
- if ("double_block", i) in blocks_replace:
600
-
601
- def block_wrap(args):
602
- out = {}
603
- out["img"], out["txt"] = block(
604
- img=args["img"],
605
- txt=args["txt"],
606
- vec=args["vec"],
607
- pe=args["pe"],
608
- **extra_block_forward_kwargs)
609
- return out
610
-
611
- out = blocks_replace[("double_block",
612
- i)]({
613
- "img": img,
614
- "txt": txt,
615
- "vec": vec,
616
- "pe": pe,
617
- **extra_block_forward_kwargs
618
- }, {
619
- "original_block": block_wrap,
620
- "transformer_options": transformer_options
621
- })
622
- txt = out["txt"]
623
- img = out["img"]
624
- else:
625
- img, txt = block(img=img,
626
- txt=txt,
627
- vec=vec,
628
- pe=pe,
629
- **extra_block_forward_kwargs)
630
-
631
- if control is not None: # Controlnet
632
- control_i = control.get("input")
633
- if i < len(control_i):
634
- add = control_i[i]
635
- if add is not None:
636
- img += add
637
-
638
- # PuLID attention
639
- if getattr(self, "pulid_data", {}):
640
- if i % self.pulid_double_interval == 0:
641
- # Will calculate influence of all pulid nodes at once
642
- for _, node_data in self.pulid_data.items():
643
- if torch.any((node_data['sigma_start'] >= timesteps)
644
- & (timesteps >= node_data['sigma_end'])):
645
- img = img + node_data['weight'] * self.pulid_ca[
646
- ca_idx](node_data['embedding'], img)
647
- ca_idx += 1
648
-
649
- img = torch.cat((txt, img), 1)
650
-
651
- for i, block in enumerate(self.single_blocks):
652
- if ("single_block", i) in blocks_replace:
653
-
654
- def block_wrap(args):
655
- out = {}
656
- out["img"] = block(args["img"],
657
- vec=args["vec"],
658
- pe=args["pe"],
659
- **extra_block_forward_kwargs)
660
- return out
661
-
662
-                out = blocks_replace[("single_block",
-                                      i)]({
-                                          "img": img,
-                                          "vec": vec,
-                                          "pe": pe,
-                                          **extra_block_forward_kwargs
-                                      }, {
-                                          "original_block": block_wrap,
-                                          "transformer_options": transformer_options
-                                      })
-                img = out["img"]
-            else:
-                img = block(img, vec=vec, pe=pe, **extra_block_forward_kwargs)
-
-            if control is not None:  # Controlnet
-                control_o = control.get("output")
-                if i < len(control_o):
-                    add = control_o[i]
-                    if add is not None:
-                        img[:, txt.shape[1]:, ...] += add
-
-            # PuLID attention
-            if getattr(self, "pulid_data", {}):
-                real_img, txt = img[:, txt.shape[1]:,
-                                    ...], img[:, :txt.shape[1], ...]
-                if i % self.pulid_single_interval == 0:
-                    # Will calculate influence of all nodes at once
-                    for _, node_data in self.pulid_data.items():
-                        if torch.any((node_data['sigma_start'] >= timesteps)
-                                     & (timesteps >= node_data['sigma_end'])):
-                            real_img = real_img + node_data[
-                                'weight'] * self.pulid_ca[ca_idx](
-                                    node_data['embedding'], real_img)
-                    ca_idx += 1
-                img = torch.cat((txt, real_img), 1)
-
-        img = img[:, txt.shape[1]:, ...]
-
-        img = img.contiguous()
-        hidden_states_residual = img - original_hidden_states
-        return img, hidden_states_residual
-
-    def forward_orig(
-        self,
-        img: Tensor,
-        img_ids: Tensor,
-        txt: Tensor,
-        txt_ids: Tensor,
-        timesteps: Tensor,
-        y: Tensor,
-        guidance: Tensor = None,
-        control=None,
-        transformer_options={},
-        attn_mask: Tensor = None,
-    ) -> Tensor:
-        patches_replace = transformer_options.get("patches_replace", {})
-        if img.ndim != 3 or txt.ndim != 3:
-            raise ValueError(
-                "Input img and txt tensors must have 3 dimensions.")
-
-        # running on sequences img
-        img = self.img_in(img)
-        vec = self.time_in(timestep_embedding(timesteps, 256).to(img.dtype))
-        if self.params.guidance_embed:
-            if guidance is None:
-                raise ValueError(
-                    "Didn't get guidance strength for guidance distilled model."
-                )
-            vec = vec + self.guidance_in(
-                timestep_embedding(guidance, 256).to(img.dtype))
-
-        vec = vec + self.vector_in(y[:, :self.params.vec_in_dim])
-        txt = self.txt_in(txt)
-
-        ids = torch.cat((txt_ids, img_ids), dim=1)
-        pe = self.pe_embedder(ids)
-
-        ca_idx = 0
-        extra_block_forward_kwargs = {}
-        if attn_mask is not None:
-            extra_block_forward_kwargs["attn_mask"] = attn_mask
-        blocks_replace = patches_replace.get("dit", {})
-        for i, block in enumerate(self.double_blocks):
-            if i >= 1:
-                break
-            if ("double_block", i) in blocks_replace:
-
-                def block_wrap(args):
-                    out = {}
-                    out["img"], out["txt"] = block(
-                        img=args["img"],
-                        txt=args["txt"],
-                        vec=args["vec"],
-                        pe=args["pe"],
-                        **extra_block_forward_kwargs)
-                    return out
-
-                out = blocks_replace[("double_block",
-                                      i)]({
-                                          "img": img,
-                                          "txt": txt,
-                                          "vec": vec,
-                                          "pe": pe,
-                                          **extra_block_forward_kwargs
-                                      }, {
-                                          "original_block": block_wrap,
-                                          "transformer_options": transformer_options
-                                      })
-                txt = out["txt"]
-                img = out["img"]
-            else:
-                img, txt = block(img=img,
-                                 txt=txt,
-                                 vec=vec,
-                                 pe=pe,
-                                 **extra_block_forward_kwargs)
-
-            if control is not None:  # Controlnet
-                control_i = control.get("input")
-                if i < len(control_i):
-                    add = control_i[i]
-                    if add is not None:
-                        img += add
-
-            # PuLID attention
-            if getattr(self, "pulid_data", {}):
-                if i % self.pulid_double_interval == 0:
-                    # Will calculate influence of all pulid nodes at once
-                    for _, node_data in self.pulid_data.items():
-                        if torch.any((node_data['sigma_start'] >= timesteps)
-                                     & (timesteps >= node_data['sigma_end'])):
-                            img = img + node_data['weight'] * self.pulid_ca[
-                                ca_idx](node_data['embedding'], img)
-                    ca_idx += 1
-
-            if i == 0:
-                first_hidden_states_residual = img
-                can_use_cache = get_can_use_cache(
-                    first_hidden_states_residual,
-                    threshold=residual_diff_threshold,
-                    validation_function=validate_can_use_cache_function,
-                )
-                if not can_use_cache:
-                    set_buffer("first_hidden_states_residual",
-                               first_hidden_states_residual)
-                del first_hidden_states_residual
-
-        torch._dynamo.graph_break()
-        if can_use_cache:
-            img = apply_prev_hidden_states_residual(img)
-        else:
-            img, hidden_states_residual = call_remaining_blocks(
-                self,
-                blocks_replace,
-                control,
-                img,
-                txt,
-                vec,
-                pe,
-                attn_mask,
-                ca_idx,
-                timesteps,
-                transformer_options,
-            )
-            set_buffer("hidden_states_residual", hidden_states_residual)
-        torch._dynamo.graph_break()
-
-        img = self.final_layer(img,
-                               vec)  # (N, T, patch_size ** 2 * out_channels)
-        return img
-
-    new_forward_orig = forward_orig.__get__(model)
-
-    @contextlib.contextmanager
-    def patch_forward_orig():
-        with unittest.mock.patch.object(model, "forward_orig",
-                                        new_forward_orig):
-            yield
-
-    return patch_forward_orig
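
Illustrative sketch (not from the deleted file): the patched forward_orig above runs only the first double block, measures how much its residual changed since the previous step via get_can_use_cache, and skips the remaining blocks by re-applying the cached full residual when the change is small. The helper name and the exact metric below are assumptions for illustration; only the threshold-on-relative-difference idea is taken from the code above.

import torch

def residuals_are_similar(prev, cur, threshold=0.12):
    # mean relative L1 difference between the current and the cached
    # first-block residual; below the threshold the step is "cacheable"
    diff = (cur - prev).abs().mean() / prev.abs().mean()
    return diff.item() < threshold

prev = torch.randn(1, 4096, 3072)
cur = prev + 0.01 * torch.randn_like(prev)
print(residuals_are_similar(prev, cur))  # True -> remaining blocks could be skipped
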
custom_nodes/Comfy-WaveSpeed/misc_nodes.py DELETED
@@ -1,152 +0,0 @@
-import folder_paths
-import importlib
-import json
-import comfy.sd
-
-from . import utils
-
-
-class EnhancedLoadDiffusionModel:
-    @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "unet_name": (folder_paths.get_filename_list("diffusion_models"),),
-                **utils.get_weight_dtype_inputs(),
-            }
-        }
-
-    RETURN_TYPES = ("MODEL",)
-    FUNCTION = "load_unet"
-
-    CATEGORY = "wavespeed"
-
-    def load_unet(self, unet_name, weight_dtype):
-        model_options = {}
-        model_options = utils.parse_weight_dtype(model_options, weight_dtype)
-
-        unet_path = folder_paths.get_full_path_or_raise("diffusion_models", unet_name)
-        model = comfy.sd.load_diffusion_model(unet_path, model_options=model_options)
-        return (model,)
-
-
-class EnhancedCompileModel:
-
-    @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "model": (utils.any_typ,),
-                "is_patcher": (
-                    "BOOLEAN",
-                    {
-                        "default": True,
-                    },
-                ),
-                "object_to_patch": (
-                    "STRING",
-                    {
-                        "default": "diffusion_model",
-                    },
-                ),
-                "compiler": (
-                    "STRING",
-                    {
-                        "default": "torch.compile",
-                    }
-                ),
-                "fullgraph": (
-                    "BOOLEAN",
-                    {
-                        "default": False,
-                    },
-                ),
-                "dynamic": ("BOOLEAN", {"default": False}),
-                "mode": (
-                    "STRING",
-                    {
-                        "multiline": True,
-                        "default": "",
-                    },
-                ),
-                "options": (
-                    "STRING",
-                    {
-                        "multiline": True,
-                        # "default": "{}",
-                    },
-                ),
-                "disable": (
-                    "BOOLEAN",
-                    {
-                        "default": False,
-                    },
-                ),
-                "backend": (
-                    "STRING",
-                    {
-                        "default": "inductor",
-                    },
-                ),
-            }
-        }
-
-    RETURN_TYPES = (utils.any_typ,)
-    FUNCTION = "patch"
-
-    CATEGORY = "wavespeed"
-
-    def patch(
-        self,
-        model,
-        is_patcher,
-        object_to_patch,
-        compiler,
-        fullgraph,
-        dynamic,
-        mode,
-        options,
-        disable,
-        backend,
-    ):
-        utils.patch_optimized_module()
-        utils.patch_same_meta()
-
-        import_path, function_name = compiler.rsplit(".", 1)
-        module = importlib.import_module(import_path)
-        compile_function = getattr(module, function_name)
-
-        mode = mode if mode else None
-        options = json.loads(options) if options else None
-
-        if compiler == "torch.compile" and backend == "inductor" and dynamic:
-            # TODO: Fix this
-            # File "pytorch/torch/_inductor/fx_passes/post_grad.py", line 643, in same_meta
-            #   and statically_known_true(sym_eq(val1.size(), val2.size()))
-            # AttributeError: 'SymInt' object has no attribute 'size'
-            pass
-
-        if is_patcher:
-            patcher = model.clone()
-        else:
-            patcher = model.patcher
-            patcher = patcher.clone()
-
-        patcher.add_object_patch(
-            object_to_patch,
-            compile_function(
-                patcher.get_model_object(object_to_patch),
-                fullgraph=fullgraph,
-                dynamic=dynamic,
-                mode=mode,
-                options=options,
-                disable=disable,
-                backend=backend,
-            ),
-        )
-
-        if is_patcher:
-            return (patcher,)
-        else:
-            model.patcher = patcher
-            return (model,)
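
Illustrative sketch (not from the deleted file): EnhancedCompileModel treats its "compiler" widget as a dotted import path, resolves it with importlib, and wraps the selected model object with the resulting callable. The standalone snippet below shows that resolution step with the default "torch.compile" and a toy module; it triggers a real Inductor compile on the first call, and the toy model is only an example.

import importlib
import torch

def resolve_compiler(dotted_path):
    # "torch.compile" -> module "torch", attribute "compile"
    import_path, function_name = dotted_path.rsplit(".", 1)
    return getattr(importlib.import_module(import_path), function_name)

compile_function = resolve_compiler("torch.compile")
model = torch.nn.Linear(8, 8)
compiled = compile_function(model, fullgraph=False, dynamic=False,
                            mode=None, options=None, disable=False,
                            backend="inductor")
print(compiled(torch.randn(2, 8)).shape)
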
custom_nodes/Comfy-WaveSpeed/patchers.py DELETED
@@ -1,139 +0,0 @@
-import contextlib
-import copy
-import unittest
-
-import comfy.model_management
-import comfy.model_patcher
-import comfy.sd
-import comfy.utils
-import torch
-
-
-class QuantizedModelPatcher(comfy.model_patcher.ModelPatcher):
-    _object_to_patch_default = None
-    _quantize_fn_default = None
-    _lowvram_default = True
-    _full_load_default = True
-    _is_quantized_default = False
-
-    _load_device = None
-    _offload_device = None
-    _disable_load = False
-
-    @classmethod
-    @contextlib.contextmanager
-    def _override_defaults(cls, **kwargs):
-        old_defaults = {}
-        for k in ("object_to_patch", "quantize_fn", "lowvram", "full_load"):
-            if k in kwargs:
-                old_defaults[k] = getattr(cls, f"_{k}_default")
-                setattr(cls, f"_{k}_default", kwargs[k])
-        try:
-            yield
-        finally:
-            for k in old_defaults:
-                setattr(cls, f"_{k}_default", old_defaults[k])
-
-    @classmethod
-    @contextlib.contextmanager
-    def _set_disable_load(cls, disable_load=True):
-        old_disable_load = cls._disable_load
-        cls._disable_load = disable_load
-        try:
-            yield
-        finally:
-            cls._disable_load = old_disable_load
-
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        self._object_to_patch = QuantizedModelPatcher._object_to_patch_default
-        self._quantize_fn = QuantizedModelPatcher._quantize_fn_default
-        self._lowvram = QuantizedModelPatcher._lowvram_default
-        self._full_load = QuantizedModelPatcher._full_load_default
-        self._is_quantized = QuantizedModelPatcher._is_quantized_default
-
-    def load(
-        self, device_to=None, force_patch_weights=False, full_load=False, **kwargs
-    ):
-        if self._disable_load:
-            return
-
-        if self._is_quantized:
-            super().load(
-                device_to=device_to,
-                force_patch_weights=force_patch_weights,
-                full_load=full_load,
-                **kwargs,
-            )
-            return
-
-        with unittest.mock.patch.object(
-            QuantizedModelPatcher, "_load_device", self.load_device
-        ), unittest.mock.patch.object(
-            QuantizedModelPatcher, "_offload_device", self.offload_device
-        ):
-            # always call `patch_weight_to_device` even for lowvram
-            super().load(
-                torch.device("cpu") if self._lowvram else device_to,
-                force_patch_weights=True,
-                full_load=self._full_load or full_load,
-                **kwargs,
-            )
-
-            if self._quantize_fn is not None:
-                if self._object_to_patch is None:
-                    target_model = self.model
-                else:
-                    target_model = comfy.utils.get_attr(
-                        self.model, self._object_to_patch
-                    )
-                target_model = self._quantize_fn(target_model)
-                if self._object_to_patch is None:
-                    self.model = target_model
-                else:
-                    comfy.utils.set_attr(
-                        self.model, self._object_to_patch, target_model
-                    )
-
-            if self._lowvram:
-                if device_to.type == "cuda":
-                    torch.cuda.empty_cache()
-                self.model.to(device_to)
-
-            self._is_quantized = True
-
-    # def model_size(self):
-    #     return super().model_size() // 2
-
-    def clone(self, *args, **kwargs):
-        n = QuantizedModelPatcher(
-            self.model,
-            self.load_device,
-            self.offload_device,
-            self.size,
-            weight_inplace_update=self.weight_inplace_update,
-        )
-        n.patches = {}
-        for k in self.patches:
-            n.patches[k] = self.patches[k][:]
-        n.patches_uuid = self.patches_uuid
-
-        n.object_patches = self.object_patches.copy()
-        n.model_options = copy.deepcopy(self.model_options)
-        n.backup = self.backup
-        n.object_patches_backup = self.object_patches_backup
-
-        n._object_to_patch = getattr(
-            self, "_object_to_patch", QuantizedModelPatcher._object_to_patch_default
-        )
-        n._quantize_fn = getattr(
-            self, "_quantize_fn", QuantizedModelPatcher._quantize_fn_default
-        )
-        n._lowvram = getattr(self, "_lowvram", QuantizedModelPatcher._lowvram_default)
-        n._full_load = getattr(
-            self, "_full_load", QuantizedModelPatcher._full_load_default
-        )
-        n._is_quantized = getattr(
-            self, "_is_quantized", QuantizedModelPatcher._is_quantized_default
-        )
-        return n
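
Illustrative sketch (not from the deleted file): _override_defaults above temporarily rewrites class-level defaults so that patcher instances created deep inside ComfyUI's loaders, which this code cannot pass constructor arguments to, still pick up the desired settings and the defaults are restored afterwards. The toy class below reproduces just that pattern.

import contextlib

class Patcher:
    _lowvram_default = True

    def __init__(self):
        # instances snapshot the class-level default at construction time
        self._lowvram = Patcher._lowvram_default

    @classmethod
    @contextlib.contextmanager
    def override_defaults(cls, lowvram):
        old = cls._lowvram_default
        cls._lowvram_default = lowvram
        try:
            yield
        finally:
            cls._lowvram_default = old

with Patcher.override_defaults(lowvram=False):
    p = Patcher()          # constructed by code we do not control
print(p._lowvram, Patcher._lowvram_default)  # False True
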
custom_nodes/Comfy-WaveSpeed/pyproject.toml DELETED
@@ -1,14 +0,0 @@
-[project]
-name = "wavespeed"
-description = "The all in one inference optimization solution for ComfyUI, universal, flexible, and fast."
-version = "1.1.8"
-license = {file = "LICENSE"}
-
-[project.urls]
-Repository = "https://github.com/chengzeyi/Comfy-WaveSpeed"
-# Used by Comfy Registry https://comfyregistry.org
-
-[tool.comfy]
-PublisherId = "chengzeyi"
-DisplayName = "Comfy-WaveSpeed"
-Icon = ""
custom_nodes/Comfy-WaveSpeed/utils.py DELETED
@@ -1,127 +0,0 @@
-import contextlib
-import unittest
-
-import torch
-
-
-# wildcard trick is taken from pythongossss's
-class AnyType(str):
-
-    def __ne__(self, __value: object) -> bool:
-        return False
-
-
-any_typ = AnyType("*")
-
-
-def get_weight_dtype_inputs():
-    return {
-        "weight_dtype": (
-            [
-                "default",
-                "float32",
-                "float64",
-                "bfloat16",
-                "float16",
-                "fp8_e4m3fn",
-                "fp8_e4m3fn_fast",
-                "fp8_e5m2",
-            ],
-        ),
-    }
-
-
-def parse_weight_dtype(model_options, weight_dtype):
-    dtype = {
-        "float32": torch.float32,
-        "float64": torch.float64,
-        "bfloat16": torch.bfloat16,
-        "float16": torch.float16,
-        "fp8_e4m3fn": torch.float8_e4m3fn,
-        "fp8_e4m3fn_fast": torch.float8_e4m3fn,
-        "fp8_e5m2": torch.float8_e5m2,
-    }.get(weight_dtype, None)
-    if dtype is not None:
-        model_options["dtype"] = dtype
-    if weight_dtype == "fp8_e4m3fn_fast":
-        model_options["fp8_optimizations"] = True
-    return model_options
-
-
-@contextlib.contextmanager
-def disable_load_models_gpu():
-    def foo(*args, **kwargs):
-        pass
-
-    from comfy import model_management
-
-    with unittest.mock.patch.object(model_management, "load_models_gpu", foo):
-        yield
-
-
-def patch_optimized_module():
-    try:
-        from torch._dynamo.eval_frame import OptimizedModule
-    except ImportError:
-        return
-
-    if getattr(OptimizedModule, "_patched", False):
-        return
-
-    def __getattribute__(self, name):
-        if name == "_orig_mod":
-            return object.__getattribute__(self, "_modules")[name]
-        if name in (
-            "__class__",
-            "_modules",
-            "state_dict",
-            "load_state_dict",
-            "parameters",
-            "named_parameters",
-            "buffers",
-            "named_buffers",
-            "children",
-            "named_children",
-            "modules",
-            "named_modules",
-        ):
-            return getattr(object.__getattribute__(self, "_orig_mod"), name)
-        return object.__getattribute__(self, name)
-
-    def __delattr__(self, name):
-        # unload_lora_weights() wants to del peft_config
-        return delattr(self._orig_mod, name)
-
-    @classmethod
-    def __instancecheck__(cls, instance):
-        return isinstance(instance, OptimizedModule) or issubclass(
-            object.__getattribute__(instance, "__class__"), cls
-        )
-
-    OptimizedModule.__getattribute__ = __getattribute__
-    OptimizedModule.__delattr__ = __delattr__
-    OptimizedModule.__instancecheck__ = __instancecheck__
-    OptimizedModule._patched = True
-
-
-def patch_same_meta():
-    try:
-        from torch._inductor.fx_passes import post_grad
-    except ImportError:
-        return
-
-    same_meta = getattr(post_grad, "same_meta", None)
-    if same_meta is None:
-        return
-
-    if getattr(same_meta, "_patched", False):
-        return
-
-    def new_same_meta(a, b):
-        try:
-            return same_meta(a, b)
-        except Exception:
-            return False
-
-    post_grad.same_meta = new_same_meta
-    new_same_meta._patched = True
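
Illustrative sketch (not from the deleted file): the AnyType wildcard above works because ComfyUI validates connections with an inequality comparison between type strings, so a str subclass whose __ne__ always returns False never registers as "different" from any socket type. That is what lets nodes such as EnhancedCompileModel accept either a MODEL or a CLIP-style input through a single "*" socket.

class AnyType(str):
    def __ne__(self, __value: object) -> bool:
        return False

any_typ = AnyType("*")
print(any_typ != "MODEL")   # False -> treated as compatible with MODEL
print(any_typ != "LATENT")  # False -> treated as compatible with LATENT
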
custom_nodes/Comfy-WaveSpeed/velocator_nodes.py DELETED
@@ -1,413 +0,0 @@
-import functools
-import importlib
-import json
-import unittest
-
-import comfy.model_management
-import comfy.model_patcher
-import comfy.sd
-import folder_paths
-import torch
-
-from . import patchers, utils
-
-HAS_VELOCATOR = importlib.util.find_spec("xelerate") is not None
-
-
-def get_quant_inputs():
-    return {
-        "quant_type": (
-            [
-                "int8_dynamic",
-                "e4m3_e4m3_dynamic",
-                "e4m3_e4m3_dynamic_per_tensor",
-                "int8_weightonly",
-                "e4m3_weightonly",
-                "e4m3_e4m3_weightonly",
-                "e4m3_e4m3_weightonly_per_tensor",
-                "nf4_weightonly",
-                "af4_weightonly",
-                "int4_weightonly",
-            ],
-        ),
-        "filter_fn": (
-            "STRING",
-            {
-                "default": "fnmatch_matches_fqn",
-            },
-        ),
-        "filter_fn_kwargs": (
-            "STRING",
-            {
-                "multiline": True,
-                "default": '{"pattern": ["*"]}',
-            },
-        ),
-        "kwargs": (
-            "STRING",
-            {
-                "multiline": True,
-                # "default": "{}",
-            },
-        ),
-    }
-
-
-class VelocatorLoadAndQuantizeDiffusionModel:
-    @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "unet_name": (folder_paths.get_filename_list("diffusion_models"),),
-                **utils.get_weight_dtype_inputs(),
-                "lowvram": ("BOOLEAN", {"default": True}),
-                "full_load": ("BOOLEAN", {"default": True}),
-                "quantize": ("BOOLEAN", {"default": True}),
-                "quantize_on_load_device": ("BOOLEAN", {"default": True}),
-                **get_quant_inputs(),
-            }
-        }
-
-    RETURN_TYPES = ("MODEL",)
-    FUNCTION = "load_unet"
-
-    CATEGORY = "wavespeed/velocator"
-
-    def load_unet(
-        self,
-        unet_name,
-        weight_dtype,
-        lowvram,
-        full_load,
-        quantize,
-        quantize_on_load_device,
-        quant_type,
-        filter_fn,
-        filter_fn_kwargs,
-        kwargs,
-    ):
-        model_options = {}
-        if lowvram:
-            model_options["initial_device"] = torch.device("cpu")
-        model_options = utils.parse_weight_dtype(model_options, weight_dtype)
-
-        unet_path = folder_paths.get_full_path_or_raise("diffusion_models", unet_name)
-
-        quantize_fn = None
-        if quantize:
-            assert HAS_VELOCATOR, "velocator is not installed"
-            from xelerate.ao.quant import quantize
-
-            kwargs = json.loads(kwargs) if kwargs else {}
-
-            if lowvram and quantize_on_load_device:
-                preprocessor = lambda t: (
-                    t.to(patchers.QuantizedModelPatcher._load_device)
-                    if patchers.QuantizedModelPatcher._load_device is not None
-                    else t
-                )
-                kwargs["preprocessor"] = preprocessor
-                postprocessor = lambda t: (t.to(torch.device("cpu")))
-                kwargs["postprocessor"] = postprocessor
-
-            quantize_fn = functools.partial(
-                quantize,
-                quant_type=quant_type,
-                filter_fn=filter_fn,
-                filter_fn_kwargs=(
-                    json.loads(filter_fn_kwargs) if filter_fn_kwargs else {}
-                ),
-                **kwargs,
-            )
-
-        with patchers.QuantizedModelPatcher._override_defaults(
-            quantize_fn=quantize_fn,
-            lowvram=lowvram,
-            full_load=full_load,
-        ), utils.disable_load_models_gpu(), unittest.mock.patch.object(
-            comfy.model_patcher, "ModelPatcher", patchers.QuantizedModelPatcher
-        ):
-            model = comfy.sd.load_diffusion_model(
-                unet_path, model_options=model_options
-            )
-
-        return (model,)
-
-
-class VelocatorLoadAndQuantizeClip:
-    @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "clip_name1": ([""] + folder_paths.get_filename_list("text_encoders"),),
-                "clip_name2": ([""] + folder_paths.get_filename_list("text_encoders"),),
-                "clip_name3": ([""] + folder_paths.get_filename_list("text_encoders"),),
-                "type": ([member.name.lower() for member in comfy.sd.CLIPType],),
-                **utils.get_weight_dtype_inputs(),
-                "lowvram": ("BOOLEAN", {"default": True}),
-                "full_load": ("BOOLEAN", {"default": True}),
-                "quantize": ("BOOLEAN", {"default": True}),
-                "quantize_on_load_device": ("BOOLEAN", {"default": True}),
-                **get_quant_inputs(),
-            }
-        }
-
-    RETURN_TYPES = ("CLIP",)
-    FUNCTION = "load_clip"
-
-    CATEGORY = "wavespeed/velocator"
-
-    def load_clip(
-        self,
-        clip_name1,
-        clip_name2,
-        clip_name3,
-        type,
-        weight_dtype,
-        lowvram,
-        full_load,
-        quantize,
-        quantize_on_load_device,
-        quant_type,
-        filter_fn,
-        filter_fn_kwargs,
-        kwargs,
-    ):
-        model_options = {}
-        if lowvram:
-            model_options["initial_device"] = torch.device("cpu")
-        model_options = utils.parse_weight_dtype(model_options, weight_dtype)
-
-        clip_paths = []
-        clip_type = None
-        for clip_type_ in comfy.sd.CLIPType:
-            if clip_type_.name.lower() == type:
-                clip_type = clip_type_
-                break
-        assert clip_type is not None, f"Invalid clip type: {type}"
-        for clip_name in [clip_name1, clip_name2, clip_name3]:
-            if clip_name:
-                clip_path = folder_paths.get_full_path_or_raise(
-                    "text_encoders", clip_name
-                )
-                clip_paths.append(clip_path)
-
-        quantize_fn = None
-        if quantize:
-            assert HAS_VELOCATOR, "velocator is not installed"
-            from xelerate.ao.quant import quantize
-
-            kwargs = json.loads(kwargs) if kwargs else {}
-
-            if lowvram and quantize_on_load_device:
-                preprocessor = lambda t: (
-                    t.to(patchers.QuantizedModelPatcher._load_device)
-                    if patchers.QuantizedModelPatcher._load_device is not None
-                    else t
-                )
-                kwargs["preprocessor"] = preprocessor
-                postprocessor = lambda t: (t.to(torch.device("cpu")))
-                kwargs["postprocessor"] = postprocessor
-
-            quantize_fn = functools.partial(
-                quantize,
-                quant_type=quant_type,
-                filter_fn=filter_fn,
-                filter_fn_kwargs=(
-                    json.loads(filter_fn_kwargs) if filter_fn_kwargs else {}
-                ),
-                **kwargs,
-            )
-
-        with patchers.QuantizedModelPatcher._override_defaults(
-            quantize_fn=quantize_fn,
-            lowvram=lowvram,
-            full_load=full_load,
-        ), utils.disable_load_models_gpu(), unittest.mock.patch.object(
-            comfy.model_patcher, "ModelPatcher", patchers.QuantizedModelPatcher
-        ):
-            clip = comfy.sd.load_clip(
-                ckpt_paths=clip_paths,
-                embedding_directory=folder_paths.get_folder_paths("embeddings"),
-                clip_type=clip_type,
-                model_options=model_options,
-            )
-
-        return (clip,)
-
-
-class VelocatorQuantizeModel:
-    @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "model": ("MODEL",),
-                "object_to_patch": (
-                    "STRING",
-                    {
-                        "default": "diffusion_model",
-                    },
-                ),
-                **get_quant_inputs(),
-            }
-        }
-
-    RETURN_TYPES = ("MODEL",)
-    FUNCTION = "patch"
-
-    CATEGORY = "wavespeed/velocator"
-
-    def patch(
-        self,
-        model,
-        object_to_patch,
-        quantize,
-        quant_type,
-        filter_fn,
-        filter_fn_kwargs,
-        kwargs,
-    ):
-        assert HAS_VELOCATOR, "velocator is not installed"
-
-        from xelerate.ao.quant import quantize
-
-        if quantize:
-            comfy.model_management.unload_all_models()
-            comfy.model_management.load_models_gpu(
-                [model], force_patch_weights=True, force_full_load=True
-            )
-
-        filter_fn_kwargs = json.loads(filter_fn_kwargs) if filter_fn_kwargs else {}
-        kwargs = json.loads(kwargs) if kwargs else {}
-
-        model = model.clone()
-        model.add_object_patch(
-            object_to_patch,
-            quantize(
-                model.get_model_object(object_to_patch),
-                quant_type=quant_type,
-                filter_fn=filter_fn,
-                filter_fn_kwargs=filter_fn_kwargs,
-                **kwargs,
-            ),
-        )
-
-        return (model,)
-
-
-class VelocatorCompileModel:
-    @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "model": (utils.any_typ,),
-                "is_patcher": (
-                    "BOOLEAN",
-                    {
-                        "default": True,
-                    },
-                ),
-                "object_to_patch": (
-                    "STRING",
-                    {
-                        "default": "diffusion_model",
-                    },
-                ),
-                "memory_format": (
-                    ["channels_last", "contiguous_format", "preserve_format"],
-                ),
-                "fullgraph": (
-                    "BOOLEAN",
-                    {
-                        "default": False,
-                    },
-                ),
-                "dynamic": ("BOOLEAN", {"default": False}),
-                "mode": (
-                    "STRING",
-                    {
-                        "multiline": True,
-                        "default": "cache-all:max-autotune:low-precision",
-                    },
-                ),
-                "options": (
-                    "STRING",
-                    {
-                        "multiline": True,
-                        # "default": "{}",
-                    },
-                ),
-                "disable": (
-                    "BOOLEAN",
-                    {
-                        "default": False,
-                    },
-                ),
-                "backend": (
-                    "STRING",
-                    {
-                        "default": "velocator",
-                    },
-                ),
-            }
-        }
-
-    RETURN_TYPES = (utils.any_typ,)
-    FUNCTION = "patch"
-
-    CATEGORY = "wavespeed/velocator"
-
-    def patch(
-        self,
-        model,
-        is_patcher,
-        object_to_patch,
-        memory_format,
-        fullgraph,
-        dynamic,
-        mode,
-        options,
-        disable,
-        backend,
-    ):
-        assert HAS_VELOCATOR, "velocator is not installed"
-
-        from xelerate.compilers.xelerate_compiler import xelerate_compile
-        from xelerate.utils.memory_format import apply_memory_format
-
-        compile_function = xelerate_compile
-
-        memory_format = getattr(torch, memory_format)
-
-        mode = mode if mode else None
-        options = json.loads(options) if options else None
-        if backend == "velocator":
-            backend = "xelerate"
-
-        if is_patcher:
-            patcher = model.clone()
-        else:
-            patcher = model.patcher
-            patcher = patcher.clone()
-
-        patcher.add_object_patch(
-            object_to_patch,
-            compile_function(
-                apply_memory_format(
-                    patcher.get_model_object(object_to_patch),
-                    memory_format=memory_format,
-                ),
-                fullgraph=fullgraph,
-                dynamic=dynamic,
-                mode=mode,
-                options=options,
-                disable=disable,
-                backend=backend,
-            ),
-        )
-
-        if is_patcher:
-            return (patcher,)
-        else:
-            model.patcher = patcher
-            return (model,)
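
Illustrative sketch (not from the deleted file): the preprocessor/postprocessor pair built above for the low-VRAM path moves each weight tensor to the load device right before it is quantized and back to CPU right after, so only one tensor at a time occupies GPU memory. The real quantization step lives in xelerate; the stand-in below only demonstrates the move-quantize-move pattern.

import torch

load_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
preprocessor = lambda t: t.to(load_device) if load_device is not None else t
postprocessor = lambda t: t.to(torch.device("cpu"))

def fake_quantize(t):
    # stand-in for the real per-tensor quantization performed on the load device
    return (t * 127).round().clamp(-128, 127).to(torch.int8)

weight = torch.randn(4, 4)
quantized = postprocessor(fake_quantize(preprocessor(weight)))
print(quantized.dtype, quantized.device)  # torch.int8 cpu
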
custom_nodes/Comfy-WaveSpeed/workflows/flux.json DELETED
@@ -1,994 +0,0 @@
1
- {
2
- "last_node_id": 39,
3
- "last_link_id": 119,
4
- "nodes": [
5
- {
6
- "id": 17,
7
- "type": "BasicScheduler",
8
- "pos": [
9
- 480,
10
- 1008
11
- ],
12
- "size": [
13
- 315,
14
- 106
15
- ],
16
- "flags": {},
17
- "order": 16,
18
- "mode": 0,
19
- "inputs": [
20
- {
21
- "name": "model",
22
- "type": "MODEL",
23
- "link": 55,
24
- "slot_index": 0
25
- }
26
- ],
27
- "outputs": [
28
- {
29
- "name": "SIGMAS",
30
- "type": "SIGMAS",
31
- "links": [
32
- 20
33
- ],
34
- "shape": 3
35
- }
36
- ],
37
- "properties": {
38
- "Node name for S&R": "BasicScheduler"
39
- },
40
- "widgets_values": [
41
- "simple",
42
- 20,
43
- 1
44
- ]
45
- },
46
- {
47
- "id": 16,
48
- "type": "KSamplerSelect",
49
- "pos": [
50
- 480,
51
- 912
52
- ],
53
- "size": [
54
- 315,
55
- 58
56
- ],
57
- "flags": {},
58
- "order": 0,
59
- "mode": 0,
60
- "inputs": [],
61
- "outputs": [
62
- {
63
- "name": "SAMPLER",
64
- "type": "SAMPLER",
65
- "links": [
66
- 19
67
- ],
68
- "shape": 3
69
- }
70
- ],
71
- "properties": {
72
- "Node name for S&R": "KSamplerSelect"
73
- },
74
- "widgets_values": [
75
- "euler"
76
- ]
77
- },
78
- {
79
- "id": 26,
80
- "type": "FluxGuidance",
81
- "pos": [
82
- 480,
83
- 144
84
- ],
85
- "size": [
86
- 317.4000244140625,
87
- 58
88
- ],
89
- "flags": {},
90
- "order": 13,
91
- "mode": 0,
92
- "inputs": [
93
- {
94
- "name": "conditioning",
95
- "type": "CONDITIONING",
96
- "link": 41
97
- }
98
- ],
99
- "outputs": [
100
- {
101
- "name": "CONDITIONING",
102
- "type": "CONDITIONING",
103
- "links": [
104
- 42
105
- ],
106
- "slot_index": 0,
107
- "shape": 3
108
- }
109
- ],
110
- "properties": {
111
- "Node name for S&R": "FluxGuidance"
112
- },
113
- "widgets_values": [
114
- 3.5
115
- ],
116
- "color": "#233",
117
- "bgcolor": "#355"
118
- },
119
- {
120
- "id": 22,
121
- "type": "BasicGuider",
122
- "pos": [
123
- 576,
124
- 48
125
- ],
126
- "size": [
127
- 222.3482666015625,
128
- 46
129
- ],
130
- "flags": {},
131
- "order": 15,
132
- "mode": 0,
133
- "inputs": [
134
- {
135
- "name": "model",
136
- "type": "MODEL",
137
- "link": 54,
138
- "slot_index": 0
139
- },
140
- {
141
- "name": "conditioning",
142
- "type": "CONDITIONING",
143
- "link": 42,
144
- "slot_index": 1
145
- }
146
- ],
147
- "outputs": [
148
- {
149
- "name": "GUIDER",
150
- "type": "GUIDER",
151
- "links": [
152
- 30
153
- ],
154
- "slot_index": 0,
155
- "shape": 3
156
- }
157
- ],
158
- "properties": {
159
- "Node name for S&R": "BasicGuider"
160
- },
161
- "widgets_values": []
162
- },
163
- {
164
- "id": 13,
165
- "type": "SamplerCustomAdvanced",
166
- "pos": [
167
- 864,
168
- 192
169
- ],
170
- "size": [
171
- 272.3617858886719,
172
- 124.53733825683594
173
- ],
174
- "flags": {},
175
- "order": 17,
176
- "mode": 0,
177
- "inputs": [
178
- {
179
- "name": "noise",
180
- "type": "NOISE",
181
- "link": 37,
182
- "slot_index": 0
183
- },
184
- {
185
- "name": "guider",
186
- "type": "GUIDER",
187
- "link": 30,
188
- "slot_index": 1
189
- },
190
- {
191
- "name": "sampler",
192
- "type": "SAMPLER",
193
- "link": 19,
194
- "slot_index": 2
195
- },
196
- {
197
- "name": "sigmas",
198
- "type": "SIGMAS",
199
- "link": 20,
200
- "slot_index": 3
201
- },
202
- {
203
- "name": "latent_image",
204
- "type": "LATENT",
205
- "link": 116,
206
- "slot_index": 4
207
- }
208
- ],
209
- "outputs": [
210
- {
211
- "name": "output",
212
- "type": "LATENT",
213
- "links": [
214
- 24
215
- ],
216
- "slot_index": 0,
217
- "shape": 3
218
- },
219
- {
220
- "name": "denoised_output",
221
- "type": "LATENT",
222
- "links": null,
223
- "shape": 3
224
- }
225
- ],
226
- "properties": {
227
- "Node name for S&R": "SamplerCustomAdvanced"
228
- },
229
- "widgets_values": []
230
- },
231
- {
232
- "id": 8,
233
- "type": "VAEDecode",
234
- "pos": [
235
- 866,
236
- 367
237
- ],
238
- "size": [
239
- 210,
240
- 46
241
- ],
242
- "flags": {},
243
- "order": 18,
244
- "mode": 0,
245
- "inputs": [
246
- {
247
- "name": "samples",
248
- "type": "LATENT",
249
- "link": 24
250
- },
251
- {
252
- "name": "vae",
253
- "type": "VAE",
254
- "link": 12
255
- }
256
- ],
257
- "outputs": [
258
- {
259
- "name": "IMAGE",
260
- "type": "IMAGE",
261
- "links": [
262
- 9
263
- ],
264
- "slot_index": 0
265
- }
266
- ],
267
- "properties": {
268
- "Node name for S&R": "VAEDecode"
269
- },
270
- "widgets_values": []
271
- },
272
- {
273
- "id": 6,
274
- "type": "CLIPTextEncode",
275
- "pos": [
276
- 384,
277
- 240
278
- ],
279
- "size": [
280
- 422.84503173828125,
281
- 164.31304931640625
282
- ],
283
- "flags": {},
284
- "order": 11,
285
- "mode": 0,
286
- "inputs": [
287
- {
288
- "name": "clip",
289
- "type": "CLIP",
290
- "link": 10
291
- }
292
- ],
293
- "outputs": [
294
- {
295
- "name": "CONDITIONING",
296
- "type": "CONDITIONING",
297
- "links": [
298
- 41
299
- ],
300
- "slot_index": 0
301
- }
302
- ],
303
- "title": "CLIP Text Encode (Positive Prompt)",
304
- "properties": {
305
- "Node name for S&R": "CLIPTextEncode"
306
- },
307
- "widgets_values": [
308
- "cute anime girl with massive fluffy fennec ears and a big fluffy tail blonde messy long hair blue eyes wearing a maid outfit with a long black gold leaf pattern dress and a white apron mouth open holding a fancy black forest cake with candles on top in the kitchen of an old dark Victorian mansion lit by candlelight with a bright window to the foggy forest and very expensive stuff everywhere"
309
- ],
310
- "color": "#232",
311
- "bgcolor": "#353"
312
- },
313
- {
314
- "id": 27,
315
- "type": "EmptySD3LatentImage",
316
- "pos": [
317
- 480,
318
- 624
319
- ],
320
- "size": [
321
- 315,
322
- 106
323
- ],
324
- "flags": {},
325
- "order": 9,
326
- "mode": 0,
327
- "inputs": [
328
- {
329
- "name": "width",
330
- "type": "INT",
331
- "link": 112,
332
- "widget": {
333
- "name": "width"
334
- }
335
- },
336
- {
337
- "name": "height",
338
- "type": "INT",
339
- "link": 113,
340
- "widget": {
341
- "name": "height"
342
- }
343
- }
344
- ],
345
- "outputs": [
346
- {
347
- "name": "LATENT",
348
- "type": "LATENT",
349
- "links": [
350
- 116
351
- ],
352
- "slot_index": 0,
353
- "shape": 3
354
- }
355
- ],
356
- "properties": {
357
- "Node name for S&R": "EmptySD3LatentImage"
358
- },
359
- "widgets_values": [
360
- 1024,
361
- 1024,
362
- 1
363
- ]
364
- },
365
- {
366
- "id": 34,
367
- "type": "PrimitiveNode",
368
- "pos": [
369
- 432,
370
- 480
371
- ],
372
- "size": [
373
- 210,
374
- 82
375
- ],
376
- "flags": {},
377
- "order": 1,
378
- "mode": 0,
379
- "inputs": [],
380
- "outputs": [
381
- {
382
- "name": "INT",
383
- "type": "INT",
384
- "links": [
385
- 112,
386
- 115
387
- ],
388
- "slot_index": 0,
389
- "widget": {
390
- "name": "width"
391
- }
392
- }
393
- ],
394
- "title": "width",
395
- "properties": {
396
- "Run widget replace on values": false
397
- },
398
- "widgets_values": [
399
- 1024,
400
- "fixed"
401
- ],
402
- "color": "#323",
403
- "bgcolor": "#535"
404
- },
405
- {
406
- "id": 35,
407
- "type": "PrimitiveNode",
408
- "pos": [
409
- 672,
410
- 480
411
- ],
412
- "size": [
413
- 210,
414
- 82
415
- ],
416
- "flags": {},
417
- "order": 2,
418
- "mode": 0,
419
- "inputs": [],
420
- "outputs": [
421
- {
422
- "name": "INT",
423
- "type": "INT",
424
- "links": [
425
- 113,
426
- 114
427
- ],
428
- "slot_index": 0,
429
- "widget": {
430
- "name": "height"
431
- }
432
- }
433
- ],
434
- "title": "height",
435
- "properties": {
436
- "Run widget replace on values": false
437
- },
438
- "widgets_values": [
439
- 1024,
440
- "fixed"
441
- ],
442
- "color": "#323",
443
- "bgcolor": "#535"
444
- },
445
- {
446
- "id": 9,
447
- "type": "SaveImage",
448
- "pos": [
449
- 1155,
450
- 196
451
- ],
452
- "size": [
453
- 985.3012084960938,
454
- 1060.3828125
455
- ],
456
- "flags": {},
457
- "order": 19,
458
- "mode": 0,
459
- "inputs": [
460
- {
461
- "name": "images",
462
- "type": "IMAGE",
463
- "link": 9
464
- }
465
- ],
466
- "outputs": [],
467
- "properties": {},
468
- "widgets_values": [
469
- "ComfyUI"
470
- ]
471
- },
472
- {
473
- "id": 37,
474
- "type": "Note",
475
- "pos": [
476
- 480,
477
- 1344
478
- ],
479
- "size": [
480
- 314.99755859375,
481
- 117.98363494873047
482
- ],
483
- "flags": {},
484
- "order": 3,
485
- "mode": 0,
486
- "inputs": [],
487
- "outputs": [],
488
- "properties": {
489
- "text": ""
490
- },
491
- "widgets_values": [
492
- "The reference sampling implementation auto adjusts the shift value based on the resolution, if you don't want this you can just bypass (CTRL-B) this ModelSamplingFlux node.\n"
493
- ],
494
- "color": "#432",
495
- "bgcolor": "#653"
496
- },
497
- {
498
- "id": 10,
499
- "type": "VAELoader",
500
- "pos": [
501
- 48,
502
- 432
503
- ],
504
- "size": [
505
- 311.81634521484375,
506
- 60.429901123046875
507
- ],
508
- "flags": {},
509
- "order": 4,
510
- "mode": 0,
511
- "inputs": [],
512
- "outputs": [
513
- {
514
- "name": "VAE",
515
- "type": "VAE",
516
- "links": [
517
- 12
518
- ],
519
- "slot_index": 0,
520
- "shape": 3
521
- }
522
- ],
523
- "properties": {
524
- "Node name for S&R": "VAELoader"
525
- },
526
- "widgets_values": [
527
- "ae.safetensors"
528
- ]
529
- },
530
- {
531
- "id": 28,
532
- "type": "Note",
533
- "pos": [
534
- 48,
535
- 576
536
- ],
537
- "size": [
538
- 336,
539
- 288
540
- ],
541
- "flags": {},
542
- "order": 5,
543
- "mode": 0,
544
- "inputs": [],
545
- "outputs": [],
546
- "properties": {
547
- "text": ""
548
- },
549
- "widgets_values": [
550
- "If you get an error in any of the nodes above make sure the files are in the correct directories.\n\nSee the top of the examples page for the links : https://comfyanonymous.github.io/ComfyUI_examples/flux/\n\nflux1-dev.safetensors goes in: ComfyUI/models/unet/\n\nt5xxl_fp16.safetensors and clip_l.safetensors go in: ComfyUI/models/clip/\n\nae.safetensors goes in: ComfyUI/models/vae/\n\n\nTip: You can set the weight_dtype above to one of the fp8 types if you have memory issues."
551
- ],
552
- "color": "#432",
553
- "bgcolor": "#653"
554
- },
555
- {
556
- "id": 30,
557
- "type": "ModelSamplingFlux",
558
- "pos": [
559
- 480,
560
- 1152
561
- ],
562
- "size": [
563
- 315,
564
- 130
565
- ],
566
- "flags": {},
567
- "order": 14,
568
- "mode": 0,
569
- "inputs": [
570
- {
571
- "name": "model",
572
- "type": "MODEL",
573
- "link": 119,
574
- "slot_index": 0
575
- },
576
- {
577
- "name": "width",
578
- "type": "INT",
579
- "link": 115,
580
- "slot_index": 1,
581
- "widget": {
582
- "name": "width"
583
- }
584
- },
585
- {
586
- "name": "height",
587
- "type": "INT",
588
- "link": 114,
589
- "slot_index": 2,
590
- "widget": {
591
- "name": "height"
592
- }
593
- }
594
- ],
595
- "outputs": [
596
- {
597
- "name": "MODEL",
598
- "type": "MODEL",
599
- "links": [
600
- 54,
601
- 55
602
- ],
603
- "slot_index": 0,
604
- "shape": 3
605
- }
606
- ],
607
- "properties": {
608
- "Node name for S&R": "ModelSamplingFlux"
609
- },
610
- "widgets_values": [
611
- 1.15,
612
- 0.5,
613
- 1024,
614
- 1024
615
- ]
616
- },
617
- {
618
- "id": 39,
619
- "type": "EnhancedCompileModel",
620
- "pos": [
621
- 0,
622
- 1200
623
- ],
624
- "size": [
625
- 400,
626
- 294
627
- ],
628
- "flags": {},
629
- "order": 12,
630
- "mode": 0,
631
- "inputs": [
632
- {
633
- "name": "model",
634
- "type": "*",
635
- "link": 118
636
- }
637
- ],
638
- "outputs": [
639
- {
640
- "name": "*",
641
- "type": "*",
642
- "links": [
643
- 119
644
- ],
645
- "slot_index": 0
646
- }
647
- ],
648
- "properties": {
649
- "Node name for S&R": "EnhancedCompileModel"
650
- },
651
- "widgets_values": [
652
- true,
653
- "diffusion_model",
654
- "torch.compile",
655
- false,
656
- false,
657
- "",
658
- "",
659
- false,
660
- "inductor"
661
- ]
662
- },
663
- {
664
- "id": 38,
665
- "type": "ApplyFBCacheOnModel",
666
- "pos": [
667
- 50,
668
- 960
669
- ],
670
- "size": [
671
- 315,
672
- 154
673
- ],
674
- "flags": {},
675
- "order": 10,
676
- "mode": 0,
677
- "inputs": [
678
- {
679
- "name": "model",
680
- "type": "MODEL",
681
- "link": 117
682
- }
683
- ],
684
- "outputs": [
685
- {
686
- "name": "MODEL",
687
- "type": "MODEL",
688
- "links": [
689
- 118
690
- ],
691
- "slot_index": 0
692
- }
693
- ],
694
- "properties": {
695
- "Node name for S&R": "ApplyFBCacheOnModel"
696
- },
697
- "widgets_values": [
698
- "diffusion_model",
699
- 0.12,
700
- 0,
701
- 1,
702
- -1
703
- ]
704
- },
705
- {
706
- "id": 12,
707
- "type": "UNETLoader",
708
- "pos": [
709
- 48,
710
- 144
711
- ],
712
- "size": [
713
- 315,
714
- 82
715
- ],
716
- "flags": {},
717
- "order": 6,
718
- "mode": 0,
719
- "inputs": [],
720
- "outputs": [
721
- {
722
- "name": "MODEL",
723
- "type": "MODEL",
724
- "links": [
725
- 117
726
- ],
727
- "slot_index": 0,
728
- "shape": 3
729
- }
730
- ],
731
- "properties": {
732
- "Node name for S&R": "UNETLoader"
733
- },
734
- "widgets_values": [
735
- "flux1-dev.safetensors",
736
- "fp8_e4m3fn_fast"
737
- ],
738
- "color": "#223",
739
- "bgcolor": "#335"
740
- },
741
- {
742
- "id": 11,
743
- "type": "DualCLIPLoader",
744
- "pos": [
745
- 48,
746
- 288
747
- ],
748
- "size": [
749
- 315,
750
- 106
751
- ],
752
- "flags": {},
753
- "order": 7,
754
- "mode": 0,
755
- "inputs": [],
756
- "outputs": [
757
- {
758
- "name": "CLIP",
759
- "type": "CLIP",
760
- "links": [
761
- 10
762
- ],
763
- "slot_index": 0,
764
- "shape": 3
765
- }
766
- ],
767
- "properties": {
768
- "Node name for S&R": "DualCLIPLoader"
769
- },
770
- "widgets_values": [
771
- "t5xxl_fp8_e4m3fn.safetensors",
772
- "clip_l.safetensors",
773
- "flux",
774
- "default"
775
- ]
776
- },
777
- {
778
- "id": 25,
779
- "type": "RandomNoise",
780
- "pos": [
781
- 480,
782
- 768
783
- ],
784
- "size": [
785
- 315,
786
- 82
787
- ],
788
- "flags": {},
789
- "order": 8,
790
- "mode": 0,
791
- "inputs": [],
792
- "outputs": [
793
- {
794
- "name": "NOISE",
795
- "type": "NOISE",
796
- "links": [
797
- 37
798
- ],
799
- "shape": 3
800
- }
801
- ],
802
- "properties": {
803
- "Node name for S&R": "RandomNoise"
804
- },
805
- "widgets_values": [
806
- 0,
807
- "fixed"
808
- ],
809
- "color": "#2a363b",
810
- "bgcolor": "#3f5159"
811
- }
812
- ],
813
- "links": [
814
- [
815
- 9,
816
- 8,
817
- 0,
818
- 9,
819
- 0,
820
- "IMAGE"
821
- ],
822
- [
823
- 10,
824
- 11,
825
- 0,
826
- 6,
827
- 0,
828
- "CLIP"
829
- ],
830
- [
831
- 12,
832
- 10,
833
- 0,
834
- 8,
835
- 1,
836
- "VAE"
837
- ],
838
- [
839
- 19,
840
- 16,
841
- 0,
842
- 13,
843
- 2,
844
- "SAMPLER"
845
- ],
846
- [
847
- 20,
848
- 17,
849
- 0,
850
- 13,
851
- 3,
852
- "SIGMAS"
853
- ],
854
- [
855
- 24,
856
- 13,
857
- 0,
858
- 8,
859
- 0,
860
- "LATENT"
861
- ],
862
- [
863
- 30,
864
- 22,
865
- 0,
866
- 13,
867
- 1,
868
- "GUIDER"
869
- ],
870
- [
871
- 37,
872
- 25,
873
- 0,
874
- 13,
875
- 0,
876
- "NOISE"
877
- ],
878
- [
879
- 41,
880
- 6,
881
- 0,
882
- 26,
883
- 0,
884
- "CONDITIONING"
885
- ],
886
- [
887
- 42,
888
- 26,
889
- 0,
890
- 22,
891
- 1,
892
- "CONDITIONING"
893
- ],
894
- [
895
- 54,
896
- 30,
897
- 0,
898
- 22,
899
- 0,
900
- "MODEL"
901
- ],
902
- [
903
- 55,
904
- 30,
905
- 0,
906
- 17,
907
- 0,
908
- "MODEL"
909
- ],
910
- [
911
- 112,
912
- 34,
913
- 0,
914
- 27,
915
- 0,
916
- "INT"
917
- ],
918
- [
919
- 113,
920
- 35,
921
- 0,
922
- 27,
923
- 1,
924
- "INT"
925
- ],
926
- [
927
- 114,
928
- 35,
929
- 0,
930
- 30,
931
- 2,
932
- "INT"
933
- ],
934
- [
935
- 115,
936
- 34,
937
- 0,
938
- 30,
939
- 1,
940
- "INT"
941
- ],
942
- [
943
- 116,
944
- 27,
945
- 0,
946
- 13,
947
- 4,
948
- "LATENT"
949
- ],
950
- [
951
- 117,
952
- 12,
953
- 0,
954
- 38,
955
- 0,
956
- "MODEL"
957
- ],
958
- [
959
- 118,
960
- 38,
961
- 0,
962
- 39,
963
- 0,
964
- "*"
965
- ],
966
- [
967
- 119,
968
- 39,
969
- 0,
970
- 30,
971
- 0,
972
- "MODEL"
973
- ]
974
- ],
975
- "groups": [],
976
- "config": {},
977
- "extra": {
978
- "ds": {
979
- "scale": 0.7513148009015777,
980
- "offset": [
981
- 548.4488064056912,
982
- 100.33731670597518
983
- ]
984
- },
985
- "groupNodes": {},
986
- "node_versions": {
987
- "comfy-core": "v0.3.10-44-g2ff3104f",
988
- "Comfy-WaveSpeed": "21140cdf8c43946acd9ea522b4fda66df5d859c9"
989
- },
990
- "VHS_latentpreview": false,
991
- "VHS_latentpreviewrate": 0
992
- },
993
- "version": 0.4
994
- }
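
Illustrative sketch (not from the deleted file): once the workflow above is saved locally as flux.json, a few lines of Python are enough to list its node types and spot the UNETLoader -> ApplyFBCacheOnModel -> EnhancedCompileModel -> ModelSamplingFlux chain; only the top-level "nodes" / "id" / "type" keys visible above are assumed.

import json

with open("flux.json") as f:
    workflow = json.load(f)

for node in workflow["nodes"]:
    print(node["id"], node["type"])
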
custom_nodes/Comfy-WaveSpeed/workflows/flux_controlnet.json DELETED
@@ -1,888 +0,0 @@
1
- {
2
- "last_node_id": 37,
3
- "last_link_id": 65,
4
- "nodes": [
5
- {
6
- "id": 3,
7
- "type": "KSampler",
8
- "pos": [
9
- 1280,
10
- 100
11
- ],
12
- "size": [
13
- 315,
14
- 262
15
- ],
16
- "flags": {},
17
- "order": 15,
18
- "mode": 0,
19
- "inputs": [
20
- {
21
- "name": "model",
22
- "type": "MODEL",
23
- "link": 65
24
- },
25
- {
26
- "name": "positive",
27
- "type": "CONDITIONING",
28
- "link": 18
29
- },
30
- {
31
- "name": "negative",
32
- "type": "CONDITIONING",
33
- "link": 19
34
- },
35
- {
36
- "name": "latent_image",
37
- "type": "LATENT",
38
- "link": 54
39
- }
40
- ],
41
- "outputs": [
42
- {
43
- "name": "LATENT",
44
- "type": "LATENT",
45
- "links": [
46
- 7
47
- ],
48
- "slot_index": 0
49
- }
50
- ],
51
- "properties": {
52
- "Node name for S&R": "KSampler"
53
- },
54
- "widgets_values": [
55
- 0,
56
- "fixed",
57
- 20,
58
- 1,
59
- "euler",
60
- "normal",
61
- 1
62
- ]
63
- },
64
- {
65
- "id": 7,
66
- "type": "CLIPTextEncode",
67
- "pos": [
68
- 212,
69
- 417
70
- ],
71
- "size": [
72
- 425.27801513671875,
73
- 180.6060791015625
74
- ],
75
- "flags": {
76
- "collapsed": true
77
- },
78
- "order": 9,
79
- "mode": 0,
80
- "inputs": [
81
- {
82
- "name": "clip",
83
- "type": "CLIP",
84
- "link": 59
85
- }
86
- ],
87
- "outputs": [
88
- {
89
- "name": "CONDITIONING",
90
- "type": "CONDITIONING",
91
- "links": [
92
- 17
93
- ],
94
- "slot_index": 0
95
- }
96
- ],
97
- "title": "CLIP Text Encode (Negative Prompt)",
98
- "properties": {
99
- "Node name for S&R": "CLIPTextEncode"
100
- },
101
- "widgets_values": [
102
- ""
103
- ],
104
- "color": "#322",
105
- "bgcolor": "#533"
106
- },
107
- {
108
- "id": 8,
109
- "type": "VAEDecode",
110
- "pos": [
111
- 1620,
112
- 98
113
- ],
114
- "size": [
115
- 210,
116
- 46
117
- ],
118
- "flags": {},
119
- "order": 16,
120
- "mode": 0,
121
- "inputs": [
122
- {
123
- "name": "samples",
124
- "type": "LATENT",
125
- "link": 7
126
- },
127
- {
128
- "name": "vae",
129
- "type": "VAE",
130
- "link": 62
131
- }
132
- ],
133
- "outputs": [
134
- {
135
- "name": "IMAGE",
136
- "type": "IMAGE",
137
- "links": [
138
- 9
139
- ],
140
- "slot_index": 0
141
- }
142
- ],
143
- "properties": {
144
- "Node name for S&R": "VAEDecode"
145
- },
146
- "widgets_values": []
147
- },
148
- {
149
- "id": 9,
150
- "type": "SaveImage",
151
- "pos": [
152
- 1865,
153
- 99
154
- ],
155
- "size": [
156
- 828.9535522460938,
157
- 893.8475341796875
158
- ],
159
- "flags": {},
160
- "order": 17,
161
- "mode": 0,
162
- "inputs": [
163
- {
164
- "name": "images",
165
- "type": "IMAGE",
166
- "link": 9
167
- }
168
- ],
169
- "outputs": [],
170
- "properties": {},
171
- "widgets_values": [
172
- "ComfyUI"
173
- ]
174
- },
175
- {
176
- "id": 14,
177
- "type": "ControlNetApplySD3",
178
- "pos": [
179
- 930,
180
- 100
181
- ],
182
- "size": [
183
- 315,
184
- 186
185
- ],
186
- "flags": {},
187
- "order": 14,
188
- "mode": 0,
189
- "inputs": [
190
- {
191
- "name": "positive",
192
- "type": "CONDITIONING",
193
- "link": 42
194
- },
195
- {
196
- "name": "negative",
197
- "type": "CONDITIONING",
198
- "link": 17
199
- },
200
- {
201
- "name": "control_net",
202
- "type": "CONTROL_NET",
203
- "link": 52
204
- },
205
- {
206
- "name": "vae",
207
- "type": "VAE",
208
- "link": 60
209
- },
210
- {
211
- "name": "image",
212
- "type": "IMAGE",
213
- "link": 50
214
- }
215
- ],
216
- "outputs": [
217
- {
218
- "name": "positive",
219
- "type": "CONDITIONING",
220
- "links": [
221
- 18
222
- ],
223
- "slot_index": 0,
224
- "shape": 3
225
- },
226
- {
227
- "name": "negative",
228
- "type": "CONDITIONING",
229
- "links": [
230
- 19
231
- ],
232
- "slot_index": 1,
233
- "shape": 3
234
- }
235
- ],
236
- "properties": {
237
- "Node name for S&R": "ControlNetApplySD3"
238
- },
239
- "widgets_values": [
240
- 0.4,
241
- 0,
242
- 1
243
- ]
244
- },
245
- {
246
- "id": 15,
247
- "type": "ControlNetLoader",
248
- "pos": [
249
- 570,
250
- -60
251
- ],
252
- "size": [
253
- 315,
254
- 58
255
- ],
256
- "flags": {},
257
- "order": 0,
258
- "mode": 0,
259
- "inputs": [],
260
- "outputs": [
261
- {
262
- "name": "CONTROL_NET",
263
- "type": "CONTROL_NET",
264
- "links": [
265
- 52
266
- ],
267
- "slot_index": 0,
268
- "shape": 3
269
- }
270
- ],
271
- "properties": {
272
- "Node name for S&R": "ControlNetLoader"
273
- },
274
- "widgets_values": [
275
- "instantx_flux_canny.safetensors"
276
- ]
277
- },
278
- {
279
- "id": 17,
280
- "type": "LoadImage",
281
- "pos": [
282
- 220,
283
- 530
284
- ],
285
- "size": [
286
- 315,
287
- 314.0000305175781
288
- ],
289
- "flags": {},
290
- "order": 1,
291
- "mode": 0,
292
- "inputs": [],
293
- "outputs": [
294
- {
295
- "name": "IMAGE",
296
- "type": "IMAGE",
297
- "links": [
298
- 49
299
- ],
300
- "slot_index": 0,
301
- "shape": 3
302
- },
303
- {
304
- "name": "MASK",
305
- "type": "MASK",
306
- "links": null,
307
- "shape": 3
308
- }
309
- ],
310
- "properties": {
311
- "Node name for S&R": "LoadImage"
312
- },
313
- "widgets_values": [
314
- "girl_in_field.png",
315
- "image"
316
- ]
317
- },
318
- {
319
- "id": 18,
320
- "type": "Canny",
321
- "pos": [
322
- 560,
323
- 530
324
- ],
325
- "size": [
326
- 315,
327
- 82
328
- ],
329
- "flags": {},
330
- "order": 7,
331
- "mode": 0,
332
- "inputs": [
333
- {
334
- "name": "image",
335
- "type": "IMAGE",
336
- "link": 49
337
- }
338
- ],
339
- "outputs": [
340
- {
341
- "name": "IMAGE",
342
- "type": "IMAGE",
343
- "links": [
344
- 26,
345
- 50
346
- ],
347
- "slot_index": 0,
348
- "shape": 3
349
- }
350
- ],
351
- "properties": {
352
- "Node name for S&R": "Canny"
353
- },
354
- "widgets_values": [
355
- 0.2,
356
- 0.3
357
- ]
358
- },
359
- {
360
- "id": 19,
361
- "type": "PreviewImage",
362
- "pos": [
363
- 900,
364
- 530
365
- ],
366
- "size": [
367
- 571.5869140625,
368
- 625.5296020507812
369
- ],
370
- "flags": {},
371
- "order": 11,
372
- "mode": 0,
373
- "inputs": [
374
- {
375
- "name": "images",
376
- "type": "IMAGE",
377
- "link": 26
378
- }
379
- ],
380
- "outputs": [],
381
- "properties": {
382
- "Node name for S&R": "PreviewImage"
383
- },
384
- "widgets_values": []
385
- },
386
- {
387
- "id": 23,
388
- "type": "CLIPTextEncode",
389
- "pos": [
390
- 210,
391
- 196
392
- ],
393
- "size": [
394
- 422.84503173828125,
395
- 164.31304931640625
396
- ],
397
- "flags": {},
398
- "order": 10,
399
- "mode": 0,
400
- "inputs": [
401
- {
402
- "name": "clip",
403
- "type": "CLIP",
404
- "link": 61
405
- }
406
- ],
407
- "outputs": [
408
- {
409
- "name": "CONDITIONING",
410
- "type": "CONDITIONING",
411
- "links": [
412
- 41
413
- ],
414
- "slot_index": 0
415
- }
416
- ],
417
- "title": "CLIP Text Encode (Positive Prompt)",
418
- "properties": {
419
- "Node name for S&R": "CLIPTextEncode"
420
- },
421
- "widgets_values": [
422
- "anime girl smiling with long hair standing in a football arena with a single massive sword hanging from her back"
423
- ],
424
- "color": "#232",
425
- "bgcolor": "#353"
426
- },
427
- {
428
- "id": 26,
429
- "type": "FluxGuidance",
430
- "pos": [
431
- 570,
432
- 50
433
- ],
434
- "size": [
435
- 317.4000244140625,
436
- 58
437
- ],
438
- "flags": {},
439
- "order": 13,
440
- "mode": 0,
441
- "inputs": [
442
- {
443
- "name": "conditioning",
444
- "type": "CONDITIONING",
445
- "link": 41
446
- }
447
- ],
448
- "outputs": [
449
- {
450
- "name": "CONDITIONING",
451
- "type": "CONDITIONING",
452
- "links": [
453
- 42
454
- ],
455
- "slot_index": 0,
456
- "shape": 3
457
- }
458
- ],
459
- "properties": {
460
- "Node name for S&R": "FluxGuidance"
461
- },
462
- "widgets_values": [
463
- 3.5
464
- ]
465
- },
466
- {
467
- "id": 28,
468
- "type": "EmptySD3LatentImage",
469
- "pos": [
470
- 930,
471
- 340
472
- ],
473
- "size": [
474
- 315,
475
- 106
476
- ],
477
- "flags": {},
478
- "order": 2,
479
- "mode": 0,
480
- "inputs": [],
481
- "outputs": [
482
- {
483
- "name": "LATENT",
484
- "type": "LATENT",
485
- "links": [
486
- 54
487
- ],
488
- "slot_index": 0,
489
- "shape": 3
490
- }
491
- ],
492
- "properties": {
493
- "Node name for S&R": "EmptySD3LatentImage"
494
- },
495
- "widgets_values": [
496
- 1024,
497
- 1024,
498
- 1
499
- ]
500
- },
501
- {
502
- "id": 32,
503
- "type": "VAELoader",
504
- "pos": [
505
- -180,
506
- 230
507
- ],
508
- "size": [
509
- 311.81634521484375,
510
- 60.429901123046875
511
- ],
512
- "flags": {},
513
- "order": 6,
514
- "mode": 0,
515
- "inputs": [],
516
- "outputs": [
517
- {
518
- "name": "VAE",
519
- "type": "VAE",
520
- "links": [
521
- 60,
522
- 62
523
- ],
524
- "slot_index": 0,
525
- "shape": 3
526
- }
527
- ],
528
- "properties": {
529
- "Node name for S&R": "VAELoader"
530
- },
531
- "widgets_values": [
532
- "ae.safetensors"
533
- ]
534
- },
535
- {
536
- "id": 33,
537
- "type": "Note",
538
- "pos": [
539
- -180,
540
- 380
541
- ],
542
- "size": [
543
- 336,
544
- 288
545
- ],
546
- "flags": {},
547
- "order": 3,
548
- "mode": 0,
549
- "inputs": [],
550
- "outputs": [],
551
- "properties": {
552
- "text": ""
553
- },
554
- "widgets_values": [
555
- "If you get an error in any of the nodes above make sure the files are in the correct directories.\n\nSee the top of the examples page for the links : https://comfyanonymous.github.io/ComfyUI_examples/flux/\n\nflux1-dev.safetensors goes in: ComfyUI/models/unet/\n\nt5xxl_fp16.safetensors and clip_l.safetensors go in: ComfyUI/models/clip/\n\nae.safetensors goes in: ComfyUI/models/vae/\n\n\nTip: You can set the weight_dtype above to one of the fp8 types if you have memory issues."
556
- ],
557
- "color": "#432",
558
- "bgcolor": "#653"
559
- },
560
- {
561
- "id": 34,
562
- "type": "UNETLoader",
563
- "pos": [
564
- -180,
565
- -60
566
- ],
567
- "size": [
568
- 315,
569
- 82
570
- ],
571
- "flags": {},
572
- "order": 4,
573
- "mode": 0,
574
- "inputs": [],
575
- "outputs": [
576
- {
577
- "name": "MODEL",
578
- "type": "MODEL",
579
- "links": [
580
- 63
581
- ],
582
- "slot_index": 0,
583
- "shape": 3
584
- }
585
- ],
586
- "properties": {
587
- "Node name for S&R": "UNETLoader"
588
- },
589
- "widgets_values": [
590
- "flux1-dev.safetensors",
591
- "fp8_e4m3fn_fast"
592
- ],
593
- "color": "#223",
594
- "bgcolor": "#335"
595
- },
596
- {
597
- "id": 35,
598
- "type": "DualCLIPLoader",
599
- "pos": [
600
- -180,
601
- 90
602
- ],
603
- "size": [
604
- 315,
605
- 106
606
- ],
607
- "flags": {},
608
- "order": 5,
609
- "mode": 0,
610
- "inputs": [],
611
- "outputs": [
612
- {
613
- "name": "CLIP",
614
- "type": "CLIP",
615
- "links": [
616
- 59,
617
- 61
618
- ],
619
- "slot_index": 0,
620
- "shape": 3
621
- }
622
- ],
623
- "properties": {
624
- "Node name for S&R": "DualCLIPLoader"
625
- },
626
- "widgets_values": [
627
- "t5xxl_fp8_e4m3fn.safetensors",
628
- "clip_l.safetensors",
629
- "flux",
630
- "default"
631
- ]
632
- },
633
- {
634
- "id": 36,
635
- "type": "ApplyFBCacheOnModel",
636
- "pos": [
637
- 200,
638
- -160
639
- ],
640
- "size": [
641
- 315,
642
- 154
643
- ],
644
- "flags": {},
645
- "order": 8,
646
- "mode": 0,
647
- "inputs": [
648
- {
649
- "name": "model",
650
- "type": "MODEL",
651
- "link": 63
652
- }
653
- ],
654
- "outputs": [
655
- {
656
- "name": "MODEL",
657
- "type": "MODEL",
658
- "links": [
659
- 64
660
- ],
661
- "slot_index": 0
662
- }
663
- ],
664
- "properties": {
665
- "Node name for S&R": "ApplyFBCacheOnModel"
666
- },
667
- "widgets_values": [
668
- "diffusion_model",
669
- 0.12,
670
- 0,
671
- 1,
672
- -1
673
- ]
674
- },
675
- {
676
- "id": 37,
677
- "type": "EnhancedCompileModel",
678
- "pos": [
679
- 560,
680
- -410
681
- ],
682
- "size": [
683
- 400,
684
- 294
685
- ],
686
- "flags": {},
687
- "order": 12,
688
- "mode": 0,
689
- "inputs": [
690
- {
691
- "name": "model",
692
- "type": "*",
693
- "link": 64
694
- }
695
- ],
696
- "outputs": [
697
- {
698
- "name": "*",
699
- "type": "*",
700
- "links": [
701
- 65
702
- ],
703
- "slot_index": 0
704
- }
705
- ],
706
- "properties": {
707
- "Node name for S&R": "EnhancedCompileModel"
708
- },
709
- "widgets_values": [
710
- true,
711
- "diffusion_model",
712
- "torch.compile",
713
- false,
714
- false,
715
- "",
716
- "",
717
- false,
718
- "inductor"
719
- ]
720
- }
721
- ],
722
- "links": [
723
- [
724
- 7,
725
- 3,
726
- 0,
727
- 8,
728
- 0,
729
- "LATENT"
730
- ],
731
- [
732
- 9,
733
- 8,
734
- 0,
735
- 9,
736
- 0,
737
- "IMAGE"
738
- ],
739
- [
740
- 17,
741
- 7,
742
- 0,
743
- 14,
744
- 1,
745
- "CONDITIONING"
746
- ],
747
- [
748
- 18,
749
- 14,
750
- 0,
751
- 3,
752
- 1,
753
- "CONDITIONING"
754
- ],
755
- [
756
- 19,
757
- 14,
758
- 1,
759
- 3,
760
- 2,
761
- "CONDITIONING"
762
- ],
763
- [
764
- 26,
765
- 18,
766
- 0,
767
- 19,
768
- 0,
769
- "IMAGE"
770
- ],
771
- [
772
- 41,
773
- 23,
774
- 0,
775
- 26,
776
- 0,
777
- "CONDITIONING"
778
- ],
779
- [
780
- 42,
781
- 26,
782
- 0,
783
- 14,
784
- 0,
785
- "CONDITIONING"
786
- ],
787
- [
788
- 49,
789
- 17,
790
- 0,
791
- 18,
792
- 0,
793
- "IMAGE"
794
- ],
795
- [
796
- 50,
797
- 18,
798
- 0,
799
- 14,
800
- 4,
801
- "IMAGE"
802
- ],
803
- [
804
- 52,
805
- 15,
806
- 0,
807
- 14,
808
- 2,
809
- "CONTROL_NET"
810
- ],
811
- [
812
- 54,
813
- 28,
814
- 0,
815
- 3,
816
- 3,
817
- "LATENT"
818
- ],
819
- [
820
- 59,
821
- 35,
822
- 0,
823
- 7,
824
- 0,
825
- "CLIP"
826
- ],
827
- [
828
- 60,
829
- 32,
830
- 0,
831
- 14,
832
- 3,
833
- "VAE"
834
- ],
835
- [
836
- 61,
837
- 35,
838
- 0,
839
- 23,
840
- 0,
841
- "CLIP"
842
- ],
843
- [
844
- 62,
845
- 32,
846
- 0,
847
- 8,
848
- 1,
849
- "VAE"
850
- ],
851
- [
852
- 63,
853
- 34,
854
- 0,
855
- 36,
856
- 0,
857
- "MODEL"
858
- ],
859
- [
860
- 64,
861
- 36,
862
- 0,
863
- 37,
864
- 0,
865
- "*"
866
- ],
867
- [
868
- 65,
869
- 37,
870
- 0,
871
- 3,
872
- 0,
873
- "MODEL"
874
- ]
875
- ],
876
- "groups": [],
877
- "config": {},
878
- "extra": {
879
- "ds": {
880
- "scale": 0.5131581182307068,
881
- "offset": [
882
- 230.9978013084971,
883
- 284.1700529197747
884
- ]
885
- }
886
- },
887
- "version": 0.4
888
- }
custom_nodes/Comfy-WaveSpeed/workflows/hunyuan_video.json DELETED
@@ -1,851 +0,0 @@
1
- {
2
- "last_node_id": 78,
3
- "last_link_id": 217,
4
- "nodes": [
5
- {
6
- "id": 16,
7
- "type": "KSamplerSelect",
8
- "pos": [
9
- 484,
10
- 751
11
- ],
12
- "size": [
13
- 315,
14
- 58
15
- ],
16
- "flags": {},
17
- "order": 0,
18
- "mode": 0,
19
- "inputs": [],
20
- "outputs": [
21
- {
22
- "name": "SAMPLER",
23
- "type": "SAMPLER",
24
- "links": [
25
- 19
26
- ],
27
- "shape": 3
28
- }
29
- ],
30
- "properties": {
31
- "Node name for S&R": "KSamplerSelect"
32
- },
33
- "widgets_values": [
34
- "euler"
35
- ]
36
- },
37
- {
38
- "id": 17,
39
- "type": "BasicScheduler",
40
- "pos": [
41
- 478,
42
- 860
43
- ],
44
- "size": [
45
- 315,
46
- 106
47
- ],
48
- "flags": {},
49
- "order": 8,
50
- "mode": 0,
51
- "inputs": [
52
- {
53
- "name": "model",
54
- "type": "MODEL",
55
- "link": 190,
56
- "slot_index": 0
57
- }
58
- ],
59
- "outputs": [
60
- {
61
- "name": "SIGMAS",
62
- "type": "SIGMAS",
63
- "links": [
64
- 20
65
- ],
66
- "shape": 3
67
- }
68
- ],
69
- "properties": {
70
- "Node name for S&R": "BasicScheduler"
71
- },
72
- "widgets_values": [
73
- "simple",
74
- 20,
75
- 1
76
- ]
77
- },
78
- {
79
- "id": 26,
80
- "type": "FluxGuidance",
81
- "pos": [
82
- 520,
83
- 100
84
- ],
85
- "size": [
86
- 317.4000244140625,
87
- 58
88
- ],
89
- "flags": {},
90
- "order": 12,
91
- "mode": 0,
92
- "inputs": [
93
- {
94
- "name": "conditioning",
95
- "type": "CONDITIONING",
96
- "link": 175
97
- }
98
- ],
99
- "outputs": [
100
- {
101
- "name": "CONDITIONING",
102
- "type": "CONDITIONING",
103
- "links": [
104
- 129
105
- ],
106
- "slot_index": 0,
107
- "shape": 3
108
- }
109
- ],
110
- "properties": {
111
- "Node name for S&R": "FluxGuidance"
112
- },
113
- "widgets_values": [
114
- 6
115
- ],
116
- "color": "#233",
117
- "bgcolor": "#355"
118
- },
119
- {
120
- "id": 45,
121
- "type": "EmptyHunyuanLatentVideo",
122
- "pos": [
123
- 475.540771484375,
124
- 432.673583984375
125
- ],
126
- "size": [
127
- 315,
128
- 130
129
- ],
130
- "flags": {},
131
- "order": 1,
132
- "mode": 0,
133
- "inputs": [],
134
- "outputs": [
135
- {
136
- "name": "LATENT",
137
- "type": "LATENT",
138
- "links": [
139
- 180
140
- ],
141
- "slot_index": 0
142
- }
143
- ],
144
- "properties": {
145
- "Node name for S&R": "EmptyHunyuanLatentVideo"
146
- },
147
- "widgets_values": [
148
- 848,
149
- 480,
150
- 73,
151
- 1
152
- ]
153
- },
154
- {
155
- "id": 22,
156
- "type": "BasicGuider",
157
- "pos": [
158
- 600,
159
- 0
160
- ],
161
- "size": [
162
- 222.3482666015625,
163
- 46
164
- ],
165
- "flags": {},
166
- "order": 13,
167
- "mode": 0,
168
- "inputs": [
169
- {
170
- "name": "model",
171
- "type": "MODEL",
172
- "link": 195,
173
- "slot_index": 0
174
- },
175
- {
176
- "name": "conditioning",
177
- "type": "CONDITIONING",
178
- "link": 129,
179
- "slot_index": 1
180
- }
181
- ],
182
- "outputs": [
183
- {
184
- "name": "GUIDER",
185
- "type": "GUIDER",
186
- "links": [
187
- 30
188
- ],
189
- "slot_index": 0,
190
- "shape": 3
191
- }
192
- ],
193
- "properties": {
194
- "Node name for S&R": "BasicGuider"
195
- },
196
- "widgets_values": []
197
- },
198
- {
199
- "id": 73,
200
- "type": "VAEDecodeTiled",
201
- "pos": [
202
- 1150,
203
- 200
204
- ],
205
- "size": [
206
- 210,
207
- 150
208
- ],
209
- "flags": {},
210
- "order": 16,
211
- "mode": 0,
212
- "inputs": [
213
- {
214
- "name": "samples",
215
- "type": "LATENT",
216
- "link": 210
217
- },
218
- {
219
- "name": "vae",
220
- "type": "VAE",
221
- "link": 211
222
- }
223
- ],
224
- "outputs": [
225
- {
226
- "name": "IMAGE",
227
- "type": "IMAGE",
228
- "links": [
229
- 215
230
- ],
231
- "slot_index": 0
232
- }
233
- ],
234
- "properties": {
235
- "Node name for S&R": "VAEDecodeTiled"
236
- },
237
- "widgets_values": [
238
- 256,
239
- 64,
240
- 64,
241
- 8
242
- ]
243
- },
244
- {
245
- "id": 8,
246
- "type": "VAEDecode",
247
- "pos": [
248
- 1150,
249
- 90
250
- ],
251
- "size": [
252
- 210,
253
- 46
254
- ],
255
- "flags": {},
256
- "order": 15,
257
- "mode": 2,
258
- "inputs": [
259
- {
260
- "name": "samples",
261
- "type": "LATENT",
262
- "link": 181
263
- },
264
- {
265
- "name": "vae",
266
- "type": "VAE",
267
- "link": 206
268
- }
269
- ],
270
- "outputs": [
271
- {
272
- "name": "IMAGE",
273
- "type": "IMAGE",
274
- "links": [],
275
- "slot_index": 0
276
- }
277
- ],
278
- "properties": {
279
- "Node name for S&R": "VAEDecode"
280
- },
281
- "widgets_values": []
282
- },
283
- {
284
- "id": 74,
285
- "type": "Note",
286
- "pos": [
287
- 1150,
288
- 360
289
- ],
290
- "size": [
291
- 210,
292
- 170
293
- ],
294
- "flags": {},
295
- "order": 2,
296
- "mode": 0,
297
- "inputs": [],
298
- "outputs": [],
299
- "properties": {},
300
- "widgets_values": [
301
- "Use the tiled decode node by default because most people will need it.\n\nLower the tile_size and overlap if you run out of memory."
302
- ],
303
- "color": "#432",
304
- "bgcolor": "#653"
305
- },
306
- {
307
- "id": 77,
308
- "type": "Note",
309
- "pos": [
310
- 0,
311
- 0
312
- ],
313
- "size": [
314
- 350,
315
- 110
316
- ],
317
- "flags": {},
318
- "order": 3,
319
- "mode": 0,
320
- "inputs": [],
321
- "outputs": [],
322
- "properties": {},
323
- "widgets_values": [
324
- "Select a fp8 weight_dtype if you are running out of memory."
325
- ],
326
- "color": "#432",
327
- "bgcolor": "#653"
328
- },
329
- {
330
- "id": 13,
331
- "type": "SamplerCustomAdvanced",
332
- "pos": [
333
- 860,
334
- 200
335
- ],
336
- "size": [
337
- 272.3617858886719,
338
- 124.53733825683594
339
- ],
340
- "flags": {},
341
- "order": 14,
342
- "mode": 0,
343
- "inputs": [
344
- {
345
- "name": "noise",
346
- "type": "NOISE",
347
- "link": 37,
348
- "slot_index": 0
349
- },
350
- {
351
- "name": "guider",
352
- "type": "GUIDER",
353
- "link": 30,
354
- "slot_index": 1
355
- },
356
- {
357
- "name": "sampler",
358
- "type": "SAMPLER",
359
- "link": 19,
360
- "slot_index": 2
361
- },
362
- {
363
- "name": "sigmas",
364
- "type": "SIGMAS",
365
- "link": 20,
366
- "slot_index": 3
367
- },
368
- {
369
- "name": "latent_image",
370
- "type": "LATENT",
371
- "link": 180,
372
- "slot_index": 4
373
- }
374
- ],
375
- "outputs": [
376
- {
377
- "name": "output",
378
- "type": "LATENT",
379
- "links": [
380
- 181,
381
- 210
382
- ],
383
- "slot_index": 0,
384
- "shape": 3
385
- },
386
- {
387
- "name": "denoised_output",
388
- "type": "LATENT",
389
- "links": null,
390
- "shape": 3
391
- }
392
- ],
393
- "properties": {
394
- "Node name for S&R": "SamplerCustomAdvanced"
395
- },
396
- "widgets_values": []
397
- },
398
- {
399
- "id": 44,
400
- "type": "CLIPTextEncode",
401
- "pos": [
402
- 420,
403
- 200
404
- ],
405
- "size": [
406
- 422.84503173828125,
407
- 164.31304931640625
408
- ],
409
- "flags": {},
410
- "order": 10,
411
- "mode": 0,
412
- "inputs": [
413
- {
414
- "name": "clip",
415
- "type": "CLIP",
416
- "link": 205
417
- }
418
- ],
419
- "outputs": [
420
- {
421
- "name": "CONDITIONING",
422
- "type": "CONDITIONING",
423
- "links": [
424
- 175
425
- ],
426
- "slot_index": 0
427
- }
428
- ],
429
- "title": "CLIP Text Encode (Positive Prompt)",
430
- "properties": {
431
- "Node name for S&R": "CLIPTextEncode"
432
- },
433
- "widgets_values": [
434
- "anime style anime girl with massive fennec ears and one big fluffy tail, she has blonde hair long hair blue eyes wearing a pink sweater and a long blue skirt walking in a beautiful outdoor scenery with snow mountains in the background"
435
- ],
436
- "color": "#232",
437
- "bgcolor": "#353"
438
- },
439
- {
440
- "id": 75,
441
- "type": "SaveAnimatedWEBP",
442
- "pos": [
443
- 1410,
444
- 200
445
- ],
446
- "size": [
447
- 315,
448
- 366
449
- ],
450
- "flags": {},
451
- "order": 17,
452
- "mode": 0,
453
- "inputs": [
454
- {
455
- "name": "images",
456
- "type": "IMAGE",
457
- "link": 215
458
- }
459
- ],
460
- "outputs": [],
461
- "properties": {},
462
- "widgets_values": [
463
- "ComfyUI",
464
- 24,
465
- false,
466
- 80,
467
- "default",
468
- null
469
- ]
470
- },
471
- {
472
- "id": 12,
473
- "type": "UNETLoader",
474
- "pos": [
475
- 0,
476
- 150
477
- ],
478
- "size": [
479
- 350,
480
- 82
481
- ],
482
- "flags": {},
483
- "order": 4,
484
- "mode": 0,
485
- "inputs": [],
486
- "outputs": [
487
- {
488
- "name": "MODEL",
489
- "type": "MODEL",
490
- "links": [
491
- 190,
492
- 216
493
- ],
494
- "slot_index": 0,
495
- "shape": 3
496
- }
497
- ],
498
- "properties": {
499
- "Node name for S&R": "UNETLoader"
500
- },
501
- "widgets_values": [
502
- "hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors",
503
- "fp8_e4m3fn_fast"
504
- ],
505
- "color": "#223",
506
- "bgcolor": "#335"
507
- },
508
- {
509
- "id": 11,
510
- "type": "DualCLIPLoader",
511
- "pos": [
512
- 0,
513
- 270
514
- ],
515
- "size": [
516
- 350,
517
- 106
518
- ],
519
- "flags": {},
520
- "order": 5,
521
- "mode": 0,
522
- "inputs": [],
523
- "outputs": [
524
- {
525
- "name": "CLIP",
526
- "type": "CLIP",
527
- "links": [
528
- 205
529
- ],
530
- "slot_index": 0,
531
- "shape": 3
532
- }
533
- ],
534
- "properties": {
535
- "Node name for S&R": "DualCLIPLoader"
536
- },
537
- "widgets_values": [
538
- "clip_l.safetensors",
539
- "llava_llama3_fp8_scaled.safetensors",
540
- "hunyuan_video",
541
- "default"
542
- ]
543
- },
544
- {
545
- "id": 10,
546
- "type": "VAELoader",
547
- "pos": [
548
- 0,
549
- 420
550
- ],
551
- "size": [
552
- 350,
553
- 60
554
- ],
555
- "flags": {},
556
- "order": 6,
557
- "mode": 0,
558
- "inputs": [],
559
- "outputs": [
560
- {
561
- "name": "VAE",
562
- "type": "VAE",
563
- "links": [
564
- 206,
565
- 211
566
- ],
567
- "slot_index": 0,
568
- "shape": 3
569
- }
570
- ],
571
- "properties": {
572
- "Node name for S&R": "VAELoader"
573
- },
574
- "widgets_values": [
575
- "hunyuan_video_vae_bf16.safetensors"
576
- ]
577
- },
578
- {
579
- "id": 67,
580
- "type": "ModelSamplingSD3",
581
- "pos": [
582
- 360,
583
- 0
584
- ],
585
- "size": [
586
- 210,
587
- 58
588
- ],
589
- "flags": {},
590
- "order": 11,
591
- "mode": 0,
592
- "inputs": [
593
- {
594
- "name": "model",
595
- "type": "MODEL",
596
- "link": 217
597
- }
598
- ],
599
- "outputs": [
600
- {
601
- "name": "MODEL",
602
- "type": "MODEL",
603
- "links": [
604
- 195
605
- ],
606
- "slot_index": 0
607
- }
608
- ],
609
- "properties": {
610
- "Node name for S&R": "ModelSamplingSD3"
611
- },
612
- "widgets_values": [
613
- 7
614
- ]
615
- },
616
- {
617
- "id": 25,
618
- "type": "RandomNoise",
619
- "pos": [
620
- 479,
621
- 618
622
- ],
623
- "size": [
624
- 315,
625
- 82
626
- ],
627
- "flags": {},
628
- "order": 7,
629
- "mode": 0,
630
- "inputs": [],
631
- "outputs": [
632
- {
633
- "name": "NOISE",
634
- "type": "NOISE",
635
- "links": [
636
- 37
637
- ],
638
- "shape": 3
639
- }
640
- ],
641
- "properties": {
642
- "Node name for S&R": "RandomNoise"
643
- },
644
- "widgets_values": [
645
- 0,
646
- "fixed"
647
- ],
648
- "color": "#2a363b",
649
- "bgcolor": "#3f5159"
650
- },
651
- {
652
- "id": 78,
653
- "type": "ApplyFBCacheOnModel",
654
- "pos": [
655
- 10,
656
- -220
657
- ],
658
- "size": [
659
- 315,
660
- 154
661
- ],
662
- "flags": {},
663
- "order": 9,
664
- "mode": 0,
665
- "inputs": [
666
- {
667
- "name": "model",
668
- "type": "MODEL",
669
- "link": 216
670
- }
671
- ],
672
- "outputs": [
673
- {
674
- "name": "MODEL",
675
- "type": "MODEL",
676
- "links": [
677
- 217
678
- ],
679
- "slot_index": 0
680
- }
681
- ],
682
- "properties": {
683
- "Node name for S&R": "ApplyFBCacheOnModel"
684
- },
685
- "widgets_values": [
686
- "diffusion_model",
687
- 0.1,
688
- 0,
689
- 1,
690
- -1
691
- ]
692
- }
693
- ],
694
- "links": [
695
- [
696
- 19,
697
- 16,
698
- 0,
699
- 13,
700
- 2,
701
- "SAMPLER"
702
- ],
703
- [
704
- 20,
705
- 17,
706
- 0,
707
- 13,
708
- 3,
709
- "SIGMAS"
710
- ],
711
- [
712
- 30,
713
- 22,
714
- 0,
715
- 13,
716
- 1,
717
- "GUIDER"
718
- ],
719
- [
720
- 37,
721
- 25,
722
- 0,
723
- 13,
724
- 0,
725
- "NOISE"
726
- ],
727
- [
728
- 129,
729
- 26,
730
- 0,
731
- 22,
732
- 1,
733
- "CONDITIONING"
734
- ],
735
- [
736
- 175,
737
- 44,
738
- 0,
739
- 26,
740
- 0,
741
- "CONDITIONING"
742
- ],
743
- [
744
- 180,
745
- 45,
746
- 0,
747
- 13,
748
- 4,
749
- "LATENT"
750
- ],
751
- [
752
- 181,
753
- 13,
754
- 0,
755
- 8,
756
- 0,
757
- "LATENT"
758
- ],
759
- [
760
- 190,
761
- 12,
762
- 0,
763
- 17,
764
- 0,
765
- "MODEL"
766
- ],
767
- [
768
- 195,
769
- 67,
770
- 0,
771
- 22,
772
- 0,
773
- "MODEL"
774
- ],
775
- [
776
- 205,
777
- 11,
778
- 0,
779
- 44,
780
- 0,
781
- "CLIP"
782
- ],
783
- [
784
- 206,
785
- 10,
786
- 0,
787
- 8,
788
- 1,
789
- "VAE"
790
- ],
791
- [
792
- 210,
793
- 13,
794
- 0,
795
- 73,
796
- 0,
797
- "LATENT"
798
- ],
799
- [
800
- 211,
801
- 10,
802
- 0,
803
- 73,
804
- 1,
805
- "VAE"
806
- ],
807
- [
808
- 215,
809
- 73,
810
- 0,
811
- 75,
812
- 0,
813
- "IMAGE"
814
- ],
815
- [
816
- 216,
817
- 12,
818
- 0,
819
- 78,
820
- 0,
821
- "MODEL"
822
- ],
823
- [
824
- 217,
825
- 78,
826
- 0,
827
- 67,
828
- 0,
829
- "MODEL"
830
- ]
831
- ],
832
- "groups": [],
833
- "config": {},
834
- "extra": {
835
- "ds": {
836
- "scale": 1.0238870172519845,
837
- "offset": [
838
- 170.98964291539164,
839
- 220.91105213566834
840
- ]
841
- },
842
- "groupNodes": {},
843
- "node_versions": {
844
- "comfy-core": "v0.3.10-44-g2ff3104f",
845
- "Comfy-WaveSpeed": "e1e0e4c143ba15dd8f11b25741b4b491751f229a"
846
- },
847
- "VHS_latentpreview": false,
848
- "VHS_latentpreviewrate": 0
849
- },
850
- "version": 0.4
851
- }
custom_nodes/Comfy-WaveSpeed/workflows/ltxv.json DELETED
@@ -1,723 +0,0 @@
1
- {
2
- "last_node_id": 78,
3
- "last_link_id": 186,
4
- "nodes": [
5
- {
6
- "id": 6,
7
- "type": "CLIPTextEncode",
8
- "pos": [
9
- 420,
10
- 190
11
- ],
12
- "size": [
13
- 422.84503173828125,
14
- 164.31304931640625
15
- ],
16
- "flags": {},
17
- "order": 6,
18
- "mode": 0,
19
- "inputs": [
20
- {
21
- "name": "clip",
22
- "type": "CLIP",
23
- "link": 74
24
- }
25
- ],
26
- "outputs": [
27
- {
28
- "name": "CONDITIONING",
29
- "type": "CONDITIONING",
30
- "links": [
31
- 169
32
- ],
33
- "slot_index": 0
34
- }
35
- ],
36
- "title": "CLIP Text Encode (Positive Prompt)",
37
- "properties": {
38
- "Node name for S&R": "CLIPTextEncode"
39
- },
40
- "widgets_values": [
41
- "A woman with long brown hair and light skin smiles at another woman with long blonde hair. The woman with brown hair wears a black jacket and has a small, barely noticeable mole on her right cheek. The camera angle is a close-up, focused on the woman with brown hair's face. The lighting is warm and natural, likely from the setting sun, casting a soft glow on the scene. The scene appears to be real-life footage."
42
- ],
43
- "color": "#232",
44
- "bgcolor": "#353"
45
- },
46
- {
47
- "id": 7,
48
- "type": "CLIPTextEncode",
49
- "pos": [
50
- 420,
51
- 390
52
- ],
53
- "size": [
54
- 425.27801513671875,
55
- 180.6060791015625
56
- ],
57
- "flags": {},
58
- "order": 7,
59
- "mode": 0,
60
- "inputs": [
61
- {
62
- "name": "clip",
63
- "type": "CLIP",
64
- "link": 75
65
- }
66
- ],
67
- "outputs": [
68
- {
69
- "name": "CONDITIONING",
70
- "type": "CONDITIONING",
71
- "links": [
72
- 170
73
- ],
74
- "slot_index": 0
75
- }
76
- ],
77
- "title": "CLIP Text Encode (Negative Prompt)",
78
- "properties": {
79
- "Node name for S&R": "CLIPTextEncode"
80
- },
81
- "widgets_values": [
82
- "low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly"
83
- ],
84
- "color": "#322",
85
- "bgcolor": "#533"
86
- },
87
- {
88
- "id": 8,
89
- "type": "VAEDecode",
90
- "pos": [
91
- 1600,
92
- 30
93
- ],
94
- "size": [
95
- 210,
96
- 46
97
- ],
98
- "flags": {},
99
- "order": 12,
100
- "mode": 0,
101
- "inputs": [
102
- {
103
- "name": "samples",
104
- "type": "LATENT",
105
- "link": 171
106
- },
107
- {
108
- "name": "vae",
109
- "type": "VAE",
110
- "link": 87
111
- }
112
- ],
113
- "outputs": [
114
- {
115
- "name": "IMAGE",
116
- "type": "IMAGE",
117
- "links": [
118
- 106
119
- ],
120
- "slot_index": 0
121
- }
122
- ],
123
- "properties": {
124
- "Node name for S&R": "VAEDecode"
125
- },
126
- "widgets_values": []
127
- },
128
- {
129
- "id": 38,
130
- "type": "CLIPLoader",
131
- "pos": [
132
- 60,
133
- 190
134
- ],
135
- "size": [
136
- 315,
137
- 82
138
- ],
139
- "flags": {},
140
- "order": 3,
141
- "mode": 0,
142
- "inputs": [],
143
- "outputs": [
144
- {
145
- "name": "CLIP",
146
- "type": "CLIP",
147
- "links": [
148
- 74,
149
- 75
150
- ],
151
- "slot_index": 0
152
- }
153
- ],
154
- "properties": {
155
- "Node name for S&R": "CLIPLoader"
156
- },
157
- "widgets_values": [
158
- "t5xxl_fp8_e4m3fn.safetensors",
159
- "ltxv",
160
- "default"
161
- ]
162
- },
163
- {
164
- "id": 41,
165
- "type": "SaveAnimatedWEBP",
166
- "pos": [
167
- 1830,
168
- 30
169
- ],
170
- "size": [
171
- 680,
172
- 610
173
- ],
174
- "flags": {},
175
- "order": 13,
176
- "mode": 0,
177
- "inputs": [
178
- {
179
- "name": "images",
180
- "type": "IMAGE",
181
- "link": 106
182
- }
183
- ],
184
- "outputs": [],
185
- "properties": {},
186
- "widgets_values": [
187
- "ComfyUI",
188
- 24,
189
- false,
190
- 90,
191
- "default",
192
- null
193
- ]
194
- },
195
- {
196
- "id": 44,
197
- "type": "CheckpointLoaderSimple",
198
- "pos": [
199
- 520,
200
- 30
201
- ],
202
- "size": [
203
- 315,
204
- 98
205
- ],
206
- "flags": {},
207
- "order": 4,
208
- "mode": 0,
209
- "inputs": [],
210
- "outputs": [
211
- {
212
- "name": "MODEL",
213
- "type": "MODEL",
214
- "links": [
215
- 183
216
- ],
217
- "slot_index": 0
218
- },
219
- {
220
- "name": "CLIP",
221
- "type": "CLIP",
222
- "links": null
223
- },
224
- {
225
- "name": "VAE",
226
- "type": "VAE",
227
- "links": [
228
- 87
229
- ],
230
- "slot_index": 2
231
- }
232
- ],
233
- "properties": {
234
- "Node name for S&R": "CheckpointLoaderSimple"
235
- },
236
- "widgets_values": [
237
- "ltx-video-2b-v0.9.1.safetensors"
238
- ]
239
- },
240
- {
241
- "id": 69,
242
- "type": "LTXVConditioning",
243
- "pos": [
244
- 920,
245
- 60
246
- ],
247
- "size": [
248
- 223.8660125732422,
249
- 78
250
- ],
251
- "flags": {},
252
- "order": 9,
253
- "mode": 0,
254
- "inputs": [
255
- {
256
- "name": "positive",
257
- "type": "CONDITIONING",
258
- "link": 169
259
- },
260
- {
261
- "name": "negative",
262
- "type": "CONDITIONING",
263
- "link": 170
264
- }
265
- ],
266
- "outputs": [
267
- {
268
- "name": "positive",
269
- "type": "CONDITIONING",
270
- "links": [
271
- 166
272
- ],
273
- "slot_index": 0
274
- },
275
- {
276
- "name": "negative",
277
- "type": "CONDITIONING",
278
- "links": [
279
- 167
280
- ],
281
- "slot_index": 1
282
- }
283
- ],
284
- "properties": {
285
- "Node name for S&R": "LTXVConditioning"
286
- },
287
- "widgets_values": [
288
- 25
289
- ]
290
- },
291
- {
292
- "id": 70,
293
- "type": "EmptyLTXVLatentVideo",
294
- "pos": [
295
- 860,
296
- 240
297
- ],
298
- "size": [
299
- 315,
300
- 130
301
- ],
302
- "flags": {},
303
- "order": 0,
304
- "mode": 0,
305
- "inputs": [],
306
- "outputs": [
307
- {
308
- "name": "LATENT",
309
- "type": "LATENT",
310
- "links": [
311
- 168,
312
- 175
313
- ],
314
- "slot_index": 0
315
- }
316
- ],
317
- "properties": {
318
- "Node name for S&R": "EmptyLTXVLatentVideo"
319
- },
320
- "widgets_values": [
321
- 768,
322
- 512,
323
- 97,
324
- 1
325
- ]
326
- },
327
- {
328
- "id": 71,
329
- "type": "LTXVScheduler",
330
- "pos": [
331
- 856,
332
- 531
333
- ],
334
- "size": [
335
- 315,
336
- 154
337
- ],
338
- "flags": {},
339
- "order": 5,
340
- "mode": 0,
341
- "inputs": [
342
- {
343
- "name": "latent",
344
- "type": "LATENT",
345
- "link": 168,
346
- "shape": 7
347
- }
348
- ],
349
- "outputs": [
350
- {
351
- "name": "SIGMAS",
352
- "type": "SIGMAS",
353
- "links": [
354
- 182
355
- ],
356
- "slot_index": 0
357
- }
358
- ],
359
- "properties": {
360
- "Node name for S&R": "LTXVScheduler"
361
- },
362
- "widgets_values": [
363
- 30,
364
- 2.05,
365
- 0.95,
366
- true,
367
- 0.1
368
- ]
369
- },
370
- {
371
- "id": 72,
372
- "type": "SamplerCustom",
373
- "pos": [
374
- 1201,
375
- 32
376
- ],
377
- "size": [
378
- 355.20001220703125,
379
- 230
380
- ],
381
- "flags": {},
382
- "order": 11,
383
- "mode": 0,
384
- "inputs": [
385
- {
386
- "name": "model",
387
- "type": "MODEL",
388
- "link": 186
389
- },
390
- {
391
- "name": "positive",
392
- "type": "CONDITIONING",
393
- "link": 166
394
- },
395
- {
396
- "name": "negative",
397
- "type": "CONDITIONING",
398
- "link": 167
399
- },
400
- {
401
- "name": "sampler",
402
- "type": "SAMPLER",
403
- "link": 172
404
- },
405
- {
406
- "name": "sigmas",
407
- "type": "SIGMAS",
408
- "link": 182
409
- },
410
- {
411
- "name": "latent_image",
412
- "type": "LATENT",
413
- "link": 175
414
- }
415
- ],
416
- "outputs": [
417
- {
418
- "name": "output",
419
- "type": "LATENT",
420
- "links": [
421
- 171
422
- ],
423
- "slot_index": 0
424
- },
425
- {
426
- "name": "denoised_output",
427
- "type": "LATENT",
428
- "links": null
429
- }
430
- ],
431
- "properties": {
432
- "Node name for S&R": "SamplerCustom"
433
- },
434
- "widgets_values": [
435
- true,
436
- 0,
437
- "fixed",
438
- 3
439
- ]
440
- },
441
- {
442
- "id": 73,
443
- "type": "KSamplerSelect",
444
- "pos": [
445
- 860,
446
- 420
447
- ],
448
- "size": [
449
- 315,
450
- 58
451
- ],
452
- "flags": {},
453
- "order": 1,
454
- "mode": 0,
455
- "inputs": [],
456
- "outputs": [
457
- {
458
- "name": "SAMPLER",
459
- "type": "SAMPLER",
460
- "links": [
461
- 172
462
- ]
463
- }
464
- ],
465
- "properties": {
466
- "Node name for S&R": "KSamplerSelect"
467
- },
468
- "widgets_values": [
469
- "euler"
470
- ]
471
- },
472
- {
473
- "id": 76,
474
- "type": "Note",
475
- "pos": [
476
- 40,
477
- 350
478
- ],
479
- "size": [
480
- 360,
481
- 200
482
- ],
483
- "flags": {},
484
- "order": 2,
485
- "mode": 0,
486
- "inputs": [],
487
- "outputs": [],
488
- "properties": {},
489
- "widgets_values": [
490
- "This model needs long descriptive prompts, if the prompt is too short the quality will suffer greatly."
491
- ],
492
- "color": "#432",
493
- "bgcolor": "#653"
494
- },
495
- {
496
- "id": 77,
497
- "type": "ApplyFBCacheOnModel",
498
- "pos": [
499
- 840,
500
- -160
501
- ],
502
- "size": [
503
- 315,
504
- 82
505
- ],
506
- "flags": {},
507
- "order": 8,
508
- "mode": 0,
509
- "inputs": [
510
- {
511
- "name": "model",
512
- "type": "MODEL",
513
- "link": 183
514
- }
515
- ],
516
- "outputs": [
517
- {
518
- "name": "MODEL",
519
- "type": "MODEL",
520
- "links": [
521
- 185
522
- ],
523
- "slot_index": 0
524
- }
525
- ],
526
- "properties": {
527
- "Node name for S&R": "ApplyFBCacheOnModel"
528
- },
529
- "widgets_values": [
530
- "diffusion_model",
531
- 0.1
532
- ]
533
- },
534
- {
535
- "id": 78,
536
- "type": "EnhancedCompileModel",
537
- "pos": [
538
- 1200,
539
- -370
540
- ],
541
- "size": [
542
- 400,
543
- 294
544
- ],
545
- "flags": {},
546
- "order": 10,
547
- "mode": 0,
548
- "inputs": [
549
- {
550
- "name": "model",
551
- "type": "*",
552
- "link": 185
553
- }
554
- ],
555
- "outputs": [
556
- {
557
- "name": "*",
558
- "type": "*",
559
- "links": [
560
- 186
561
- ],
562
- "slot_index": 0
563
- }
564
- ],
565
- "properties": {
566
- "Node name for S&R": "EnhancedCompileModel"
567
- },
568
- "widgets_values": [
569
- true,
570
- "diffusion_model",
571
- "torch.compile",
572
- false,
573
- false,
574
- "",
575
- "",
576
- false,
577
- "inductor"
578
- ]
579
- }
580
- ],
581
- "links": [
582
- [
583
- 74,
584
- 38,
585
- 0,
586
- 6,
587
- 0,
588
- "CLIP"
589
- ],
590
- [
591
- 75,
592
- 38,
593
- 0,
594
- 7,
595
- 0,
596
- "CLIP"
597
- ],
598
- [
599
- 87,
600
- 44,
601
- 2,
602
- 8,
603
- 1,
604
- "VAE"
605
- ],
606
- [
607
- 106,
608
- 8,
609
- 0,
610
- 41,
611
- 0,
612
- "IMAGE"
613
- ],
614
- [
615
- 166,
616
- 69,
617
- 0,
618
- 72,
619
- 1,
620
- "CONDITIONING"
621
- ],
622
- [
623
- 167,
624
- 69,
625
- 1,
626
- 72,
627
- 2,
628
- "CONDITIONING"
629
- ],
630
- [
631
- 168,
632
- 70,
633
- 0,
634
- 71,
635
- 0,
636
- "LATENT"
637
- ],
638
- [
639
- 169,
640
- 6,
641
- 0,
642
- 69,
643
- 0,
644
- "CONDITIONING"
645
- ],
646
- [
647
- 170,
648
- 7,
649
- 0,
650
- 69,
651
- 1,
652
- "CONDITIONING"
653
- ],
654
- [
655
- 171,
656
- 72,
657
- 0,
658
- 8,
659
- 0,
660
- "LATENT"
661
- ],
662
- [
663
- 172,
664
- 73,
665
- 0,
666
- 72,
667
- 3,
668
- "SAMPLER"
669
- ],
670
- [
671
- 175,
672
- 70,
673
- 0,
674
- 72,
675
- 5,
676
- "LATENT"
677
- ],
678
- [
679
- 182,
680
- 71,
681
- 0,
682
- 72,
683
- 4,
684
- "SIGMAS"
685
- ],
686
- [
687
- 183,
688
- 44,
689
- 0,
690
- 77,
691
- 0,
692
- "MODEL"
693
- ],
694
- [
695
- 185,
696
- 77,
697
- 0,
698
- 78,
699
- 0,
700
- "*"
701
- ],
702
- [
703
- 186,
704
- 78,
705
- 0,
706
- 72,
707
- 0,
708
- "MODEL"
709
- ]
710
- ],
711
- "groups": [],
712
- "config": {},
713
- "extra": {
714
- "ds": {
715
- "scale": 0.5644739300537776,
716
- "offset": {
717
- "0": 40.9691162109375,
718
- "1": 495.14727783203125
719
- }
720
- }
721
- },
722
- "version": 0.4
723
- }
custom_nodes/Comfy-WaveSpeed/workflows/sd3.5.json DELETED
@@ -1,657 +0,0 @@
1
- {
2
- "last_node_id": 55,
3
- "last_link_id": 104,
4
- "nodes": [
5
- {
6
- "id": 3,
7
- "type": "KSampler",
8
- "pos": [
9
- 864,
10
- 96
11
- ],
12
- "size": [
13
- 315,
14
- 262
15
- ],
16
- "flags": {},
17
- "order": 11,
18
- "mode": 0,
19
- "inputs": [
20
- {
21
- "name": "model",
22
- "type": "MODEL",
23
- "link": 104,
24
- "slot_index": 0
25
- },
26
- {
27
- "name": "positive",
28
- "type": "CONDITIONING",
29
- "link": 21
30
- },
31
- {
32
- "name": "negative",
33
- "type": "CONDITIONING",
34
- "link": 80
35
- },
36
- {
37
- "name": "latent_image",
38
- "type": "LATENT",
39
- "link": 100
40
- }
41
- ],
42
- "outputs": [
43
- {
44
- "name": "LATENT",
45
- "type": "LATENT",
46
- "links": [
47
- 7
48
- ],
49
- "slot_index": 0
50
- }
51
- ],
52
- "properties": {
53
- "Node name for S&R": "KSampler"
54
- },
55
- "widgets_values": [
56
- 0,
57
- "fixed",
58
- 30,
59
- 5.45,
60
- "euler",
61
- "sgm_uniform",
62
- 1
63
- ]
64
- },
65
- {
66
- "id": 4,
67
- "type": "CheckpointLoaderSimple",
68
- "pos": [
69
- -96,
70
- 480
71
- ],
72
- "size": [
73
- 384.75592041015625,
74
- 98
75
- ],
76
- "flags": {},
77
- "order": 6,
78
- "mode": 0,
79
- "inputs": [],
80
- "outputs": [
81
- {
82
- "name": "MODEL",
83
- "type": "MODEL",
84
- "links": [
85
- 101
86
- ],
87
- "slot_index": 0
88
- },
89
- {
90
- "name": "CLIP",
91
- "type": "CLIP",
92
- "links": [],
93
- "slot_index": 1
94
- },
95
- {
96
- "name": "VAE",
97
- "type": "VAE",
98
- "links": [
99
- 53
100
- ],
101
- "slot_index": 2
102
- }
103
- ],
104
- "properties": {
105
- "Node name for S&R": "CheckpointLoaderSimple"
106
- },
107
- "widgets_values": [
108
- "sd3.5_large_fp8_scaled.safetensors"
109
- ]
110
- },
111
- {
112
- "id": 8,
113
- "type": "VAEDecode",
114
- "pos": [
115
- 1200,
116
- 96
117
- ],
118
- "size": [
119
- 210,
120
- 46
121
- ],
122
- "flags": {},
123
- "order": 12,
124
- "mode": 0,
125
- "inputs": [
126
- {
127
- "name": "samples",
128
- "type": "LATENT",
129
- "link": 7
130
- },
131
- {
132
- "name": "vae",
133
- "type": "VAE",
134
- "link": 53,
135
- "slot_index": 1
136
- }
137
- ],
138
- "outputs": [
139
- {
140
- "name": "IMAGE",
141
- "type": "IMAGE",
142
- "links": [
143
- 51
144
- ],
145
- "slot_index": 0
146
- }
147
- ],
148
- "properties": {
149
- "Node name for S&R": "VAEDecode"
150
- },
151
- "widgets_values": []
152
- },
153
- {
154
- "id": 9,
155
- "type": "SaveImage",
156
- "pos": [
157
- 1440,
158
- 96
159
- ],
160
- "size": [
161
- 952.5112915039062,
162
- 1007.9328002929688
163
- ],
164
- "flags": {},
165
- "order": 13,
166
- "mode": 0,
167
- "inputs": [
168
- {
169
- "name": "images",
170
- "type": "IMAGE",
171
- "link": 51,
172
- "slot_index": 0
173
- }
174
- ],
175
- "outputs": [],
176
- "properties": {},
177
- "widgets_values": [
178
- "ComfyUI"
179
- ]
180
- },
181
- {
182
- "id": 16,
183
- "type": "CLIPTextEncode",
184
- "pos": [
185
- 384,
186
- 96
187
- ],
188
- "size": [
189
- 432,
190
- 192
191
- ],
192
- "flags": {},
193
- "order": 7,
194
- "mode": 0,
195
- "inputs": [
196
- {
197
- "name": "clip",
198
- "type": "CLIP",
199
- "link": 96
200
- }
201
- ],
202
- "outputs": [
203
- {
204
- "name": "CONDITIONING",
205
- "type": "CONDITIONING",
206
- "links": [
207
- 21
208
- ],
209
- "slot_index": 0
210
- }
211
- ],
212
- "title": "Positive Prompt",
213
- "properties": {
214
- "Node name for S&R": "CLIPTextEncode"
215
- },
216
- "widgets_values": [
217
- "a bottle with a rainbow galaxy inside it on top of a wooden table on a snowy mountain top with the ocean and clouds in the background"
218
- ],
219
- "color": "#232",
220
- "bgcolor": "#353"
221
- },
222
- {
223
- "id": 40,
224
- "type": "CLIPTextEncode",
225
- "pos": [
226
- 384,
227
- 336
228
- ],
229
- "size": [
230
- 432,
231
- 192
232
- ],
233
- "flags": {},
234
- "order": 8,
235
- "mode": 0,
236
- "inputs": [
237
- {
238
- "name": "clip",
239
- "type": "CLIP",
240
- "link": 97
241
- }
242
- ],
243
- "outputs": [
244
- {
245
- "name": "CONDITIONING",
246
- "type": "CONDITIONING",
247
- "links": [
248
- 80
249
- ],
250
- "slot_index": 0,
251
- "shape": 3
252
- }
253
- ],
254
- "title": "Negative Prompt",
255
- "properties": {
256
- "Node name for S&R": "CLIPTextEncode"
257
- },
258
- "widgets_values": [
259
- ""
260
- ],
261
- "color": "#322",
262
- "bgcolor": "#533"
263
- },
264
- {
265
- "id": 41,
266
- "type": "CLIPLoader",
267
- "pos": [
268
- -96,
269
- 0
270
- ],
271
- "size": [
272
- 315,
273
- 82
274
- ],
275
- "flags": {},
276
- "order": 0,
277
- "mode": 0,
278
- "inputs": [],
279
- "outputs": [
280
- {
281
- "name": "CLIP",
282
- "type": "CLIP",
283
- "links": [],
284
- "slot_index": 0,
285
- "shape": 3
286
- }
287
- ],
288
- "properties": {
289
- "Node name for S&R": "CLIPLoader"
290
- },
291
- "widgets_values": [
292
- "t5xxl_fp8_e4m3fn.safetensors",
293
- "sd3",
294
- "default"
295
- ]
296
- },
297
- {
298
- "id": 42,
299
- "type": "DualCLIPLoader",
300
- "pos": [
301
- -96,
302
- 144
303
- ],
304
- "size": [
305
- 315,
306
- 106
307
- ],
308
- "flags": {},
309
- "order": 1,
310
- "mode": 0,
311
- "inputs": [],
312
- "outputs": [
313
- {
314
- "name": "CLIP",
315
- "type": "CLIP",
316
- "links": [],
317
- "slot_index": 0,
318
- "shape": 3
319
- }
320
- ],
321
- "properties": {
322
- "Node name for S&R": "DualCLIPLoader"
323
- },
324
- "widgets_values": [
325
- "clip_l.safetensors",
326
- "clip_g.safetensors",
327
- "sd3",
328
- "default"
329
- ]
330
- },
331
- {
332
- "id": 43,
333
- "type": "TripleCLIPLoader",
334
- "pos": [
335
- -96,
336
- 288
337
- ],
338
- "size": [
339
- 315,
340
- 106
341
- ],
342
- "flags": {},
343
- "order": 5,
344
- "mode": 0,
345
- "inputs": [],
346
- "outputs": [
347
- {
348
- "name": "CLIP",
349
- "type": "CLIP",
350
- "links": [
351
- 96,
352
- 97
353
- ],
354
- "slot_index": 0,
355
- "shape": 3
356
- }
357
- ],
358
- "properties": {
359
- "Node name for S&R": "TripleCLIPLoader"
360
- },
361
- "widgets_values": [
362
- "clip_l.safetensors",
363
- "clip_g.safetensors",
364
- "t5xxl_fp8_e4m3fn.safetensors"
365
- ]
366
- },
367
- {
368
- "id": 50,
369
- "type": "Note",
370
- "pos": [
371
- -384,
372
- 144
373
- ],
374
- "size": [
375
- 223.34756469726562,
376
- 254.37765502929688
377
- ],
378
- "flags": {},
379
- "order": 2,
380
- "mode": 0,
381
- "inputs": [],
382
- "outputs": [],
383
- "properties": {
384
- "text": ""
385
- },
386
- "widgets_values": [
387
- "SD3 supports different text encoder configurations, you can see how to load them here.\n\n\nMake sure to put these files:\nclip_g.safetensors\nclip_l.safetensors\nt5xxl_fp8.safetensors\n\n\nIn the ComfyUI/models/clip directory"
388
- ],
389
- "color": "#432",
390
- "bgcolor": "#653"
391
- },
392
- {
393
- "id": 51,
394
- "type": "Note",
395
- "pos": [
396
- -96,
397
- 624
398
- ],
399
- "size": [
400
- 384,
401
- 192
402
- ],
403
- "flags": {},
404
- "order": 3,
405
- "mode": 0,
406
- "inputs": [],
407
- "outputs": [],
408
- "properties": {
409
- "text": ""
410
- },
411
- "widgets_values": [
412
- "sd3.5_large_fp8.safetensors is the file that does not contain any CLIP/text encoder weights so you need to load them separately.\n\nThis file goes in the ComfyUI/models/checkpoints directory."
413
- ],
414
- "color": "#432",
415
- "bgcolor": "#653"
416
- },
417
- {
418
- "id": 53,
419
- "type": "EmptySD3LatentImage",
420
- "pos": [
421
- 480,
422
- 576
423
- ],
424
- "size": [
425
- 315,
426
- 106
427
- ],
428
- "flags": {},
429
- "order": 4,
430
- "mode": 0,
431
- "inputs": [],
432
- "outputs": [
433
- {
434
- "name": "LATENT",
435
- "type": "LATENT",
436
- "links": [
437
- 100
438
- ],
439
- "slot_index": 0,
440
- "shape": 3
441
- }
442
- ],
443
- "properties": {
444
- "Node name for S&R": "EmptySD3LatentImage"
445
- },
446
- "widgets_values": [
447
- 1024,
448
- 1024,
449
- 1
450
- ]
451
- },
452
- {
453
- "id": 54,
454
- "type": "ApplyFBCacheOnModel",
455
- "pos": [
456
- 340,
457
- 750
458
- ],
459
- "size": [
460
- 315,
461
- 154
462
- ],
463
- "flags": {},
464
- "order": 9,
465
- "mode": 0,
466
- "inputs": [
467
- {
468
- "name": "model",
469
- "type": "MODEL",
470
- "link": 101
471
- }
472
- ],
473
- "outputs": [
474
- {
475
- "name": "MODEL",
476
- "type": "MODEL",
477
- "links": [
478
- 103
479
- ],
480
- "slot_index": 0
481
- }
482
- ],
483
- "properties": {
484
- "Node name for S&R": "ApplyFBCacheOnModel"
485
- },
486
- "widgets_values": [
487
- "diffusion_model",
488
- 0.12,
489
- 0,
490
- 1,
491
- -1
492
- ]
493
- },
494
- {
495
- "id": 55,
496
- "type": "EnhancedCompileModel",
497
- "pos": [
498
- 730,
499
- 750
500
- ],
501
- "size": [
502
- 400,
503
- 294
504
- ],
505
- "flags": {},
506
- "order": 10,
507
- "mode": 0,
508
- "inputs": [
509
- {
510
- "name": "model",
511
- "type": "*",
512
- "link": 103
513
- }
514
- ],
515
- "outputs": [
516
- {
517
- "name": "*",
518
- "type": "*",
519
- "links": [
520
- 104
521
- ],
522
- "slot_index": 0
523
- }
524
- ],
525
- "properties": {
526
- "Node name for S&R": "EnhancedCompileModel"
527
- },
528
- "widgets_values": [
529
- true,
530
- "diffusion_model",
531
- "torch.compile",
532
- false,
533
- false,
534
- "",
535
- "",
536
- false,
537
- "inductor"
538
- ]
539
- }
540
- ],
541
- "links": [
542
- [
543
- 7,
544
- 3,
545
- 0,
546
- 8,
547
- 0,
548
- "LATENT"
549
- ],
550
- [
551
- 21,
552
- 16,
553
- 0,
554
- 3,
555
- 1,
556
- "CONDITIONING"
557
- ],
558
- [
559
- 51,
560
- 8,
561
- 0,
562
- 9,
563
- 0,
564
- "IMAGE"
565
- ],
566
- [
567
- 53,
568
- 4,
569
- 2,
570
- 8,
571
- 1,
572
- "VAE"
573
- ],
574
- [
575
- 80,
576
- 40,
577
- 0,
578
- 3,
579
- 2,
580
- "CONDITIONING"
581
- ],
582
- [
583
- 96,
584
- 43,
585
- 0,
586
- 16,
587
- 0,
588
- "CLIP"
589
- ],
590
- [
591
- 97,
592
- 43,
593
- 0,
594
- 40,
595
- 0,
596
- "CLIP"
597
- ],
598
- [
599
- 100,
600
- 53,
601
- 0,
602
- 3,
603
- 3,
604
- "LATENT"
605
- ],
606
- [
607
- 101,
608
- 4,
609
- 0,
610
- 54,
611
- 0,
612
- "MODEL"
613
- ],
614
- [
615
- 103,
616
- 54,
617
- 0,
618
- 55,
619
- 0,
620
- "*"
621
- ],
622
- [
623
- 104,
624
- 55,
625
- 0,
626
- 3,
627
- 0,
628
- "MODEL"
629
- ]
630
- ],
631
- "groups": [
632
- {
633
- "id": 1,
634
- "title": "Different Text Encoder Configurations",
635
- "bounding": [
636
- -140,
637
- -100,
638
- 480,
639
- 528
640
- ],
641
- "color": "#3f789e",
642
- "font_size": 24,
643
- "flags": {}
644
- }
645
- ],
646
- "config": {},
647
- "extra": {
648
- "ds": {
649
- "scale": 0.6830134553650711,
650
- "offset": [
651
- -94.64810292225889,
652
- 94.43701306285806
653
- ]
654
- }
655
- },
656
- "version": 0.4
657
- }
custom_nodes/Comfy-WaveSpeed/workflows/sdxl.json DELETED
@@ -1,706 +0,0 @@
1
- {
2
- "last_node_id": 36,
3
- "last_link_id": 55,
4
- "nodes": [
5
- {
6
- "id": 33,
7
- "type": "CLIPTextEncodeSDXL",
8
- "pos": [
9
- 860,
10
- 160
11
- ],
12
- "size": [
13
- 220,
14
- 220
15
- ],
16
- "flags": {},
17
- "order": 10,
18
- "mode": 0,
19
- "inputs": [
20
- {
21
- "name": "clip",
22
- "type": "CLIP",
23
- "link": 50
24
- },
25
- {
26
- "name": "text_g",
27
- "type": "STRING",
28
- "link": 47,
29
- "slot_index": 1,
30
- "widget": {
31
- "name": "text_g"
32
- }
33
- },
34
- {
35
- "name": "text_l",
36
- "type": "STRING",
37
- "link": 48,
38
- "widget": {
39
- "name": "text_l"
40
- }
41
- }
42
- ],
43
- "outputs": [
44
- {
45
- "name": "CONDITIONING",
46
- "type": "CONDITIONING",
47
- "links": [
48
- 51
49
- ],
50
- "slot_index": 0,
51
- "shape": 3
52
- }
53
- ],
54
- "properties": {
55
- "Node name for S&R": "CLIPTextEncodeSDXL"
56
- },
57
- "widgets_values": [
58
- 4096,
59
- 4096,
60
- 0,
61
- 0,
62
- 4096,
63
- 4096,
64
- "blurry, animation, 3d render, illustration, toy, puppet, claymation, low quality, flag, nasa, mission patch",
65
- "blurry, animation, 3d render, illustration, toy, puppet, claymation, low quality, flag, nasa, mission patch"
66
- ],
67
- "color": "#322",
68
- "bgcolor": "#533"
69
- },
70
- {
71
- "id": 5,
72
- "type": "EmptyLatentImage",
73
- "pos": [
74
- 860,
75
- 440
76
- ],
77
- "size": [
78
- 220,
79
- 106
80
- ],
81
- "flags": {},
82
- "order": 0,
83
- "mode": 0,
84
- "inputs": [],
85
- "outputs": [
86
- {
87
- "name": "LATENT",
88
- "type": "LATENT",
89
- "links": [
90
- 2
91
- ],
92
- "slot_index": 0
93
- }
94
- ],
95
- "properties": {
96
- "Node name for S&R": "EmptyLatentImage"
97
- },
98
- "widgets_values": [
99
- 1024,
100
- 1024,
101
- 1
102
- ],
103
- "color": "#323",
104
- "bgcolor": "#535"
105
- },
106
- {
107
- "id": 11,
108
- "type": "Note",
109
- "pos": [
110
- 80,
111
- -200
112
- ],
113
- "size": [
114
- 282.48541259765625,
115
- 197.0584259033203
116
- ],
117
- "flags": {},
118
- "order": 1,
119
- "mode": 0,
120
- "inputs": [],
121
- "outputs": [],
122
- "properties": {
123
- "text": ""
124
- },
125
- "widgets_values": [
126
- "BASIC SDXL WORKFLOW WITH BASE MODEL ONLY\n========================================\n\nThis basic workflow only uses the BASE SDXL model. This can be useful for systems with limited resources as the REFINER takes another 6GB or ram.\n\nIf you use the embedded VAE be sure to download the \"0.9vae\" version because the 1.0 vae is considered defective."
127
- ],
128
- "color": "#432",
129
- "bgcolor": "#653"
130
- },
131
- {
132
- "id": 29,
133
- "type": "Note",
134
- "pos": [
135
- 620,
136
- 440
137
- ],
138
- "size": [
139
- 210,
140
- 477.3874816894531
141
- ],
142
- "flags": {},
143
- "order": 2,
144
- "mode": 0,
145
- "inputs": [],
146
- "outputs": [],
147
- "properties": {
148
- "text": ""
149
- },
150
- "widgets_values": [
151
- "SUPPORTED RESOLUTIONS\n=====================\n\nratio resolution\n-----------------\n0.5: 704×1408\n0.52: 704×1344\n0.57: 768×1344\n0.6: 768×1280\n0.68: 832×1216\n0.72: 832×1152\n0.78: 896×1152\n0.82: 896×1088\n0.88: 960×1088\n0.94: 960×1024\n1.0: 1024×1024\n1.07: 1024×960\n1.13: 1088×960\n1.21: 1088×896\n1.29: 1152×896\n1.38: 1152×832\n1.46: 1216×832\n1.67: 1280×768\n1.75: 1344×768\n1.91: 1344×704\n2.0: 1408×704\n2.09: 1472×704\n2.4: 1536×640\n2.5: 1600×640\n2.89: 1664×576\n3.0: 1728×576"
152
- ],
153
- "color": "#432",
154
- "bgcolor": "#653"
155
- },
156
- {
157
- "id": 28,
158
- "type": "SaveImage",
159
- "pos": [
160
- 1730,
161
- 50
162
- ],
163
- "size": [
164
- 688.129150390625,
165
- 728.7603759765625
166
- ],
167
- "flags": {},
168
- "order": 13,
169
- "mode": 0,
170
- "inputs": [
171
- {
172
- "name": "images",
173
- "type": "IMAGE",
174
- "link": 41
175
- }
176
- ],
177
- "outputs": [],
178
- "properties": {},
179
- "widgets_values": [
180
- "ComfyUI"
181
- ]
182
- },
183
- {
184
- "id": 32,
185
- "type": "PrimitiveNode",
186
- "pos": [
187
- 460,
188
- 180
189
- ],
190
- "size": [
191
- 338.24078369140625,
192
- 179.58128356933594
193
- ],
194
- "flags": {},
195
- "order": 3,
196
- "mode": 0,
197
- "inputs": [],
198
- "outputs": [
199
- {
200
- "name": "STRING",
201
- "type": "STRING",
202
- "links": [
203
- 47,
204
- 48
205
- ],
206
- "slot_index": 0,
207
- "widget": {
208
- "name": "text_g"
209
- }
210
- }
211
- ],
212
- "title": "negative",
213
- "properties": {
214
- "Run widget replace on values": false
215
- },
216
- "widgets_values": [
217
- "blurry, animation, 3d render, illustration, toy, puppet, claymation, low quality, flag, nasa, mission patch"
218
- ],
219
- "color": "#322",
220
- "bgcolor": "#533"
221
- },
222
- {
223
- "id": 34,
224
- "type": "Note",
225
- "pos": [
226
- 870,
227
- -300
228
- ],
229
- "size": [
230
- 210,
231
- 130
232
- ],
233
- "flags": {},
234
- "order": 4,
235
- "mode": 0,
236
- "inputs": [],
237
- "outputs": [],
238
- "properties": {
239
- "text": ""
240
- },
241
- "widgets_values": [
242
- "WIDTH/HEIGHT and TARGET_WIDTH/HEIGHT are both 4 times the latent size.\n\nThis generally grants a higher definition image."
243
- ],
244
- "color": "#432",
245
- "bgcolor": "#653"
246
- },
247
- {
248
- "id": 31,
249
- "type": "PrimitiveNode",
250
- "pos": [
251
- 460,
252
- -90
253
- ],
254
- "size": [
255
- 338.24078369140625,
256
- 179.58128356933594
257
- ],
258
- "flags": {},
259
- "order": 5,
260
- "mode": 0,
261
- "inputs": [],
262
- "outputs": [
263
- {
264
- "name": "STRING",
265
- "type": "STRING",
266
- "links": [
267
- 45,
268
- 46
269
- ],
270
- "slot_index": 0,
271
- "widget": {
272
- "name": "text_g"
273
- }
274
- }
275
- ],
276
- "title": "positive",
277
- "properties": {
278
- "Run widget replace on values": false
279
- },
280
- "widgets_values": [
281
- "a photo of an anthropomorphic fox wearing a spacesuit inside a sci-fi spaceship\n\ncinematic, dramatic lighting, high resolution, detailed, 4k"
282
- ],
283
- "color": "#232",
284
- "bgcolor": "#353"
285
- },
286
- {
287
- "id": 35,
288
- "type": "Note",
289
- "pos": [
290
- 464,
291
- -235
292
- ],
293
- "size": [
294
- 330.7162780761719,
295
- 95.14419555664062
296
- ],
297
- "flags": {},
298
- "order": 6,
299
- "mode": 0,
300
- "inputs": [],
301
- "outputs": [],
302
- "properties": {
303
- "text": ""
304
- },
305
- "widgets_values": [
306
- "Note that we send the same prompt to both TEXT_G and TEXT_L, you can experiment with different prompts but using the same seems to lead to more predictable results."
307
- ],
308
- "color": "#432",
309
- "bgcolor": "#653"
310
- },
311
- {
312
- "id": 30,
313
- "type": "CLIPTextEncodeSDXL",
314
- "pos": [
315
- 860,
316
- -120
317
- ],
318
- "size": [
319
- 220,
320
- 220
321
- ],
322
- "flags": {},
323
- "order": 9,
324
- "mode": 0,
325
- "inputs": [
326
- {
327
- "name": "clip",
328
- "type": "CLIP",
329
- "link": 49
330
- },
331
- {
332
- "name": "text_g",
333
- "type": "STRING",
334
- "link": 45,
335
- "slot_index": 1,
336
- "widget": {
337
- "name": "text_g"
338
- }
339
- },
340
- {
341
- "name": "text_l",
342
- "type": "STRING",
343
- "link": 46,
344
- "widget": {
345
- "name": "text_l"
346
- }
347
- }
348
- ],
349
- "outputs": [
350
- {
351
- "name": "CONDITIONING",
352
- "type": "CONDITIONING",
353
- "links": [
354
- 52
355
- ],
356
- "slot_index": 0,
357
- "shape": 3
358
- }
359
- ],
360
- "properties": {
361
- "Node name for S&R": "CLIPTextEncodeSDXL"
362
- },
363
- "widgets_values": [
364
- 4096,
365
- 4096,
366
- 0,
367
- 0,
368
- 4096,
369
- 4096,
370
- "a photo of an anthropomorphic fox wearing a spacesuit inside a sci-fi spaceship\n\ncinematic, dramatic lighting, high resolution, detailed, 4k",
371
- "a photo of an anthropomorphic fox wearing a spacesuit inside a sci-fi spaceship\n\ncinematic, dramatic lighting, high resolution, detailed, 4k"
372
- ],
373
- "color": "#232",
374
- "bgcolor": "#353"
375
- },
376
- {
377
- "id": 4,
378
- "type": "CheckpointLoaderSimple",
379
- "pos": [
380
- -27,
381
- 62
382
- ],
383
- "size": [
384
- 398.7421875,
385
- 98
386
- ],
387
- "flags": {},
388
- "order": 7,
389
- "mode": 0,
390
- "inputs": [],
391
- "outputs": [
392
- {
393
- "name": "MODEL",
394
- "type": "MODEL",
395
- "links": [
396
- 54
397
- ],
398
- "slot_index": 0
399
- },
400
- {
401
- "name": "CLIP",
402
- "type": "CLIP",
403
- "links": [
404
- 49,
405
- 50
406
- ],
407
- "slot_index": 1
408
- },
409
- {
410
- "name": "VAE",
411
- "type": "VAE",
412
- "links": [
413
- 43
414
- ],
415
- "slot_index": 2
416
- }
417
- ],
418
- "properties": {
419
- "Node name for S&R": "CheckpointLoaderSimple"
420
- },
421
- "widgets_values": [
422
- "sd_xl_base_1.0.safetensors"
423
- ],
424
- "color": "#223",
425
- "bgcolor": "#335"
426
- },
427
- {
428
- "id": 8,
429
- "type": "VAEDecode",
430
- "pos": [
431
- 1540,
432
- 40
433
- ],
434
- "size": [
435
- 140,
436
- 60
437
- ],
438
- "flags": {},
439
- "order": 12,
440
- "mode": 0,
441
- "inputs": [
442
- {
443
- "name": "samples",
444
- "type": "LATENT",
445
- "link": 53
446
- },
447
- {
448
- "name": "vae",
449
- "type": "VAE",
450
- "link": 43
451
- }
452
- ],
453
- "outputs": [
454
- {
455
- "name": "IMAGE",
456
- "type": "IMAGE",
457
- "links": [
458
- 41
459
- ],
460
- "slot_index": 0
461
- }
462
- ],
463
- "properties": {
464
- "Node name for S&R": "VAEDecode"
465
- },
466
- "widgets_values": [],
467
- "color": "#323",
468
- "bgcolor": "#535"
469
- },
470
- {
471
- "id": 3,
472
- "type": "KSampler",
473
- "pos": [
474
- 1190,
475
- 30
476
- ],
477
- "size": [
478
- 300,
479
- 262
480
- ],
481
- "flags": {},
482
- "order": 11,
483
- "mode": 0,
484
- "inputs": [
485
- {
486
- "name": "model",
487
- "type": "MODEL",
488
- "link": 55
489
- },
490
- {
491
- "name": "positive",
492
- "type": "CONDITIONING",
493
- "link": 52
494
- },
495
- {
496
- "name": "negative",
497
- "type": "CONDITIONING",
498
- "link": 51
499
- },
500
- {
501
- "name": "latent_image",
502
- "type": "LATENT",
503
- "link": 2
504
- }
505
- ],
506
- "outputs": [
507
- {
508
- "name": "LATENT",
509
- "type": "LATENT",
510
- "links": [
511
- 53
512
- ],
513
- "slot_index": 0
514
- }
515
- ],
516
- "properties": {
517
- "Node name for S&R": "KSampler"
518
- },
519
- "widgets_values": [
520
- 0,
521
- "fixed",
522
- 25,
523
- 6.5,
524
- "dpmpp_2m_sde",
525
- "exponential",
526
- 1
527
- ],
528
- "color": "#2a363b",
529
- "bgcolor": "#3f5159"
530
- },
531
- {
532
- "id": 36,
533
- "type": "ApplyFBCacheOnModel",
534
- "pos": [
535
- 1180,
536
- -250
537
- ],
538
- "size": [
539
- 315,
540
- 154
541
- ],
542
- "flags": {},
543
- "order": 8,
544
- "mode": 0,
545
- "inputs": [
546
- {
547
- "name": "model",
548
- "type": "MODEL",
549
- "link": 54
550
- }
551
- ],
552
- "outputs": [
553
- {
554
- "name": "MODEL",
555
- "type": "MODEL",
556
- "links": [
557
- 55
558
- ],
559
- "slot_index": 0
560
- }
561
- ],
562
- "properties": {
563
- "Node name for S&R": "ApplyFBCacheOnModel"
564
- },
565
- "widgets_values": [
566
- "diffusion_model",
567
- 0.2,
568
- 0,
569
- 1,
570
- -1
571
- ]
572
- }
573
- ],
574
- "links": [
575
- [
576
- 2,
577
- 5,
578
- 0,
579
- 3,
580
- 3,
581
- "LATENT"
582
- ],
583
- [
584
- 41,
585
- 8,
586
- 0,
587
- 28,
588
- 0,
589
- "IMAGE"
590
- ],
591
- [
592
- 43,
593
- 4,
594
- 2,
595
- 8,
596
- 1,
597
- "VAE"
598
- ],
599
- [
600
- 45,
601
- 31,
602
- 0,
603
- 30,
604
- 1,
605
- "STRING"
606
- ],
607
- [
608
- 46,
609
- 31,
610
- 0,
611
- 30,
612
- 2,
613
- "STRING"
614
- ],
615
- [
616
- 47,
617
- 32,
618
- 0,
619
- 33,
620
- 1,
621
- "STRING"
622
- ],
623
- [
624
- 48,
625
- 32,
626
- 0,
627
- 33,
628
- 2,
629
- "STRING"
630
- ],
631
- [
632
- 49,
633
- 4,
634
- 1,
635
- 30,
636
- 0,
637
- "CLIP"
638
- ],
639
- [
640
- 50,
641
- 4,
642
- 1,
643
- 33,
644
- 0,
645
- "CLIP"
646
- ],
647
- [
648
- 51,
649
- 33,
650
- 0,
651
- 3,
652
- 2,
653
- "CONDITIONING"
654
- ],
655
- [
656
- 52,
657
- 30,
658
- 0,
659
- 3,
660
- 1,
661
- "CONDITIONING"
662
- ],
663
- [
664
- 53,
665
- 3,
666
- 0,
667
- 8,
668
- 0,
669
- "LATENT"
670
- ],
671
- [
672
- 54,
673
- 4,
674
- 0,
675
- 36,
676
- 0,
677
- "MODEL"
678
- ],
679
- [
680
- 55,
681
- 36,
682
- 0,
683
- 3,
684
- 0,
685
- "MODEL"
686
- ]
687
- ],
688
- "groups": [],
689
- "config": {},
690
- "extra": {
691
- "ds": {
692
- "scale": 0.6993286095567135,
693
- "offset": [
694
- 370.5282918698711,
695
- 500.1938384498709
696
- ]
697
- },
698
- "node_versions": {
699
- "comfy-core": "v0.3.10-44-g2ff3104f",
700
- "Comfy-WaveSpeed": "805b67c2900f885bbc89de97d143e1a55a5881e9"
701
- },
702
- "VHS_latentpreview": false,
703
- "VHS_latentpreviewrate": 0
704
- },
705
- "version": 0.4
706
- }
custom_nodes/ComfyUI-3D-Pack/.gitattributes DELETED
@@ -1,2 +0,0 @@
1
- # Auto detect text files and perform LF normalization
2
- * text=auto
 
custom_nodes/ComfyUI-3D-Pack/.github/FUNDING.yml DELETED
@@ -1,13 +0,0 @@
1
- # These are supported funding model platforms
2
-
3
- github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
4
- patreon: # Replace with a single Patreon username
5
- open_collective: # Replace with a single Open Collective username
6
- ko_fi: # Replace with a single Ko-fi username
7
- tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
8
- community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
9
- liberapay: # Replace with a single Liberapay username
10
- issuehunt: # Replace with a single IssueHunt username
11
- lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
12
- polar: # Replace with a single Polar username
13
- custom: ['https://twitter.com/janusch_patas']
 
custom_nodes/ComfyUI-3D-Pack/.github/workflows/publish.yml DELETED
@@ -1,21 +0,0 @@
1
- name: Publish to Comfy registry
2
- on:
3
- workflow_dispatch:
4
- push:
5
- branches:
6
- - main
7
- paths:
8
- - "pyproject.toml"
9
-
10
- jobs:
11
- publish-node:
12
- name: Publish Custom Node to registry
13
- runs-on: ubuntu-latest
14
- steps:
15
- - name: Check out code
16
- uses: actions/checkout@v4
17
- - name: Publish Custom Node
18
- uses: Comfy-Org/publish-node-action@main
19
- with:
20
- ## Add your own personal access token to your Github Repository secrets and reference it here.
21
- personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
 
custom_nodes/ComfyUI-3D-Pack/.gitignore DELETED
@@ -1,172 +0,0 @@
1
- # Byte-compiled / optimized / DLL files
2
- __pycache__/
3
- *.py[cod]
4
- *$py.class
5
-
6
- # C extensions
7
- *.so
8
-
9
- # Distribution / packaging
10
- .Python
11
- build/
12
- develop-eggs/
13
- dist/
14
- downloads/
15
- eggs/
16
- .eggs/
17
- lib/
18
- lib64/
19
- parts/
20
- sdist/
21
- var/
22
- wheels/
23
- share/python-wheels/
24
- *.egg-info/
25
- .installed.cfg
26
- *.egg
27
- MANIFEST
28
-
29
- # PyInstaller
30
- # Usually these files are written by a python script from a template
31
- # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
- *.manifest
33
- *.spec
34
-
35
- # Installer logs
36
- pip-log.txt
37
- pip-delete-this-directory.txt
38
-
39
- # Unit test / coverage reports
40
- htmlcov/
41
- .tox/
42
- .nox/
43
- .coverage
44
- .coverage.*
45
- .cache
46
- nosetests.xml
47
- coverage.xml
48
- *.cover
49
- *.py,cover
50
- .hypothesis/
51
- .pytest_cache/
52
- cover/
53
-
54
- # Translations
55
- *.mo
56
- *.pot
57
-
58
- # Django stuff:
59
- *.log
60
- local_settings.py
61
- db.sqlite3
62
- db.sqlite3-journal
63
-
64
- # Flask stuff:
65
- instance/
66
- .webassets-cache
67
-
68
- # Scrapy stuff:
69
- .scrapy
70
-
71
- # Sphinx documentation
72
- docs/_build/
73
-
74
- # PyBuilder
75
- .pybuilder/
76
- target/
77
-
78
- # Jupyter Notebook
79
- .ipynb_checkpoints
80
-
81
- # IPython
82
- profile_default/
83
- ipython_config.py
84
-
85
- # pyenv
86
- # For a library or package, you might want to ignore these files since the code is
87
- # intended to run in multiple environments; otherwise, check them in:
88
- # .python-version
89
-
90
- # pipenv
91
- # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
- # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
- # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
- # install all needed dependencies.
95
- #Pipfile.lock
96
-
97
- # poetry
98
- # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99
- # This is especially recommended for binary packages to ensure reproducibility, and is more
100
- # commonly ignored for libraries.
101
- # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102
- #poetry.lock
103
-
104
- # PEP 582; used by e.g. github.com/David-OConnor/pyflow
105
- __pypackages__/
106
-
107
- # Celery stuff
108
- celerybeat-schedule
109
- celerybeat.pid
110
-
111
- # SageMath parsed files
112
- *.sage.py
113
-
114
- # Environments
115
- .env
116
- .venv
117
- env/
118
- venv/
119
- ENV/
120
- env.bak/
121
- venv.bak/
122
-
123
- # Spyder project settings
124
- .spyderproject
125
- .spyproject
126
-
127
- # Rope project settings
128
- .ropeproject
129
-
130
- # mkdocs documentation
131
- /site
132
-
133
- # mypy
134
- .mypy_cache/
135
- .dmypy.json
136
- dmypy.json
137
-
138
- # Pyre type checker
139
- .pyre/
140
-
141
- # pytype static type analyzer
142
- .pytype/
143
-
144
- # Cython debug symbols
145
- cython_debug/
146
-
147
- # PyCharm
148
- # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
149
- # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
150
- # and can be added to the global gitignore or merged into this file. For a more nuclear
151
- # option (not recommended) you can uncomment the following to ignore the entire idea folder.
152
- #.idea/
153
-
154
- # Model files
155
- *.ckpt
156
- *.bin
157
- *.pth
158
- *.safetensors
159
- *.pkl
160
- *.pt
161
- .huggingface
162
-
163
- # Ignore test outputs
164
- outputs/
165
- output/
166
-
167
- # Ignore folders only used for build dependencies
168
- _Pre_Builds/_Build_Dependencies
169
- _Pre_Builds/_Build_Wheels
170
- _Pre_Builds/_Libs
171
-
172
- .idea
 
custom_nodes/ComfyUI-3D-Pack/.vscode/settings.json DELETED
@@ -1,8 +0,0 @@
1
- {
2
- "cmake.sourceDirectory": "C:/Users/reall/Softwares/ComfyUI_windows_portable/ComfyUI/custom_nodes/ComfyUI-3D-Pack/diff-gaussian-rasterization",
3
- "python.analysis.extraPaths": [
4
- "./gen_3d_modules",
5
- "./MVs_Algorithms",
6
- "./_Pre_Builds/_Build_Scripts"
7
- ]
8
- }
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/CRM/Put Convolutional Reconstruction Model here.txt DELETED
File without changes
custom_nodes/ComfyUI-3D-Pack/Checkpoints/CRM_T2I_V3/Put CRM_T2I_V3 model here.txt DELETED
File without changes
custom_nodes/ComfyUI-3D-Pack/Checkpoints/CharacterGen/.gitattributes DELETED
@@ -1,35 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/CharacterGen/2D_Stage/models/image_encoder/config.json DELETED
@@ -1,23 +0,0 @@
1
- {
2
- "_name_or_path": "./image_encoder",
3
- "architectures": [
4
- "CLIPVisionModelWithProjection"
5
- ],
6
- "attention_dropout": 0.0,
7
- "dropout": 0.0,
8
- "hidden_act": "gelu",
9
- "hidden_size": 1280,
10
- "image_size": 224,
11
- "initializer_factor": 1.0,
12
- "initializer_range": 0.02,
13
- "intermediate_size": 5120,
14
- "layer_norm_eps": 1e-05,
15
- "model_type": "clip_vision_model",
16
- "num_attention_heads": 16,
17
- "num_channels": 3,
18
- "num_hidden_layers": 32,
19
- "patch_size": 14,
20
- "projection_dim": 1024,
21
- "torch_dtype": "float16",
22
- "transformers_version": "4.28.0.dev0"
23
- }
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/CharacterGen/3D_Stage/models/base/README.md DELETED
@@ -1,60 +0,0 @@
1
- ---
2
- license: apache-2.0
3
- tags:
4
- - dino
5
- - vision
6
- ---
7
-
8
- # Vision Transformer (base-sized model) trained using DINOv2
9
-
10
- Vision Transformer (ViT) model trained using the DINOv2 method. It was introduced in the paper [DINOv2: Learning Robust Visual Features without Supervision](https://arxiv.org/abs/2304.07193) by Oquab et al. and first released in [this repository](https://github.com/facebookresearch/dinov2).
11
-
12
- Disclaimer: The team releasing DINOv2 did not write a model card for this model so this model card has been written by the Hugging Face team.
13
-
14
- ## Model description
15
-
16
- The Vision Transformer (ViT) is a transformer encoder model (BERT-like) pretrained on a large collection of images in a self-supervised fashion.
17
-
18
- Images are presented to the model as a sequence of fixed-size patches, which are linearly embedded. One also adds a [CLS] token to the beginning of a sequence to use it for classification tasks. One also adds absolute position embeddings before feeding the sequence to the layers of the Transformer encoder.
19
-
20
- Note that this model does not include any fine-tuned heads.
21
-
22
- By pre-training the model, it learns an inner representation of images that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled images for instance, you can train a standard classifier by placing a linear layer on top of the pre-trained encoder. One typically places a linear layer on top of the [CLS] token, as the last hidden state of this token can be seen as a representation of an entire image.
23
-
24
- ## Intended uses & limitations
25
-
26
- You can use the raw model for feature extraction. See the [model hub](https://huggingface.co/models?search=facebook/dinov2) to look for
27
- fine-tuned versions on a task that interests you.
28
-
29
- ### How to use
30
-
31
- Here is how to use this model:
32
-
33
- ```python
34
- from transformers import AutoImageProcessor, AutoModel
35
- from PIL import Image
36
- import requests
37
-
38
- url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
39
- image = Image.open(requests.get(url, stream=True).raw)
40
-
41
- processor = AutoImageProcessor.from_pretrained('facebook/dinov2-base')
42
- model = AutoModel.from_pretrained('facebook/dinov2-base')
43
-
44
- inputs = processor(images=image, return_tensors="pt")
45
- outputs = model(**inputs)
46
- last_hidden_states = outputs.last_hidden_state
47
- ```
48
-
49
- ### BibTeX entry and citation info
50
-
51
- ```bibtex
52
- @misc{oquab2023dinov2,
53
- title={DINOv2: Learning Robust Visual Features without Supervision},
54
- author={Maxime Oquab and Timothée Darcet and Théo Moutakanni and Huy Vo and Marc Szafraniec and Vasil Khalidov and Pierre Fernandez and Daniel Haziza and Francisco Massa and Alaaeldin El-Nouby and Mahmoud Assran and Nicolas Ballas and Wojciech Galuba and Russell Howes and Po-Yao Huang and Shang-Wen Li and Ishan Misra and Michael Rabbat and Vasu Sharma and Gabriel Synnaeve and Hu Xu and Hervé Jegou and Julien Mairal and Patrick Labatut and Armand Joulin and Piotr Bojanowski},
55
- year={2023},
56
- eprint={2304.07193},
57
- archivePrefix={arXiv},
58
- primaryClass={cs.CV}
59
- }
60
- ```
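A minimal sketch of the linear-probe use described under "Intended uses & limitations" above, assuming the same `facebook/dinov2-base` checkpoint; the 10-class head and the sample image are illustrative and not part of the original model card:

```python
# Illustrative linear probe on the DINOv2 [CLS] token (the 10-class head is an assumption).
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModel

url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained('facebook/dinov2-base')
backbone = AutoModel.from_pretrained('facebook/dinov2-base')
classifier = torch.nn.Linear(backbone.config.hidden_size, 10)  # hypothetical 10-class head

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    cls_token = backbone(**inputs).last_hidden_state[:, 0]  # [CLS] embedding, shape (1, 768)
logits = classifier(cls_token)  # train this head on labeled images for classification
```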
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/CharacterGen/3D_Stage/models/base/config.json DELETED
@@ -1,24 +0,0 @@
1
- {
2
- "architectures": [
3
- "Dinov2Model"
4
- ],
5
- "attention_probs_dropout_prob": 0.0,
6
- "drop_path_rate": 0.0,
7
- "hidden_act": "gelu",
8
- "hidden_dropout_prob": 0.0,
9
- "hidden_size": 768,
10
- "image_size": 518,
11
- "initializer_range": 0.02,
12
- "layer_norm_eps": 1e-06,
13
- "layerscale_value": 1.0,
14
- "mlp_ratio": 4,
15
- "model_type": "dinov2",
16
- "num_attention_heads": 12,
17
- "num_channels": 3,
18
- "num_hidden_layers": 12,
19
- "patch_size": 14,
20
- "qkv_bias": true,
21
- "torch_dtype": "float32",
22
- "transformers_version": "4.31.0.dev0",
23
- "use_swiglu_ffn": false
24
- }
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/CharacterGen/3D_Stage/models/base/preprocessor_config.json DELETED
@@ -1,27 +0,0 @@
1
- {
2
- "crop_size": {
3
- "height": 256,
4
- "width": 256
5
- },
6
- "do_center_crop": false,
7
- "do_convert_rgb": false,
8
- "do_normalize": true,
9
- "do_rescale": true,
10
- "do_resize": false,
11
- "image_mean": [
12
- 0.485,
13
- 0.456,
14
- 0.406
15
- ],
16
- "image_processor_type": "BitImageProcessor",
17
- "image_std": [
18
- 0.229,
19
- 0.224,
20
- 0.225
21
- ],
22
- "resample": 3,
23
- "rescale_factor": 0.00392156862745098,
24
- "size": {
25
- "shortest_edge": 256
26
- }
27
- }
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/CharacterGen/Put CharacterGen Model here.txt DELETED
File without changes
custom_nodes/ComfyUI-3D-Pack/Checkpoints/CharacterGen/README.md DELETED
@@ -1,22 +0,0 @@
1
- ---
2
- license: apache-2.0
3
- language:
4
- - en
5
- pipeline_tag: image-to-3d
6
- ---
7
-
8
- These are the model parameters of [CharacterGen](https://github.com/zjp-shadow/CharacterGen).
9
- Thanks for downloading the checkpoint~
10
- If you find our work helpful, please consider citing
11
-
12
- ```bibtex
13
- @article
14
- {peng2024charactergen,
15
- title ={CharacterGen: Efficient 3D Character Generation from Single Images with Multi-View Pose Canonicalization},
16
- author ={Hao-Yang Peng and Jia-Peng Zhang and Meng-Hao Guo and Yan-Pei Cao and Shi-Min Hu},
17
- journal ={ACM Transactions on Graphics (TOG)},
18
- year ={2024},
19
- volume ={43},
20
- number ={4},
21
- doi ={10.1145/3658217}
22
- }
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/Craftsman/image-to-shape-diffusion/clip-mvrgb-modln-l256-e64-ne8-nd16-nl6-aligned-vae/Put Craftsman Model here.txt DELETED
File without changes
custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/JeffreyXiang/TRELLIS-image-large/.gitattributes DELETED
@@ -1,35 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/JeffreyXiang/TRELLIS-image-large/README.md DELETED
@@ -1,16 +0,0 @@
1
- ---
2
- library_name: trellis
3
- pipeline_tag: image-to-3d
4
- license: mit
5
- language:
6
- - en
7
- ---
8
- # TRELLIS Image Large
9
-
10
- <!-- Provide a quick summary of what the model is/does. -->
11
-
12
- The image-conditioned version of TRELLIS, a large 3D generative model. It was introduced in the paper [Structured 3D Latents for Scalable and Versatile 3D Generation](https://huggingface.co/papers/2412.01506).
13
-
14
- Project page: https://trellis3d.github.io/
15
-
16
- Code: https://github.com/Microsoft/TRELLIS
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/.gitattributes DELETED
@@ -1,35 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/README.md DELETED
@@ -1,5 +0,0 @@
1
- ---
2
- license: mit
3
- ---
4
- To enable downloading the model with the huggingface_hub package.
5
- <br>Copied from https://huggingface.co/spaces/Wuvin/Unique3D/tree/main/ckpt
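A minimal sketch of that huggingface_hub download; the repo id and local folder below are assumptions inferred from this pack's checkpoint path, not stated in the deleted README:

```python
# Hedged sketch: fetch the Unique3D checkpoints via huggingface_hub.
# repo_id and local_dir are assumptions inferred from the folder layout above.
from huggingface_hub import snapshot_download

snapshot_download(
    repo_id="MrForExample/Unique3D",
    local_dir="Checkpoints/Diffusers/MrForExample/Unique3D",
)
```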
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/image2mvimage/feature_extractor/preprocessor_config.json DELETED
@@ -1,44 +0,0 @@
1
- {
2
- "_valid_processor_keys": [
3
- "images",
4
- "do_resize",
5
- "size",
6
- "resample",
7
- "do_center_crop",
8
- "crop_size",
9
- "do_rescale",
10
- "rescale_factor",
11
- "do_normalize",
12
- "image_mean",
13
- "image_std",
14
- "do_convert_rgb",
15
- "return_tensors",
16
- "data_format",
17
- "input_data_format"
18
- ],
19
- "crop_size": {
20
- "height": 224,
21
- "width": 224
22
- },
23
- "do_center_crop": true,
24
- "do_convert_rgb": true,
25
- "do_normalize": true,
26
- "do_rescale": true,
27
- "do_resize": true,
28
- "image_mean": [
29
- 0.48145466,
30
- 0.4578275,
31
- 0.40821073
32
- ],
33
- "image_processor_type": "CLIPImageProcessor",
34
- "image_std": [
35
- 0.26862954,
36
- 0.26130258,
37
- 0.27577711
38
- ],
39
- "resample": 3,
40
- "rescale_factor": 0.00392156862745098,
41
- "size": {
42
- "shortest_edge": 224
43
- }
44
- }
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/image2mvimage/image_encoder/config.json DELETED
@@ -1,23 +0,0 @@
1
- {
2
- "_name_or_path": "lambdalabs/sd-image-variations-diffusers",
3
- "architectures": [
4
- "CLIPVisionModelWithProjection"
5
- ],
6
- "attention_dropout": 0.0,
7
- "dropout": 0.0,
8
- "hidden_act": "quick_gelu",
9
- "hidden_size": 1024,
10
- "image_size": 224,
11
- "initializer_factor": 1.0,
12
- "initializer_range": 0.02,
13
- "intermediate_size": 4096,
14
- "layer_norm_eps": 1e-05,
15
- "model_type": "clip_vision_model",
16
- "num_attention_heads": 16,
17
- "num_channels": 3,
18
- "num_hidden_layers": 24,
19
- "patch_size": 14,
20
- "projection_dim": 768,
21
- "torch_dtype": "float32",
22
- "transformers_version": "4.39.3"
23
- }
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/image2mvimage/model_index.json DELETED
@@ -1,31 +0,0 @@
1
- {
2
- "_class_name": "StableDiffusionImage2MVCustomPipeline",
3
- "_diffusers_version": "0.27.2",
4
- "_name_or_path": "lambdalabs/sd-image-variations-diffusers",
5
- "condition_offset": true,
6
- "feature_extractor": [
7
- "transformers",
8
- "CLIPImageProcessor"
9
- ],
10
- "image_encoder": [
11
- "transformers",
12
- "CLIPVisionModelWithProjection"
13
- ],
14
- "requires_safety_checker": true,
15
- "safety_checker": [
16
- null,
17
- null
18
- ],
19
- "scheduler": [
20
- "diffusers",
21
- "DDIMScheduler"
22
- ],
23
- "unet": [
24
- "diffusers",
25
- "UNet2DConditionModel"
26
- ],
27
- "vae": [
28
- "diffusers",
29
- "AutoencoderKL"
30
- ]
31
- }
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/image2mvimage/scheduler/scheduler_config.json DELETED
@@ -1,20 +0,0 @@
1
- {
2
- "_class_name": "DDIMScheduler",
3
- "_diffusers_version": "0.27.2",
4
- "beta_end": 0.012,
5
- "beta_schedule": "scaled_linear",
6
- "beta_start": 0.00085,
7
- "clip_sample": false,
8
- "clip_sample_range": 1.0,
9
- "dynamic_thresholding_ratio": 0.995,
10
- "num_train_timesteps": 1000,
11
- "prediction_type": "epsilon",
12
- "rescale_betas_zero_snr": false,
13
- "sample_max_value": 1.0,
14
- "set_alpha_to_one": false,
15
- "skip_prk_steps": true,
16
- "steps_offset": 1,
17
- "thresholding": false,
18
- "timestep_spacing": "leading",
19
- "trained_betas": null
20
- }
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/image2mvimage/unet/config.json DELETED
@@ -1,68 +0,0 @@
1
- {
2
- "_class_name": "UnifieldWrappedUNet",
3
- "_diffusers_version": "0.27.2",
4
- "_name_or_path": "lambdalabs/sd-image-variations-diffusers",
5
- "act_fn": "silu",
6
- "addition_embed_type": null,
7
- "addition_embed_type_num_heads": 64,
8
- "addition_time_embed_dim": null,
9
- "attention_head_dim": 8,
10
- "attention_type": "default",
11
- "block_out_channels": [
12
- 320,
13
- 640,
14
- 1280,
15
- 1280
16
- ],
17
- "center_input_sample": false,
18
- "class_embed_type": null,
19
- "class_embeddings_concat": false,
20
- "conv_in_kernel": 3,
21
- "conv_out_kernel": 3,
22
- "cross_attention_dim": 768,
23
- "cross_attention_norm": null,
24
- "down_block_types": [
25
- "CrossAttnDownBlock2D",
26
- "CrossAttnDownBlock2D",
27
- "CrossAttnDownBlock2D",
28
- "DownBlock2D"
29
- ],
30
- "downsample_padding": 1,
31
- "dropout": 0.0,
32
- "dual_cross_attention": false,
33
- "encoder_hid_dim": null,
34
- "encoder_hid_dim_type": null,
35
- "flip_sin_to_cos": true,
36
- "freq_shift": 0,
37
- "in_channels": 8,
38
- "layers_per_block": 2,
39
- "mid_block_only_cross_attention": null,
40
- "mid_block_scale_factor": 1,
41
- "mid_block_type": "UNetMidBlock2DCrossAttn",
42
- "norm_eps": 1e-05,
43
- "norm_num_groups": 32,
44
- "num_attention_heads": null,
45
- "num_class_embeds": 8,
46
- "only_cross_attention": false,
47
- "out_channels": 4,
48
- "projection_class_embeddings_input_dim": null,
49
- "resnet_out_scale_factor": 1.0,
50
- "resnet_skip_time_act": false,
51
- "resnet_time_scale_shift": "default",
52
- "reverse_transformer_layers_per_block": null,
53
- "sample_size": 64,
54
- "time_cond_proj_dim": null,
55
- "time_embedding_act_fn": null,
56
- "time_embedding_dim": null,
57
- "time_embedding_type": "positional",
58
- "timestep_post_act": null,
59
- "transformer_layers_per_block": 1,
60
- "up_block_types": [
61
- "UpBlock2D",
62
- "CrossAttnUpBlock2D",
63
- "CrossAttnUpBlock2D",
64
- "CrossAttnUpBlock2D"
65
- ],
66
- "upcast_attention": false,
67
- "use_linear_projection": false
68
- }
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/image2mvimage/vae/config.json DELETED
@@ -1,34 +0,0 @@
1
- {
2
- "_class_name": "AutoencoderKL",
3
- "_diffusers_version": "0.27.2",
4
- "_name_or_path": "lambdalabs/sd-image-variations-diffusers",
5
- "act_fn": "silu",
6
- "block_out_channels": [
7
- 128,
8
- 256,
9
- 512,
10
- 512
11
- ],
12
- "down_block_types": [
13
- "DownEncoderBlock2D",
14
- "DownEncoderBlock2D",
15
- "DownEncoderBlock2D",
16
- "DownEncoderBlock2D"
17
- ],
18
- "force_upcast": true,
19
- "in_channels": 3,
20
- "latent_channels": 4,
21
- "latents_mean": null,
22
- "latents_std": null,
23
- "layers_per_block": 2,
24
- "norm_num_groups": 32,
25
- "out_channels": 3,
26
- "sample_size": 256,
27
- "scaling_factor": 0.18215,
28
- "up_block_types": [
29
- "UpDecoderBlock2D",
30
- "UpDecoderBlock2D",
31
- "UpDecoderBlock2D",
32
- "UpDecoderBlock2D"
33
- ]
34
- }
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/image2normal/feature_extractor/preprocessor_config.json DELETED
@@ -1,44 +0,0 @@
1
- {
2
- "_valid_processor_keys": [
3
- "images",
4
- "do_resize",
5
- "size",
6
- "resample",
7
- "do_center_crop",
8
- "crop_size",
9
- "do_rescale",
10
- "rescale_factor",
11
- "do_normalize",
12
- "image_mean",
13
- "image_std",
14
- "do_convert_rgb",
15
- "return_tensors",
16
- "data_format",
17
- "input_data_format"
18
- ],
19
- "crop_size": {
20
- "height": 224,
21
- "width": 224
22
- },
23
- "do_center_crop": true,
24
- "do_convert_rgb": true,
25
- "do_normalize": true,
26
- "do_rescale": true,
27
- "do_resize": true,
28
- "image_mean": [
29
- 0.48145466,
30
- 0.4578275,
31
- 0.40821073
32
- ],
33
- "image_processor_type": "CLIPImageProcessor",
34
- "image_std": [
35
- 0.26862954,
36
- 0.26130258,
37
- 0.27577711
38
- ],
39
- "resample": 3,
40
- "rescale_factor": 0.00392156862745098,
41
- "size": {
42
- "shortest_edge": 224
43
- }
44
- }
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/image2normal/image_encoder/config.json DELETED
@@ -1,23 +0,0 @@
1
- {
2
- "_name_or_path": "lambdalabs/sd-image-variations-diffusers",
3
- "architectures": [
4
- "CLIPVisionModelWithProjection"
5
- ],
6
- "attention_dropout": 0.0,
7
- "dropout": 0.0,
8
- "hidden_act": "quick_gelu",
9
- "hidden_size": 1024,
10
- "image_size": 224,
11
- "initializer_factor": 1.0,
12
- "initializer_range": 0.02,
13
- "intermediate_size": 4096,
14
- "layer_norm_eps": 1e-05,
15
- "model_type": "clip_vision_model",
16
- "num_attention_heads": 16,
17
- "num_channels": 3,
18
- "num_hidden_layers": 24,
19
- "patch_size": 14,
20
- "projection_dim": 768,
21
- "torch_dtype": "bfloat16",
22
- "transformers_version": "4.39.3"
23
- }
 
custom_nodes/ComfyUI-3D-Pack/Checkpoints/Diffusers/MrForExample/Unique3D/image2normal/model_index.json DELETED
@@ -1,31 +0,0 @@
1
- {
2
- "_class_name": "StableDiffusionImageCustomPipeline",
3
- "_diffusers_version": "0.27.2",
4
- "_name_or_path": "lambdalabs/sd-image-variations-diffusers",
5
- "feature_extractor": [
6
- "transformers",
7
- "CLIPImageProcessor"
8
- ],
9
- "image_encoder": [
10
- "transformers",
11
- "CLIPVisionModelWithProjection"
12
- ],
13
- "noisy_cond_latents": false,
14
- "requires_safety_checker": true,
15
- "safety_checker": [
16
- null,
17
- null
18
- ],
19
- "scheduler": [
20
- "diffusers",
21
- "EulerAncestralDiscreteScheduler"
22
- ],
23
- "unet": [
24
- "diffusers",
25
- "UNet2DConditionModel"
26
- ],
27
- "vae": [
28
- "diffusers",
29
- "AutoencoderKL"
30
- ]
31
- }