{
"id": "91f6bbe2-ed41-4fd6-bac7-71d5b5864ecb",
"revision": 0,
"last_node_id": 83,
"last_link_id": 149,
"nodes": [
{
"id": 75,
"type": "ImageScaleToTotalPixels",
"pos": [
-60,
995
],
"size": [
270,
82
],
"flags": {},
"order": 11,
"mode": 0,
"inputs": [
{
"name": "image",
"type": "IMAGE",
"link": 140
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
141,
143
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.51",
"Node name for S&R": "ImageScaleToTotalPixels"
},
"widgets_values": [
"area",
1.68
]
},
{
"id": 69,
"type": "QwenImageDiffsynthControlnet",
"pos": [
810,
70
],
"size": [
310,
138
],
"flags": {},
"order": 15,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 130
},
{
"name": "model_patch",
"type": "MODEL_PATCH",
"link": 129
},
{
"name": "vae",
"type": "VAE",
"link": 132
},
{
"name": "image",
"type": "IMAGE",
"link": 135
},
{
"name": "mask",
"shape": 7,
"type": "MASK",
"link": null
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
131
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.51",
"Node name for S&R": "QwenImageDiffsynthControlnet"
},
"widgets_values": [
1
]
},
{
"id": 66,
"type": "ModelSamplingAuraFlow",
"pos": [
810,
-40
],
"size": [
310,
58
],
"flags": {},
"order": 12,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 149
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
130
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.51",
"Node name for S&R": "ModelSamplingAuraFlow"
},
"widgets_values": [
3.1000000000000005
]
},
{
"id": 79,
"type": "MarkdownNote",
"pos": [
810,
760
],
"size": [
310,
140
],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [],
"outputs": [],
"title": "KSampler settings",
"properties": {},
"widgets_values": [
"You can test and find the best setting by yourself. The following table is for reference.\n\n| model | steps | cfg |\n|---------------------|---------------|---------------|\n| fp8_e4m3fn | 20 | 2.5 |\n| fp8_e4m3fn + 4 Steps lightning LoRA | 4 | 1.0 |\n"
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 8,
"type": "VAEDecode",
"pos": [
810,
950
],
"size": [
310,
46
],
"flags": {},
"order": 18,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 128
},
{
"name": "vae",
"type": "VAE",
"link": 76
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"slot_index": 0,
"links": [
110
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.51",
"Node name for S&R": "VAEDecode"
},
"widgets_values": []
},
{
"id": 70,
"type": "ModelPatchLoader",
"pos": [
-120,
130
],
"size": [
380,
60
],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "MODEL_PATCH",
"type": "MODEL_PATCH",
"links": [
129
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.51",
"Node name for S&R": "ModelPatchLoader",
"models": [
{
"name": "qwen_image_canny_diffsynth_controlnet.safetensors",
"url": "https://huggingface.co/Comfy-Org/Qwen-Image-DiffSynth-ControlNets/resolve/main/split_files/model_patches/qwen_image_canny_diffsynth_controlnet.safetensors",
"directory": "model_patches"
}
]
},
"widgets_values": [
"qwen_image_canny_diffsynth_controlnet.safetensors"
]
},
{
"id": 39,
"type": "VAELoader",
"pos": [
-120,
400
],
"size": [
380,
58
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "VAE",
"type": "VAE",
"slot_index": 0,
"links": [
76,
132,
144
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.51",
"Node name for S&R": "VAELoader",
"models": [
{
"name": "qwen_image_vae.safetensors",
"url": "https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/vae/qwen_image_vae.safetensors",
"directory": "vae"
}
]
},
"widgets_values": [
"qwen_image_vae.safetensors"
]
},
{
"id": 37,
"type": "UNETLoader",
"pos": [
-120,
0
],
"size": [
380,
82
],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"slot_index": 0,
"links": [
145
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.51",
"Node name for S&R": "UNETLoader",
"models": [
{
"name": "qwen_image_fp8_e4m3fn.safetensors",
"url": "https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/diffusion_models/qwen_image_fp8_e4m3fn.safetensors",
"directory": "diffusion_models"
}
]
},
"widgets_values": [
"qwen_image_fp8_e4m3fn.safetensors",
"default"
]
},
{
"id": 76,
"type": "VAEEncode",
"pos": [
640,
630
],
"size": [
140,
46
],
"flags": {
"collapsed": true
},
"order": 14,
"mode": 0,
"inputs": [
{
"name": "pixels",
"type": "IMAGE",
"link": 143
},
{
"name": "vae",
"type": "VAE",
"link": 144
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
142
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.51",
"Node name for S&R": "VAEEncode"
},
"widgets_values": []
},
{
"id": 68,
"type": "Note",
"pos": [
810,
-180
],
"size": [
310,
90
],
"flags": {},
"order": 4,
"mode": 0,
"inputs": [],
"outputs": [],
"properties": {},
"widgets_values": [
"Increase the shift if you get too many blurry/dark/bad images. Decrease if you want to try increasing detail."
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 38,
"type": "CLIPLoader",
"pos": [
-120,
240
],
"size": [
380,
106
],
"flags": {},
"order": 5,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "CLIP",
"type": "CLIP",
"slot_index": 0,
"links": [
74,
75
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.51",
"Node name for S&R": "CLIPLoader",
"models": [
{
"name": "qwen_2.5_vl_7b_fp8_scaled.safetensors",
"url": "https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/text_encoders/qwen_2.5_vl_7b_fp8_scaled.safetensors",
"directory": "text_encoders"
}
]
},
"widgets_values": [
"qwen_2.5_vl_7b_fp8_scaled.safetensors",
"qwen_image",
"default"
]
},
{
"id": 71,
"type": "LoadImage",
"pos": [
-70,
635
],
"size": [
274.080078125,
314.00006103515625
],
"flags": {},
"order": 6,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
140
]
},
{
"name": "MASK",
"type": "MASK",
"links": null
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.51",
"Node name for S&R": "LoadImage"
},
"widgets_values": [
"ComfyUI_00944_.png",
"image"
]
},
{
"id": 73,
"type": "PreviewImage",
"pos": [
340,
770
],
"size": [
380,
320
],
"flags": {},
"order": 16,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 136
}
],
"outputs": [],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.51",
"Node name for S&R": "PreviewImage"
},
"widgets_values": []
},
{
"id": 72,
"type": "Canny",
"pos": [
340,
630
],
"size": [
240,
90
],
"flags": {},
"order": 13,
"mode": 0,
"inputs": [
{
"name": "image",
"type": "IMAGE",
"link": 141
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
135,
136
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.51",
"Node name for S&R": "Canny"
},
"widgets_values": [
0.1,
0.2
],
"color": "#322",
"bgcolor": "#533"
},
{
"id": 60,
"type": "SaveImage",
"pos": [
1150,
-40
],
"size": [
970,
1030
],
"flags": {},
"order": 19,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 110
}
],
"outputs": [],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.51"
},
"widgets_values": [
"ComfyUI"
]
},
{
"id": 80,
"type": "LoraLoaderModelOnly",
"pos": [
320,
-10
],
"size": [
430,
82
],
"flags": {},
"order": 8,
"mode": 4,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 145
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
149
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.51",
"Node name for S&R": "LoraLoaderModelOnly",
"models": [
{
"name": "Qwen-Image-Lightning-4steps-V1.0.safetensors",
"url": "https://huggingface.co/lightx2v/Qwen-Image-Lightning/resolve/main/Qwen-Image-Lightning-4steps-V1.0.safetensors",
"directory": "loras"
}
]
},
"widgets_values": [
"Qwen-Image-Lightning-4steps-V1.0.safetensors",
1
]
},
{
"id": 6,
"type": "CLIPTextEncode",
"pos": [
300,
170
],
"size": [
460,
164.31304931640625
],
"flags": {},
"order": 9,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 74
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"slot_index": 0,
"links": [
46
]
}
],
"title": "CLIP Text Encode (Positive Prompt)",
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.51",
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"Conceptual makeup, a fairy girl with pink hair, pink shimmery scales dotted at the corners of her eyes, starry eyeshadow, makeup painting, thin eyebrows, three-dimensional features, a glossy finish, dazzling gold powder, silver fine glitter, a sense of layering and depth in the makeup, decorated with gold ornaments, pearlescent eyeshadow, dreamy makeup, soft pastels and subtle sparkles, a mysterious and fantasy-filled atmosphere, high-end makeup, dappled light on the face, soft lighting, optimal shadows, complex depth of field, dramatic lighting, clear focus, 8k, high quality, Fujifilm filter, surreal, a dreamy pastel wonderland, bright colors, a starry pink background, realistic.\n"
],
"color": "#232",
"bgcolor": "#353"
},
{
"id": 7,
"type": "CLIPTextEncode",
"pos": [
300,
380
],
"size": [
460,
140
],
"flags": {},
"order": 10,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 75
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"slot_index": 0,
"links": [
52
]
}
],
"title": "CLIP Text Encode (Negative Prompt)",
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.51",
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
" "
],
"color": "#223",
"bgcolor": "#335"
},
{
"id": 3,
"type": "KSampler",
"pos": [
810,
260
],
"size": [
310,
430
],
"flags": {},
"order": 17,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 131
},
{
"name": "positive",
"type": "CONDITIONING",
"link": 46
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 52
},
{
"name": "latent_image",
"type": "LATENT",
"link": 142
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"slot_index": 0,
"links": [
128
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.51",
"Node name for S&R": "KSampler"
},
"widgets_values": [
91832422759220,
"randomize",
20,
2.5,
"euler",
"simple",
1
]
},
{
"id": 78,
"type": "MarkdownNote",
"pos": [
-690,
-50
],
"size": [
540,
630
],
"flags": {},
"order": 7,
"mode": 0,
"inputs": [],
"outputs": [],
"title": "Model links",
"properties": {
"widget_ue_connectable": {}
},
"widgets_values": [
"[Tutorial](https://docs.comfy.org/tutorials/image/qwen/qwen-image) | [教程](https://docs.comfy.org/zh-CN/tutorials/image/qwen/qwen-image)\n\n\n## Model links\n\nYou can find all the models on [Huggingface](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/tree/main) or [Modelscope](https://modelscope.cn/models/Comfy-Org/Qwen-Image_ComfyUI/files)\n\n**Diffusion model**\n\n- [qwen_image_fp8_e4m3fn.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/diffusion_models/qwen_image_fp8_e4m3fn.safetensors)\n\n**Model patches**\n\n- [qwen_image_canny_diffsynth_controlnet.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image-DiffSynth-ControlNets/resolve/main/split_files/model_patches/qwen_image_canny_diffsynth_controlnet.safetensors)\n- [qwen_image_depth_diffsynth_controlnet.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image-DiffSynth-ControlNets/resolve/main/split_files/model_patches/qwen_image_depth_diffsynth_controlnet.safetensors)\n- [qwen_image_inpaint_diffsynth_controlnet.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image-DiffSynth-ControlNets/resolve/main/split_files/model_patches/qwen_image_inpaint_diffsynth_controlnet.safetensors)\n\n**LoRA**\n\n- [Qwen-Image-Lightning-4steps-V1.0.safetensors](https://huggingface.co/lightx2v/Qwen-Image-Lightning/resolve/main/Qwen-Image-Lightning-4steps-V1.0.safetensors)\n\n**Text encoder**\n\n- [qwen_2.5_vl_7b_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/text_encoders/qwen_2.5_vl_7b_fp8_scaled.safetensors)\n\n**VAE**\n\n- [qwen_image_vae.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/vae/qwen_image_vae.safetensors)\n\n\nModel Storage Location\n\n```\n📂 ComfyUI/\n├── 📂 models/\n│ ├── 📂 diffusion_models/\n│ │ ├── qwen_image_fp8_e4m3fn.safetensors\n│ │ └── qwen_image_distill_full_fp8_e4m3fn.safetensors\n│ ├── 📂 loras/\n│ │ └── Qwen-Image-Lightning-8steps-V1.0.safetensors\n│ ├── 📂 
model_patches/ # create one if you can't find it\n│ │ ├── qwen_image_depth_diffsynth_controlnet.safetensors\n│ │ ├── qwen_image_canny_diffsynth_controlnet.safetensors\n│ │ └── qwen_image_inpaint_diffsynth_controlnet.safetensors\n│ ├── 📂 vae/\n│ │ └── qwen_image_vae.safetensors\n│ └── 📂 text_encoders/\n│ └── qwen_2.5_vl_7b_fp8_scaled.safetensors\n```\n"
],
"color": "#432",
"bgcolor": "#653"
}
],
"links": [
[
46,
6,
0,
3,
1,
"CONDITIONING"
],
[
52,
7,
0,
3,
2,
"CONDITIONING"
],
[
74,
38,
0,
6,
0,
"CLIP"
],
[
75,
38,
0,
7,
0,
"CLIP"
],
[
76,
39,
0,
8,
1,
"VAE"
],
[
110,
8,
0,
60,
0,
"IMAGE"
],
[
128,
3,
0,
8,
0,
"LATENT"
],
[
129,
70,
0,
69,
1,
"MODEL_PATCH"
],
[
130,
66,
0,
69,
0,
"MODEL"
],
[
131,
69,
0,
3,
0,
"MODEL"
],
[
132,
39,
0,
69,
2,
"VAE"
],
[
135,
72,
0,
69,
3,
"IMAGE"
],
[
136,
72,
0,
73,
0,
"IMAGE"
],
[
140,
71,
0,
75,
0,
"IMAGE"
],
[
141,
75,
0,
72,
0,
"IMAGE"
],
[
142,
76,
0,
3,
3,
"LATENT"
],
[
143,
75,
0,
76,
0,
"IMAGE"
],
[
144,
39,
0,
76,
1,
"VAE"
],
[
145,
37,
0,
80,
0,
"MODEL"
],
[
149,
80,
0,
66,
0,
"MODEL"
]
],
"groups": [
{
"id": 1,
"title": "Step 1 - Upload models",
"bounding": [
-130,
-80,
400,
610
],
"color": "#3f789e",
"font_size": 24,
"flags": {}
},
{
"id": 2,
"title": "Step 2 - Upload reference image",
"bounding": [
-130,
550,
400,
550
],
"color": "#3f789e",
"font_size": 24,
"flags": {}
},
{
"id": 3,
"title": "Image processing",
"bounding": [
290,
550,
490,
550
],
"color": "#3f789e",
"font_size": 24,
"flags": {}
},
{
"id": 4,
"title": "Step 3 - Prompt",
"bounding": [
290,
100,
490,
430
],
"color": "#3f789e",
"font_size": 24,
"flags": {}
},
{
"id": 5,
"title": "4 steps lightning LoRA",
"bounding": [
290,
-80,
490,
160
],
"color": "#3f789e",
"font_size": 24,
"flags": {}
}
],
"config": {},
"extra": {
"ds": {
"scale": 0.48559562289012265,
"offset": [
1846.044139609573,
391.24067543168553
]
},
"frontendVersion": "1.26.6"
},
"version": 0.4
}