<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>ComfyUI Workflow</title>
  <style>
    body {
      font-family: -apple-system, BlinkMacSystemFont, 'SF Pro Text', sans-serif;
      background-color: #000000;
      color: #f5f5f7;
      padding: 40px;
    }
    pre {
      background: #1d1d1f;
      padding: 24px;
      border-radius: 12px;
      overflow-x: auto;
    }
  </style>
</head>
<body>
  <h1>ComfyUI Workflow</h1>
  <p>A complete Stable Diffusion text-to-image workflow in ComfyUI's graph JSON format.</p>
  <pre>
{
  "last_node_id": 12,
  "last_link_id": 18,
  "nodes": [
    {
      "id": 1,
      "type": "CheckpointLoaderSimple",
      "pos": [100, 100],
      "size": [315, 106],
      "flags": {},
      "order": 0,
      "mode": 0,
      "outputs": [
        { "name": "MODEL", "type": "MODEL", "links": [5] },
        { "name": "CLIP", "type": "CLIP", "links": [6, 7] },
        { "name": "VAE", "type": "VAE", "links": [8] }
      ],
      "properties": { "Node name for S&R": "CheckpointLoaderSimple" },
      "widgets_values": ["v1-5-pruned-emaonly.ckpt"]
    },
    {
      "id": 2,
      "type": "CLIPTextEncode",
      "pos": [500, 100],
      "size": [425, 180],
      "flags": {},
      "order": 1,
      "mode": 0,
      "inputs": [
        { "name": "clip", "type": "CLIP", "link": 6 }
      ],
      "outputs": [
        { "name": "CONDITIONING", "type": "CONDITIONING", "links": [9] }
      ],
      "title": "Positive Prompt",
      "properties": { "Node name for S&R": "CLIPTextEncode" },
      "widgets_values": [
        "beautiful scenery nature glass bottle landscape, purple galaxy bottle, 1girl, standing,long hair, open mouth, best quality, highly detailed, dramatic lighting"
      ]
    },
    {
      "id": 3,
      "type": "CLIPTextEncode",
      "pos": [500, 320],
      "size": [425, 180],
      "flags": {},
      "order": 2,
      "mode": 0,
      "inputs": [
        { "name": "clip", "type": "CLIP", "link": 7 }
      ],
      "outputs": [
        { "name": "CONDITIONING", "type": "CONDITIONING", "links": [10] }
      ],
      "title": "Negative Prompt",
      "properties": { "Node name for S&R": "CLIPTextEncode" },
      "widgets_values": [
        "lowres, bad anatomy, bad hands, text, error, missing fingers, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry"
      ]
    },
    {
      "id": 4,
      "type": "EmptyLatentImage",
      "pos": [100, 300],
      "size": [315, 106],
      "flags": {},
      "order": 3,
      "mode": 0,
      "outputs": [
        { "name": "LATENT", "type": "LATENT", "links": [11] }
      ],
      "title": "Empty Latent Image",
      "properties": { "Node name for S&R": "EmptyLatentImage" },
      "widgets_values": [512, 512, 1]
    },
    {
      "id": 5,
      "type": "KSampler",
      "pos": [500, 550],
      "size": [315, 262],
      "flags": {},
      "order": 4,
      "mode": 0,
      "inputs": [
        { "name": "model", "type": "MODEL", "link": 5 },
        { "name": "positive", "type": "CONDITIONING", "link": 9 },
        { "name": "negative", "type": "CONDITIONING", "link": 10 },
        { "name": "latent_image", "type": "LATENT", "link": 11 }
      ],
      "outputs": [
        { "name": "LATENT", "type": "LATENT", "links": [12] }
      ],
      "title": "KSampler",
      "properties": { "Node name for S&R": "KSampler" },
      "widgets_values": [42, "randomize", 20, 7.0, "dpmpp_2m", "karras", 1.0]
    },
    {
      "id": 6,
      "type": "VAEDecode",
      "pos": [900, 550],
      "size": [210, 46],
      "flags": {},
      "order": 5,
      "mode": 0,
      "inputs": [
        { "name": "samples", "type": "LATENT", "link": 12 },
        { "name": "vae", "type": "VAE", "link": 8 }
      ],
      "outputs": [
        { "name": "IMAGE", "type": "IMAGE", "links": [17] }
      ],
      "title": "VAE Decode",
      "properties": { "Node name for S&R": "VAEDecode" }
    },
    {
      "id": 7,
      "type": "SaveImage",
      "pos": [1150, 550],
      "size": [315, 270],
      "flags": {},
      "order": 6,
      "mode": 0,
      "inputs": [
        { "name": "images", "type": "IMAGE", "link": 17 }
      ],
      "title": "Save Image",
      "properties": { "Node name for S&R": "SaveImage" },
      "widgets_values": ["ComfyUI_Stable_Diffusion"]
    },
    {
      "id": 8,
      "type": "CLIPVisionLoader",
      "pos": [100, 450],
      "size": [315, 58],
      "flags": {},
      "order": 7,
      "mode": 0,
      "outputs": [
        { "name": "CLIP_VISION", "type": "CLIP_VISION", "links": [13] }
      ],
      "title": "CLIP Vision Loader",
      "properties": { "Node name for S&R": "CLIPVisionLoader" },
      "widgets_values": ["model.safetensors"]
    },
    {
      "id": 9,
      "type": "ImageOnlyCheckpointLoader",
      "pos": [100, 550],
      "size": [315, 58],
      "flags": {},
      "order": 8,
      "mode": 0,
      "outputs": [
        { "name": "IMAGE_ENCODER", "type": "IMAGE_ENCODER", "links": [14] }
      ],
      "title": "Image Only Checkpoint Loader",
      "properties": { "Node name for S&R": "ImageOnlyCheckpointLoader" },
      "widgets_values": ["model.safetensors"]
    },
    {
      "id": 10,
      "type": "CLIPTextEncode",
      "pos": [500, 700],
      "size": [425, 100],
      "flags": {},
      "order": 9,
      "mode": 0,
      "inputs": [
        { "name": "clip", "type": "CLIP", "link": 15 }
      ],
      "outputs": [
        { "name": "CONDITIONING", "type": "CONDITIONING", "links": [16] }
      ],
      "title": "Prompt",
      "properties": { "Node name for S&R": "CLIPTextEncode" },
      "widgets_values": ["a photo of a cat"]
    },
    {
      "id": 11,
      "type": "CLIPVisionLoader",
      "pos": [100, 650],
      "size": [315, 58],
      "flags": {},
      "order": 10,
      "mode": 0,
      "outputs": [
        { "name": "CLIP_VISION", "type": "CLIP_VISION", "links": [18] }
      ],
      "title": "CLIP Vision Loader",
      "properties": { "Node name for S&R": "CLIPVisionLoader" },
      "widgets_values": ["model.safetensors"]
    },
    {
      "id": 12,
      "type": "CLIPTextEncode",
      "pos": [950, 300],
      "size": [300, 200],
      "flags": {},
      "order": 11,
      "mode": 0,
      "inputs": [
        { "name": "clip", "type": "CLIP", "link": 16 }
      ],
      "outputs": [],
      "title": "CLIPTextEncode",
      "properties": { "Node name for S&R": "CLIPTextEncode" },
      "widgets_values": ["a photo of a cat"]
    }
  ],
| "links": [ | |
| [ | |
| 5, | |
| 1, | |
| 0, | |
| 5, | |
| 0, | |
| "MODEL" | |
| ], | |
| [ | |
| 6, | |
| 1, | |
| 1, | |
| 2, | |
| 0, | |
| "CLIP" | |
| ], | |
| [ | |
| 7, | |
| 1, | |
| 1, | |
| 3, | |
| 0, | |
| "CLIP" | |
| ], | |
| [ | |
| 8, | |
| 1, | |
| 2, | |
| 6, | |
| 1, | |
| "VAE" | |
| ], | |
| [ | |
| 9, | |
| 2, | |
| 0, | |
| 5, | |
| 1, | |
| "CONDITIONING" | |
| ], | |
| [ | |
| 10, | |
| 3, | |
| 0, | |
| 5, | |
| 2, | |
| "CONDITIONING" | |
| ], | |
| [ | |
| 11, | |
| 4, | |
| 0, | |
| 5, | |
| 3, | |
| "LATENT" | |
| ], | |
| [ | |
| 12, | |
| 5, | |
| 0, | |
| 6, | |
| 0, | |
| "LATENT" | |
| ], | |
| [ | |
| 13, | |
| 8, | |
| 0, | |
| 10, | |
| 0, | |
| "CLIP_VISION" | |
| ], | |
| [ | |
| 14, | |
| 9, | |
| 0, | |
| 10, | |
| 1, | |
| "IMAGE_ENCODER" | |
| ], | |
| [ | |
| 15, | |
| 1, | |
| 1, | |
| 10, | |
| 2, | |
| "CLIP" | |
| ], | |
| [ | |
| 16, | |
| 10, | |
| 0, | |
| 12, | |
| 0, | |
| "CONDITIONING" | |
| ], | |
| [ | |
| 17, | |
| 6, | |
| 0, | |
| 7, | |
| 0, | |
| "IMAGE" | |
| ], | |
| [ | |
| 18, | |
| 11, | |
| 0, | |
| 12, | |
| 1, | |
| "CLIP_VISION" | |
| ] | |
| ], | |
| "groups": [ | |
| { | |
| "title": "Stable Diffusion Core", | |
| "bounding": [ | |
| 50, | |
| 50, | |
| 900, | |
| 550 | |
| ], | |
| "color": "#3f789e", | |
| "font_size": 24 | |
| }, | |
| { | |
| "title": "CLIP Vision", | |
| "bounding": [ | |
| 50, | |
| 450, | |
| 400, | |
| 300 | |
| ], | |
| "color": "#3f789e", | |
| "font_size": 24 | |
| }, | |
| { | |
| "title": "Image Processing", | |
| "bounding": [ | |
| 450, | |
| 650, | |
| 900, | |
| 350 | |
| ], | |
| "color": "#3f789e", | |
| "font_size": 24 | |
| } | |
| ], | |
| "config": {}, | |
| "extra": { | |
| "ds": { | |
| "scale": 0.5, | |
| "offset": [ | |
| -100, | |
| -100 | |
| ] | |
| } | |
| } | |
| } | |
  </pre>
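  <p>Each node in the graph carries its canvas position, widget values, and typed input/output slots; every entry in the <code>links</code> array is a six-element tuple, <code>[link_id, from_node, from_slot, to_node, to_slot, type]</code>. As a quick sanity check before loading, the following Python sketch (filenames are hypothetical; this script is not part of ComfyUI) verifies that each link points at an existing node, an existing slot, and a matching type:</p>
  <pre>
import json

# Minimal consistency check for a ComfyUI graph-format workflow.
# Assumes the JSON above has been saved as workflow.json (hypothetical name).
with open("workflow.json") as f:
    wf = json.load(f)

nodes = {n["id"]: n for n in wf["nodes"]}

for link_id, src, src_slot, dst, dst_slot, ltype in wf["links"]:
    problems = []
    if src not in nodes:
        problems.append(f"origin node {src} missing")
    if dst not in nodes:
        problems.append(f"target node {dst} missing")
    else:
        inputs = nodes[dst].get("inputs", [])
        if dst_slot >= len(inputs):
            problems.append(f"target slot {dst_slot} does not exist")
        elif inputs[dst_slot]["type"] != ltype:
            problems.append(f"link type {ltype} vs slot type " + inputs[dst_slot]["type"])
    if problems:
        print(f"link {link_id}:", "; ".join(problems))
  </pre>
  <p>Run against the JSON above, this flags the CLIP Vision links (13, 14, 15, 16, and 18), whose target slots are missing or type-mismatched; the core text-to-image chain (links 5 through 12, plus 17) checks out.</p>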
  <p>This ComfyUI workflow provides a complete Stable Diffusion text-to-image setup.</p>
  <h2>Core Features</h2>
  <ul>
    <li><strong>Model loading</strong>: loads a Stable Diffusion checkpoint and exposes its MODEL, CLIP, and VAE outputs</li>
    <li><strong>Text encoding</strong>: positive and negative prompt encoding for guided generation</li>
    <li><strong>Sampling</strong>: KSampler with configurable seed, steps, CFG scale, sampler, and scheduler</li>
    <li><strong>Image output</strong>: VAE decoding and image saving</li>
  </ul>
  <h2>Key Parameters</h2>
  <ul>
    <li>Default model: <code>v1-5-pruned-emaonly.ckpt</code></li>
    <li>Resolution: 512×512</li>
    <li>Sampling steps: 20</li>
    <li>CFG scale: 7.0</li>
    <li>Sampler: <code>dpmpp_2m</code> with the <code>karras</code> scheduler</li>
  </ul>
  <p>All of these live in the nodes' <code>widgets_values</code> arrays, so they can be changed in the UI or in a script, as in the sketch below.</p>
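  <p>A minimal editing sketch, assuming the KSampler widget order shown above (seed, seed control, steps, CFG, sampler, scheduler, denoise) and hypothetical filenames:</p>
  <pre>
import json

with open("workflow.json") as f:
    wf = json.load(f)

# Index nodes by type for convenient lookup.
by_type = {}
for node in wf["nodes"]:
    by_type.setdefault(node["type"], []).append(node)

# KSampler widgets_values, in the order exported above:
# [seed, seed_control, steps, cfg, sampler_name, scheduler, denoise]
sampler = by_type["KSampler"][0]
sampler["widgets_values"][2] = 30     # raise steps from 20 to 30
sampler["widgets_values"][3] = 8.0    # raise CFG scale from 7.0 to 8.0

# EmptyLatentImage widgets_values: [width, height, batch_size]
latent = by_type["EmptyLatentImage"][0]
latent["widgets_values"][0:2] = [768, 768]

with open("workflow_edited.json", "w") as f:
    json.dump(wf, f, indent=2)
  </pre>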
  <h2>Usage</h2>
  <ol>
    <li>Load the workflow JSON in ComfyUI (drag it onto the canvas or use Load)</li>
    <li>Place the required model files in ComfyUI's <code>models</code> folder</li>
    <li>Adjust prompts and parameters as needed</li>
    <li>Queue the generation, either in the UI or over the HTTP API as sketched below</li>
  </ol>
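  <p>Step 4 can also be driven programmatically: a running ComfyUI instance exposes an HTTP endpoint at <code>/prompt</code> (default server <code>127.0.0.1:8188</code>). Note that this endpoint expects the <em>API-format</em> export (enable dev mode in the settings, then use "Save (API Format)"), not the graph-format JSON shown above. A minimal sketch with a hypothetical <code>workflow_api.json</code>:</p>
  <pre>
import json
import urllib.request

# Assumes an API-format export saved as workflow_api.json,
# not the graph-format JSON shown on this page.
with open("workflow_api.json") as f:
    prompt = json.load(f)

payload = json.dumps({"prompt": prompt}).encode("utf-8")
req = urllib.request.Request(
    "http://127.0.0.1:8188/prompt",  # default local ComfyUI address
    data=payload,
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(resp.read().decode("utf-8"))  # server replies with a prompt_id
  </pre>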
  <p>The workflow also includes CLIP Vision nodes (ids 8–12) intended for image-conditioned generation; as exported here their links are only partially wired (see the validation sketch above), so reconnect them in the graph editor before relying on them.</p>
  <p>Built with anycoder: <a href="https://huggingface.co/spaces/akhaliq/anycoder">https://huggingface.co/spaces/akhaliq/anycoder</a></p>
</body>
</html>