akhaliq HF Staff commited on
Commit
d4705d5
·
verified ·
1 Parent(s): 8e601c6

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. index.html +660 -19
index.html CHANGED
@@ -1,19 +1,660 @@
1
- <!doctype html>
2
- <html>
3
- <head>
4
- <meta charset="utf-8" />
5
- <meta name="viewport" content="width=device-width" />
6
- <title>My static Space</title>
7
- <link rel="stylesheet" href="style.css" />
8
- </head>
9
- <body>
10
- <div class="card">
11
- <h1>Welcome to your static Space!</h1>
12
- <p>You can modify this app directly by editing <i>index.html</i> in the Files and versions tab.</p>
13
- <p>
14
- Also don't forget to check the
15
- <a href="https://huggingface.co/docs/hub/spaces" target="_blank">Spaces documentation</a>.
16
- </p>
17
- </div>
18
- </body>
19
- </html>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>ComfyUI Workflow</title>
7
+ <style>
8
+ body {
9
+ font-family: -apple-system, BlinkMacSystemFont, 'SF Pro Text', sans-serif;
10
+ background-color: #000000;
11
+ color: #f5f5f7;
12
+ padding: 40px;
13
+ }
14
+ pre {
15
+ background: #1d1d1f;
16
+ padding: 24px;
17
+ border-radius: 12px;
18
+ overflow-x: auto;
19
+ }
20
+ </style>
21
+ </head>
22
+ <body>
23
+ <h1>ComfyUI Workflow</h1>
24
+ <p>Error: Invalid JSON format</p>
25
+ <pre>
26
+
27
+ ```json
28
+ {
29
+ "last_node_id": 12,
30
+ "last_link_id": 18,
31
+ "nodes": [
32
+ {
33
+ "id": 1,
34
+ "type": "CheckpointLoader",
35
+ "pos": [
36
+ 100,
37
+ 100
38
+ ],
39
+ "size": [
40
+ 315,
41
+ 106
42
+ ],
43
+ "flags": {},
44
+ "order": 0,
45
+ "mode": 0,
46
+ "outputs": [
47
+ {
48
+ "name": "MODEL",
49
+ "type": "MODEL",
50
+ "links": [
51
+ 5
52
+ ]
53
+ },
54
+ {
55
+ "name": "CLIP",
56
+ "type": "CLIP",
57
+ "links": [
58
+ 6,
59
+ 7
60
+ ]
61
+ },
62
+ {
63
+ "name": "VAE",
64
+ "type": "VAE",
65
+ "links": [
66
+ 8
67
+ ]
68
+ }
69
+ ],
70
+ "properties": {
71
+ "Node name for S&R": "CheckpointLoader"
72
+ },
73
+ "widgets_values": [
74
+ "v1-5-pruned-emaonly.ckpt"
75
+ ]
76
+ },
77
+ {
78
+ "id": 2,
79
+ "type": "CLIPTextEncode",
80
+ "pos": [
81
+ 500,
82
+ 100
83
+ ],
84
+ "size": [
85
+ 425,
86
+ 180
87
+ ],
88
+ "flags": {},
89
+ "order": 1,
90
+ "mode": 0,
91
+ "inputs": [
92
+ {
93
+ "name": "clip",
94
+ "type": "CLIP",
95
+ "link": 6
96
+ }
97
+ ],
98
+ "outputs": [
99
+ {
100
+ "name": "CONDITIONING",
101
+ "type": "CONDITIONING",
102
+ "links": [
103
+ 9
104
+ ]
105
+ }
106
+ ],
107
+ "title": "Positive Prompt",
108
+ "properties": {
109
+ "Node name for S&R": "CLIPTextEncode"
110
+ },
111
+ "widgets_values": [
112
+ "beautiful scenery nature glass bottle landscape, purple galaxy bottle, 1girl, standing,long hair, open mouth, best quality, highly detailed, dramatic lighting"
113
+ ]
114
+ },
115
+ {
116
+ "id": 3,
117
+ "type": "CLIPTextEncode",
118
+ "pos": [
119
+ 500,
120
+ 320
121
+ ],
122
+ "size": [
123
+ 425,
124
+ 180
125
+ ],
126
+ "flags": {},
127
+ "order": 2,
128
+ "mode": 0,
129
+ "inputs": [
130
+ {
131
+ "name": "clip",
132
+ "type": "CLIP",
133
+ "link": 7
134
+ }
135
+ ],
136
+ "outputs": [
137
+ {
138
+ "name": "CONDITIONING",
139
+ "type": "CONDITIONING",
140
+ "links": [
141
+ 10
142
+ ]
143
+ }
144
+ ],
145
+ "title": "Negative Prompt",
146
+ "properties": {
147
+ "Node name for S&R": "CLIPTextEncode"
148
+ },
149
+ "widgets_values": [
150
+ "lowres, bad anatomy, bad hands, text, error, missing fingers, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry"
151
+ ]
152
+ },
153
+ {
154
+ "id": 4,
155
+ "type": "EmptyLatentImage",
156
+ "pos": [
157
+ 100,
158
+ 300
159
+ ],
160
+ "size": [
161
+ 315,
162
+ 106
163
+ ],
164
+ "flags": {},
165
+ "order": 3,
166
+ "mode": 0,
167
+ "outputs": [
168
+ {
169
+ "name": "LATENT",
170
+ "type": "LATENT",
171
+ "links": [
172
+ 11
173
+ ]
174
+ }
175
+ ],
176
+ "title": "Empty Latent Image",
177
+ "properties": {
178
+ "Node name for S&R": "EmptyLatentImage"
179
+ },
180
+ "widgets_values": [
181
+ 512,
182
+ 512,
183
+ 1
184
+ ]
185
+ },
186
+ {
187
+ "id": 5,
188
+ "type": "KSampler",
189
+ "pos": [
190
+ 500,
191
+ 550
192
+ ],
193
+ "size": [
194
+ 315,
195
+ 262
196
+ ],
197
+ "flags": {},
198
+ "order": 4,
199
+ "mode": 0,
200
+ "inputs": [
201
+ {
202
+ "name": "model",
203
+ "type": "MODEL",
204
+ "link": 5
205
+ },
206
+ {
207
+ "name": "positive",
208
+ "type": "CONDITIONING",
209
+ "link": 9
210
+ },
211
+ {
212
+ "name": "negative",
213
+ "type": "CONDITIONING",
214
+ "link": 10
215
+ },
216
+ {
217
+ "name": "latent_image",
218
+ "type": "LATENT",
219
+ "link": 11
220
+ }
221
+ ],
222
+ "outputs": [
223
+ {
224
+ "name": "LATENT",
225
+ "type": "LATENT",
226
+ "links": [
227
+ 12
228
+ ]
229
+ }
230
+ ],
231
+ "title": "KSampler",
232
+ "properties": {
233
+ "Node name for S&R": "KSampler"
234
+ },
235
+ "widgets_values": [
236
+ 42,
237
+ "randomize",
238
+ 20,
239
+ 7.0,
240
+ "dpmpp_2m",
241
+ "karras"
242
+ ]
243
+ },
244
+ {
245
+ "id": 6,
246
+ "type": "VAEDecode",
247
+ "pos": [
248
+ 900,
249
+ 550
250
+ ],
251
+ "size": [
252
+ 210,
253
+ 46
254
+ ],
255
+ "flags": {},
256
+ "order": 5,
257
+ "mode": 0,
258
+ "inputs": [
259
+ {
260
+ "name": "samples",
261
+ "type": "LATENT",
262
+ "link": 12
263
+ },
264
+ {
265
+ "name": "vae",
266
+ "type": "VAE",
267
+ "link": 8
268
+ }
269
+ ],
270
+ "outputs": [
271
+ {
272
+ "name": "IMAGE",
273
+ "type": "IMAGE",
274
+ "links": [
275
+ 17
276
+ ]
277
+ }
278
+ ],
279
+ "title": "VAE Decode",
280
+ "properties": {
281
+ "Node name for S&R": "VAEDecode"
282
+ }
283
+ },
284
+ {
285
+ "id": 7,
286
+ "type": "SaveImage",
287
+ "pos": [
288
+ 1150,
289
+ 550
290
+ ],
291
+ "size": [
292
+ 315,
293
+ 270
294
+ ],
295
+ "flags": {},
296
+ "order": 6,
297
+ "mode": 0,
298
+ "inputs": [
299
+ {
300
+ "name": "images",
301
+ "type": "IMAGE",
302
+ "link": 17
303
+ }
304
+ ],
305
+ "title": "Save Image",
306
+ "properties": {
307
+ "Node name for S&R": "SaveImage"
308
+ },
309
+ "widgets_values": [
310
+ "ComfyUI_Stable_Diffusion"
311
+ ]
312
+ },
313
+ {
314
+ "id": 8,
315
+ "type": "CLIPVisionLoader",
316
+ "pos": [
317
+ 100,
318
+ 450
319
+ ],
320
+ "size": [
321
+ 315,
322
+ 58
323
+ ],
324
+ "flags": {},
325
+ "order": 7,
326
+ "mode": 0,
327
+ "outputs": [
328
+ {
329
+ "name": "CLIP_VISION",
330
+ "type": "CLIP_VISION",
331
+ "links": [
332
+ 13
333
+ ]
334
+ }
335
+ ],
336
+ "title": "CLIP Vision Loader",
337
+ "properties": {
338
+ "Node name for S&R": "CLIPVisionLoader"
339
+ },
340
+ "widgets_values": [
341
+ "model.safetensors"
342
+ ]
343
+ },
344
+ {
345
+ "id": 9,
346
+ "type": "ImageOnlyCheckpointLoader",
347
+ "pos": [
348
+ 100,
349
+ 550
350
+ ],
351
+ "size": [
352
+ 315,
353
+ 58
354
+ ],
355
+ "flags": {},
356
+ "order": 8,
357
+ "mode": 0,
358
+ "outputs": [
359
+ {
360
+ "name": "IMAGE_ENCODER",
361
+ "type": "IMAGE_ENCODER",
362
+ "links": [
363
+ 14
364
+ ]
365
+ }
366
+ ],
367
+ "title": "Image Only Checkpoint Loader",
368
+ "properties": {
369
+ "Node name for S&R": "ImageOnlyCheckpointLoader"
370
+ },
371
+ "widgets_values": [
372
+ "model.safetensors"
373
+ ]
374
+ },
375
+ {
376
+ "id": 10,
377
+ "type": "CLIPTextEncode",
378
+ "pos": [
379
+ 500,
380
+ 700
381
+ ],
382
+ "size": [
383
+ 425,
384
+ 100
385
+ ],
386
+ "flags": {},
387
+ "order": 9,
388
+ "mode": 0,
389
+ "inputs": [
390
+ {
391
+ "name": "clip",
392
+ "type": "CLIP",
393
+ "link": 15
394
+ }
395
+ ],
396
+ "outputs": [
397
+ {
398
+ "name": "CONDITIONING",
399
+ "type": "CONDITIONING",
400
+ "links": [
401
+ 16
402
+ ]
403
+ }
404
+ ],
405
+ "title": "Prompt",
406
+ "properties": {
407
+ "Node name for S&R": "CLIPTextEncode"
408
+ },
409
+ "widgets_values": [
410
+ "a photo of a cat"
411
+ ]
412
+ },
413
+ {
414
+ "id": 11,
415
+ "type": "CLIPVisionLoader",
416
+ "pos": [
417
+ 100,
418
+ 650
419
+ ],
420
+ "size": [
421
+ 315,
422
+ 58
423
+ ],
424
+ "flags": {},
425
+ "order": 10,
426
+ "mode": 0,
427
+ "outputs": [
428
+ {
429
+ "name": "CLIP_VISION",
430
+ "type": "CLIP_VISION",
431
+ "links": [
432
+ 18
433
+ ]
434
+ }
435
+ ],
436
+ "title": "CLIP Vision Loader",
437
+ "properties": {
438
+ "Node name for S&R": "CLIPVisionLoader"
439
+ },
440
+ "widgets_values": [
441
+ "model.safetensors"
442
+ ]
443
+ },
444
+ {
445
+ "id": 12,
446
+ "type": "CLIPTextEncode",
447
+ "pos": [
448
+ 950,
449
+ 300
450
+ ],
451
+ "size": [
452
+ 300,
453
+ 200
454
+ ],
455
+ "flags": {},
456
+ "order": 11,
457
+ "mode": 0,
458
+ "inputs": [
459
+ {
460
+ "name": "clip",
461
+ "type": "CLIP",
462
+ "link": 16
463
+ }
464
+ ],
465
+ "outputs": [],
466
+ "title": "CLIPTextEncode",
467
+ "properties": {
468
+ "Node name for S&R": "CLIPTextEncode"
469
+ },
470
+ "widgets_values": [
471
+ "a photo of a cat"
472
+ ]
473
+ }
474
+ ],
475
+ "links": [
476
+ [
477
+ 5,
478
+ 1,
479
+ 0,
480
+ 5,
481
+ 0,
482
+ "MODEL"
483
+ ],
484
+ [
485
+ 6,
486
+ 1,
487
+ 1,
488
+ 2,
489
+ 0,
490
+ "CLIP"
491
+ ],
492
+ [
493
+ 7,
494
+ 1,
495
+ 1,
496
+ 3,
497
+ 0,
498
+ "CLIP"
499
+ ],
500
+ [
501
+ 8,
502
+ 1,
503
+ 2,
504
+ 6,
505
+ 1,
506
+ "VAE"
507
+ ],
508
+ [
509
+ 9,
510
+ 2,
511
+ 0,
512
+ 5,
513
+ 1,
514
+ "CONDITIONING"
515
+ ],
516
+ [
517
+ 10,
518
+ 3,
519
+ 0,
520
+ 5,
521
+ 2,
522
+ "CONDITIONING"
523
+ ],
524
+ [
525
+ 11,
526
+ 4,
527
+ 0,
528
+ 5,
529
+ 3,
530
+ "LATENT"
531
+ ],
532
+ [
533
+ 12,
534
+ 5,
535
+ 0,
536
+ 6,
537
+ 0,
538
+ "LATENT"
539
+ ],
540
+ [
541
+ 13,
542
+ 8,
543
+ 0,
544
+ 10,
545
+ 0,
546
+ "CLIP_VISION"
547
+ ],
548
+ [
549
+ 14,
550
+ 9,
551
+ 0,
552
+ 10,
553
+ 1,
554
+ "IMAGE_ENCODER"
555
+ ],
556
+ [
557
+ 15,
558
+ 1,
559
+ 1,
560
+ 10,
561
+ 2,
562
+ "CLIP"
563
+ ],
564
+ [
565
+ 16,
566
+ 10,
567
+ 0,
568
+ 12,
569
+ 0,
570
+ "CONDITIONING"
571
+ ],
572
+ [
573
+ 17,
574
+ 6,
575
+ 0,
576
+ 7,
577
+ 0,
578
+ "IMAGE"
579
+ ],
580
+ [
581
+ 18,
582
+ 11,
583
+ 0,
584
+ 12,
585
+ 1,
586
+ "CLIP_VISION"
587
+ ]
588
+ ],
589
+ "groups": [
590
+ {
591
+ "title": "Stable Diffusion Core",
592
+ "bounding": [
593
+ 50,
594
+ 50,
595
+ 900,
596
+ 550
597
+ ],
598
+ "color": "#3f789e",
599
+ "font_size": 24
600
+ },
601
+ {
602
+ "title": "CLIP Vision",
603
+ "bounding": [
604
+ 50,
605
+ 450,
606
+ 400,
607
+ 300
608
+ ],
609
+ "color": "#3f789e",
610
+ "font_size": 24
611
+ },
612
+ {
613
+ "title": "Image Processing",
614
+ "bounding": [
615
+ 450,
616
+ 650,
617
+ 900,
618
+ 350
619
+ ],
620
+ "color": "#3f789e",
621
+ "font_size": 24
622
+ }
623
+ ],
624
+ "config": {},
625
+ "extra": {
626
+ "ds": {
627
+ "scale": 0.5,
628
+ "offset": [
629
+ -100,
630
+ -100
631
+ ]
632
+ }
633
+ }
634
+ }
635
+ ```
636
+
637
+ This ComfyUI workflow provides a complete stable diffusion setup with:
638
+
639
+ **Core Features:**
640
+ - **Model Loading**: Loads stable diffusion checkpoints with model, CLIP, and VAE
641
+ - **Text Encoding**: Positive and negative prompt encoding for guided generation
642
+ - **Sampling**: KSampler with configurable steps, CFG scale, and sampler type
643
+ - **Image Output**: VAE decoding and image saving
644
+
645
+ **Key Parameters:**
646
+ - Default model: `v1-5-pruned-emaonly.ckpt`
647
+ - Resolution: 512x512
648
+ - Sampling steps: 20
649
+ - CFG scale: 7.0
650
+ - Sampler: `dpmpp_2m` with `karras` scheduling
651
+
652
+ **Usage:**
653
+ 1. Load the workflow in ComfyUI
654
+ 2. Install required models in the models folder
655
+ 3. Adjust prompts and parameters as needed
656
+ 4. Queue the generation
657
+
658
+ The workflow includes additional CLIP Vision nodes for advanced features like image-to-image generation if needed. Built with anycoder - https://huggingface.co/spaces/akhaliq/anycoder</pre>
659
+ </body>
660
+ </html>