ahmedghani committed on
Commit
651f936
·
verified ·
1 Parent(s): e308470

Upload 2 files

Browse files
imaginev5_ultra_workflow.json ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "keys": {
3
+ "input": "85",
4
+ "output": "77"
5
+ },
6
+ "extra_data": {},
7
+ "prompt": {
8
+ "20": {
9
+ "inputs": {
10
+ "ckpt_name": "turbovisionxlSuperFastXLBasedOnNew_alphaV0101Bakedvae.safetensors"
11
+ },
12
+ "class_type": "CheckpointLoaderSimple"
13
+ },
14
+ "29": {
15
+ "inputs": {
16
+ "width": 4096,
17
+ "height": 4096,
18
+ "crop_w": 0,
19
+ "crop_h": 0,
20
+ "target_width": 4096,
21
+ "target_height": 4096,
22
+ "text_g": [
23
+ "86",
24
+ 1
25
+ ],
26
+ "text_l": [
27
+ "86",
28
+ 1
29
+ ],
30
+ "clip": [
31
+ "20",
32
+ 1
33
+ ]
34
+ },
35
+ "class_type": "CLIPTextEncodeSDXL"
36
+ },
37
+ "30": {
38
+ "inputs": {
39
+ "width": 4096,
40
+ "height": 4096,
41
+ "crop_w": 0,
42
+ "crop_h": 0,
43
+ "target_width": 4096,
44
+ "target_height": 4096,
45
+ "text_g": [
46
+ "86",
47
+ 2
48
+ ],
49
+ "text_l": [
50
+ "86",
51
+ 2
52
+ ],
53
+ "clip": [
54
+ "20",
55
+ 1
56
+ ]
57
+ },
58
+ "class_type": "CLIPTextEncodeSDXL"
59
+ },
60
+ "67": {
61
+ "inputs": {
62
+ "add_noise": "enable",
63
+ "noise_seed": [
64
+ "86",
65
+ 9
66
+ ],
67
+ "steps": 6,
68
+ "cfg": 1.1,
69
+ "sampler_name": "dpmpp_2m_sde",
70
+ "scheduler": "karras",
71
+ "start_at_step": 0,
72
+ "end_at_step": 12,
73
+ "return_with_leftover_noise": "enable",
74
+ "model": [
75
+ "95",
76
+ 0
77
+ ],
78
+ "positive": [
79
+ "29",
80
+ 0
81
+ ],
82
+ "negative": [
83
+ "30",
84
+ 0
85
+ ],
86
+ "latent_image": [
87
+ "92",
88
+ 0
89
+ ]
90
+ },
91
+ "class_type": "KSamplerAdvanced"
92
+ },
93
+ "68": {
94
+ "inputs": {
95
+ "add_noise": "enable",
96
+ "noise_seed": [
97
+ "86",
98
+ 9
99
+ ],
100
+ "steps": 12,
101
+ "cfg": 1.1,
102
+ "sampler_name": "dpmpp_2m_sde",
103
+ "scheduler": "karras",
104
+ "start_at_step": 5,
105
+ "end_at_step": 10000,
106
+ "return_with_leftover_noise": "disable",
107
+ "model": [
108
+ "95",
109
+ 0
110
+ ],
111
+ "positive": [
112
+ "29",
113
+ 0
114
+ ],
115
+ "negative": [
116
+ "30",
117
+ 0
118
+ ],
119
+ "latent_image": [
120
+ "93",
121
+ 0
122
+ ]
123
+ },
124
+ "class_type": "KSamplerAdvanced"
125
+ },
126
+ "72": {
127
+ "inputs": {
128
+ "filename_prefix": "sdxlTurbo1xUpres",
129
+ "images": [
130
+ "77",
131
+ 0
132
+ ]
133
+ },
134
+ "class_type": "SaveImage"
135
+ },
136
+ "77": {
137
+ "inputs": {
138
+ "samples": [
139
+ "68",
140
+ 0
141
+ ],
142
+ "vae": [
143
+ "20",
144
+ 2
145
+ ]
146
+ },
147
+ "class_type": "VAEDecode"
148
+ },
149
+ "85": {
150
+ "inputs": {
151
+ "user_prompt": "digital drawing of cyberpunk skull with armor, maximalist detailing, colorful, vibrant, --ar 9:16 --chaos 30",
152
+ "mode": "t2i",
153
+ "init_noise_mode": "perlin1",
154
+ "user_neg_prompt": "trees",
155
+ "batch_size": 1,
156
+ "cfg": 7.5,
157
+ "steps": 6,
158
+ "width": 512,
159
+ "height": 512,
160
+ "seed": 1696661138,
161
+ "init_img": "",
162
+ "denoise": 1,
163
+ "image_prompt": "",
164
+ "mask": "",
165
+ "stage1_strength": 0.25,
166
+ "stage2_strength": 1,
167
+ "efficiency_multiplier": 1,
168
+ "face_swap_img": "",
169
+ "image_prompt_weights": "",
170
+ "control_net_input_img": "",
171
+ "vae": [
172
+ "20",
173
+ 2
174
+ ]
175
+ },
176
+ "class_type": "Vyro Pipe Input V2"
177
+ },
178
+ "86": {
179
+ "inputs": {
180
+ "vyro_params": [
181
+ "85",
182
+ 0
183
+ ]
184
+ },
185
+ "class_type": "Vyro Param Extractor"
186
+ },
187
+ "92": {
188
+ "inputs": {
189
+ "width": [
190
+ "86",
191
+ 7
192
+ ],
193
+ "height": [
194
+ "86",
195
+ 8
196
+ ],
197
+ "batch_size": 1
198
+ },
199
+ "class_type": "EmptyLatentImage"
200
+ },
201
+ "93": {
202
+ "inputs": {
203
+ "upscale_method": "nearest-exact",
204
+ "scale_by": 2,
205
+ "samples": [
206
+ "67",
207
+ 0
208
+ ]
209
+ },
210
+ "class_type": "LatentUpscaleBy"
211
+ },
212
+ "95": {
213
+ "inputs": {
214
+ "graph_file_name": "v5-tiny",
215
+ "static_mode": "enable",
216
+ "model": [
217
+ "20",
218
+ 0
219
+ ]
220
+ },
221
+ "class_type": "ModelSpeedup"
222
+ }
223
+ }
224
+ }
imaginev5_workflow.json ADDED
@@ -0,0 +1,380 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "keys": {
3
+ "input": "16",
4
+ "output": "57"
5
+ },
6
+ "extra_data": {},
7
+ "prompt": {
8
+ "2": {
9
+ "inputs": {
10
+ "vae_name": "sdxl_vae.safetensors"
11
+ },
12
+ "class_type": "VAELoader"
13
+ },
14
+ "16": {
15
+ "inputs": {
16
+ "user_prompt": "professor working in a lab",
17
+ "mode": "t2i",
18
+ "init_noise_mode": "perlin1",
19
+ "user_neg_prompt": "",
20
+ "batch_size": 1,
21
+ "cfg": 8,
22
+ "steps": 10,
23
+ "width": 1024,
24
+ "height": 1024,
25
+ "seed": 3998723850,
26
+ "init_img": [
27
+ "103",
28
+ 0
29
+ ],
30
+ "denoise": 0.8,
31
+ "image_prompt": "",
32
+ "mask": "",
33
+ "stage1_strength": 1,
34
+ "stage2_strength": 1,
35
+ "efficiency_multiplier": 1,
36
+ "image_prompt_weights": "1.0,1.0",
37
+ "vae": [
38
+ "2",
39
+ 0
40
+ ]
41
+ },
42
+ "class_type": "Vyro Pipe Input V2"
43
+ },
44
+ "25": {
45
+ "inputs": {
46
+ "config_path": "v5.json",
47
+ "classifier_path": "spacy_prompt_analyzer"
48
+ },
49
+ "class_type": "Vyro Config Loader"
50
+ },
51
+ "27": {
52
+ "inputs": {
53
+ "debug": "enabled",
54
+ "skip": "disabled",
55
+ "vyro_params": [
56
+ "16",
57
+ 0
58
+ ],
59
+ "styles": [
60
+ "25",
61
+ 0
62
+ ],
63
+ "prompt_tree": [
64
+ "25",
65
+ 1
66
+ ],
67
+ "classifier": [
68
+ "25",
69
+ 3
70
+ ]
71
+ },
72
+ "class_type": "Vyro Prompt Analyzer"
73
+ },
74
+ "28": {
75
+ "inputs": {
76
+ "crop_factor": 0,
77
+ "base_clip": [
78
+ "383",
79
+ 1
80
+ ],
81
+ "refiner_clip": [
82
+ "384",
83
+ 1
84
+ ],
85
+ "params": [
86
+ "27",
87
+ 0
88
+ ]
89
+ },
90
+ "class_type": "Vyro Prompt Encoder"
91
+ },
92
+ "48": {
93
+ "inputs": {
94
+ "latent_src": "xl",
95
+ "latent_dst": "v1",
96
+ "samples": [
97
+ "390",
98
+ 0
99
+ ]
100
+ },
101
+ "class_type": "VyroLatentInterposer"
102
+ },
103
+ "49": {
104
+ "inputs": {
105
+ "upscale_method": "nearest-exact",
106
+ "scale_by": [
107
+ "144",
108
+ 15
109
+ ],
110
+ "samples": [
111
+ "48",
112
+ 0
113
+ ]
114
+ },
115
+ "class_type": "LatentUpscaleBy"
116
+ },
117
+ "57": {
118
+ "inputs": {
119
+ "samples": [
120
+ "151",
121
+ 0
122
+ ],
123
+ "vae": [
124
+ "100",
125
+ 0
126
+ ]
127
+ },
128
+ "class_type": "VAEDecode"
129
+ },
130
+ "63": {
131
+ "inputs": {
132
+ "add_noise": "enable",
133
+ "noise_seed": [
134
+ "144",
135
+ 9
136
+ ],
137
+ "steps": 40,
138
+ "cfg": 7,
139
+ "sampler_name": "dpmpp_3m_sde_gpu",
140
+ "scheduler": "simple",
141
+ "start_at_step": 20,
142
+ "end_at_step": 40,
143
+ "return_with_leftover_noise": "disable",
144
+ "model": [
145
+ "384",
146
+ 0
147
+ ],
148
+ "positive": [
149
+ "28",
150
+ 2
151
+ ],
152
+ "negative": [
153
+ "28",
154
+ 3
155
+ ],
156
+ "latent_image": [
157
+ "49",
158
+ 0
159
+ ]
160
+ },
161
+ "class_type": "KSamplerAdvanced"
162
+ },
163
+ "84": {
164
+ "inputs": {
165
+ "images": [
166
+ "57",
167
+ 0
168
+ ]
169
+ },
170
+ "class_type": "PreviewImage"
171
+ },
172
+ "100": {
173
+ "inputs": {
174
+ "vae_name": "vae-ft-mse-840000-ema-pruned.safetensors"
175
+ },
176
+ "class_type": "VAELoader"
177
+ },
178
+ "102": {
179
+ "inputs": {
180
+ "image": "daniel (1).png",
181
+ "choose file to upload": "image"
182
+ },
183
+ "class_type": "LoadImage"
184
+ },
185
+ "103": {
186
+ "inputs": {
187
+ "image": [
188
+ "102",
189
+ 0
190
+ ]
191
+ },
192
+ "class_type": "Vyro Image to String"
193
+ },
194
+ "135": {
195
+ "inputs": {
196
+ "t2i": "allow",
197
+ "i2i": "allow",
198
+ "variate": "allow",
199
+ "remix": "deny",
200
+ "inpaint": "deny",
201
+ "outpaint": "deny",
202
+ "face_swap": "deny",
203
+ "qr": "deny",
204
+ "headshot": "deny",
205
+ "vyro_params": [
206
+ "27",
207
+ 0
208
+ ]
209
+ },
210
+ "class_type": "Vyro Mode Filter"
211
+ },
212
+ "144": {
213
+ "inputs": {
214
+ "vyro_params": [
215
+ "135",
216
+ 0
217
+ ]
218
+ },
219
+ "class_type": "Vyro Param Extractor"
220
+ },
221
+ "145": {
222
+ "inputs": {
223
+ "add_noise": "enable",
224
+ "noise_seed": [
225
+ "144",
226
+ 9
227
+ ],
228
+ "steps": 30,
229
+ "cfg": 7,
230
+ "sampler_name": "ddim",
231
+ "scheduler": "karras",
232
+ "start_at_step": 25,
233
+ "end_at_step": 27,
234
+ "return_with_leftover_noise": "enable",
235
+ "model": [
236
+ "384",
237
+ 0
238
+ ],
239
+ "positive": [
240
+ "28",
241
+ 2
242
+ ],
243
+ "negative": [
244
+ "28",
245
+ 3
246
+ ],
247
+ "latent_image": [
248
+ "63",
249
+ 0
250
+ ]
251
+ },
252
+ "class_type": "KSamplerAdvanced"
253
+ },
254
+ "151": {
255
+ "inputs": {
256
+ "add_noise": "disable",
257
+ "noise_seed": [
258
+ "144",
259
+ 9
260
+ ],
261
+ "steps": 30,
262
+ "cfg": 7,
263
+ "sampler_name": "ddim",
264
+ "scheduler": "karras",
265
+ "start_at_step": 27,
266
+ "end_at_step": 30,
267
+ "return_with_leftover_noise": "enable",
268
+ "model": [
269
+ "384",
270
+ 0
271
+ ],
272
+ "positive": [
273
+ "28",
274
+ 2
275
+ ],
276
+ "negative": [
277
+ "28",
278
+ 3
279
+ ],
280
+ "latent_image": [
281
+ "145",
282
+ 0
283
+ ]
284
+ },
285
+ "class_type": "KSamplerAdvanced"
286
+ },
287
+ "382": {
288
+ "inputs": {
289
+ "base_model": "sd_xl_base_1.0.safetensors"
290
+ },
291
+ "class_type": "Vyro Oneflow Base Model Loader"
292
+ },
293
+ "383": {
294
+ "inputs": {
295
+ "base_model": [
296
+ "389",
297
+ 0
298
+ ],
299
+ "base_clip": [
300
+ "382",
301
+ 1
302
+ ],
303
+ "style": [
304
+ "27",
305
+ 1
306
+ ],
307
+ "prompt_tree": [
308
+ "25",
309
+ 1
310
+ ],
311
+ "model_config": [
312
+ "25",
313
+ 2
314
+ ]
315
+ },
316
+ "class_type": "Vyro LoRa Loader"
317
+ },
318
+ "384": {
319
+ "inputs": {
320
+ "style": [
321
+ "27",
322
+ 1
323
+ ],
324
+ "prompt_tree": [
325
+ "25",
326
+ 1
327
+ ],
328
+ "model_config": [
329
+ "25",
330
+ 2
331
+ ]
332
+ },
333
+ "class_type": "Vyro Oneflow Refiner Model Loader"
334
+ },
335
+ "389": {
336
+ "inputs": {
337
+ "graph_file_name": "v5_graph",
338
+ "static_mode": "enable",
339
+ "model": [
340
+ "382",
341
+ 0
342
+ ]
343
+ },
344
+ "class_type": "ComfySpeedup"
345
+ },
346
+ "390": {
347
+ "inputs": {
348
+ "add_noise": "enable",
349
+ "noise_seed": [
350
+ "144",
351
+ 9
352
+ ],
353
+ "steps": 20,
354
+ "cfg": 8,
355
+ "sampler_name": "euler",
356
+ "scheduler": "normal",
357
+ "start_at_step": 0,
358
+ "end_at_step": 10000,
359
+ "return_with_leftover_noise": "disable",
360
+ "model": [
361
+ "383",
362
+ 0
363
+ ],
364
+ "positive": [
365
+ "28",
366
+ 0
367
+ ],
368
+ "negative": [
369
+ "28",
370
+ 1
371
+ ],
372
+ "latent_image": [
373
+ "144",
374
+ 0
375
+ ]
376
+ },
377
+ "class_type": "KSamplerAdvanced"
378
+ }
379
+ }
380
+ }