Coercer committed on
Commit
2a90493
·
verified ·
1 Parent(s): 5ec0b54

Upload FINAL_Workflow.py

Browse files
Files changed (1) hide show
  1. FINAL_Workflow.py +646 -0
FINAL_Workflow.py ADDED
@@ -0,0 +1,646 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import random
3
+ import sys
4
+ import json
5
+ import argparse
6
+ import contextlib
7
+ from typing import Sequence, Mapping, Any, Union
8
+ import torch
9
+
10
+
11
def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
    """Return the value at *index* from a sequence or mapping.

    Sequences (lists, strings, ...) are indexed directly.  Some node
    outputs arrive as a mapping that carries its payload under the
    "result" key, so on a KeyError the lookup is retried against
    obj["result"].

    Args:
        obj: The sequence or mapping to read from.
        index: Position of the value to retrieve.

    Returns:
        The value stored at the given index.

    Raises:
        IndexError: If the index is out of bounds and obj is not a mapping.
    """
    try:
        value = obj[index]
    except KeyError:
        # Mapping without an integer key: unwrap the "result" payload.
        value = obj["result"][index]
    return value
33
+
34
+
35
def find_path(name: str, path: str = None) -> str:
    """
    Walk up the directory tree starting at *path* until an entry called
    *name* is found.  Returns the full path when found, None when the
    filesystem root is reached without a match.

    When *path* is omitted, the CLI-provided --comfyui-directory is used
    if available, otherwise the current working directory.
    """
    if path is None:
        # Fall back to the command-line ComfyUI directory when it was given.
        path = (
            os.getcwd()
            if args is None or args.comfyui_directory is None
            else args.comfyui_directory
        )

    # Match in the current directory?
    if name in os.listdir(path):
        found = os.path.join(path, name)
        print(f"{name} found: {found}")
        return found

    parent = os.path.dirname(path)
    if parent == path:
        # dirname() is a fixed point only at the root: stop searching.
        return None

    # Keep climbing toward the root.
    return find_path(name, parent)
62
+
63
+
64
def add_comfyui_directory_to_sys_path() -> None:
    """
    Locate the 'ComfyUI' checkout and add it to sys.path.

    Also appends the ComfyUI-Manager "glob" package when present and
    records its availability in the module-level `has_manager` flag.
    """
    # Fix: always define `has_manager`.  The original only assigned it when
    # the manager directory was found, so a later `if has_manager:` check
    # raised NameError whenever the manager (or ComfyUI itself) was absent.
    global has_manager
    has_manager = globals().get("has_manager", False)

    comfyui_path = find_path("ComfyUI")
    if comfyui_path is not None and os.path.isdir(comfyui_path):
        sys.path.append(comfyui_path)

        manager_path = os.path.join(
            comfyui_path, "custom_nodes", "ComfyUI-Manager", "glob"
        )

        # Only register the manager package if it exists and is non-empty.
        if os.path.isdir(manager_path) and os.listdir(manager_path):
            sys.path.append(manager_path)
            has_manager = True

        import __main__

        # Some ComfyUI internals read __main__.__file__; under embedded
        # interpreters it can be missing, so synthesize a sensible value.
        if getattr(__main__, "__file__", None) is None:
            __main__.__file__ = os.path.join(comfyui_path, "main.py")

        print(f"'{comfyui_path}' added to sys.path")
87
+
88
+
89
def add_extra_model_paths() -> None:
    """
    Locate the optional extra_model_paths.yaml file and load it so any
    additional model directories it declares become visible to ComfyUI.
    """
    from comfy.options import enable_args_parsing

    enable_args_parsing()
    from utils.extra_config import load_extra_path_config

    config_path = find_path("extra_model_paths.yaml")

    if config_path is None:
        print("Could not find the extra_model_paths config file.")
    else:
        load_extra_path_config(config_path)
104
+
105
+
106
def import_custom_nodes() -> None:
    """Find all custom nodes in the custom_nodes folder and add those node objects to NODE_CLASS_MAPPINGS

    This function sets up a new asyncio event loop, initializes the PromptServer,
    creates a PromptQueue, and initializes the custom nodes.

    NOTE(review): this entire function is redefined verbatim later in the
    file; that second definition shadows this one, so this copy is dead
    code at runtime — one of the two should be deleted.

    NOTE(review): `has_manager` is only assigned inside
    add_comfyui_directory_to_sys_path() when the manager is found, so the
    check below can raise NameError when it was never set — confirm.
    """
    if has_manager:
        try:
            import manager_core as manager
        except ImportError:
            # NOTE(review): despite the "proceeding" message, this return
            # skips node initialization entirely — verify intent.
            print("Could not import manager_core, proceeding without it.")
            return
        else:
            if hasattr(manager, "get_config"):
                print("Patching manager_core.get_config to enforce offline mode.")
                try:
                    get_config = manager.get_config

                    # Wrapper forces the manager's network_mode to "offline"
                    # so node initialization cannot reach the network.
                    def _get_config(*args, **kwargs):
                        config = get_config(*args, **kwargs)
                        config["network_mode"] = "offline"
                        return config

                    manager.get_config = _get_config
                except Exception as e:
                    print("Failed to patch manager_core.get_config:", e)

    import asyncio
    import execution
    from nodes import init_extra_nodes
    import server

    # Creating a new event loop and setting it as the default loop
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    async def inner():
        # Creating an instance of PromptServer with the loop
        server_instance = server.PromptServer(loop)
        execution.PromptQueue(server_instance)

        # Initializing custom nodes
        await init_extra_nodes(init_custom_nodes=True)

    loop.run_until_complete(inner())
151
+
152
+
153
def save_image_wrapper(context, cls):
    """Wrap a SaveImage node class so images go to the --output target.

    When args.output is None the original class is returned unchanged.
    Otherwise a subclass is returned whose save_images() writes either to
    stdout (output == "-") or to the requested file/directory, instead of
    the default ComfyUI output directory.

    Args:
        context: The active stdout-redirect context manager (or None); it
            is temporarily exited while raw PNG bytes are written to stdout.
        cls: The SaveImage node class to wrap.
    """
    if args.output is None:
        return cls

    from PIL import Image, ImageOps, ImageSequence
    from PIL.PngImagePlugin import PngInfo

    import numpy as np

    class WrappedSaveImage(cls):
        # Class-level counter used to generate unique _NNNNN suffixes.
        counter = 0

        def save_images(
            self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None
        ):
            if args.output is None:
                return super().save_images(
                    images, filename_prefix, prompt, extra_pnginfo
                )
            else:
                if len(images) > 1 and args.output == "-":
                    raise ValueError("Cannot save multiple images to stdout")
                filename_prefix += self.prefix_append

                results = list()
                for batch_number, image in enumerate(images):
                    # Tensor (H, W, C) in [0, 1] -> 8-bit PIL image.
                    i = 255.0 * image.cpu().numpy()
                    img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
                    metadata = None
                    if not args.disable_metadata:
                        # Embed the workflow prompt/extra info as PNG text chunks.
                        metadata = PngInfo()
                        if prompt is not None:
                            metadata.add_text("prompt", json.dumps(prompt))
                        if extra_pnginfo is not None:
                            for x in extra_pnginfo:
                                metadata.add_text(x, json.dumps(extra_pnginfo[x]))

                    if args.output == "-":
                        # Hack to briefly restore stdout
                        if context is not None:
                            context.__exit__(None, None, None)
                        try:
                            img.save(
                                sys.stdout.buffer,
                                format="png",
                                pnginfo=metadata,
                                compress_level=self.compress_level,
                            )
                        finally:
                            # Re-enter the redirect so later prints stay on stderr.
                            if context is not None:
                                context.__enter__()
                    else:
                        # Resolve --output into (subfolder, file name).
                        subfolder = ""
                        if len(images) == 1:
                            if os.path.isdir(args.output):
                                subfolder = args.output
                                file = "output.png"
                            else:
                                subfolder, file = os.path.split(args.output)
                                if subfolder == "":
                                    subfolder = os.getcwd()
                        else:
                            if os.path.isdir(args.output):
                                subfolder = args.output
                                file = filename_prefix
                            else:
                                subfolder, file = os.path.split(args.output)

                            if subfolder == "":
                                subfolder = os.getcwd()

                        files = os.listdir(subfolder)
                        file_pattern = file
                        # Bump the counter until the generated name is unused.
                        while True:
                            filename_with_batch_num = file_pattern.replace(
                                "%batch_num%", str(batch_number)
                            )
                            file = (
                                f"{filename_with_batch_num}_{self.counter:05}.png"
                            )
                            self.counter += 1

                            if file not in files:
                                break

                        img.save(
                            os.path.join(subfolder, file),
                            pnginfo=metadata,
                            compress_level=self.compress_level,
                        )
                        print("Saved image to", os.path.join(subfolder, file))
                        results.append(
                            {
                                "filename": file,
                                "subfolder": subfolder,
                                "type": self.type,
                            }
                        )

                return {"ui": {"images": results}}

    return WrappedSaveImage
255
+
256
+
257
def parse_arg(s: Any, default: Any = None) -> Any:
    """Parse *s* as JSON, returning it unchanged when that is not possible."""
    # Script invocations and non-string values are passed through untouched;
    # only string inputs from library callers are JSON-decoded.
    if __name__ != "__main__" and isinstance(s, str):
        try:
            return json.loads(s)
        except json.JSONDecodeError:
            pass
    return s
266
+
267
+
268
# Command-line interface for the converted workflow.  Every node input is
# exposed as its own flag; values are JSON-parsed by parse_arg() at use time.
parser = argparse.ArgumentParser(
    description="A converted ComfyUI workflow. Node inputs listed below. Values passed should be valid JSON (assumes string if not valid JSON)."
)

# (flag, default, help) triples for the autogenerated per-node inputs.
_NODE_INPUT_ARGS = [
    ("--width1", 5176, 'Argument 0, input `width` for node "Empty Latent Image" id 5 (autogenerated)'),
    ("--height2", 3784, 'Argument 1, input `height` for node "Empty Latent Image" id 5 (autogenerated)'),
    ("--batch_size3", 1, 'Argument 2, input `batch_size` for node "Empty Latent Image" id 5 (autogenerated)'),
    ("--ckpt_name4", "SDXLCheckpoint.safetensors", 'Argument 0, input `ckpt_name` for node "Load Checkpoint" id 14 (autogenerated)'),
    ("--lora_name5", "dmd2_sdxl_4step_lora_fp16.safetensors", 'Argument 2, input `lora_name` for node "Load LoRA" id 17 (autogenerated)'),
    ("--strength_model6", 1, 'Argument 3, input `strength_model` for node "Load LoRA" id 17 (autogenerated)'),
    ("--strength_clip7", 1, 'Argument 4, input `strength_clip` for node "Load LoRA" id 17 (autogenerated)'),
    ("--text8", "Xx_negative_xX", 'Argument 0, input `text` for node "CLIP Text Encode (Prompt)" id 7 (autogenerated)'),
    ("--text9", "Xx_positive_xX", 'Argument 0, input `text` for node "CLIP Text Encode (Prompt)" id 18 (autogenerated)'),
    ("--block_number10", 3, 'Argument 1, input `block_number` for node "PatchModelAddDownscale (Kohya Deep Shrink)" id 16 (autogenerated)'),
    ("--downscale_factor11", 2, 'Argument 2, input `downscale_factor` for node "PatchModelAddDownscale (Kohya Deep Shrink)" id 16 (autogenerated)'),
    ("--start_percent12", 0, 'Argument 3, input `start_percent` for node "PatchModelAddDownscale (Kohya Deep Shrink)" id 16 (autogenerated)'),
    ("--end_percent13", 0.5000000000000001, 'Argument 4, input `end_percent` for node "PatchModelAddDownscale (Kohya Deep Shrink)" id 16 (autogenerated)'),
    ("--downscale_after_skip14", True, 'Argument 5, input `downscale_after_skip` for node "PatchModelAddDownscale (Kohya Deep Shrink)" id 16 (autogenerated)'),
    ("--downscale_method15", "bicubic", 'Argument 6, input `downscale_method` for node "PatchModelAddDownscale (Kohya Deep Shrink)" id 16 (autogenerated)'),
    ("--upscale_method16", "bicubic", 'Argument 7, input `upscale_method` for node "PatchModelAddDownscale (Kohya Deep Shrink)" id 16 (autogenerated)'),
    ("--seed17", 64836095259134, 'Argument 1, input `seed` for node "KSampler" id 3 (autogenerated)'),
    ("--steps18", 8, 'Argument 2, input `steps` for node "KSampler" id 3 (autogenerated)'),
    ("--cfg19", 1, 'Argument 3, input `cfg` for node "KSampler" id 3 (autogenerated)'),
    ("--sampler_name20", "lcm", 'Argument 4, input `sampler_name` for node "KSampler" id 3 (autogenerated)'),
    ("--scheduler21", "beta", 'Argument 5, input `scheduler` for node "KSampler" id 3 (autogenerated)'),
    ("--denoise22", 1, 'Argument 9, input `denoise` for node "KSampler" id 3 (autogenerated)'),
    ("--filename_prefix23", "Fast", 'Argument 1, input `filename_prefix` for node "Save Image" id 9 (autogenerated)'),
]
for _flag, _default, _help in _NODE_INPUT_ARGS:
    parser.add_argument(_flag, default=_default, help=_help)

# Generic, non-autogenerated options.
parser.add_argument(
    "--queue-size",
    "-q",
    type=int,
    default=1,
    help="How many times the workflow will be executed (default: 1)",
)

parser.add_argument(
    "--comfyui-directory",
    "-c",
    default=None,
    help="Where to look for ComfyUI (default: current directory)",
)

parser.add_argument(
    "--output",
    "-o",
    default=None,
    help="The location to save the output image. Either a file path, a directory, or - for stdout (default: the ComfyUI output directory)",
)

parser.add_argument(
    "--disable-metadata",
    action="store_true",
    help="Disables writing workflow metadata to the outputs",
)
436
+
437
+
438
# Everything after a bare "--" on the command line is forwarded to ComfyUI
# itself instead of being consumed by this script's parser.
comfy_args = [sys.argv[0]]
if __name__ == "__main__" and "--" in sys.argv:
    split_at = sys.argv.index("--")
    comfy_args.extend(sys.argv[split_at + 1 :])
    sys.argv = sys.argv[:split_at]

# Arguments are only parsed eagerly when executed as a script; when this
# module is imported, `args` is populated lazily inside main().
args = None
if __name__ == "__main__":
    args = parser.parse_args()
    sys.argv = comfy_args

# When the image itself goes to stdout, keep stdout clean by redirecting
# all prints to stderr; otherwise use a no-op context manager.
if args is not None and args.output is not None and args.output == "-":
    ctx = contextlib.redirect_stdout(sys.stderr)
else:
    ctx = contextlib.nullcontext()
452
+
453
# The original workflow graph (ComfyUI API/prompt format), embedded verbatim.
# It is only used as the `prompt` payload written into the PNG metadata of
# saved images, so edits to the CLI flags above do NOT alter this record.
PROMPT_DATA = json.loads(
    '{"3": {"inputs": {"seed": 64836095259134, "steps": 8, "cfg": 1, "sampler_name": "lcm", "scheduler": "beta", "denoise": 1, "model": ["16", 0], "positive": ["18", 0], "negative": ["7", 0], "latent_image": ["5", 0]}, "class_type": "KSampler", "_meta": {"title": "KSampler"}}, "5": {"inputs": {"width": 5176, "height": 3784, "batch_size": 1}, "class_type": "EmptyLatentImage", "_meta": {"title": "Empty Latent Image"}}, "7": {"inputs": {"text": "Xx_negative_xX", "clip": ["17", 1]}, "class_type": "CLIPTextEncode", "_meta": {"title": "CLIP Text Encode (Prompt)"}}, "8": {"inputs": {"samples": ["3", 0], "vae": ["14", 2]}, "class_type": "VAEDecode", "_meta": {"title": "VAE Decode"}}, "9": {"inputs": {"filename_prefix": "Fast", "images": ["8", 0]}, "class_type": "SaveImage", "_meta": {"title": "Save Image"}}, "14": {"inputs": {"ckpt_name": "SDXLCheckpoint.safetensors"}, "class_type": "CheckpointLoaderSimple", "_meta": {"title": "Load Checkpoint"}}, "16": {"inputs": {"block_number": 3, "downscale_factor": 2, "start_percent": 0, "end_percent": 0.5000000000000001, "downscale_after_skip": true, "downscale_method": "bicubic", "upscale_method": "bicubic", "model": ["17", 0]}, "class_type": "PatchModelAddDownscale", "_meta": {"title": "PatchModelAddDownscale (Kohya Deep Shrink)"}}, "17": {"inputs": {"lora_name": "dmd2_sdxl_4step_lora_fp16.safetensors", "strength_model": 1, "strength_clip": 1, "model": ["14", 0], "clip": ["14", 1]}, "class_type": "LoraLoader", "_meta": {"title": "Load LoRA"}}, "18": {"inputs": {"text": "Xx_positive_xX", "clip": ["17", 1]}, "class_type": "CLIPTextEncode", "_meta": {"title": "CLIP Text Encode (Prompt)"}}}'
)
456
+
457
+
458
def import_custom_nodes() -> None:
    """Initialize ComfyUI's extra/custom nodes into NODE_CLASS_MAPPINGS.

    Sets up a new asyncio event loop, instantiates the PromptServer and a
    PromptQueue (some custom nodes expect a server object to exist), then
    runs init_extra_nodes() to register extra and custom nodes.

    When ComfyUI-Manager is available, its get_config() is patched so the
    manager operates in offline network mode during initialization.
    """
    # Fix: globals().get() avoids a NameError when `has_manager` was never
    # assigned (it is only set when the manager directory is discovered).
    if globals().get("has_manager", False):
        try:
            import manager_core as manager
        except ImportError:
            # Fix: do not return here — node initialization below must
            # still run, as the printed message promises.
            print("Could not import manager_core, proceeding without it.")
            manager = None
        if manager is not None and hasattr(manager, "get_config"):
            print("Patching manager_core.get_config to enforce offline mode.")
            try:
                get_config = manager.get_config

                def _get_config(*args, **kwargs):
                    # Force offline mode so node init cannot hit the network.
                    config = get_config(*args, **kwargs)
                    config["network_mode"] = "offline"
                    return config

                manager.get_config = _get_config
            except Exception as e:
                print("Failed to patch manager_core.get_config:", e)

    import asyncio
    import execution
    from nodes import init_extra_nodes
    import server

    # A fresh event loop, set as current, keeps node initialization
    # isolated from any loop the host application may already run.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    async def inner():
        # PromptServer/PromptQueue must exist before nodes initialize.
        server_instance = server.PromptServer(loop)
        execution.PromptQueue(server_instance)

        # Initializing custom nodes
        await init_extra_nodes(init_custom_nodes=True)

    loop.run_until_complete(inner())
503
+
504
+
505
# One-shot guards so repeated main() calls (library usage) do not re-append
# sys.path entries or re-run the custom-node initialization machinery.
_custom_nodes_imported = False
_custom_path_added = False
507
+
508
+
509
def main(*func_args, **func_kwargs):
    """Execute the converted workflow end-to-end.

    As a script (__name__ == "__main__") the CLI-parsed `args` are used and
    the final image is saved via the (possibly wrapped) SaveImage node.  As
    a library call, node inputs come from **func_kwargs overlaid on the
    parser defaults, and the function returns the SaveImage inputs
    (filename_prefix, images, prompt) instead of saving.

    NOTE(review): positional *func_args are accepted but never used —
    confirm whether they should map onto the autogenerated inputs.
    """
    global args, _custom_nodes_imported, _custom_path_added
    if __name__ == "__main__":
        if args is None:
            args = parser.parse_args()
    else:
        # Library call: start from parser defaults, overlay caller kwargs.
        defaults = dict(
            (arg, parser.get_default(arg))
            for arg in ["queue_size", "comfyui_directory", "output", "disable_metadata"]
            + [
                "width1",
                "height2",
                "batch_size3",
                "ckpt_name4",
                "lora_name5",
                "strength_model6",
                "strength_clip7",
                "text8",
                "text9",
                "block_number10",
                "downscale_factor11",
                "start_percent12",
                "end_percent13",
                "downscale_after_skip14",
                "downscale_method15",
                "upscale_method16",
                "seed17",
                "steps18",
                "cfg19",
                "sampler_name20",
                "scheduler21",
                "denoise22",
                "filename_prefix23",
            ]
        )

        all_args = dict()
        all_args.update(defaults)
        all_args.update(func_kwargs)

        args = argparse.Namespace(**all_args)

    with ctx:
        # One-time environment setup: sys.path entries, model paths, nodes.
        if not _custom_path_added:
            add_comfyui_directory_to_sys_path()
            add_extra_model_paths()

            _custom_path_added = True

        if not _custom_nodes_imported:
            import_custom_nodes()

            _custom_nodes_imported = True

        from nodes import NODE_CLASS_MAPPINGS

    # inference_mode(): no autograd bookkeeping for the whole pipeline.
    with torch.inference_mode(), ctx:
        emptylatentimage = NODE_CLASS_MAPPINGS["EmptyLatentImage"]()
        emptylatentimage_5 = emptylatentimage.generate(
            width=parse_arg(args.width1),
            height=parse_arg(args.height2),
            batch_size=parse_arg(args.batch_size3),
        )

        checkpointloadersimple = NODE_CLASS_MAPPINGS["CheckpointLoaderSimple"]()
        checkpointloadersimple_14 = checkpointloadersimple.load_checkpoint(
            ckpt_name=parse_arg(args.ckpt_name4)
        )

        loraloader = NODE_CLASS_MAPPINGS["LoraLoader"]()
        loraloader_17 = loraloader.load_lora(
            lora_name=parse_arg(args.lora_name5),
            strength_model=parse_arg(args.strength_model6),
            strength_clip=parse_arg(args.strength_clip7),
            model=get_value_at_index(checkpointloadersimple_14, 0),
            clip=get_value_at_index(checkpointloadersimple_14, 1),
        )

        # Negative (node 7) and positive (node 18) prompts share the
        # LoRA-patched CLIP.
        cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
        cliptextencode_7 = cliptextencode.encode(
            text=parse_arg(args.text8), clip=get_value_at_index(loraloader_17, 1)
        )

        cliptextencode_18 = cliptextencode.encode(
            text=parse_arg(args.text9), clip=get_value_at_index(loraloader_17, 1)
        )

        patchmodeladddownscale = NODE_CLASS_MAPPINGS["PatchModelAddDownscale"]()
        ksampler = NODE_CLASS_MAPPINGS["KSampler"]()
        vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
        saveimage = save_image_wrapper(ctx, NODE_CLASS_MAPPINGS["SaveImage"])()
        # Repeat sampling/decoding/saving --queue-size times.
        for q in range(args.queue_size):
            patchmodeladddownscale_16 = patchmodeladddownscale.patch(
                block_number=parse_arg(args.block_number10),
                downscale_factor=parse_arg(args.downscale_factor11),
                start_percent=parse_arg(args.start_percent12),
                end_percent=parse_arg(args.end_percent13),
                downscale_after_skip=parse_arg(args.downscale_after_skip14),
                downscale_method=parse_arg(args.downscale_method15),
                upscale_method=parse_arg(args.upscale_method16),
                model=get_value_at_index(loraloader_17, 0),
            )

            ksampler_3 = ksampler.sample(
                seed=parse_arg(args.seed17),
                steps=parse_arg(args.steps18),
                cfg=parse_arg(args.cfg19),
                sampler_name=parse_arg(args.sampler_name20),
                scheduler=parse_arg(args.scheduler21),
                denoise=parse_arg(args.denoise22),
                model=get_value_at_index(patchmodeladddownscale_16, 0),
                positive=get_value_at_index(cliptextencode_18, 0),
                negative=get_value_at_index(cliptextencode_7, 0),
                latent_image=get_value_at_index(emptylatentimage_5, 0),
            )

            vaedecode_8 = vaedecode.decode(
                samples=get_value_at_index(ksampler_3, 0),
                vae=get_value_at_index(checkpointloadersimple_14, 2),
            )

            # Library callers get the data back (first iteration only);
            # script runs save to disk/stdout on every iteration.
            if __name__ != "__main__":
                return dict(
                    filename_prefix=parse_arg(args.filename_prefix23),
                    images=get_value_at_index(vaedecode_8, 0),
                    prompt=PROMPT_DATA,
                )
            else:
                saveimage_9 = saveimage.save_images(
                    filename_prefix=parse_arg(args.filename_prefix23),
                    images=get_value_at_index(vaedecode_8, 0),
                    prompt=PROMPT_DATA,
                )
642
+
643
+
644
+ if __name__ == "__main__":
645
+ for i in range(repeats):
646
+ main()