kg-09 committed on
Commit
50ecbef
·
verified ·
1 Parent(s): b6580d1

Update workflow.py

Browse files
Files changed (1) hide show
  1. workflow.py +685 -685
workflow.py CHANGED
@@ -1,686 +1,686 @@
1
- import os
2
- import random
3
- import sys
4
- from typing import Sequence, Mapping, Any, Union
5
- import torch
6
- import time
7
- from PIL import Image
8
- import numpy as np
9
-
10
-
11
def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
    """Return the value at *index* of a sequence or mapping.

    ComfyUI nodes usually return plain tuples, but some return a mapping
    whose payload sits under the "result" key; fall back to that on
    KeyError.
    """
    try:
        value = obj[index]
    except KeyError:
        value = obj["result"][index]
    return value
17
-
18
-
19
def find_path(name: str, path: str = None) -> str:
    """Walk up from *path* (default: cwd) looking for an entry named *name*.

    Returns the full path to *name* in the first ancestor directory that
    contains it, or None once the filesystem root is reached without a
    match.
    """
    current = os.getcwd() if path is None else path
    while True:
        if name in os.listdir(current):
            found = os.path.join(current, name)
            print(f"{name} found: {found}")
            return found
        parent = os.path.dirname(current)
        if parent == current:  # dirname() of the root is the root itself
            return None
        current = parent
31
-
32
-
33
def add_comfyui_directory_to_sys_path() -> None:
    """Locate the ComfyUI checkout and append it to sys.path."""
    comfyui_path = find_path("ComfyUI")
    if comfyui_path is None:
        return
    if not os.path.isdir(comfyui_path):
        return
    sys.path.append(comfyui_path)
    print(f"'{comfyui_path}' added to sys.path")
39
-
40
-
41
def add_extra_model_paths() -> None:
    """Load extra_model_paths.yaml (if present) via ComfyUI's config loader."""
    # The loader moved between ComfyUI versions; try the old location first.
    try:
        from main import load_extra_path_config
    except ImportError:
        print("Could not import load_extra_path_config from main.py. Looking in utils.extra_config instead.")
        from utils.extra_config import load_extra_path_config
    config_path = find_path("extra_model_paths.yaml")
    if config_path is None:
        print("Could not find the extra_model_paths config file.")
    else:
        load_extra_path_config(config_path)
53
-
54
-
55
- add_comfyui_directory_to_sys_path()
56
- add_extra_model_paths()
57
-
58
-
59
def import_custom_nodes() -> None:
    """Initialize ComfyUI's custom nodes.

    init_extra_nodes() expects a live PromptServer with an attached
    PromptQueue, so a minimal pair is stood up on a fresh event loop
    before initialization.
    """
    import asyncio
    import execution
    from nodes import init_extra_nodes
    import server

    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    prompt_server = server.PromptServer(event_loop)
    execution.PromptQueue(prompt_server)
    init_extra_nodes()
70
-
71
-
72
- from nodes import NODE_CLASS_MAPPINGS
73
-
74
-
75
class FitCheckWorkflow:
    """Virtual try-on ("fit check") pipeline built on ComfyUI nodes.

    __init__ loads every node class and all heavyweight models exactly once;
    __call__ then runs the full garment-swap workflow (mask generation,
    Gemini prompt synthesis, Flux fill inpainting, stitching) per request.
    """

    def __init__(self):
        import_custom_nodes()
        with torch.inference_mode():
            # Initialize all node classes
            self.loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
            self.comfyuivtonmaskloader = NODE_CLASS_MAPPINGS["ComfyUIVtonMaskLoader"]()
            self.emptyimage = NODE_CLASS_MAPPINGS["EmptyImage"]()
            self.rmbg = NODE_CLASS_MAPPINGS["RMBG"]()
            self.layerutility_imageremovealpha = NODE_CLASS_MAPPINGS["LayerUtility: ImageRemoveAlpha"]()
            self.inpaintcropimproved = NODE_CLASS_MAPPINGS["InpaintCropImproved"]()
            self.geminiflash = NODE_CLASS_MAPPINGS["GeminiFlash"]()
            self.stringfunctionpysssss = NODE_CLASS_MAPPINGS["StringFunction|pysssss"]()
            self.cr_text_replace = NODE_CLASS_MAPPINGS["CR Text Replace"]()
            self.dualcliploader = NODE_CLASS_MAPPINGS["DualCLIPLoader"]()
            self.cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
            self.vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
            self.unetloader = NODE_CLASS_MAPPINGS["UNETLoader"]()
            self.stylemodelloader = NODE_CLASS_MAPPINGS["StyleModelLoader"]()
            self.clipvisionloader = NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
            self.clipvisionencode = NODE_CLASS_MAPPINGS["CLIPVisionEncode"]()
            self.loraloadermodelonly = NODE_CLASS_MAPPINGS["LoraLoaderModelOnly"]()
            self.fluxguidance = NODE_CLASS_MAPPINGS["FluxGuidance"]()
            self.stylemodelapply = NODE_CLASS_MAPPINGS["StyleModelApply"]()
            self.conditioningzeroout = NODE_CLASS_MAPPINGS["ConditioningZeroOut"]()
            self.controlnetloader = NODE_CLASS_MAPPINGS["ControlNetLoader"]()
            self.setunioncontrolnettype = NODE_CLASS_MAPPINGS["SetUnionControlNetType"]()
            self.upscalemodelloader = NODE_CLASS_MAPPINGS["UpscaleModelLoader"]()
            self.imageupscalewithmodel = NODE_CLASS_MAPPINGS["ImageUpscaleWithModel"]()
            self.imageresize = NODE_CLASS_MAPPINGS["ImageResize+"]()
            self.comfyuivtonmaskgenerator = NODE_CLASS_MAPPINGS["ComfyUIVtonMaskGenerator"]()
            self.imagetomask = NODE_CLASS_MAPPINGS["ImageToMask"]()
            self.layermask_maskgrow = NODE_CLASS_MAPPINGS["LayerMask: MaskGrow"]()
            self.loadimagemask = NODE_CLASS_MAPPINGS["LoadImageMask"]()
            self.mask_fill_holes = NODE_CLASS_MAPPINGS["Mask Fill Holes"]()
            self.resizemask = NODE_CLASS_MAPPINGS["ResizeMask"]()
            self.imageconcanate = NODE_CLASS_MAPPINGS["ImageConcanate"]()
            self.getimagesize = NODE_CLASS_MAPPINGS["GetImageSize+"]()
            self.pixelperfectresolution = NODE_CLASS_MAPPINGS["PixelPerfectResolution"]()
            self.aio_preprocessor = NODE_CLASS_MAPPINGS["AIO_Preprocessor"]()
            self.layerutility_purgevram_v2 = NODE_CLASS_MAPPINGS["LayerUtility: PurgeVRAM V2"]()
            self.controlnetapplyadvanced = NODE_CLASS_MAPPINGS["ControlNetApplyAdvanced"]()
            self.getimagesizeandcount = NODE_CLASS_MAPPINGS["GetImageSizeAndCount"]()
            self.sammodelloader_segment_anything = NODE_CLASS_MAPPINGS["SAMModelLoader (segment anything)"]()
            self.groundingdinomodelloader_segment_anything = NODE_CLASS_MAPPINGS["GroundingDinoModelLoader (segment anything)"]()
            self.groundingdinosamsegment_segment_anything = NODE_CLASS_MAPPINGS["GroundingDinoSAMSegment (segment anything)"]()
            self.maskcomposite = NODE_CLASS_MAPPINGS["MaskComposite"]()
            self.apersonmaskgenerator = NODE_CLASS_MAPPINGS["APersonMaskGenerator"]()
            self.masktoimage = NODE_CLASS_MAPPINGS["MaskToImage"]()
            self.inpaintmodelconditioning = NODE_CLASS_MAPPINGS["InpaintModelConditioning"]()
            self.differentialdiffusion = NODE_CLASS_MAPPINGS["DifferentialDiffusion"]()
            self.ksampler = NODE_CLASS_MAPPINGS["KSampler"]()
            self.vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
            self.imagecrop = NODE_CLASS_MAPPINGS["ImageCrop+"]()
            self.inpaintstitchimproved = NODE_CLASS_MAPPINGS["InpaintStitchImproved"]()
            self.showtextpysssss = NODE_CLASS_MAPPINGS["ShowText|pysssss"]()

            # Preload models / constant node outputs once so each __call__
            # only pays for per-image work.
            self.comfyuivtonmaskloader_983 = self.comfyuivtonmaskloader.load_mask_model(device="cpu")
            self.emptyimage_1015 = self.emptyimage.generate(width=768, height=1024, batch_size=1, color=0)
            self.dualcliploader_1024 = self.dualcliploader.load_clip(
                clip_name1="clip_l.safetensors",
                clip_name2="t5xxl_fp8_e4m3fn.safetensors",
                type="flux",
                device="default",
            )
            self.vaeloader_1023 = self.vaeloader.load_vae(vae_name="ae.safetensors")
            self.unetloader_1025 = self.unetloader.load_unet(
                unet_name="flux1-fill-dev.safetensors", weight_dtype="fp8_e4m3fn"
            )
            self.stylemodelloader_1026 = self.stylemodelloader.load_style_model(
                style_model_name="flux1-redux-dev.safetensors"
            )
            self.clipvisionloader_1151 = self.clipvisionloader.load_clip(
                clip_name="sigclip_vision_patch14_384.safetensors"
            )
            self.controlnetloader_1042 = self.controlnetloader.load_controlnet(
                control_net_name="flux-union-pro-v2.safetensors"
            )
            self.setunioncontrolnettype_1041 = self.setunioncontrolnettype.set_controlnet_type(
                type="depth", control_net=get_value_at_index(self.controlnetloader_1042, 0)
            )
            self.upscalemodelloader_1155 = self.upscalemodelloader.load_model(
                model_name="RealESRGAN_x2.pth"
            )
            self.comfyuivtonmaskloader_1173 = self.comfyuivtonmaskloader.load_mask_model(device="cpu")
            self.sammodelloader_segment_anything_1167 = self.sammodelloader_segment_anything.main(
                model_name="sam_vit_h (2.56GB)"
            )
            self.groundingdinomodelloader_segment_anything_1168 = self.groundingdinomodelloader_segment_anything.main(
                model_name="GroundingDINO_SwinT_OGC (694MB)"
            )

    @torch.inference_mode()
    def __call__(self, *args, **kwargs):
        """Run the try-on workflow and return the stitched result image(s).

        Keyword Args:
            api_key (str): Gemini API key. Defaults to the GEMINI_API_KEY
                environment variable (empty string if unset).
            swap_type (str): "Upper-body", "Lower-body", "Dresses", or
                "Manual". Manual mode reads a user-supplied mask_img.png.
            mode (str): "speed", "balanced", or "quality" — trades sampler
                steps and turbo-LoRA strength for output quality.
            seed (int): Sampler seed; random by default.

        Returns:
            list[PIL.Image.Image]: result images; also written to disk as
            fitcheck_output.png (additional batch images get an index
            suffix).

        Raises:
            ValueError: on an invalid swap_type or mode.
        """
        start = time.time()

        # SECURITY: the API key must come from the caller or the
        # environment — never hardcode a literal key in source control.
        api_key = kwargs.get("api_key", os.environ.get("GEMINI_API_KEY", ""))
        swap_type = kwargs.get("swap_type", "Dresses")
        mode = kwargs.get("mode", "balanced")
        # Keep the seed inside the unsigned 64-bit range (2**64 itself
        # would be one past the maximum representable seed).
        seed = kwargs.get("seed", random.randint(1, 2**64 - 1))

        # Validate parameters
        valid_swap_types = ["Upper-body", "Lower-body", "Dresses", "Manual"]
        valid_modes = ["speed", "balanced", "quality"]

        if swap_type not in valid_swap_types:
            raise ValueError(f"swap_type must be one of {valid_swap_types}")
        if mode not in valid_modes:
            raise ValueError(f"mode must be one of {valid_modes}")

        print(f"Running FitCheck with swap_type: {swap_type}, mode: {mode}")

        # Load the person image and the garment image from fixed paths.
        loadimage_904 = self.loadimage.load_image(image="model_img.png")
        loadimage_909 = self.loadimage.load_image(image="cloth_img.png")

        # Remove the garment's background so only the clothing remains.
        rmbg_1160 = self.rmbg.process_image(
            model="RMBG-2.0",
            sensitivity=1,
            process_res=1024,
            mask_blur=0,
            mask_offset=0,
            invert_output=False,
            refine_foreground=True,
            background="Alpha",
            background_color="#000000",
            image=get_value_at_index(loadimage_909, 0),
        )

        layerutility_imageremovealpha_1158 = self.layerutility_imageremovealpha.image_remove_alpha(
            fill_background=True,
            background_color="#000000",
            RGBA_image=get_value_at_index(loadimage_909, 0),
            mask=get_value_at_index(rmbg_1160, 1),
        )

        # Crop the garment to its mask and normalize to 768x1024.
        inpaintcropimproved_1003 = self.inpaintcropimproved.inpaint_crop(
            downscale_algorithm="bilinear",
            upscale_algorithm="bicubic",
            preresize=False,
            preresize_mode="ensure minimum resolution",
            preresize_min_width=1024,
            preresize_min_height=1024,
            preresize_max_width=16384,
            preresize_max_height=16384,
            mask_fill_holes=True,
            mask_expand_pixels=0,
            mask_invert=False,
            mask_blend_pixels=0,
            mask_hipass_filter=0.1,
            extend_for_outpainting=False,
            extend_up_factor=1,
            extend_down_factor=1,
            extend_left_factor=1,
            extend_right_factor=1,
            context_from_mask_extend_factor=1.1500000000000001,
            output_resize_to_target_size=True,
            output_target_width=768,
            output_target_height=1024,
            output_padding="0",
            image=get_value_at_index(layerutility_imageremovealpha_1158, 0),
            mask=get_value_at_index(rmbg_1160, 1),
        )

        # Ask Gemini to describe the garment; the description feeds the
        # text conditioning below.
        geminiflash_1120 = self.geminiflash.generate_content(
            prompt="What kind of outfit is this,models size like slim,plus size etc,and describe it clearly in short, return to the point combined prompt in plain text",
            input_type="image",
            model_version="gemini-2.0-flash",
            operation_mode="analysis",
            chat_mode=False,
            clear_history=True,
            Additional_Context="",
            api_key=api_key,
            max_output_tokens=8192,
            temperature=0.4,
            structured_output=False,
            max_images=6,
            batch_count=1,
            seed=random.randint(1, 2**64 - 1),
            images=get_value_at_index(inpaintcropimproved_1003, 1),
        )

        stringfunctionpysssss_1110 = self.stringfunctionpysssss.exec(
            action="append",
            tidy_tags="no",
            text_a="The fashion model wearing the [outfit]\n",
            text_b="The 2 shirts on both sides are exactly the same, same color, same logo, same text, same features",
            text_c="",
        )

        cr_text_replace_1119 = self.cr_text_replace.replace_text(
            find1="[outfit]",
            replace1=get_value_at_index(geminiflash_1120, 0),
            find2="",
            replace2="",
            find3="",
            replace3="",
            text=get_value_at_index(stringfunctionpysssss_1110, 0),
        )

        print("\n=================\n\n\n")
        print("Generated prompt:\n", get_value_at_index(cr_text_replace_1119, 0))
        print("\n\n\n=================\n")

        cliptextencode_1022 = self.cliptextencode.encode(
            text=get_value_at_index(cr_text_replace_1119, 0),
            clip=get_value_at_index(self.dualcliploader_1024, 0),
        )

        clipvisionencode_1027 = self.clipvisionencode.encode(
            crop="none",
            clip_vision=get_value_at_index(self.clipvisionloader_1151, 0),
            image=get_value_at_index(inpaintcropimproved_1003, 1),
        )

        # Always load the cat-vton LoRA first; it carries the try-on skill.
        loraloadermodelonly_1032 = self.loraloadermodelonly.load_lora_model_only(
            lora_name="cat-vton.safetensors",
            strength_model=1,
            model=get_value_at_index(self.unetloader_1025, 0),
        )

        # Mode selects turbo-LoRA strength and sampler step count.
        if mode == "speed":
            loraloadermodelonly_1031 = self.loraloadermodelonly.load_lora_model_only(
                lora_name="turbo.safetensors",
                strength_model=1.0,
                model=get_value_at_index(loraloadermodelonly_1032, 0),
            )
            current_model = get_value_at_index(loraloadermodelonly_1031, 0)
            steps = 11
        elif mode == "balanced":
            loraloadermodelonly_1031 = self.loraloadermodelonly.load_lora_model_only(
                lora_name="turbo.safetensors",
                strength_model=0.5,
                model=get_value_at_index(loraloadermodelonly_1032, 0),
            )
            current_model = get_value_at_index(loraloadermodelonly_1031, 0)
            steps = 17
        else:  # quality: no turbo LoRA, full step count
            current_model = get_value_at_index(loraloadermodelonly_1032, 0)
            steps = 34

        fluxguidance_1020 = self.fluxguidance.append(
            guidance=50, conditioning=get_value_at_index(cliptextencode_1022, 0)
        )

        stylemodelapply_1019 = self.stylemodelapply.apply_stylemodel(
            strength=1,
            strength_type="multiply",
            conditioning=get_value_at_index(fluxguidance_1020, 0),
            style_model=get_value_at_index(self.stylemodelloader_1026, 0),
            clip_vision_output=get_value_at_index(clipvisionencode_1027, 0),
        )

        conditioningzeroout_1021 = self.conditioningzeroout.zero_out(
            conditioning=get_value_at_index(fluxguidance_1020, 0)
        )

        # Upscale the model photo, then cap it at 1536px for mask work.
        imageupscalewithmodel_1156 = self.imageupscalewithmodel.upscale(
            upscale_model=get_value_at_index(self.upscalemodelloader_1155, 0),
            image=get_value_at_index(loadimage_904, 0),
        )

        imageresize_1058 = self.imageresize.execute(
            width=1536,
            height=1536,
            interpolation="nearest",
            method="keep proportion",
            condition="always",
            multiple_of=0,
            image=get_value_at_index(imageupscalewithmodel_1156, 0),
        )

        # Build the inpaint mask: generated automatically from the body-part
        # category, or loaded from disk in Manual mode.
        if swap_type != "Manual":
            comfyuivtonmaskgenerator_982 = self.comfyuivtonmaskgenerator.generate_mask(
                category=swap_type,
                offset_top=0,
                offset_bottom=0,
                offset_left=0,
                offset_right=0,
                mask_model=get_value_at_index(self.comfyuivtonmaskloader_983, 0),
                vton_image=get_value_at_index(imageresize_1058, 0),
            )

            imagetomask_990 = self.imagetomask.image_to_mask(
                channel="red", image=get_value_at_index(comfyuivtonmaskgenerator_982, 1)
            )

            layermask_maskgrow_891 = self.layermask_maskgrow.mask_grow(
                invert_mask=False,
                grow=0,
                blur=3,
                mask=get_value_at_index(imagetomask_990, 0),
            )

            resize_mask_source = get_value_at_index(layermask_maskgrow_891, 0)
        else:
            # Manual mode — user supplies the mask image.
            loadimage_manual_mask = self.loadimage.load_image(image="mask_img.png")

            imagetomask_manual = self.imagetomask.image_to_mask(
                channel="red", image=get_value_at_index(loadimage_manual_mask, 0)
            )
            mask_fill_holes_1147 = self.mask_fill_holes.fill_region(
                masks=get_value_at_index(imagetomask_manual, 0),
            )
            resize_mask_source = get_value_at_index(mask_fill_holes_1147, 0)

        resizemask_1059 = self.resizemask.resize(
            width=get_value_at_index(imageresize_1058, 1),
            height=get_value_at_index(imageresize_1058, 2),
            keep_proportions=False,
            upscale_method="nearest-exact",
            crop="disabled",
            mask=resize_mask_source,
        )

        # Crop the masked region of the person image for inpainting.
        inpaintcropimproved_999 = self.inpaintcropimproved.inpaint_crop(
            downscale_algorithm="nearest",
            upscale_algorithm="nearest",
            preresize=False,
            preresize_mode="ensure minimum resolution",
            preresize_min_width=1024,
            preresize_min_height=1024,
            preresize_max_width=16384,
            preresize_max_height=16384,
            mask_fill_holes=True,
            mask_expand_pixels=8,
            mask_invert=False,
            mask_blend_pixels=20,
            mask_hipass_filter=0.1,
            extend_for_outpainting=False,
            extend_up_factor=1,
            extend_down_factor=1,
            extend_left_factor=1,
            extend_right_factor=1,
            context_from_mask_extend_factor=1.0500000000000003,
            output_resize_to_target_size=True,
            output_target_width=768,
            output_target_height=1024,
            output_padding="64",
            image=get_value_at_index(imageresize_1058, 0),
            mask=get_value_at_index(resizemask_1059, 0),
        )

        # Side-by-side canvas used for the depth ControlNet pass.
        imageconcanate_1044 = self.imageconcanate.concatenate(
            direction="left",
            match_image_size=True,
            image1=get_value_at_index(inpaintcropimproved_999, 1),
            image2=get_value_at_index(self.emptyimage_1015, 0),
        )

        getimagesize_1047 = self.getimagesize.execute(
            image=get_value_at_index(imageconcanate_1044, 0)
        )

        pixelperfectresolution_1049 = self.pixelperfectresolution.execute(
            image_gen_width=get_value_at_index(getimagesize_1047, 0),
            image_gen_height=get_value_at_index(getimagesize_1047, 1),
            resize_mode="Just Resize",
            original_image=get_value_at_index(imageconcanate_1044, 0),
        )

        aio_preprocessor_1046 = self.aio_preprocessor.execute(
            preprocessor="Zoe_DepthAnythingPreprocessor",
            resolution=get_value_at_index(pixelperfectresolution_1049, 0),
            image=get_value_at_index(imageconcanate_1044, 0),
        )

        layerutility_purgevram_v2_1191 = self.layerutility_purgevram_v2.purge_vram_v2(
            purge_cache=True,
            purge_models=True,
            anything=get_value_at_index(aio_preprocessor_1046, 0),
        )

        controlnetapplyadvanced_1043 = self.controlnetapplyadvanced.apply_controlnet(
            strength=0.7000000000000002,
            start_percent=0,
            end_percent=0.5000000000000001,
            positive=get_value_at_index(stylemodelapply_1019, 0),
            negative=get_value_at_index(conditioningzeroout_1021, 0),
            control_net=get_value_at_index(self.setunioncontrolnettype_1041, 0),
            image=get_value_at_index(layerutility_purgevram_v2_1191, 0),
            vae=get_value_at_index(self.vaeloader_1023, 0),
        )

        # Person crop next to garment crop — the try-on input pair.
        imageconcanate_1013 = self.imageconcanate.concatenate(
            direction="left",
            match_image_size=True,
            image1=get_value_at_index(inpaintcropimproved_999, 1),
            image2=get_value_at_index(inpaintcropimproved_1003, 1),
        )

        # Refine the mask on the cropped region: subtract hands, face and
        # hair so they are not repainted (automatic modes only).
        if swap_type != "Manual":
            getimagesizeandcount_1165 = self.getimagesizeandcount.getsize(
                image=get_value_at_index(inpaintcropimproved_999, 1)
            )

            comfyuivtonmaskgenerator_1179 = self.comfyuivtonmaskgenerator.generate_mask(
                category=swap_type,
                offset_top=0,
                offset_bottom=0,
                offset_left=0,
                offset_right=0,
                mask_model=get_value_at_index(self.comfyuivtonmaskloader_1173, 0),
                vton_image=get_value_at_index(getimagesizeandcount_1165, 0),
            )

            imagetomask_1175 = self.imagetomask.image_to_mask(
                channel="red", image=get_value_at_index(comfyuivtonmaskgenerator_1179, 1)
            )

            groundingdinosamsegment_segment_anything_1176 = self.groundingdinosamsegment_segment_anything.main(
                prompt="hand",
                threshold=0.28,
                sam_model=get_value_at_index(self.sammodelloader_segment_anything_1167, 0),
                grounding_dino_model=get_value_at_index(self.groundingdinomodelloader_segment_anything_1168, 0),
                image=get_value_at_index(getimagesizeandcount_1165, 0),
            )

            layerutility_purgevram_v2_1192 = self.layerutility_purgevram_v2.purge_vram_v2(
                purge_cache=True,
                purge_models=True,
                anything=get_value_at_index(groundingdinosamsegment_segment_anything_1176, 1),
            )

            maskcomposite_1174 = self.maskcomposite.combine(
                x=0,
                y=0,
                operation="subtract",
                destination=get_value_at_index(imagetomask_1175, 0),
                source=get_value_at_index(layerutility_purgevram_v2_1192, 0),
            )

            apersonmaskgenerator_1181 = self.apersonmaskgenerator.generate_mask(
                face_mask=True,
                background_mask=False,
                hair_mask=False,
                body_mask=False,
                clothes_mask=False,
                confidence=0.4,
                refine_mask=True,
                images=get_value_at_index(getimagesizeandcount_1165, 0),
            )

            apersonmaskgenerator_1177 = self.apersonmaskgenerator.generate_mask(
                face_mask=False,
                background_mask=False,
                hair_mask=True,
                body_mask=False,
                clothes_mask=False,
                confidence=0.4,
                refine_mask=True,
                images=get_value_at_index(getimagesizeandcount_1165, 0),
            )

            maskcomposite_1171 = self.maskcomposite.combine(
                x=0,
                y=0,
                operation="add",
                destination=get_value_at_index(apersonmaskgenerator_1181, 0),
                source=get_value_at_index(apersonmaskgenerator_1177, 0),
            )

            maskcomposite_1169 = self.maskcomposite.combine(
                x=0,
                y=0,
                operation="subtract",
                destination=get_value_at_index(maskcomposite_1174, 0),
                source=get_value_at_index(maskcomposite_1171, 0),
            )

            layermask_maskgrow_1178 = self.layermask_maskgrow.mask_grow(
                invert_mask=False,
                grow=0,
                blur=3,
                mask=get_value_at_index(maskcomposite_1169, 0),
            )

            masktoimage_mask_source = get_value_at_index(layermask_maskgrow_1178, 0)
        else:
            # Manual mode keeps the cropped user mask as-is.
            masktoimage_mask_source = get_value_at_index(inpaintcropimproved_999, 2)

        masktoimage_1017 = self.masktoimage.mask_to_image(
            mask=masktoimage_mask_source
        )

        imageconcanate_1016 = self.imageconcanate.concatenate(
            direction="left",
            match_image_size=True,
            image1=get_value_at_index(masktoimage_1017, 0),
            image2=get_value_at_index(self.emptyimage_1015, 0),
        )

        imagetomask_1035 = self.imagetomask.image_to_mask(
            channel="red", image=get_value_at_index(imageconcanate_1016, 0)
        )

        inpaintmodelconditioning_1033 = self.inpaintmodelconditioning.encode(
            noise_mask=True,
            positive=get_value_at_index(controlnetapplyadvanced_1043, 0),
            negative=get_value_at_index(controlnetapplyadvanced_1043, 1),
            vae=get_value_at_index(self.vaeloader_1023, 0),
            pixels=get_value_at_index(imageconcanate_1013, 0),
            mask=get_value_at_index(imagetomask_1035, 0),
        )

        differentialdiffusion_1040 = self.differentialdiffusion.apply(
            model=current_model
        )

        ksampler_1030 = self.ksampler.sample(
            seed=seed,
            steps=steps,
            cfg=1,
            sampler_name="euler",
            scheduler="simple",
            denoise=1,
            model=get_value_at_index(differentialdiffusion_1040, 0),
            positive=get_value_at_index(inpaintmodelconditioning_1033, 0),
            negative=get_value_at_index(inpaintmodelconditioning_1033, 1),
            latent_image=get_value_at_index(inpaintmodelconditioning_1033, 2),
        )

        vaedecode_1036 = self.vaedecode.decode(
            samples=get_value_at_index(ksampler_1030, 0),
            vae=get_value_at_index(self.vaeloader_1023, 0),
        )

        # The sampled canvas is [empty | person]; keep the right half.
        imagecrop_1055 = self.imagecrop.execute(
            width=768,
            height=1024,
            position="top-right",
            x_offset=0,
            y_offset=0,
            image=get_value_at_index(vaedecode_1036, 0),
        )

        imageupscalewithmodel_1188 = self.imageupscalewithmodel.upscale(
            upscale_model=get_value_at_index(self.upscalemodelloader_1155, 0),
            image=get_value_at_index(imagecrop_1055, 0),
        )
        layerutility_purgevram_v2_1187 = self.layerutility_purgevram_v2.purge_vram_v2(
            purge_cache=True,
            purge_models=True,
            anything=get_value_at_index(imageupscalewithmodel_1188, 0),
        )

        # Stitch the inpainted crop back into the full-resolution photo.
        inpaintstitchimproved_1054 = self.inpaintstitchimproved.inpaint_stitch(
            stitcher=get_value_at_index(inpaintcropimproved_999, 0),
            inpainted_image=get_value_at_index(layerutility_purgevram_v2_1187, 0),
        )

        showtextpysssss_1111 = self.showtextpysssss.notify(
            text=get_value_at_index(cr_text_replace_1119, 0),
            unique_id=16351491204491641391,
        )

        # Convert output tensors to PIL images and save to disk.
        imgs = []
        for i, res in enumerate(inpaintstitchimproved_1054[0]):
            img = Image.fromarray(
                np.clip(255. * res.detach().cpu().numpy().squeeze(), 0, 255).astype(np.uint8)
            )
            # Index subsequent batch images so they do not overwrite each
            # other; the first image keeps the original path for
            # backward compatibility.
            filename = "fitcheck_output.png" if i == 0 else f"fitcheck_output_{i}.png"
            img.save(filename)
            imgs.append(img)

        stop = time.time()
        print(f"Total time: {stop - start:.2f} seconds")
        return imgs

    def cleanup(self):
        """Release cached CUDA memory after inference (best-effort)."""
        try:
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
                torch.cuda.synchronize()
                print("VRAM cleanup completed")
        except Exception as e:
            # Best-effort: never let cleanup failures crash the caller.
            print(f"Cleanup warning: {e}")
682
-
683
-
684
- # Example usage:
685
- # generator = FitCheckWorkflow()
686
  # imgs = generator(api_key="your_api_key", swap_type="Dresses", mode="balanced")
 
1
+ import os
2
+ import random
3
+ import sys
4
+ from typing import Sequence, Mapping, Any, Union
5
+ import torch
6
+ import time
7
+ from PIL import Image
8
+ import numpy as np
9
+
10
+
11
def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
    """Returns the value at the given index of a sequence or mapping."""
    # ComfyUI node outputs are usually tuples; some nodes instead return a
    # mapping whose payload sits under the "result" key — fall back to it.
    try:
        return obj[index]
    except KeyError:
        return obj["result"][index]
17
+
18
+
19
def find_path(name: str, path: str = None) -> str:
    """Recursively looks at parent folders to find the given name."""
    # Start from the current working directory when no path is given.
    if path is None:
        path = os.getcwd()
    # Found directly in this directory?
    if name in os.listdir(path):
        path_name = os.path.join(path, name)
        print(f"{name} found: {path_name}")
        return path_name
    parent_directory = os.path.dirname(path)
    # dirname() of the filesystem root is the root itself — give up there.
    if parent_directory == path:
        return None
    # Otherwise keep climbing toward the root.
    return find_path(name, parent_directory)
31
+
32
+
33
def add_comfyui_directory_to_sys_path() -> None:
    """Add 'ComfyUI' to the sys.path"""
    comfyui_path = find_path("ComfyUI")
    # Only append when an actual directory was located.
    if comfyui_path is not None and os.path.isdir(comfyui_path):
        sys.path.append(comfyui_path)
        print(f"'{comfyui_path}' added to sys.path")
39
+
40
+
41
def add_extra_model_paths() -> None:
    """Parse the optional extra_model_paths.yaml file and add the parsed paths to the sys.path."""
    # The loader moved between ComfyUI versions; try the old location first.
    try:
        from main import load_extra_path_config
    except ImportError:
        print("Could not import load_extra_path_config from main.py. Looking in utils.extra_config instead.")
        from utils.extra_config import load_extra_path_config
    extra_model_paths = find_path("extra_model_paths.yaml")
    if extra_model_paths is not None:
        load_extra_path_config(extra_model_paths)
    else:
        print("Could not find the extra_model_paths config file.")
53
+
54
+
55
+ add_comfyui_directory_to_sys_path()
56
+ add_extra_model_paths()
57
+
58
+
59
def import_custom_nodes() -> None:
    """Find all custom nodes and initialize them"""
    import asyncio
    import execution
    from nodes import init_extra_nodes
    import server
    # init_extra_nodes() expects a live PromptServer with an attached
    # PromptQueue, so a minimal pair is stood up on a fresh event loop.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    server_instance = server.PromptServer(loop)
    execution.PromptQueue(server_instance)
    init_extra_nodes()
70
+
71
+
72
+ from nodes import NODE_CLASS_MAPPINGS
73
+
74
+
75
+ class FitCheckWorkflow:
76
+ def __init__(self):
77
+ import_custom_nodes()
78
+ with torch.inference_mode():
79
+ # Initialize all node classes
80
+ self.loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
81
+ self.comfyuivtonmaskloader = NODE_CLASS_MAPPINGS["ComfyUIVtonMaskLoader"]()
82
+ self.emptyimage = NODE_CLASS_MAPPINGS["EmptyImage"]()
83
+ self.rmbg = NODE_CLASS_MAPPINGS["RMBG"]()
84
+ self.layerutility_imageremovealpha = NODE_CLASS_MAPPINGS["LayerUtility: ImageRemoveAlpha"]()
85
+ self.inpaintcropimproved = NODE_CLASS_MAPPINGS["InpaintCropImproved"]()
86
+ self.geminiflash = NODE_CLASS_MAPPINGS["GeminiFlash"]()
87
+ self.stringfunctionpysssss = NODE_CLASS_MAPPINGS["StringFunction|pysssss"]()
88
+ self.cr_text_replace = NODE_CLASS_MAPPINGS["CR Text Replace"]()
89
+ self.dualcliploader = NODE_CLASS_MAPPINGS["DualCLIPLoader"]()
90
+ self.cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
91
+ self.vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
92
+ self.unetloader = NODE_CLASS_MAPPINGS["UNETLoader"]()
93
+ self.stylemodelloader = NODE_CLASS_MAPPINGS["StyleModelLoader"]()
94
+ self.clipvisionloader = NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
95
+ self.clipvisionencode = NODE_CLASS_MAPPINGS["CLIPVisionEncode"]()
96
+ self.loraloadermodelonly = NODE_CLASS_MAPPINGS["LoraLoaderModelOnly"]()
97
+ self.fluxguidance = NODE_CLASS_MAPPINGS["FluxGuidance"]()
98
+ self.stylemodelapply = NODE_CLASS_MAPPINGS["StyleModelApply"]()
99
+ self.conditioningzeroout = NODE_CLASS_MAPPINGS["ConditioningZeroOut"]()
100
+ self.controlnetloader = NODE_CLASS_MAPPINGS["ControlNetLoader"]()
101
+ self.setunioncontrolnettype = NODE_CLASS_MAPPINGS["SetUnionControlNetType"]()
102
+ self.upscalemodelloader = NODE_CLASS_MAPPINGS["UpscaleModelLoader"]()
103
+ self.imageupscalewithmodel = NODE_CLASS_MAPPINGS["ImageUpscaleWithModel"]()
104
+ self.imageresize = NODE_CLASS_MAPPINGS["ImageResize+"]()
105
+ self.comfyuivtonmaskgenerator = NODE_CLASS_MAPPINGS["ComfyUIVtonMaskGenerator"]()
106
+ self.imagetomask = NODE_CLASS_MAPPINGS["ImageToMask"]()
107
+ self.layermask_maskgrow = NODE_CLASS_MAPPINGS["LayerMask: MaskGrow"]()
108
+ self.loadimagemask = NODE_CLASS_MAPPINGS["LoadImageMask"]()
109
+ self.mask_fill_holes = NODE_CLASS_MAPPINGS["Mask Fill Holes"]()
110
+ self.resizemask = NODE_CLASS_MAPPINGS["ResizeMask"]()
111
+ self.imageconcanate = NODE_CLASS_MAPPINGS["ImageConcanate"]()
112
+ self.getimagesize = NODE_CLASS_MAPPINGS["GetImageSize+"]()
113
+ self.pixelperfectresolution = NODE_CLASS_MAPPINGS["PixelPerfectResolution"]()
114
+ self.aio_preprocessor = NODE_CLASS_MAPPINGS["AIO_Preprocessor"]()
115
+ self.layerutility_purgevram_v2 = NODE_CLASS_MAPPINGS["LayerUtility: PurgeVRAM V2"]()
116
+ self.controlnetapplyadvanced = NODE_CLASS_MAPPINGS["ControlNetApplyAdvanced"]()
117
+ self.getimagesizeandcount = NODE_CLASS_MAPPINGS["GetImageSizeAndCount"]()
118
+ self.sammodelloader_segment_anything = NODE_CLASS_MAPPINGS["SAMModelLoader (segment anything)"]()
119
+ self.groundingdinomodelloader_segment_anything = NODE_CLASS_MAPPINGS["GroundingDinoModelLoader (segment anything)"]()
120
+ self.groundingdinosamsegment_segment_anything = NODE_CLASS_MAPPINGS["GroundingDinoSAMSegment (segment anything)"]()
121
+ self.maskcomposite = NODE_CLASS_MAPPINGS["MaskComposite"]()
122
+ self.apersonmaskgenerator = NODE_CLASS_MAPPINGS["APersonMaskGenerator"]()
123
+ self.masktoimage = NODE_CLASS_MAPPINGS["MaskToImage"]()
124
+ self.inpaintmodelconditioning = NODE_CLASS_MAPPINGS["InpaintModelConditioning"]()
125
+ self.differentialdiffusion = NODE_CLASS_MAPPINGS["DifferentialDiffusion"]()
126
+ self.ksampler = NODE_CLASS_MAPPINGS["KSampler"]()
127
+ self.vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
128
+ self.imagecrop = NODE_CLASS_MAPPINGS["ImageCrop+"]()
129
+ self.inpaintstitchimproved = NODE_CLASS_MAPPINGS["InpaintStitchImproved"]()
130
+ self.showtextpysssss = NODE_CLASS_MAPPINGS["ShowText|pysssss"]()
131
+
132
+ # Initialize commonly used nodes
133
+ self.comfyuivtonmaskloader_983 = self.comfyuivtonmaskloader.load_mask_model(device="cpu")
134
+ self.emptyimage_1015 = self.emptyimage.generate(width=768, height=1024, batch_size=1, color=0)
135
+ self.dualcliploader_1024 = self.dualcliploader.load_clip(
136
+ clip_name1="clip_l.safetensors",
137
+ clip_name2="t5xxl_fp8_e4m3fn.safetensors",
138
+ type="flux",
139
+ device="default",
140
+ )
141
+ self.vaeloader_1023 = self.vaeloader.load_vae(vae_name="ae.safetensors")
142
+ self.unetloader_1025 = self.unetloader.load_unet(
143
+ unet_name="flux1-fill-dev.safetensors", weight_dtype="fp8_e4m3fn"
144
+ )
145
+ self.stylemodelloader_1026 = self.stylemodelloader.load_style_model(
146
+ style_model_name="flux1-redux-dev.safetensors"
147
+ )
148
+ self.clipvisionloader_1151 = self.clipvisionloader.load_clip(
149
+ clip_name="sigclip_vision_patch14_384.safetensors"
150
+ )
151
+ self.controlnetloader_1042 = self.controlnetloader.load_controlnet(
152
+ control_net_name="flux-union-pro-v2.safetensors"
153
+ )
154
+ self.setunioncontrolnettype_1041 = self.setunioncontrolnettype.set_controlnet_type(
155
+ type="depth", control_net=get_value_at_index(self.controlnetloader_1042, 0)
156
+ )
157
+ self.upscalemodelloader_1155 = self.upscalemodelloader.load_model(
158
+ model_name="RealESRGAN_x2.pth"
159
+ )
160
+ # self.upscalemodelloader_1189 = self.upscalemodelloader.load_model(
161
+ # model_name="Phips/1xDeNoise_realplksr_otf.safetensors"
162
+ # )
163
+ self.comfyuivtonmaskloader_1173 = self.comfyuivtonmaskloader.load_mask_model(device="cpu")
164
+ self.sammodelloader_segment_anything_1167 = self.sammodelloader_segment_anything.main(
165
+ model_name="sam_vit_h (2.56GB)"
166
+ )
167
+ self.groundingdinomodelloader_segment_anything_1168 = self.groundingdinomodelloader_segment_anything.main(
168
+ model_name="GroundingDINO_SwinT_OGC (694MB)"
169
+ )
170
+
171
+ @torch.inference_mode()
172
+ def __call__(self, *args, **kwargs):
173
+ start = time.time()
174
+
175
+ # Extract parameters from kwargs with defaults
176
+ api_key = kwargs.get("api_key", "AIzaSyA2XScgkb65IaskjGK6EkUb7HKGjl9cKNw")
177
+ swap_type = kwargs.get("swap_type", "Dresses")
178
+ mode = kwargs.get("mode", "balanced")
179
+ seed = kwargs.get("seed", random.randint(1, 2**64))
180
+
181
+ # Validate parameters
182
+ valid_swap_types = ["Upper-body", "Lower-body", "Dresses", "Manual"]
183
+ valid_modes = ["speed", "balanced", "quality"]
184
+
185
+ if swap_type not in valid_swap_types:
186
+ raise ValueError(f"swap_type must be one of {valid_swap_types}")
187
+ if mode not in valid_modes:
188
+ raise ValueError(f"mode must be one of {valid_modes}")
189
+
190
+ print(f"Running FitCheck with swap_type: {swap_type}, mode: {mode}")
191
+
192
+ # Load images
193
+ loadimage_904 = self.loadimage.load_image(image="model_img.png")
194
+ loadimage_909 = self.loadimage.load_image(image="cloth_img.png")
195
+
196
+ # RMBG processing
197
+ rmbg_1160 = self.rmbg.process_image(
198
+ model="RMBG-2.0",
199
+ sensitivity=1,
200
+ process_res=1024,
201
+ mask_blur=0,
202
+ mask_offset=0,
203
+ invert_output=False,
204
+ refine_foreground=True,
205
+ background="Alpha",
206
+ background_color="#000000",
207
+ image=get_value_at_index(loadimage_909, 0),
208
+ )
209
+
210
+ layerutility_imageremovealpha_1158 = self.layerutility_imageremovealpha.image_remove_alpha(
211
+ fill_background=True,
212
+ background_color="#000000",
213
+ RGBA_image=get_value_at_index(loadimage_909, 0),
214
+ mask=get_value_at_index(rmbg_1160, 1),
215
+ )
216
+
217
+ inpaintcropimproved_1003 = self.inpaintcropimproved.inpaint_crop(
218
+ downscale_algorithm="bilinear",
219
+ upscale_algorithm="bicubic",
220
+ preresize=False,
221
+ preresize_mode="ensure minimum resolution",
222
+ preresize_min_width=1024,
223
+ preresize_min_height=1024,
224
+ preresize_max_width=16384,
225
+ preresize_max_height=16384,
226
+ mask_fill_holes=True,
227
+ mask_expand_pixels=0,
228
+ mask_invert=False,
229
+ mask_blend_pixels=0,
230
+ mask_hipass_filter=0.1,
231
+ extend_for_outpainting=False,
232
+ extend_up_factor=1,
233
+ extend_down_factor=1,
234
+ extend_left_factor=1,
235
+ extend_right_factor=1,
236
+ context_from_mask_extend_factor=1.1500000000000001,
237
+ output_resize_to_target_size=True,
238
+ output_target_width=768,
239
+ output_target_height=1024,
240
+ output_padding="0",
241
+ image=get_value_at_index(layerutility_imageremovealpha_1158, 0),
242
+ mask=get_value_at_index(rmbg_1160, 1),
243
+ )
244
+
245
+ # Gemini processing with configurable API key
246
+ geminiflash_1120 = self.geminiflash.generate_content(
247
+ prompt="What kind of outfit is this,models size like slim,plus size etc,and describe it clearly in short, return to the point combined prompt in plain text",
248
+ input_type="image",
249
+ model_version="gemini-2.0-flash",
250
+ operation_mode="analysis",
251
+ chat_mode=False,
252
+ clear_history=True,
253
+ Additional_Context="",
254
+ api_key=api_key,
255
+ max_output_tokens=8192,
256
+ temperature=0.4,
257
+ structured_output=False,
258
+ max_images=6,
259
+ batch_count=1,
260
+ seed=random.randint(1, 2**64),
261
+ images=get_value_at_index(inpaintcropimproved_1003, 1),
262
+ )
263
+
264
+ stringfunctionpysssss_1110 = self.stringfunctionpysssss.exec(
265
+ action="append",
266
+ tidy_tags="no",
267
+ text_a="The fashion model wearing the [outfit]\n",
268
+ text_b="The 2 shirts on both sides are exactly the same, same color, same logo, same text, same features",
269
+ text_c="",
270
+ )
271
+
272
+ cr_text_replace_1119 = self.cr_text_replace.replace_text(
273
+ find1="[outfit]",
274
+ replace1=get_value_at_index(geminiflash_1120, 0),
275
+ find2="",
276
+ replace2="",
277
+ find3="",
278
+ replace3="",
279
+ text=get_value_at_index(stringfunctionpysssss_1110, 0),
280
+ )
281
+
282
+ print("\n=================\n\n\n")
283
+ print("Generated prompt:\n", get_value_at_index(cr_text_replace_1119, 0))
284
+ print("\n\n\n=================\n")
285
+
286
+ cliptextencode_1022 = self.cliptextencode.encode(
287
+ text=get_value_at_index(cr_text_replace_1119, 0),
288
+ clip=get_value_at_index(self.dualcliploader_1024, 0),
289
+ )
290
+
291
+ clipvisionencode_1027 = self.clipvisionencode.encode(
292
+ crop="none",
293
+ clip_vision=get_value_at_index(self.clipvisionloader_1151, 0),
294
+ image=get_value_at_index(inpaintcropimproved_1003, 1),
295
+ )
296
+
297
+ # Always load cat-vton LoRA first
298
+ loraloadermodelonly_1032 = self.loraloadermodelonly.load_lora_model_only(
299
+ lora_name="cat-vton.safetensors",
300
+ strength_model=1,
301
+ model=get_value_at_index(self.unetloader_1025, 0),
302
+ )
303
+
304
+ # Mode-based LoRA loading and configuration
305
+ if mode == "speed":
306
+ loraloadermodelonly_1031 = self.loraloadermodelonly.load_lora_model_only(
307
+ lora_name="turbo.safetensors",
308
+ strength_model=1.0,
309
+ model=get_value_at_index(loraloadermodelonly_1032, 0),
310
+ )
311
+ current_model = get_value_at_index(loraloadermodelonly_1031, 0)
312
+ steps = 11
313
+ elif mode == "balanced":
314
+ loraloadermodelonly_1031 = self.loraloadermodelonly.load_lora_model_only(
315
+ lora_name="turbo.safetensors",
316
+ strength_model=0.5,
317
+ model=get_value_at_index(loraloadermodelonly_1032, 0),
318
+ )
319
+ current_model = get_value_at_index(loraloadermodelonly_1031, 0)
320
+ steps = 17
321
+ else: # quality
322
+ current_model = get_value_at_index(loraloadermodelonly_1032, 0)
323
+ steps = 34
324
+
325
+ fluxguidance_1020 = self.fluxguidance.append(
326
+ guidance=50, conditioning=get_value_at_index(cliptextencode_1022, 0)
327
+ )
328
+
329
+ stylemodelapply_1019 = self.stylemodelapply.apply_stylemodel(
330
+ strength=1,
331
+ strength_type="multiply",
332
+ conditioning=get_value_at_index(fluxguidance_1020, 0),
333
+ style_model=get_value_at_index(self.stylemodelloader_1026, 0),
334
+ clip_vision_output=get_value_at_index(clipvisionencode_1027, 0),
335
+ )
336
+
337
+ conditioningzeroout_1021 = self.conditioningzeroout.zero_out(
338
+ conditioning=get_value_at_index(fluxguidance_1020, 0)
339
+ )
340
+
341
+ imageupscalewithmodel_1156 = self.imageupscalewithmodel.upscale(
342
+ upscale_model=get_value_at_index(self.upscalemodelloader_1155, 0),
343
+ image=get_value_at_index(loadimage_904, 0),
344
+ )
345
+
346
+ imageresize_1058 = self.imageresize.execute(
347
+ width=1536,
348
+ height=1536,
349
+ interpolation="nearest",
350
+ method="keep proportion",
351
+ condition="always",
352
+ multiple_of=0,
353
+ image=get_value_at_index(imageupscalewithmodel_1156, 0),
354
+ )
355
+
356
+ # Conditional logic based on swap_type
357
+ if swap_type != "Manual":
358
+ # Generate masks automatically for Upper-body, Lower-body, Dresses
359
+ comfyuivtonmaskgenerator_982 = self.comfyuivtonmaskgenerator.generate_mask(
360
+ category=swap_type,
361
+ offset_top=0,
362
+ offset_bottom=0,
363
+ offset_left=0,
364
+ offset_right=0,
365
+ mask_model=get_value_at_index(self.comfyuivtonmaskloader_983, 0),
366
+ vton_image=get_value_at_index(imageresize_1058, 0),
367
+ )
368
+
369
+ imagetomask_990 = self.imagetomask.image_to_mask(
370
+ channel="red", image=get_value_at_index(comfyuivtonmaskgenerator_982, 1)
371
+ )
372
+
373
+ layermask_maskgrow_891 = self.layermask_maskgrow.mask_grow(
374
+ invert_mask=False,
375
+ grow=0,
376
+ blur=3,
377
+ mask=get_value_at_index(imagetomask_990, 0),
378
+ )
379
+
380
+ # Use automatically generated mask
381
+ resize_mask_source = get_value_at_index(layermask_maskgrow_891, 0)
382
+ else:
383
+ # Manual mode - load user provided mask
384
+ loadimage_manual_mask = self.loadimage.load_image(image="mask_img.png")
385
+
386
+ # Convert image to mask (same as automatic mode)
387
+ imagetomask_manual = self.imagetomask.image_to_mask(
388
+ channel="red", image=get_value_at_index(loadimage_manual_mask, 0)
389
+ )
390
+ # mask_fill_holes_1147 = self.mask_fill_holes.fill_region(
391
+ # masks=get_value_at_index(imagetomask_manual, 0),
392
+ # )
393
+ # Use user provided mask
394
+ resize_mask_source = get_value_at_index(imagetomask_manual, 0)
395
+
396
+ resizemask_1059 = self.resizemask.resize(
397
+ width=get_value_at_index(imageresize_1058, 1),
398
+ height=get_value_at_index(imageresize_1058, 2),
399
+ keep_proportions=False,
400
+ upscale_method="nearest-exact",
401
+ crop="disabled",
402
+ mask=resize_mask_source,
403
+ )
404
+
405
+ inpaintcropimproved_999 = self.inpaintcropimproved.inpaint_crop(
406
+ downscale_algorithm="nearest",
407
+ upscale_algorithm="nearest",
408
+ preresize=False,
409
+ preresize_mode="ensure minimum resolution",
410
+ preresize_min_width=1024,
411
+ preresize_min_height=1024,
412
+ preresize_max_width=16384,
413
+ preresize_max_height=16384,
414
+ mask_fill_holes=True,
415
+ mask_expand_pixels=8,
416
+ mask_invert=False,
417
+ mask_blend_pixels=20,
418
+ mask_hipass_filter=0.1,
419
+ extend_for_outpainting=False,
420
+ extend_up_factor=1,
421
+ extend_down_factor=1,
422
+ extend_left_factor=1,
423
+ extend_right_factor=1,
424
+ context_from_mask_extend_factor=1.0500000000000003,
425
+ output_resize_to_target_size=True,
426
+ output_target_width=768,
427
+ output_target_height=1024,
428
+ output_padding="64",
429
+ image=get_value_at_index(imageresize_1058, 0),
430
+ mask=get_value_at_index(resizemask_1059, 0),
431
+ )
432
+
433
+ imageconcanate_1044 = self.imageconcanate.concatenate(
434
+ direction="left",
435
+ match_image_size=True,
436
+ image1=get_value_at_index(inpaintcropimproved_999, 1),
437
+ image2=get_value_at_index(self.emptyimage_1015, 0),
438
+ )
439
+
440
+ getimagesize_1047 = self.getimagesize.execute(
441
+ image=get_value_at_index(imageconcanate_1044, 0)
442
+ )
443
+
444
+ pixelperfectresolution_1049 = self.pixelperfectresolution.execute(
445
+ image_gen_width=get_value_at_index(getimagesize_1047, 0),
446
+ image_gen_height=get_value_at_index(getimagesize_1047, 1),
447
+ resize_mode="Just Resize",
448
+ original_image=get_value_at_index(imageconcanate_1044, 0),
449
+ )
450
+
451
+ aio_preprocessor_1046 = self.aio_preprocessor.execute(
452
+ preprocessor="Zoe_DepthAnythingPreprocessor",
453
+ resolution=get_value_at_index(pixelperfectresolution_1049, 0),
454
+ image=get_value_at_index(imageconcanate_1044, 0),
455
+ )
456
+
457
+ layerutility_purgevram_v2_1191 = self.layerutility_purgevram_v2.purge_vram_v2(
458
+ purge_cache=True,
459
+ purge_models=True,
460
+ anything=get_value_at_index(aio_preprocessor_1046, 0),
461
+ )
462
+
463
+ controlnetapplyadvanced_1043 = self.controlnetapplyadvanced.apply_controlnet(
464
+ strength=0.7000000000000002,
465
+ start_percent=0,
466
+ end_percent=0.5000000000000001,
467
+ positive=get_value_at_index(stylemodelapply_1019, 0),
468
+ negative=get_value_at_index(conditioningzeroout_1021, 0),
469
+ control_net=get_value_at_index(self.setunioncontrolnettype_1041, 0),
470
+ image=get_value_at_index(layerutility_purgevram_v2_1191, 0),
471
+ vae=get_value_at_index(self.vaeloader_1023, 0),
472
+ )
473
+
474
+ imageconcanate_1013 = self.imageconcanate.concatenate(
475
+ direction="left",
476
+ match_image_size=True,
477
+ image1=get_value_at_index(inpaintcropimproved_999, 1),
478
+ image2=get_value_at_index(inpaintcropimproved_1003, 1),
479
+ )
480
+
481
+ # Second mask generation logic (only if not Manual)
482
+ if swap_type != "Manual":
483
+ getimagesizeandcount_1165 = self.getimagesizeandcount.getsize(
484
+ image=get_value_at_index(inpaintcropimproved_999, 1)
485
+ )
486
+
487
+ comfyuivtonmaskgenerator_1179 = self.comfyuivtonmaskgenerator.generate_mask(
488
+ category=swap_type,
489
+ offset_top=0,
490
+ offset_bottom=0,
491
+ offset_left=0,
492
+ offset_right=0,
493
+ mask_model=get_value_at_index(self.comfyuivtonmaskloader_1173, 0),
494
+ vton_image=get_value_at_index(getimagesizeandcount_1165, 0),
495
+ )
496
+
497
+ imagetomask_1175 = self.imagetomask.image_to_mask(
498
+ channel="red", image=get_value_at_index(comfyuivtonmaskgenerator_1179, 1)
499
+ )
500
+
501
+ groundingdinosamsegment_segment_anything_1176 = self.groundingdinosamsegment_segment_anything.main(
502
+ prompt="hand",
503
+ threshold=0.28,
504
+ sam_model=get_value_at_index(self.sammodelloader_segment_anything_1167, 0),
505
+ grounding_dino_model=get_value_at_index(self.groundingdinomodelloader_segment_anything_1168, 0),
506
+ image=get_value_at_index(getimagesizeandcount_1165, 0),
507
+ )
508
+
509
+ layerutility_purgevram_v2_1192 = self.layerutility_purgevram_v2.purge_vram_v2(
510
+ purge_cache=True,
511
+ purge_models=True,
512
+ anything=get_value_at_index(groundingdinosamsegment_segment_anything_1176, 1),
513
+ )
514
+
515
+ maskcomposite_1174 = self.maskcomposite.combine(
516
+ x=0,
517
+ y=0,
518
+ operation="subtract",
519
+ destination=get_value_at_index(imagetomask_1175, 0),
520
+ source=get_value_at_index(layerutility_purgevram_v2_1192, 0),
521
+ )
522
+
523
+ apersonmaskgenerator_1181 = self.apersonmaskgenerator.generate_mask(
524
+ face_mask=True,
525
+ background_mask=False,
526
+ hair_mask=False,
527
+ body_mask=False,
528
+ clothes_mask=False,
529
+ confidence=0.4,
530
+ refine_mask=True,
531
+ images=get_value_at_index(getimagesizeandcount_1165, 0),
532
+ )
533
+
534
+ apersonmaskgenerator_1177 = self.apersonmaskgenerator.generate_mask(
535
+ face_mask=False,
536
+ background_mask=False,
537
+ hair_mask=True,
538
+ body_mask=False,
539
+ clothes_mask=False,
540
+ confidence=0.4,
541
+ refine_mask=True,
542
+ images=get_value_at_index(getimagesizeandcount_1165, 0),
543
+ )
544
+
545
+ maskcomposite_1171 = self.maskcomposite.combine(
546
+ x=0,
547
+ y=0,
548
+ operation="add",
549
+ destination=get_value_at_index(apersonmaskgenerator_1181, 0),
550
+ source=get_value_at_index(apersonmaskgenerator_1177, 0),
551
+ )
552
+
553
+ maskcomposite_1169 = self.maskcomposite.combine(
554
+ x=0,
555
+ y=0,
556
+ operation="subtract",
557
+ destination=get_value_at_index(maskcomposite_1174, 0),
558
+ source=get_value_at_index(maskcomposite_1171, 0),
559
+ )
560
+
561
+ layermask_maskgrow_1178 = self.layermask_maskgrow.mask_grow(
562
+ invert_mask=False,
563
+ grow=0,
564
+ blur=3,
565
+ mask=get_value_at_index(maskcomposite_1169, 0),
566
+ )
567
+
568
+ # Use processed mask for automatic modes
569
+ masktoimage_mask_source = get_value_at_index(layermask_maskgrow_1178, 0)
570
+ else:
571
+ # Use cropped mask for Manual mode
572
+ masktoimage_mask_source = get_value_at_index(inpaintcropimproved_999, 2)
573
+
574
+ masktoimage_1017 = self.masktoimage.mask_to_image(
575
+ mask=masktoimage_mask_source
576
+ )
577
+
578
+ imageconcanate_1016 = self.imageconcanate.concatenate(
579
+ direction="left",
580
+ match_image_size=True,
581
+ image1=get_value_at_index(masktoimage_1017, 0),
582
+ image2=get_value_at_index(self.emptyimage_1015, 0),
583
+ )
584
+
585
+ imagetomask_1035 = self.imagetomask.image_to_mask(
586
+ channel="red", image=get_value_at_index(imageconcanate_1016, 0)
587
+ )
588
+
589
+ inpaintmodelconditioning_1033 = self.inpaintmodelconditioning.encode(
590
+ noise_mask=True,
591
+ positive=get_value_at_index(controlnetapplyadvanced_1043, 0),
592
+ negative=get_value_at_index(controlnetapplyadvanced_1043, 1),
593
+ vae=get_value_at_index(self.vaeloader_1023, 0),
594
+ pixels=get_value_at_index(imageconcanate_1013, 0),
595
+ mask=get_value_at_index(imagetomask_1035, 0),
596
+ )
597
+
598
+ differentialdiffusion_1040 = self.differentialdiffusion.apply(
599
+ model=current_model
600
+ )
601
+
602
+ ksampler_1030 = self.ksampler.sample(
603
+ seed=seed,
604
+ steps=steps,
605
+ cfg=1,
606
+ sampler_name="euler",
607
+ scheduler="simple",
608
+ denoise=1,
609
+ model=get_value_at_index(differentialdiffusion_1040, 0),
610
+ positive=get_value_at_index(inpaintmodelconditioning_1033, 0),
611
+ negative=get_value_at_index(inpaintmodelconditioning_1033, 1),
612
+ latent_image=get_value_at_index(inpaintmodelconditioning_1033, 2),
613
+ )
614
+
615
+ vaedecode_1036 = self.vaedecode.decode(
616
+ samples=get_value_at_index(ksampler_1030, 0),
617
+ vae=get_value_at_index(self.vaeloader_1023, 0),
618
+ )
619
+
620
+ imagecrop_1055 = self.imagecrop.execute(
621
+ width=768,
622
+ height=1024,
623
+ position="top-right",
624
+ x_offset=0,
625
+ y_offset=0,
626
+ image=get_value_at_index(vaedecode_1036, 0),
627
+ )
628
+
629
+
630
+ imageupscalewithmodel_1188 = self.imageupscalewithmodel.upscale(
631
+ upscale_model=get_value_at_index(self.upscalemodelloader_1155, 0),
632
+ image=get_value_at_index(imagecrop_1055, 0),
633
+ )
634
+ layerutility_purgevram_v2_1187 = self.layerutility_purgevram_v2.purge_vram_v2(
635
+ purge_cache=True,
636
+ purge_models=True,
637
+ anything=get_value_at_index(imageupscalewithmodel_1188, 0),
638
+ )
639
+
640
+ inpaintstitchimproved_1054 = self.inpaintstitchimproved.inpaint_stitch(
641
+ stitcher=get_value_at_index(inpaintcropimproved_999, 0),
642
+ inpainted_image=get_value_at_index(layerutility_purgevram_v2_1187, 0),
643
+ )
644
+
645
+ showtextpysssss_1111 = self.showtextpysssss.notify(
646
+ text=get_value_at_index(cr_text_replace_1119, 0),
647
+ unique_id=16351491204491641391,
648
+ )
649
+
650
+ # layerutility_purgevram_v2_1187 = self.layerutility_purgevram_v2.purge_vram_v2(
651
+ # purge_cache=True,
652
+ # purge_models=True,
653
+ # anything=get_value_at_index(inpaintstitchimproved_1054, 0),
654
+ # )
655
+
656
+ # imageupscalewithmodel_1188 = self.imageupscalewithmodel.upscale(
657
+ # upscale_model=get_value_at_index(self.upscalemodelloader_1189, 0),
658
+ # image=get_value_at_index(layerutility_purgevram_v2_1187, 0),
659
+ # )
660
+
661
+ # Convert output to image and save
662
+ imgs = []
663
+ for res in inpaintstitchimproved_1054[0]:
664
+ img = Image.fromarray(np.clip(255. * res.detach().cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
665
+ img.save("fitcheck_output.png")
666
+ imgs.append(img)
667
+
668
+ stop = time.time()
669
+ print(f"Total time: {stop - start:.2f} seconds")
670
+ return imgs
671
+
672
+ def cleanup(self):
673
+ """Clean up VRAM and cache after inference"""
674
+ try:
675
+ import torch
676
+ if torch.cuda.is_available():
677
+ torch.cuda.empty_cache()
678
+ torch.cuda.synchronize()
679
+ print("VRAM cleanup completed")
680
+ except Exception as e:
681
+ print(f"Cleanup warning: {e}")
682
+
683
+
684
+ # Example usage:
685
+ # generator = FitCheckWorkflow()
686
  # imgs = generator(api_key="your_api_key", swap_type="Dresses", mode="balanced")