RioShiina committed on
Commit
9f5732f
·
verified ·
1 Parent(s): a04c0a4

Upload folder using huggingface_hub

Browse files
app.py CHANGED
@@ -106,6 +106,13 @@ def main():
106
 
107
  check_all_model_urls_on_startup()
108
 
 
 
 
 
 
 
 
109
  print("--- Environment configured. Proceeding with module imports. ---")
110
  from ui.layout import build_ui
111
  from ui.events import attach_event_handlers
 
106
 
107
  check_all_model_urls_on_startup()
108
 
109
+ print("--- Building ControlNet preprocessor maps ---")
110
+ from core.generation_logic import build_reverse_map
111
+ build_reverse_map()
112
+ build_preprocessor_model_map()
113
+ build_preprocessor_parameter_map()
114
+ print("--- ✅ ControlNet preprocessor setup complete. ---")
115
+
116
  print("--- Environment configured. Proceeding with module imports. ---")
117
  from ui.layout import build_ui
118
  from ui.events import attach_event_handlers
chain_injectors/diffsynth_controlnet_injector.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
def inject(assembler, chain_definition, chain_items):
    """Inject a chain of DiffSynth ControlNet model patches into a workflow.

    For each chain item a ModelPatchLoader + LoadImage pair is created and
    wired into an apply node (``chain_definition['template']``); the apply
    nodes are daisy-chained on their 'model' input and the target sampler's
    'model' input is re-routed through the last one.

    Args:
        assembler: Workflow assembler exposing ``node_map`` (name -> node id),
            ``workflow`` (node id -> node dict), ``_get_unique_id()`` and
            ``_get_node_template(name)``.
        chain_definition: Chain config; keys used here: 'model_sampler_node',
            'ksampler_node' (default 'ksampler'), 'vae_source'
            ("node_name:index"), 'template'.
        chain_items: Dicts with 'control_net_name', 'image', optional
            'strength'. Empty/None means nothing to inject.
    """
    if not chain_items:
        return

    model_sampler_name = chain_definition.get('model_sampler_node')
    ksampler_name = chain_definition.get('ksampler_node', 'ksampler')

    # The patch chain is spliced into whichever node currently consumes the
    # model: the model-sampling node is preferred, the KSampler is fallback.
    target_node_id = None
    target_input_name = 'model'

    if model_sampler_name and model_sampler_name in assembler.node_map:
        model_sampler_id = assembler.node_map[model_sampler_name]
        if target_input_name in assembler.workflow[model_sampler_id]['inputs']:
            target_node_id = model_sampler_id
            print(f"ControlNet Model Patch injector targeting ModelSamplingAuraFlow node '{model_sampler_name}'.")

    if not target_node_id:
        if ksampler_name in assembler.node_map:
            ksampler_id = assembler.node_map[ksampler_name]
            if target_input_name in assembler.workflow[ksampler_id]['inputs']:
                target_node_id = ksampler_id
                print(f"ControlNet Model Patch injector targeting KSampler node '{ksampler_name}'.")
        else:
            # Neither candidate node exists at all — nothing to splice into.
            print(f"Warning: Neither ModelSamplingAuraFlow node '{model_sampler_name}' nor KSampler node '{ksampler_name}' found for ControlNet patch chain. Skipping.")
            return

    if not target_node_id:
        # A candidate node existed but had no 'model' input to re-route.
        print(f"Warning: Could not find a valid 'model' input on target nodes. Skipping ControlNet patch chain.")
        return

    # Remember what currently feeds the target's 'model' input; the first
    # apply node in the chain will consume this connection.
    current_model_connection = assembler.workflow[target_node_id]['inputs'][target_input_name]

    # Resolve the optional VAE source ("node_name:index") into a connection.
    vae_source_str = chain_definition.get('vae_source')
    vae_connection = None
    if vae_source_str:
        try:
            vae_node_name, vae_idx_str = vae_source_str.split(':')
            if vae_node_name in assembler.node_map:
                vae_connection = [assembler.node_map[vae_node_name], int(vae_idx_str)]
            else:
                print(f"Warning: VAE source node '{vae_node_name}' not found for ControlNet patch chain. VAE will not be connected.")
        except ValueError:
            # Raised both by a bad split (no/too many ':') and by int().
            print(f"Warning: Invalid 'vae_source' format '{vae_source_str}' for ControlNet patch chain. Expected 'node_name:index'. VAE will not be connected.")
    else:
        print(f"Warning: 'vae_source' not defined for ControlNet patch chain definition. VAE may not be connected.")

    for item_data in chain_items:
        # Loader for the model patch weights.
        patch_loader_id = assembler._get_unique_id()
        patch_loader_node = assembler._get_node_template("ModelPatchLoader")
        patch_loader_node['inputs']['name'] = item_data['control_net_name']
        assembler.workflow[patch_loader_id] = patch_loader_node

        # Loader for the control image (filename relative to the input dir).
        image_loader_id = assembler._get_unique_id()
        image_loader_node = assembler._get_node_template("LoadImage")
        image_loader_node['inputs']['image'] = item_data['image']
        assembler.workflow[image_loader_id] = image_loader_node

        apply_cn_id = assembler._get_unique_id()
        apply_cn_node = assembler._get_node_template(chain_definition['template'])

        apply_cn_node['inputs']['strength'] = item_data.get('strength', 1.0)
        # Chain: this node consumes the previous model output ([node_id, slot]
        # connections reference output slot 0 in each case).
        apply_cn_node['inputs']['model'] = current_model_connection
        apply_cn_node['inputs']['model_patch'] = [patch_loader_id, 0]
        apply_cn_node['inputs']['image'] = [image_loader_id, 0]

        # Only wire the VAE if the template actually exposes a 'vae' input
        # and a valid source was resolved above.
        if 'vae' in apply_cn_node['inputs'] and vae_connection:
            apply_cn_node['inputs']['vae'] = vae_connection

        assembler.workflow[apply_cn_id] = apply_cn_node

        # The next item (or the final re-route) consumes this node's output.
        current_model_connection = [apply_cn_id, 0]

    # Point the sampler's 'model' input at the end of the patch chain.
    assembler.workflow[target_node_id]['inputs'][target_input_name] = current_model_connection

    print(f"ControlNet Model Patch injector applied. Target 'model' input re-routed through {len(chain_items)} patch(es).")
comfy_integration/setup.py CHANGED
@@ -39,6 +39,14 @@ def initialize_comfyui():
39
  except OSError as e:
40
  print(f"⚠️ Could not remove temporary directory '{COMFYUI_TEMP_DIR}': {e}")
41
 
 
 
 
 
 
 
 
 
42
 
43
  print(f"✅ Current working directory is: {os.getcwd()}")
44
 
@@ -51,6 +59,7 @@ def initialize_comfyui():
51
  os.makedirs(os.path.join(APP_DIR, LORA_DIR), exist_ok=True)
52
  os.makedirs(os.path.join(APP_DIR, EMBEDDING_DIR), exist_ok=True)
53
  os.makedirs(os.path.join(APP_DIR, CONTROLNET_DIR), exist_ok=True)
 
54
  os.makedirs(os.path.join(APP_DIR, DIFFUSION_MODELS_DIR), exist_ok=True)
55
  os.makedirs(os.path.join(APP_DIR, VAE_DIR), exist_ok=True)
56
  os.makedirs(os.path.join(APP_DIR, TEXT_ENCODERS_DIR), exist_ok=True)
 
39
  except OSError as e:
40
  print(f"⚠️ Could not remove temporary directory '{COMFYUI_TEMP_DIR}': {e}")
41
 
42
+ print("--- Cloning third-party extensions for ComfyUI ---")
43
+ controlnet_aux_path = os.path.join(APP_DIR, "custom_nodes", "comfyui_controlnet_aux")
44
+ if not os.path.exists(controlnet_aux_path):
45
+ os.system(f"git clone https://github.com/Fannovel16/comfyui_controlnet_aux.git {controlnet_aux_path}")
46
+ print("✅ comfyui_controlnet_aux extension cloned.")
47
+ else:
48
+ print("✅ comfyui_controlnet_aux extension already exists.")
49
+
50
 
51
  print(f"✅ Current working directory is: {os.getcwd()}")
52
 
 
59
  os.makedirs(os.path.join(APP_DIR, LORA_DIR), exist_ok=True)
60
  os.makedirs(os.path.join(APP_DIR, EMBEDDING_DIR), exist_ok=True)
61
  os.makedirs(os.path.join(APP_DIR, CONTROLNET_DIR), exist_ok=True)
62
+ os.makedirs(os.path.join(APP_DIR, MODEL_PATCHES_DIR), exist_ok=True)
63
  os.makedirs(os.path.join(APP_DIR, DIFFUSION_MODELS_DIR), exist_ok=True)
64
  os.makedirs(os.path.join(APP_DIR, VAE_DIR), exist_ok=True)
65
  os.makedirs(os.path.join(APP_DIR, TEXT_ENCODERS_DIR), exist_ok=True)
core/generation_logic.py CHANGED
@@ -1,8 +1,10 @@
1
  from typing import Any, Dict
2
  import gradio as gr
3
 
 
4
  from core.pipelines.sd_image_pipeline import SdImagePipeline
5
 
 
6
  sd_image_pipeline = SdImagePipeline()
7
 
8
 
@@ -16,5 +18,8 @@ def build_reverse_map():
16
  cn_module.REVERSE_DISPLAY_NAME_MAP["Semantic Segmentor (legacy, alias for UniFormer)"] = "SemSegPreprocessor"
17
 
18
 
 
 
 
19
  def generate_image_wrapper(ui_inputs: dict, progress=gr.Progress(track_tqdm=True)):
20
  return sd_image_pipeline.run(ui_inputs=ui_inputs, progress=progress)
 
1
  from typing import Any, Dict
2
  import gradio as gr
3
 
4
+ from core.pipelines.controlnet_preprocessor import ControlNetPreprocessorPipeline
5
  from core.pipelines.sd_image_pipeline import SdImagePipeline
6
 
7
+ controlnet_preprocessor_pipeline = ControlNetPreprocessorPipeline()
8
  sd_image_pipeline = SdImagePipeline()
9
 
10
 
 
18
  cn_module.REVERSE_DISPLAY_NAME_MAP["Semantic Segmentor (legacy, alias for UniFormer)"] = "SemSegPreprocessor"
19
 
20
 
21
+ def run_cn_preprocessor_entry(*args, **kwargs):
22
+ return controlnet_preprocessor_pipeline.run(*args, **kwargs)
23
+
24
  def generate_image_wrapper(ui_inputs: dict, progress=gr.Progress(track_tqdm=True)):
25
  return sd_image_pipeline.run(ui_inputs=ui_inputs, progress=progress)
core/pipelines/controlnet_preprocessor.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, Any, List
2
+ import imageio
3
+ import tempfile
4
+ import numpy as np
5
+ import torch
6
+ import gradio as gr
7
+ from PIL import Image
8
+ import spaces
9
+
10
+ from .base_pipeline import BasePipeline
11
+ from comfy_integration.nodes import NODE_CLASS_MAPPINGS
12
+ from nodes import NODE_DISPLAY_NAME_MAPPINGS
13
+ from utils.app_utils import get_value_at_index
14
+
15
# Display-name -> node-class-name map. Left as None here and populated at
# startup (see core.generation_logic.build_reverse_map, which writes into this
# module); _gpu_logic raises RuntimeError if it is still None when used.
REVERSE_DISPLAY_NAME_MAP = None
# Preprocessors considered cheap enough to run in-process on CPU; run() skips
# the ZeroGPU request for these. NOTE(review): "Color Pallete" is presumably
# the upstream node's own (misspelled) display name — do not "correct" it
# without checking the aux node package.
CPU_ONLY_PREPROCESSORS = {
    "Binary Lines", "Canny Edge", "Color Pallete", "Fake Scribble Lines (aka scribble_hed)",
    "Image Intensity", "Image Luminance", "Inpaint Preprocessor", "PyraCanny", "Scribble Lines",
    "Scribble XDoG Lines", "Standard Lineart", "Content Shuffle", "Tile"
}
21
+
22
def run_node_by_function_name(node_instance: Any, **kwargs) -> Any:
    """Invoke a ComfyUI node instance via its declared entry point.

    ComfyUI node classes advertise their execution method through a
    class-level ``FUNCTION`` attribute. This helper resolves that method on
    *node_instance* and calls it with the given keyword arguments.

    Raises:
        AttributeError: If ``FUNCTION`` is missing/falsy, or if the named
            method does not exist or is not callable on the instance.
    """
    cls = type(node_instance)
    fn_name = getattr(cls, 'FUNCTION', None)
    if not fn_name:
        raise AttributeError(f"Node class '{cls.__name__}' is missing the required 'FUNCTION' attribute.")
    entry_point = getattr(node_instance, fn_name, None)
    if not callable(entry_point):
        raise AttributeError(f"Method '{fn_name}' not found or not callable on node '{cls.__name__}'.")
    return entry_point(**kwargs)
31
+
32
class ControlNetPreprocessorPipeline(BasePipeline):
    """Runs a ControlNet preprocessor over a single image or every video frame.

    Images are processed directly; videos are decoded frame-by-frame, each
    frame is pushed through the selected preprocessor node, and the results
    are re-encoded at the source frame rate. Preprocessors listed in
    CPU_ONLY_PREPROCESSORS run in-process; all others go through the ZeroGPU
    execution wrapper provided by BasePipeline.
    """

    def get_required_models(self, **kwargs) -> List[str]:
        """No explicit model downloads: the aux preprocessor nodes resolve
        and fetch their own checkpoints."""
        return []

    def _gpu_logic(
        self, pil_images: List[Image.Image], preprocessor_name: str, model_name: str,
        params: Dict[str, Any], progress=gr.Progress(track_tqdm=True)
    ) -> List[Image.Image]:
        """Process each frame with the node class behind *preprocessor_name*.

        Args:
            pil_images: Frames to process.
            preprocessor_name: Display name, resolved to a node class through
                REVERSE_DISPLAY_NAME_MAP (built at startup).
            model_name: Forwarded to the node as ``ckpt_name``.
            params: Extra keyword args for the node's entry function.
            progress: Gradio progress callback.

        Returns:
            Processed frames as PIL images.

        Raises:
            RuntimeError: If the display-name map was never initialized.
            ValueError: If the preprocessor cannot be resolved to a node.
        """
        global REVERSE_DISPLAY_NAME_MAP
        if REVERSE_DISPLAY_NAME_MAP is None:
            raise RuntimeError("REVERSE_DISPLAY_NAME_MAP has not been initialized. `build_reverse_map` must be called on startup.")

        class_name = REVERSE_DISPLAY_NAME_MAP.get(preprocessor_name)
        if not class_name or class_name not in NODE_CLASS_MAPPINGS:
            raise ValueError(f"Preprocessor '{preprocessor_name}' not found.")

        preprocessor_instance = NODE_CLASS_MAPPINGS[class_name]()
        # NOTE(review): ckpt_name is passed unconditionally; this assumes every
        # mapped node's entry function accepts a ckpt_name kwarg — verify.
        call_args = {**params, 'ckpt_name': model_name}

        processed_pil_images = []
        total_frames = len(pil_images)

        for i, frame_pil in enumerate(pil_images):
            progress(i / total_frames, desc=f"Processing frame {i+1}/{total_frames} with {preprocessor_name}...")

            # Channels-last float tensor in [0, 1], shape (1, H, W, C) —
            # the layout ComfyUI image nodes expect.
            frame_tensor = torch.from_numpy(np.array(frame_pil).astype(np.float32) / 255.0).unsqueeze(0)

            # BUGFIX: the spatial dims are shape[1] (height) and shape[2]
            # (width). The previous max(shape[2], shape[3]) compared width
            # with the channel count, producing a wrong resolution for
            # portrait inputs and an IndexError on 3-D grayscale tensors.
            resolution_arg = {'resolution': max(frame_tensor.shape[1], frame_tensor.shape[2])}

            result_tuple = run_node_by_function_name(
                preprocessor_instance,
                image=frame_tensor,
                **resolution_arg,
                **call_args
            )

            # Node results are tuples; the first slot is the image batch.
            processed_tensor = get_value_at_index(result_tuple, 0)
            processed_np = (processed_tensor.squeeze(0).cpu().numpy().clip(0, 1) * 255.0).astype(np.uint8)
            processed_pil_images.append(Image.fromarray(processed_np))

        return processed_pil_images

    def run(self, input_type, image_input, video_input, preprocessor_name, model_name, zero_gpu_duration, *args, progress=gr.Progress(track_tqdm=True)):
        """Gradio entry point for the preprocessor tab.

        Args:
            input_type: "Image" or "Video".
            image_input: PIL image (used when input_type == "Image").
            video_input: Video file path (used when input_type == "Video").
            preprocessor_name: Display name of the preprocessor to run.
            model_name: Checkpoint filename for model-backed preprocessors.
            zero_gpu_duration: Requested ZeroGPU allocation in seconds.
            *args: Flat values of the dynamic parameter controls
                (sliders, then dropdowns, then checkboxes).

        Returns:
            A one-element list with the encoded video path, or the list of
            processed PIL images.
        """
        from utils import app_utils
        pil_images, is_video, fps = [], False, 30

        progress(0, desc="Reading input file...")
        if input_type == "Image":
            if image_input is None: raise gr.Error("Please provide an input image.")
            pil_images = [image_input]
        elif input_type == "Video":
            if video_input is None: raise gr.Error("Please provide an input video.")
            try:
                video_reader = imageio.get_reader(video_input)
                meta = video_reader.get_meta_data()
                fps = meta.get('fps', 30)  # fall back to 30 fps if metadata is missing
                pil_images = [Image.fromarray(frame) for frame in video_reader]
                is_video = True
                video_reader.close()
            except Exception as e: raise gr.Error(f"Failed to read video file: {e}")
        else:
            raise gr.Error("Invalid input type selected.")

        if not pil_images: raise gr.Error("Could not extract any frames from the input.")

        if app_utils.PREPROCESSOR_PARAMETER_MAP is None:
            raise RuntimeError("Preprocessor parameter map is not built. Check startup logs.")

        # Rebuild the slider -> dropdown -> checkbox ordering used when the
        # dynamic controls were laid out, then zip names with *args values.
        params_config = app_utils.PREPROCESSOR_PARAMETER_MAP.get(preprocessor_name, [])
        sliders_params = [p for p in params_config if p['type'] in ["INT", "FLOAT"]]
        dropdown_params = [p for p in params_config if isinstance(p['type'], list)]
        checkbox_params = [p for p in params_config if p['type'] == "BOOLEAN"]
        ordered_params_config = sliders_params + dropdown_params + checkbox_params
        param_names = [p['name'] for p in ordered_params_config]
        # NOTE(review): this indexes *args positionally from 0, assuming the
        # visible controls are packed at the start of the flat component list
        # — verify against the dynamic-input ordering in ui/events.py.
        provided_params = {param_names[i]: args[i] for i in range(len(param_names))}

        if preprocessor_name not in CPU_ONLY_PREPROCESSORS:
            print(f"--- '{preprocessor_name}' requires GPU, requesting ZeroGPU. ---")
            try:
                processed_pil_images = self._execute_gpu_logic(
                    self._gpu_logic,
                    duration=zero_gpu_duration,
                    default_duration=60,
                    task_name=f"Preprocessor '{preprocessor_name}'",
                    pil_images=pil_images,
                    preprocessor_name=preprocessor_name,
                    model_name=model_name,
                    params=provided_params,
                    progress=progress
                )
            except Exception as e:
                import traceback; traceback.print_exc()
                raise gr.Error(f"Failed to run preprocessor '{preprocessor_name}' on GPU: {e}")
        else:
            print(f"--- Running '{preprocessor_name}' on CPU, no ZeroGPU requested. ---")
            try:
                processed_pil_images = self._gpu_logic(pil_images, preprocessor_name, model_name, provided_params, progress=progress)
            except Exception as e:
                import traceback; traceback.print_exc()
                raise gr.Error(f"Failed to run preprocessor '{preprocessor_name}' on CPU: {e}")

        if not processed_pil_images: raise gr.Error("Processing returned no frames.")

        progress(0.9, desc="Finalizing output...")
        if is_video:
            # Re-encode processed frames at the source fps.
            frames_np = [np.array(img) for img in processed_pil_images]
            frames_tensor = torch.from_numpy(np.stack(frames_np)).to(torch.float32) / 255.0
            video_path = self._encode_video_from_frames(frames_tensor, fps, progress)
            return [video_path]
        else:
            progress(1.0, desc="Done!")
            return processed_pil_images
core/pipelines/sd_image_pipeline.py CHANGED
@@ -289,6 +289,23 @@ class SdImagePipeline(BasePipeline):
289
  "start_percent": 0.0, "end_percent": 1.0, "control_net_name": cn_filepaths[i]
290
  })
291
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
292
  from utils.app_utils import get_vae_path
293
  vae_source = ui_inputs.get('vae_source')
294
  vae_id = ui_inputs.get('vae_id')
@@ -362,6 +379,7 @@ class SdImagePipeline(BasePipeline):
362
  "clip_name": components['clip'],
363
  "vae_name": ui_inputs.get('vae_name', components['vae']),
364
  "controlnet_chain": active_controlnets,
 
365
  "conditioning_chain": active_conditioning,
366
  }
367
 
 
289
  "start_percent": 0.0, "end_percent": 1.0, "control_net_name": cn_filepaths[i]
290
  })
291
 
292
+ diffsynth_controlnet_data = ui_inputs.get('diffsynth_controlnet_data', [])
293
+ active_diffsynth_controlnets = []
294
+ if diffsynth_controlnet_data:
295
+ (cn_images, _, _, cn_strengths, cn_filepaths) = [diffsynth_controlnet_data[i::5] for i in range(5)]
296
+ for i in range(len(cn_images)):
297
+ if cn_images[i] and cn_strengths[i] > 0 and cn_filepaths[i] and cn_filepaths[i] != "None":
298
+ ensure_controlnet_model_downloaded(cn_filepaths[i], progress)
299
+
300
+ if not os.path.exists(INPUT_DIR): os.makedirs(INPUT_DIR)
301
+ cn_temp_path = os.path.join(INPUT_DIR, f"temp_diffsynth_cn_{i}_{random.randint(1000, 9999)}.png")
302
+ cn_images[i].save(cn_temp_path, "PNG")
303
+ temp_files_to_clean.append(cn_temp_path)
304
+ active_diffsynth_controlnets.append({
305
+ "image": os.path.basename(cn_temp_path), "strength": cn_strengths[i],
306
+ "control_net_name": cn_filepaths[i]
307
+ })
308
+
309
  from utils.app_utils import get_vae_path
310
  vae_source = ui_inputs.get('vae_source')
311
  vae_id = ui_inputs.get('vae_id')
 
379
  "clip_name": components['clip'],
380
  "vae_name": ui_inputs.get('vae_name', components['vae']),
381
  "controlnet_chain": active_controlnets,
382
+ "diffsynth_controlnet_chain": active_diffsynth_controlnets,
383
  "conditioning_chain": active_conditioning,
384
  }
385
 
core/pipelines/workflow_recipes/_partials/conditioning/z-image.yaml CHANGED
@@ -45,10 +45,12 @@ connections:
45
  - from: "vae_loader:0"
46
  to: "vae_encode:vae"
47
 
48
- dynamic_controlnet_chains:
49
- controlnet_chain:
50
- template: "ControlNetApplyAdvanced"
 
51
  ksampler_node: "ksampler"
 
52
 
53
  dynamic_conditioning_chains:
54
  conditioning_chain:
 
45
  - from: "vae_loader:0"
46
  to: "vae_encode:vae"
47
 
48
+ dynamic_diffsynth_controlnet_chains:
49
+ diffsynth_controlnet_chain:
50
+ template: "QwenImageDiffsynthControlnet"
51
+ model_sampler_node: "model_sampler"
52
  ksampler_node: "ksampler"
53
+ vae_source: "vae_loader:0"
54
 
55
  dynamic_conditioning_chains:
56
  conditioning_chain:
core/settings.py CHANGED
@@ -6,6 +6,7 @@ CHECKPOINT_DIR = "models/checkpoints"
6
  LORA_DIR = "models/loras"
7
  EMBEDDING_DIR = "models/embeddings"
8
  CONTROLNET_DIR = "models/controlnet"
 
9
  DIFFUSION_MODELS_DIR = "models/diffusion_models"
10
  VAE_DIR = "models/vae"
11
  TEXT_ENCODERS_DIR = "models/text_encoders"
 
6
  LORA_DIR = "models/loras"
7
  EMBEDDING_DIR = "models/embeddings"
8
  CONTROLNET_DIR = "models/controlnet"
9
+ MODEL_PATCHES_DIR = "models/model_patches"
10
  DIFFUSION_MODELS_DIR = "models/diffusion_models"
11
  VAE_DIR = "models/vae"
12
  TEXT_ENCODERS_DIR = "models/text_encoders"
ui/events.py CHANGED
@@ -8,7 +8,8 @@ from utils.app_utils import *
8
  from core.generation_logic import *
9
  from comfy_integration.nodes import SAMPLER_CHOICES, SCHEDULER_CHOICES
10
 
11
- from utils.app_utils import save_uploaded_file_with_hash
 
12
  from ui.shared.ui_components import RESOLUTION_MAP, MAX_CONTROLNETS, MAX_EMBEDDINGS, MAX_CONDITIONINGS, MAX_LORAS
13
 
14
 
@@ -26,8 +27,131 @@ def load_controlnet_config():
26
  print(f"Error loading controlnet_models.yaml: {e}")
27
  return []
28
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
  def attach_event_handlers(ui_components, demo):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  def create_lora_event_handlers(prefix):
32
  lora_rows = ui_components[f'lora_rows_{prefix}']
33
  lora_ids = ui_components[f'lora_ids_{prefix}']
@@ -159,6 +283,98 @@ def attach_event_handlers(ui_components, demo):
159
  show_progress=False
160
  )
161
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
162
  def create_embedding_event_handlers(prefix):
163
  rows = ui_components[f'embedding_rows_{prefix}']
164
  ids = ui_components[f'embeddings_ids_{prefix}']
@@ -277,6 +493,7 @@ def attach_event_handlers(ui_components, demo):
277
 
278
  lora_data_components = ui_components.get(f'all_lora_components_flat_{prefix}', [])
279
  controlnet_data_components = ui_components.get(f'all_controlnet_components_flat_{prefix}', [])
 
280
  embedding_data_components = ui_components.get(f'all_embedding_components_flat_{prefix}', [])
281
  conditioning_data_components = ui_components.get(f'all_conditioning_components_flat_{prefix}', [])
282
 
@@ -286,7 +503,7 @@ def attach_event_handlers(ui_components, demo):
286
 
287
  input_keys = list(run_inputs_map.keys())
288
  input_list_flat = [v for v in run_inputs_map.values() if v is not None]
289
- input_list_flat += lora_data_components + controlnet_data_components + embedding_data_components + conditioning_data_components
290
 
291
  def create_ui_inputs_dict(*args):
292
  valid_keys = [k for k in input_keys if run_inputs_map[k] is not None]
@@ -297,6 +514,8 @@ def attach_event_handlers(ui_components, demo):
297
  arg_idx += len(lora_data_components)
298
  ui_dict['controlnet_data'] = list(args[arg_idx : arg_idx + len(controlnet_data_components)])
299
  arg_idx += len(controlnet_data_components)
 
 
300
  ui_dict['embedding_data'] = list(args[arg_idx : arg_idx + len(embedding_data_components)])
301
  arg_idx += len(embedding_data_components)
302
  ui_dict['conditioning_data'] = list(args[arg_idx : arg_idx + len(conditioning_data_components)])
@@ -327,6 +546,7 @@ def attach_event_handlers(ui_components, demo):
327
  show_progress=False
328
  )
329
  if f'add_controlnet_button_{prefix}' in ui_components: create_controlnet_event_handlers(prefix)
 
330
  if f'add_embedding_button_{prefix}' in ui_components:
331
  create_embedding_event_handlers(prefix)
332
  if f'embeddings_uploads_{prefix}' in ui_components:
@@ -392,38 +612,70 @@ def attach_event_handlers(ui_components, demo):
392
  ui_components['view_mode_inpaint'].change(fn=toggle_inpaint_fullscreen_view, inputs=[ui_components['view_mode_inpaint']], outputs=output_components, show_progress=False)
393
 
394
  def initialize_all_cn_dropdowns():
 
395
  cn_config = load_controlnet_config()
396
- if not cn_config: return {}
397
-
398
- all_types = sorted(list(set(t for model in cn_config for t in model.get("Type", []))))
399
- default_type = all_types[0] if all_types else None
400
-
401
- series_choices = []
402
- if default_type:
403
- series_choices = sorted(list(set(model.get("Series", "Default") for model in cn_config if default_type in model.get("Type", []))))
404
- default_series = series_choices[0] if series_choices else None
 
 
 
 
 
 
 
 
 
 
405
 
406
- filepath = "None"
407
- if default_series and default_type:
408
- for model in cn_config:
409
- if model.get("Series") == default_series and default_type in model.get("Type", []):
410
- filepath = model.get("Filepath")
411
- break
412
-
413
- updates = {}
414
- for prefix in ["txt2img", "img2img", "inpaint", "outpaint", "hires_fix"]:
415
- if f'controlnet_types_{prefix}' in ui_components:
416
- for type_dd in ui_components[f'controlnet_types_{prefix}']:
417
- updates[type_dd] = gr.update(choices=all_types, value=default_type)
418
- for series_dd in ui_components[f'controlnet_series_{prefix}']:
419
- updates[series_dd] = gr.update(choices=series_choices, value=default_series)
420
- for filepath_state in ui_components[f'controlnet_filepaths_{prefix}']:
421
- updates[filepath_state] = filepath
422
- return updates
 
 
 
 
 
 
 
423
 
424
  def run_on_load():
425
- cn_updates = initialize_all_cn_dropdowns()
426
- return cn_updates
 
 
 
 
 
 
 
 
 
 
 
 
 
 
427
 
428
  all_load_outputs = []
429
  for prefix in ["txt2img", "img2img", "inpaint", "outpaint", "hires_fix"]:
@@ -431,6 +683,19 @@ def attach_event_handlers(ui_components, demo):
431
  all_load_outputs.extend(ui_components[f'controlnet_types_{prefix}'])
432
  all_load_outputs.extend(ui_components[f'controlnet_series_{prefix}'])
433
  all_load_outputs.extend(ui_components[f'controlnet_filepaths_{prefix}'])
 
 
 
 
 
 
 
 
 
 
 
 
 
434
 
435
  if all_load_outputs:
436
  demo.load(
 
8
  from core.generation_logic import *
9
  from comfy_integration.nodes import SAMPLER_CHOICES, SCHEDULER_CHOICES
10
 
11
+ from core.pipelines.controlnet_preprocessor import CPU_ONLY_PREPROCESSORS
12
+ from utils.app_utils import PREPROCESSOR_MODEL_MAP, PREPROCESSOR_PARAMETER_MAP, save_uploaded_file_with_hash
13
  from ui.shared.ui_components import RESOLUTION_MAP, MAX_CONTROLNETS, MAX_EMBEDDINGS, MAX_CONDITIONINGS, MAX_LORAS
14
 
15
 
 
27
  print(f"Error loading controlnet_models.yaml: {e}")
28
  return []
29
 
30
@lru_cache(maxsize=1)
def load_diffsynth_controlnet_config():
    """Load the DiffSynth ControlNet model list for Z-Image.

    Reads ``yaml/diffsynth_controlnet_models.yaml`` relative to the project
    root and returns the list under "DiffSynth_ControlNet" -> "Z-Image".
    Any failure (missing or unparseable file) logs the error and yields an
    empty list. The result is cached after the first call.
    """
    project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    model_list_path = os.path.join(project_root, 'yaml', 'diffsynth_controlnet_models.yaml')
    try:
        print("--- Loading diffsynth_controlnet_models.yaml ---")
        with open(model_list_path, 'r', encoding='utf-8') as fh:
            parsed = yaml.safe_load(fh)
        print("--- ✅ diffsynth_controlnet_models.yaml loaded successfully ---")
        # parsed may be None for an empty file; the resulting AttributeError
        # is caught below, matching the broad best-effort contract.
        return parsed.get("DiffSynth_ControlNet", {}).get("Z-Image", [])
    except Exception as e:
        print(f"Error loading diffsynth_controlnet_models.yaml: {e}")
        return []
43
+
44
 
45
  def attach_event_handlers(ui_components, demo):
46
+ def update_cn_input_visibility(choice):
47
+ return {
48
+ ui_components["cn_image_input"]: gr.update(visible=choice == "Image"),
49
+ ui_components["cn_video_input"]: gr.update(visible=choice == "Video")
50
+ }
51
+ ui_components["cn_input_type"].change(
52
+ fn=update_cn_input_visibility,
53
+ inputs=[ui_components["cn_input_type"]],
54
+ outputs=[ui_components["cn_image_input"], ui_components["cn_video_input"]]
55
+ )
56
+
57
+ def update_preprocessor_models_dropdown(preprocessor_name):
58
+ models = PREPROCESSOR_MODEL_MAP.get(preprocessor_name)
59
+ if models:
60
+ model_filenames = [m[1] for m in models]
61
+ return gr.update(choices=model_filenames, value=model_filenames[0], visible=True)
62
+ else:
63
+ return gr.update(choices=[], value=None, visible=False)
64
+
65
+ def update_preprocessor_settings_ui(preprocessor_name):
66
+ from ui.layout import MAX_DYNAMIC_CONTROLS
67
+ params = PREPROCESSOR_PARAMETER_MAP.get(preprocessor_name, [])
68
+
69
+ slider_updates, dropdown_updates, checkbox_updates = [], [], []
70
+
71
+ s_idx, d_idx, c_idx = 0, 0, 0
72
+
73
+ for param in params:
74
+ if s_idx + d_idx + c_idx >= MAX_DYNAMIC_CONTROLS: break
75
+
76
+ name = param["name"]
77
+ ptype = param["type"]
78
+ config = param["config"]
79
+ label = name.replace('_', ' ').title()
80
+
81
+ if ptype == "INT" or ptype == "FLOAT":
82
+ if s_idx < MAX_DYNAMIC_CONTROLS:
83
+ slider_updates.append(gr.update(
84
+ label=label,
85
+ minimum=config.get('min', 0),
86
+ maximum=config.get('max', 255),
87
+ step=config.get('step', 0.1 if ptype == "FLOAT" else 1),
88
+ value=config.get('default', 0),
89
+ visible=True
90
+ ))
91
+ s_idx += 1
92
+ elif isinstance(ptype, list):
93
+ if d_idx < MAX_DYNAMIC_CONTROLS:
94
+ dropdown_updates.append(gr.update(
95
+ label=label,
96
+ choices=ptype,
97
+ value=config.get('default', ptype[0] if ptype else None),
98
+ visible=True
99
+ ))
100
+ d_idx += 1
101
+ elif ptype == "BOOLEAN":
102
+ if c_idx < MAX_DYNAMIC_CONTROLS:
103
+ checkbox_updates.append(gr.update(
104
+ label=label,
105
+ value=config.get('default', False),
106
+ visible=True
107
+ ))
108
+ c_idx += 1
109
+
110
+ for _ in range(s_idx, MAX_DYNAMIC_CONTROLS): slider_updates.append(gr.update(visible=False))
111
+ for _ in range(d_idx, MAX_DYNAMIC_CONTROLS): dropdown_updates.append(gr.update(visible=False))
112
+ for _ in range(c_idx, MAX_DYNAMIC_CONTROLS): checkbox_updates.append(gr.update(visible=False))
113
+
114
+ return slider_updates + dropdown_updates + checkbox_updates
115
+
116
+ def update_run_button_for_cpu(preprocessor_name):
117
+ if preprocessor_name in CPU_ONLY_PREPROCESSORS:
118
+ return gr.update(value="Run Preprocessor CPU Only", variant="primary"), gr.update(visible=False)
119
+ else:
120
+ return gr.update(value="Run Preprocessor", variant="primary"), gr.update(visible=True)
121
+
122
+ ui_components["preprocessor_cn"].change(
123
+ fn=update_preprocessor_models_dropdown,
124
+ inputs=[ui_components["preprocessor_cn"]],
125
+ outputs=[ui_components["preprocessor_model_cn"]]
126
+ ).then(
127
+ fn=update_preprocessor_settings_ui,
128
+ inputs=[ui_components["preprocessor_cn"]],
129
+ outputs=ui_components["cn_sliders"] + ui_components["cn_dropdowns"] + ui_components["cn_checkboxes"]
130
+ ).then(
131
+ fn=update_run_button_for_cpu,
132
+ inputs=[ui_components["preprocessor_cn"]],
133
+ outputs=[ui_components["run_cn"], ui_components["zero_gpu_cn"]]
134
+ )
135
+
136
+ all_dynamic_inputs = (
137
+ ui_components["cn_sliders"] +
138
+ ui_components["cn_dropdowns"] +
139
+ ui_components["cn_checkboxes"]
140
+ )
141
+
142
+ ui_components["run_cn"].click(
143
+ fn=run_cn_preprocessor_entry,
144
+ inputs=[
145
+ ui_components["cn_input_type"],
146
+ ui_components["cn_image_input"],
147
+ ui_components["cn_video_input"],
148
+ ui_components["preprocessor_cn"],
149
+ ui_components["preprocessor_model_cn"],
150
+ ui_components["zero_gpu_cn"],
151
+ ] + all_dynamic_inputs,
152
+ outputs=[ui_components["output_gallery_cn"]]
153
+ )
154
+
155
  def create_lora_event_handlers(prefix):
156
  lora_rows = ui_components[f'lora_rows_{prefix}']
157
  lora_ids = ui_components[f'lora_ids_{prefix}']
 
283
  show_progress=False
284
  )
285
 
286
+ def create_diffsynth_controlnet_event_handlers(prefix):
287
+ cn_rows = ui_components[f'diffsynth_controlnet_rows_{prefix}']
288
+ cn_types = ui_components[f'diffsynth_controlnet_types_{prefix}']
289
+ cn_series = ui_components[f'diffsynth_controlnet_series_{prefix}']
290
+ cn_filepaths = ui_components[f'diffsynth_controlnet_filepaths_{prefix}']
291
+ cn_images = ui_components[f'diffsynth_controlnet_images_{prefix}']
292
+ cn_strengths = ui_components[f'diffsynth_controlnet_strengths_{prefix}']
293
+
294
+ count_state = ui_components[f'diffsynth_controlnet_count_state_{prefix}']
295
+ add_button = ui_components[f'add_diffsynth_controlnet_button_{prefix}']
296
+ del_button = ui_components[f'delete_diffsynth_controlnet_button_{prefix}']
297
+ accordion = ui_components[f'diffsynth_controlnet_accordion_{prefix}']
298
+
299
+ def add_cn_row(c):
300
+ c += 1
301
+ updates = {
302
+ count_state: c,
303
+ cn_rows[c-1]: gr.update(visible=True),
304
+ add_button: gr.update(visible=c < MAX_CONTROLNETS),
305
+ del_button: gr.update(visible=True)
306
+ }
307
+ return updates
308
+
309
+ def del_cn_row(c):
310
+ c -= 1
311
+ updates = {
312
+ count_state: c,
313
+ cn_rows[c]: gr.update(visible=False),
314
+ cn_images[c]: None,
315
+ cn_strengths[c]: 1.0,
316
+ add_button: gr.update(visible=True),
317
+ del_button: gr.update(visible=c > 0)
318
+ }
319
+ return updates
320
+
321
+ add_outputs = [count_state, add_button, del_button] + cn_rows
322
+ del_outputs = [count_state, add_button, del_button] + cn_rows + cn_images + cn_strengths
323
+ add_button.click(fn=add_cn_row, inputs=[count_state], outputs=add_outputs, show_progress=False)
324
+ del_button.click(fn=del_cn_row, inputs=[count_state], outputs=del_outputs, show_progress=False)
325
+
326
+ def on_cn_type_change(selected_type):
327
+ cn_config = load_diffsynth_controlnet_config()
328
+ series_choices = []
329
+ if selected_type:
330
+ series_choices = sorted(list(set(
331
+ model.get("Series", "Default") for model in cn_config
332
+ if selected_type in model.get("Type", [])
333
+ )))
334
+ default_series = series_choices[0] if series_choices else None
335
+ filepath = "None"
336
+ if default_series:
337
+ for model in cn_config:
338
+ if model.get("Series") == default_series and selected_type in model.get("Type", []):
339
+ filepath = model.get("Filepath")
340
+ break
341
+ return gr.update(choices=series_choices, value=default_series), filepath
342
+
343
+ def on_cn_series_change(selected_series, selected_type):
344
+ cn_config = load_diffsynth_controlnet_config()
345
+ filepath = "None"
346
+ if selected_series and selected_type:
347
+ for model in cn_config:
348
+ if model.get("Series") == selected_series and selected_type in model.get("Type", []):
349
+ filepath = model.get("Filepath")
350
+ break
351
+ return filepath
352
+
353
# Attach per-unit handlers: changing a unit's Type refreshes its Series
# choices and filepath; changing its Series refreshes the filepath only.
# zip() pairs each unit's components directly instead of indexing three
# parallel lists by range(MAX_CONTROLNETS); the lists are all built with
# MAX_CONTROLNETS entries, so the pairing is complete.
for type_dd, series_dd, filepath_state in zip(cn_types, cn_series, cn_filepaths):
    type_dd.change(
        fn=on_cn_type_change,
        inputs=[type_dd],
        outputs=[series_dd, filepath_state],
        show_progress=False
    )
    series_dd.change(
        fn=on_cn_series_change,
        inputs=[series_dd, type_dd],
        outputs=[filepath_state],
        show_progress=False
    )
366
+
367
def on_accordion_expand(*imgs):
    """Emit one no-op gr.update() per input image on accordion expand.

    NOTE(review): the updates carry no values, presumably to nudge Gradio
    into re-rendering the image components when the panel opens — confirm
    against the Gradio version in use.
    """
    return [gr.update() for _ in imgs]

accordion.expand(
    fn=on_accordion_expand,
    inputs=cn_images,
    outputs=cn_images,
    show_progress=False
)
376
+
377
+
378
  def create_embedding_event_handlers(prefix):
379
  rows = ui_components[f'embedding_rows_{prefix}']
380
  ids = ui_components[f'embeddings_ids_{prefix}']
 
493
 
494
  lora_data_components = ui_components.get(f'all_lora_components_flat_{prefix}', [])
495
  controlnet_data_components = ui_components.get(f'all_controlnet_components_flat_{prefix}', [])
496
+ diffsynth_controlnet_data_components = ui_components.get(f'all_diffsynth_controlnet_components_flat_{prefix}', [])
497
  embedding_data_components = ui_components.get(f'all_embedding_components_flat_{prefix}', [])
498
  conditioning_data_components = ui_components.get(f'all_conditioning_components_flat_{prefix}', [])
499
 
 
503
 
504
  input_keys = list(run_inputs_map.keys())
505
  input_list_flat = [v for v in run_inputs_map.values() if v is not None]
506
+ input_list_flat += lora_data_components + controlnet_data_components + diffsynth_controlnet_data_components + embedding_data_components + conditioning_data_components
507
 
508
  def create_ui_inputs_dict(*args):
509
  valid_keys = [k for k in input_keys if run_inputs_map[k] is not None]
 
514
  arg_idx += len(lora_data_components)
515
  ui_dict['controlnet_data'] = list(args[arg_idx : arg_idx + len(controlnet_data_components)])
516
  arg_idx += len(controlnet_data_components)
517
+ ui_dict['diffsynth_controlnet_data'] = list(args[arg_idx : arg_idx + len(diffsynth_controlnet_data_components)])
518
+ arg_idx += len(diffsynth_controlnet_data_components)
519
  ui_dict['embedding_data'] = list(args[arg_idx : arg_idx + len(embedding_data_components)])
520
  arg_idx += len(embedding_data_components)
521
  ui_dict['conditioning_data'] = list(args[arg_idx : arg_idx + len(conditioning_data_components)])
 
546
  show_progress=False
547
  )
548
  if f'add_controlnet_button_{prefix}' in ui_components: create_controlnet_event_handlers(prefix)
549
+ if f'add_diffsynth_controlnet_button_{prefix}' in ui_components: create_diffsynth_controlnet_event_handlers(prefix)
550
  if f'add_embedding_button_{prefix}' in ui_components:
551
  create_embedding_event_handlers(prefix)
552
  if f'embeddings_uploads_{prefix}' in ui_components:
 
612
  ui_components['view_mode_inpaint'].change(fn=toggle_inpaint_fullscreen_view, inputs=[ui_components['view_mode_inpaint']], outputs=output_components, show_progress=False)
613
 
614
def initialize_all_cn_dropdowns():
    """Compute the initial choices/values for every ControlNet dropdown.

    Covers both the standard ControlNet and the DiffSynth ControlNet unit
    groups across all five tab prefixes. For each group it derives, from the
    respective YAML-backed config: the sorted list of Types, the default
    (first) Type, the Series available for that Type, the default Series,
    and the resolved model Filepath (or the string "None").

    Returns a dict mapping Gradio components/States to their initial updates,
    suitable for use as a demo.load output mapping.
    """
    def _defaults_from_config(config):
        # Derive (all_types, default_type, series_choices, default_series,
        # filepath) from one config list. Mirrors the per-unit change
        # handlers so the initial state matches what a change would produce.
        all_types = sorted({t for model in config for t in model.get("Type", [])})
        default_type = all_types[0] if all_types else None
        series_choices = []
        if default_type:
            series_choices = sorted({
                model.get("Series", "Default")
                for model in config
                if default_type in model.get("Type", [])
            })
        default_series = series_choices[0] if series_choices else None
        filepath = "None"
        if default_series and default_type:
            for model in config:
                if model.get("Series") == default_series and default_type in model.get("Type", []):
                    filepath = model.get("Filepath")
                    break
        return all_types, default_type, series_choices, default_series, filepath

    def _updates_for(config, component_prefix):
        # Build the component->update dict for one ControlNet family,
        # identified by its ui_components key prefix (e.g. 'controlnet' or
        # 'diffsynth_controlnet').
        updates = {}
        if not config:
            return updates
        all_types, default_type, series_choices, default_series, filepath = _defaults_from_config(config)
        for prefix in ["txt2img", "img2img", "inpaint", "outpaint", "hires_fix"]:
            if f'{component_prefix}_types_{prefix}' not in ui_components:
                continue
            for type_dd in ui_components[f'{component_prefix}_types_{prefix}']:
                updates[type_dd] = gr.update(choices=all_types, value=default_type)
            for series_dd in ui_components[f'{component_prefix}_series_{prefix}']:
                updates[series_dd] = gr.update(choices=series_choices, value=default_series)
            for filepath_state in ui_components[f'{component_prefix}_filepaths_{prefix}']:
                updates[filepath_state] = filepath
        return updates

    # Standard ControlNet
    cn_updates = _updates_for(load_controlnet_config(), 'controlnet')
    # DiffSynth ControlNet
    diffsynth_updates = _updates_for(load_diffsynth_controlnet_config(), 'diffsynth_controlnet')
    return {**cn_updates, **diffsynth_updates}
660
+
661
 
662
def run_on_load():
    """Assemble the component updates applied once when the app loads.

    Combines the ControlNet dropdown defaults with the preprocessor tab's
    initial state for the "Canny Edge" preprocessor: its model dropdown, the
    dynamic slider/dropdown/checkbox controls, and the run-button /
    ZeroGPU-duration state.
    """
    all_updates = initialize_all_cn_dropdowns()

    default_preprocessor = "Canny Edge"
    all_updates[ui_components["preprocessor_model_cn"]] = update_preprocessor_models_dropdown(default_preprocessor)

    # One update per dynamic control, in slider/dropdown/checkbox order —
    # the same order the layout registers them in.
    dynamic_components = (
        ui_components["cn_sliders"]
        + ui_components["cn_dropdowns"]
        + ui_components["cn_checkboxes"]
    )
    settings_updates = update_preprocessor_settings_ui(default_preprocessor)
    for idx, component in enumerate(dynamic_components):
        all_updates[component] = settings_updates[idx]

    run_update, zero_gpu_update = update_run_button_for_cpu(default_preprocessor)
    all_updates[ui_components["run_cn"]] = run_update
    all_updates[ui_components["zero_gpu_cn"]] = zero_gpu_update

    return all_updates
679
 
680
  all_load_outputs = []
681
  for prefix in ["txt2img", "img2img", "inpaint", "outpaint", "hires_fix"]:
 
683
  all_load_outputs.extend(ui_components[f'controlnet_types_{prefix}'])
684
  all_load_outputs.extend(ui_components[f'controlnet_series_{prefix}'])
685
  all_load_outputs.extend(ui_components[f'controlnet_filepaths_{prefix}'])
686
+ if f'diffsynth_controlnet_types_{prefix}' in ui_components:
687
+ all_load_outputs.extend(ui_components[f'diffsynth_controlnet_types_{prefix}'])
688
+ all_load_outputs.extend(ui_components[f'diffsynth_controlnet_series_{prefix}'])
689
+ all_load_outputs.extend(ui_components[f'diffsynth_controlnet_filepaths_{prefix}'])
690
+
691
+ all_load_outputs.extend([
692
+ ui_components["preprocessor_model_cn"],
693
+ *ui_components["cn_sliders"],
694
+ *ui_components["cn_dropdowns"],
695
+ *ui_components["cn_checkboxes"],
696
+ ui_components["run_cn"],
697
+ ui_components["zero_gpu_cn"]
698
+ ])
699
 
700
  if all_load_outputs:
701
  demo.load(
ui/layout.py CHANGED
@@ -6,6 +6,17 @@ from .shared import txt2img_ui, img2img_ui, inpaint_ui, outpaint_ui, hires_fix_u
6
 
7
  MAX_DYNAMIC_CONTROLS = 10
8
 
 
 
 
 
 
 
 
 
 
 
 
9
  def build_ui(event_handler_function):
10
  ui_components = {}
11
 
@@ -21,23 +32,56 @@ def build_ui(event_handler_function):
21
  "[SDXL](https://huggingface.co/spaces/RioShiina/ImageGen-SDXL), "
22
  "[SD1.5](https://huggingface.co/spaces/RioShiina/ImageGen-SD15)"
23
  )
24
- with gr.Tabs(elem_id="image_gen_tabs") as image_gen_tabs:
25
- with gr.TabItem("Txt2Img", id=0):
26
- ui_components.update(txt2img_ui.create_ui())
27
-
28
- with gr.TabItem("Img2Img", id=1):
29
- ui_components.update(img2img_ui.create_ui())
 
 
30
 
31
- with gr.TabItem("Inpaint", id=2):
32
- ui_components.update(inpaint_ui.create_ui())
33
 
34
- with gr.TabItem("Outpaint", id=3):
35
- ui_components.update(outpaint_ui.create_ui())
36
 
37
- with gr.TabItem("Hires. Fix", id=4):
38
- ui_components.update(hires_fix_ui.create_ui())
39
-
40
- ui_components['image_gen_tabs'] = image_gen_tabs
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
  gr.Markdown("<div style='text-align: center; margin-top: 20px;'>Made by RioShiina with ❤️<br><a href='https://github.com/RioShiina47' target='_blank'>GitHub</a> | <a href='https://huggingface.co/RioShiina' target='_blank'>Hugging Face</a> | <a href='https://civitai.com/user/RioShiina' target='_blank'>Civitai</a></div>")
43
 
 
6
 
7
  MAX_DYNAMIC_CONTROLS = 10
8
 
9
def get_preprocessor_choices():
    """Collect the display names of ControlNet auxiliary preprocessor nodes.

    Scans NODE_DISPLAY_NAME_MAPPINGS for class names containing any of the
    preprocessor-family keywords and returns the de-duplicated, sorted
    display names for the dropdown.
    """
    from nodes import NODE_DISPLAY_NAME_MAPPINGS

    keywords = ("Preprocessor", "Segmentor", "Estimator", "Detector")
    names = {
        display_name
        for class_name, display_name in NODE_DISPLAY_NAME_MAPPINGS.items()
        if any(kw in class_name for kw in keywords)
    }
    return sorted(names)
18
+
19
+
20
  def build_ui(event_handler_function):
21
  ui_components = {}
22
 
 
32
  "[SDXL](https://huggingface.co/spaces/RioShiina/ImageGen-SDXL), "
33
  "[SD1.5](https://huggingface.co/spaces/RioShiina/ImageGen-SD15)"
34
  )
35
+ with gr.Tabs(elem_id="tabs_container") as tabs:
36
+ with gr.TabItem("Z-Image", id=0):
37
+ with gr.Tabs(elem_id="image_gen_tabs") as image_gen_tabs:
38
+ with gr.TabItem("Txt2Img", id=0):
39
+ ui_components.update(txt2img_ui.create_ui())
40
+
41
+ with gr.TabItem("Img2Img", id=1):
42
+ ui_components.update(img2img_ui.create_ui())
43
 
44
+ with gr.TabItem("Inpaint", id=2):
45
+ ui_components.update(inpaint_ui.create_ui())
46
 
47
+ with gr.TabItem("Outpaint", id=3):
48
+ ui_components.update(outpaint_ui.create_ui())
49
 
50
+ with gr.TabItem("Hires. Fix", id=4):
51
+ ui_components.update(hires_fix_ui.create_ui())
52
+
53
+ ui_components['image_gen_tabs'] = image_gen_tabs
54
+
55
+ with gr.TabItem("Controlnet Preprocessors", id=1):
56
+ gr.Markdown("## ControlNet Auxiliary Preprocessors")
57
+ gr.Markdown("Powered by [Fannovel16/comfyui_controlnet_aux](https://github.com/Fannovel16/comfyui_controlnet_aux).")
58
+ gr.Markdown("Upload an image or video to process it with a ControlNet preprocessor.")
59
+ with gr.Row():
60
+ with gr.Column(scale=1):
61
+ cn_input_type = gr.Radio(["Image", "Video"], label="Input Type", value="Image")
62
+ cn_image_input = gr.Image(type="pil", label="Input Image", visible=True, height=384)
63
+ cn_video_input = gr.Video(label="Input Video", visible=False)
64
+ preprocessor_cn = gr.Dropdown(label="Preprocessor", choices=get_preprocessor_choices(), value="Canny Edge")
65
+ preprocessor_model_cn = gr.Dropdown(label="Preprocessor Model", choices=[], value=None, visible=False)
66
+ with gr.Column() as preprocessor_settings_ui:
67
+ cn_sliders, cn_dropdowns, cn_checkboxes = [], [], []
68
+ for i in range(MAX_DYNAMIC_CONTROLS):
69
+ cn_sliders.append(gr.Slider(visible=False, label=f"dyn_slider_{i}"))
70
+ cn_dropdowns.append(gr.Dropdown(visible=False, label=f"dyn_dropdown_{i}"))
71
+ cn_checkboxes.append(gr.Checkbox(visible=False, label=f"dyn_checkbox_{i}"))
72
+ run_cn = gr.Button("Run Preprocessor", variant="primary")
73
+ with gr.Column(scale=1):
74
+ output_gallery_cn = gr.Gallery(label="Output", show_label=False, object_fit="contain", height=512)
75
+ zero_gpu_cn = gr.Number(label="ZeroGPU Duration (s)", value=None, placeholder="Default: 60s, Max: 120s", info="Optional")
76
+ ui_components.update({
77
+ "cn_input_type": cn_input_type, "cn_image_input": cn_image_input, "cn_video_input": cn_video_input,
78
+ "preprocessor_cn": preprocessor_cn, "preprocessor_model_cn": preprocessor_model_cn, "run_cn": run_cn,
79
+ "zero_gpu_cn": zero_gpu_cn, "output_gallery_cn": output_gallery_cn,
80
+ "preprocessor_settings_ui": preprocessor_settings_ui, "cn_sliders": cn_sliders,
81
+ "cn_dropdowns": cn_dropdowns, "cn_checkboxes": cn_checkboxes
82
+ })
83
+
84
+ ui_components["tabs"] = tabs
85
 
86
  gr.Markdown("<div style='text-align: center; margin-top: 20px;'>Made by RioShiina with ❤️<br><a href='https://github.com/RioShiina47' target='_blank'>GitHub</a> | <a href='https://huggingface.co/RioShiina' target='_blank'>Hugging Face</a> | <a href='https://civitai.com/user/RioShiina' target='_blank'>Civitai</a></div>")
87
 
ui/shared/hires_fix_ui.py CHANGED
@@ -3,7 +3,7 @@ from core.settings import MODEL_MAP_CHECKPOINT
3
  from comfy_integration.nodes import SAMPLER_CHOICES, SCHEDULER_CHOICES
4
  from .ui_components import (
5
  create_lora_settings_ui,
6
- create_controlnet_ui, create_embedding_ui,
7
  create_conditioning_ui, create_vae_override_ui, create_api_key_ui
8
  )
9
 
@@ -65,6 +65,7 @@ def create_ui():
65
 
66
  components.update(create_api_key_ui(prefix))
67
  components.update(create_lora_settings_ui(prefix))
 
68
  # components.update(create_controlnet_ui(prefix))
69
  # components.update(create_embedding_ui(prefix))
70
  components.update(create_conditioning_ui(prefix))
 
3
  from comfy_integration.nodes import SAMPLER_CHOICES, SCHEDULER_CHOICES
4
  from .ui_components import (
5
  create_lora_settings_ui,
6
+ create_controlnet_ui, create_diffsynth_controlnet_ui, create_embedding_ui,
7
  create_conditioning_ui, create_vae_override_ui, create_api_key_ui
8
  )
9
 
 
65
 
66
  components.update(create_api_key_ui(prefix))
67
  components.update(create_lora_settings_ui(prefix))
68
+ components.update(create_diffsynth_controlnet_ui(prefix))
69
  # components.update(create_controlnet_ui(prefix))
70
  # components.update(create_embedding_ui(prefix))
71
  components.update(create_conditioning_ui(prefix))
ui/shared/img2img_ui.py CHANGED
@@ -3,7 +3,7 @@ from core.settings import MODEL_MAP_CHECKPOINT
3
  from comfy_integration.nodes import SAMPLER_CHOICES, SCHEDULER_CHOICES
4
  from .ui_components import (
5
  create_lora_settings_ui,
6
- create_controlnet_ui, create_embedding_ui,
7
  create_conditioning_ui, create_vae_override_ui, create_api_key_ui
8
  )
9
 
@@ -48,6 +48,7 @@ def create_ui():
48
 
49
  components.update(create_api_key_ui(prefix))
50
  components.update(create_lora_settings_ui(prefix))
 
51
  # components.update(create_controlnet_ui(prefix))
52
  # components.update(create_embedding_ui(prefix))
53
  components.update(create_conditioning_ui(prefix))
 
3
  from comfy_integration.nodes import SAMPLER_CHOICES, SCHEDULER_CHOICES
4
  from .ui_components import (
5
  create_lora_settings_ui,
6
+ create_controlnet_ui, create_diffsynth_controlnet_ui, create_embedding_ui,
7
  create_conditioning_ui, create_vae_override_ui, create_api_key_ui
8
  )
9
 
 
48
 
49
  components.update(create_api_key_ui(prefix))
50
  components.update(create_lora_settings_ui(prefix))
51
+ components.update(create_diffsynth_controlnet_ui(prefix))
52
  # components.update(create_controlnet_ui(prefix))
53
  # components.update(create_embedding_ui(prefix))
54
  components.update(create_conditioning_ui(prefix))
ui/shared/inpaint_ui.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
2
  from core.settings import MODEL_MAP_CHECKPOINT
3
  from .ui_components import (
4
  create_base_parameter_ui, create_lora_settings_ui,
5
- create_controlnet_ui, create_embedding_ui,
6
  create_conditioning_ui, create_vae_override_ui, create_api_key_ui
7
  )
8
 
@@ -71,6 +71,7 @@ def create_ui():
71
  with gr.Column() as accordion_wrapper:
72
  components.update(create_api_key_ui(prefix))
73
  components.update(create_lora_settings_ui(prefix))
 
74
  # components.update(create_controlnet_ui(prefix))
75
  # components.update(create_embedding_ui(prefix))
76
  components.update(create_conditioning_ui(prefix))
 
2
  from core.settings import MODEL_MAP_CHECKPOINT
3
  from .ui_components import (
4
  create_base_parameter_ui, create_lora_settings_ui,
5
+ create_controlnet_ui, create_diffsynth_controlnet_ui, create_embedding_ui,
6
  create_conditioning_ui, create_vae_override_ui, create_api_key_ui
7
  )
8
 
 
71
  with gr.Column() as accordion_wrapper:
72
  components.update(create_api_key_ui(prefix))
73
  components.update(create_lora_settings_ui(prefix))
74
+ components.update(create_diffsynth_controlnet_ui(prefix))
75
  # components.update(create_controlnet_ui(prefix))
76
  # components.update(create_embedding_ui(prefix))
77
  components.update(create_conditioning_ui(prefix))
ui/shared/outpaint_ui.py CHANGED
@@ -3,7 +3,7 @@ from core.settings import MODEL_MAP_CHECKPOINT
3
  from comfy_integration.nodes import SAMPLER_CHOICES, SCHEDULER_CHOICES
4
  from .ui_components import (
5
  create_lora_settings_ui,
6
- create_controlnet_ui, create_embedding_ui,
7
  create_conditioning_ui, create_vae_override_ui, create_api_key_ui
8
  )
9
 
@@ -59,6 +59,7 @@ def create_ui():
59
 
60
  components.update(create_api_key_ui(prefix))
61
  components.update(create_lora_settings_ui(prefix))
 
62
  # components.update(create_controlnet_ui(prefix))
63
  # components.update(create_embedding_ui(prefix))
64
  components.update(create_conditioning_ui(prefix))
 
3
  from comfy_integration.nodes import SAMPLER_CHOICES, SCHEDULER_CHOICES
4
  from .ui_components import (
5
  create_lora_settings_ui,
6
+ create_controlnet_ui, create_diffsynth_controlnet_ui, create_embedding_ui,
7
  create_conditioning_ui, create_vae_override_ui, create_api_key_ui
8
  )
9
 
 
59
 
60
  components.update(create_api_key_ui(prefix))
61
  components.update(create_lora_settings_ui(prefix))
62
+ components.update(create_diffsynth_controlnet_ui(prefix))
63
  # components.update(create_controlnet_ui(prefix))
64
  # components.update(create_embedding_ui(prefix))
65
  components.update(create_conditioning_ui(prefix))
ui/shared/txt2img_ui.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
2
  from core.settings import MODEL_MAP_CHECKPOINT
3
  from .ui_components import (
4
  create_base_parameter_ui, create_lora_settings_ui,
5
- create_controlnet_ui, create_embedding_ui,
6
  create_conditioning_ui, create_vae_override_ui, create_api_key_ui
7
  )
8
 
@@ -29,6 +29,7 @@ def create_ui():
29
 
30
  components.update(create_api_key_ui(prefix))
31
  components.update(create_lora_settings_ui(prefix))
 
32
  # components.update(create_controlnet_ui(prefix))
33
  # components.update(create_embedding_ui(prefix))
34
  components.update(create_conditioning_ui(prefix))
 
2
  from core.settings import MODEL_MAP_CHECKPOINT
3
  from .ui_components import (
4
  create_base_parameter_ui, create_lora_settings_ui,
5
+ create_controlnet_ui, create_diffsynth_controlnet_ui, create_embedding_ui,
6
  create_conditioning_ui, create_vae_override_ui, create_api_key_ui
7
  )
8
 
 
29
 
30
  components.update(create_api_key_ui(prefix))
31
  components.update(create_lora_settings_ui(prefix))
32
+ components.update(create_diffsynth_controlnet_ui(prefix))
33
  # components.update(create_controlnet_ui(prefix))
34
  # components.update(create_embedding_ui(prefix))
35
  components.update(create_conditioning_ui(prefix))
ui/shared/ui_components.py CHANGED
@@ -137,6 +137,48 @@ def create_controlnet_ui(prefix: str, max_units=MAX_CONTROLNETS):
137
 
138
  return components
139
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
140
  def create_embedding_ui(prefix: str):
141
  components = {}
142
  key = lambda name: f"{name}_{prefix}"
 
137
 
138
  return components
139
 
140
def create_diffsynth_controlnet_ui(prefix: str, max_units=MAX_CONTROLNETS):
    """Build the collapsible DiffSynth ControlNet panel for one tab.

    Creates up to ``max_units`` unit rows (only the first visible initially),
    each holding a control image, Type/Series dropdowns, a strength slider
    and a hidden filepath State, plus add/delete buttons and a visible-unit
    counter State. Returns a dict of components keyed with a ``_{prefix}``
    suffix, including a flattened per-unit component list for the run handler.
    """
    components = {}
    key = lambda name: f"{name}_{prefix}"

    with gr.Accordion("DiffSynth ControlNet Settings", open=False) as accordion:
        components[key('diffsynth_controlnet_accordion')] = accordion

        cn_rows, images, series, types, strengths, filepaths = [], [], [], [], [], []
        all_cn_components_flat = []
        components[key('diffsynth_controlnet_rows')] = cn_rows
        components[key('diffsynth_controlnet_images')] = images
        components[key('diffsynth_controlnet_series')] = series
        components[key('diffsynth_controlnet_types')] = types
        components[key('diffsynth_controlnet_strengths')] = strengths
        components[key('diffsynth_controlnet_filepaths')] = filepaths

        for unit_idx in range(max_units):
            # Only the first unit starts visible; "Add" reveals the rest.
            with gr.Row(visible=(unit_idx < 1)) as row:
                with gr.Column(scale=1):
                    images.append(gr.Image(label=f"Control Image {unit_idx+1}", type="pil", sources=["upload"], height=256))
                with gr.Column(scale=2):
                    types.append(gr.Dropdown(label="Type", choices=[], interactive=True))
                    series.append(gr.Dropdown(label="Series", choices=[], interactive=True))
                    strengths.append(gr.Slider(label="Strength", minimum=0.0, maximum=2.0, step=0.05, value=1.0, interactive=True))
                    filepaths.append(gr.State(None))
            cn_rows.append(row)
            # Flat [image, type, series, strength, filepath] layout per unit,
            # consumed positionally by the run-event input list.
            all_cn_components_flat.extend([
                images[unit_idx], types[unit_idx], series[unit_idx], strengths[unit_idx], filepaths[unit_idx]
            ])

        with gr.Row():
            components[key('add_diffsynth_controlnet_button')] = gr.Button("✚ Add DiffSynth ControlNet")
            components[key('delete_diffsynth_controlnet_button')] = gr.Button("➖ Delete DiffSynth ControlNet", visible=False)
        components[key('diffsynth_controlnet_count_state')] = gr.State(1)

    components[key('all_diffsynth_controlnet_components_flat')] = all_cn_components_flat

    return components
181
+
182
  def create_embedding_ui(prefix: str):
183
  components = {}
184
  key = lambda name: f"{name}_{prefix}"
utils/app_utils.py CHANGED
@@ -313,6 +313,7 @@ def _ensure_model_downloaded(filename: str, progress=gr.Progress()):
313
  "checkpoints": CHECKPOINT_DIR,
314
  "loras": LORA_DIR,
315
  "controlnet": CONTROLNET_DIR,
 
316
  "clip_vision": os.path.join(os.path.dirname(LORA_DIR), "clip_vision")
317
  }
318
 
@@ -383,15 +384,63 @@ def build_preprocessor_model_map():
383
  global PREPROCESSOR_MODEL_MAP
384
  if PREPROCESSOR_MODEL_MAP is not None: return PREPROCESSOR_MODEL_MAP
385
  print("--- Building ControlNet Preprocessor model map ---")
386
- PREPROCESSOR_MODEL_MAP = {}
387
- print(" ControlNet Preprocessor model map build skipped as feature is disabled."); return PREPROCESSOR_MODEL_MAP
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
388
 
389
  def build_preprocessor_parameter_map():
390
  global PREPROCESSOR_PARAMETER_MAP
391
  if PREPROCESSOR_PARAMETER_MAP is not None: return
392
  print("--- Building ControlNet Preprocessor parameter map ---")
393
- PREPROCESSOR_PARAMETER_MAP = {}
394
- print("✅ ControlNet Preprocessor parameter map build skipped as feature is disabled.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
395
 
396
  def print_welcome_message():
397
  author_name = "RioShiina"
 
313
  "checkpoints": CHECKPOINT_DIR,
314
  "loras": LORA_DIR,
315
  "controlnet": CONTROLNET_DIR,
316
+ "model_patches": MODEL_PATCHES_DIR,
317
  "clip_vision": os.path.join(os.path.dirname(LORA_DIR), "clip_vision")
318
  }
319
 
 
384
  global PREPROCESSOR_MODEL_MAP
385
  if PREPROCESSOR_MODEL_MAP is not None: return PREPROCESSOR_MODEL_MAP
386
  print("--- Building ControlNet Preprocessor model map ---")
387
+ manual_map = {
388
+ "dwpose": [("yzd-v/DWPose", "yolox_l.onnx"), ("yzd-v/DWPose", "dw-ll_ucoco_384.onnx"), ("hr16/UnJIT-DWPose", "dw-ll_ucoco.onnx"), ("hr16/DWPose-TorchScript-BatchSize5", "dw-ll_ucoco_384_bs5.torchscript.pt"), ("hr16/DWPose-TorchScript-BatchSize5", "rtmpose-m_ap10k_256_bs5.torchscript.pt"), ("hr16/yolo-nas-fp16", "yolo_nas_l_fp16.onnx"), ("hr16/yolo-nas-fp16", "yolo_nas_m_fp16.onnx"), ("hr16/yolo-nas-fp16", "yolo_nas_s_fp16.onnx")],
389
+ "densepose": [("LayerNorm/DensePose-TorchScript-with-hint-image", "densepose_r50_fpn_dl.torchscript"), ("LayerNorm/DensePose-TorchScript-with-hint-image", "densepose_r101_fpn_dl.torchscript")]
390
+ }
391
+ temp_map = {}
392
+ from nodes import NODE_DISPLAY_NAME_MAPPINGS
393
+ wrappers_dir = Path("./custom_nodes/comfyui_controlnet_aux/node_wrappers/")
394
+ if not wrappers_dir.exists():
395
+ print("⚠️ ControlNet AUX wrappers directory not found. Cannot build model map.")
396
+ PREPROCESSOR_MODEL_MAP = {}; return PREPROCESSOR_MODEL_MAP
397
+ for wrapper_file in wrappers_dir.glob("*.py"):
398
+ if wrapper_file.name == "__init__.py": continue
399
+ with open(wrapper_file, 'r', encoding='utf-8') as f:
400
+ content = f.read()
401
+ display_name_matches = re.findall(r'NODE_DISPLAY_NAME_MAPPINGS\s*=\s*{(?:.|\n)*?["\'](.*?)["\']\s*:\s*["\'](.*?)["\']', content)
402
+ for _, display_name in display_name_matches:
403
+ if display_name not in temp_map: temp_map[display_name] = []
404
+ manual_key = wrapper_file.stem
405
+ if manual_key in manual_map: temp_map[display_name].extend(manual_map[manual_key])
406
+ matches = re.findall(r"from_pretrained\s*\(\s*(?:filename=)?\s*f?[\"']([^\"']+)[\"']", content)
407
+ for model_filename in matches:
408
+ repo_id = "lllyasviel/Annotators"
409
+ if "depth_anything" in model_filename and "v2" in model_filename: repo_id = "LiheYoung/Depth-Anything-V2"
410
+ elif "depth_anything" in model_filename: repo_id = "LiheYoung/Depth-Anything"
411
+ elif "diffusion_edge" in model_filename: repo_id = "hr16/Diffusion-Edge"
412
+ temp_map[display_name].append((repo_id, model_filename))
413
+ final_map = {name: sorted(list(set(models))) for name, models in temp_map.items() if models}
414
+ PREPROCESSOR_MODEL_MAP = final_map
415
+ print("✅ ControlNet Preprocessor model map built."); return PREPROCESSOR_MODEL_MAP
416
 
417
def build_preprocessor_parameter_map():
    """Populate the PREPROCESSOR_PARAMETER_MAP global (idempotent).

    Inspects every registered ComfyUI node class originating from
    comfyui_controlnet_aux.node_wrappers, merges its required and optional
    INPUT_TYPES, and records the tunable parameters (skipping the
    image/resolution/pose_kps inputs) as
    {"name": ..., "type": ..., "config": ...} dicts keyed by the node's
    display name. Nodes whose INPUT_TYPES call fails are skipped with a
    warning rather than aborting the whole build.
    """
    global PREPROCESSOR_PARAMETER_MAP
    if PREPROCESSOR_PARAMETER_MAP is not None:
        return
    print("--- Building ControlNet Preprocessor parameter map ---")
    from nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
    param_map = {}
    for class_name, node_class in NODE_CLASS_MAPPINGS.items():
        if not hasattr(node_class, "INPUT_TYPES"):
            continue
        # Only nodes from the controlnet_aux wrapper modules are relevant.
        if hasattr(node_class, '__module__') and 'comfyui_controlnet_aux.node_wrappers' not in node_class.__module__:
            continue
        display_name = NODE_DISPLAY_NAME_MAPPINGS.get(class_name)
        if not display_name:
            continue
        try:
            spec = node_class.INPUT_TYPES()
            merged_inputs = {**spec.get('required', {}), **spec.get('optional', {})}
            params = []
            for name, details in merged_inputs.items():
                # These inputs are wired by the app itself, not user-tunable.
                if name in ('image', 'resolution', 'pose_kps'):
                    continue
                if not isinstance(details, (list, tuple)) or not details:
                    continue
                cfg = details[1] if len(details) > 1 and isinstance(details[1], dict) else {}
                params.append({"name": name, "type": details[0], "config": cfg})
            if params:
                param_map[display_name] = params
        except Exception as e:
            print(f"⚠️ Could not parse parameters for {display_name}: {e}")
    PREPROCESSOR_PARAMETER_MAP = param_map
    print("✅ ControlNet Preprocessor parameter map built.")
444
 
445
  def print_welcome_message():
446
  author_name = "RioShiina"
yaml/diffsynth_controlnet_models.yaml ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ DiffSynth_ControlNet:
2
+ Z-Image:
3
+ - Filepath: "Z-Image-Turbo-Fun-Controlnet-Union.safetensors"
4
+ Series: "alibaba-pai Controlnet Union"
5
+ Type: ["Pose", "Canny", "HED", "Depth", "MLSD"]
yaml/file_list.yaml CHANGED
@@ -17,7 +17,7 @@ file:
17
  repo_id: "Comfy-Org/z_image_turbo"
18
  repository_file_path: "split_files/vae/ae.safetensors"
19
 
20
- controlnet:
21
  - filename: "Z-Image-Turbo-Fun-Controlnet-Union.safetensors"
22
  source: "hf"
23
  repo_id: "alibaba-pai/Z-Image-Turbo-Fun-Controlnet-Union"
 
17
  repo_id: "Comfy-Org/z_image_turbo"
18
  repository_file_path: "split_files/vae/ae.safetensors"
19
 
20
+ model_patches:
21
  - filename: "Z-Image-Turbo-Fun-Controlnet-Union.safetensors"
22
  source: "hf"
23
  repo_id: "alibaba-pai/Z-Image-Turbo-Fun-Controlnet-Union"
yaml/injectors.yaml CHANGED
@@ -1,9 +1,12 @@
1
  injector_definitions:
2
  dynamic_controlnet_chains:
3
  module: "chain_injectors.controlnet_injector"
 
 
4
  dynamic_conditioning_chains:
5
  module: "chain_injectors.conditioning_injector"
6
 
7
  injector_order:
8
  - dynamic_conditioning_chains
 
9
  - dynamic_controlnet_chains
 
1
  injector_definitions:
2
  dynamic_controlnet_chains:
3
  module: "chain_injectors.controlnet_injector"
4
+ dynamic_diffsynth_controlnet_chains:
5
+ module: "chain_injectors.diffsynth_controlnet_injector"
6
  dynamic_conditioning_chains:
7
  module: "chain_injectors.conditioning_injector"
8
 
9
  injector_order:
10
  - dynamic_conditioning_chains
11
+ - dynamic_diffsynth_controlnet_chains
12
  - dynamic_controlnet_chains