import functools
from typing import Optional, TYPE_CHECKING

if TYPE_CHECKING:
    from modules_forge.supported_preprocessor import Preprocessor

import cv2
import gradio as gr
import numpy as np
import torch
from lib_controlnet import external_code, global_state
from lib_controlnet.api import controlnet_api
from lib_controlnet.controlnet_ui.controlnet_ui_group import ControlNetUiGroup
from lib_controlnet.enums import HiResFixOption
from lib_controlnet.external_code import ControlNetUnit
from lib_controlnet.infotext import Infotext
from lib_controlnet.logging import logger
from lib_controlnet.utils import align_dim_latent, crop_and_resize_image, judge_image_type, prepare_mask, set_numpy_seed
from PIL import Image, ImageOps

from modules import images, masking, script_callbacks, scripts, shared
from modules.processing import StableDiffusionProcessing, StableDiffusionProcessingImg2Img, StableDiffusionProcessingTxt2Img
from modules_forge.forge_util import HWC3, numpy_to_pytorch
from modules_forge.shared import try_load_supported_control_model
from modules_forge.supported_controlnet import ControlModelPatcher

global_state.update_controlnet_filenames()


@functools.lru_cache(maxsize=getattr(shared.opts, "control_net_model_cache_size", 1))
def cached_controlnet_loader(filename):
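    # NOTE: lru_cache captures control_net_model_cache_size once at import time,
    # so changing that setting only takes effect after a reload (the option is
    # registered with needs_reload_ui() below).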
    return try_load_supported_control_model(filename)


class ControlNetCachedParameters:
    def __init__(self):
        self.preprocessor = None
        self.model = None
        self.control_cond = None
        self.control_cond_for_hr_fix = None
        self.control_mask = None
        self.control_mask_for_hr_fix = None


class ControlNetForForgeOfficial(scripts.Script):
    sorting_priority = 10

    def title(self):
        return "ControlNet"

    def show(self, is_img2img):
        return scripts.AlwaysVisible

    def ui(self, is_img2img):
        default_unit = ControlNetUnit(enabled=False, module="None", model="None")
        elem_id_tabname = f"{'img2img' if is_img2img else 'txt2img'}_controlnet"
        infotext = Infotext()
        ui_groups = []
        controls = []

        with gr.Group(elem_id=elem_id_tabname):
            with gr.Accordion(
                open=False,
                label="ControlNet Integrated",
                elem_id="controlnet",
                elem_classes=["controlnet"],
            ):
                with gr.Tabs(elem_classes="controlnet_tabs"):
                    max_models = shared.opts.data.get("control_net_unit_count", 3)
                    for i in range(max_models):
                        with gr.Tab(label=f"ControlNet Unit {i + 1}", id=i):
                            group = ControlNetUiGroup(is_img2img, default_unit)
                            ui_groups.append(group)
                            controls.append(group.render(f"ControlNet-{i}", elem_id_tabname))

        for i, ui_group in enumerate(ui_groups):
            infotext.register_unit(i, ui_group)

        if shared.opts.data.get("control_net_sync_field_args", True):
            self.infotext_fields = infotext.infotext_fields
            self.paste_field_names = infotext.paste_field_names

        return controls

    def get_enabled_units(self, units: list[ControlNetUnit]):  # Parse dict from API calls
        units = [ControlNetUnit.from_dict(unit) if isinstance(unit, dict) else unit for unit in units]
        assert all(isinstance(unit, ControlNetUnit) for unit in units)
        enabled_units = [x for x in units if x.enabled]
        return enabled_units

    @staticmethod
    def try_crop_image_with_a1111_mask(p: StableDiffusionProcessing, input_image: np.ndarray, resize_mode: external_code.ResizeMode, preprocessor: "Preprocessor") -> np.ndarray:
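        """
        For A1111 "only masked" inpainting, crop and resize the control image to
        the same region that A1111 crops from the init image, so the control
        signal stays aligned with the inpainted area.

        Note: "corp_image_..." (sic) below is the attribute name as defined on
        the upstream Preprocessor class.
        """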
        a1111_mask_image: Optional[Image.Image] = getattr(p, "image_mask", None)
        is_only_masked_inpaint: bool = issubclass(type(p), StableDiffusionProcessingImg2Img) and p.inpaint_full_res and a1111_mask_image is not None

        if preprocessor.corp_image_with_a1111_mask_when_in_img2img_inpaint_tab and is_only_masked_inpaint:
            logger.info("Cropping input image based on A1111 mask.")
            input_image = [input_image[:, :, i] for i in range(input_image.shape[2])]
            input_image = [Image.fromarray(x) for x in input_image]

            mask = prepare_mask(a1111_mask_image, p)

            crop_region = masking.get_crop_region(np.array(mask), p.inpaint_full_res_padding)
            crop_region = masking.expand_crop_region(crop_region, p.width, p.height, mask.width, mask.height)

            input_image = [images.resize_image(resize_mode.int_value(), x, mask.width, mask.height) for x in input_image]
            input_image = [x.crop(crop_region) for x in input_image]
            input_image = [images.resize_image(external_code.ResizeMode.OUTER_FIT.int_value(), x, p.width, p.height) for x in input_image]
            input_image = [np.asarray(x)[:, :, 0] for x in input_image]
            input_image = np.stack(input_image, axis=2)

        return input_image

    def get_input_data(self, p: StableDiffusionProcessing, unit: ControlNetUnit, preprocessor: "Preprocessor", h: int, w: int):
        resize_mode = external_code.resize_mode_from_value(unit.resize_mode)
        image_list = []

        assert unit.use_preview_as_input is False

        a1111_i2i_image = getattr(p, "init_images", [None])[0]
        a1111_i2i_mask = getattr(p, "image_mask", None)

        if a1111_i2i_mask is not None and getattr(p, "inpainting_mask_invert", False):
            a1111_i2i_mask = ImageOps.invert(a1111_i2i_mask)

        using_a1111_data = False

        if unit.image is None:
            if isinstance(p, StableDiffusionProcessingImg2Img):
                resize_mode = external_code.resize_mode_from_value(p.resize_mode)
                image = HWC3(np.asarray(a1111_i2i_image))
                using_a1111_data = True
            else:
                image = None
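        # An almost-black image whose mask layer has strokes means the user drew
        # directly in the editor; use the drawing (the mask layer) as the input.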
        elif (unit.image["image"] < 5).all() and (unit.image["mask"] > 5).any():
            image = unit.image["mask"]
        else:
            image = unit.image["image"]

        if not isinstance(image, np.ndarray):
            logger.error("ControlNet is enabled but no input image is given...")
            raise ValueError("ControlNet unit is enabled but has no input image")

        image = HWC3(image)

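        # Mask source priority: A1111 img2img mask, then the dedicated mask
        # upload (its image layer, then its drawn layer), then a mask drawn on
        # the input image itself.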
        if using_a1111_data:
            mask = None if a1111_i2i_mask is None else HWC3(np.asarray(a1111_i2i_mask))
        elif unit.mask_image is not None and (unit.mask_image["image"] > 5).any():
            mask = unit.mask_image["image"]
        elif unit.mask_image is not None and (unit.mask_image["mask"] > 5).any():
            mask = unit.mask_image["mask"]
        elif unit.image is not None and (unit.image["mask"] > 5).any():
            mask = unit.image["mask"]
        else:
            mask = None

        image = self.try_crop_image_with_a1111_mask(p, image, resize_mode, preprocessor)

        if mask is not None:
            mask = cv2.resize(
                HWC3(mask),
                (image.shape[1], image.shape[0]),
                interpolation=cv2.INTER_NEAREST,
            )
            mask = self.try_crop_image_with_a1111_mask(p, mask, resize_mode, preprocessor)

        image_list = [[image, mask]]

        if resize_mode == external_code.ResizeMode.OUTER_FIT and preprocessor.expand_mask_when_resize_and_fill:
            new_image_list = []
            for input_image, input_mask in image_list:
                if input_mask is None:
                    input_mask = np.zeros_like(input_image)
                input_mask = crop_and_resize_image(
                    input_mask,
                    external_code.ResizeMode.OUTER_FIT,
                    h,
                    w,
                    fill_border_with_255=True,
                )
                input_image = crop_and_resize_image(
                    input_image,
                    external_code.ResizeMode.OUTER_FIT,
                    h,
                    w,
                    fill_border_with_255=False,
                )
                new_image_list.append((input_image, input_mask))
            image_list = new_image_list

        return image_list, resize_mode

    @staticmethod
    def get_target_dimensions(p: StableDiffusionProcessing) -> tuple[int, int, int, int]:
        """Returns latent-aligned (h, w, hr_h, hr_w); when hires fix is off, the hires dims equal (h, w)."""
        h = align_dim_latent(p.height)
        w = align_dim_latent(p.width)

        high_res_fix = getattr(p, "enable_hr", False) and isinstance(p, StableDiffusionProcessingTxt2Img)

        if high_res_fix:
            if p.hr_resize_x == 0 and p.hr_resize_y == 0:
                hr_y = int(p.height * p.hr_scale)
                hr_x = int(p.width * p.hr_scale)
            else:
                hr_y, hr_x = p.hr_resize_y, p.hr_resize_x
            hr_y = align_dim_latent(hr_y)
            hr_x = align_dim_latent(hr_x)
        else:
            hr_y = h
            hr_x = w

        return h, w, hr_y, hr_x

    @torch.no_grad()
    def process_unit_after_click_generate(self, p: StableDiffusionProcessing, unit: ControlNetUnit, params: ControlNetCachedParameters, *args, **kwargs) -> bool:

        h, w, hr_y, hr_x = self.get_target_dimensions(p)

        has_high_res_fix = isinstance(p, StableDiffusionProcessingTxt2Img) and getattr(p, "enable_hr", False)

        if unit.use_preview_as_input:
            unit.module = "None"

        preprocessor = global_state.get_preprocessor(unit.module)

        try:
            input_list, resize_mode = self.get_input_data(p, unit, preprocessor, h, w)
        except ValueError:
            return False

        preprocessor_outputs = []
        control_masks = []
        preprocessor_output_is_image = False
        preprocessor_output = None

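        # Show a progress bar only when preprocessing more than one image;
        # tqdm is imported lazily to avoid a hard top-level dependency.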
        def optional_tqdm(iterable, use_tqdm):
            from tqdm import tqdm

            return tqdm(iterable) if use_tqdm else iterable

        for input_image, input_mask in optional_tqdm(input_list, len(input_list) > 1):
            if unit.pixel_perfect:
                unit.processor_res = external_code.pixel_perfect_resolution(
                    input_image,
                    target_H=h,
                    target_W=w,
                    resize_mode=resize_mode,
                )

            seed = set_numpy_seed(p)
            logger.debug(f"Use numpy seed {seed}.")
            logger.info(f"Using preprocessor: {unit.module}")
            logger.info(f"preprocessor resolution = {unit.processor_res}")

            preprocessor_output = preprocessor(
                input_image=input_image,
                input_mask=input_mask,
                resolution=unit.processor_res,
                slider_1=unit.threshold_a,
                slider_2=unit.threshold_b,
            )

            preprocessor_outputs.append(preprocessor_output)
            preprocessor_output_is_image = judge_image_type(preprocessor_output)

            if input_mask is not None:
                control_masks.append(input_mask)

            if len(input_list) > 1 and not preprocessor_output_is_image:
                logger.info("Batch-wise input is only supported by ControlNet, Control-LoRA, and T2I adapters!")
                break

        if has_high_res_fix:
            hr_option = HiResFixOption.from_value(unit.hr_option)
        else:
            hr_option = HiResFixOption.BOTH

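        # Repeat preprocessor outputs cyclically so every image in the batch is
        # paired with a control condition.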
        alignment_indices = [i % len(preprocessor_outputs) for i in range(p.batch_size)]

        def attach_extra_result_image(img: np.ndarray, is_high_res: bool = False):
            if (
                not shared.opts.data.get("control_net_no_detectmap", False)
                and ((is_high_res and hr_option.high_res_enabled) or (not is_high_res and hr_option.low_res_enabled))
                and unit.save_detected_map
            ):
                p.extra_result_images.append(img)

        if preprocessor_output_is_image:
            params.control_cond = []
            params.control_cond_for_hr_fix = []

            for preprocessor_output in preprocessor_outputs:
                control_cond = crop_and_resize_image(preprocessor_output, resize_mode, h, w)
                attach_extra_result_image(external_code.visualize_inpaint_mask(control_cond))
                params.control_cond.append(numpy_to_pytorch(control_cond).movedim(-1, 1))

            params.control_cond = torch.cat(params.control_cond, dim=0)[alignment_indices].contiguous()

            if has_high_res_fix:
                for preprocessor_output in preprocessor_outputs:
                    control_cond_for_hr_fix = crop_and_resize_image(preprocessor_output, resize_mode, hr_y, hr_x)
                    attach_extra_result_image(
                        external_code.visualize_inpaint_mask(control_cond_for_hr_fix),
                        is_high_res=True,
                    )
                    params.control_cond_for_hr_fix.append(numpy_to_pytorch(control_cond_for_hr_fix).movedim(-1, 1))
                params.control_cond_for_hr_fix = torch.cat(params.control_cond_for_hr_fix, dim=0)[alignment_indices].contiguous()
            else:
                params.control_cond_for_hr_fix = params.control_cond
        else:
            params.control_cond = preprocessor_output
            params.control_cond_for_hr_fix = preprocessor_output
            attach_extra_result_image(input_image)

        if len(control_masks) > 0:
            params.control_mask = []
            params.control_mask_for_hr_fix = []

            for input_mask in control_masks:
                fill_border = preprocessor.fill_mask_with_one_when_resize_and_fill
                control_mask = crop_and_resize_image(input_mask, resize_mode, h, w, fill_border)
                attach_extra_result_image(control_mask)
                control_mask = numpy_to_pytorch(control_mask).movedim(-1, 1)[:, :1]
                params.control_mask.append(control_mask)

                if has_high_res_fix:
                    control_mask_for_hr_fix = crop_and_resize_image(input_mask, resize_mode, hr_y, hr_x, fill_border)
                    attach_extra_result_image(control_mask_for_hr_fix, is_high_res=True)
                    control_mask_for_hr_fix = numpy_to_pytorch(control_mask_for_hr_fix).movedim(-1, 1)[:, :1]
                    params.control_mask_for_hr_fix.append(control_mask_for_hr_fix)

            params.control_mask = torch.cat(params.control_mask, dim=0)[alignment_indices].contiguous()
            if has_high_res_fix:
                params.control_mask_for_hr_fix = torch.cat(params.control_mask_for_hr_fix, dim=0)[alignment_indices].contiguous()
            else:
                params.control_mask_for_hr_fix = params.control_mask

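        # Some preprocessors (e.g. reference-only methods) need no control
        # model; a bare ControlModelPatcher keeps the rest of the pipeline uniform.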
        if preprocessor.do_not_need_model:
            model_filename = "Not Needed"
            params.model = ControlModelPatcher()
        else:
            if unit.model == "None":
                logger.error("You have not selected any control model!")
                return False
            model_filename = global_state.get_controlnet_filename(unit.model)
            params.model = cached_controlnet_loader(model_filename)
            if params.model is None:
                logger.error(f"Failed to recognize {model_filename}...")
                return False

        params.preprocessor = preprocessor

        params.preprocessor.process_after_running_preprocessors(process=p, params=params, **kwargs)
        params.model.process_after_running_preprocessors(process=p, params=params, **kwargs)

        logger.info(f"{type(params.model).__name__}: {model_filename}")
        return True

    @torch.no_grad()
    def process_unit_before_every_sampling(self, p: StableDiffusionProcessing, unit: ControlNetUnit, params: ControlNetCachedParameters, *args, **kwargs):

        is_hr_pass = getattr(p, "is_hr_pass", False)

        has_high_res_fix = isinstance(p, StableDiffusionProcessingTxt2Img) and getattr(p, "enable_hr", False)

        if has_high_res_fix:
            hr_option = HiResFixOption.from_value(unit.hr_option)
        else:
            hr_option = HiResFixOption.BOTH

        if has_high_res_fix and is_hr_pass and (not hr_option.high_res_enabled):
            logger.info("ControlNet skipped the high-res pass.")
            return

        if has_high_res_fix and (not is_hr_pass) and (not hr_option.low_res_enabled):
            logger.info("ControlNet skipped the low-res pass.")
            return

        if is_hr_pass:
            cond = params.control_cond_for_hr_fix
            mask = params.control_mask_for_hr_fix
        else:
            cond = params.control_cond
            mask = params.control_mask

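        # Pass clones of the untouched cond/mask downstream so later hooks can
        # refer back to the originals after in-place edits.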
        kwargs.update(
            dict(
                unit=unit,
                params=params,
                cond_original=cond.clone() if isinstance(cond, torch.Tensor) else cond,
                mask_original=mask.clone() if isinstance(mask, torch.Tensor) else mask,
            )
        )

        params.model.strength = float(unit.weight)
        params.model.start_percent = float(unit.guidance_start)
        params.model.end_percent = float(unit.guidance_end)
        params.model.positive_advanced_weighting = None
        params.model.negative_advanced_weighting = None
        params.model.advanced_frame_weighting = None
        params.model.advanced_sigma_weighting = None

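        # "Soft" per-block weighting: entry i is 0.825 ** (12 - i), ramping the
        # control strength across the 12 UNet blocks; SDXL uses the weaker
        # 0.825 ** 3 for the middle block.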
        soft_weighting = {
            "input": [
                0.09941396206337118,
                0.12050177219802567,
                0.14606275417942507,
                0.17704576264172736,
                0.214600924414215,
                0.26012233262329093,
                0.3152997971191405,
                0.3821815722656249,
                0.4632503906249999,
                0.561515625,
                0.6806249999999999,
                0.825,
            ],
            "middle": [0.561515625] if p.sd_model.is_sdxl else [1.0],
            "output": [
                0.09941396206337118,
                0.12050177219802567,
                0.14606275417942507,
                0.17704576264172736,
                0.214600924414215,
                0.26012233262329093,
                0.3152997971191405,
                0.3821815722656249,
                0.4632503906249999,
                0.561515625,
                0.6806249999999999,
                0.825,
            ],
        }

        zero_weighting = {"input": [0.0] * 12, "middle": [0.0], "output": [0.0] * 12}

        if unit.control_mode == external_code.ControlMode.CONTROL.value:
            params.model.positive_advanced_weighting = soft_weighting.copy()
            params.model.negative_advanced_weighting = zero_weighting.copy()

        if unit.control_mode == external_code.ControlMode.PROMPT.value:
            params.model.positive_advanced_weighting = soft_weighting.copy()
            params.model.negative_advanced_weighting = soft_weighting.copy()

        if is_hr_pass and params.preprocessor.use_soft_projection_in_hr_fix:
            params.model.positive_advanced_weighting = soft_weighting.copy()
            params.model.negative_advanced_weighting = soft_weighting.copy()

        cond, mask = params.preprocessor.process_before_every_sampling(p, cond, mask, *args, **kwargs)

        params.model.advanced_mask_weighting = mask

        params.model.process_before_every_sampling(p, cond, mask, *args, **kwargs)

        logger.info(f"ControlNet Method {params.preprocessor.name} patched.")

    @staticmethod
    def bound_check_params(unit: ControlNetUnit) -> None:
        """
        Checks and corrects negative parameters in ControlNetUnit 'unit'.
        Parameters 'processor_res', 'threshold_a', 'threshold_b' are reset to
        their default values if negative.

        Args:
            unit (ControlNetUnit): The ControlNetUnit instance to check.
        """
        preprocessor = global_state.get_preprocessor(unit.module)

        if unit.processor_res < 0:
            unit.processor_res = int(preprocessor.slider_resolution.gradio_update_kwargs.get("value", 512))
        if unit.threshold_a < 0:
            # Thresholds may be fractional (e.g. 0.5), so don't truncate to int.
            unit.threshold_a = float(preprocessor.slider_1.gradio_update_kwargs.get("value", 1.0))
        if unit.threshold_b < 0:
            unit.threshold_b = float(preprocessor.slider_2.gradio_update_kwargs.get("value", 1.0))

    @torch.no_grad()
    def process_unit_after_every_sampling(self, p: StableDiffusionProcessing, unit: ControlNetUnit, params: ControlNetCachedParameters, *args, **kwargs):
        params.preprocessor.process_after_every_sampling(p, params, *args, **kwargs)
        params.model.process_after_every_sampling(p, params, *args, **kwargs)

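    # Lifecycle: process() runs once per Generate click and caches per-unit
    # state; process_before_every_sampling() patches the model before each
    # sampling pass (including the hires pass); postprocess() clears the cache.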
    @torch.no_grad()
    def process(self, p, *args, **kwargs):
        self.current_params = {}
        enabled_units = self.get_enabled_units(args)
        Infotext.write_infotext(enabled_units, p)
        for i, unit in enumerate(enabled_units):
            self.bound_check_params(unit)
            params = ControlNetCachedParameters()
            if self.process_unit_after_click_generate(p, unit, params, *args, **kwargs):
                self.current_params[i] = params

    @torch.no_grad()
    def process_before_every_sampling(self, p, *args, **kwargs):
        for i, unit in enumerate(self.get_enabled_units(args)):
            if i not in self.current_params:
                logger.warning(f"ControlNet Unit {i + 1} is skipped...")
                continue
            self.process_unit_before_every_sampling(p, unit, self.current_params[i], *args, **kwargs)

    @torch.no_grad()
    def postprocess_batch_list(self, p, pp, *args, **kwargs):
        for i, unit in enumerate(self.get_enabled_units(args)):
            if i in self.current_params:
                self.process_unit_after_every_sampling(p, unit, self.current_params[i], pp, *args, **kwargs)

    def postprocess(self, *args):
        self.current_params = {}


def on_ui_settings():
    section = ("control_net", "ControlNet")
    category_id = "sd"

    shared.opts.add_option(
        "control_net_models_path",
        shared.OptionInfo(
            "",
            "Extra Path to look for ControlNet Models",
            section=section,
            category_id=category_id,
        ).info("e.g. training output directory"),
    )
    shared.opts.add_option(
        "control_net_unit_count",
        shared.OptionInfo(
            3,
            "Number of ControlNet Units",
            gr.Slider,
            {"minimum": 1, "maximum": 5, "step": 1},
            section=section,
            category_id=category_id,
        ).needs_reload_ui(),
    )
    shared.opts.add_option(
        "control_net_model_cache_size",
        shared.OptionInfo(
            3,
            "Number of Models to Cache in Memory",
            gr.Slider,
            {"minimum": 0, "maximum": 10, "step": 1},
            section=section,
            category_id=category_id,
        ).needs_reload_ui(),
    )
    shared.opts.add_option(
        "control_net_sync_field_args",
        shared.OptionInfo(
            True,
            "Read ControlNet parameters from Infotext",
            section=section,
            category_id=category_id,
        ).needs_reload_ui(),
    )
    shared.opts.add_option(
        "control_net_no_detectmap",
        shared.OptionInfo(
            False,
            "Do not append detectmap to output",
            section=section,
            category_id=category_id,
        ),
    )


script_callbacks.on_ui_settings(on_ui_settings)
script_callbacks.on_infotext_pasted(Infotext.on_infotext_pasted)
script_callbacks.on_after_component(ControlNetUiGroup.on_after_component)
script_callbacks.on_before_reload(ControlNetUiGroup.reset)

if shared.cmd_opts.api:
    script_callbacks.on_app_started(controlnet_api)