import torch

from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
from diffusers.models.controlnets.controlnet import ControlNetModel
from diffusers.pipelines.controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline
from diffusers.schedulers.scheduling_unipc_multistep import UniPCMultistepScheduler

from transformers import CLIPTextModel, CLIPTokenizer

def prep_control_image(cond_values: torch.Tensor, device: torch.device) -> torch.Tensor:
    """Move a conditioning image batch to `device` as float32 in the [0, 1] range."""
    x = cond_values
    if x.min() < 0:
        # Input appears to be normalized to [-1, 1]; rescale it to [0, 1].
        x = (x * 0.5 + 0.5).clamp(0, 1)
    return x.to(device=device, dtype=torch.float32)

def build_controlnet_pipe(
    base_model_name: str,
    controlnet: ControlNetModel,
    vae: AutoencoderKL,
    unet: UNet2DConditionModel,
    text_encoder: CLIPTextModel,
    tokenizer: CLIPTokenizer,
    device: torch.device,
    weight_dtype: torch.dtype,
    use_unipc: bool = True,
) -> StableDiffusionControlNetPipeline:
    """Assemble a StableDiffusionControlNetPipeline from pre-loaded components."""
    pipe = StableDiffusionControlNetPipeline.from_pretrained(
        base_model_name,
        vae=vae,
        text_encoder=text_encoder,
        tokenizer=tokenizer,
        unet=unet,
        controlnet=controlnet,
        safety_checker=None,
        torch_dtype=weight_dtype,
    )

    if use_unipc:
        # Swap in the UniPC multistep scheduler, reusing the default scheduler's config.
        pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

    pipe = pipe.to(device)
    pipe.set_progress_bar_config(disable=True)
    return pipe
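

# Hedged usage sketch. The repo ids ("runwayml/stable-diffusion-v1-5",
# "lllyasviel/sd-controlnet-canny"), the float16 dtype, and the pre-loaded
# `vae`, `unet`, `text_encoder`, `tokenizer`, and `cond_batch` objects are
# illustrative assumptions, not requirements of this module:
#
#   controlnet = ControlNetModel.from_pretrained(
#       "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
#   )
#   pipe = build_controlnet_pipe(
#       "runwayml/stable-diffusion-v1-5",
#       controlnet=controlnet,
#       vae=vae,
#       unet=unet,
#       text_encoder=text_encoder,
#       tokenizer=tokenizer,
#       device=torch.device("cuda"),
#       weight_dtype=torch.float16,
#   )
#   cond = prep_control_image(cond_batch, torch.device("cuda"))
#   images = pipe("a prompt", image=cond, num_inference_steps=20).images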