YAML Metadata Warning: empty or missing yaml metadata in repo card

Check out the documentation for more information.

How to build this pipeline


import torch

from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils.import_utils import is_xformers_available

from animatediff.models.sparse_controlnet import SparseControlNetModel
from animatediff.models.unet import UNet3DConditionModel
from animatediff.pipelines.pipeline_animation import AnimationPipeline
from animatediff.utils.util import load_weights

# Load the base Stable Diffusion checkpoint from a single safetensors file and
# fuse an image LoRA into its weights at a light scale (0.3).
# NOTE(review): `pretrained_model_path` and `lora_model_path` are assumed to be
# defined earlier, outside this snippet — confirm before running.
sdpipe = StableDiffusionPipeline.from_single_file(pretrained_model_path, use_safetensors=True, add_watermarker=False).to(dtype=torch.float16)
sdpipe.load_lora_weights(lora_model_path)
sdpipe.fuse_lora(lora_scale=0.3)

# Reuse the SD pipeline's frozen components for the animation pipeline.
text_encoder = sdpipe.text_encoder.cuda()
vae          = sdpipe.vae.cuda()
tokenizer    = sdpipe.tokenizer

# Extra config dicts for the 3D UNet / sparse ControlNet.
# NOTE(review): `params` is presumably the parsed AnimateDiff inference YAML —
# TODO confirm against the caller.
unet_additional_kwargs = params["unet_additional_kwargs"]
controlnet_additional_kwargs = params["controlnet_additional_kwargs"]

# Inflate the 2D UNet weights into AnimateDiff's 3D UNet (adds temporal layers).
unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_config=sdpipe.unet.config, unet_additional_kwargs=unet_additional_kwargs).cuda()
# Patch config fields consumed by SparseControlNetModel.from_unet() below.
# NOTE(review): the hard-coded 8 attention heads matches the SD 1.5 UNet —
# verify if a different base model is used.
unet.config.num_attention_heads = 8
unet.config.projection_class_embeddings_input_dim = None
unet.to(dtype=torch.float16)

# Derive a sparse ControlNet from the 3D UNet, then load the AnimateDiff v3
# SparseCtrl (RGB-conditioned) checkpoint into it.
controlnet = SparseControlNetModel.from_unet(unet, controlnet_additional_kwargs=controlnet_additional_kwargs)
controlnet_path = "models/motion_module/v3_sd15_sparsectrl_rgb.ckpt"

print(f"loading controlnet checkpoint from {controlnet_path} ...")
# SECURITY NOTE(review): torch.load unpickles arbitrary objects — only load
# checkpoints from trusted sources (consider weights_only=True on newer torch).
controlnet_state_dict = torch.load(controlnet_path, map_location="cpu")
# Some checkpoints nest the weights under a "controlnet" key; unwrap if present.
controlnet_state_dict = controlnet_state_dict["controlnet"] if "controlnet" in controlnet_state_dict else controlnet_state_dict
# Drop positional-encoding buffers ("pos_encoder.pe") so loading is not pinned
# to the sequence length the checkpoint was trained with — presumably; confirm
# against the AnimateDiff loader conventions.
controlnet_state_dict = {name: param for name, param in controlnet_state_dict.items() if "pos_encoder.pe" not in name}
# Remove the non-tensor config entry, if any, before load_state_dict().
controlnet_state_dict.pop("animatediff_config", "")
controlnet.load_state_dict(controlnet_state_dict)
controlnet.to(dtype=torch.float16)
controlnet.cuda()

# Sampler for inference; betas follow the SD 1.5 linear schedule, with Karras
# sigma spacing enabled. Built once and shared with the pipeline below.
scheduler = DPMSolverMultistepScheduler(
    beta_start = 0.00075,
    beta_end = 0.0145,
    beta_schedule = "linear",
    use_karras_sigmas = True,
)

# Assemble the AnimationPipeline from the components prepared above.
# BUGFIX(review): the original snippet passed an undefined name `pipeline` to
# load_weights(); it is constructed here from the loaded components.
pipeline = AnimationPipeline(
    vae=vae,
    text_encoder=text_encoder,
    tokenizer=tokenizer,
    unet=unet,
    controlnet=controlnet,
    scheduler=scheduler,
)

# Inject the AnimateDiff v3 motion module and the domain-adapter LoRA into the
# image layers of the pipeline.
pipe = load_weights(
    pipeline,
    # motion module
    motion_module_path         = "models/Motion_Module/v3_sd15_mm.ckpt",
    motion_module_lora_configs = [],
    # domain adapter
    adapter_lora_path          = "models/Motion_Module/v3_sd15_adapter.ckpt",
    adapter_lora_scale         = 1.0,
    # image layers
    dreambooth_model_path      = pretrained_model_path,
    lora_model_path            = "",
    lora_alpha                 = 0.8,
).to("cuda")
pipe.to(dtype=torch.float16)
pipe.enable_vae_slicing()
pipe.enable_model_cpu_offload()

# Ensure the pipeline uses the DPM-Solver++ scheduler configured above (the
# same instance, rather than constructing a duplicate as the original did).
pipe.scheduler = scheduler

How to use this pipeline

# Extra config dict for the sparse ControlNet.
# NOTE(review): `params` is presumably the parsed AnimateDiff inference YAML —
# TODO confirm against the caller.
controlnet_additional_kwargs = params["controlnet_additional_kwargs"]

# Load a previously saved AnimateDiff pipeline and grab its components.
pipe = AnimationPipeline.from_pretrained("models/animatediff_model")
unet = pipe.unet
vae = pipe.vae

# Patch config fields consumed by SparseControlNetModel.from_unet() below.
# NOTE(review): the hard-coded 8 attention heads matches the SD 1.5 UNet —
# verify if a different base model is used.
unet.config.num_attention_heads = 8
unet.config.projection_class_embeddings_input_dim = None
unet.to(dtype=torch.float16)

# Derive a sparse ControlNet from the pipeline's UNet, then load the
# AnimateDiff v3 SparseCtrl (RGB-conditioned) checkpoint into it.
controlnet = SparseControlNetModel.from_unet(unet, controlnet_additional_kwargs=controlnet_additional_kwargs)
controlnet_path = "./models/motion_module/v3_sd15_sparsectrl_rgb.ckpt"

print(f"loading controlnet checkpoint from {controlnet_path} ...")
# SECURITY NOTE(review): torch.load unpickles arbitrary objects — only load
# checkpoints from trusted sources (consider weights_only=True on newer torch).
controlnet_state_dict = torch.load(controlnet_path, map_location="cpu")
# Some checkpoints nest the weights under a "controlnet" key; unwrap if present.
controlnet_state_dict = controlnet_state_dict["controlnet"] if "controlnet" in controlnet_state_dict else controlnet_state_dict
# Drop positional-encoding buffers ("pos_encoder.pe") so loading is not pinned
# to the sequence length the checkpoint was trained with — presumably; confirm
# against the AnimateDiff loader conventions.
controlnet_state_dict = {name: param for name, param in controlnet_state_dict.items() if "pos_encoder.pe" not in name}
# Remove the non-tensor config entry, if any, before load_state_dict().
controlnet_state_dict.pop("animatediff_config", "")
controlnet.load_state_dict(controlnet_state_dict)
controlnet.to(dtype=torch.float16)
controlnet.cuda()

# Attach the SparseCtrl ControlNet to the loaded pipeline.
pipe.controlnet = controlnet

# Enable xformers memory-efficient attention when available (imported at the
# top of the file from diffusers.utils.import_utils).
without_xformers = False
if is_xformers_available() and (not without_xformers):
    unet.enable_xformers_memory_efficient_attention()
    if controlnet is not None:
        print("\nenable_xformers_memory_efficient_attention\n")
        controlnet.enable_xformers_memory_efficient_attention()

pipe.to(dtype=torch.float16)
pipe.enable_vae_slicing()
# BUGFIX(review): the original called pipe.to("cuda") AFTER
# enable_model_cpu_offload(). Model CPU offload installs accelerate hooks that
# manage device placement themselves; moving the pipeline to CUDA afterwards
# conflicts with those hooks and defeats the offloading, so the manual
# .to("cuda") is dropped and offloading is enabled last.
pipe.enable_model_cpu_offload()
Downloads last month
3
Inference Providers NEW
This model isn't deployed by any Inference Provider. 🙋 Ask for provider support