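"""Utilities for wrapping diffusers UNet / Transformer blocks in ``CachedModule``.

``prepare(pipe, config_list)`` swaps the supported blocks of a pipeline's
``unet`` or ``transformer`` for ``CachedModule`` wrappers, and ``infer(pipe)``
is a context manager that resets the per-module step counters after a run.

A rough usage sketch (the pattern and the step-selection callback below are
illustrative only; see ``.module.CachedModule`` for the exact callback
contract)::

    config_list = [
        {
            # fnmatch pattern (or callable) matched against sub-module names
            "wildcard_or_filter_func": "*up_blocks*",
            # decides on which steps the cached output is reused
            "select_cache_step_func": lambda step: step % 2 == 1,
        },
    ]
    prepare(pipe, config_list)
    with infer(pipe) as cached_pipe:
        result = cached_pipe(prompt)
"""
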
import fnmatch
from contextlib import contextmanager

from diffusers.models.attention import BasicTransformerBlock, JointTransformerBlock
from diffusers.models.transformers.pixart_transformer_2d import PixArtTransformer2DModel
from diffusers.models.transformers.transformer_sd3 import SD3Transformer2DModel
from diffusers.models.unets.unet_2d_blocks import (
    CrossAttnDownBlock2D,
    CrossAttnUpBlock2D,
    DownBlock2D,
    UNetMidBlock2DCrossAttn,
    UpBlock2D,
)
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
from diffusers.models.unets.unet_3d_blocks import (
    CrossAttnDownBlockSpatioTemporal,
    CrossAttnUpBlockSpatioTemporal,
    DownBlockSpatioTemporal,
    UNetMidBlockSpatioTemporal,
    UpBlockSpatioTemporal,
)
from diffusers.models.unets.unet_spatio_temporal_condition import UNetSpatioTemporalConditionModel

from .module import CachedModule
from .utils import replace_module

CACHED_PIPE = {
    UNet2DConditionModel: (
        DownBlock2D,
        CrossAttnDownBlock2D,
        UNetMidBlock2DCrossAttn,
        CrossAttnUpBlock2D,
        UpBlock2D,
    ),
    PixArtTransformer2DModel: (BasicTransformerBlock,),
    UNetSpatioTemporalConditionModel: (
        CrossAttnDownBlockSpatioTemporal,
        DownBlockSpatioTemporal,
        UpBlockSpatioTemporal,
        CrossAttnUpBlockSpatioTemporal,
        UNetMidBlockSpatioTemporal,
    ),
    SD3Transformer2DModel: (JointTransformerBlock,),
}


def _apply_to_modules(model, action, modules=None, config_list=None):
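    """Apply ``action`` to every ``CachedModule`` already present in ``model``.

    When ``config_list`` (and, for eager models, ``modules``) is provided,
    sub-modules whose name matches a config entry are additionally wrapped in
    ``CachedModule``. Both TensorRT engine dicts (``model.engines``) and plain
    ``named_modules()`` traversal are supported.
    """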
    if hasattr(model, "use_trt_infer") and model.use_trt_infer:
        for key, module in model.engines.items():
            if isinstance(module, CachedModule):
                action(module)
            elif config_list:
                for config in config_list:
                    if _pass(key, config["wildcard_or_filter_func"]):
                        model.engines[key] = CachedModule(module, config["select_cache_step_func"])
    else:
        for name, module in model.named_modules():
            if isinstance(module, CachedModule):
                action(module)
            elif modules and config_list:
                for config in config_list:
                    if _pass(name, config["wildcard_or_filter_func"]) and isinstance(
                        module, modules
                    ):
                        replace_module(
                            model,
                            name,
                            CachedModule(module, config["select_cache_step_func"]),
                        )


def cachify(model, config_list, modules):
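    """Wrap matching sub-modules of ``model`` in ``CachedModule`` per ``config_list``."""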
    def cache_action(module):
        pass  # No action needed, caching is handled in the loop itself

    _apply_to_modules(model, cache_action, modules, config_list)


def disable(pipe):
    model = get_model(pipe)
    _apply_to_modules(model, lambda module: module.disable_cache())


def enable(pipe):
    model = get_model(pipe)
    _apply_to_modules(model, lambda module: module.enable_cache())


def reset_status(pipe):
    model = get_model(pipe)
    _apply_to_modules(model, lambda module: setattr(module, "cur_step", 0))


def _pass(name, wildcard_or_filter_func):
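    """Return True if ``name`` matches the fnmatch pattern or satisfies the filter callable."""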
    if isinstance(wildcard_or_filter_func, str):
        return fnmatch.fnmatch(name, wildcard_or_filter_func)
    elif callable(wildcard_or_filter_func):
        return wildcard_or_filter_func(name)
    else:
        raise NotImplementedError(f"Unsupported type {type(wildcard_or_filter_func)}")


def get_model(pipe):
    if hasattr(pipe, "unet"):
        return pipe.unet
    elif hasattr(pipe, "transformer"):
        return pipe.transformer
    else:
        raise KeyError("Pipeline has neither a `unet` nor a `transformer` attribute")


@contextmanager
def infer(pipe):
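    """Context manager that yields ``pipe`` and resets cache step counters on exit."""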
    try:
        yield pipe
    finally:
        reset_status(pipe)


def prepare(pipe, config_list):
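    """Enable block-level caching on ``pipe``'s model according to ``config_list``."""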
    model = get_model(pipe)
    assert model.__class__ in CACHED_PIPE, f"{model.__class__} is not supported!"
    cachify(model, config_list, CACHED_PIPE[model.__class__])