"""ComfyUI extension entry point.

Imports the first-block-cache, enhanced loader/compile, and Velocator
quantization/compile node classes and registers them with ComfyUI via the
``NODE_CLASS_MAPPINGS`` / ``NODE_DISPLAY_NAME_MAPPINGS`` module attributes.
"""
from .fbcache_nodes import ApplyFBCacheOnModel
from .misc_nodes import (
EnhancedLoadDiffusionModel,
EnhancedCompileModel,
)
from .velocator_nodes import (
VelocatorCompileModel,
VelocatorLoadAndQuantizeClip,
VelocatorLoadAndQuantizeDiffusionModel,
VelocatorQuantizeModel,
)
# NOTE(review): disabled monkey-patch that replaced comfy.model_management.cast_to
# with a tensor-subclass-friendly variant (torch.empty_like did not work well with
# tensor subclasses). Kept commented out for reference — confirm it is obsolete
# before deleting.
# def patch_cast_to():
#     def cast_to(weight, dtype=None, device=None, non_blocking=False, copy=False):
#         if device is None or weight.device == device:
#             if not copy:
#                 if dtype is None or weight.dtype == dtype:
#                     return weight
#             return weight.to(dtype=dtype, copy=copy)
#
#         # torch.empty_like does not work with tensor subclasses well
#         # r = torch.empty_like(weight, dtype=dtype, device=device)
#         # r.copy_(weight, non_blocking=non_blocking)
#         r = weight.to(device=device, dtype=dtype, non_blocking=non_blocking, copy=copy)
#         return r
#
#     from comfy import model_management
#
#     model_management.cast_to = cast_to
#
#
# patch_cast_to()
# Registry consumed by ComfyUI at extension load time: maps each node's
# internal type identifier to the class implementing it. Every key is a
# valid Python identifier, so keyword-argument form keeps the literal terse.
NODE_CLASS_MAPPINGS = dict(
    ApplyFBCacheOnModel=ApplyFBCacheOnModel,
    EnhancedLoadDiffusionModel=EnhancedLoadDiffusionModel,
    EnhancedCompileModel=EnhancedCompileModel,
    VelocatorLoadAndQuantizeDiffusionModel=VelocatorLoadAndQuantizeDiffusionModel,
    VelocatorLoadAndQuantizeClip=VelocatorLoadAndQuantizeClip,
    VelocatorQuantizeModel=VelocatorQuantizeModel,
    VelocatorCompileModel=VelocatorCompileModel,
)
# Human-readable labels ComfyUI shows in its node picker, keyed by the same
# identifiers as NODE_CLASS_MAPPINGS. Built from (key, label) pairs; the
# label strings (including the rocket emoji prefixes) are preserved exactly.
NODE_DISPLAY_NAME_MAPPINGS = dict(
    [
        ("ApplyFBCacheOnModel", "Apply First Block Cache"),
        ("EnhancedLoadDiffusionModel", "Load Diffusion Model+"),
        ("EnhancedCompileModel", "Compile Model+"),
        ("VelocatorLoadAndQuantizeDiffusionModel", "🚀Load & Quantize Diffusion Model"),
        ("VelocatorLoadAndQuantizeClip", "🚀Load & Quantize CLIP"),
        ("VelocatorQuantizeModel", "🚀Quantize Model"),
        ("VelocatorCompileModel", "🚀Compile Model"),
    ]
)
|