Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- fastvideo/models/hunyuan/__pycache__/__init__.cpython-310.pyc +0 -0
- fastvideo/models/hunyuan/__pycache__/__init__.cpython-312.pyc +0 -0
- fastvideo/models/hunyuan/__pycache__/constants.cpython-310.pyc +0 -0
- fastvideo/models/hunyuan/__pycache__/constants.cpython-312.pyc +0 -0
- fastvideo/models/hunyuan/diffusion/__init__.py +3 -0
- fastvideo/models/hunyuan/diffusion/pipelines/__init__.py +2 -0
- fastvideo/models/hunyuan/diffusion/pipelines/pipeline_hunyuan_video.py +1010 -0
- fastvideo/models/hunyuan/diffusion/schedulers/__init__.py +2 -0
- fastvideo/models/hunyuan/diffusion/schedulers/scheduling_flow_match_discrete.py +248 -0
- fastvideo/models/hunyuan/modules/__init__.py +25 -0
- fastvideo/models/hunyuan/modules/__pycache__/activation_layers.cpython-310.pyc +0 -0
- fastvideo/models/hunyuan/modules/__pycache__/activation_layers.cpython-312.pyc +0 -0
- fastvideo/models/hunyuan/modules/__pycache__/attenion.cpython-312.pyc +0 -0
- fastvideo/models/hunyuan/modules/__pycache__/embed_layers.cpython-310.pyc +0 -0
- fastvideo/models/hunyuan/modules/__pycache__/mlp_layers.cpython-310.pyc +0 -0
- fastvideo/models/hunyuan/modules/__pycache__/mlp_layers.cpython-312.pyc +0 -0
- fastvideo/models/hunyuan/modules/__pycache__/modulate_layers.cpython-310.pyc +0 -0
- fastvideo/models/hunyuan/modules/__pycache__/modulate_layers.cpython-312.pyc +0 -0
- fastvideo/models/hunyuan/modules/__pycache__/norm_layers.cpython-310.pyc +0 -0
- fastvideo/models/hunyuan/modules/__pycache__/posemb_layers.cpython-312.pyc +0 -0
- fastvideo/models/hunyuan/modules/__pycache__/token_refiner.cpython-310.pyc +0 -0
- fastvideo/models/hunyuan/modules/activation_layers.py +23 -0
- fastvideo/models/hunyuan/modules/attenion.py +90 -0
- fastvideo/models/hunyuan/modules/embed_layers.py +163 -0
- fastvideo/models/hunyuan/modules/models.py +750 -0
- fastvideo/models/hunyuan/modules/modulate_layers.py +156 -0
- fastvideo/models/hunyuan/modules/norm_layers.py +79 -0
- fastvideo/models/hunyuan/modules/posemb_layers.py +314 -0
- fastvideo/models/hunyuan/modules/token_refiner.py +230 -0
- fastvideo/models/hunyuan/text_encoder/__init__.py +353 -0
- fastvideo/models/hunyuan/text_encoder/__pycache__/__init__.cpython-310.pyc +0 -0
- fastvideo/models/hunyuan/text_encoder/__pycache__/__init__.cpython-312.pyc +0 -0
- fastvideo/models/hunyuan/utils/__init__.py +0 -0
- fastvideo/models/hunyuan/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- fastvideo/models/hunyuan/utils/__pycache__/__init__.cpython-312.pyc +0 -0
- fastvideo/models/hunyuan/utils/__pycache__/helpers.cpython-310.pyc +0 -0
- fastvideo/models/hunyuan/utils/__pycache__/helpers.cpython-312.pyc +0 -0
- fastvideo/models/hunyuan/utils/data_utils.py +14 -0
- fastvideo/models/hunyuan/utils/file_utils.py +75 -0
- fastvideo/models/hunyuan/utils/helpers.py +41 -0
- fastvideo/models/hunyuan/utils/preprocess_text_encoder_tokenizer_utils.py +41 -0
- fastvideo/models/hunyuan/vae/__init__.py +68 -0
- fastvideo/models/hunyuan/vae/__pycache__/__init__.cpython-310.pyc +0 -0
- fastvideo/models/hunyuan/vae/__pycache__/__init__.cpython-312.pyc +0 -0
- fastvideo/models/hunyuan/vae/__pycache__/autoencoder_kl_causal_3d.cpython-310.pyc +0 -0
- fastvideo/models/hunyuan/vae/__pycache__/autoencoder_kl_causal_3d.cpython-312.pyc +0 -0
- fastvideo/models/hunyuan/vae/__pycache__/unet_causal_3d_blocks.cpython-310.pyc +0 -0
- fastvideo/models/hunyuan/vae/__pycache__/unet_causal_3d_blocks.cpython-312.pyc +0 -0
- fastvideo/models/hunyuan/vae/__pycache__/vae.cpython-310.pyc +0 -0
- fastvideo/models/hunyuan/vae/__pycache__/vae.cpython-312.pyc +0 -0
fastvideo/models/hunyuan/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (177 Bytes). View file
|
|
|
fastvideo/models/hunyuan/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (232 Bytes). View file
|
|
|
fastvideo/models/hunyuan/__pycache__/constants.cpython-310.pyc
ADDED
|
Binary file (2.04 kB). View file
|
|
|
fastvideo/models/hunyuan/__pycache__/constants.cpython-312.pyc
ADDED
|
Binary file (2.37 kB). View file
|
|
|
fastvideo/models/hunyuan/diffusion/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ruff: noqa: F401
|
| 2 |
+
from .pipelines import HunyuanVideoPipeline
|
| 3 |
+
from .schedulers import FlowMatchDiscreteScheduler
|
fastvideo/models/hunyuan/diffusion/pipelines/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ruff: noqa: F401
|
| 2 |
+
from .pipeline_hunyuan_video import HunyuanVideoPipeline
|
fastvideo/models/hunyuan/diffusion/pipelines/pipeline_hunyuan_video.py
ADDED
|
@@ -0,0 +1,1010 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
#
|
| 16 |
+
# Modified from diffusers==0.29.2
|
| 17 |
+
#
|
| 18 |
+
# ==============================================================================
|
| 19 |
+
import inspect
|
| 20 |
+
from dataclasses import dataclass
|
| 21 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 22 |
+
|
| 23 |
+
import numpy as np
|
| 24 |
+
import torch
|
| 25 |
+
import torch.distributed as dist
|
| 26 |
+
import torch.nn.functional as F
|
| 27 |
+
from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
|
| 28 |
+
from diffusers.configuration_utils import FrozenDict
|
| 29 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 30 |
+
from diffusers.loaders import LoraLoaderMixin, TextualInversionLoaderMixin
|
| 31 |
+
from diffusers.models import AutoencoderKL
|
| 32 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 33 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
|
| 34 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 35 |
+
from diffusers.utils import (USE_PEFT_BACKEND, BaseOutput, deprecate, logging,
|
| 36 |
+
replace_example_docstring, scale_lora_layers)
|
| 37 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 38 |
+
from einops import rearrange
|
| 39 |
+
|
| 40 |
+
from fastvideo.utils.communications import all_gather
|
| 41 |
+
from fastvideo.utils.parallel_states import (get_sequence_parallel_state,
|
| 42 |
+
nccl_info)
|
| 43 |
+
|
| 44 |
+
from ...constants import PRECISION_TO_TYPE
|
| 45 |
+
from ...modules import HYVideoDiffusionTransformer
|
| 46 |
+
from ...text_encoder import TextEncoder
|
| 47 |
+
from ...vae.autoencoder_kl_causal_3d import AutoencoderKLCausal3D
|
| 48 |
+
|
| 49 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 50 |
+
|
| 51 |
+
EXAMPLE_DOC_STRING = """"""
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
|
| 55 |
+
"""
|
| 56 |
+
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
|
| 57 |
+
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
|
| 58 |
+
"""
|
| 59 |
+
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)),
|
| 60 |
+
keepdim=True)
|
| 61 |
+
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
|
| 62 |
+
# rescale the results from guidance (fixes overexposure)
|
| 63 |
+
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
|
| 64 |
+
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
|
| 65 |
+
noise_cfg = (guidance_rescale * noise_pred_rescaled +
|
| 66 |
+
(1 - guidance_rescale) * noise_cfg)
|
| 67 |
+
return noise_cfg
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def retrieve_timesteps(
|
| 71 |
+
scheduler,
|
| 72 |
+
num_inference_steps: Optional[int] = None,
|
| 73 |
+
device: Optional[Union[str, torch.device]] = None,
|
| 74 |
+
timesteps: Optional[List[int]] = None,
|
| 75 |
+
sigmas: Optional[List[float]] = None,
|
| 76 |
+
**kwargs,
|
| 77 |
+
):
|
| 78 |
+
"""
|
| 79 |
+
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
|
| 80 |
+
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
|
| 81 |
+
|
| 82 |
+
Args:
|
| 83 |
+
scheduler (`SchedulerMixin`):
|
| 84 |
+
The scheduler to get timesteps from.
|
| 85 |
+
num_inference_steps (`int`):
|
| 86 |
+
The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
|
| 87 |
+
must be `None`.
|
| 88 |
+
device (`str` or `torch.device`, *optional*):
|
| 89 |
+
The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
|
| 90 |
+
timesteps (`List[int]`, *optional*):
|
| 91 |
+
Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
|
| 92 |
+
`num_inference_steps` and `sigmas` must be `None`.
|
| 93 |
+
sigmas (`List[float]`, *optional*):
|
| 94 |
+
Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
|
| 95 |
+
`num_inference_steps` and `timesteps` must be `None`.
|
| 96 |
+
|
| 97 |
+
Returns:
|
| 98 |
+
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
|
| 99 |
+
second element is the number of inference steps.
|
| 100 |
+
"""
|
| 101 |
+
if timesteps is not None and sigmas is not None:
|
| 102 |
+
raise ValueError(
|
| 103 |
+
"Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values"
|
| 104 |
+
)
|
| 105 |
+
if timesteps is not None:
|
| 106 |
+
accepts_timesteps = "timesteps" in set(
|
| 107 |
+
inspect.signature(scheduler.set_timesteps).parameters.keys())
|
| 108 |
+
if not accepts_timesteps:
|
| 109 |
+
raise ValueError(
|
| 110 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
| 111 |
+
f" timestep schedules. Please check whether you are using the correct scheduler."
|
| 112 |
+
)
|
| 113 |
+
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
|
| 114 |
+
timesteps = scheduler.timesteps
|
| 115 |
+
num_inference_steps = len(timesteps)
|
| 116 |
+
elif sigmas is not None:
|
| 117 |
+
accept_sigmas = "sigmas" in set(
|
| 118 |
+
inspect.signature(scheduler.set_timesteps).parameters.keys())
|
| 119 |
+
if not accept_sigmas:
|
| 120 |
+
raise ValueError(
|
| 121 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
| 122 |
+
f" sigmas schedules. Please check whether you are using the correct scheduler."
|
| 123 |
+
)
|
| 124 |
+
scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
|
| 125 |
+
timesteps = scheduler.timesteps
|
| 126 |
+
num_inference_steps = len(timesteps)
|
| 127 |
+
else:
|
| 128 |
+
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
|
| 129 |
+
timesteps = scheduler.timesteps
|
| 130 |
+
return timesteps, num_inference_steps
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
@dataclass
|
| 134 |
+
class HunyuanVideoPipelineOutput(BaseOutput):
|
| 135 |
+
videos: Union[torch.Tensor, np.ndarray]
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
class HunyuanVideoPipeline(DiffusionPipeline):
|
| 139 |
+
r"""
|
| 140 |
+
Pipeline for text-to-video generation using HunyuanVideo.
|
| 141 |
+
|
| 142 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 143 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 144 |
+
|
| 145 |
+
Args:
|
| 146 |
+
vae ([`AutoencoderKL`]):
|
| 147 |
+
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
|
| 148 |
+
text_encoder ([`TextEncoder`]):
|
| 149 |
+
Frozen text-encoder.
|
| 150 |
+
text_encoder_2 ([`TextEncoder`]):
|
| 151 |
+
Frozen text-encoder_2.
|
| 152 |
+
transformer ([`HYVideoDiffusionTransformer`]):
|
| 153 |
+
A `HYVideoDiffusionTransformer` to denoise the encoded video latents.
|
| 154 |
+
scheduler ([`SchedulerMixin`]):
|
| 155 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents.
|
| 156 |
+
"""
|
| 157 |
+
|
| 158 |
+
model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
|
| 159 |
+
_optional_components = ["text_encoder_2"]
|
| 160 |
+
_exclude_from_cpu_offload = ["transformer"]
|
| 161 |
+
_callback_tensor_inputs = [
|
| 162 |
+
"latents", "prompt_embeds", "negative_prompt_embeds"
|
| 163 |
+
]
|
| 164 |
+
|
| 165 |
+
def __init__(
|
| 166 |
+
self,
|
| 167 |
+
vae: AutoencoderKL,
|
| 168 |
+
text_encoder: TextEncoder,
|
| 169 |
+
transformer: HYVideoDiffusionTransformer,
|
| 170 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 171 |
+
text_encoder_2: Optional[TextEncoder] = None,
|
| 172 |
+
progress_bar_config: Dict[str, Any] = None,
|
| 173 |
+
args=None,
|
| 174 |
+
):
|
| 175 |
+
super().__init__()
|
| 176 |
+
|
| 177 |
+
# ==========================================================================================
|
| 178 |
+
if progress_bar_config is None:
|
| 179 |
+
progress_bar_config = {}
|
| 180 |
+
if not hasattr(self, "_progress_bar_config"):
|
| 181 |
+
self._progress_bar_config = {}
|
| 182 |
+
self._progress_bar_config.update(progress_bar_config)
|
| 183 |
+
|
| 184 |
+
self.args = args
|
| 185 |
+
# ==========================================================================================
|
| 186 |
+
|
| 187 |
+
if (hasattr(scheduler.config, "steps_offset")
|
| 188 |
+
and scheduler.config.steps_offset != 1):
|
| 189 |
+
deprecation_message = (
|
| 190 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 191 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 192 |
+
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
| 193 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 194 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 195 |
+
" file")
|
| 196 |
+
deprecate("steps_offset!=1",
|
| 197 |
+
"1.0.0",
|
| 198 |
+
deprecation_message,
|
| 199 |
+
standard_warn=False)
|
| 200 |
+
new_config = dict(scheduler.config)
|
| 201 |
+
new_config["steps_offset"] = 1
|
| 202 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 203 |
+
|
| 204 |
+
if (hasattr(scheduler.config, "clip_sample")
|
| 205 |
+
and scheduler.config.clip_sample is True):
|
| 206 |
+
deprecation_message = (
|
| 207 |
+
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
|
| 208 |
+
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
|
| 209 |
+
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
|
| 210 |
+
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
|
| 211 |
+
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
|
| 212 |
+
)
|
| 213 |
+
deprecate("clip_sample not set",
|
| 214 |
+
"1.0.0",
|
| 215 |
+
deprecation_message,
|
| 216 |
+
standard_warn=False)
|
| 217 |
+
new_config = dict(scheduler.config)
|
| 218 |
+
new_config["clip_sample"] = False
|
| 219 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 220 |
+
|
| 221 |
+
self.register_modules(
|
| 222 |
+
vae=vae,
|
| 223 |
+
text_encoder=text_encoder,
|
| 224 |
+
transformer=transformer,
|
| 225 |
+
scheduler=scheduler,
|
| 226 |
+
text_encoder_2=text_encoder_2,
|
| 227 |
+
)
|
| 228 |
+
self.vae_scale_factor = 2**(len(self.vae.config.block_out_channels) -
|
| 229 |
+
1)
|
| 230 |
+
self.image_processor = VaeImageProcessor(
|
| 231 |
+
vae_scale_factor=self.vae_scale_factor)
|
| 232 |
+
|
| 233 |
+
def encode_prompt(
|
| 234 |
+
self,
|
| 235 |
+
prompt,
|
| 236 |
+
device,
|
| 237 |
+
num_videos_per_prompt,
|
| 238 |
+
do_classifier_free_guidance,
|
| 239 |
+
negative_prompt=None,
|
| 240 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 241 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 242 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 243 |
+
negative_attention_mask: Optional[torch.Tensor] = None,
|
| 244 |
+
lora_scale: Optional[float] = None,
|
| 245 |
+
clip_skip: Optional[int] = None,
|
| 246 |
+
text_encoder: Optional[TextEncoder] = None,
|
| 247 |
+
data_type: Optional[str] = "image",
|
| 248 |
+
):
|
| 249 |
+
r"""
|
| 250 |
+
Encodes the prompt into text encoder hidden states.
|
| 251 |
+
|
| 252 |
+
Args:
|
| 253 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 254 |
+
prompt to be encoded
|
| 255 |
+
device: (`torch.device`):
|
| 256 |
+
torch device
|
| 257 |
+
num_videos_per_prompt (`int`):
|
| 258 |
+
number of videos that should be generated per prompt
|
| 259 |
+
do_classifier_free_guidance (`bool`):
|
| 260 |
+
whether to use classifier free guidance or not
|
| 261 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 262 |
+
The prompt or prompts not to guide the video generation. If not defined, one has to pass
|
| 263 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 264 |
+
less than `1`).
|
| 265 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 266 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 267 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 268 |
+
attention_mask (`torch.Tensor`, *optional*):
|
| 269 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 270 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 271 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 272 |
+
argument.
|
| 273 |
+
negative_attention_mask (`torch.Tensor`, *optional*):
|
| 274 |
+
lora_scale (`float`, *optional*):
|
| 275 |
+
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 276 |
+
clip_skip (`int`, *optional*):
|
| 277 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 278 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 279 |
+
text_encoder (TextEncoder, *optional*):
|
| 280 |
+
data_type (`str`, *optional*):
|
| 281 |
+
"""
|
| 282 |
+
if text_encoder is None:
|
| 283 |
+
text_encoder = self.text_encoder
|
| 284 |
+
|
| 285 |
+
# set lora scale so that monkey patched LoRA
|
| 286 |
+
# function of text encoder can correctly access it
|
| 287 |
+
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
| 288 |
+
self._lora_scale = lora_scale
|
| 289 |
+
|
| 290 |
+
# dynamically adjust the LoRA scale
|
| 291 |
+
if not USE_PEFT_BACKEND:
|
| 292 |
+
adjust_lora_scale_text_encoder(text_encoder.model, lora_scale)
|
| 293 |
+
else:
|
| 294 |
+
scale_lora_layers(text_encoder.model, lora_scale)
|
| 295 |
+
|
| 296 |
+
if prompt_embeds is None:
|
| 297 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 298 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 299 |
+
prompt = self.maybe_convert_prompt(prompt,
|
| 300 |
+
text_encoder.tokenizer)
|
| 301 |
+
|
| 302 |
+
text_inputs = text_encoder.text2tokens(prompt, data_type=data_type)
|
| 303 |
+
if clip_skip is None:
|
| 304 |
+
prompt_outputs = text_encoder.encode(text_inputs,
|
| 305 |
+
data_type=data_type,
|
| 306 |
+
device=device)
|
| 307 |
+
prompt_embeds = prompt_outputs.hidden_state
|
| 308 |
+
else:
|
| 309 |
+
prompt_outputs = text_encoder.encode(
|
| 310 |
+
text_inputs,
|
| 311 |
+
output_hidden_states=True,
|
| 312 |
+
data_type=data_type,
|
| 313 |
+
device=device,
|
| 314 |
+
)
|
| 315 |
+
# Access the `hidden_states` first, that contains a tuple of
|
| 316 |
+
# all the hidden states from the encoder layers. Then index into
|
| 317 |
+
# the tuple to access the hidden states from the desired layer.
|
| 318 |
+
prompt_embeds = prompt_outputs.hidden_states_list[-(clip_skip +
|
| 319 |
+
1)]
|
| 320 |
+
# We also need to apply the final LayerNorm here to not mess with the
|
| 321 |
+
# representations. The `last_hidden_states` that we typically use for
|
| 322 |
+
# obtaining the final prompt representations passes through the LayerNorm
|
| 323 |
+
# layer.
|
| 324 |
+
prompt_embeds = text_encoder.model.text_model.final_layer_norm(
|
| 325 |
+
prompt_embeds)
|
| 326 |
+
|
| 327 |
+
attention_mask = prompt_outputs.attention_mask
|
| 328 |
+
if attention_mask is not None:
|
| 329 |
+
attention_mask = attention_mask.to(device)
|
| 330 |
+
bs_embed, seq_len = attention_mask.shape
|
| 331 |
+
attention_mask = attention_mask.repeat(1,
|
| 332 |
+
num_videos_per_prompt)
|
| 333 |
+
attention_mask = attention_mask.view(
|
| 334 |
+
bs_embed * num_videos_per_prompt, seq_len)
|
| 335 |
+
|
| 336 |
+
if text_encoder is not None:
|
| 337 |
+
prompt_embeds_dtype = text_encoder.dtype
|
| 338 |
+
elif self.transformer is not None:
|
| 339 |
+
prompt_embeds_dtype = self.transformer.dtype
|
| 340 |
+
else:
|
| 341 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 342 |
+
|
| 343 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype,
|
| 344 |
+
device=device)
|
| 345 |
+
|
| 346 |
+
if prompt_embeds.ndim == 2:
|
| 347 |
+
bs_embed, _ = prompt_embeds.shape
|
| 348 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 349 |
+
prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt)
|
| 350 |
+
prompt_embeds = prompt_embeds.view(
|
| 351 |
+
bs_embed * num_videos_per_prompt, -1)
|
| 352 |
+
else:
|
| 353 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 354 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 355 |
+
prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
|
| 356 |
+
prompt_embeds = prompt_embeds.view(
|
| 357 |
+
bs_embed * num_videos_per_prompt, seq_len, -1)
|
| 358 |
+
|
| 359 |
+
return (
|
| 360 |
+
prompt_embeds,
|
| 361 |
+
negative_prompt_embeds,
|
| 362 |
+
attention_mask,
|
| 363 |
+
negative_attention_mask,
|
| 364 |
+
)
|
| 365 |
+
|
| 366 |
+
def decode_latents(self, latents, enable_tiling=True):
|
| 367 |
+
deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
|
| 368 |
+
deprecate("decode_latents",
|
| 369 |
+
"1.0.0",
|
| 370 |
+
deprecation_message,
|
| 371 |
+
standard_warn=False)
|
| 372 |
+
|
| 373 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 374 |
+
if enable_tiling:
|
| 375 |
+
self.vae.enable_tiling()
|
| 376 |
+
image = self.vae.decode(latents, return_dict=False)[0]
|
| 377 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 378 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 379 |
+
if image.ndim == 4:
|
| 380 |
+
image = image.cpu().permute(0, 2, 3, 1).float()
|
| 381 |
+
else:
|
| 382 |
+
image = image.cpu().float()
|
| 383 |
+
return image
|
| 384 |
+
|
| 385 |
+
def prepare_extra_func_kwargs(self, func, kwargs):
|
| 386 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 387 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 388 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 389 |
+
# and should be between [0, 1]
|
| 390 |
+
extra_step_kwargs = {}
|
| 391 |
+
|
| 392 |
+
for k, v in kwargs.items():
|
| 393 |
+
accepts = k in set(inspect.signature(func).parameters.keys())
|
| 394 |
+
if accepts:
|
| 395 |
+
extra_step_kwargs[k] = v
|
| 396 |
+
return extra_step_kwargs
|
| 397 |
+
|
| 398 |
+
def check_inputs(
|
| 399 |
+
self,
|
| 400 |
+
prompt,
|
| 401 |
+
height,
|
| 402 |
+
width,
|
| 403 |
+
video_length,
|
| 404 |
+
callback_steps,
|
| 405 |
+
negative_prompt=None,
|
| 406 |
+
prompt_embeds=None,
|
| 407 |
+
negative_prompt_embeds=None,
|
| 408 |
+
callback_on_step_end_tensor_inputs=None,
|
| 409 |
+
vae_ver="88-4c-sd",
|
| 410 |
+
):
|
| 411 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 412 |
+
raise ValueError(
|
| 413 |
+
f"`height` and `width` have to be divisible by 8 but are {height} and {width}."
|
| 414 |
+
)
|
| 415 |
+
|
| 416 |
+
if video_length is not None:
|
| 417 |
+
if "884" in vae_ver:
|
| 418 |
+
if video_length != 1 and (video_length - 1) % 4 != 0:
|
| 419 |
+
raise ValueError(
|
| 420 |
+
f"`video_length` has to be 1 or a multiple of 4 but is {video_length}."
|
| 421 |
+
)
|
| 422 |
+
elif "888" in vae_ver:
|
| 423 |
+
if video_length != 1 and (video_length - 1) % 8 != 0:
|
| 424 |
+
raise ValueError(
|
| 425 |
+
f"`video_length` has to be 1 or a multiple of 8 but is {video_length}."
|
| 426 |
+
)
|
| 427 |
+
|
| 428 |
+
if callback_steps is not None and (not isinstance(callback_steps, int)
|
| 429 |
+
or callback_steps <= 0):
|
| 430 |
+
raise ValueError(
|
| 431 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 432 |
+
f" {type(callback_steps)}.")
|
| 433 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 434 |
+
k in self._callback_tensor_inputs
|
| 435 |
+
for k in callback_on_step_end_tensor_inputs):
|
| 436 |
+
raise ValueError(
|
| 437 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 438 |
+
)
|
| 439 |
+
|
| 440 |
+
if prompt is not None and prompt_embeds is not None:
|
| 441 |
+
raise ValueError(
|
| 442 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 443 |
+
" only forward one of the two.")
|
| 444 |
+
elif prompt is None and prompt_embeds is None:
|
| 445 |
+
raise ValueError(
|
| 446 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 447 |
+
)
|
| 448 |
+
elif prompt is not None and (not isinstance(prompt, str)
|
| 449 |
+
and not isinstance(prompt, list)):
|
| 450 |
+
raise ValueError(
|
| 451 |
+
f"`prompt` has to be of type `str` or `list` but is {type(prompt)}"
|
| 452 |
+
)
|
| 453 |
+
|
| 454 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 455 |
+
raise ValueError(
|
| 456 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 457 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 458 |
+
)
|
| 459 |
+
|
| 460 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 461 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 462 |
+
raise ValueError(
|
| 463 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 464 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 465 |
+
f" {negative_prompt_embeds.shape}.")
|
| 466 |
+
|
| 467 |
+
def prepare_latents(
|
| 468 |
+
self,
|
| 469 |
+
batch_size,
|
| 470 |
+
num_channels_latents,
|
| 471 |
+
height,
|
| 472 |
+
width,
|
| 473 |
+
video_length,
|
| 474 |
+
dtype,
|
| 475 |
+
device,
|
| 476 |
+
generator,
|
| 477 |
+
latents=None,
|
| 478 |
+
):
|
| 479 |
+
shape = (
|
| 480 |
+
batch_size,
|
| 481 |
+
num_channels_latents,
|
| 482 |
+
video_length,
|
| 483 |
+
int(height) // self.vae_scale_factor,
|
| 484 |
+
int(width) // self.vae_scale_factor,
|
| 485 |
+
)
|
| 486 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 487 |
+
raise ValueError(
|
| 488 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 489 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 490 |
+
)
|
| 491 |
+
|
| 492 |
+
if latents is None:
|
| 493 |
+
latents = randn_tensor(shape,
|
| 494 |
+
generator=generator,
|
| 495 |
+
device=device,
|
| 496 |
+
dtype=dtype)
|
| 497 |
+
else:
|
| 498 |
+
latents = latents.to(device)
|
| 499 |
+
|
| 500 |
+
# Check existence to make it compatible with FlowMatchEulerDiscreteScheduler
|
| 501 |
+
if hasattr(self.scheduler, "init_noise_sigma"):
|
| 502 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 503 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 504 |
+
return latents
|
| 505 |
+
|
| 506 |
+
# Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
|
| 507 |
+
def get_guidance_scale_embedding(
|
| 508 |
+
self,
|
| 509 |
+
w: torch.Tensor,
|
| 510 |
+
embedding_dim: int = 512,
|
| 511 |
+
dtype: torch.dtype = torch.float32,
|
| 512 |
+
) -> torch.Tensor:
|
| 513 |
+
"""
|
| 514 |
+
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
|
| 515 |
+
|
| 516 |
+
Args:
|
| 517 |
+
w (`torch.Tensor`):
|
| 518 |
+
Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
|
| 519 |
+
embedding_dim (`int`, *optional*, defaults to 512):
|
| 520 |
+
Dimension of the embeddings to generate.
|
| 521 |
+
dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
|
| 522 |
+
Data type of the generated embeddings.
|
| 523 |
+
|
| 524 |
+
Returns:
|
| 525 |
+
`torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
|
| 526 |
+
"""
|
| 527 |
+
assert len(w.shape) == 1
|
| 528 |
+
w = w * 1000.0
|
| 529 |
+
|
| 530 |
+
half_dim = embedding_dim // 2
|
| 531 |
+
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
|
| 532 |
+
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
|
| 533 |
+
emb = w.to(dtype)[:, None] * emb[None, :]
|
| 534 |
+
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
|
| 535 |
+
if embedding_dim % 2 == 1: # zero pad
|
| 536 |
+
emb = torch.nn.functional.pad(emb, (0, 1))
|
| 537 |
+
assert emb.shape == (w.shape[0], embedding_dim)
|
| 538 |
+
return emb
|
| 539 |
+
|
| 540 |
+
@property
|
| 541 |
+
def guidance_scale(self):
|
| 542 |
+
return self._guidance_scale
|
| 543 |
+
|
| 544 |
+
@property
|
| 545 |
+
def guidance_rescale(self):
|
| 546 |
+
return self._guidance_rescale
|
| 547 |
+
|
| 548 |
+
@property
|
| 549 |
+
def clip_skip(self):
|
| 550 |
+
return self._clip_skip
|
| 551 |
+
|
| 552 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 553 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 554 |
+
# corresponds to doing no classifier free guidance.
|
| 555 |
+
@property
|
| 556 |
+
def do_classifier_free_guidance(self):
|
| 557 |
+
# return self._guidance_scale > 1 and self.transformer.config.time_cond_proj_dim is None
|
| 558 |
+
return self._guidance_scale > 1
|
| 559 |
+
|
| 560 |
+
@property
|
| 561 |
+
def cross_attention_kwargs(self):
|
| 562 |
+
return self._cross_attention_kwargs
|
| 563 |
+
|
| 564 |
+
@property
|
| 565 |
+
def num_timesteps(self):
|
| 566 |
+
return self._num_timesteps
|
| 567 |
+
|
| 568 |
+
@property
|
| 569 |
+
def interrupt(self):
|
| 570 |
+
return self._interrupt
|
| 571 |
+
|
| 572 |
+
@torch.no_grad()
|
| 573 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 574 |
+
def __call__(
|
| 575 |
+
self,
|
| 576 |
+
prompt: Union[str, List[str]],
|
| 577 |
+
height: int,
|
| 578 |
+
width: int,
|
| 579 |
+
video_length: int,
|
| 580 |
+
data_type: str = "video",
|
| 581 |
+
num_inference_steps: int = 50,
|
| 582 |
+
timesteps: List[int] = None,
|
| 583 |
+
sigmas: List[float] = None,
|
| 584 |
+
guidance_scale: float = 7.5,
|
| 585 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 586 |
+
num_videos_per_prompt: Optional[int] = 1,
|
| 587 |
+
eta: float = 0.0,
|
| 588 |
+
generator: Optional[Union[torch.Generator,
|
| 589 |
+
List[torch.Generator]]] = None,
|
| 590 |
+
latents: Optional[torch.Tensor] = None,
|
| 591 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 592 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 593 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 594 |
+
negative_attention_mask: Optional[torch.Tensor] = None,
|
| 595 |
+
output_type: Optional[str] = "pil",
|
| 596 |
+
return_dict: bool = True,
|
| 597 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 598 |
+
guidance_rescale: float = 0.0,
|
| 599 |
+
clip_skip: Optional[int] = None,
|
| 600 |
+
callback_on_step_end: Optional[Union[Callable[[int, int, Dict],
|
| 601 |
+
None], PipelineCallback,
|
| 602 |
+
MultiPipelineCallbacks, ]] = None,
|
| 603 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 604 |
+
vae_ver: str = "88-4c-sd",
|
| 605 |
+
enable_tiling: bool = False,
|
| 606 |
+
enable_vae_sp: bool = False,
|
| 607 |
+
n_tokens: Optional[int] = None,
|
| 608 |
+
embedded_guidance_scale: Optional[float] = None,
|
| 609 |
+
**kwargs,
|
| 610 |
+
):
|
| 611 |
+
r"""
|
| 612 |
+
The call function to the pipeline for generation.
|
| 613 |
+
|
| 614 |
+
Args:
|
| 615 |
+
prompt (`str` or `List[str]`):
|
| 616 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 617 |
+
height (`int`):
|
| 618 |
+
The height in pixels of the generated image.
|
| 619 |
+
width (`int`):
|
| 620 |
+
The width in pixels of the generated image.
|
| 621 |
+
video_length (`int`):
|
| 622 |
+
The number of frames in the generated video.
|
| 623 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 624 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 625 |
+
expense of slower inference.
|
| 626 |
+
timesteps (`List[int]`, *optional*):
|
| 627 |
+
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
|
| 628 |
+
in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
|
| 629 |
+
passed will be used. Must be in descending order.
|
| 630 |
+
sigmas (`List[float]`, *optional*):
|
| 631 |
+
Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
|
| 632 |
+
their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
|
| 633 |
+
will be used.
|
| 634 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 635 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 636 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 637 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 638 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 639 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 640 |
+
num_videos_per_prompt (`int`, *optional*, defaults to 1):
|
| 641 |
+
The number of images to generate per prompt.
|
| 642 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 643 |
+
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
|
| 644 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 645 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 646 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 647 |
+
generation deterministic.
|
| 648 |
+
latents (`torch.Tensor`, *optional*):
|
| 649 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
| 650 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 651 |
+
tensor is generated by sampling using the supplied random `generator`.
|
| 652 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 653 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 654 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 655 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 656 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 657 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 658 |
+
|
| 659 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 660 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 661 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 662 |
+
Whether or not to return a [`HunyuanVideoPipelineOutput`] instead of a
|
| 663 |
+
plain tuple.
|
| 664 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 665 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 666 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 667 |
+
guidance_rescale (`float`, *optional*, defaults to 0.0):
|
| 668 |
+
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
|
| 669 |
+
Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
|
| 670 |
+
using zero terminal SNR.
|
| 671 |
+
clip_skip (`int`, *optional*):
|
| 672 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 673 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 674 |
+
callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
|
| 675 |
+
A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
|
| 676 |
+
each denoising step during the inference. with the following arguments: `callback_on_step_end(self:
|
| 677 |
+
DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
|
| 678 |
+
list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
|
| 679 |
+
callback_on_step_end_tensor_inputs (`List`, *optional*):
|
| 680 |
+
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
|
| 681 |
+
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
|
| 682 |
+
`._callback_tensor_inputs` attribute of your pipeline class.
|
| 683 |
+
|
| 684 |
+
Examples:
|
| 685 |
+
|
| 686 |
+
Returns:
|
| 687 |
+
[`~HunyuanVideoPipelineOutput`] or `tuple`:
|
| 688 |
+
If `return_dict` is `True`, [`HunyuanVideoPipelineOutput`] is returned,
|
| 689 |
+
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
| 690 |
+
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
| 691 |
+
"not-safe-for-work" (nsfw) content.
|
| 692 |
+
"""
|
| 693 |
+
callback = kwargs.pop("callback", None)
|
| 694 |
+
callback_steps = kwargs.pop("callback_steps", None)
|
| 695 |
+
|
| 696 |
+
if callback is not None:
|
| 697 |
+
deprecate(
|
| 698 |
+
"callback",
|
| 699 |
+
"1.0.0",
|
| 700 |
+
"Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 701 |
+
)
|
| 702 |
+
if callback_steps is not None:
|
| 703 |
+
deprecate(
|
| 704 |
+
"callback_steps",
|
| 705 |
+
"1.0.0",
|
| 706 |
+
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 707 |
+
)
|
| 708 |
+
|
| 709 |
+
if isinstance(callback_on_step_end,
|
| 710 |
+
(PipelineCallback, MultiPipelineCallbacks)):
|
| 711 |
+
callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
|
| 712 |
+
|
| 713 |
+
# 0. Default height and width to unet
|
| 714 |
+
# height = height or self.transformer.config.sample_size * self.vae_scale_factor
|
| 715 |
+
# width = width or self.transformer.config.sample_size * self.vae_scale_factor
|
| 716 |
+
# to deal with lora scaling and other possible forward hooks
|
| 717 |
+
|
| 718 |
+
# 1. Check inputs. Raise error if not correct
|
| 719 |
+
self.check_inputs(
|
| 720 |
+
prompt,
|
| 721 |
+
height,
|
| 722 |
+
width,
|
| 723 |
+
video_length,
|
| 724 |
+
callback_steps,
|
| 725 |
+
negative_prompt,
|
| 726 |
+
prompt_embeds,
|
| 727 |
+
negative_prompt_embeds,
|
| 728 |
+
callback_on_step_end_tensor_inputs,
|
| 729 |
+
vae_ver=vae_ver,
|
| 730 |
+
)
|
| 731 |
+
|
| 732 |
+
self._guidance_scale = guidance_scale
|
| 733 |
+
self._guidance_rescale = guidance_rescale
|
| 734 |
+
self._clip_skip = clip_skip
|
| 735 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 736 |
+
self._interrupt = False
|
| 737 |
+
|
| 738 |
+
# 2. Define call parameters
|
| 739 |
+
if prompt is not None and isinstance(prompt, str):
|
| 740 |
+
batch_size = 1
|
| 741 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 742 |
+
batch_size = len(prompt)
|
| 743 |
+
else:
|
| 744 |
+
batch_size = prompt_embeds.shape[0]
|
| 745 |
+
|
| 746 |
+
device = (torch.device(f"cuda:{dist.get_rank()}")
|
| 747 |
+
if dist.is_initialized() else self._execution_device)
|
| 748 |
+
|
| 749 |
+
# 3. Encode input prompt
|
| 750 |
+
lora_scale = (self.cross_attention_kwargs.get("scale", None)
|
| 751 |
+
if self.cross_attention_kwargs is not None else None)
|
| 752 |
+
|
| 753 |
+
(
|
| 754 |
+
prompt_embeds,
|
| 755 |
+
negative_prompt_embeds,
|
| 756 |
+
prompt_mask,
|
| 757 |
+
negative_prompt_mask,
|
| 758 |
+
) = self.encode_prompt(
|
| 759 |
+
prompt,
|
| 760 |
+
device,
|
| 761 |
+
num_videos_per_prompt,
|
| 762 |
+
self.do_classifier_free_guidance,
|
| 763 |
+
negative_prompt,
|
| 764 |
+
prompt_embeds=prompt_embeds,
|
| 765 |
+
attention_mask=attention_mask,
|
| 766 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 767 |
+
negative_attention_mask=negative_attention_mask,
|
| 768 |
+
lora_scale=lora_scale,
|
| 769 |
+
clip_skip=self.clip_skip,
|
| 770 |
+
data_type=data_type,
|
| 771 |
+
)
|
| 772 |
+
if self.text_encoder_2 is not None:
|
| 773 |
+
(
|
| 774 |
+
prompt_embeds_2,
|
| 775 |
+
negative_prompt_embeds_2,
|
| 776 |
+
prompt_mask_2,
|
| 777 |
+
negative_prompt_mask_2,
|
| 778 |
+
) = self.encode_prompt(
|
| 779 |
+
prompt,
|
| 780 |
+
device,
|
| 781 |
+
num_videos_per_prompt,
|
| 782 |
+
self.do_classifier_free_guidance,
|
| 783 |
+
negative_prompt,
|
| 784 |
+
prompt_embeds=None,
|
| 785 |
+
attention_mask=None,
|
| 786 |
+
negative_prompt_embeds=None,
|
| 787 |
+
negative_attention_mask=None,
|
| 788 |
+
lora_scale=lora_scale,
|
| 789 |
+
clip_skip=self.clip_skip,
|
| 790 |
+
text_encoder=self.text_encoder_2,
|
| 791 |
+
data_type=data_type,
|
| 792 |
+
)
|
| 793 |
+
else:
|
| 794 |
+
prompt_embeds_2 = None
|
| 795 |
+
negative_prompt_embeds_2 = None
|
| 796 |
+
prompt_mask_2 = None
|
| 797 |
+
negative_prompt_mask_2 = None
|
| 798 |
+
|
| 799 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 800 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 801 |
+
# to avoid doing two forward passes
|
| 802 |
+
if self.do_classifier_free_guidance:
|
| 803 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 804 |
+
if prompt_mask is not None:
|
| 805 |
+
prompt_mask = torch.cat([negative_prompt_mask, prompt_mask])
|
| 806 |
+
if prompt_embeds_2 is not None:
|
| 807 |
+
prompt_embeds_2 = torch.cat(
|
| 808 |
+
[negative_prompt_embeds_2, prompt_embeds_2])
|
| 809 |
+
if prompt_mask_2 is not None:
|
| 810 |
+
prompt_mask_2 = torch.cat(
|
| 811 |
+
[negative_prompt_mask_2, prompt_mask_2])
|
| 812 |
+
|
| 813 |
+
# 4. Prepare timesteps
|
| 814 |
+
extra_set_timesteps_kwargs = self.prepare_extra_func_kwargs(
|
| 815 |
+
self.scheduler.set_timesteps, {"n_tokens": n_tokens})
|
| 816 |
+
timesteps, num_inference_steps = retrieve_timesteps(
|
| 817 |
+
self.scheduler,
|
| 818 |
+
num_inference_steps,
|
| 819 |
+
device,
|
| 820 |
+
timesteps,
|
| 821 |
+
sigmas,
|
| 822 |
+
**extra_set_timesteps_kwargs,
|
| 823 |
+
)
|
| 824 |
+
if "884" in vae_ver:
|
| 825 |
+
video_length = (video_length - 1) // 4 + 1
|
| 826 |
+
elif "888" in vae_ver:
|
| 827 |
+
video_length = (video_length - 1) // 8 + 1
|
| 828 |
+
else:
|
| 829 |
+
video_length = video_length
|
| 830 |
+
|
| 831 |
+
# 5. Prepare latent variables
|
| 832 |
+
num_channels_latents = self.transformer.config.in_channels
|
| 833 |
+
latents = self.prepare_latents(
|
| 834 |
+
batch_size * num_videos_per_prompt,
|
| 835 |
+
num_channels_latents,
|
| 836 |
+
height,
|
| 837 |
+
width,
|
| 838 |
+
video_length,
|
| 839 |
+
prompt_embeds.dtype,
|
| 840 |
+
device,
|
| 841 |
+
generator,
|
| 842 |
+
latents,
|
| 843 |
+
)
|
| 844 |
+
|
| 845 |
+
world_size, rank = nccl_info.sp_size, nccl_info.rank_within_group
|
| 846 |
+
if get_sequence_parallel_state():
|
| 847 |
+
latents = rearrange(latents,
|
| 848 |
+
"b t (n s) h w -> b t n s h w",
|
| 849 |
+
n=world_size).contiguous()
|
| 850 |
+
latents = latents[:, :, rank, :, :, :]
|
| 851 |
+
|
| 852 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 853 |
+
extra_step_kwargs = self.prepare_extra_func_kwargs(
|
| 854 |
+
self.scheduler.step,
|
| 855 |
+
{
|
| 856 |
+
"generator": generator,
|
| 857 |
+
"eta": eta
|
| 858 |
+
},
|
| 859 |
+
)
|
| 860 |
+
|
| 861 |
+
target_dtype = PRECISION_TO_TYPE[self.args.precision]
|
| 862 |
+
autocast_enabled = (target_dtype !=
|
| 863 |
+
torch.float32) and not self.args.disable_autocast
|
| 864 |
+
vae_dtype = PRECISION_TO_TYPE[self.args.vae_precision]
|
| 865 |
+
vae_autocast_enabled = (
|
| 866 |
+
vae_dtype != torch.float32) and not self.args.disable_autocast
|
| 867 |
+
|
| 868 |
+
# 7. Denoising loop
|
| 869 |
+
num_warmup_steps = len(
|
| 870 |
+
timesteps) - num_inference_steps * self.scheduler.order
|
| 871 |
+
self._num_timesteps = len(timesteps)
|
| 872 |
+
|
| 873 |
+
# if is_progress_bar:
|
| 874 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 875 |
+
for i, t in enumerate(timesteps):
|
| 876 |
+
if self.interrupt:
|
| 877 |
+
continue
|
| 878 |
+
|
| 879 |
+
# expand the latents if we are doing classifier free guidance
|
| 880 |
+
latent_model_input = (torch.cat(
|
| 881 |
+
[latents] *
|
                    2) if self.do_classifier_free_guidance else latents)
                latent_model_input = self.scheduler.scale_model_input(
                    latent_model_input, t)

                t_expand = t.repeat(latent_model_input.shape[0])
                guidance_expand = (torch.tensor(
                    [embedded_guidance_scale] * latent_model_input.shape[0],
                    dtype=torch.float32,
                    device=device,
                ).to(target_dtype) * 1000.0
                                   if embedded_guidance_scale is not None else None)
                # predict the noise residual
                with torch.autocast(device_type="cuda",
                                    dtype=target_dtype,
                                    enabled=autocast_enabled):
                    # Concat prompt_embeds_2 and prompt_embeds; pad any width mismatch with zeros.
                    if prompt_embeds_2.shape[-1] != prompt_embeds.shape[-1]:
                        prompt_embeds_2 = F.pad(
                            prompt_embeds_2,
                            (0, prompt_embeds.shape[2] -
                             prompt_embeds_2.shape[1]),
                            value=0,
                        ).unsqueeze(1)
                    encoder_hidden_states = torch.cat(
                        [prompt_embeds_2, prompt_embeds], dim=1)
                    noise_pred = self.transformer(  # For an input image (129, 192, 336) (1, 256, 256)
                        latent_model_input,  # [2, 16, 33, 24, 42]
                        encoder_hidden_states,
                        t_expand,  # [2]
                        prompt_mask,  # [2, 256]
                        guidance=guidance_expand,
                        return_dict=False,
                    )[0]

                    # perform guidance
                    if self.do_classifier_free_guidance:
                        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                        noise_pred = noise_pred_uncond + self.guidance_scale * (
                            noise_pred_text - noise_pred_uncond)

                    if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
                        # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
                        noise_pred = rescale_noise_cfg(
                            noise_pred,
                            noise_pred_text,
                            guidance_rescale=self.guidance_rescale,
                        )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred,
                                              t,
                                              latents,
                                              **extra_step_kwargs,
                                              return_dict=False)[0]

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(
                        self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop(
                        "prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop(
                        "negative_prompt_embeds", negative_prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or (
                        (i + 1) > num_warmup_steps and
                        (i + 1) % self.scheduler.order == 0):
                    if progress_bar is not None:
                        progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

        if get_sequence_parallel_state():
            latents = all_gather(latents, dim=2)

        if not output_type == "latent":
            expand_temporal_dim = False
            if len(latents.shape) == 4:
                if isinstance(self.vae, AutoencoderKLCausal3D):
                    latents = latents.unsqueeze(2)
                    expand_temporal_dim = True
            elif len(latents.shape) == 5:
                pass
            else:
                raise ValueError(
                    f"Only support latents with shape (b, c, h, w) or (b, c, f, h, w), but got {latents.shape}."
                )

            if (hasattr(self.vae.config, "shift_factor")
                    and self.vae.config.shift_factor):
                latents = (latents / self.vae.config.scaling_factor +
                           self.vae.config.shift_factor)
            else:
                latents = latents / self.vae.config.scaling_factor

            with torch.autocast(device_type="cuda",
                                dtype=vae_dtype,
                                enabled=vae_autocast_enabled):
                if enable_tiling:
                    self.vae.enable_tiling()
                if enable_vae_sp:
                    self.vae.enable_parallel()
                image = self.vae.decode(latents,
                                        return_dict=False,
                                        generator=generator)[0]

            if expand_temporal_dim or image.shape[2] == 1:
                image = image.squeeze(2)

        else:
            image = latents

        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().float()

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return image

        return HunyuanVideoPipelineOutput(videos=image)
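Note: the guidance-rescale branch above relies on `rescale_noise_cfg`, whose definition lies outside this excerpt (it is the standard diffusers helper, per Sec. 3.4 of arXiv:2305.08891). A minimal sketch for reference; treat it as illustrative rather than the exact vendored code:

def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    # Match the CFG output's per-sample std to that of the text-conditioned
    # prediction, then blend with the unrescaled output to avoid over-flattening.
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    return guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg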
fastvideo/models/hunyuan/diffusion/schedulers/__init__.py
ADDED
@@ -0,0 +1,2 @@
# ruff: noqa: F401
from .scheduling_flow_match_discrete import FlowMatchDiscreteScheduler
fastvideo/models/hunyuan/diffusion/schedulers/scheduling_flow_match_discrete.py
ADDED
@@ -0,0 +1,248 @@
# Copyright 2024 Stability AI, Katherine Crowson and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Modified from diffusers==0.29.2
#
# ==============================================================================

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, logging

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
class FlowMatchDiscreteSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's `step` function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
    """

    prev_sample: torch.FloatTensor


class FlowMatchDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """
    Euler scheduler.

    This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
    methods the library implements for all schedulers such as loading and saving.

    Args:
        num_train_timesteps (`int`, defaults to 1000):
            The number of diffusion steps to train the model.
        shift (`float`, defaults to 1.0):
            The shift value for the timestep schedule.
        reverse (`bool`, defaults to `True`):
            Whether to reverse the timestep schedule.
        solver (`str`, defaults to `"euler"`):
            The solver to use; only `"euler"` is supported.
        n_tokens (`int`, *optional*):
            Number of tokens in the input sequence (stored in the config; not used by this scheduler).
    """

    _compatibles = []
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        shift: float = 1.0,
        reverse: bool = True,
        solver: str = "euler",
        n_tokens: Optional[int] = None,
    ):
        sigmas = torch.linspace(1, 0, num_train_timesteps + 1)

        if not reverse:
            sigmas = sigmas.flip(0)

        self.sigmas = sigmas
        # the value fed to model
        self.timesteps = (sigmas[:-1] *
                          num_train_timesteps).to(dtype=torch.float32)

        self._step_index = None
        self._begin_index = None

        self.supported_solver = ["euler"]
        if solver not in self.supported_solver:
            raise ValueError(
                f"Solver {solver} not supported. Supported solvers: {self.supported_solver}"
            )

    @property
    def step_index(self):
        """
        The index counter for current timestep. It will increase 1 after each scheduler step.
        """
        return self._step_index

    @property
    def begin_index(self):
        """
        The index for the first timestep. It should be set from pipeline with `set_begin_index` method.
        """
        return self._begin_index

    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
    def set_begin_index(self, begin_index: int = 0):
        """
        Sets the begin index for the scheduler. This function should be run from pipeline before the inference.

        Args:
            begin_index (`int`):
                The begin index for the scheduler.
        """
        self._begin_index = begin_index

    def _sigma_to_t(self, sigma):
        return sigma * self.config.num_train_timesteps

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        n_tokens: int = None,
    ):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).

        Args:
            num_inference_steps (`int`):
                The number of diffusion steps used when generating samples with a pre-trained model.
            device (`str` or `torch.device`, *optional*):
                The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
            n_tokens (`int`, *optional*):
                Number of tokens in the input sequence.
        """
        self.num_inference_steps = num_inference_steps

        sigmas = torch.linspace(1, 0, num_inference_steps + 1)
        sigmas = self.sd3_time_shift(sigmas)

        if not self.config.reverse:
            sigmas = 1 - sigmas

        self.sigmas = sigmas
        self.timesteps = (sigmas[:-1] * self.config.num_train_timesteps).to(
            dtype=torch.float32, device=device)

        # Reset step index
        self._step_index = None

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        pos = 1 if len(indices) > 1 else 0

        return indices[pos].item()

    def _init_step_index(self, timestep):
        if self.begin_index is None:
            if isinstance(timestep, torch.Tensor):
                timestep = timestep.to(self.timesteps.device)
            self._step_index = self.index_for_timestep(timestep)
        else:
            self._step_index = self._begin_index

    def scale_model_input(self,
                          sample: torch.Tensor,
                          timestep: Optional[int] = None) -> torch.Tensor:
        return sample

    def sd3_time_shift(self, t: torch.Tensor):
        return (self.config.shift * t) / (1 + (self.config.shift - 1) * t)

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[FlowMatchDiscreteSchedulerOutput, Tuple]:
        """
        Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.FloatTensor`):
                The direct output from learned diffusion model.
            timestep (`float`):
                The current discrete timestep in the diffusion chain.
            sample (`torch.FloatTensor`):
                A current instance of a sample created by the diffusion process.
            return_dict (`bool`):
                Whether or not to return a [`FlowMatchDiscreteSchedulerOutput`] or tuple.

        Returns:
            [`FlowMatchDiscreteSchedulerOutput`] or `tuple`:
                If return_dict is `True`, [`FlowMatchDiscreteSchedulerOutput`] is returned, otherwise a tuple is
                returned where the first element is the sample tensor.
        """

        if (isinstance(timestep, int) or isinstance(timestep, torch.IntTensor)
                or isinstance(timestep, torch.LongTensor)):
            raise ValueError(
                "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
                " `FlowMatchDiscreteScheduler.step()` is not supported. Make sure to pass"
                " one of the `scheduler.timesteps` as a timestep.")

        if self.step_index is None:
            self._init_step_index(timestep)

        # Upcast to avoid precision issues when computing prev_sample
        sample = sample.to(torch.float32)

        dt = self.sigmas[self.step_index + 1] - self.sigmas[self.step_index]

        if self.config.solver == "euler":
            prev_sample = sample + model_output.to(torch.float32) * dt
        else:
            raise ValueError(
                f"Solver {self.config.solver} not supported. Supported solvers: {self.supported_solver}"
            )

        # upon completion increase step index by one
        self._step_index += 1

        if not return_dict:
            return (prev_sample, )

        return FlowMatchDiscreteSchedulerOutput(prev_sample=prev_sample)

    def __len__(self):
        return self.config.num_train_timesteps
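Note: `sd3_time_shift` warps the sigma grid as sigma -> shift * sigma / (1 + (shift - 1) * sigma), so a shift greater than 1 concentrates denoising steps near sigma = 1. A minimal usage sketch of the scheduler; the shift value and latent shape below are illustrative assumptions, not values from this diff, and the model call is stubbed out:

import torch
from fastvideo.models.hunyuan.diffusion.schedulers import FlowMatchDiscreteScheduler

scheduler = FlowMatchDiscreteScheduler(shift=7.0)  # illustrative shift
scheduler.set_timesteps(num_inference_steps=50)

latents = torch.randn(1, 16, 33, 24, 42)  # illustrative (b, c, f, h, w) latent
for t in scheduler.timesteps:
    velocity = torch.zeros_like(latents)  # stand-in for the transformer's prediction
    latents = scheduler.step(velocity, t, latents, return_dict=False)[0]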
fastvideo/models/hunyuan/modules/__init__.py
ADDED
@@ -0,0 +1,25 @@
from .models import HUNYUAN_VIDEO_CONFIG, HYVideoDiffusionTransformer


def load_model(args, in_channels, out_channels, factor_kwargs):
    """load hunyuan video model

    Args:
        args (dict): model args
        in_channels (int): input channels number
        out_channels (int): output channels number
        factor_kwargs (dict): factor kwargs

    Returns:
        model (nn.Module): The hunyuan video model
    """
    if args.model in HUNYUAN_VIDEO_CONFIG.keys():
        model = HYVideoDiffusionTransformer(
            in_channels=in_channels,
            out_channels=out_channels,
            **HUNYUAN_VIDEO_CONFIG[args.model],
            **factor_kwargs,
        )
        return model
    else:
        raise NotImplementedError(f"Unsupported model: {args.model}")
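Note: `load_model` only needs an object exposing a `.model` attribute that names a key in `HUNYUAN_VIDEO_CONFIG`. A hypothetical call; the config key and channel counts are assumptions for illustration, not values confirmed by this diff:

from types import SimpleNamespace

from fastvideo.models.hunyuan.modules import load_model

args = SimpleNamespace(model="HYVideo-T/2")  # assumed config key
model = load_model(
    args,
    in_channels=16,   # illustrative latent channel count
    out_channels=16,
    factor_kwargs={"device": "cpu", "dtype": None},  # forwarded to the transformer
)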
fastvideo/models/hunyuan/modules/__pycache__/activation_layers.cpython-310.pyc
ADDED
Binary file (899 Bytes).
fastvideo/models/hunyuan/modules/__pycache__/activation_layers.cpython-312.pyc
ADDED
Binary file (1.22 kB).
fastvideo/models/hunyuan/modules/__pycache__/attenion.cpython-312.pyc
ADDED
Binary file (3.63 kB).
fastvideo/models/hunyuan/modules/__pycache__/embed_layers.cpython-310.pyc
ADDED
Binary file (4.61 kB).
fastvideo/models/hunyuan/modules/__pycache__/mlp_layers.cpython-310.pyc
ADDED
Binary file (3.53 kB).
fastvideo/models/hunyuan/modules/__pycache__/mlp_layers.cpython-312.pyc
ADDED
Binary file (5.98 kB).
fastvideo/models/hunyuan/modules/__pycache__/modulate_layers.cpython-310.pyc
ADDED
Binary file (4.66 kB).
fastvideo/models/hunyuan/modules/__pycache__/modulate_layers.cpython-312.pyc
ADDED
Binary file (6.27 kB).
fastvideo/models/hunyuan/modules/__pycache__/norm_layers.cpython-310.pyc
ADDED
Binary file (2.5 kB).
fastvideo/models/hunyuan/modules/__pycache__/posemb_layers.cpython-312.pyc
ADDED
Binary file (15.3 kB).
fastvideo/models/hunyuan/modules/__pycache__/token_refiner.cpython-310.pyc
ADDED
Binary file (5.77 kB).
fastvideo/models/hunyuan/modules/activation_layers.py
ADDED
@@ -0,0 +1,23 @@
import torch.nn as nn


def get_activation_layer(act_type):
    """get activation layer

    Args:
        act_type (str): the activation type

    Returns:
        Callable[[], nn.Module]: a factory that constructs the activation layer
    """
    if act_type == "gelu":
        return lambda: nn.GELU()
    elif act_type == "gelu_tanh":
        # Approximate `tanh` requires torch >= 1.13
        return lambda: nn.GELU(approximate="tanh")
    elif act_type == "relu":
        return nn.ReLU
    elif act_type == "silu":
        return nn.SiLU
    else:
        raise ValueError(f"Unknown activation type: {act_type}")
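Note: the function returns a factory rather than a module instance, so each call site builds its own layer:

from fastvideo.models.hunyuan.modules.activation_layers import get_activation_layer

act_layer = get_activation_layer("gelu_tanh")
act = act_layer()  # constructs a fresh nn.GELU(approximate="tanh")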
fastvideo/models/hunyuan/modules/attenion.py
ADDED
@@ -0,0 +1,90 @@
import torch
import torch.nn.functional as F

from fastvideo.models.flash_attn_no_pad import flash_attn_no_pad
from fastvideo.utils.communications import all_gather, all_to_all_4D
from fastvideo.utils.parallel_states import (get_sequence_parallel_state,
                                             nccl_info)


def attention(
    q,
    k,
    v,
    drop_rate=0,
    attn_mask=None,
    causal=False,
):

    qkv = torch.stack([q, k, v], dim=2)

    if attn_mask is not None and attn_mask.dtype != torch.bool:
        attn_mask = attn_mask.bool()

    x = flash_attn_no_pad(qkv,
                          attn_mask,
                          causal=causal,
                          dropout_p=drop_rate,
                          softmax_scale=None)

    b, s, a, d = x.shape
    out = x.reshape(b, s, -1)
    return out


def parallel_attention(q, k, v, img_q_len, img_kv_len, text_mask):
    # 1GPU torch.Size([1, 11264, 24, 128]) tensor([    0, 11275, 11520], device='cuda:0', dtype=torch.int32)
    # 2GPU torch.Size([1, 5632, 24, 128]) tensor([   0, 5643, 5888], device='cuda:0', dtype=torch.int32)
    query, encoder_query = q
    key, encoder_key = k
    value, encoder_value = v
    if get_sequence_parallel_state():
        # batch_size, seq_len, attn_heads, head_dim
        query = all_to_all_4D(query, scatter_dim=2, gather_dim=1)
        key = all_to_all_4D(key, scatter_dim=2, gather_dim=1)
        value = all_to_all_4D(value, scatter_dim=2, gather_dim=1)

        def shrink_head(encoder_state, dim):
            local_heads = encoder_state.shape[dim] // nccl_info.sp_size
            return encoder_state.narrow(
                dim, nccl_info.rank_within_group * local_heads, local_heads)

        encoder_query = shrink_head(encoder_query, dim=2)
        encoder_key = shrink_head(encoder_key, dim=2)
        encoder_value = shrink_head(encoder_value, dim=2)
        # [b, s, h, d]

    sequence_length = query.size(1)
    encoder_sequence_length = encoder_query.size(1)

    # Hint: please check encoder_query.shape
    query = torch.cat([query, encoder_query], dim=1)
    key = torch.cat([key, encoder_key], dim=1)
    value = torch.cat([value, encoder_value], dim=1)
    # B, S, 3, H, D
    qkv = torch.stack([query, key, value], dim=2)

    attn_mask = F.pad(text_mask, (sequence_length, 0), value=True)
    hidden_states = flash_attn_no_pad(qkv,
                                      attn_mask,
                                      causal=False,
                                      dropout_p=0.0,
                                      softmax_scale=None)

    hidden_states, encoder_hidden_states = hidden_states.split_with_sizes(
        (sequence_length, encoder_sequence_length), dim=1)
    if get_sequence_parallel_state():
        hidden_states = all_to_all_4D(hidden_states,
                                      scatter_dim=1,
                                      gather_dim=2)
        encoder_hidden_states = all_gather(encoder_hidden_states,
                                           dim=2).contiguous()
    hidden_states = hidden_states.to(query.dtype)
    encoder_hidden_states = encoder_hidden_states.to(query.dtype)

    attn = torch.cat([hidden_states, encoder_hidden_states], dim=1)

    b, s, a, d = attn.shape
    attn = attn.reshape(b, s, -1)

    return attn
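Note: in the sequence-parallel branch, `all_to_all_4D(..., scatter_dim=2, gather_dim=1)` trades a sequence shard for a head shard, while `shrink_head` slices the replicated text tokens down to this rank's heads. A shape walk-through with illustrative sizes (consistent with the 1-GPU/2-GPU comment above, but the numbers are assumptions):

# Before the all-to-all each rank holds a sequence shard with all heads;
# afterwards it holds the full sequence with a shard of the heads.
B, S, H, D, P = 1, 11264, 24, 128, 2   # batch, seq, heads, head_dim, sp_size
pre_all_to_all = (B, S // P, H, D)     # [1, 5632, 24, 128] per rank
post_all_to_all = (B, S, H // P, D)    # [1, 11264, 12, 128] per rank
assert (S // P) * H == S * (H // P)    # per-rank element count is preserved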
fastvideo/models/hunyuan/modules/embed_layers.py
ADDED
@@ -0,0 +1,163 @@
import math

import torch
import torch.nn as nn

from ..utils.helpers import to_2tuple


class PatchEmbed(nn.Module):
    """Image/Video to Patch Embedding

    Patch embedding using Conv3d

    A convolution based approach to patchifying video latents w/ embedding projection.

    Based on the impl in https://github.com/google-research/vision_transformer

    Hacked together by / Copyright 2020 Ross Wightman

    Remove the _assert function in forward function to be compatible with multi-resolution images.
    """

    def __init__(
        self,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        norm_layer=None,
        flatten=True,
        bias=True,
        dtype=None,
        device=None,
    ):
        factory_kwargs = {"dtype": dtype, "device": device}
        super().__init__()
        patch_size = to_2tuple(patch_size)
        self.patch_size = patch_size
        self.flatten = flatten

        self.proj = nn.Conv3d(
            in_chans,
            embed_dim,
            kernel_size=patch_size,
            stride=patch_size,
            bias=bias,
            **factory_kwargs,
        )
        nn.init.xavier_uniform_(
            self.proj.weight.view(self.proj.weight.size(0), -1))
        if bias:
            nn.init.zeros_(self.proj.bias)

        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        x = self.proj(x)
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # BCTHW -> BNC
        x = self.norm(x)
        return x


class TextProjection(nn.Module):
    """
    Projects text embeddings. Also handles dropout for classifier-free guidance.

    Adapted from https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/nets/PixArt_blocks.py
    """

    def __init__(self,
                 in_channels,
                 hidden_size,
                 act_layer,
                 dtype=None,
                 device=None):
        factory_kwargs = {"dtype": dtype, "device": device}
        super().__init__()
        self.linear_1 = nn.Linear(
            in_features=in_channels,
            out_features=hidden_size,
            bias=True,
            **factory_kwargs,
        )
        self.act_1 = act_layer()
        self.linear_2 = nn.Linear(
            in_features=hidden_size,
            out_features=hidden_size,
            bias=True,
            **factory_kwargs,
        )

    def forward(self, caption):
        hidden_states = self.linear_1(caption)
        hidden_states = self.act_1(hidden_states)
        hidden_states = self.linear_2(hidden_states)
        return hidden_states


def timestep_embedding(t, dim, max_period=10000):
    """
    Create sinusoidal timestep embeddings.

    Args:
        t (torch.Tensor): a 1-D Tensor of N indices, one per batch element. These may be fractional.
        dim (int): the dimension of the output.
        max_period (int): controls the minimum frequency of the embeddings.

    Returns:
        embedding (torch.Tensor): An (N, D) Tensor of positional embeddings.

    .. ref_link: https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
    """
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) *
                      torch.arange(start=0, end=half, dtype=torch.float32) /
                      half).to(device=t.device)
    args = t[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:
        embedding = torch.cat(
            [embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    return embedding


class TimestepEmbedder(nn.Module):
    """
    Embeds scalar timesteps into vector representations.
    """

    def __init__(
        self,
        hidden_size,
        act_layer,
        frequency_embedding_size=256,
        max_period=10000,
        out_size=None,
        dtype=None,
        device=None,
    ):
        factory_kwargs = {"dtype": dtype, "device": device}
        super().__init__()
        self.frequency_embedding_size = frequency_embedding_size
        self.max_period = max_period
        if out_size is None:
            out_size = hidden_size

        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size,
                      hidden_size,
                      bias=True,
                      **factory_kwargs),
            act_layer(),
            nn.Linear(hidden_size, out_size, bias=True, **factory_kwargs),
        )
        nn.init.normal_(self.mlp[0].weight, std=0.02)
        nn.init.normal_(self.mlp[2].weight, std=0.02)

    def forward(self, t):
        t_freq = timestep_embedding(t, self.frequency_embedding_size,
                                    self.max_period).type(
                                        self.mlp[0].weight.dtype)
        t_emb = self.mlp(t_freq)
        return t_emb
ADDED
|
@@ -0,0 +1,750 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models import ModelMixin
from einops import rearrange

from fastvideo.models.hunyuan.modules.posemb_layers import \
    get_nd_rotary_pos_embed
from fastvideo.utils.parallel_states import nccl_info

from .activation_layers import get_activation_layer
from .attenion import parallel_attention
from .embed_layers import PatchEmbed, TextProjection, TimestepEmbedder
from .mlp_layers import MLP, FinalLayer, MLPEmbedder
from .modulate_layers import ModulateDiT, apply_gate, modulate
from .norm_layers import get_norm_layer
from .posemb_layers import apply_rotary_emb
from .token_refiner import SingleTokenRefiner


class MMDoubleStreamBlock(nn.Module):
    """
    A multimodal dit block with separate modulation for
    text and image/video, see more details (SD3): https://arxiv.org/abs/2403.03206
    (Flux.1): https://github.com/black-forest-labs/flux
    """

    def __init__(
        self,
        hidden_size: int,
        heads_num: int,
        mlp_width_ratio: float,
        mlp_act_type: str = "gelu_tanh",
        qk_norm: bool = True,
        qk_norm_type: str = "rms",
        qkv_bias: bool = False,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
    ):
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()

        self.deterministic = False
        self.heads_num = heads_num
        head_dim = hidden_size // heads_num
        mlp_hidden_dim = int(hidden_size * mlp_width_ratio)

        self.img_mod = ModulateDiT(
            hidden_size,
            factor=6,
            act_layer=get_activation_layer("silu"),
            **factory_kwargs,
        )
        self.img_norm1 = nn.LayerNorm(hidden_size,
                                      elementwise_affine=False,
                                      eps=1e-6,
                                      **factory_kwargs)

        self.img_attn_qkv = nn.Linear(hidden_size,
                                      hidden_size * 3,
                                      bias=qkv_bias,
                                      **factory_kwargs)
        qk_norm_layer = get_norm_layer(qk_norm_type)
        self.img_attn_q_norm = (qk_norm_layer(
            head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs)
                                if qk_norm else nn.Identity())
        self.img_attn_k_norm = (qk_norm_layer(
            head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs)
                                if qk_norm else nn.Identity())
        self.img_attn_proj = nn.Linear(hidden_size,
                                       hidden_size,
                                       bias=qkv_bias,
                                       **factory_kwargs)

        self.img_norm2 = nn.LayerNorm(hidden_size,
                                      elementwise_affine=False,
                                      eps=1e-6,
                                      **factory_kwargs)
        self.img_mlp = MLP(
            hidden_size,
            mlp_hidden_dim,
            act_layer=get_activation_layer(mlp_act_type),
            bias=True,
            **factory_kwargs,
        )

        self.txt_mod = ModulateDiT(
            hidden_size,
            factor=6,
            act_layer=get_activation_layer("silu"),
            **factory_kwargs,
        )
        self.txt_norm1 = nn.LayerNorm(hidden_size,
                                      elementwise_affine=False,
                                      eps=1e-6,
                                      **factory_kwargs)

        self.txt_attn_qkv = nn.Linear(hidden_size,
                                      hidden_size * 3,
                                      bias=qkv_bias,
                                      **factory_kwargs)
        self.txt_attn_q_norm = (qk_norm_layer(
            head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs)
                                if qk_norm else nn.Identity())
        self.txt_attn_k_norm = (qk_norm_layer(
            head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs)
                                if qk_norm else nn.Identity())
        self.txt_attn_proj = nn.Linear(hidden_size,
                                       hidden_size,
                                       bias=qkv_bias,
                                       **factory_kwargs)

        self.txt_norm2 = nn.LayerNorm(hidden_size,
                                      elementwise_affine=False,
                                      eps=1e-6,
                                      **factory_kwargs)
        self.txt_mlp = MLP(
            hidden_size,
            mlp_hidden_dim,
            act_layer=get_activation_layer(mlp_act_type),
            bias=True,
            **factory_kwargs,
        )
        self.hybrid_seq_parallel_attn = None

    def enable_deterministic(self):
        self.deterministic = True

    def disable_deterministic(self):
        self.deterministic = False

    def forward(
        self,
        img: torch.Tensor,
        txt: torch.Tensor,
        vec: torch.Tensor,
        freqs_cis: tuple = None,
        text_mask: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        (
            img_mod1_shift,
            img_mod1_scale,
            img_mod1_gate,
            img_mod2_shift,
            img_mod2_scale,
            img_mod2_gate,
        ) = self.img_mod(vec).chunk(6, dim=-1)
        (
            txt_mod1_shift,
            txt_mod1_scale,
            txt_mod1_gate,
            txt_mod2_shift,
            txt_mod2_scale,
            txt_mod2_gate,
        ) = self.txt_mod(vec).chunk(6, dim=-1)

        # Prepare image for attention.
        img_modulated = self.img_norm1(img)
        img_modulated = modulate(img_modulated,
                                 shift=img_mod1_shift,
                                 scale=img_mod1_scale)
        img_qkv = self.img_attn_qkv(img_modulated)
        img_q, img_k, img_v = rearrange(img_qkv,
                                        "B L (K H D) -> K B L H D",
                                        K=3,
                                        H=self.heads_num)
        # Apply QK-Norm if needed
        img_q = self.img_attn_q_norm(img_q).to(img_v)
        img_k = self.img_attn_k_norm(img_k).to(img_v)

        # Apply RoPE if needed.
        if freqs_cis is not None:

            def shrink_head(encoder_state, dim):
                local_heads = encoder_state.shape[dim] // nccl_info.sp_size
                return encoder_state.narrow(
                    dim, nccl_info.rank_within_group * local_heads,
                    local_heads)

            freqs_cis = (
                shrink_head(freqs_cis[0], dim=0),
                shrink_head(freqs_cis[1], dim=0),
            )

            img_qq, img_kk = apply_rotary_emb(img_q,
                                              img_k,
                                              freqs_cis,
                                              head_first=False)
            assert (
                img_qq.shape == img_q.shape and img_kk.shape == img_k.shape
            ), f"img_qq: {img_qq.shape}, img_q: {img_q.shape}, img_kk: {img_kk.shape}, img_k: {img_k.shape}"
            img_q, img_k = img_qq, img_kk

        # Prepare txt for attention.
        txt_modulated = self.txt_norm1(txt)
        txt_modulated = modulate(txt_modulated,
                                 shift=txt_mod1_shift,
                                 scale=txt_mod1_scale)
        txt_qkv = self.txt_attn_qkv(txt_modulated)
        txt_q, txt_k, txt_v = rearrange(txt_qkv,
                                        "B L (K H D) -> K B L H D",
                                        K=3,
                                        H=self.heads_num)
        # Apply QK-Norm if needed.
        txt_q = self.txt_attn_q_norm(txt_q).to(txt_v)
        txt_k = self.txt_attn_k_norm(txt_k).to(txt_v)

        attn = parallel_attention(
            (img_q, txt_q),
            (img_k, txt_k),
            (img_v, txt_v),
            img_q_len=img_q.shape[1],
            img_kv_len=img_k.shape[1],
            text_mask=text_mask,
        )

        # attention computation end

        img_attn, txt_attn = attn[:, :img.shape[1]], attn[:, img.shape[1]:]

        # Calculate the img blocks.
        img = img + apply_gate(self.img_attn_proj(img_attn),
                               gate=img_mod1_gate)
        img = img + apply_gate(
            self.img_mlp(
                modulate(self.img_norm2(img),
                         shift=img_mod2_shift,
                         scale=img_mod2_scale)),
            gate=img_mod2_gate,
        )

        # Calculate the txt blocks.
        txt = txt + apply_gate(self.txt_attn_proj(txt_attn),
                               gate=txt_mod1_gate)
        txt = txt + apply_gate(
            self.txt_mlp(
                modulate(self.txt_norm2(txt),
                         shift=txt_mod2_shift,
                         scale=txt_mod2_scale)),
            gate=txt_mod2_gate,
        )

        return img, txt


class MMSingleStreamBlock(nn.Module):
    """
    A DiT block with parallel linear layers as described in
    https://arxiv.org/abs/2302.05442 and adapted modulation interface.
    Also refer to (SD3): https://arxiv.org/abs/2403.03206
    (Flux.1): https://github.com/black-forest-labs/flux
    """

    def __init__(
        self,
        hidden_size: int,
        heads_num: int,
        mlp_width_ratio: float = 4.0,
        mlp_act_type: str = "gelu_tanh",
        qk_norm: bool = True,
        qk_norm_type: str = "rms",
        qk_scale: float = None,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
    ):
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()

        self.deterministic = False
        self.hidden_size = hidden_size
        self.heads_num = heads_num
        head_dim = hidden_size // heads_num
        mlp_hidden_dim = int(hidden_size * mlp_width_ratio)
        self.mlp_hidden_dim = mlp_hidden_dim
        self.scale = qk_scale or head_dim**-0.5

        # qkv and mlp_in
        self.linear1 = nn.Linear(hidden_size, hidden_size * 3 + mlp_hidden_dim,
                                 **factory_kwargs)
        # proj and mlp_out
        self.linear2 = nn.Linear(hidden_size + mlp_hidden_dim, hidden_size,
                                 **factory_kwargs)

        qk_norm_layer = get_norm_layer(qk_norm_type)
        self.q_norm = (qk_norm_layer(
            head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs)
                       if qk_norm else nn.Identity())
        self.k_norm = (qk_norm_layer(
            head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs)
                       if qk_norm else nn.Identity())

        self.pre_norm = nn.LayerNorm(hidden_size,
                                     elementwise_affine=False,
                                     eps=1e-6,
                                     **factory_kwargs)

        self.mlp_act = get_activation_layer(mlp_act_type)()
        self.modulation = ModulateDiT(
            hidden_size,
            factor=3,
            act_layer=get_activation_layer("silu"),
            **factory_kwargs,
        )
        self.hybrid_seq_parallel_attn = None

    def enable_deterministic(self):
        self.deterministic = True

    def disable_deterministic(self):
        self.deterministic = False

    def forward(
        self,
        x: torch.Tensor,
        vec: torch.Tensor,
        txt_len: int,
        freqs_cis: Tuple[torch.Tensor, torch.Tensor] = None,
        text_mask: torch.Tensor = None,
    ) -> torch.Tensor:
        mod_shift, mod_scale, mod_gate = self.modulation(vec).chunk(3, dim=-1)
        x_mod = modulate(self.pre_norm(x), shift=mod_shift, scale=mod_scale)
        qkv, mlp = torch.split(self.linear1(x_mod),
                               [3 * self.hidden_size, self.mlp_hidden_dim],
                               dim=-1)

        q, k, v = rearrange(qkv,
                            "B L (K H D) -> K B L H D",
                            K=3,
                            H=self.heads_num)

        # Apply QK-Norm if needed.
        q = self.q_norm(q).to(v)
        k = self.k_norm(k).to(v)

        def shrink_head(encoder_state, dim):
            local_heads = encoder_state.shape[dim] // nccl_info.sp_size
            return encoder_state.narrow(
                dim, nccl_info.rank_within_group * local_heads, local_heads)

        freqs_cis = (shrink_head(freqs_cis[0],
                                 dim=0), shrink_head(freqs_cis[1], dim=0))

        img_q, txt_q = q[:, :-txt_len, :, :], q[:, -txt_len:, :, :]
        img_k, txt_k = k[:, :-txt_len, :, :], k[:, -txt_len:, :, :]
        img_v, txt_v = v[:, :-txt_len, :, :], v[:, -txt_len:, :, :]
        img_qq, img_kk = apply_rotary_emb(img_q,
                                          img_k,
                                          freqs_cis,
                                          head_first=False)
        assert (
            img_qq.shape == img_q.shape and img_kk.shape == img_k.shape
        ), f"img_qq: {img_qq.shape}, img_q: {img_q.shape}, img_kk: {img_kk.shape}, img_k: {img_k.shape}"
        img_q, img_k = img_qq, img_kk

        attn = parallel_attention(
            (img_q, txt_q),
            (img_k, txt_k),
            (img_v, txt_v),
            img_q_len=img_q.shape[1],
            img_kv_len=img_k.shape[1],
            text_mask=text_mask,
        )

        # attention computation end

        # Compute activation in mlp stream, cat again and run second linear layer.
        output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2))
        return x + apply_gate(output, gate=mod_gate)


class HYVideoDiffusionTransformer(ModelMixin, ConfigMixin):
    """
    HunyuanVideo Transformer backbone

    Inherited from ModelMixin and ConfigMixin for compatibility with diffusers' sampler StableDiffusionPipeline.

    Reference:
    [1] Flux.1: https://github.com/black-forest-labs/flux
    [2] MMDiT: http://arxiv.org/abs/2403.03206

    Parameters
    ----------
    patch_size: list
        The size of the patch.
    in_channels: int
        The number of input channels.
    out_channels: int
        The number of output channels.
    hidden_size: int
        The hidden size of the transformer backbone.
    heads_num: int
        The number of attention heads.
    mlp_width_ratio: float
        The ratio of the hidden size of the MLP in the transformer block.
    mlp_act_type: str
        The activation function of the MLP in the transformer block.
    mm_double_blocks_depth: int
        The number of transformer blocks in the double blocks.
    mm_single_blocks_depth: int
        The number of transformer blocks in the single blocks.
    rope_dim_list: list
        The dimension of the rotary embedding for t, h, w.
    qkv_bias: bool
        Whether to use bias in the qkv linear layer.
    qk_norm: bool
        Whether to use qk norm.
    qk_norm_type: str
        The type of qk norm.
    guidance_embed: bool
        Whether to use guidance embedding for distillation.
    text_projection: str
        The type of the text projection, default is single_refiner.
    use_attention_mask: bool
        Whether to use attention mask for text encoder.
    dtype: torch.dtype
        The dtype of the model.
    device: torch.device
        The device of the model.
    """

    @register_to_config
    def __init__(
        self,
        patch_size: list = [1, 2, 2],
        in_channels: int = 4,  # Should be VAE.config.latent_channels.
        out_channels: int = None,
        hidden_size: int = 3072,
        heads_num: int = 24,
        mlp_width_ratio: float = 4.0,
        mlp_act_type: str = "gelu_tanh",
        mm_double_blocks_depth: int = 20,
        mm_single_blocks_depth: int = 40,
        rope_dim_list: List[int] = [16, 56, 56],
        qkv_bias: bool = True,
        qk_norm: bool = True,
        qk_norm_type: str = "rms",
        guidance_embed: bool = False,  # For modulation.
        text_projection: str = "single_refiner",
        use_attention_mask: bool = True,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
        text_states_dim: int = 4096,
        text_states_dim_2: int = 768,
        rope_theta: int = 256,
    ):
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()

        self.patch_size = patch_size
        self.in_channels = in_channels
        self.out_channels = in_channels if out_channels is None else out_channels
        self.unpatchify_channels = self.out_channels
        self.guidance_embed = guidance_embed
        self.rope_dim_list = rope_dim_list
        self.rope_theta = rope_theta
        # Text projection. Default to linear projection.
        # Alternative: TokenRefiner. See more details (LI-DiT): http://arxiv.org/abs/2406.11831
        self.use_attention_mask = use_attention_mask
        self.text_projection = text_projection

        if hidden_size % heads_num != 0:
            raise ValueError(
                f"Hidden size {hidden_size} must be divisible by heads_num {heads_num}"
            )
        pe_dim = hidden_size // heads_num
        if sum(rope_dim_list) != pe_dim:
            raise ValueError(
                f"Got {rope_dim_list} but expected positional dim {pe_dim}")
        self.hidden_size = hidden_size
        self.heads_num = heads_num

        # image projection
        self.img_in = PatchEmbed(self.patch_size, self.in_channels,
                                 self.hidden_size, **factory_kwargs)

        # text projection
        if self.text_projection == "linear":
            self.txt_in = TextProjection(
                self.config.text_states_dim,
                self.hidden_size,
                get_activation_layer("silu"),
                **factory_kwargs,
            )
        elif self.text_projection == "single_refiner":
            self.txt_in = SingleTokenRefiner(
                self.config.text_states_dim,
                hidden_size,
                heads_num,
                depth=2,
                **factory_kwargs,
            )
        else:
            raise NotImplementedError(
                f"Unsupported text_projection: {self.text_projection}")

        # time modulation
        self.time_in = TimestepEmbedder(self.hidden_size,
                                        get_activation_layer("silu"),
                                        **factory_kwargs)

        # text modulation
        self.vector_in = MLPEmbedder(self.config.text_states_dim_2,
                                     self.hidden_size, **factory_kwargs)

        # guidance modulation
        self.guidance_in = (TimestepEmbedder(
            self.hidden_size, get_activation_layer("silu"), **factory_kwargs)
                            if guidance_embed else None)

        # double blocks
        self.double_blocks = nn.ModuleList([
            MMDoubleStreamBlock(
                self.hidden_size,
                self.heads_num,
                mlp_width_ratio=mlp_width_ratio,
                mlp_act_type=mlp_act_type,
                qk_norm=qk_norm,
                qk_norm_type=qk_norm_type,
                qkv_bias=qkv_bias,
                **factory_kwargs,
            ) for _ in range(mm_double_blocks_depth)
        ])

        # single blocks
        self.single_blocks = nn.ModuleList([
            MMSingleStreamBlock(
                self.hidden_size,
                self.heads_num,
                mlp_width_ratio=mlp_width_ratio,
                mlp_act_type=mlp_act_type,
                qk_norm=qk_norm,
                qk_norm_type=qk_norm_type,
                **factory_kwargs,
            ) for _ in range(mm_single_blocks_depth)
        ])

        self.final_layer = FinalLayer(
            self.hidden_size,
            self.patch_size,
            self.out_channels,
            get_activation_layer("silu"),
            **factory_kwargs,
        )

    def enable_deterministic(self):
        for block in self.double_blocks:
            block.enable_deterministic()
        for block in self.single_blocks:
            block.enable_deterministic()

    def disable_deterministic(self):
        for block in self.double_blocks:
            block.disable_deterministic()
        for block in self.single_blocks:
            block.disable_deterministic()

    def get_rotary_pos_embed(self, rope_sizes):
        target_ndim = 3

        head_dim = self.hidden_size // self.heads_num
        rope_dim_list = self.rope_dim_list
        if rope_dim_list is None:
            rope_dim_list = [
                head_dim // target_ndim for _ in range(target_ndim)
            ]
        assert (
            sum(rope_dim_list) == head_dim
        ), "sum(rope_dim_list) should equal to head_dim of attention layer"
        freqs_cos, freqs_sin = get_nd_rotary_pos_embed(
            rope_dim_list,
            rope_sizes,
            theta=self.rope_theta,
            use_real=True,
            theta_rescale_factor=1,
        )
        return freqs_cos, freqs_sin

    # Former forward signature, kept for reference:
    # x: torch.Tensor,
    # t: torch.Tensor, # Should be in range(0, 1000).
    # text_states: torch.Tensor = None,
    # text_mask: torch.Tensor = None, # Now we don't use it.
    # text_states_2: Optional[torch.Tensor] = None, # Text embedding for modulation.
    # guidance: torch.Tensor = None, # Guidance for modulation, should be cfg_scale x 1000.
    # return_dict: bool = True,

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        timestep: torch.LongTensor,
        encoder_attention_mask: torch.Tensor,
        output_features=False,
        output_features_stride=8,
        attention_kwargs: Optional[Dict[str, Any]] = None,
        return_dict: bool = False,
        guidance=None,
    ) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:
        if guidance is None:
            guidance = torch.tensor([6016.0],
                                    device=hidden_states.device,
                                    dtype=torch.bfloat16)
        img = x = hidden_states
        text_mask = encoder_attention_mask
        t = timestep
        txt = encoder_hidden_states[:, 1:]
        text_states_2 = encoder_hidden_states[:, 0, :self.config.text_states_dim_2]
        _, _, ot, oh, ow = x.shape  # codespell:ignore
        tt, th, tw = (
            ot // self.patch_size[0],  # codespell:ignore
            oh // self.patch_size[1],  # codespell:ignore
            ow // self.patch_size[2],  # codespell:ignore
        )
        original_tt = nccl_info.sp_size * tt
        freqs_cos, freqs_sin = self.get_rotary_pos_embed((original_tt, th, tw))
        # Prepare modulation vectors.
        vec = self.time_in(t)

        # text modulation
        vec = vec + self.vector_in(text_states_2)

        # guidance modulation
        if self.guidance_embed:
            if guidance is None:
                raise ValueError(
                    "Didn't get guidance strength for guidance distilled model."
                )

            # our timestep_embedding is merged into guidance_in(TimestepEmbedder)
            vec = vec + self.guidance_in(guidance)

        # Embed image and text.
        img = self.img_in(img)
        if self.text_projection == "linear":
            txt = self.txt_in(txt)
        elif self.text_projection == "single_refiner":
            txt = self.txt_in(txt, t,
                              text_mask if self.use_attention_mask else None)
        else:
            raise NotImplementedError(
                f"Unsupported text_projection: {self.text_projection}")

        txt_seq_len = txt.shape[1]
        img_seq_len = img.shape[1]

        freqs_cis = (freqs_cos, freqs_sin) if freqs_cos is not None else None
        # --------------------- Pass through DiT blocks ------------------------
        for _, block in enumerate(self.double_blocks):
            double_block_args = [img, txt, vec, freqs_cis, text_mask]

            img, txt = block(*double_block_args)

        # Merge txt and img to pass through single stream blocks.
        x = torch.cat((img, txt), 1)
        if output_features:
            features_list = []
        if len(self.single_blocks) > 0:
            for index, block in enumerate(self.single_blocks):
                single_block_args = [
                    x,
                    vec,
                    txt_seq_len,
                    (freqs_cos, freqs_sin),
                    text_mask,
                ]

                x = block(*single_block_args)
                if output_features and index % output_features_stride == 0:
                    features_list.append(x[:, :img_seq_len, ...])

        img = x[:, :img_seq_len, ...]

        # ---------------------------- Final layer ------------------------------
        img = self.final_layer(img,
                               vec)  # (N, T, patch_size ** 2 * out_channels)

        img = self.unpatchify(img, tt, th, tw)
        assert not return_dict, "return_dict is not supported."
        if output_features:
            features_list = torch.stack(features_list, dim=0)
        else:
            features_list = None
        return (img, features_list)

    def unpatchify(self, x, t, h, w):
        """
        x: (N, T, patch_size**2 * C)
        imgs: (N, H, W, C)
        """
        c = self.unpatchify_channels
        pt, ph, pw = self.patch_size
        assert t * h * w == x.shape[1]

        x = x.reshape(shape=(x.shape[0], t, h, w, c, pt, ph, pw))
        x = torch.einsum("nthwcopq->nctohpwq", x)
        imgs = x.reshape(shape=(x.shape[0], c, t * pt, h * ph, w * pw))

        return imgs

    def params_count(self):
        counts = {
            "double":
            sum([
                sum(p.numel() for p in block.img_attn_qkv.parameters()) +
                sum(p.numel() for p in block.img_attn_proj.parameters()) +
                sum(p.numel() for p in block.img_mlp.parameters()) +
                sum(p.numel() for p in block.txt_attn_qkv.parameters()) +
                sum(p.numel() for p in block.txt_attn_proj.parameters()) +
                sum(p.numel() for p in block.txt_mlp.parameters())
                for block in self.double_blocks
            ]),
            "single":
            sum([
+
sum(p.numel() for p in block.linear1.parameters()) +
|
| 718 |
+
sum(p.numel() for p in block.linear2.parameters())
|
| 719 |
+
for block in self.single_blocks
|
| 720 |
+
]),
|
| 721 |
+
"total":
|
| 722 |
+
sum(p.numel() for p in self.parameters()),
|
| 723 |
+
}
|
| 724 |
+
counts["attn+mlp"] = counts["double"] + counts["single"]
|
| 725 |
+
return counts
|
| 726 |
+
|
| 727 |
+
|
| 728 |
+
#################################################################################
|
| 729 |
+
# HunyuanVideo Configs #
|
| 730 |
+
#################################################################################
|
| 731 |
+
|
| 732 |
+
HUNYUAN_VIDEO_CONFIG = {
|
| 733 |
+
"HYVideo-T/2": {
|
| 734 |
+
"mm_double_blocks_depth": 20,
|
| 735 |
+
"mm_single_blocks_depth": 40,
|
| 736 |
+
"rope_dim_list": [16, 56, 56],
|
| 737 |
+
"hidden_size": 3072,
|
| 738 |
+
"heads_num": 24,
|
| 739 |
+
"mlp_width_ratio": 4,
|
| 740 |
+
},
|
| 741 |
+
"HYVideo-T/2-cfgdistill": {
|
| 742 |
+
"mm_double_blocks_depth": 20,
|
| 743 |
+
"mm_single_blocks_depth": 40,
|
| 744 |
+
"rope_dim_list": [16, 56, 56],
|
| 745 |
+
"hidden_size": 3072,
|
| 746 |
+
"heads_num": 24,
|
| 747 |
+
"mlp_width_ratio": 4,
|
| 748 |
+
"guidance_embed": True,
|
| 749 |
+
},
|
| 750 |
+
}
|
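As a quick orientation (not part of the uploaded diff), the sketch below shows how the forward pass above turns a latent clip into a patch-token grid and how the "HYVideo-T/2" config splits the attention head dimension across the three RoPE axes. The latent shape is a made-up example; the real one comes from the VAE.

# Illustrative sketch only; the latent shape (1, 16, 16, 90, 160) and patch_size
# (1, 2, 2) are assumptions for the example, not values from this diff.
hidden_size, heads_num = 3072, 24
rope_dim_list = [16, 56, 56]
patch_size = (1, 2, 2)
latent_t, latent_h, latent_w = 16, 90, 160

head_dim = hidden_size // heads_num   # 128
assert sum(rope_dim_list) == head_dim  # same check as get_rotary_pos_embed

tt = latent_t // patch_size[0]  # temporal patches
th = latent_h // patch_size[1]  # height patches
tw = latent_w // patch_size[2]  # width patches
print(tt, th, tw, tt * th * tw)  # number of image tokens seen by the DiT blocks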
fastvideo/models/hunyuan/modules/modulate_layers.py
ADDED
@@ -0,0 +1,156 @@
from typing import Callable

import torch
import torch.nn as nn


class ModulateDiT(nn.Module):
    """Modulation layer for DiT."""

    def __init__(
        self,
        hidden_size: int,
        factor: int,
        act_layer: Callable,
        dtype=None,
        device=None,
    ):
        factory_kwargs = {"dtype": dtype, "device": device}
        super().__init__()
        self.act = act_layer()
        self.linear = nn.Linear(hidden_size,
                                factor * hidden_size,
                                bias=True,
                                **factory_kwargs)
        # Zero-initialize the modulation
        nn.init.zeros_(self.linear.weight)
        nn.init.zeros_(self.linear.bias)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.linear(self.act(x))


def modulate(x, shift=None, scale=None):
    """modulate by shift and scale

    Args:
        x (torch.Tensor): input tensor.
        shift (torch.Tensor, optional): shift tensor. Defaults to None.
        scale (torch.Tensor, optional): scale tensor. Defaults to None.

    Returns:
        torch.Tensor: the output tensor after modulate.
    """
    if scale is None and shift is None:
        return x
    elif shift is None:
        return x * (1 + scale.unsqueeze(1))
    elif scale is None:
        return x + shift.unsqueeze(1)
    else:
        return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)


def apply_gate(x, gate=None, tanh=False):
    """AI is creating summary for apply_gate

    Args:
        x (torch.Tensor): input tensor.
        gate (torch.Tensor, optional): gate tensor. Defaults to None.
        tanh (bool, optional): whether to use tanh function. Defaults to False.

    Returns:
        torch.Tensor: the output tensor after apply gate.
    """
    if gate is None:
        return x
    if tanh:
        return x * gate.unsqueeze(1).tanh()
    else:
        return x * gate.unsqueeze(1)


def ckpt_wrapper(module):

    def ckpt_forward(*inputs):
        outputs = module(*inputs)
        return outputs

    return ckpt_forward


class RMSNorm(nn.Module):

    def __init__(
        self,
        dim: int,
        elementwise_affine=True,
        eps: float = 1e-6,
        device=None,
        dtype=None,
    ):
        """
        Initialize the RMSNorm normalization layer.

        Args:
            dim (int): The dimension of the input tensor.
            eps (float, optional): A small value added to the denominator for numerical stability. Default is 1e-6.

        Attributes:
            eps (float): A small value added to the denominator for numerical stability.
            weight (nn.Parameter): Learnable scaling parameter.

        """
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.eps = eps
        if elementwise_affine:
            self.weight = nn.Parameter(torch.ones(dim, **factory_kwargs))

    def _norm(self, x):
        """
        Apply the RMSNorm normalization to the input tensor.

        Args:
            x (torch.Tensor): The input tensor.

        Returns:
            torch.Tensor: The normalized tensor.

        """
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        """
        Forward pass through the RMSNorm layer.

        Args:
            x (torch.Tensor): The input tensor.

        Returns:
            torch.Tensor: The output tensor after applying RMSNorm.

        """
        output = self._norm(x.float()).type_as(x)
        if hasattr(self, "weight"):
            output = output * self.weight
        return output


def get_norm_layer(norm_layer):
    """
    Get the normalization layer.

    Args:
        norm_layer (str): The type of normalization layer.

    Returns:
        norm_layer (nn.Module): The normalization layer.
    """
    if norm_layer == "layer":
        return nn.LayerNorm
    elif norm_layer == "rms":
        return RMSNorm
    else:
        raise NotImplementedError(
            f"Norm layer {norm_layer} is not implemented")
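A small usage sketch (not part of the uploaded diff) of modulate and apply_gate from the file above: shift, scale, and gate are per-sample vectors that get broadcast over the token dimension. The tensor sizes are made up.

import torch

from fastvideo.models.hunyuan.modules.modulate_layers import apply_gate, modulate

x = torch.randn(2, 8, 16)      # (batch, tokens, hidden)
shift = torch.randn(2, 16)
scale = torch.randn(2, 16)
gate = torch.randn(2, 16)

y = modulate(x, shift=shift, scale=scale)  # x * (1 + scale) + shift, broadcast over tokens
z = apply_gate(y, gate=gate, tanh=True)    # y * tanh(gate), broadcast over tokens
print(y.shape, z.shape)                    # both torch.Size([2, 8, 16])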
fastvideo/models/hunyuan/modules/norm_layers.py
ADDED
@@ -0,0 +1,79 @@
import torch
import torch.nn as nn


class RMSNorm(nn.Module):

    def __init__(
        self,
        dim: int,
        elementwise_affine=True,
        eps: float = 1e-6,
        device=None,
        dtype=None,
    ):
        """
        Initialize the RMSNorm normalization layer.

        Args:
            dim (int): The dimension of the input tensor.
            eps (float, optional): A small value added to the denominator for numerical stability. Default is 1e-6.

        Attributes:
            eps (float): A small value added to the denominator for numerical stability.
            weight (nn.Parameter): Learnable scaling parameter.

        """
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.eps = eps
        if elementwise_affine:
            self.weight = nn.Parameter(torch.ones(dim, **factory_kwargs))

    def _norm(self, x):
        """
        Apply the RMSNorm normalization to the input tensor.

        Args:
            x (torch.Tensor): The input tensor.

        Returns:
            torch.Tensor: The normalized tensor.

        """
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        """
        Forward pass through the RMSNorm layer.

        Args:
            x (torch.Tensor): The input tensor.

        Returns:
            torch.Tensor: The output tensor after applying RMSNorm.

        """
        output = self._norm(x.float()).type_as(x)
        if hasattr(self, "weight"):
            output = output * self.weight
        return output


def get_norm_layer(norm_layer):
    """
    Get the normalization layer.

    Args:
        norm_layer (str): The type of normalization layer.

    Returns:
        norm_layer (nn.Module): The normalization layer.
    """
    if norm_layer == "layer":
        return nn.LayerNorm
    elif norm_layer == "rms":
        return RMSNorm
    else:
        raise NotImplementedError(
            f"Norm layer {norm_layer} is not implemented")
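A quick illustrative check (not part of the uploaded diff) that RMSNorm above rescales each token to roughly unit root-mean-square before applying the learned weight, which is initialized to ones.

import torch

from fastvideo.models.hunyuan.modules.norm_layers import RMSNorm

norm = RMSNorm(dim=64)
x = torch.randn(2, 10, 64)
y = norm(x)
rms = y.pow(2).mean(-1).sqrt()
print(rms.mean())  # close to 1.0 with the default (all-ones) weight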
fastvideo/models/hunyuan/modules/posemb_layers.py
ADDED
@@ -0,0 +1,314 @@
from typing import List, Tuple, Union

import torch


def _to_tuple(x, dim=2):
    if isinstance(x, int):
        return (x, ) * dim
    elif len(x) == dim:
        return x
    else:
        raise ValueError(f"Expected length {dim} or int, but got {x}")


def get_meshgrid_nd(start, *args, dim=2):
    """
    Get n-D meshgrid with start, stop and num.

    Args:
        start (int or tuple): If len(args) == 0, start is num; If len(args) == 1, start is start, args[0] is stop,
            step is 1; If len(args) == 2, start is start, args[0] is stop, args[1] is num. For n-dim, start/stop/num
            should be int or n-tuple. If n-tuple is provided, the meshgrid will be stacked following the dim order in
            n-tuples.
        *args: See above.
        dim (int): Dimension of the meshgrid. Defaults to 2.

    Returns:
        grid (np.ndarray): [dim, ...]
    """
    if len(args) == 0:
        # start is grid_size
        num = _to_tuple(start, dim=dim)
        start = (0, ) * dim
        stop = num
    elif len(args) == 1:
        # start is start, args[0] is stop, step is 1
        start = _to_tuple(start, dim=dim)
        stop = _to_tuple(args[0], dim=dim)
        num = [stop[i] - start[i] for i in range(dim)]
    elif len(args) == 2:
        # start is start, args[0] is stop, args[1] is num
        start = _to_tuple(start, dim=dim)  # Left-Top   eg: 12,0
        stop = _to_tuple(args[0], dim=dim)  # Right-Bottom   eg: 20,32
        num = _to_tuple(args[1], dim=dim)  # Target Size   eg: 32,124
    else:
        raise ValueError(f"len(args) should be 0, 1 or 2, but got {len(args)}")

    # PyTorch implement of np.linspace(start[i], stop[i], num[i], endpoint=False)
    axis_grid = []
    for i in range(dim):
        a, b, n = start[i], stop[i], num[i]
        g = torch.linspace(a, b, n + 1, dtype=torch.float32)[:n]
        axis_grid.append(g)
    grid = torch.meshgrid(*axis_grid, indexing="ij")  # dim x [W, H, D]
    grid = torch.stack(grid, dim=0)  # [dim, W, H, D]

    return grid


#################################################################################
#                  Rotary Positional Embedding Functions                       #
#################################################################################
# https://github.com/meta-llama/llama/blob/be327c427cc5e89cc1d3ab3d3fec4484df771245/llama/model.py#L80


def reshape_for_broadcast(
    freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]],
    x: torch.Tensor,
    head_first=False,
):
    """
    Reshape frequency tensor for broadcasting it with another tensor.

    This function reshapes the frequency tensor to have the same shape as the target tensor 'x'
    for the purpose of broadcasting the frequency tensor during element-wise operations.

    Notes:
        When using FlashMHAModified, head_first should be False.
        When using Attention, head_first should be True.

    Args:
        freqs_cis (Union[torch.Tensor, Tuple[torch.Tensor]]): Frequency tensor to be reshaped.
        x (torch.Tensor): Target tensor for broadcasting compatibility.
        head_first (bool): head dimension first (except batch dim) or not.

    Returns:
        torch.Tensor: Reshaped frequency tensor.

    Raises:
        AssertionError: If the frequency tensor doesn't match the expected shape.
        AssertionError: If the target tensor 'x' doesn't have the expected number of dimensions.
    """
    ndim = x.ndim
    assert 0 <= 1 < ndim

    if isinstance(freqs_cis, tuple):
        # freqs_cis: (cos, sin) in real space
        if head_first:
            assert freqs_cis[0].shape == (
                x.shape[-2],
                x.shape[-1],
            ), f"freqs_cis shape {freqs_cis[0].shape} does not match x shape {x.shape}"
            shape = [
                d if i == ndim - 2 or i == ndim - 1 else 1
                for i, d in enumerate(x.shape)
            ]
        else:
            assert freqs_cis[0].shape == (
                x.shape[1],
                x.shape[-1],
            ), f"freqs_cis shape {freqs_cis[0].shape} does not match x shape {x.shape}"
            shape = [
                d if i == 1 or i == ndim - 1 else 1
                for i, d in enumerate(x.shape)
            ]
        return freqs_cis[0].view(*shape), freqs_cis[1].view(*shape)
    else:
        # freqs_cis: values in complex space
        if head_first:
            assert freqs_cis.shape == (
                x.shape[-2],
                x.shape[-1],
            ), f"freqs_cis shape {freqs_cis.shape} does not match x shape {x.shape}"
            shape = [
                d if i == ndim - 2 or i == ndim - 1 else 1
                for i, d in enumerate(x.shape)
            ]
        else:
            assert freqs_cis.shape == (
                x.shape[1],
                x.shape[-1],
            ), f"freqs_cis shape {freqs_cis.shape} does not match x shape {x.shape}"
            shape = [
                d if i == 1 or i == ndim - 1 else 1
                for i, d in enumerate(x.shape)
            ]
        return freqs_cis.view(*shape)


def rotate_half(x):
    x_real, x_imag = (x.float().reshape(*x.shape[:-1], -1,
                                        2).unbind(-1))  # [B, S, H, D//2]
    return torch.stack([-x_imag, x_real], dim=-1).flatten(3)


def apply_rotary_emb(
    xq: torch.Tensor,
    xk: torch.Tensor,
    freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],
    head_first: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Apply rotary embeddings to input tensors using the given frequency tensor.

    This function applies rotary embeddings to the given query 'xq' and key 'xk' tensors using the provided
    frequency tensor 'freqs_cis'. The input tensors are reshaped as complex numbers, and the frequency tensor
    is reshaped for broadcasting compatibility. The resulting tensors contain rotary embeddings and are
    returned as real tensors.

    Args:
        xq (torch.Tensor): Query tensor to apply rotary embeddings. [B, S, H, D]
        xk (torch.Tensor): Key tensor to apply rotary embeddings. [B, S, H, D]
        freqs_cis (torch.Tensor or tuple): Precomputed frequency tensor for complex exponential.
        head_first (bool): head dimension first (except batch dim) or not.

    Returns:
        Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor and key tensor with rotary embeddings.

    """
    xk_out = None
    if isinstance(freqs_cis, tuple):
        cos, sin = reshape_for_broadcast(freqs_cis, xq, head_first)  # [S, D]
        cos, sin = cos.to(xq.device), sin.to(xq.device)
        # real * cos - imag * sin
        # imag * cos + real * sin
        xq_out = (xq.float() * cos + rotate_half(xq.float()) * sin).type_as(xq)
        xk_out = (xk.float() * cos + rotate_half(xk.float()) * sin).type_as(xk)
    else:
        # view_as_complex will pack [..., D/2, 2](real) to [..., D/2](complex)
        xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1,
                                                       2))  # [B, S, H, D//2]
        freqs_cis = reshape_for_broadcast(freqs_cis, xq_, head_first).to(
            xq.device)  # [S, D//2] --> [1, S, 1, D//2]
        # (real, imag) * (cos, sin) = (real * cos - imag * sin, imag * cos + real * sin)
        # view_as_real will expand [..., D/2](complex) to [..., D/2, 2](real)
        xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3).type_as(xq)
        xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1,
                                                       2))  # [B, S, H, D//2]
        xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3).type_as(xk)

    return xq_out, xk_out


def get_nd_rotary_pos_embed(
    rope_dim_list,
    start,
    *args,
    theta=10000.0,
    use_real=False,
    theta_rescale_factor: Union[float, List[float]] = 1.0,
    interpolation_factor: Union[float, List[float]] = 1.0,
):
    """
    This is a n-d version of precompute_freqs_cis, which is a RoPE for tokens with n-d structure.

    Args:
        rope_dim_list (list of int): Dimension of each rope. len(rope_dim_list) should equal to n.
            sum(rope_dim_list) should equal to head_dim of attention layer.
        start (int | tuple of int | list of int): If len(args) == 0, start is num; If len(args) == 1, start is start,
            args[0] is stop, step is 1; If len(args) == 2, start is start, args[0] is stop, args[1] is num.
        *args: See above.
        theta (float): Scaling factor for frequency computation. Defaults to 10000.0.
        use_real (bool): If True, return real part and imaginary part separately. Otherwise, return complex numbers.
            Some libraries such as TensorRT does not support complex64 data type. So it is useful to provide a real
            part and an imaginary part separately.
        theta_rescale_factor (float): Rescale factor for theta. Defaults to 1.0.

    Returns:
        pos_embed (torch.Tensor): [HW, D/2]
    """

    grid = get_meshgrid_nd(start, *args,
                           dim=len(rope_dim_list))  # [3, W, H, D] / [2, W, H]

    if isinstance(theta_rescale_factor, int) or isinstance(
            theta_rescale_factor, float):
        theta_rescale_factor = [theta_rescale_factor] * len(rope_dim_list)
    elif isinstance(theta_rescale_factor,
                    list) and len(theta_rescale_factor) == 1:
        theta_rescale_factor = [theta_rescale_factor[0]] * len(rope_dim_list)
    assert len(theta_rescale_factor) == len(
        rope_dim_list
    ), "len(theta_rescale_factor) should equal to len(rope_dim_list)"

    if isinstance(interpolation_factor, int) or isinstance(
            interpolation_factor, float):
        interpolation_factor = [interpolation_factor] * len(rope_dim_list)
    elif isinstance(interpolation_factor,
                    list) and len(interpolation_factor) == 1:
        interpolation_factor = [interpolation_factor[0]] * len(rope_dim_list)
    assert len(interpolation_factor) == len(
        rope_dim_list
    ), "len(interpolation_factor) should equal to len(rope_dim_list)"

    # use 1/ndim of dimensions to encode grid_axis
    embs = []
    for i in range(len(rope_dim_list)):
        emb = get_1d_rotary_pos_embed(
            rope_dim_list[i],
            grid[i].reshape(-1),
            theta,
            use_real=use_real,
            theta_rescale_factor=theta_rescale_factor[i],
            interpolation_factor=interpolation_factor[i],
        )  # 2 x [WHD, rope_dim_list[i]]
        embs.append(emb)

    if use_real:
        cos = torch.cat([emb[0] for emb in embs], dim=1)  # (WHD, D/2)
        sin = torch.cat([emb[1] for emb in embs], dim=1)  # (WHD, D/2)
        return cos, sin
    else:
        emb = torch.cat(embs, dim=1)  # (WHD, D/2)
        return emb


def get_1d_rotary_pos_embed(
    dim: int,
    pos: Union[torch.FloatTensor, int],
    theta: float = 10000.0,
    use_real: bool = False,
    theta_rescale_factor: float = 1.0,
    interpolation_factor: float = 1.0,
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
    """
    Precompute the frequency tensor for complex exponential (cis) with given dimensions.
    (Note: `cis` means `cos + i * sin`, where i is the imaginary unit.)

    This function calculates a frequency tensor with complex exponential using the given dimension 'dim'
    and the end index 'end'. The 'theta' parameter scales the frequencies.
    The returned tensor contains complex values in complex64 data type.

    Args:
        dim (int): Dimension of the frequency tensor.
        pos (int or torch.FloatTensor): Position indices for the frequency tensor. [S] or scalar
        theta (float, optional): Scaling factor for frequency computation. Defaults to 10000.0.
        use_real (bool, optional): If True, return real part and imaginary part separately.
            Otherwise, return complex numbers.
        theta_rescale_factor (float, optional): Rescale factor for theta. Defaults to 1.0.

    Returns:
        freqs_cis: Precomputed frequency tensor with complex exponential. [S, D/2]
        freqs_cos, freqs_sin: Precomputed frequency tensor with real and imaginary parts separately. [S, D]
    """
    if isinstance(pos, int):
        pos = torch.arange(pos).float()

    # proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning
    # has some connection to NTK literature
    if theta_rescale_factor != 1.0:
        theta *= theta_rescale_factor**(dim / (dim - 2))

    freqs = 1.0 / (theta**(torch.arange(0, dim, 2)[:(dim // 2)].float() / dim)
                   )  # [D/2]
    # assert interpolation_factor == 1.0, f"interpolation_factor: {interpolation_factor}"
    freqs = torch.outer(pos * interpolation_factor, freqs)  # [S, D/2]
    if use_real:
        freqs_cos = freqs.cos().repeat_interleave(2, dim=1)  # [S, D]
        freqs_sin = freqs.sin().repeat_interleave(2, dim=1)  # [S, D]
        return freqs_cos, freqs_sin
    else:
        freqs_cis = torch.polar(torch.ones_like(freqs),
                                freqs)  # complex64  # [S, D/2]
        return freqs_cis
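An illustrative sketch (not part of the uploaded diff) of building 3-D RoPE tables with get_nd_rotary_pos_embed and rotating query/key tensors with apply_rotary_emb. The patch-grid size and head count are made-up example values; the head dimension of 128 is split as [16, 56, 56], matching the DiT config earlier in this diff.

import torch

from fastvideo.models.hunyuan.modules.posemb_layers import (
    apply_rotary_emb, get_nd_rotary_pos_embed)

rope_sizes = (4, 6, 8)  # (T, H, W) patch grid, example values
cos, sin = get_nd_rotary_pos_embed([16, 56, 56], rope_sizes, use_real=True)
print(cos.shape)        # (4*6*8, 128): one row of cos terms per token

seq_len = rope_sizes[0] * rope_sizes[1] * rope_sizes[2]
q = torch.randn(1, seq_len, 24, 128)  # [B, S, H, D], head_first=False layout
k = torch.randn(1, seq_len, 24, 128)
q_rot, k_rot = apply_rotary_emb(q, k, (cos, sin))
print(q_rot.shape, k_rot.shape)        # same shapes as q and k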
fastvideo/models/hunyuan/modules/token_refiner.py
ADDED
@@ -0,0 +1,230 @@
from typing import Optional

import torch
import torch.nn as nn
from einops import rearrange

from .activation_layers import get_activation_layer
from .attenion import attention
from .embed_layers import TextProjection, TimestepEmbedder
from .mlp_layers import MLP
from .modulate_layers import apply_gate
from .norm_layers import get_norm_layer


class IndividualTokenRefinerBlock(nn.Module):

    def __init__(
        self,
        hidden_size,
        heads_num,
        mlp_width_ratio: str = 4.0,
        mlp_drop_rate: float = 0.0,
        act_type: str = "silu",
        qk_norm: bool = False,
        qk_norm_type: str = "layer",
        qkv_bias: bool = True,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
    ):
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.heads_num = heads_num
        head_dim = hidden_size // heads_num
        mlp_hidden_dim = int(hidden_size * mlp_width_ratio)

        self.norm1 = nn.LayerNorm(hidden_size,
                                  elementwise_affine=True,
                                  eps=1e-6,
                                  **factory_kwargs)
        self.self_attn_qkv = nn.Linear(hidden_size,
                                       hidden_size * 3,
                                       bias=qkv_bias,
                                       **factory_kwargs)
        qk_norm_layer = get_norm_layer(qk_norm_type)
        self.self_attn_q_norm = (qk_norm_layer(
            head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs)
                                 if qk_norm else nn.Identity())
        self.self_attn_k_norm = (qk_norm_layer(
            head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs)
                                 if qk_norm else nn.Identity())
        self.self_attn_proj = nn.Linear(hidden_size,
                                        hidden_size,
                                        bias=qkv_bias,
                                        **factory_kwargs)

        self.norm2 = nn.LayerNorm(hidden_size,
                                  elementwise_affine=True,
                                  eps=1e-6,
                                  **factory_kwargs)
        act_layer = get_activation_layer(act_type)
        self.mlp = MLP(
            in_channels=hidden_size,
            hidden_channels=mlp_hidden_dim,
            act_layer=act_layer,
            drop=mlp_drop_rate,
            **factory_kwargs,
        )

        self.adaLN_modulation = nn.Sequential(
            act_layer(),
            nn.Linear(hidden_size,
                      2 * hidden_size,
                      bias=True,
                      **factory_kwargs),
        )
        # Zero-initialize the modulation
        nn.init.zeros_(self.adaLN_modulation[1].weight)
        nn.init.zeros_(self.adaLN_modulation[1].bias)

    def forward(
        self,
        x: torch.Tensor,
        c: torch.
        Tensor,  # timestep_aware_representations + context_aware_representations
        attn_mask: torch.Tensor = None,
    ):
        gate_msa, gate_mlp = self.adaLN_modulation(c).chunk(2, dim=1)

        norm_x = self.norm1(x)
        qkv = self.self_attn_qkv(norm_x)
        q, k, v = rearrange(qkv,
                            "B L (K H D) -> K B L H D",
                            K=3,
                            H=self.heads_num)
        # Apply QK-Norm if needed
        q = self.self_attn_q_norm(q).to(v)
        k = self.self_attn_k_norm(k).to(v)

        # Self-Attention
        attn = attention(q, k, v, attn_mask=attn_mask)

        x = x + apply_gate(self.self_attn_proj(attn), gate_msa)

        # FFN Layer
        x = x + apply_gate(self.mlp(self.norm2(x)), gate_mlp)

        return x


class IndividualTokenRefiner(nn.Module):

    def __init__(
        self,
        hidden_size,
        heads_num,
        depth,
        mlp_width_ratio: float = 4.0,
        mlp_drop_rate: float = 0.0,
        act_type: str = "silu",
        qk_norm: bool = False,
        qk_norm_type: str = "layer",
        qkv_bias: bool = True,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
    ):
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.blocks = nn.ModuleList([
            IndividualTokenRefinerBlock(
                hidden_size=hidden_size,
                heads_num=heads_num,
                mlp_width_ratio=mlp_width_ratio,
                mlp_drop_rate=mlp_drop_rate,
                act_type=act_type,
                qk_norm=qk_norm,
                qk_norm_type=qk_norm_type,
                qkv_bias=qkv_bias,
                **factory_kwargs,
            ) for _ in range(depth)
        ])

    def forward(
        self,
        x: torch.Tensor,
        c: torch.LongTensor,
        mask: Optional[torch.Tensor] = None,
    ):
        mask = mask.clone().bool()
        # avoid attention weight become NaN
        mask[:, 0] = True
        for block in self.blocks:
            x = block(x, c, mask)
        return x


class SingleTokenRefiner(nn.Module):
    """
    A single token refiner block for llm text embedding refine.
    """

    def __init__(
        self,
        in_channels,
        hidden_size,
        heads_num,
        depth,
        mlp_width_ratio: float = 4.0,
        mlp_drop_rate: float = 0.0,
        act_type: str = "silu",
        qk_norm: bool = False,
        qk_norm_type: str = "layer",
        qkv_bias: bool = True,
        attn_mode: str = "torch",
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
    ):
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.attn_mode = attn_mode
        assert self.attn_mode == "torch", "Only support 'torch' mode for token refiner."

        self.input_embedder = nn.Linear(in_channels,
                                        hidden_size,
                                        bias=True,
                                        **factory_kwargs)

        act_layer = get_activation_layer(act_type)
        # Build timestep embedding layer
        self.t_embedder = TimestepEmbedder(hidden_size, act_layer,
                                           **factory_kwargs)
        # Build context embedding layer
        self.c_embedder = TextProjection(in_channels, hidden_size, act_layer,
                                         **factory_kwargs)

        self.individual_token_refiner = IndividualTokenRefiner(
            hidden_size=hidden_size,
            heads_num=heads_num,
            depth=depth,
            mlp_width_ratio=mlp_width_ratio,
            mlp_drop_rate=mlp_drop_rate,
            act_type=act_type,
            qk_norm=qk_norm,
            qk_norm_type=qk_norm_type,
            qkv_bias=qkv_bias,
            **factory_kwargs,
        )

    def forward(
        self,
        x: torch.Tensor,
        t: torch.LongTensor,
        mask: Optional[torch.LongTensor] = None,
    ):
        timestep_aware_representations = self.t_embedder(t)

        if mask is None:
            context_aware_representations = x.mean(dim=1)
        else:
            mask_float = mask.float().unsqueeze(-1)  # [b, s1, 1]
            context_aware_representations = (x * mask_float).sum(
                dim=1) / mask_float.sum(dim=1)
        context_aware_representations = self.c_embedder(
            context_aware_representations)
        c = timestep_aware_representations + context_aware_representations

        x = self.input_embedder(x)

        x = self.individual_token_refiner(x, c, mask)

        return x
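An illustrative sketch (not part of the uploaded diff) of refining LLM text embeddings with SingleTokenRefiner from the file above. The sizes are made-up example values; it expects x as [B, L, in_channels], a timestep tensor, and an optional 0/1 mask over text tokens, and it requires the sibling modules imported at the top of the file.

import torch

from fastvideo.models.hunyuan.modules.token_refiner import SingleTokenRefiner

refiner = SingleTokenRefiner(in_channels=4096, hidden_size=3072,
                             heads_num=24, depth=2)
x = torch.randn(1, 256, 4096)                # e.g. an LLM last_hidden_state
t = torch.tensor([500])                      # diffusion timestep
mask = torch.ones(1, 256, dtype=torch.long)  # all tokens valid
out = refiner(x, t, mask)
print(out.shape)                             # (1, 256, 3072)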
fastvideo/models/hunyuan/text_encoder/__init__.py
ADDED
|
@@ -0,0 +1,353 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
from typing import Optional, Tuple
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
from transformers import AutoModel, AutoTokenizer, CLIPTextModel, CLIPTokenizer
|
| 7 |
+
from transformers.utils import ModelOutput
|
| 8 |
+
|
| 9 |
+
from ..constants import PRECISION_TO_TYPE, TEXT_ENCODER_PATH, TOKENIZER_PATH
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def use_default(value, default):
|
| 13 |
+
return value if value is not None else default
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def load_text_encoder(
|
| 17 |
+
text_encoder_type,
|
| 18 |
+
text_encoder_precision=None,
|
| 19 |
+
text_encoder_path=None,
|
| 20 |
+
logger=None,
|
| 21 |
+
device=None,
|
| 22 |
+
):
|
| 23 |
+
if text_encoder_path is None:
|
| 24 |
+
text_encoder_path = TEXT_ENCODER_PATH[text_encoder_type]
|
| 25 |
+
if logger is not None:
|
| 26 |
+
logger.info(
|
| 27 |
+
f"Loading text encoder model ({text_encoder_type}) from: {text_encoder_path}"
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
if text_encoder_type == "clipL":
|
| 31 |
+
text_encoder = CLIPTextModel.from_pretrained(text_encoder_path)
|
| 32 |
+
text_encoder.final_layer_norm = text_encoder.text_model.final_layer_norm
|
| 33 |
+
elif text_encoder_type == "llm":
|
| 34 |
+
text_encoder = AutoModel.from_pretrained(text_encoder_path,
|
| 35 |
+
low_cpu_mem_usage=True)
|
| 36 |
+
text_encoder.final_layer_norm = text_encoder.norm
|
| 37 |
+
else:
|
| 38 |
+
raise ValueError(f"Unsupported text encoder type: {text_encoder_type}")
|
| 39 |
+
# from_pretrained will ensure that the model is in eval mode.
|
| 40 |
+
|
| 41 |
+
if text_encoder_precision is not None:
|
| 42 |
+
text_encoder = text_encoder.to(
|
| 43 |
+
dtype=PRECISION_TO_TYPE[text_encoder_precision])
|
| 44 |
+
|
| 45 |
+
text_encoder.requires_grad_(False)
|
| 46 |
+
|
| 47 |
+
if logger is not None:
|
| 48 |
+
logger.info(f"Text encoder to dtype: {text_encoder.dtype}")
|
| 49 |
+
|
| 50 |
+
if device is not None:
|
| 51 |
+
text_encoder = text_encoder.to(device)
|
| 52 |
+
|
| 53 |
+
return text_encoder, text_encoder_path
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def load_tokenizer(tokenizer_type,
|
| 57 |
+
tokenizer_path=None,
|
| 58 |
+
padding_side="right",
|
| 59 |
+
logger=None):
|
| 60 |
+
if tokenizer_path is None:
|
| 61 |
+
tokenizer_path = TOKENIZER_PATH[tokenizer_type]
|
| 62 |
+
if logger is not None:
|
| 63 |
+
logger.info(
|
| 64 |
+
f"Loading tokenizer ({tokenizer_type}) from: {tokenizer_path}")
|
| 65 |
+
|
| 66 |
+
if tokenizer_type == "clipL":
|
| 67 |
+
tokenizer = CLIPTokenizer.from_pretrained(tokenizer_path,
|
| 68 |
+
max_length=77)
|
| 69 |
+
elif tokenizer_type == "llm":
|
| 70 |
+
tokenizer = AutoTokenizer.from_pretrained(tokenizer_path,
|
| 71 |
+
padding_side=padding_side)
|
| 72 |
+
else:
|
| 73 |
+
raise ValueError(f"Unsupported tokenizer type: {tokenizer_type}")
|
| 74 |
+
|
| 75 |
+
return tokenizer, tokenizer_path
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
@dataclass
|
| 79 |
+
class TextEncoderModelOutput(ModelOutput):
|
| 80 |
+
"""
|
| 81 |
+
Base class for model's outputs that also contains a pooling of the last hidden states.
|
| 82 |
+
|
| 83 |
+
Args:
|
| 84 |
+
hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
| 85 |
+
Sequence of hidden-states at the output of the last layer of the model.
|
| 86 |
+
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 87 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
|
| 88 |
+
hidden_states_list (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed):
|
| 89 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
|
| 90 |
+
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
|
| 91 |
+
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
| 92 |
+
text_outputs (`list`, *optional*, returned when `return_texts=True` is passed):
|
| 93 |
+
List of decoded texts.
|
| 94 |
+
"""
|
| 95 |
+
|
| 96 |
+
hidden_state: torch.FloatTensor = None
|
| 97 |
+
attention_mask: Optional[torch.LongTensor] = None
|
| 98 |
+
hidden_states_list: Optional[Tuple[torch.FloatTensor, ...]] = None
|
| 99 |
+
text_outputs: Optional[list] = None
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
class TextEncoder(nn.Module):
|
| 103 |
+
|
| 104 |
+
def __init__(
|
| 105 |
+
self,
|
| 106 |
+
text_encoder_type: str,
|
| 107 |
+
max_length: int,
|
| 108 |
+
text_encoder_precision: Optional[str] = None,
|
| 109 |
+
text_encoder_path: Optional[str] = None,
|
| 110 |
+
tokenizer_type: Optional[str] = None,
|
| 111 |
+
tokenizer_path: Optional[str] = None,
|
| 112 |
+
output_key: Optional[str] = None,
|
| 113 |
+
use_attention_mask: bool = True,
|
| 114 |
+
input_max_length: Optional[int] = None,
|
| 115 |
+
prompt_template: Optional[dict] = None,
|
| 116 |
+
prompt_template_video: Optional[dict] = None,
|
| 117 |
+
hidden_state_skip_layer: Optional[int] = None,
|
| 118 |
+
apply_final_norm: bool = False,
|
| 119 |
+
reproduce: bool = False,
|
| 120 |
+
logger=None,
|
| 121 |
+
device=None,
|
| 122 |
+
):
|
| 123 |
+
super().__init__()
|
| 124 |
+
self.text_encoder_type = text_encoder_type
|
| 125 |
+
self.max_length = max_length
|
| 126 |
+
self.precision = text_encoder_precision
|
| 127 |
+
self.model_path = text_encoder_path
|
| 128 |
+
self.tokenizer_type = (tokenizer_type if tokenizer_type is not None
|
| 129 |
+
else text_encoder_type)
|
| 130 |
+
self.tokenizer_path = (tokenizer_path if tokenizer_path is not None
|
| 131 |
+
else text_encoder_path)
|
| 132 |
+
self.use_attention_mask = use_attention_mask
|
| 133 |
+
if prompt_template_video is not None:
|
| 134 |
+
assert (use_attention_mask is True
|
| 135 |
+
), "Attention mask is True required when training videos."
|
| 136 |
+
self.input_max_length = (input_max_length if input_max_length
|
| 137 |
+
is not None else max_length)
|
| 138 |
+
self.prompt_template = prompt_template
|
| 139 |
+
self.prompt_template_video = prompt_template_video
|
| 140 |
+
self.hidden_state_skip_layer = hidden_state_skip_layer
|
| 141 |
+
self.apply_final_norm = apply_final_norm
|
| 142 |
+
self.reproduce = reproduce
|
| 143 |
+
self.logger = logger
|
| 144 |
+
|
| 145 |
+
self.use_template = self.prompt_template is not None
|
| 146 |
+
if self.use_template:
|
| 147 |
+
assert (
|
| 148 |
+
isinstance(self.prompt_template, dict)
|
| 149 |
+
and "template" in self.prompt_template
|
| 150 |
+
), f"`prompt_template` must be a dictionary with a key 'template', got {self.prompt_template}"
|
| 151 |
+
assert "{}" in str(self.prompt_template["template"]), (
|
| 152 |
+
"`prompt_template['template']` must contain a placeholder `{}` for the input text, "
|
| 153 |
+
f"got {self.prompt_template['template']}")
|
| 154 |
+
|
| 155 |
+
self.use_video_template = self.prompt_template_video is not None
|
| 156 |
+
if self.use_video_template:
|
| 157 |
+
if self.prompt_template_video is not None:
|
| 158 |
+
assert (
|
| 159 |
+
isinstance(self.prompt_template_video, dict)
|
| 160 |
+
and "template" in self.prompt_template_video
|
| 161 |
+
), f"`prompt_template_video` must be a dictionary with a key 'template', got {self.prompt_template_video}"
|
| 162 |
+
assert "{}" in str(self.prompt_template_video["template"]), (
|
| 163 |
+
"`prompt_template_video['template']` must contain a placeholder `{}` for the input text, "
|
| 164 |
+
f"got {self.prompt_template_video['template']}")
|
| 165 |
+
|
| 166 |
+
if "t5" in text_encoder_type:
|
| 167 |
+
self.output_key = output_key or "last_hidden_state"
|
| 168 |
+
elif "clip" in text_encoder_type:
|
| 169 |
+
self.output_key = output_key or "pooler_output"
|
| 170 |
+
elif "llm" in text_encoder_type or "glm" in text_encoder_type:
|
| 171 |
+
self.output_key = output_key or "last_hidden_state"
|
| 172 |
+
else:
|
| 173 |
+
raise ValueError(
|
| 174 |
+
f"Unsupported text encoder type: {text_encoder_type}")
|
| 175 |
+
|
| 176 |
+
self.model, self.model_path = load_text_encoder(
|
| 177 |
+
text_encoder_type=self.text_encoder_type,
|
| 178 |
+
text_encoder_precision=self.precision,
|
| 179 |
+
text_encoder_path=self.model_path,
|
| 180 |
+
logger=self.logger,
|
| 181 |
+
device=device,
|
| 182 |
+
)
|
| 183 |
+
self.dtype = self.model.dtype
|
| 184 |
+
self.device = self.model.device
|
| 185 |
+
|
| 186 |
+
self.tokenizer, self.tokenizer_path = load_tokenizer(
|
| 187 |
+
tokenizer_type=self.tokenizer_type,
|
| 188 |
+
tokenizer_path=self.tokenizer_path,
|
| 189 |
+
padding_side="right",
|
| 190 |
+
logger=self.logger,
|
| 191 |
+
)
|
| 192 |
+
|
| 193 |
+
def __repr__(self):
|
| 194 |
+
return f"{self.text_encoder_type} ({self.precision} - {self.model_path})"
|
| 195 |
+
|
| 196 |
+
@staticmethod
|
| 197 |
+
def apply_text_to_template(text, template, prevent_empty_text=True):
|
| 198 |
+
"""
|
| 199 |
+
Apply text to template.
|
| 200 |
+
|
| 201 |
+
Args:
|
| 202 |
+
text (str): Input text.
|
| 203 |
+
template (str or list): Template string or list of chat conversation.
|
| 204 |
+
prevent_empty_text (bool): If True, we will prevent the user text from being empty
|
| 205 |
+
by adding a space. Defaults to True.
|
| 206 |
+
"""
|
| 207 |
+
if isinstance(template, str):
|
| 208 |
+
# Will send string to tokenizer. Used for llm
|
| 209 |
+
return template.format(text)
|
| 210 |
+
else:
|
| 211 |
+
raise TypeError(f"Unsupported template type: {type(template)}")
|
| 212 |
+
|
| 213 |
+
def text2tokens(self, text, data_type="image"):
|
| 214 |
+
"""
|
| 215 |
+
Tokenize the input text.
|
| 216 |
+
|
| 217 |
+
Args:
|
| 218 |
+
text (str or list): Input text.
|
| 219 |
+
"""
|
| 220 |
+
tokenize_input_type = "str"
|
| 221 |
+
if self.use_template:
|
| 222 |
+
if data_type == "image":
|
| 223 |
+
prompt_template = self.prompt_template["template"]
|
| 224 |
+
elif data_type == "video":
|
| 225 |
+
prompt_template = self.prompt_template_video["template"]
|
| 226 |
+
else:
|
| 227 |
+
raise ValueError(f"Unsupported data type: {data_type}")
|
| 228 |
+
if isinstance(text, (list, tuple)):
|
| 229 |
+
text = [
|
| 230 |
+
self.apply_text_to_template(one_text, prompt_template)
|
| 231 |
+
for one_text in text
|
| 232 |
+
]
|
| 233 |
+
if isinstance(text[0], list):
|
| 234 |
+
tokenize_input_type = "list"
|
| 235 |
+
elif isinstance(text, str):
|
| 236 |
+
text = self.apply_text_to_template(text, prompt_template)
|
| 237 |
+
if isinstance(text, list):
|
| 238 |
+
tokenize_input_type = "list"
|
| 239 |
+
else:
|
| 240 |
+
raise TypeError(f"Unsupported text type: {type(text)}")
|
| 241 |
+
|
| 242 |
+
kwargs = dict(
|
| 243 |
+
truncation=True,
|
| 244 |
+
max_length=self.max_length,
|
| 245 |
+
padding="max_length",
|
| 246 |
+
return_tensors="pt",
|
| 247 |
+
)
|
| 248 |
+
if tokenize_input_type == "str":
|
| 249 |
+
return self.tokenizer(
|
| 250 |
+
text,
|
| 251 |
+
return_length=False,
|
| 252 |
+
return_overflowing_tokens=False,
|
| 253 |
+
return_attention_mask=True,
|
| 254 |
+
**kwargs,
|
| 255 |
+
)
|
| 256 |
+
elif tokenize_input_type == "list":
|
| 257 |
+
return self.tokenizer.apply_chat_template(
|
| 258 |
+
text,
|
| 259 |
+
add_generation_prompt=True,
|
+                tokenize=True,
+                return_dict=True,
+                **kwargs,
+            )
+        else:
+            raise ValueError(
+                f"Unsupported tokenize_input_type: {tokenize_input_type}")
+
+    def encode(
+        self,
+        batch_encoding,
+        use_attention_mask=None,
+        output_hidden_states=False,
+        do_sample=None,
+        hidden_state_skip_layer=None,
+        return_texts=False,
+        data_type="image",
+        device=None,
+    ):
+        """
+        Args:
+            batch_encoding (dict): Batch encoding from tokenizer.
+            use_attention_mask (bool): Whether to use attention mask. If None, use self.use_attention_mask.
+                Defaults to None.
+            output_hidden_states (bool): Whether to output hidden states. If False, return the value of
+                self.output_key. If True, return the entire output. If self.hidden_state_skip_layer is set,
+                output_hidden_states will be set True. Defaults to False.
+            do_sample (bool): Whether to sample from the model. Used for Decoder-Only LLMs. Defaults to None.
+                When self.reproduce is False, do_sample defaults to True.
+            hidden_state_skip_layer (int): Number of layers to skip from the end when selecting the hidden
+                state; 0 means the last layer. If None, self.output_key will be used. Defaults to None.
+            return_texts (bool): Whether to return the decoded texts. Defaults to False.
+        """
+        device = self.model.device if device is None else device
+        use_attention_mask = use_default(use_attention_mask,
+                                         self.use_attention_mask)
+        hidden_state_skip_layer = use_default(hidden_state_skip_layer,
+                                              self.hidden_state_skip_layer)
+        do_sample = use_default(do_sample, not self.reproduce)
+        attention_mask = (batch_encoding["attention_mask"].to(device)
+                          if use_attention_mask else None)
+        outputs = self.model(
+            input_ids=batch_encoding["input_ids"].to(device),
+            attention_mask=attention_mask,
+            output_hidden_states=output_hidden_states
+            or hidden_state_skip_layer is not None,
+        )
+        if hidden_state_skip_layer is not None:
+            last_hidden_state = outputs.hidden_states[-(
+                hidden_state_skip_layer + 1)]
+            # Real last hidden state already has layer norm applied. So here we only apply it
+            # for intermediate layers.
+            if hidden_state_skip_layer > 0 and self.apply_final_norm:
+                last_hidden_state = self.model.final_layer_norm(
+                    last_hidden_state)
+        else:
+            last_hidden_state = outputs[self.output_key]
+
+        # Remove hidden states of instruction tokens, only keep prompt tokens.
+        if self.use_template:
+            if data_type == "image":
+                crop_start = self.prompt_template.get("crop_start", -1)
+            elif data_type == "video":
+                crop_start = self.prompt_template_video.get("crop_start", -1)
+            else:
+                raise ValueError(f"Unsupported data type: {data_type}")
+            if crop_start > 0:
+                last_hidden_state = last_hidden_state[:, crop_start:]
+                attention_mask = (attention_mask[:, crop_start:]
+                                  if use_attention_mask else None)
+
+        if output_hidden_states:
+            return TextEncoderModelOutput(last_hidden_state, attention_mask,
+                                          outputs.hidden_states)
+        return TextEncoderModelOutput(last_hidden_state, attention_mask)
+
+    def forward(
+        self,
+        text,
+        use_attention_mask=None,
+        output_hidden_states=False,
+        do_sample=False,
+        hidden_state_skip_layer=None,
+        return_texts=False,
+    ):
+        batch_encoding = self.text2tokens(text)
+        return self.encode(
+            batch_encoding,
+            use_attention_mask=use_attention_mask,
+            output_hidden_states=output_hidden_states,
+            do_sample=do_sample,
+            hidden_state_skip_layer=hidden_state_skip_layer,
+            return_texts=return_texts,
+        )
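For orientation, a hypothetical usage sketch of the forward path above; the TextEncoder constructor and the TextEncoderModelOutput field names are defined earlier in this file and are only assumed here, and the prompt is illustrative.

# Assumes `text_encoder` is a TextEncoder instance built earlier in this module,
# e.g. one wrapping the llava-llama-3 text encoder with a video prompt template.
prompts = ["A cat walks on the grass, realistic style."]
out = text_encoder(prompts)           # forward(): text2tokens() then encode()
prompt_embeds = out.hidden_state      # instruction tokens already cropped via crop_start
prompt_mask = out.attention_mask      # None when use_attention_mask is disabled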
fastvideo/models/hunyuan/text_encoder/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (9.73 kB)

fastvideo/models/hunyuan/text_encoder/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (14.3 kB)

fastvideo/models/hunyuan/utils/__init__.py
ADDED
File without changes

fastvideo/models/hunyuan/utils/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (183 Bytes)

fastvideo/models/hunyuan/utils/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (238 Bytes)

fastvideo/models/hunyuan/utils/__pycache__/helpers.cpython-310.pyc
ADDED
Binary file (1.33 kB)

fastvideo/models/hunyuan/utils/__pycache__/helpers.cpython-312.pyc
ADDED
Binary file (2.22 kB)
fastvideo/models/hunyuan/utils/data_utils.py
ADDED
@@ -0,0 +1,14 @@
+import math
+
+
+def align_to(value, alignment):
+    """align height, width according to alignment
+
+    Args:
+        value (int): height or width
+        alignment (int): target alignment factor
+
+    Returns:
+        int: the aligned value
+    """
+    return int(math.ceil(value / alignment) * alignment)
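align_to simply rounds a dimension up to the nearest multiple of the alignment factor; a minimal sketch with illustrative values:

# Round height/width up to the nearest multiple of the alignment factor.
assert align_to(129, 16) == 144   # 129 -> next multiple of 16
assert align_to(720, 16) == 720   # already-aligned values are unchanged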
fastvideo/models/hunyuan/utils/file_utils.py
ADDED
@@ -0,0 +1,75 @@
+import os
+from pathlib import Path
+
+import imageio
+import numpy as np
+import torch
+import torchvision
+from einops import rearrange
+
+CODE_SUFFIXES = {
+    ".py",  # Python codes
+    ".sh",  # Shell scripts
+    ".yaml",
+    ".yml",  # Configuration files
+}
+
+
+def safe_dir(path):
+    """
+    Create a directory (or the parent directory of a file) if it does not exist.
+
+    Args:
+        path (str or Path): Path to the directory.
+
+    Returns:
+        path (Path): Path object of the directory.
+    """
+    path = Path(path)
+    path.mkdir(exist_ok=True, parents=True)
+    return path
+
+
+def safe_file(path):
+    """
+    Create the parent directory of a file if it does not exist.
+
+    Args:
+        path (str or Path): Path to the file.
+
+    Returns:
+        path (Path): Path object of the file.
+    """
+    path = Path(path)
+    path.parent.mkdir(exist_ok=True, parents=True)
+    return path
+
+
+def save_videos_grid(videos: torch.Tensor,
+                     path: str,
+                     rescale=False,
+                     n_rows=1,
+                     fps=24):
+    """save videos by video tensor
+    copy from https://github.com/guoyww/AnimateDiff/blob/e92bd5671ba62c0d774a32951453e328018b7c5b/animatediff/utils/util.py#L61
+
+    Args:
+        videos (torch.Tensor): video tensor predicted by the model
+        path (str): path to save video
+        rescale (bool, optional): rescale the video tensor from [-1, 1] to [0, 1]. Defaults to False.
+        n_rows (int, optional): Defaults to 1.
+        fps (int, optional): video save fps. Defaults to 24.
+    """
+    videos = rearrange(videos, "b c t h w -> t b c h w")
+    outputs = []
+    for x in videos:
+        x = torchvision.utils.make_grid(x, nrow=n_rows)
+        x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)
+        if rescale:
+            x = (x + 1.0) / 2.0  # -1,1 -> 0,1
+        x = torch.clamp(x, 0, 1)
+        x = (x * 255).numpy().astype(np.uint8)
+        outputs.append(x)
+
+    os.makedirs(os.path.dirname(path), exist_ok=True)
+    imageio.mimsave(path, outputs, fps=fps)
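A minimal usage sketch of save_videos_grid; the tensor shape and output path are illustrative, and writing .mp4 assumes an ffmpeg-capable imageio backend is installed:

import torch

# Fake batch of one video: (batch, channels, frames, height, width), values in [0, 1].
videos = torch.rand(1, 3, 16, 64, 64)
save_videos_grid(videos, "outputs/demo.mp4", rescale=False, n_rows=1, fps=24)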
fastvideo/models/hunyuan/utils/helpers.py
ADDED
@@ -0,0 +1,41 @@
+import collections.abc
+from itertools import repeat
+
+
+def _ntuple(n):
+
+    def parse(x):
+        if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
+            x = tuple(x)
+            if len(x) == 1:
+                x = tuple(repeat(x[0], n))
+            return x
+        return tuple(repeat(x, n))
+
+    return parse
+
+
+to_1tuple = _ntuple(1)
+to_2tuple = _ntuple(2)
+to_3tuple = _ntuple(3)
+to_4tuple = _ntuple(4)
+
+
+def as_tuple(x):
+    if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
+        return tuple(x)
+    if x is None or isinstance(x, (int, float, str)):
+        return (x, )
+    else:
+        raise ValueError(f"Unknown type {type(x)}")
+
+
+def as_list_of_2tuple(x):
+    x = as_tuple(x)
+    if len(x) == 1:
+        x = (x[0], x[0])
+    assert len(x) % 2 == 0, f"Expect even length, got {len(x)}."
+    lst = []
+    for i in range(0, len(x), 2):
+        lst.append((x[i], x[i + 1]))
+    return lst
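For reference, how the tuple helpers above behave (values chosen for illustration):

to_2tuple(7)                     # -> (7, 7)
to_2tuple((3, 5))                # -> (3, 5)
as_list_of_2tuple(4)             # -> [(4, 4)]
as_list_of_2tuple((1, 2, 3, 4))  # -> [(1, 2), (3, 4)]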
fastvideo/models/hunyuan/utils/preprocess_text_encoder_tokenizer_utils.py
ADDED
@@ -0,0 +1,41 @@
+import argparse
+
+import torch
+from transformers import AutoProcessor, LlavaForConditionalGeneration
+
+
+def preprocess_text_encoder_tokenizer(args):
+
+    processor = AutoProcessor.from_pretrained(args.input_dir)
+    model = LlavaForConditionalGeneration.from_pretrained(
+        args.input_dir,
+        torch_dtype=torch.float16,
+        low_cpu_mem_usage=True,
+    ).to(0)
+
+    model.language_model.save_pretrained(f"{args.output_dir}")
+    processor.tokenizer.save_pretrained(f"{args.output_dir}")
+
+
+if __name__ == "__main__":
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--input_dir",
+        type=str,
+        required=True,
+        help="The path to the llava-llama-3-8b-v1_1-transformers.",
+    )
+    parser.add_argument(
+        "--output_dir",
+        type=str,
+        default="",
+        help="The output path of the llava-llama-3-8b-text-encoder-tokenizer."
+        "if '', the parent dir of output will be the same as input dir.",
+    )
+    args = parser.parse_args()
+
+    if len(args.output_dir) == 0:
+        args.output_dir = "/".join(args.input_dir.split("/")[:-1])
+
+    preprocess_text_encoder_tokenizer(args)
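A hypothetical invocation of the script above, expressed in Python; the paths are placeholders and must point to a local llava-llama-3-8b-v1_1-transformers checkout:

from argparse import Namespace

args = Namespace(
    input_dir="/path/to/llava-llama-3-8b-v1_1-transformers",        # placeholder path
    output_dir="/path/to/llava-llama-3-8b-text-encoder-tokenizer",  # placeholder path
)
preprocess_text_encoder_tokenizer(args)  # saves the language model and tokenizer to output_dir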
fastvideo/models/hunyuan/vae/__init__.py
ADDED
@@ -0,0 +1,68 @@
+from pathlib import Path
+
+import torch
+
+from ..constants import PRECISION_TO_TYPE, VAE_PATH
+from .autoencoder_kl_causal_3d import AutoencoderKLCausal3D
+
+
+def load_vae(
+    vae_type: str = "884-16c-hy",
+    vae_precision: str = None,
+    sample_size: tuple = None,
+    vae_path: str = None,
+    logger=None,
+    device=None,
+):
+    """the function to load the 3D VAE model
+
+    Args:
+        vae_type (str): the type of the 3D VAE model. Defaults to "884-16c-hy".
+        vae_precision (str, optional): the precision to load vae. Defaults to None.
+        sample_size (tuple, optional): the tiling size. Defaults to None.
+        vae_path (str, optional): the path to vae. Defaults to None.
+        logger (_type_, optional): logger. Defaults to None.
+        device (_type_, optional): device to load vae. Defaults to None.
+    """
+    if vae_path is None:
+        vae_path = VAE_PATH[vae_type]
+
+    if logger is not None:
+        logger.info(f"Loading 3D VAE model ({vae_type}) from: {vae_path}")
+    config = AutoencoderKLCausal3D.load_config(vae_path)
+    if sample_size:
+        vae = AutoencoderKLCausal3D.from_config(config,
+                                                sample_size=sample_size)
+    else:
+        vae = AutoencoderKLCausal3D.from_config(config)
+
+    vae_ckpt = Path(vae_path) / "pytorch_model.pt"
+    assert vae_ckpt.exists(), f"VAE checkpoint not found: {vae_ckpt}"
+
+    ckpt = torch.load(vae_ckpt, map_location=vae.device)
+    if "state_dict" in ckpt:
+        ckpt = ckpt["state_dict"]
+    if any(k.startswith("vae.") for k in ckpt.keys()):
+        ckpt = {
+            k.replace("vae.", ""): v
+            for k, v in ckpt.items() if k.startswith("vae.")
+        }
+    vae.load_state_dict(ckpt)
+
+    spatial_compression_ratio = vae.config.spatial_compression_ratio
+    time_compression_ratio = vae.config.time_compression_ratio
+
+    if vae_precision is not None:
+        vae = vae.to(dtype=PRECISION_TO_TYPE[vae_precision])
+
+    vae.requires_grad_(False)
+
+    if logger is not None:
+        logger.info(f"VAE to dtype: {vae.dtype}")
+
+    if device is not None:
+        vae = vae.to(device)
+
+    vae.eval()
+
+    return vae, vae_path, spatial_compression_ratio, time_compression_ratio
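A minimal usage sketch of load_vae; the precision string and device are illustrative, and VAE_PATH["884-16c-hy"] must resolve to a local directory containing the VAE config and pytorch_model.pt:

vae, vae_path, spatial_ratio, time_ratio = load_vae(
    vae_type="884-16c-hy",
    vae_precision="fp16",   # assumed to be a key of PRECISION_TO_TYPE
    device="cuda",
)
# spatial_ratio and time_ratio report the VAE's spatial and temporal downsampling factors,
# which callers use to size the latent tensors fed to the diffusion transformer.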
fastvideo/models/hunyuan/vae/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (2.25 kB)

fastvideo/models/hunyuan/vae/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (3.27 kB)

fastvideo/models/hunyuan/vae/__pycache__/autoencoder_kl_causal_3d.cpython-310.pyc
ADDED
Binary file (25.4 kB)

fastvideo/models/hunyuan/vae/__pycache__/autoencoder_kl_causal_3d.cpython-312.pyc
ADDED
Binary file (39.7 kB)

fastvideo/models/hunyuan/vae/__pycache__/unet_causal_3d_blocks.cpython-310.pyc
ADDED
Binary file (16.3 kB)

fastvideo/models/hunyuan/vae/__pycache__/unet_causal_3d_blocks.cpython-312.pyc
ADDED
Binary file (26.1 kB)

fastvideo/models/hunyuan/vae/__pycache__/vae.cpython-310.pyc
ADDED
Binary file (9.39 kB)

fastvideo/models/hunyuan/vae/__pycache__/vae.cpython-312.pyc
ADDED
Binary file (15.9 kB)