Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes.
- CCEdit-main/configs/example_training/autoencoder/kl-f4/imagenet-attnfree-logvar.yaml +115 -0
- CCEdit-main/configs/example_training/imagenet-f8_cond.yaml +188 -0
- CCEdit-main/configs/example_training/sd_1_5_controlldm-test-cp-no2ndca-add-cfca-depthmidas.yaml +270 -0
- CCEdit-main/configs/example_training/sd_1_5_controlldm-test-tvi2v-cp-no2ndca-add-cfca-depthmidas.yaml +269 -0
- CCEdit-main/configs/example_training/toy/cifar10_cond.yaml +99 -0
- CCEdit-main/configs/example_training/toy/mnist.yaml +80 -0
- CCEdit-main/configs/example_training/toy/mnist_cond.yaml +99 -0
- CCEdit-main/configs/example_training/toy/mnist_cond_discrete_eps.yaml +104 -0
- CCEdit-main/configs/example_training/toy/mnist_cond_l1_loss.yaml +104 -0
- CCEdit-main/configs/example_training/toy/mnist_cond_with_ema.yaml +101 -0
- CCEdit-main/configs/inference/sd_2_1.yaml +66 -0
- CCEdit-main/configs/inference/sd_2_1_768.yaml +66 -0
- CCEdit-main/configs/inference/sd_xl_refiner.yaml +91 -0
- CCEdit-main/configs/inference_ccedit/keyframe_no2ndca_depthmidas.yaml +93 -0
- CCEdit-main/configs/inference_ccedit/keyframe_ref_cp_no2ndca_add_cfca_depthzoe.yaml +130 -0
- CCEdit-main/scripts/__init__.py +0 -0
- CCEdit-main/sgm/__init__.py +3 -0
- CCEdit-main/sgm/__pycache__/__init__.cpython-39.pyc +0 -0
- CCEdit-main/sgm/__pycache__/util.cpython-39.pyc +0 -0
- CCEdit-main/sgm/data/__init__.py +1 -0
- CCEdit-main/sgm/data/__pycache__/__init__.cpython-39.pyc +0 -0
- CCEdit-main/sgm/data/__pycache__/dataset.cpython-39.pyc +0 -0
- CCEdit-main/sgm/data/cifar10.py +67 -0
- CCEdit-main/sgm/data/dataset.py +80 -0
- CCEdit-main/sgm/data/detaset_webvid.py +182 -0
- CCEdit-main/sgm/data/mnist.py +85 -0
- CCEdit-main/sgm/data/webvid/base_video_dataset.py +521 -0
- CCEdit-main/sgm/data/webvid/webvid_dataset.py +152 -0
- CCEdit-main/sgm/lr_scheduler.py +135 -0
- CCEdit-main/sgm/models/__init__.py +2 -0
- CCEdit-main/sgm/models/__pycache__/__init__.cpython-39.pyc +0 -0
- CCEdit-main/sgm/models/__pycache__/autoencoder.cpython-39.pyc +0 -0
- CCEdit-main/sgm/models/__pycache__/diffusion.cpython-39.pyc +0 -0
- CCEdit-main/sgm/models/autoencoder.py +357 -0
- CCEdit-main/sgm/models/diffusion-ori.py +320 -0
- CCEdit-main/sgm/models/diffusion.py +910 -0
- CCEdit-main/sgm/modules/__init__.py +6 -0
- CCEdit-main/sgm/modules/__pycache__/__init__.cpython-39.pyc +0 -0
- CCEdit-main/sgm/modules/__pycache__/attention.cpython-39.pyc +0 -0
- CCEdit-main/sgm/modules/__pycache__/ema.cpython-39.pyc +0 -0
- CCEdit-main/sgm/modules/attention.py +1663 -0
- CCEdit-main/sgm/modules/autoencoding/__init__.py +0 -0
- CCEdit-main/sgm/modules/autoencoding/__pycache__/__init__.cpython-39.pyc +0 -0
- CCEdit-main/sgm/modules/autoencoding/losses/__init__.py +246 -0
- CCEdit-main/sgm/modules/autoencoding/regularizers/__init__.py +53 -0
- CCEdit-main/sgm/modules/autoencoding/regularizers/__pycache__/__init__.cpython-39.pyc +0 -0
- CCEdit-main/sgm/modules/diffusionmodules/__pycache__/__init__.cpython-39.pyc +0 -0
- CCEdit-main/sgm/modules/diffusionmodules/__pycache__/denoiser_scaling.cpython-39.pyc +0 -0
- CCEdit-main/sgm/modules/diffusionmodules/__pycache__/model.cpython-39.pyc +0 -0
- CCEdit-main/sgm/modules/diffusionmodules/__pycache__/openaimodel.cpython-39.pyc +0 -0
CCEdit-main/configs/example_training/autoencoder/kl-f4/imagenet-attnfree-logvar.yaml
ADDED
@@ -0,0 +1,115 @@
model:
  base_learning_rate: 4.5e-6
  target: sgm.models.autoencoder.AutoencodingEngine
  params:
    input_key: jpg
    monitor: val/rec_loss

    loss_config:
      target: sgm.modules.autoencoding.losses.GeneralLPIPSWithDiscriminator
      params:
        perceptual_weight: 0.25
        disc_start: 20001
        disc_weight: 0.5
        learn_logvar: True

        regularization_weights:
          kl_loss: 1.0

    regularizer_config:
      target: sgm.modules.autoencoding.regularizers.DiagonalGaussianRegularizer

    encoder_config:
      target: sgm.modules.diffusionmodules.model.Encoder
      params:
        attn_type: none
        double_z: True
        z_channels: 4
        resolution: 256
        in_channels: 3
        out_ch: 3
        ch: 128
        ch_mult: [ 1, 2, 4 ]
        num_res_blocks: 4
        attn_resolutions: [ ]
        dropout: 0.0

    decoder_config:
      target: sgm.modules.diffusionmodules.model.Decoder
      params:
        attn_type: none
        double_z: False
        z_channels: 4
        resolution: 256
        in_channels: 3
        out_ch: 3
        ch: 128
        ch_mult: [ 1, 2, 4 ]
        num_res_blocks: 4
        attn_resolutions: [ ]
        dropout: 0.0

data:
  target: sgm.data.dataset.StableDataModuleFromConfig
  params:
    train:
      datapipeline:
        urls:
          - "DATA-PATH"
        pipeline_config:
          shardshuffle: 10000
          sample_shuffle: 10000

        decoders:
          - "pil"

        postprocessors:
          - target: sdata.mappers.TorchVisionImageTransforms
            params:
              key: 'jpg'
              transforms:
                - target: torchvision.transforms.Resize
                  params:
                    size: 256
                    interpolation: 3
                - target: torchvision.transforms.ToTensor
          - target: sdata.mappers.Rescaler
          - target: sdata.mappers.AddOriginalImageSizeAsTupleAndCropToSquare
            params:
              h_key: height
              w_key: width

      loader:
        batch_size: 8
        num_workers: 4


lightning:
  strategy:
    target: pytorch_lightning.strategies.DDPStrategy
    params:
      find_unused_parameters: True

  modelcheckpoint:
    params:
      every_n_train_steps: 5000

  callbacks:
    metrics_over_trainsteps_checkpoint:
      params:
        every_n_train_steps: 50000

    image_logger:
      target: main.ImageLogger
      params:
        enable_autocast: False
        batch_frequency: 1000
        max_images: 8
        increase_log_steps: True

  trainer:
    devices: 0,
    limit_val_batches: 50
    benchmark: True
    accumulate_grad_batches: 1
    val_check_interval: 10000
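All of the configs in this commit follow the target/params convention used throughout sgm. As a minimal, hedged sketch of how such a file is typically consumed (this assumes an instantiate_from_config helper in sgm/util.py, as in comparable Stable Diffusion codebases; the path below is illustrative, not a guarantee of this repo's entry point):

    from omegaconf import OmegaConf
    from sgm.util import instantiate_from_config

    # Load the YAML, then build the class named by model.target with model.params as kwargs.
    config = OmegaConf.load(
        "configs/example_training/autoencoder/kl-f4/imagenet-attnfree-logvar.yaml"
    )
    model = instantiate_from_config(config.model)  # e.g. an AutoencodingEngine instance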
CCEdit-main/configs/example_training/imagenet-f8_cond.yaml
ADDED
@@ -0,0 +1,188 @@
model:
  base_learning_rate: 1.0e-4
  target: sgm.models.diffusion.DiffusionEngine
  params:
    scale_factor: 0.13025
    disable_first_stage_autocast: True
    log_keys:
      - cls

    scheduler_config:
      target: sgm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [10000]
        cycle_lengths: [10000000000000]
        f_start: [1.e-6]
        f_max: [1.]
        f_min: [1.]

    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
      params:
        num_idx: 1000

        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

    network_config:
      target: sgm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        use_checkpoint: True
        use_fp16: True
        in_channels: 4
        out_channels: 4
        model_channels: 256
        attention_resolutions: [1, 2, 4]
        num_res_blocks: 2
        channel_mult: [1, 2, 4]
        num_head_channels: 64
        num_classes: sequential
        adm_in_channels: 1024
        use_spatial_transformer: true
        transformer_depth: 1
        context_dim: 1024
        spatial_transformer_attn_type: softmax-xformers

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          # crossattn cond
          - is_trainable: True
            input_key: cls
            ucg_rate: 0.2
            target: sgm.modules.encoders.modules.ClassEmbedder
            params:
              add_sequence_dim: True # will be used through crossattn then
              embed_dim: 1024
              n_classes: 1000
          # vector cond
          - is_trainable: False
            ucg_rate: 0.2
            input_key: original_size_as_tuple
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256 # multiplied by two
          # vector cond
          - is_trainable: False
            input_key: crop_coords_top_left
            ucg_rate: 0.2
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256 # multiplied by two

    first_stage_config:
      target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
      params:
        ckpt_path: CKPT_PATH
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          attn_type: vanilla-xformers
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [1, 2, 4, 4]
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    loss_fn_config:
      target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
      params:
        sigma_sampler_config:
          target: sgm.modules.diffusionmodules.sigma_sampling.DiscreteSampling
          params:
            num_idx: 1000

            discretization_config:
              target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

    sampler_config:
      target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
      params:
        num_steps: 50

        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

        guider_config:
          target: sgm.modules.diffusionmodules.guiders.VanillaCFG
          params:
            scale: 5.0

data:
  target: sgm.data.dataset.StableDataModuleFromConfig
  params:
    train:
      datapipeline:
        urls:
          # USER: adapt this path to the root of your custom dataset
          - "DATA_PATH"
        pipeline_config:
          shardshuffle: 10000
          sample_shuffle: 10000 # USER: you might wanna adapt depending on your available RAM

        decoders:
          - "pil"

        postprocessors:
          - target: sdata.mappers.TorchVisionImageTransforms
            params:
              key: 'jpg' # USER: you might wanna adapt this for your custom dataset
              transforms:
                - target: torchvision.transforms.Resize
                  params:
                    size: 256
                    interpolation: 3
                - target: torchvision.transforms.ToTensor
          - target: sdata.mappers.Rescaler

          - target: sdata.mappers.AddOriginalImageSizeAsTupleAndCropToSquare
            params:
              h_key: height # USER: you might wanna adapt this for your custom dataset
              w_key: width # USER: you might wanna adapt this for your custom dataset

      loader:
        batch_size: 64
        num_workers: 6

lightning:
  modelcheckpoint:
    params:
      every_n_train_steps: 5000

  callbacks:
    metrics_over_trainsteps_checkpoint:
      params:
        every_n_train_steps: 25000

    image_logger:
      target: main.ImageLogger
      params:
        disabled: False
        enable_autocast: False
        batch_frequency: 1000
        max_images: 8
        increase_log_steps: True
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          N: 8
          n_rows: 2

  trainer:
    devices: 0,
    benchmark: True
    num_sanity_val_steps: 0
    accumulate_grad_batches: 1
    max_epochs: 1000
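The sampler block in this config pairs EulerEDMSampler with a VanillaCFG guider at scale 5.0. As a rough illustration of what such a guider computes (the standard classifier-free guidance combination; this is a sketch of the formula, not the repository's exact class):

    import torch

    def cfg_combine(cond_out: torch.Tensor, uncond_out: torch.Tensor, scale: float = 5.0) -> torch.Tensor:
        # guided prediction = uncond + scale * (cond - uncond), with scale taken from guider_config above
        return uncond_out + scale * (cond_out - uncond_out)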
CCEdit-main/configs/example_training/sd_1_5_controlldm-test-cp-no2ndca-add-cfca-depthmidas.yaml
ADDED
@@ -0,0 +1,270 @@
# tvi2v: condition on the frame, text, and video to generate the video
# cp: copy weights from diffusion_model/unet to controlnet_img. Use VAE to extract the 8x downsampled image.
# no2ndca: no second cross attention (text cross attention) in the temporal layers
# add: add the features of reference image on the features of center frame of the main SD model
# cfca: cross-frame cross-attention, in the main SD model, for each token as query, take the features of center image and current frame as the key and value


InputFPS: &InputFPS 4
FrameLength: &FrameLength 17
BatchSize: &BatchSize 1
NumGPU: &NumGPU 1
NumNodes: &NumNodes 1
BaseLearningRate: &BaseLearningRate 5.0e-5
DataDir: &DataDir /PATH/TO/YOUR/DATA # specify your data dir
MetadataDir: &MetadataDir /PATH/TO/YOUR/METADATA # specify your metadata dir
ResolutionH: &ResolutionH 384
ResolutionW: &ResolutionW 512
Split: &Split "val" # * Debug setting
Cut: &Cut "10M" # * Debug setting
CkptPath: &CkptPath /PATH/TO/YOUR/CHECKPOINT # specify your checkpoint dir

Ckpt_log_every: &Ckpt_log_every 20000 # * Debug setting, 4000
Image_log_every: &Image_log_every 10 # * Debug setting, 2000
AccumulateGradBatches: &AccumulateGradBatches 1
# DEBUG SETTINGS
# Model_channels: &Model_channels 64
Model_channels: &Model_channels 320

model:
  base_learning_rate: *BaseLearningRate
  target: sgm.models.diffusion.VideoDiffusionEngineTV2V
  params:
    use_ema: False # Default is False
    scale_factor: 0.18215
    disable_first_stage_autocast: True
    log_keys:
      - txt
    ckpt_path: *CkptPath
    freeze_model: spatial # none indicates no freezing

    scheduler_config:
      target: sgm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 100 ]
        cycle_lengths: [ 10000000000000 ]
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
      params:
        num_idx: 1000

        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

    network_config:
      target: sgm.modules.diffusionmodules.controlmodel.ControlledUNetModel3DTV2V
      params:
        use_checkpoint: True
        in_channels: 4
        out_channels: 4
        model_channels: *Model_channels
        attention_resolutions: [4, 2, 1]
        num_res_blocks: 2
        channel_mult: [1, 2, 4, 4]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        legacy: False
        disable_temporal_text_ca: True
        # -> use "temporal_ca" modules
        enable_attention3d_crossframe: True
        ST3DCA_ca_type: 'center_self'
        # crossframe_type: 'reference' # not use the reference image as k,v, so comment it.
        controlnet_config:
          target: sgm.modules.diffusionmodules.controlmodel.ControlNet2D
          params:
            use_checkpoint: True
            in_channels: 4
            hint_channels: 3
            model_channels: *Model_channels
            attention_resolutions: [4, 2, 1]
            num_res_blocks: 2
            channel_mult: [1, 2, 4, 4]
            num_heads: 8
            use_spatial_transformer: True
            transformer_depth: 1
            context_dim: 768
            legacy: False
            control_scales: 1.0
        controlnet_img_config: # process the anchor frame
          target: sgm.modules.diffusionmodules.controlmodel.ControlNet2D
          params:
            use_checkpoint: True
            in_channels: 4
            hint_channels: 3
            model_channels: *Model_channels
            attention_resolutions: [4, 2, 1]
            num_res_blocks: 2
            channel_mult: [1, 2, 4, 4]
            num_heads: 8
            use_spatial_transformer: True
            transformer_depth: 1
            context_dim: 768
            legacy: False
            # -> add on center frame, strengthen the control
            control_scales: 1.0
            # control_scales: 0.0 # use crossattention, instead of add in controlnet
            # -> not add the noised x to controlnet_img
            no_add_x: True # no need to add x
            set_input_hint_block_as_identity: True # ATTENTION: newly added. default: False
            # -> disable the text cross attention in controlnet_img
            disable_text_ca: True

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          # crossattn cond
          - is_trainable: False
            input_key: txt
            ucg_rate: 0.5
            legacy_ucg_value: ""
            target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
            params:
              freeze: true
          - is_trainable: False
            input_key: control_hint
            ucg_rate: 0.0
            target: sgm.modules.encoders.modules.DepthMidasEncoder
          - is_trainable: False
            input_key: cond_img
            ucg_rate: 0.0
            target: sgm.modules.encoders.modules.VAEEmbedder

    first_stage_config:
      target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [1, 2, 4, 4]
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    loss_fn_config:
      target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
      params:
        sigma_sampler_config:
          target: sgm.modules.diffusionmodules.sigma_sampling.DiscreteSampling
          params:
            num_idx: 1000

            discretization_config:
              target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
        offset_noise_level: 0.1
        offset_noise_varying_dim: 3

    sampler_config:
      target: sgm.modules.diffusionmodules.sampling.EulerAncestralSampler # ATTENTION: newly add. Default: EulerEDMSampler
      params:
        num_steps: 30 # ATTENTION: newly add. default: 50

        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

        guider_config:
          target: sgm.modules.diffusionmodules.guiders.VanillaCFGTV2V
          params:
            scale: 7.5

data:
  target: sgm.data.detaset_webvid.DataModuleFromConfig
  params:
    batch_size: *BatchSize # TODO need to change batch_size
    num_workers: 8
    wrap: False
    train:
      target: sgm.data.webvid.webvid_dataset.WebVid
      params:
        dataset_name: WebVid
        data_dir: *DataDir # TODO check the data_dir
        metadata_dir: *MetadataDir
        split: *Split
        cut: *Cut
        # key: *Key # TODO check data file name, default cleaned
        subsample: 1
        text_params:
          input: text
        video_params:
          input_res_h: *ResolutionH
          input_res_w: *ResolutionW
          tsfm_params:
            norm_mean: [0.5, 0.5, 0.5]
            norm_std: [0.5, 0.5, 0.5]
            randcrop_scale: [0.8, 1.0] # ATTENTION: newly add.
          num_frames: *FrameLength
          prop_factor: *InputFPS
          loading: lax
        metadata_folder_name: webvid10m_meta
        first_stage_key: jpg
        cond_stage_key: txt
        skip_missing_files: True
        use_control_hint: True
        # -> use center frame as the condition image
        # random_cond_img: True

lightning:
  modelcheckpoint:
    params:
      every_n_train_steps: *Ckpt_log_every

  callbacks:
    metrics_over_trainsteps_checkpoint:
      params:
        every_n_train_steps: 25000

    image_logger:
      target: main.ImageLogger
      params:
        disabled: False
        enable_autocast: False
        batch_frequency: *Image_log_every
        max_images: 32
        increase_log_steps: False # default is True
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          N: 8
          n_rows: *FrameLength
          video_fps: *InputFPS

  trainer:
    precision: 16
    devices: *NumGPU
    num_nodes: *NumNodes
    benchmark: True
    num_sanity_val_steps: 0
    accumulate_grad_batches: *AccumulateGradBatches
    max_epochs: 1000

  strategy:
    target: pytorch_lightning.strategies.DDPStrategy
    params:
      find_unused_parameters: True

  # strategy:
  #   target: pytorch_lightning.strategies.DeepSpeedStrategy
  #   params:
  #     stage: 2
  #     allgather_bucket_size: 8e8
  #     reduce_bucket_size: 8e8
  #     load_full_weights: True
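The header comments describe the "cfca" mechanism in words: every frame's tokens act as queries, while the center (anchor) frame's features together with the current frame's features supply the keys and values. A generic, hypothetical sketch of that idea follows (single head, names invented here for illustration; this is not the repository's ControlledUNetModel3DTV2V code):

    import torch

    def cross_frame_attention(x, to_q, to_k, to_v):
        # x: (batch, frames, tokens, dim); to_q/to_k/to_v are per-token linear projections
        b, f, n, d = x.shape
        center = x[:, f // 2].unsqueeze(1).expand(b, f, n, d)  # anchor-frame tokens, repeated per frame
        q = to_q(x)                                            # queries come from every frame
        kv = torch.cat([center, x], dim=2)                     # keys/values: center tokens + current-frame tokens
        k, v = to_k(kv), to_v(kv)
        attn = torch.softmax(q @ k.transpose(-1, -2) / d ** 0.5, dim=-1)
        return attn @ v                                        # (batch, frames, tokens, dim)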
CCEdit-main/configs/example_training/sd_1_5_controlldm-test-tvi2v-cp-no2ndca-add-cfca-depthmidas.yaml
ADDED
@@ -0,0 +1,269 @@
# tvi2v: condition on the frame, text, and video to generate the video
# cp: copy weights from diffusion_model/unet to controlnet_img. Use VAE to extract the 8x downsampled image.
# no2ndca: no second cross attention (text cross attention) in the temporal layers
# add: add the features of reference image on the features of center frame of the main SD model
# cfca: cross-frame cross-attention, in the main SD model, for each token as query, take the features of center image and current frame as the key and value


InputFPS: &InputFPS 4
FrameLength: &FrameLength 17
BatchSize: &BatchSize 1
NumGPU: &NumGPU 1
NumNodes: &NumNodes 1
BaseLearningRate: &BaseLearningRate 5.0e-5
DataDir: &DataDir /PATH/TO/YOUR/DATA # specify your data dir
MetadataDir: &MetadataDir /PATH/TO/YOUR/METADATA # specify your metadata dir
ResolutionH: &ResolutionH 384
ResolutionW: &ResolutionW 512
Split: &Split "val" # * Debug setting
Cut: &Cut "10M" # * Debug setting
CkptPath: &CkptPath /PATH/TO/YOUR/CHECKPOINT # specify your checkpoint dir
Ckpt_log_every: &Ckpt_log_every 20000 # * Debug setting, 4000
Image_log_every: &Image_log_every 10 # * Debug setting, 2000
AccumulateGradBatches: &AccumulateGradBatches 1
# DEBUG SETTINGS
# Model_channels: &Model_channels 64
Model_channels: &Model_channels 320

model:
  base_learning_rate: *BaseLearningRate
  target: sgm.models.diffusion.VideoDiffusionEngineTV2V
  params:
    use_ema: False # Default is False
    scale_factor: 0.18215
    disable_first_stage_autocast: True
    log_keys:
      - txt
    ckpt_path: *CkptPath # TODO: for fast debugging, I comment this line
    freeze_model: spatial # none indicates no freezing

    scheduler_config:
      target: sgm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 100 ]
        cycle_lengths: [ 10000000000000 ]
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
      params:
        num_idx: 1000

        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

    network_config:
      target: sgm.modules.diffusionmodules.controlmodel.ControlledUNetModel3DTV2V
      params:
        use_checkpoint: True
        in_channels: 4
        out_channels: 4
        model_channels: *Model_channels
        attention_resolutions: [4, 2, 1]
        num_res_blocks: 2
        channel_mult: [1, 2, 4, 4]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        legacy: False
        disable_temporal_text_ca: True
        # -> use "temporal_ca" modules
        enable_attention3d_crossframe: True
        ST3DCA_ca_type: 'center_self'
        # crossframe_type: 'reference' # not use the reference image as k,v, so comment it.
        controlnet_config:
          target: sgm.modules.diffusionmodules.controlmodel.ControlNet2D
          params:
            use_checkpoint: True
            in_channels: 4
            hint_channels: 3
            model_channels: *Model_channels
            attention_resolutions: [4, 2, 1]
            num_res_blocks: 2
            channel_mult: [1, 2, 4, 4]
            num_heads: 8
            use_spatial_transformer: True
            transformer_depth: 1
            context_dim: 768
            legacy: False
            control_scales: 1.0
        controlnet_img_config: # process the anchor frame
          target: sgm.modules.diffusionmodules.controlmodel.ControlNet2D
          params:
            use_checkpoint: True
            in_channels: 4
            hint_channels: 3
            model_channels: *Model_channels
            attention_resolutions: [4, 2, 1]
            num_res_blocks: 2
            channel_mult: [1, 2, 4, 4]
            num_heads: 8
            use_spatial_transformer: True
            transformer_depth: 1
            context_dim: 768
            legacy: False
            # -> add on center frame, strengthen the control
            control_scales: 1.0
            # control_scales: 0.0 # use crossattention, instead of add in controlnet
            # -> not add the noised x to controlnet_img
            no_add_x: True # no need to add x
            set_input_hint_block_as_identity: True # ATTENTION: newly added. default: False
            # -> disable the text cross attention in controlnet_img
            disable_text_ca: True

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          # crossattn cond
          - is_trainable: False
            input_key: txt
            ucg_rate: 0.5
            legacy_ucg_value: ""
            target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
            params:
              freeze: true
          - is_trainable: False
            input_key: control_hint
            ucg_rate: 0.0
            target: sgm.modules.encoders.modules.DepthMidasEncoder
          - is_trainable: False
            input_key: cond_img
            ucg_rate: 0.0
            target: sgm.modules.encoders.modules.VAEEmbedder

    first_stage_config:
      target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [1, 2, 4, 4]
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    loss_fn_config:
      target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
      params:
        sigma_sampler_config:
          target: sgm.modules.diffusionmodules.sigma_sampling.DiscreteSampling
          params:
            num_idx: 1000

            discretization_config:
              target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
        offset_noise_level: 0.1
        offset_noise_varying_dim: 3

    sampler_config:
      target: sgm.modules.diffusionmodules.sampling.EulerAncestralSampler # ATTENTION: newly add. Default: EulerEDMSampler
      params:
        num_steps: 30 # ATTENTION: newly add. default: 50

        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

        guider_config:
          target: sgm.modules.diffusionmodules.guiders.VanillaCFGTV2V
          params:
            scale: 7.5

data:
  target: sgm.data.detaset_webvid.DataModuleFromConfig
  params:
    batch_size: *BatchSize # TODO need to change batch_size
    num_workers: 8
    wrap: False
    train:
      target: sgm.data.webvid.webvid_dataset.WebVid
      params:
        dataset_name: WebVid
        data_dir: *DataDir # TODO check the data_dir
        metadata_dir: *MetadataDir
        split: *Split
        cut: *Cut
        # key: *Key # TODO check data file name, default cleaned
        subsample: 1
        text_params:
          input: text
        video_params:
          input_res_h: *ResolutionH
          input_res_w: *ResolutionW
          tsfm_params:
            norm_mean: [0.5, 0.5, 0.5]
            norm_std: [0.5, 0.5, 0.5]
            randcrop_scale: [0.8, 1.0] # ATTENTION: newly add.
          num_frames: *FrameLength
          prop_factor: *InputFPS
          loading: lax
        metadata_folder_name: webvid10m_meta
        first_stage_key: jpg
        cond_stage_key: txt
        skip_missing_files: True
        use_control_hint: True
        # -> use center frame as the condition image
        # random_cond_img: True

lightning:
  modelcheckpoint:
    params:
      every_n_train_steps: *Ckpt_log_every

  callbacks:
    metrics_over_trainsteps_checkpoint:
      params:
        every_n_train_steps: 25000

    image_logger:
      target: main.ImageLogger
      params:
        disabled: False
        enable_autocast: False
        batch_frequency: *Image_log_every
        max_images: 32
        increase_log_steps: False # default is True
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          N: 8
          n_rows: *FrameLength
          video_fps: *InputFPS

  trainer:
    precision: 16
    devices: *NumGPU
    num_nodes: *NumNodes
    benchmark: True
    num_sanity_val_steps: 0
    accumulate_grad_batches: *AccumulateGradBatches
    max_epochs: 1000

  strategy:
    target: pytorch_lightning.strategies.DDPStrategy
    params:
      find_unused_parameters: True

  # strategy:
  #   target: pytorch_lightning.strategies.DeepSpeedStrategy
  #   params:
  #     stage: 2
  #     allgather_bucket_size: 8e8
  #     reduce_bucket_size: 8e8
  #     load_full_weights: True
CCEdit-main/configs/example_training/toy/cifar10_cond.yaml
ADDED
@@ -0,0 +1,99 @@
model:
  base_learning_rate: 1.0e-4
  target: sgm.models.diffusion.DiffusionEngine
  params:
    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.Denoiser
      params:
        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EDMWeighting
          params:
            sigma_data: 1.0
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling
          params:
            sigma_data: 1.0

    network_config:
      target: sgm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        use_checkpoint: True
        in_channels: 3
        out_channels: 3
        model_channels: 32
        attention_resolutions: []
        num_res_blocks: 4
        channel_mult: [1, 2, 2]
        num_head_channels: 32
        num_classes: sequential
        adm_in_channels: 128

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          - is_trainable: True
            input_key: cls
            ucg_rate: 0.2
            target: sgm.modules.encoders.modules.ClassEmbedder
            params:
              embed_dim: 128
              n_classes: 10

    first_stage_config:
      target: sgm.models.autoencoder.IdentityFirstStage

    loss_fn_config:
      target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
      params:
        sigma_sampler_config:
          target: sgm.modules.diffusionmodules.sigma_sampling.EDMSampling

    sampler_config:
      target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
      params:
        num_steps: 50

        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization

        guider_config:
          target: sgm.modules.diffusionmodules.guiders.VanillaCFG
          params:
            scale: 3.0

data:
  target: sgm.data.cifar10.CIFAR10Loader
  params:
    batch_size: 512
    num_workers: 1

lightning:
  modelcheckpoint:
    params:
      every_n_train_steps: 5000

  callbacks:
    metrics_over_trainsteps_checkpoint:
      params:
        every_n_train_steps: 25000

    image_logger:
      target: main.ImageLogger
      params:
        disabled: False
        batch_frequency: 1000
        max_images: 64
        increase_log_steps: True
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          N: 64
          n_rows: 8

  trainer:
    devices: 0,
    benchmark: True
    num_sanity_val_steps: 0
    accumulate_grad_batches: 1
    max_epochs: 20
CCEdit-main/configs/example_training/toy/mnist.yaml
ADDED
@@ -0,0 +1,80 @@
model:
  base_learning_rate: 1.0e-4
  target: sgm.models.diffusion.DiffusionEngine
  params:
    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.Denoiser
      params:
        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EDMWeighting
          params:
            sigma_data: 1.0
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling
          params:
            sigma_data: 1.0

    network_config:
      target: sgm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        use_checkpoint: True
        in_channels: 1
        out_channels: 1
        model_channels: 32
        attention_resolutions: []
        num_res_blocks: 4
        channel_mult: [1, 2, 2]
        num_head_channels: 32

    first_stage_config:
      target: sgm.models.autoencoder.IdentityFirstStage

    loss_fn_config:
      target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
      params:
        sigma_sampler_config:
          target: sgm.modules.diffusionmodules.sigma_sampling.EDMSampling

    sampler_config:
      target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
      params:
        num_steps: 50

        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization

data:
  target: sgm.data.mnist.MNISTLoader
  params:
    batch_size: 512
    num_workers: 1

lightning:
  modelcheckpoint:
    params:
      every_n_train_steps: 5000

  callbacks:
    metrics_over_trainsteps_checkpoint:
      params:
        every_n_train_steps: 25000

    image_logger:
      target: main.ImageLogger
      params:
        disabled: False
        batch_frequency: 1000
        max_images: 64
        increase_log_steps: False
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          N: 64
          n_rows: 8

  trainer:
    devices: 0,
    benchmark: True
    num_sanity_val_steps: 0
    accumulate_grad_batches: 1
    max_epochs: 10
CCEdit-main/configs/example_training/toy/mnist_cond.yaml
ADDED
@@ -0,0 +1,99 @@
model:
  base_learning_rate: 1.0e-4
  target: sgm.models.diffusion.DiffusionEngine
  params:
    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.Denoiser
      params:
        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EDMWeighting
          params:
            sigma_data: 1.0
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling
          params:
            sigma_data: 1.0

    network_config:
      target: sgm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        use_checkpoint: True
        in_channels: 1
        out_channels: 1
        model_channels: 32
        attention_resolutions: [ ]
        num_res_blocks: 4
        channel_mult: [ 1, 2, 2 ]
        num_head_channels: 32
        num_classes: sequential
        adm_in_channels: 128

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          - is_trainable: True
            input_key: "cls"
            ucg_rate: 0.2
            target: sgm.modules.encoders.modules.ClassEmbedder
            params:
              embed_dim: 128
              n_classes: 10

    first_stage_config:
      target: sgm.models.autoencoder.IdentityFirstStage

    loss_fn_config:
      target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
      params:
        sigma_sampler_config:
          target: sgm.modules.diffusionmodules.sigma_sampling.EDMSampling

    sampler_config:
      target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
      params:
        num_steps: 50

        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization

        guider_config:
          target: sgm.modules.diffusionmodules.guiders.VanillaCFG
          params:
            scale: 3.0

data:
  target: sgm.data.mnist.MNISTLoader
  params:
    batch_size: 512
    num_workers: 1

lightning:
  modelcheckpoint:
    params:
      every_n_train_steps: 5000

  callbacks:
    metrics_over_trainsteps_checkpoint:
      params:
        every_n_train_steps: 25000

    image_logger:
      target: main.ImageLogger
      params:
        disabled: False
        batch_frequency: 1000
        max_images: 16
        increase_log_steps: True
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          N: 16
          n_rows: 4

  trainer:
    devices: 0,
    benchmark: True
    num_sanity_val_steps: 0
    accumulate_grad_batches: 1
    max_epochs: 20
CCEdit-main/configs/example_training/toy/mnist_cond_discrete_eps.yaml
ADDED
@@ -0,0 +1,104 @@
model:
  base_learning_rate: 1.0e-4
  target: sgm.models.diffusion.DiffusionEngine
  params:
    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
      params:
        num_idx: 1000

        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EDMWeighting
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling
        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

    network_config:
      target: sgm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        use_checkpoint: True
        in_channels: 1
        out_channels: 1
        model_channels: 32
        attention_resolutions: [ ]
        num_res_blocks: 4
        channel_mult: [ 1, 2, 2 ]
        num_head_channels: 32
        num_classes: sequential
        adm_in_channels: 128

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          - is_trainable: True
            input_key: "cls"
            ucg_rate: 0.2
            target: sgm.modules.encoders.modules.ClassEmbedder
            params:
              embed_dim: 128
              n_classes: 10

    first_stage_config:
      target: sgm.models.autoencoder.IdentityFirstStage

    loss_fn_config:
      target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
      params:
        sigma_sampler_config:
          target: sgm.modules.diffusionmodules.sigma_sampling.DiscreteSampling
          params:
            num_idx: 1000

            discretization_config:
              target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

    sampler_config:
      target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
      params:
        num_steps: 50

        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

        guider_config:
          target: sgm.modules.diffusionmodules.guiders.VanillaCFG
          params:
            scale: 5.0

data:
  target: sgm.data.mnist.MNISTLoader
  params:
    batch_size: 512
    num_workers: 1

lightning:
  modelcheckpoint:
    params:
      every_n_train_steps: 5000

  callbacks:
    metrics_over_trainsteps_checkpoint:
      params:
        every_n_train_steps: 25000

    image_logger:
      target: main.ImageLogger
      params:
        disabled: False
        batch_frequency: 1000
        max_images: 16
        increase_log_steps: True
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          N: 16
          n_rows: 4

  trainer:
    devices: 0,
    benchmark: True
    num_sanity_val_steps: 0
    accumulate_grad_batches: 1
    max_epochs: 20
CCEdit-main/configs/example_training/toy/mnist_cond_l1_loss.yaml
ADDED
@@ -0,0 +1,104 @@
model:
  base_learning_rate: 1.0e-4
  target: sgm.models.diffusion.DiffusionEngine
  params:
    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.Denoiser
      params:
        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EDMWeighting
          params:
            sigma_data: 1.0
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling
          params:
            sigma_data: 1.0

    network_config:
      target: sgm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        use_checkpoint: True
        in_channels: 1
        out_channels: 1
        model_channels: 32
        attention_resolutions: []
        num_res_blocks: 4
        channel_mult: [1, 2, 2]
        num_head_channels: 32
        num_classes: "sequential"
        adm_in_channels: 128

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          - is_trainable: True
            input_key: "cls"
            ucg_rate: 0.2
            target: sgm.modules.encoders.modules.ClassEmbedder
            params:
              embed_dim: 128
              n_classes: 10

    first_stage_config:
      target: sgm.models.autoencoder.IdentityFirstStage

    loss_fn_config:
      target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
      params:
        sigma_sampler_config:
          target: sgm.modules.diffusionmodules.sigma_sampling.EDMSampling

    sampler_config:
      target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
      params:
        num_steps: 50

        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization

        guider_config:
          target: sgm.modules.diffusionmodules.guiders.VanillaCFG
          params:
            scale: 3.0

    loss_config:
      target: sgm.modules.diffusionmodules.StandardDiffusionLoss
      params:
        type: l1

data:
  target: sgm.data.mnist.MNISTLoader
  params:
    batch_size: 512
    num_workers: 1

lightning:
  modelcheckpoint:
    params:
      every_n_train_steps: 5000

  callbacks:
    metrics_over_trainsteps_checkpoint:
      params:
        every_n_train_steps: 25000

    image_logger:
      target: main.ImageLogger
      params:
        disabled: False
        batch_frequency: 1000
        max_images: 64
        increase_log_steps: True
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          N: 64
          n_rows: 8

  trainer:
    devices: 0,
    benchmark: True
    num_sanity_val_steps: 0
    accumulate_grad_batches: 1
    max_epochs: 20
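Note: the only substantive difference from the plain class-conditional MNIST example is the `type: l1` entry in `loss_config`. As a hedged illustration (generic PyTorch, not the repo's `StandardDiffusionLoss` implementation), this switch selects the per-pixel distance between the denoiser output and its target:

    import torch

    def diffusion_loss(pred, target, loss_type="l2"):
        # pred / target: (B, C, H, W) tensors
        if loss_type == "l2":
            return torch.mean((pred - target) ** 2)
        elif loss_type == "l1":
            return torch.mean((pred - target).abs())
        raise NotImplementedError(loss_type)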
CCEdit-main/configs/example_training/toy/mnist_cond_with_ema.yaml
ADDED
@@ -0,0 +1,101 @@
model:
  base_learning_rate: 1.0e-4
  target: sgm.models.diffusion.DiffusionEngine
  params:
    use_ema: True

    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.Denoiser
      params:
        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EDMWeighting
          params:
            sigma_data: 1.0
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling
          params:
            sigma_data: 1.0

    network_config:
      target: sgm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        use_checkpoint: True
        in_channels: 1
        out_channels: 1
        model_channels: 32
        attention_resolutions: []
        num_res_blocks: 4
        channel_mult: [1, 2, 2]
        num_head_channels: 32
        num_classes: sequential
        adm_in_channels: 128

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          - is_trainable: True
            input_key: cls
            ucg_rate: 0.2
            target: sgm.modules.encoders.modules.ClassEmbedder
            params:
              embed_dim: 128
              n_classes: 10

    first_stage_config:
      target: sgm.models.autoencoder.IdentityFirstStage

    loss_fn_config:
      target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
      params:
        sigma_sampler_config:
          target: sgm.modules.diffusionmodules.sigma_sampling.EDMSampling

    sampler_config:
      target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
      params:
        num_steps: 50

        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization

        guider_config:
          target: sgm.modules.diffusionmodules.guiders.VanillaCFG
          params:
            scale: 3.0

data:
  target: sgm.data.mnist.MNISTLoader
  params:
    batch_size: 512
    num_workers: 1

lightning:
  modelcheckpoint:
    params:
      every_n_train_steps: 5000

  callbacks:
    metrics_over_trainsteps_checkpoint:
      params:
        every_n_train_steps: 25000

    image_logger:
      target: main.ImageLogger
      params:
        disabled: False
        batch_frequency: 1000
        max_images: 64
        increase_log_steps: True
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          N: 64
          n_rows: 8

  trainer:
    devices: 0,
    benchmark: True
    num_sanity_val_steps: 0
    accumulate_grad_batches: 1
    max_epochs: 20
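Note: these toy configs are consumed by the training entry point, but a minimal sketch of how such a file can be turned into a model object (assuming the repo's `sgm.util.instantiate_from_config` helper and OmegaConf, both of which appear elsewhere in this upload) looks like:

    from omegaconf import OmegaConf
    from sgm.util import instantiate_from_config

    cfg = OmegaConf.load("configs/example_training/toy/mnist_cond_with_ema.yaml")
    model = instantiate_from_config(cfg.model)  # builds the DiffusionEngine; use_ema=True keeps an EMA copy of the weights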
CCEdit-main/configs/inference/sd_2_1.yaml
ADDED
@@ -0,0 +1,66 @@
model:
  target: sgm.models.diffusion.DiffusionEngine
  params:
    scale_factor: 0.18215
    disable_first_stage_autocast: True

    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
      params:
        num_idx: 1000

        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

    network_config:
      target: sgm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        use_checkpoint: True
        use_fp16: True
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [4, 2, 1]
        num_res_blocks: 2
        channel_mult: [1, 2, 4, 4]
        num_head_channels: 64
        use_spatial_transformer: True
        use_linear_in_transformer: True
        transformer_depth: 1
        context_dim: 1024
        legacy: False

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          # crossattn cond
          - is_trainable: False
            input_key: txt
            target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder
            params:
              freeze: true
              layer: penultimate

    first_stage_config:
      target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [1, 2, 4, 4]
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
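Note: the first stage uses ch_mult [1, 2, 4, 4], i.e. three downsampling steps, so the autoencoder maps H x W x 3 images to (H/8) x (W/8) x 4 latents; scale_factor 0.18215 is the usual constant that rescales those latents to roughly unit variance before diffusion. A quick shape sanity check (hedged sketch, generic PyTorch):

    import torch
    H, W, f, z_ch = 512, 512, 2 ** 3, 4        # three downsample stages -> f = 8
    latent = torch.zeros(1, z_ch, H // f, W // f)
    print(latent.shape)                        # torch.Size([1, 4, 64, 64])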
CCEdit-main/configs/inference/sd_2_1_768.yaml
ADDED
@@ -0,0 +1,66 @@
model:
  target: sgm.models.diffusion.DiffusionEngine
  params:
    scale_factor: 0.18215
    disable_first_stage_autocast: True

    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
      params:
        num_idx: 1000

        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.VWeighting
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.VScaling
        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

    network_config:
      target: sgm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        use_checkpoint: True
        use_fp16: True
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [4, 2, 1]
        num_res_blocks: 2
        channel_mult: [1, 2, 4, 4]
        num_head_channels: 64
        use_spatial_transformer: True
        use_linear_in_transformer: True
        transformer_depth: 1
        context_dim: 1024
        legacy: False

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          # crossattn cond
          - is_trainable: False
            input_key: txt
            target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder
            params:
              freeze: true
              layer: penultimate

    first_stage_config:
      target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [1, 2, 4, 4]
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
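Note: the only difference from sd_2_1.yaml is the weighting/scaling pair (VWeighting/VScaling instead of EpsWeighting/EpsScaling), i.e. the 768 checkpoint is a v-prediction model. As a hedged sketch of the usual preconditioning conventions (the authoritative definitions live in sgm/modules/diffusionmodules/denoiser_scaling.py, which is not part of this upload):

    def eps_scaling(sigma):
        # eps-prediction: c_skip, c_out, c_in
        return 1.0, -sigma, 1.0 / (sigma ** 2 + 1.0) ** 0.5

    def v_scaling(sigma):
        # v-prediction: c_skip, c_out, c_in
        return (1.0 / (sigma ** 2 + 1.0),
                -sigma / (sigma ** 2 + 1.0) ** 0.5,
                1.0 / (sigma ** 2 + 1.0) ** 0.5)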
CCEdit-main/configs/inference/sd_xl_refiner.yaml
ADDED
@@ -0,0 +1,91 @@
model:
  target: sgm.models.diffusion.DiffusionEngine
  params:
    scale_factor: 0.13025
    disable_first_stage_autocast: True

    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
      params:
        num_idx: 1000

        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

    network_config:
      target: sgm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        adm_in_channels: 2560
        num_classes: sequential
        use_checkpoint: True
        in_channels: 4
        out_channels: 4
        model_channels: 384
        attention_resolutions: [4, 2]
        num_res_blocks: 2
        channel_mult: [1, 2, 4, 4]
        num_head_channels: 64
        use_spatial_transformer: True
        use_linear_in_transformer: True
        transformer_depth: 4
        context_dim: [1280, 1280, 1280, 1280] # 1280
        spatial_transformer_attn_type: softmax-xformers
        legacy: False

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          # crossattn and vector cond
          - is_trainable: False
            input_key: txt
            target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
            params:
              arch: ViT-bigG-14
              version: laion2b_s39b_b160k
              legacy: False
              freeze: True
              layer: penultimate
              always_return_pooled: True
          # vector cond
          - is_trainable: False
            input_key: original_size_as_tuple
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256 # multiplied by two
          # vector cond
          - is_trainable: False
            input_key: crop_coords_top_left
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256 # multiplied by two
          # vector cond
          - is_trainable: False
            input_key: aesthetic_score
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256 # multiplied by one

    first_stage_config:
      target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          attn_type: vanilla-xformers
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [1, 2, 4, 4]
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
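Note: adm_in_channels: 2560 is consistent with the conditioners declared below it: the pooled ViT-bigG text embedding contributes 1280 channels, original_size_as_tuple and crop_coords_top_left each contribute 2 x 256 = 512 (two scalars, outdim 256 each), and aesthetic_score contributes 1 x 256 = 256. A small check of that arithmetic:

    pooled_txt = 1280
    vector_conds = 2 * 256 + 2 * 256 + 1 * 256   # sizes, crops, aesthetic score
    assert pooled_txt + vector_conds == 2560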
CCEdit-main/configs/inference_ccedit/keyframe_no2ndca_depthmidas.yaml
ADDED
@@ -0,0 +1,93 @@
NumSteps: &NumSteps 30

model:
  target: sgm.models.diffusion.VideoDiffusionEngineTV2V
  params:
    use_ema: False # Default is False
    scale_factor: 0.18215
    disable_first_stage_autocast: True
    log_keys:
      - txt
    freeze_model: spatial

    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
      params:
        num_idx: 1000

        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

    network_config:
      target: sgm.modules.diffusionmodules.controlmodel.ControlledUNetModel3DTV2V
      params:
        use_checkpoint: False
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [4, 2, 1]
        num_res_blocks: 2
        channel_mult: [1, 2, 4, 4]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        legacy: False
        disable_temporal_text_ca: True
        controlnet_config:
          target: sgm.modules.diffusionmodules.controlmodel.ControlNet2D
          params:
            use_checkpoint: False
            in_channels: 4
            hint_channels: 3
            model_channels: 320
            attention_resolutions: [4, 2, 1]
            num_res_blocks: 2
            channel_mult: [1, 2, 4, 4]
            num_heads: 8
            use_spatial_transformer: True
            transformer_depth: 1
            context_dim: 768
            legacy: False
            control_scales: 1.0

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          # crossattn cond
          - is_trainable: False
            input_key: txt
            ucg_rate: 0.5
            legacy_ucg_value: ""
            target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
            params:
              freeze: true
          - is_trainable: False
            input_key: control_hint
            ucg_rate: 0.01
            target: sgm.modules.encoders.modules.DepthMidasEncoder

    first_stage_config:
      target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [1, 2, 4, 4]
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

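Note: the main UNet (ControlledUNetModel3DTV2V) is paired with a ControlNet2D branch that consumes a 3-channel control hint (here a MiDaS depth map produced by the DepthMidasEncoder conditioner). As a hedged, generic sketch of how ControlNet-style control is usually injected (the repo's actual wiring lives in sgm/modules/diffusionmodules/controlmodel.py, which is not part of this upload):

    # hypothetical illustration of control_scales: per-block residuals from the
    # control branch are scaled and added to the matching UNet feature maps
    def apply_control(unet_features, control_residuals, control_scale=1.0):
        return [h + control_scale * c for h, c in zip(unet_features, control_residuals)]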
CCEdit-main/configs/inference_ccedit/keyframe_ref_cp_no2ndca_add_cfca_depthzoe.yaml
ADDED
@@ -0,0 +1,130 @@
model:
  target: sgm.models.diffusion.VideoDiffusionEngineTV2V
  params:
    use_ema: False # Default is False
    scale_factor: 0.18215
    disable_first_stage_autocast: True
    log_keys:
      - txt
    freeze_model: spatial

    scheduler_config:
      target: sgm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 1000 ]
        cycle_lengths: [ 10000000000000 ]
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
      params:
        num_idx: 1000

        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

    network_config:
      target: sgm.modules.diffusionmodules.controlmodel.ControlledUNetModel3DTV2V
      params:
        use_checkpoint: True
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [4, 2, 1]
        num_res_blocks: 2
        channel_mult: [1, 2, 4, 4]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        legacy: False
        disable_temporal_text_ca: True
        # -> use "temporal_ca" modules
        enable_attention3d_crossframe: True
        ST3DCA_ca_type: 'center_self'
        # crossframe_type: 'reference' # not use the reference image as k,v, so comment it.
        controlnet_config:
          target: sgm.modules.diffusionmodules.controlmodel.ControlNet2D
          params:
            use_checkpoint: True
            in_channels: 4
            hint_channels: 3
            model_channels: 320
            attention_resolutions: [4, 2, 1]
            num_res_blocks: 2
            channel_mult: [1, 2, 4, 4]
            num_heads: 8
            use_spatial_transformer: True
            transformer_depth: 1
            context_dim: 768
            legacy: False
            control_scales: 1.0
        controlnet_img_config: # process the anchor frame
          target: sgm.modules.diffusionmodules.controlmodel.ControlNet2D
          params:
            use_checkpoint: True
            in_channels: 4
            hint_channels: 3
            model_channels: 320
            attention_resolutions: [4, 2, 1]
            num_res_blocks: 2
            channel_mult: [1, 2, 4, 4]
            num_heads: 8
            use_spatial_transformer: True
            transformer_depth: 1
            context_dim: 768
            legacy: False
            # -> add on center frame, strengthen the control
            control_scales: 1.0
            # control_scales: 0.0 # use crossattention, instead of add in controlnet
            # -> not add the noised x to controlnet_img
            no_add_x: True # no need to add x
            set_input_hint_block_as_identity: True # ATTENTION: newly added. default: False
            # -> disable the text cross attention in controlnet_img
            disable_text_ca: True

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          # crossattn cond
          - is_trainable: False
            input_key: txt
            ucg_rate: 0.5
            legacy_ucg_value: ""
            target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
            params:
              freeze: true
          - is_trainable: False
            input_key: control_hint
            ucg_rate: 0.0
            target: sgm.modules.encoders.modules.DepthZoeEncoder
          - is_trainable: False
            input_key: cond_img
            ucg_rate: 0.0
            target: sgm.modules.encoders.modules.VAEEmbedder

    first_stage_config:
      target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [1, 2, 4, 4]
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
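Note: this config wires three conditioners into GeneralConditioner, keyed by the batch entries they read: "txt" (CLIP text cross-attention), "control_hint" (ZoeDepth maps feeding the ControlNet branch) and "cond_img" (the reference/anchor frame encoded by the VAEEmbedder for controlnet_img_config). A hedged sketch of the batch an inference script would therefore need to assemble (key names come from the config; the tensor shapes are illustrative assumptions only):

    import torch
    batch = {
        "txt": ["a watercolor painting of a sailing boat"],   # one prompt per clip
        "control_hint": torch.zeros(1, 3, 8, 256, 256),        # per-frame depth hints, assumed (B, C, T, H, W)
        "cond_img": torch.zeros(1, 3, 256, 256),               # edited anchor frame, assumed (B, C, H, W)
    }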
CCEdit-main/scripts/__init__.py
ADDED
File without changes
CCEdit-main/sgm/__init__.py
ADDED
@@ -0,0 +1,3 @@
from .data import StableDataModuleFromConfig
from .models import AutoencodingEngine, DiffusionEngine
from .util import instantiate_from_config
CCEdit-main/sgm/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (318 Bytes)
CCEdit-main/sgm/__pycache__/util.cpython-39.pyc
ADDED
Binary file (8.22 kB)
CCEdit-main/sgm/data/__init__.py
ADDED
@@ -0,0 +1 @@
from .dataset import StableDataModuleFromConfig
CCEdit-main/sgm/data/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (199 Bytes)
CCEdit-main/sgm/data/__pycache__/dataset.cpython-39.pyc
ADDED
Binary file (3.12 kB)
CCEdit-main/sgm/data/cifar10.py
ADDED
@@ -0,0 +1,67 @@
import torchvision
import pytorch_lightning as pl
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset


class CIFAR10DataDictWrapper(Dataset):
    def __init__(self, dset):
        super().__init__()
        self.dset = dset

    def __getitem__(self, i):
        x, y = self.dset[i]
        return {"jpg": x, "cls": y}

    def __len__(self):
        return len(self.dset)


class CIFAR10Loader(pl.LightningDataModule):
    def __init__(self, batch_size, num_workers=0, shuffle=True):
        super().__init__()

        transform = transforms.Compose(
            [transforms.ToTensor(), transforms.Lambda(lambda x: x * 2.0 - 1.0)]
        )

        self.batch_size = batch_size
        self.num_workers = num_workers
        self.shuffle = shuffle
        self.train_dataset = CIFAR10DataDictWrapper(
            torchvision.datasets.CIFAR10(
                root=".data/", train=True, download=True, transform=transform
            )
        )
        self.test_dataset = CIFAR10DataDictWrapper(
            torchvision.datasets.CIFAR10(
                root=".data/", train=False, download=True, transform=transform
            )
        )

    def prepare_data(self):
        pass

    def train_dataloader(self):
        return DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            num_workers=self.num_workers,
        )

    def test_dataloader(self):
        return DataLoader(
            self.test_dataset,
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            num_workers=self.num_workers,
        )

    def val_dataloader(self):
        return DataLoader(
            self.test_dataset,
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            num_workers=self.num_workers,
        )
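Note: the dict wrapper exists so that batches arrive keyed as "jpg" and "cls", matching the input_key fields used by the conditioner configs above. A short usage sketch (downloads CIFAR-10 into .data/ on first use):

    loader = CIFAR10Loader(batch_size=64, num_workers=0)
    batch = next(iter(loader.train_dataloader()))
    print(batch["jpg"].shape, batch["cls"].shape)  # torch.Size([64, 3, 32, 32]) torch.Size([64])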
CCEdit-main/sgm/data/dataset.py
ADDED
@@ -0,0 +1,80 @@
from typing import Optional

import torchdata.datapipes.iter
import webdataset as wds
from omegaconf import DictConfig
from pytorch_lightning import LightningDataModule

try:
    from sdata import create_dataset, create_dummy_dataset, create_loader
except ImportError as e:
    print("#" * 100)
    print("Datasets not yet available")
    print("to enable, we need to add stable-datasets as a submodule")
    print("please use ``git submodule update --init --recursive``")
    print("and do ``pip install -e stable-datasets/`` from the root of this repo")
    print("#" * 100)
    exit(1)


class StableDataModuleFromConfig(LightningDataModule):
    def __init__(
        self,
        train: DictConfig,
        validation: Optional[DictConfig] = None,
        test: Optional[DictConfig] = None,
        skip_val_loader: bool = False,
        dummy: bool = False,
    ):
        super().__init__()
        self.train_config = train
        assert (
            "datapipeline" in self.train_config and "loader" in self.train_config
        ), "train config requires the fields `datapipeline` and `loader`"

        self.val_config = validation
        if not skip_val_loader:
            if self.val_config is not None:
                assert (
                    "datapipeline" in self.val_config and "loader" in self.val_config
                ), "validation config requires the fields `datapipeline` and `loader`"
            else:
                print(
                    "Warning: No Validation datapipeline defined, using that one from training"
                )
                self.val_config = train

        self.test_config = test
        if self.test_config is not None:
            assert (
                "datapipeline" in self.test_config and "loader" in self.test_config
            ), "test config requires the fields `datapipeline` and `loader`"

        self.dummy = dummy
        if self.dummy:
            print("#" * 100)
            print("USING DUMMY DATASET: HOPE YOU'RE DEBUGGING ;)")
            print("#" * 100)

    def setup(self, stage: str) -> None:
        print("Preparing datasets")
        if self.dummy:
            data_fn = create_dummy_dataset
        else:
            data_fn = create_dataset

        self.train_datapipeline = data_fn(**self.train_config.datapipeline)
        if self.val_config:
            self.val_datapipeline = data_fn(**self.val_config.datapipeline)
        if self.test_config:
            self.test_datapipeline = data_fn(**self.test_config.datapipeline)

    def train_dataloader(self) -> torchdata.datapipes.iter.IterDataPipe:
        loader = create_loader(self.train_datapipeline, **self.train_config.loader)
        return loader

    def val_dataloader(self) -> wds.DataPipeline:
        return create_loader(self.val_datapipeline, **self.val_config.loader)

    def test_dataloader(self) -> wds.DataPipeline:
        return create_loader(self.test_datapipeline, **self.test_config.loader)
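Note: StableDataModuleFromConfig only checks that each split config carries a `datapipeline` and a `loader` field and forwards them to the stable-datasets helpers. A hedged sketch of the minimal structure it expects (the placeholder keys inside `datapipeline` are illustrative assumptions; the real schema comes from the stable-datasets submodule, which is not included here):

    from omegaconf import OmegaConf

    train_cfg = OmegaConf.create({
        "datapipeline": {"urls": ["..."]},                 # illustrative placeholder
        "loader": {"batch_size": 4, "num_workers": 2},
    })
    # data = StableDataModuleFromConfig(train=train_cfg)   # requires the sdata package to import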
CCEdit-main/sgm/data/detaset_webvid.py
ADDED
@@ -0,0 +1,182 @@
import pytorch_lightning as pl
from functools import partial
from sgm.util import (
    exists,
    instantiate_from_config,
    isheatmap,
)
import torch
import numpy as np
from torch.utils.data import random_split, DataLoader, Dataset, Subset, IterableDataset
from abc import abstractmethod


class Txt2ImgIterableBaseDataset(IterableDataset):
    """
    Define an interface to make the IterableDatasets for text2img data chainable
    """

    def __init__(self, num_records=0, valid_ids=None, size=256):
        super().__init__()
        self.num_records = num_records
        self.valid_ids = valid_ids
        self.sample_ids = valid_ids
        self.size = size

        print(f"{self.__class__.__name__} dataset contains {self.__len__()} examples.")

    def __len__(self):
        return self.num_records

    @abstractmethod
    def __iter__(self):
        pass


class WrappedDataset(Dataset):
    """Wraps an arbitrary object with __len__ and __getitem__ into a pytorch dataset"""

    def __init__(self, dataset):
        self.data = dataset

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]


def worker_init_fn(_):
    worker_info = torch.utils.data.get_worker_info()

    dataset = worker_info.dataset
    worker_id = worker_info.id

    if isinstance(dataset, Txt2ImgIterableBaseDataset):
        split_size = dataset.num_records // worker_info.num_workers
        # reset num_records to the true number to retain reliable length information
        dataset.sample_ids = dataset.valid_ids[
            worker_id * split_size : (worker_id + 1) * split_size
        ]
        current_id = np.random.choice(len(np.random.get_state()[1]), 1)
        return np.random.seed(np.random.get_state()[1][current_id] + worker_id)
    else:
        return np.random.seed(np.random.get_state()[1][0] + worker_id)


class DataModuleFromConfig(pl.LightningDataModule):
    def __init__(
        self,
        batch_size,
        train=None,
        validation=None,
        test=None,
        predict=None,
        wrap=False,
        num_workers=None,
        shuffle_test_loader=False,
        use_worker_init_fn=False,
        shuffle_val_dataloader=False,
    ):
        super().__init__()
        self.batch_size = batch_size
        self.dataset_configs = dict()
        self.num_workers = num_workers if num_workers is not None else batch_size * 2
        self.use_worker_init_fn = use_worker_init_fn
        if train is not None:
            self.dataset_configs["train"] = train
            self.train_dataloader = self._train_dataloader
        if validation is not None:
            self.dataset_configs["validation"] = validation
            self.val_dataloader = partial(
                self._val_dataloader, shuffle=shuffle_val_dataloader
            )
        if test is not None:
            self.dataset_configs["test"] = test
            self.test_dataloader = partial(
                self._test_dataloader, shuffle=shuffle_test_loader
            )
        if predict is not None:
            self.dataset_configs["predict"] = predict
            self.predict_dataloader = self._predict_dataloader
        self.wrap = wrap

    def prepare_data(self):
        for data_cfg in self.dataset_configs.values():
            instantiate_from_config(data_cfg)

    def setup(self, stage=None):
        self.datasets = dict(
            (k, instantiate_from_config(self.dataset_configs[k]))
            for k in self.dataset_configs
        )
        if self.wrap:
            for k in self.datasets:
                self.datasets[k] = WrappedDataset(self.datasets[k])

    def _train_dataloader(self):
        is_iterable_dataset = isinstance(
            self.datasets["train"], Txt2ImgIterableBaseDataset
        )
        if is_iterable_dataset or self.use_worker_init_fn:
            init_fn = worker_init_fn
        else:
            init_fn = None
        return DataLoader(
            self.datasets["train"],
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            shuffle=False if is_iterable_dataset else True,
            worker_init_fn=init_fn,
        )

    def _val_dataloader(self, shuffle=False):
        if (
            isinstance(self.datasets["validation"], Txt2ImgIterableBaseDataset)
            or self.use_worker_init_fn
        ):
            init_fn = worker_init_fn
        else:
            init_fn = None
        return DataLoader(
            self.datasets["validation"],
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            worker_init_fn=init_fn,
            shuffle=shuffle,
        )

    def _test_dataloader(self, shuffle=False):
        is_iterable_dataset = isinstance(
            self.datasets["train"], Txt2ImgIterableBaseDataset
        )
        if is_iterable_dataset or self.use_worker_init_fn:
            init_fn = worker_init_fn
        else:
            init_fn = None

        # do not shuffle dataloader for iterable dataset
        shuffle = shuffle and (not is_iterable_dataset)

        return DataLoader(
            self.datasets["test"],
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            worker_init_fn=init_fn,
            shuffle=shuffle,
        )

    def _predict_dataloader(self, shuffle=False):
        if (
            isinstance(self.datasets["predict"], Txt2ImgIterableBaseDataset)
            or self.use_worker_init_fn
        ):
            init_fn = worker_init_fn
        else:
            init_fn = None
        return DataLoader(
            self.datasets["predict"],
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            worker_init_fn=init_fn,
        )
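Note: two behaviours in this module are worth calling out: num_workers defaults to batch_size * 2 when not given, and for iterable datasets worker_init_fn slices valid_ids so that each worker iterates a disjoint shard. A small worked example of that sharding, mirroring the slice used in worker_init_fn:

    num_records, num_workers = 10, 4
    valid_ids = list(range(num_records))
    split_size = num_records // num_workers            # 2
    shards = [valid_ids[w * split_size:(w + 1) * split_size] for w in range(num_workers)]
    print(shards)  # [[0, 1], [2, 3], [4, 5], [6, 7]] -- ids 8 and 9 are dropped by the floor division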
CCEdit-main/sgm/data/mnist.py
ADDED
@@ -0,0 +1,85 @@
import torchvision
import pytorch_lightning as pl
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset


class MNISTDataDictWrapper(Dataset):
    def __init__(self, dset):
        super().__init__()
        self.dset = dset

    def __getitem__(self, i):
        x, y = self.dset[i]
        return {"jpg": x, "cls": y}

    def __len__(self):
        return len(self.dset)


class MNISTLoader(pl.LightningDataModule):
    def __init__(self, batch_size, num_workers=0, prefetch_factor=2, shuffle=True):
        super().__init__()

        transform = transforms.Compose(
            [transforms.ToTensor(), transforms.Lambda(lambda x: x * 2.0 - 1.0)]
        )

        self.batch_size = batch_size
        self.num_workers = num_workers
        self.prefetch_factor = prefetch_factor if num_workers > 0 else 0
        self.shuffle = shuffle
        self.train_dataset = MNISTDataDictWrapper(
            torchvision.datasets.MNIST(
                root=".data/", train=True, download=True, transform=transform
            )
        )
        self.test_dataset = MNISTDataDictWrapper(
            torchvision.datasets.MNIST(
                root=".data/", train=False, download=True, transform=transform
            )
        )

    def prepare_data(self):
        pass

    def train_dataloader(self):
        return DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            num_workers=self.num_workers,
            prefetch_factor=self.prefetch_factor,
        )

    def test_dataloader(self):
        return DataLoader(
            self.test_dataset,
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            num_workers=self.num_workers,
            prefetch_factor=self.prefetch_factor,
        )

    def val_dataloader(self):
        return DataLoader(
            self.test_dataset,
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            num_workers=self.num_workers,
            prefetch_factor=self.prefetch_factor,
        )


if __name__ == "__main__":
    dset = MNISTDataDictWrapper(
        torchvision.datasets.MNIST(
            root=".data/",
            train=False,
            download=True,
            transform=transforms.Compose(
                [transforms.ToTensor(), transforms.Lambda(lambda x: x * 2.0 - 1.0)]
            ),
        )
    )
    ex = dset[0]
CCEdit-main/sgm/data/webvid/base_video_dataset.py
ADDED
@@ -0,0 +1,521 @@
import torch
import os
import random
from abc import abstractmethod

import av
import cv2
import decord
import numpy as np
from PIL import Image
from torch.utils.data import Dataset, get_worker_info
from torchvision import transforms


# def init_transform_dict(
#     input_res_h=224,
#     input_res_w=224,
#     randcrop_scale=(0.5, 1.0),
#     color_jitter=(0, 0, 0),
#     norm_mean=(0.5, 0.5, 0.5),
#     norm_std=(0.5, 0.5, 0.5),
# ):
#     # todo: This part need to be discussed and designed carefully.
#     normalize = transforms.Normalize(mean=norm_mean, std=norm_std)
#     tsfm_dict = {
#         "train": transforms.Compose(
#             [
#                 transforms.RandomResizedCrop(
#                     (input_res_h, input_res_w), scale=randcrop_scale, antialias=True
#                 ),
#                 normalize,
#             ]
#         ),
#         "val": transforms.Compose(
#             [
#                 # todo: should we use crop for validation and test?
#                 transforms.Resize((input_res_h, input_res_w), antialias=True),
#                 normalize,
#             ]
#         ),
#         "test": transforms.Compose(
#             [
#                 transforms.Resize((input_res_h, input_res_w), antialias=True),
#                 normalize,
#             ]
#         ),
#     }
#     return tsfm_dict
def init_transform_dict(
    input_res_h=224,
    input_res_w=224,
    randcrop_scale=(0.5, 1.0),
    color_jitter=(0, 0, 0),
    norm_mean=(0.5, 0.5, 0.5),
    norm_std=(0.5, 0.5, 0.5),
):
    # todo: this implementation might cause bug sometimes.
    # todo: make it safer, please.
    normalize = transforms.Normalize(mean=norm_mean, std=norm_std)
    tsfm_dict = {
        "train": transforms.Compose(
            [
                transforms.Resize(input_res_h, antialias=True),
                transforms.CenterCrop((input_res_h, input_res_w)),
                normalize,
            ]
        ),
        "val": transforms.Compose(
            [
                transforms.Resize(input_res_h, antialias=True),
                transforms.CenterCrop((input_res_h, input_res_w)),
                normalize,
            ]
        ),
        "test": transforms.Compose(
            [
                transforms.Resize(input_res_h, antialias=True),
                transforms.CenterCrop((input_res_h, input_res_w)),
                normalize,
            ]
        ),
    }
    return tsfm_dict


class TextVideoDataset(Dataset):
    def __init__(
        self,
        dataset_name,
        text_params,
        video_params,
        data_dir,
        metadata_dir=None,
        metadata_folder_name=None,  # "webvid10m_meta",
        split="train",
        tsfms=None,
        cut=None,
        key=None,
        subsample=1,
        sliding_window_stride=-1,
        reader="decord",
        first_stage_key="video",
        cond_stage_key="txt",
        skip_missing_files=True,
        use_control_hint=False,
        random_cond_img=False,
    ):
        # print(dataset_name, text_params, video_params)
        # WebVid {'input': 'text'} {'input_res': 224, 'num_frames': 1, 'loading': 'lax'}
        self.dataset_name = dataset_name
        self.text_params = text_params
        self.video_params = video_params
        # check for environment variables
        self.data_dir = os.path.expandvars(data_dir)
        if metadata_dir is not None:
            self.metadata_dir = os.path.expandvars(metadata_dir)
        else:
            self.metadata_dir = self.data_dir
        # added parameters
        self.metadata_folder_name = metadata_folder_name
        self.first_stage_key = first_stage_key
        self.cond_stage_key = cond_stage_key
        self.skip = skip_missing_files
        self.lack_files = []
        self.split = split
        self.key = key
        tsfm_params = (
            {}
            if "tsfm_params" not in video_params.keys()
            else video_params["tsfm_params"]
        )
        # tsfm_params['input_res'] = video_params['input_res']
        tsfm_params["input_res_h"] = video_params["input_res_h"]
        tsfm_params["input_res_w"] = video_params["input_res_w"]
        tsfm_dict = init_transform_dict(**tsfm_params)

        if split not in ["train", "val", "test"]:
            print(
                'Warning: split is not in ["train", "val", "test"], '
                'what you set is "{}", '
                'set it to "train"'.format(split)
            )
            split = "train"

        tsfms = tsfm_dict[split]

        self.transforms = tsfms
        self.cut = cut
        self.subsample = subsample
        self.sliding_window_stride = sliding_window_stride
        self.video_reader = video_reader[reader]
        self.label_type = "caption"
        self.frame_sample = video_params.get("frame_sample", "proportional")
        self._load_metadata()
        if self.sliding_window_stride != -1:
            if self.split != "test":
                raise ValueError(
                    "Fixing frame sampling is for test time only. can remove but..."
                )
            self._fix_temporal_samples()
        self.use_control_hint = use_control_hint
        self.random_cond_img = random_cond_img

    @abstractmethod
    def _load_metadata(self):
        raise NotImplementedError("Metadata loading must be implemented by subclass")

    @abstractmethod
    def _get_video_path(self, sample):
        raise NotImplementedError(
            "Get video path function must be implemented by subclass"
        )

    def _get_caption(self, sample):
        raise NotImplementedError(
            "Get caption function must be implemented by subclass"
        )

    def _get_video_lens(self):
        vlen_li = []
        for idx, row in self.metadata.iterrows():
            video_path = self._get_video_path(row)[0]
            vlen_li.append(get_video_len(video_path))

        return vlen_li

    def _fix_temporal_samples(self):
        self.metadata["vlen"] = self._get_video_lens()
        self.metadata["frame_intervals"] = self.metadata["vlen"].apply(
            lambda x: np.linspace(
                start=0, stop=x, num=min(x, self.video_params["num_frames"]) + 1
            ).astype(int)
        )
        self.metadata["fix_start"] = self.metadata["frame_intervals"].apply(
            lambda x: np.arange(0, int(x[-1] / len(x - 1)), self.sliding_window_stride)
        )
        self.metadata = self.metadata.explode("fix_start")

    def __len__(self):
        return len(self.metadata)

    def __getitem__(self, item):
        item = item % len(self.metadata)
        sample = self.metadata.iloc[item]
        video_fp, rel_fp = self._get_video_path(sample)
        # if not os.path.exists(video_fp):
        #     return self.__getitem__(np.random.choice(self.__len__()))
        caption = self._get_caption(sample)

        video_loading = self.video_params.get("loading", "strict")  #
        # frame_sample = 'rand'
        fix_start = None
        # if self.split == 'test':
        #     frame_sample = 'uniform'
        if self.sliding_window_stride != -1:
            fix_start = sample["fix_start"]

        try:
            if os.path.isfile(video_fp):
                # imgs, idxs = self.video_reader(video_fp, self.video_params['num_frames'], frame_sample,
                #                                fix_start=fix_start)
                if self.frame_sample == "equally spaced":
                    sample_factor = self.video_params.get("es_interval", 10)
                elif self.frame_sample == "proportional":
                    sample_factor = self.video_params.get("prop_factor", 3)
                imgs, idxs = self.video_reader(
                    video_fp,
                    self.video_params["num_frames"],
                    self.frame_sample,
                    fix_start=fix_start,
                    sample_factor=sample_factor,
                )
                if self.random_cond_img:
                    random_cond_img, _ = self.video_reader(
                        video_fp,
                        1,
                        self.frame_sample,
                        fix_start=fix_start,
                        sample_factor=sample_factor,
                    )
            else:
                print_str = f"Warning: missing video file {video_fp}."
                if video_fp not in self.lack_files:
                    self.lack_files.append(video_fp)
                if self.skip:
                    print_str += " Resampling another video."
                    print(print_str)
                    return self.__getitem__(np.random.choice(self.__len__()))
                else:
                    print(print_str)
                    assert False

        except Exception as e:
            if video_loading == "strict":
                raise ValueError(
                    f"Video loading failed for {video_fp}, video loading for this dataset is strict."
                ) from e
            else:
                print("Warning: using the pure black image as the frame sample")
                # imgs = Image.new('RGB', (self.video_params['input_res'], self.video_params['input_res']), (0, 0, 0))
                imgs = Image.new(
                    "RGB",
                    (
                        self.video_params["input_res_w"],
                        self.video_params["input_res_h"],
                    ),
                    (0, 0, 0),
                )
                imgs = transforms.ToTensor()(imgs).unsqueeze(0)
                if self.random_cond_img:
                    random_cond_img = Image.new(
                        "RGB",
                        (
                            self.video_params["input_res_w"],
                            self.video_params["input_res_h"],
                        ),
                        (0, 0, 0),
                    )
                    random_cond_img = transforms.ToTensor()(random_cond_img).unsqueeze(0)

        if self.transforms is not None:
            imgs = self.transforms(imgs)  # normalize or 2 * x - 1 ?

        # final = torch.zeros([self.video_params['num_frames'], 3, self.video_params['input_res'],
        #                      self.video_params['input_res']])
        final = torch.zeros(
            [
                self.video_params["num_frames"],
                3,
                self.video_params["input_res_h"],
                self.video_params["input_res_w"],
            ]
        )

        final[: imgs.shape[0]] = imgs
        if self.random_cond_img:
            # import pdb; pdb.set_trace()
            # import torchvision
            # torchvision.utils.save_image(random_cond_img, 'debug_random_cond_img.png', normalize=True)
            # torchvision.utils.save_image(imgs, 'debug_imgs.png', normalize=True)
            cond_img = self.transforms(random_cond_img).squeeze(0)
        else:
            cond_img = final[final.shape[0] // 2, ...]
        final = final.permute(1, 0, 2, 3)  # (C, T, H, W)
        interpolate_first_last = final[:, [0, -1], ...]

        meta_arr = {
            "raw_captions": caption,
            "paths": rel_fp,
            "dataset": self.dataset_name,
        }
        data = {
            self.first_stage_key: final,
            self.cond_stage_key: caption,
            "cond_img": cond_img,
            'interpolate_first_last': interpolate_first_last,
            "original_size_as_tuple": torch.tensor(
                [self.video_params["input_res_w"], self.video_params["input_res_h"]]
            ),  # TODO only for debug
            "target_size_as_tuple": torch.tensor(
                [self.video_params["input_res_w"], self.video_params["input_res_h"]]
            ),  # TODO only for debug
            "crop_coords_top_left": torch.tensor([0, 0]),  # TODO only for debug
            "meta": meta_arr,
        }
        if self.use_control_hint:
            data["control_hint"] = final
        return data


class TextImageDataset(TextVideoDataset):
    def __getitem__(self, item):
        item = item % len(self.metadata)
        sample = self.metadata.iloc[item]
        video_fp, rel_fp = self._get_video_path(sample)
        caption = self._get_caption(sample)

        video_loading = self.video_params.get("loading", "strict")

        try:
            img = Image.open(video_fp).convert("RGB")
        except:
            if video_loading == "strict":
                raise ValueError(
                    f"Image loading failed for {video_fp}, image loading for this dataset is strict."
                )
            else:
                # img = Image.new('RGB', (self.video_params['input_res'], self.video_params['input_res']), (0, 0, 0))
                img = Image.new(
                    "RGB",
                    (
                        self.video_params["input_res_w"],
                        self.video_params["input_res_h"],
                    ),
                    (0, 0, 0),
                )

        # convert to tensor because video transforms don't, expand such that its a 1-frame video.
        img = transforms.ToTensor()(img).unsqueeze(0)
        if self.transforms is not None:
            img = self.transforms(img)
        meta_arr = {
            "raw_captions": caption,
            "paths": rel_fp,
            "dataset": self.dataset_name,
        }
        data = {"video": img, "text": caption, "meta": meta_arr}
        return data


def sample_frames(
    num_frames, vlen, sample="rand", fix_start=None, **kwargs
):  # TBD, what do you need
    """
    num_frames: The number of frames to sample.
    vlen: The length of the video.
    sample: The sampling method.
        choices of frame_sample:
        - 'equally spaced': sample frames equally spaced
            e.g., 1s video has 30 frames, when 'es_interval'=8, we sample frames with spacing of 8
        - 'proportional': sample frames proportional to the length of the frames in one second
            e.g., 1s video has 30 frames, when 'prop_factor'=3, we sample frames with spacing of 30/3=10
        - 'random': sample frames randomly (not recommended)
        - 'uniform': sample frames uniformly (not recommended)
    fix_start: The starting frame index. If it is not None, then it will be used as the starting frame index.
    """
    acc_samples = min(num_frames, vlen)
    if sample in ["rand", "uniform"]:
        intervals = np.linspace(start=0, stop=vlen, num=acc_samples + 1).astype(int)
        ranges = []
        for idx, interv in enumerate(intervals[:-1]):
            ranges.append((interv, intervals[idx + 1] - 1))
        if sample == "rand":
            frame_idxs = [random.choice(range(x[0], x[1])) for x in ranges]
        elif fix_start is not None:
            frame_idxs = [x[0] + fix_start for x in ranges]
        elif sample == "uniform":
            frame_idxs = [(x[0] + x[1]) // 2 for x in ranges]
    elif sample in ["equally spaced", "proportional"]:
        if sample == "equally spaced":
            raise NotImplementedError  # need to pass in the corresponding parameters
        else:
            interval = round(kwargs["fps"] / kwargs["sample_factor"])
        needed_frames = (acc_samples - 1) * interval

        if fix_start is not None:
            start = fix_start
        else:
            if vlen - needed_frames - 1 < 0:
                start = 0
            else:
                start = random.randint(0, vlen - needed_frames - 1)
        frame_idxs = np.linspace(
            start=start, stop=min(vlen - 1, start + needed_frames), num=acc_samples
        ).astype(int)
    else:
        raise NotImplementedError

    return frame_idxs


def read_frames_cv2(video_path, num_frames, sample="rand", fix_start=None, **kwargs):
    cap = cv2.VideoCapture(video_path)
    assert cap.isOpened()
    vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
| 426 |
+
# frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start)
|
| 427 |
+
# get indexes of sampled frames
|
| 428 |
+
fps = cap.get(cv2.CAP_PROP_FPS) # not verified yet, might cause bug.
|
| 429 |
+
frame_idxs = sample_frames(
|
| 430 |
+
num_frames,
|
| 431 |
+
vlen,
|
| 432 |
+
sample=sample,
|
| 433 |
+
fix_start=fix_start,
|
| 434 |
+
fps=fps,
|
| 435 |
+
sample_factor=kwargs["sample_factor"],
|
| 436 |
+
)
|
| 437 |
+
frames = []
|
| 438 |
+
success_idxs = []
|
| 439 |
+
for index in frame_idxs:
|
| 440 |
+
cap.set(cv2.CAP_PROP_POS_FRAMES, index - 1)
|
| 441 |
+
ret, frame = cap.read()
|
| 442 |
+
if ret:
|
| 443 |
+
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
| 444 |
+
frame = torch.from_numpy(frame)
|
| 445 |
+
# (H x W x C) to (C x H x W)
|
| 446 |
+
frame = frame.permute(2, 0, 1)
|
| 447 |
+
frames.append(frame)
|
| 448 |
+
success_idxs.append(index)
|
| 449 |
+
else:
|
| 450 |
+
pass
|
| 451 |
+
# print(frame_idxs, ' fail ', index, f' (vlen {vlen})')
|
| 452 |
+
|
| 453 |
+
frames = torch.stack(frames).float() / 255
|
| 454 |
+
cap.release()
|
| 455 |
+
return frames, success_idxs
|
| 456 |
+
|
| 457 |
+
|
| 458 |
+
def read_frames_av(video_path, num_frames, sample="rand", fix_start=None, **kwargs):
|
| 459 |
+
reader = av.open(video_path)
|
| 460 |
+
try:
|
| 461 |
+
frames = []
|
| 462 |
+
frames = [
|
| 463 |
+
torch.from_numpy(f.to_rgb().to_ndarray()) for f in reader.decode(video=0)
|
| 464 |
+
]
|
| 465 |
+
except (RuntimeError, ZeroDivisionError) as exception:
|
| 466 |
+
print(
|
| 467 |
+
"{}: WEBM reader cannot open {}. Empty "
|
| 468 |
+
"list returned.".format(type(exception).__name__, video_path)
|
| 469 |
+
)
|
| 470 |
+
vlen = len(frames)
|
| 471 |
+
# frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start)
|
| 472 |
+
fps = reader.streams.video[0].average_rate # not verified yet, might cause bug.
|
| 473 |
+
frame_idxs = sample_frames(
|
| 474 |
+
num_frames,
|
| 475 |
+
vlen,
|
| 476 |
+
sample=sample,
|
| 477 |
+
fix_start=fix_start,
|
| 478 |
+
fps=fps,
|
| 479 |
+
sample_factor=kwargs["sample_factor"],
|
| 480 |
+
)
|
| 481 |
+
frames = torch.stack([frames[idx] for idx in frame_idxs]).float() / 255
|
| 482 |
+
frames = frames.permute(0, 3, 1, 2)
|
| 483 |
+
return frames, frame_idxs
|
| 484 |
+
|
| 485 |
+
|
| 486 |
+
decord.bridge.set_bridge("torch")
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
def read_frames_decord(video_path, num_frames, sample="rand", fix_start=None, **kwargs):
|
| 490 |
+
video_reader = decord.VideoReader(video_path, num_threads=0)
|
| 491 |
+
vlen = len(video_reader)
|
| 492 |
+
# frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start)
|
| 493 |
+
fps = video_reader.get_avg_fps() # note that the fps here is float.
|
| 494 |
+
frame_idxs = sample_frames(
|
| 495 |
+
num_frames,
|
| 496 |
+
vlen,
|
| 497 |
+
sample=sample,
|
| 498 |
+
fix_start=fix_start,
|
| 499 |
+
fps=fps,
|
| 500 |
+
sample_factor=kwargs["sample_factor"],
|
| 501 |
+
)
|
| 502 |
+
frames = video_reader.get_batch(frame_idxs)
|
| 503 |
+
frames = frames.float() / 255
|
| 504 |
+
frames = frames.permute(0, 3, 1, 2)
|
| 505 |
+
return frames, frame_idxs
|
| 506 |
+
|
| 507 |
+
|
| 508 |
+
def get_video_len(video_path):
|
| 509 |
+
cap = cv2.VideoCapture(video_path)
|
| 510 |
+
if not (cap.isOpened()):
|
| 511 |
+
return False
|
| 512 |
+
vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
| 513 |
+
cap.release()
|
| 514 |
+
return vlen
|
| 515 |
+
|
| 516 |
+
|
| 517 |
+
video_reader = {
|
| 518 |
+
"av": read_frames_av,
|
| 519 |
+
"cv2": read_frames_cv2,
|
| 520 |
+
"decord": read_frames_decord,
|
| 521 |
+
}
|
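A minimal usage sketch (illustrative only, not part of the commit; the clip path is made up): the video_reader dict above dispatches to one of the three backends, and with sample="proportional" the indices returned by sample_frames end up spaced by roughly round(fps / sample_factor) frames.

    # hypothetical file; assumes the decord backend is installed
    frames, idxs = video_reader["decord"](
        "example_clip.mp4", num_frames=8, sample="proportional", sample_factor=3
    )
    print(frames.shape)  # (8, 3, H, W), float values scaled to [0, 1]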
CCEdit-main/sgm/data/webvid/webvid_dataset.py ADDED
@@ -0,0 +1,152 @@
import os

import pandas as pd

from .base_video_dataset import TextVideoDataset


class WebVid(TextVideoDataset):
    """
    WebVid Dataset.
    Assumes webvid data is structured as follows.
    Webvid/
        videos/
            000001_000050/  ($page_dir)
                1.mp4  (videoid.mp4)
                ...
                5000.mp4
            ...
    """

    def _load_metadata(self):
        assert self.metadata_folder_name is not None
        assert self.cut is not None
        metadata_dir = os.path.join(self.metadata_dir, self.metadata_folder_name)
        if self.key is None:
            metadata_fp = os.path.join(
                metadata_dir, f"results_{self.cut}_{self.split}.csv"
            )
        else:
            metadata_fp = os.path.join(
                metadata_dir, f"results_{self.cut}_{self.split}_{self.key}.csv"
            )
        print(metadata_fp)
        metadata = pd.read_csv(
            metadata_fp,
            on_bad_lines="skip",
            encoding="ISO-8859-1",
            engine="python",
            sep=",",
        )

        if self.subsample < 1:
            metadata = metadata.sample(frac=self.subsample)
        elif self.split == "val":
            try:
                metadata = metadata.sample(1000, random_state=0)
            except:
                print(
                    "there are less than 1000 samples in the val set, thus no downsampling is done"
                )
                pass

        metadata["caption"] = metadata["name"]
        del metadata["name"]
        self.metadata = metadata
        self.metadata.dropna(inplace=True)

    def _get_video_path(self, sample):
        rel_video_fp = str(sample["videoid"]) + ".mp4"
        full_video_fp = os.path.join(self.data_dir, rel_video_fp)
        if not os.path.exists(full_video_fp):
            full_video_fp = os.path.join(self.data_dir, "videos", rel_video_fp)
        return full_video_fp, rel_video_fp

    def _get_caption(self, sample):
        return sample["caption"]


if __name__ == "__main__":
    from tqdm import tqdm
    import imageio
    import argparse
    import numpy as np

    parser = argparse.ArgumentParser()
    parser.add_argument("--out_path", type=str, default=None)
    parser.add_argument("--num_frames", type=int, default=17)
    parser.add_argument("--motion_scale", type=int, default=4)
    opt = parser.parse_known_args()[0]

    def write_text_to_file(text, file_path):
        with open(file_path, "w") as file:
            file.write(text)

    config = {
        "dataset_name": "WebVid",
        "data_dir": "/msra_data/videos_rmwm",
        "metadata_dir": "/msra_data",
        "split": "val",
        "cut": "2M",
        "key": "wmrm_all",
        "subsample": 1,
        "text_params": {"input": "text"},
        "video_params": {
            "input_res_h": 320,  # todo: check the input_res_h
            "input_res_w": 320,  # todo: check the input_res_w
            "tsfm_params": {
                "norm_mean": [0.5, 0.5, 0.5],
                "norm_std": [0.5, 0.5, 0.5],
            },
            "num_frames": opt.num_frames,
            "prop_factor": 30,
            "loading": "lax",
        },
        "metadata_folder_name": "webvid10m_meta",
        "first_stage_key": "jpg",
        "cond_stage_key": "txt",
        "skip_missing_files": False,
    }

    dataset = WebVid(**config)
    length = dataset.__len__()

    txt_out_path = os.path.join(
        opt.out_path, f"num{opt.num_frames}_ms{opt.motion_scale}", "txt"
    )
    video_out_high_path = os.path.join(
        opt.out_path, f"num{opt.num_frames}_ms{opt.motion_scale}", "videoHigh"
    )
    video_out_low_path = os.path.join(
        opt.out_path, f"num{opt.num_frames}_ms{opt.motion_scale}", "videoLow"
    )
    os.makedirs(txt_out_path, exist_ok=True)
    os.makedirs(video_out_high_path, exist_ok=True)
    os.makedirs(video_out_low_path, exist_ok=True)

    for idx in tqdm(range(length)):
        print(idx)
        item = dataset.__getitem__(idx)
        video = item["jpg"]
        txt = item["txt"]

        video_new = (
            ((video.transpose(3, 1) * 0.5 + 0.5).clamp(0, 1) * 255.0)
            .numpy()
            .astype(np.uint8)
        )
        video_list = [img for img in video_new]
        imageio.mimsave(
            os.path.join(video_out_high_path, f"{idx:09d}.gif"),
            video_list,
            duration=1,
            loops=1,
        )
        imageio.mimsave(
            os.path.join(video_out_low_path, f"{idx:09d}.gif"),
            video_list[:: opt.motion_scale],
            duration=1 * opt.motion_scale,
            loops=1,
        )

        write_text_to_file(txt, os.path.join(txt_out_path, f"{idx:09d}.txt"))
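For orientation (an assumption read off _load_metadata and _get_video_path, not a file shipped with this commit): the metadata CSV is expected to provide at least a videoid column, used to build the <videoid>.mp4 path, and a name column, which is renamed to caption. A toy file could be created like this:

    import pandas as pd

    # hypothetical row; the real file lives at metadata_dir/metadata_folder_name/results_{cut}_{split}[_{key}].csv
    rows = [{"videoid": 1066674784, "name": "aerial view of a coastline at sunset"}]
    pd.DataFrame(rows).to_csv("results_2M_val_wmrm_all.csv", index=False)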
CCEdit-main/sgm/lr_scheduler.py ADDED
@@ -0,0 +1,135 @@
import numpy as np


class LambdaWarmUpCosineScheduler:
    """
    note: use with a base_lr of 1.0
    """

    def __init__(
        self,
        warm_up_steps,
        lr_min,
        lr_max,
        lr_start,
        max_decay_steps,
        verbosity_interval=0,
    ):
        self.lr_warm_up_steps = warm_up_steps
        self.lr_start = lr_start
        self.lr_min = lr_min
        self.lr_max = lr_max
        self.lr_max_decay_steps = max_decay_steps
        self.last_lr = 0.0
        self.verbosity_interval = verbosity_interval

    def schedule(self, n, **kwargs):
        if self.verbosity_interval > 0:
            if n % self.verbosity_interval == 0:
                print(f"current step: {n}, recent lr-multiplier: {self.last_lr}")
        if n < self.lr_warm_up_steps:
            lr = (
                self.lr_max - self.lr_start
            ) / self.lr_warm_up_steps * n + self.lr_start
            self.last_lr = lr
            return lr
        else:
            t = (n - self.lr_warm_up_steps) / (
                self.lr_max_decay_steps - self.lr_warm_up_steps
            )
            t = min(t, 1.0)
            lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * (
                1 + np.cos(t * np.pi)
            )
            self.last_lr = lr
            return lr

    def __call__(self, n, **kwargs):
        return self.schedule(n, **kwargs)


class LambdaWarmUpCosineScheduler2:
    """
    supports repeated iterations, configurable via lists
    note: use with a base_lr of 1.0.
    """

    def __init__(
        self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0
    ):
        assert (
            len(warm_up_steps)
            == len(f_min)
            == len(f_max)
            == len(f_start)
            == len(cycle_lengths)
        )
        self.lr_warm_up_steps = warm_up_steps
        self.f_start = f_start
        self.f_min = f_min
        self.f_max = f_max
        self.cycle_lengths = cycle_lengths
        self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths))
        self.last_f = 0.0
        self.verbosity_interval = verbosity_interval

    def find_in_interval(self, n):
        interval = 0
        for cl in self.cum_cycles[1:]:
            if n <= cl:
                return interval
            interval += 1

    def schedule(self, n, **kwargs):
        cycle = self.find_in_interval(n)
        n = n - self.cum_cycles[cycle]
        if self.verbosity_interval > 0:
            if n % self.verbosity_interval == 0:
                print(
                    f"current step: {n}, recent lr-multiplier: {self.last_f}, "
                    f"current cycle {cycle}"
                )
        if n < self.lr_warm_up_steps[cycle]:
            f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[
                cycle
            ] * n + self.f_start[cycle]
            self.last_f = f
            return f
        else:
            t = (n - self.lr_warm_up_steps[cycle]) / (
                self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle]
            )
            t = min(t, 1.0)
            f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * (
                1 + np.cos(t * np.pi)
            )
            self.last_f = f
            return f

    def __call__(self, n, **kwargs):
        return self.schedule(n, **kwargs)


class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2):
    def schedule(self, n, **kwargs):
        cycle = self.find_in_interval(n)
        n = n - self.cum_cycles[cycle]
        if self.verbosity_interval > 0:
            if n % self.verbosity_interval == 0:
                print(
                    f"current step: {n}, recent lr-multiplier: {self.last_f}, "
                    f"current cycle {cycle}"
                )

        if n < self.lr_warm_up_steps[cycle]:
            f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[
                cycle
            ] * n + self.f_start[cycle]
            self.last_f = f
            return f
        else:
            f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (
                self.cycle_lengths[cycle] - n
            ) / (self.cycle_lengths[cycle])
            self.last_f = f
            return f
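These schedulers return a learning-rate multiplier rather than an absolute learning rate, so they are normally wrapped in torch.optim.lr_scheduler.LambdaLR, the same pattern DiffusionEngine.configure_optimizers uses further below. A minimal sketch with made-up hyperparameter values:

    import torch
    from torch.optim.lr_scheduler import LambdaLR

    sched = LambdaLinearScheduler(
        warm_up_steps=[1000], f_min=[1.0], f_max=[1.0], f_start=[1e-6], cycle_lengths=[10000000]
    )
    opt = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=1e-4)
    lr_sched = LambdaLR(opt, lr_lambda=sched.schedule)  # call lr_sched.step() once per training step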
CCEdit-main/sgm/models/__init__.py ADDED
@@ -0,0 +1,2 @@
from .autoencoder import AutoencodingEngine
from .diffusion import DiffusionEngine
CCEdit-main/sgm/models/__pycache__/__init__.cpython-39.pyc ADDED (binary file, 246 Bytes)
CCEdit-main/sgm/models/__pycache__/autoencoder.cpython-39.pyc ADDED (binary file, 12 kB)
CCEdit-main/sgm/models/__pycache__/diffusion.cpython-39.pyc ADDED (binary file, 24.1 kB)
CCEdit-main/sgm/models/autoencoder.py ADDED
@@ -0,0 +1,357 @@
| 1 |
+
import re
|
| 2 |
+
from abc import abstractmethod
|
| 3 |
+
from contextlib import contextmanager
|
| 4 |
+
from typing import Any, Dict, Tuple, Union
|
| 5 |
+
import einops
|
| 6 |
+
|
| 7 |
+
import pytorch_lightning as pl
|
| 8 |
+
import torch
|
| 9 |
+
from omegaconf import ListConfig
|
| 10 |
+
from packaging import version
|
| 11 |
+
from safetensors.torch import load_file as load_safetensors
|
| 12 |
+
|
| 13 |
+
from ..modules.diffusionmodules.model import Decoder, Encoder
|
| 14 |
+
from ..modules.distributions.distributions import DiagonalGaussianDistribution
|
| 15 |
+
from ..modules.ema import LitEma
|
| 16 |
+
from ..util import default, get_obj_from_str, instantiate_from_config
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class AbstractAutoencoder(pl.LightningModule):
|
| 20 |
+
"""
|
| 21 |
+
This is the base class for all autoencoders, including image autoencoders, image autoencoders with discriminators,
|
| 22 |
+
unCLIP models, etc. Hence, it is fairly general, and specific features
|
| 23 |
+
(e.g. discriminator training, encoding, decoding) must be implemented in subclasses.
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
def __init__(
|
| 27 |
+
self,
|
| 28 |
+
ema_decay: Union[None, float] = None,
|
| 29 |
+
monitor: Union[None, str] = None,
|
| 30 |
+
input_key: str = "jpg",
|
| 31 |
+
ckpt_path: Union[None, str] = None,
|
| 32 |
+
ignore_keys: Union[Tuple, list, ListConfig] = (),
|
| 33 |
+
):
|
| 34 |
+
super().__init__()
|
| 35 |
+
self.input_key = input_key
|
| 36 |
+
self.use_ema = ema_decay is not None
|
| 37 |
+
if monitor is not None:
|
| 38 |
+
self.monitor = monitor
|
| 39 |
+
|
| 40 |
+
if self.use_ema:
|
| 41 |
+
self.model_ema = LitEma(self, decay=ema_decay)
|
| 42 |
+
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
|
| 43 |
+
|
| 44 |
+
if ckpt_path is not None:
|
| 45 |
+
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
|
| 46 |
+
|
| 47 |
+
if version.parse(torch.__version__) >= version.parse("2.0.0"):
|
| 48 |
+
self.automatic_optimization = False
|
| 49 |
+
|
| 50 |
+
def init_from_ckpt(
|
| 51 |
+
self, path: str, ignore_keys: Union[Tuple, list, ListConfig] = tuple()
|
| 52 |
+
) -> None:
|
| 53 |
+
if path.endswith("ckpt"):
|
| 54 |
+
sd = torch.load(path, map_location="cpu")["state_dict"]
|
| 55 |
+
elif path.endswith("safetensors"):
|
| 56 |
+
sd = load_safetensors(path)
|
| 57 |
+
else:
|
| 58 |
+
raise NotImplementedError
|
| 59 |
+
|
| 60 |
+
keys = list(sd.keys())
|
| 61 |
+
for k in keys:
|
| 62 |
+
for ik in ignore_keys:
|
| 63 |
+
if re.match(ik, k):
|
| 64 |
+
print("Deleting key {} from state_dict.".format(k))
|
| 65 |
+
del sd[k]
|
| 66 |
+
missing, unexpected = self.load_state_dict(sd, strict=False)
|
| 67 |
+
print(
|
| 68 |
+
f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys"
|
| 69 |
+
)
|
| 70 |
+
if len(missing) > 0:
|
| 71 |
+
print(f"Missing Keys: {missing}")
|
| 72 |
+
if len(unexpected) > 0:
|
| 73 |
+
print(f"Unexpected Keys: {unexpected}")
|
| 74 |
+
|
| 75 |
+
@abstractmethod
|
| 76 |
+
def get_input(self, batch) -> Any:
|
| 77 |
+
raise NotImplementedError()
|
| 78 |
+
|
| 79 |
+
def on_train_batch_end(self, *args, **kwargs):
|
| 80 |
+
# for EMA computation
|
| 81 |
+
if self.use_ema:
|
| 82 |
+
self.model_ema(self)
|
| 83 |
+
|
| 84 |
+
@contextmanager
|
| 85 |
+
def ema_scope(self, context=None):
|
| 86 |
+
if self.use_ema:
|
| 87 |
+
self.model_ema.store(self.parameters())
|
| 88 |
+
self.model_ema.copy_to(self)
|
| 89 |
+
if context is not None:
|
| 90 |
+
print(f"{context}: Switched to EMA weights")
|
| 91 |
+
try:
|
| 92 |
+
yield None
|
| 93 |
+
finally:
|
| 94 |
+
if self.use_ema:
|
| 95 |
+
self.model_ema.restore(self.parameters())
|
| 96 |
+
if context is not None:
|
| 97 |
+
print(f"{context}: Restored training weights")
|
| 98 |
+
|
| 99 |
+
@abstractmethod
|
| 100 |
+
def encode(self, *args, **kwargs) -> torch.Tensor:
|
| 101 |
+
raise NotImplementedError("encode()-method of abstract base class called")
|
| 102 |
+
|
| 103 |
+
@abstractmethod
|
| 104 |
+
def decode(self, *args, **kwargs) -> torch.Tensor:
|
| 105 |
+
raise NotImplementedError("decode()-method of abstract base class called")
|
| 106 |
+
|
| 107 |
+
def instantiate_optimizer_from_config(self, params, lr, cfg):
|
| 108 |
+
print(f"loading >>> {cfg['target']} <<< optimizer from config")
|
| 109 |
+
return get_obj_from_str(cfg["target"])(
|
| 110 |
+
params, lr=lr, **cfg.get("params", dict())
|
| 111 |
+
)
|
| 112 |
+
|
| 113 |
+
def configure_optimizers(self) -> Any:
|
| 114 |
+
raise NotImplementedError()
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
class AutoencodingEngine(AbstractAutoencoder):
|
| 118 |
+
"""
|
| 119 |
+
Base class for all image autoencoders that we train, like VQGAN or AutoencoderKL
|
| 120 |
+
(we also restore them explicitly as special cases for legacy reasons).
|
| 121 |
+
Regularizations such as KL or VQ are moved to the regularizer class.
|
| 122 |
+
"""
|
| 123 |
+
|
| 124 |
+
def __init__(
|
| 125 |
+
self,
|
| 126 |
+
*args,
|
| 127 |
+
encoder_config: Dict,
|
| 128 |
+
decoder_config: Dict,
|
| 129 |
+
loss_config: Dict,
|
| 130 |
+
regularizer_config: Dict,
|
| 131 |
+
optimizer_config: Union[Dict, None] = None,
|
| 132 |
+
lr_g_factor: float = 1.0,
|
| 133 |
+
**kwargs,
|
| 134 |
+
):
|
| 135 |
+
super().__init__(*args, **kwargs)
|
| 136 |
+
# todo: add options to freeze encoder/decoder
|
| 137 |
+
self.encoder = instantiate_from_config(encoder_config)
|
| 138 |
+
self.decoder = instantiate_from_config(decoder_config)
|
| 139 |
+
self.loss = instantiate_from_config(loss_config)
|
| 140 |
+
self.regularization = instantiate_from_config(regularizer_config)
|
| 141 |
+
self.optimizer_config = default(
|
| 142 |
+
optimizer_config, {"target": "torch.optim.Adam"}
|
| 143 |
+
)
|
| 144 |
+
self.lr_g_factor = lr_g_factor
|
| 145 |
+
|
| 146 |
+
def get_input(self, batch: Dict) -> torch.Tensor:
|
| 147 |
+
# assuming unified data format, dataloader returns a dict.
|
| 148 |
+
# image tensors should be scaled to -1 ... 1 and in channels-first format (e.g., bchw instead if bhwc)
|
| 149 |
+
return batch[self.input_key]
|
| 150 |
+
|
| 151 |
+
def get_autoencoder_params(self) -> list:
|
| 152 |
+
params = (
|
| 153 |
+
list(self.encoder.parameters())
|
| 154 |
+
+ list(self.decoder.parameters())
|
| 155 |
+
+ list(self.regularization.get_trainable_parameters())
|
| 156 |
+
+ list(self.loss.get_trainable_autoencoder_parameters())
|
| 157 |
+
)
|
| 158 |
+
return params
|
| 159 |
+
|
| 160 |
+
def get_discriminator_params(self) -> list:
|
| 161 |
+
params = list(self.loss.get_trainable_parameters()) # e.g., discriminator
|
| 162 |
+
return params
|
| 163 |
+
|
| 164 |
+
def get_last_layer(self):
|
| 165 |
+
return self.decoder.get_last_layer()
|
| 166 |
+
|
| 167 |
+
def encode(self, x: Any, return_reg_log: bool = False) -> Any:
|
| 168 |
+
z = self.encoder(x)
|
| 169 |
+
z, reg_log = self.regularization(z)
|
| 170 |
+
if return_reg_log:
|
| 171 |
+
return z, reg_log
|
| 172 |
+
return z
|
| 173 |
+
|
| 174 |
+
def decode(self, z: Any) -> torch.Tensor:
|
| 175 |
+
x = self.decoder(z)
|
| 176 |
+
return x
|
| 177 |
+
|
| 178 |
+
def forward(self, x: Any) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
|
| 179 |
+
z, reg_log = self.encode(x, return_reg_log=True)
|
| 180 |
+
dec = self.decode(z)
|
| 181 |
+
return z, dec, reg_log
|
| 182 |
+
|
| 183 |
+
def training_step(self, batch, batch_idx, optimizer_idx) -> Any:
|
| 184 |
+
x = self.get_input(batch)
|
| 185 |
+
z, xrec, regularization_log = self(x)
|
| 186 |
+
|
| 187 |
+
if optimizer_idx == 0:
|
| 188 |
+
# autoencode
|
| 189 |
+
aeloss, log_dict_ae = self.loss(
|
| 190 |
+
regularization_log,
|
| 191 |
+
x,
|
| 192 |
+
xrec,
|
| 193 |
+
optimizer_idx,
|
| 194 |
+
self.global_step,
|
| 195 |
+
last_layer=self.get_last_layer(),
|
| 196 |
+
split="train",
|
| 197 |
+
)
|
| 198 |
+
|
| 199 |
+
self.log_dict(
|
| 200 |
+
log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True
|
| 201 |
+
)
|
| 202 |
+
return aeloss
|
| 203 |
+
|
| 204 |
+
if optimizer_idx == 1:
|
| 205 |
+
# discriminator
|
| 206 |
+
discloss, log_dict_disc = self.loss(
|
| 207 |
+
regularization_log,
|
| 208 |
+
x,
|
| 209 |
+
xrec,
|
| 210 |
+
optimizer_idx,
|
| 211 |
+
self.global_step,
|
| 212 |
+
last_layer=self.get_last_layer(),
|
| 213 |
+
split="train",
|
| 214 |
+
)
|
| 215 |
+
self.log_dict(
|
| 216 |
+
log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True
|
| 217 |
+
)
|
| 218 |
+
return discloss
|
| 219 |
+
|
| 220 |
+
def validation_step(self, batch, batch_idx) -> Dict:
|
| 221 |
+
log_dict = self._validation_step(batch, batch_idx)
|
| 222 |
+
with self.ema_scope():
|
| 223 |
+
log_dict_ema = self._validation_step(batch, batch_idx, postfix="_ema")
|
| 224 |
+
log_dict.update(log_dict_ema)
|
| 225 |
+
return log_dict
|
| 226 |
+
|
| 227 |
+
def _validation_step(self, batch, batch_idx, postfix="") -> Dict:
|
| 228 |
+
x = self.get_input(batch)
|
| 229 |
+
|
| 230 |
+
z, xrec, regularization_log = self(x)
|
| 231 |
+
aeloss, log_dict_ae = self.loss(
|
| 232 |
+
regularization_log,
|
| 233 |
+
x,
|
| 234 |
+
xrec,
|
| 235 |
+
0,
|
| 236 |
+
self.global_step,
|
| 237 |
+
last_layer=self.get_last_layer(),
|
| 238 |
+
split="val" + postfix,
|
| 239 |
+
)
|
| 240 |
+
|
| 241 |
+
discloss, log_dict_disc = self.loss(
|
| 242 |
+
regularization_log,
|
| 243 |
+
x,
|
| 244 |
+
xrec,
|
| 245 |
+
1,
|
| 246 |
+
self.global_step,
|
| 247 |
+
last_layer=self.get_last_layer(),
|
| 248 |
+
split="val" + postfix,
|
| 249 |
+
)
|
| 250 |
+
self.log(f"val{postfix}/rec_loss", log_dict_ae[f"val{postfix}/rec_loss"])
|
| 251 |
+
log_dict_ae.update(log_dict_disc)
|
| 252 |
+
self.log_dict(log_dict_ae)
|
| 253 |
+
return log_dict_ae
|
| 254 |
+
|
| 255 |
+
def configure_optimizers(self) -> Any:
|
| 256 |
+
ae_params = self.get_autoencoder_params()
|
| 257 |
+
disc_params = self.get_discriminator_params()
|
| 258 |
+
|
| 259 |
+
opt_ae = self.instantiate_optimizer_from_config(
|
| 260 |
+
ae_params,
|
| 261 |
+
default(self.lr_g_factor, 1.0) * self.learning_rate,
|
| 262 |
+
self.optimizer_config,
|
| 263 |
+
)
|
| 264 |
+
opt_disc = self.instantiate_optimizer_from_config(
|
| 265 |
+
disc_params, self.learning_rate, self.optimizer_config
|
| 266 |
+
)
|
| 267 |
+
|
| 268 |
+
return [opt_ae, opt_disc], []
|
| 269 |
+
|
| 270 |
+
@torch.no_grad()
|
| 271 |
+
def log_images(self, batch: Dict, **kwargs) -> Dict:
|
| 272 |
+
log = dict()
|
| 273 |
+
x = self.get_input(batch)
|
| 274 |
+
_, xrec, _ = self(x)
|
| 275 |
+
log["inputs"] = x
|
| 276 |
+
log["reconstructions"] = xrec
|
| 277 |
+
with self.ema_scope():
|
| 278 |
+
_, xrec_ema, _ = self(x)
|
| 279 |
+
log["reconstructions_ema"] = xrec_ema
|
| 280 |
+
return log
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
class AutoencoderKL(AutoencodingEngine):
|
| 284 |
+
def __init__(self, embed_dim: int, **kwargs):
|
| 285 |
+
ddconfig = kwargs.pop("ddconfig")
|
| 286 |
+
ckpt_path = kwargs.pop("ckpt_path", None)
|
| 287 |
+
ignore_keys = kwargs.pop("ignore_keys", ())
|
| 288 |
+
super().__init__(
|
| 289 |
+
encoder_config={"target": "torch.nn.Identity"},
|
| 290 |
+
decoder_config={"target": "torch.nn.Identity"},
|
| 291 |
+
regularizer_config={"target": "torch.nn.Identity"},
|
| 292 |
+
loss_config=kwargs.pop("lossconfig"),
|
| 293 |
+
**kwargs,
|
| 294 |
+
)
|
| 295 |
+
assert ddconfig["double_z"]
|
| 296 |
+
self.encoder = Encoder(**ddconfig)
|
| 297 |
+
self.decoder = Decoder(**ddconfig)
|
| 298 |
+
self.quant_conv = torch.nn.Conv2d(2 * ddconfig["z_channels"], 2 * embed_dim, 1)
|
| 299 |
+
self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
|
| 300 |
+
self.embed_dim = embed_dim
|
| 301 |
+
|
| 302 |
+
if ckpt_path is not None:
|
| 303 |
+
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
|
| 304 |
+
|
| 305 |
+
def encode(self, x):
|
| 306 |
+
assert (
|
| 307 |
+
not self.training
|
| 308 |
+
), f"{self.__class__.__name__} only supports inference currently"
|
| 309 |
+
assert x.dim() == 4
|
| 310 |
+
h = self.encoder(x)
|
| 311 |
+
moments = self.quant_conv(h)
|
| 312 |
+
posterior = DiagonalGaussianDistribution(moments)
|
| 313 |
+
|
| 314 |
+
return posterior
|
| 315 |
+
|
| 316 |
+
def decode(self, z, **decoder_kwargs):
|
| 317 |
+
z = self.post_quant_conv(z)
|
| 318 |
+
dec = self.decoder(z, **decoder_kwargs)
|
| 319 |
+
return dec
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
class AutoencoderKLInferenceWrapper(AutoencoderKL):
|
| 323 |
+
def encode(self, x):
|
| 324 |
+
# return super().encode(x).sample()
|
| 325 |
+
is_video = x.dim() == 5
|
| 326 |
+
if is_video:
|
| 327 |
+
b, c, t, h, w = x.shape
|
| 328 |
+
x = einops.rearrange(x, "b c t h w -> (b t) c h w")
|
| 329 |
+
x = super().encode(x).sample()
|
| 330 |
+
if is_video:
|
| 331 |
+
x = einops.rearrange(x, "(b t) c h w -> b c t h w", b=b, t=t)
|
| 332 |
+
return x
|
| 333 |
+
|
| 334 |
+
def decode(self, z, **decoder_kwargs):
|
| 335 |
+
is_video = z.dim() == 5
|
| 336 |
+
if is_video:
|
| 337 |
+
b, c, t, h, w = z.shape
|
| 338 |
+
z = einops.rearrange(z, "b c t h w -> (b t) c h w")
|
| 339 |
+
z = self.post_quant_conv(z.to(self.post_quant_conv.weight.dtype))
|
| 340 |
+
dec = self.decoder(z, **decoder_kwargs)
|
| 341 |
+
if is_video:
|
| 342 |
+
dec = einops.rearrange(dec, "(b t) c h w -> b c t h w", b=b, t=t)
|
| 343 |
+
return dec
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
class IdentityFirstStage(AbstractAutoencoder):
|
| 347 |
+
def __init__(self, *args, **kwargs):
|
| 348 |
+
super().__init__(*args, **kwargs)
|
| 349 |
+
|
| 350 |
+
def get_input(self, x: Any) -> Any:
|
| 351 |
+
return x
|
| 352 |
+
|
| 353 |
+
def encode(self, x: Any, *args, **kwargs) -> Any:
|
| 354 |
+
return x
|
| 355 |
+
|
| 356 |
+
def decode(self, x: Any, *args, **kwargs) -> Any:
|
| 357 |
+
return x
|
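AutoencoderKLInferenceWrapper in this file reuses the image VAE for video by folding the time axis into the batch axis with einops before encoding and unfolding it afterwards. A small shape-only sketch of that pattern (standalone, with a random tensor instead of a real model):

    import torch
    import einops

    x = torch.randn(2, 3, 16, 64, 64)  # (b, c, t, h, w) video batch
    flat = einops.rearrange(x, "b c t h w -> (b t) c h w")  # 32 frames treated as images
    # per-frame encode/decode would run here
    restored = einops.rearrange(flat, "(b t) c h w -> b c t h w", b=2, t=16)
    assert torch.equal(x, restored)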
CCEdit-main/sgm/models/diffusion-ori.py ADDED
@@ -0,0 +1,320 @@
| 1 |
+
from contextlib import contextmanager
|
| 2 |
+
from typing import Any, Dict, List, Tuple, Union
|
| 3 |
+
|
| 4 |
+
import pytorch_lightning as pl
|
| 5 |
+
import torch
|
| 6 |
+
from omegaconf import ListConfig, OmegaConf
|
| 7 |
+
from safetensors.torch import load_file as load_safetensors
|
| 8 |
+
from torch.optim.lr_scheduler import LambdaLR
|
| 9 |
+
|
| 10 |
+
from ..modules import UNCONDITIONAL_CONFIG
|
| 11 |
+
from ..modules.diffusionmodules.wrappers import OPENAIUNETWRAPPER
|
| 12 |
+
from ..modules.ema import LitEma
|
| 13 |
+
from ..util import (
|
| 14 |
+
default,
|
| 15 |
+
disabled_train,
|
| 16 |
+
get_obj_from_str,
|
| 17 |
+
instantiate_from_config,
|
| 18 |
+
log_txt_as_img,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class DiffusionEngine(pl.LightningModule):
|
| 23 |
+
def __init__(
|
| 24 |
+
self,
|
| 25 |
+
network_config,
|
| 26 |
+
denoiser_config,
|
| 27 |
+
first_stage_config,
|
| 28 |
+
conditioner_config: Union[None, Dict, ListConfig, OmegaConf] = None,
|
| 29 |
+
sampler_config: Union[None, Dict, ListConfig, OmegaConf] = None,
|
| 30 |
+
optimizer_config: Union[None, Dict, ListConfig, OmegaConf] = None,
|
| 31 |
+
scheduler_config: Union[None, Dict, ListConfig, OmegaConf] = None,
|
| 32 |
+
loss_fn_config: Union[None, Dict, ListConfig, OmegaConf] = None,
|
| 33 |
+
network_wrapper: Union[None, str] = None,
|
| 34 |
+
ckpt_path: Union[None, str] = None,
|
| 35 |
+
use_ema: bool = False,
|
| 36 |
+
ema_decay_rate: float = 0.9999,
|
| 37 |
+
scale_factor: float = 1.0,
|
| 38 |
+
disable_first_stage_autocast=False,
|
| 39 |
+
input_key: str = "jpg",
|
| 40 |
+
log_keys: Union[List, None] = None,
|
| 41 |
+
no_cond_log: bool = False,
|
| 42 |
+
compile_model: bool = False,
|
| 43 |
+
):
|
| 44 |
+
super().__init__()
|
| 45 |
+
self.log_keys = log_keys
|
| 46 |
+
self.input_key = input_key
|
| 47 |
+
self.optimizer_config = default(
|
| 48 |
+
optimizer_config, {"target": "torch.optim.AdamW"}
|
| 49 |
+
)
|
| 50 |
+
model = instantiate_from_config(network_config)
|
| 51 |
+
self.model = get_obj_from_str(default(network_wrapper, OPENAIUNETWRAPPER))(
|
| 52 |
+
model, compile_model=compile_model
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
self.denoiser = instantiate_from_config(denoiser_config)
|
| 56 |
+
self.sampler = (
|
| 57 |
+
instantiate_from_config(sampler_config)
|
| 58 |
+
if sampler_config is not None
|
| 59 |
+
else None
|
| 60 |
+
)
|
| 61 |
+
self.conditioner = instantiate_from_config(
|
| 62 |
+
default(conditioner_config, UNCONDITIONAL_CONFIG)
|
| 63 |
+
)
|
| 64 |
+
self.scheduler_config = scheduler_config
|
| 65 |
+
self._init_first_stage(first_stage_config)
|
| 66 |
+
|
| 67 |
+
self.loss_fn = (
|
| 68 |
+
instantiate_from_config(loss_fn_config)
|
| 69 |
+
if loss_fn_config is not None
|
| 70 |
+
else None
|
| 71 |
+
)
|
| 72 |
+
|
| 73 |
+
self.use_ema = use_ema
|
| 74 |
+
if self.use_ema:
|
| 75 |
+
self.model_ema = LitEma(self.model, decay=ema_decay_rate)
|
| 76 |
+
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
|
| 77 |
+
|
| 78 |
+
self.scale_factor = scale_factor
|
| 79 |
+
self.disable_first_stage_autocast = disable_first_stage_autocast
|
| 80 |
+
self.no_cond_log = no_cond_log
|
| 81 |
+
|
| 82 |
+
if ckpt_path is not None:
|
| 83 |
+
self.init_from_ckpt(ckpt_path)
|
| 84 |
+
|
| 85 |
+
def init_from_ckpt(
|
| 86 |
+
self,
|
| 87 |
+
path: str,
|
| 88 |
+
) -> None:
|
| 89 |
+
if path.endswith("ckpt"):
|
| 90 |
+
sd = torch.load(path, map_location="cpu")["state_dict"]
|
| 91 |
+
elif path.endswith("safetensors"):
|
| 92 |
+
sd = load_safetensors(path)
|
| 93 |
+
else:
|
| 94 |
+
raise NotImplementedError
|
| 95 |
+
|
| 96 |
+
missing, unexpected = self.load_state_dict(sd, strict=False)
|
| 97 |
+
print(
|
| 98 |
+
f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys"
|
| 99 |
+
)
|
| 100 |
+
if len(missing) > 0:
|
| 101 |
+
print(f"Missing Keys: {missing}")
|
| 102 |
+
if len(unexpected) > 0:
|
| 103 |
+
print(f"Unexpected Keys: {unexpected}")
|
| 104 |
+
|
| 105 |
+
def _init_first_stage(self, config):
|
| 106 |
+
model = instantiate_from_config(config).eval()
|
| 107 |
+
model.train = disabled_train
|
| 108 |
+
for param in model.parameters():
|
| 109 |
+
param.requires_grad = False
|
| 110 |
+
self.first_stage_model = model
|
| 111 |
+
|
| 112 |
+
def get_input(self, batch):
|
| 113 |
+
# assuming unified data format, dataloader returns a dict.
|
| 114 |
+
# image tensors should be scaled to -1 ... 1 and in bchw format
|
| 115 |
+
return batch[self.input_key]
|
| 116 |
+
|
| 117 |
+
@torch.no_grad()
|
| 118 |
+
def decode_first_stage(self, z):
|
| 119 |
+
z = 1.0 / self.scale_factor * z
|
| 120 |
+
with torch.autocast("cuda", enabled=not self.disable_first_stage_autocast):
|
| 121 |
+
out = self.first_stage_model.decode(z)
|
| 122 |
+
return out
|
| 123 |
+
|
| 124 |
+
@torch.no_grad()
|
| 125 |
+
def encode_first_stage(self, x):
|
| 126 |
+
with torch.autocast("cuda", enabled=not self.disable_first_stage_autocast):
|
| 127 |
+
z = self.first_stage_model.encode(x)
|
| 128 |
+
z = self.scale_factor * z
|
| 129 |
+
return z
|
| 130 |
+
|
| 131 |
+
def forward(self, x, batch):
|
| 132 |
+
loss = self.loss_fn(self.model, self.denoiser, self.conditioner, x, batch)
|
| 133 |
+
loss_mean = loss.mean()
|
| 134 |
+
loss_dict = {"loss": loss_mean}
|
| 135 |
+
return loss_mean, loss_dict
|
| 136 |
+
|
| 137 |
+
def shared_step(self, batch: Dict) -> Any:
|
| 138 |
+
x = self.get_input(batch)
|
| 139 |
+
x = self.encode_first_stage(x)
|
| 140 |
+
batch["global_step"] = self.global_step
|
| 141 |
+
loss, loss_dict = self(x, batch)
|
| 142 |
+
return loss, loss_dict
|
| 143 |
+
|
| 144 |
+
def training_step(self, batch, batch_idx):
|
| 145 |
+
loss, loss_dict = self.shared_step(batch)
|
| 146 |
+
|
| 147 |
+
self.log_dict(
|
| 148 |
+
loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=False
|
| 149 |
+
)
|
| 150 |
+
|
| 151 |
+
self.log(
|
| 152 |
+
"global_step",
|
| 153 |
+
self.global_step,
|
| 154 |
+
prog_bar=True,
|
| 155 |
+
logger=True,
|
| 156 |
+
on_step=True,
|
| 157 |
+
on_epoch=False,
|
| 158 |
+
)
|
| 159 |
+
|
| 160 |
+
if self.scheduler_config is not None:
|
| 161 |
+
lr = self.optimizers().param_groups[0]["lr"]
|
| 162 |
+
self.log(
|
| 163 |
+
"lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False
|
| 164 |
+
)
|
| 165 |
+
|
| 166 |
+
return loss
|
| 167 |
+
|
| 168 |
+
def on_train_start(self, *args, **kwargs):
|
| 169 |
+
if self.sampler is None or self.loss_fn is None:
|
| 170 |
+
raise ValueError("Sampler and loss function need to be set for training.")
|
| 171 |
+
|
| 172 |
+
def on_train_batch_end(self, *args, **kwargs):
|
| 173 |
+
if self.use_ema:
|
| 174 |
+
self.model_ema(self.model)
|
| 175 |
+
|
| 176 |
+
@contextmanager
|
| 177 |
+
def ema_scope(self, context=None):
|
| 178 |
+
if self.use_ema:
|
| 179 |
+
self.model_ema.store(self.model.parameters())
|
| 180 |
+
self.model_ema.copy_to(self.model)
|
| 181 |
+
if context is not None:
|
| 182 |
+
print(f"{context}: Switched to EMA weights")
|
| 183 |
+
try:
|
| 184 |
+
yield None
|
| 185 |
+
finally:
|
| 186 |
+
if self.use_ema:
|
| 187 |
+
self.model_ema.restore(self.model.parameters())
|
| 188 |
+
if context is not None:
|
| 189 |
+
print(f"{context}: Restored training weights")
|
| 190 |
+
|
| 191 |
+
def instantiate_optimizer_from_config(self, params, lr, cfg):
|
| 192 |
+
return get_obj_from_str(cfg["target"])(
|
| 193 |
+
params, lr=lr, **cfg.get("params", dict())
|
| 194 |
+
)
|
| 195 |
+
|
| 196 |
+
def configure_optimizers(self):
|
| 197 |
+
lr = self.learning_rate
|
| 198 |
+
params = list(self.model.parameters())
|
| 199 |
+
for embedder in self.conditioner.embedders:
|
| 200 |
+
if embedder.is_trainable:
|
| 201 |
+
params = params + list(embedder.parameters())
|
| 202 |
+
opt = self.instantiate_optimizer_from_config(params, lr, self.optimizer_config)
|
| 203 |
+
if self.scheduler_config is not None:
|
| 204 |
+
scheduler = instantiate_from_config(self.scheduler_config)
|
| 205 |
+
print("Setting up LambdaLR scheduler...")
|
| 206 |
+
scheduler = [
|
| 207 |
+
{
|
| 208 |
+
"scheduler": LambdaLR(opt, lr_lambda=scheduler.schedule),
|
| 209 |
+
"interval": "step",
|
| 210 |
+
"frequency": 1,
|
| 211 |
+
}
|
| 212 |
+
]
|
| 213 |
+
return [opt], scheduler
|
| 214 |
+
return opt
|
| 215 |
+
|
| 216 |
+
@torch.no_grad()
|
| 217 |
+
def sample(
|
| 218 |
+
self,
|
| 219 |
+
cond: Dict,
|
| 220 |
+
uc: Union[Dict, None] = None,
|
| 221 |
+
batch_size: int = 16,
|
| 222 |
+
shape: Union[None, Tuple, List] = None,
|
| 223 |
+
**kwargs,
|
| 224 |
+
):
|
| 225 |
+
randn = torch.randn(batch_size, *shape).to(self.device)
|
| 226 |
+
|
| 227 |
+
denoiser = lambda input, sigma, c: self.denoiser(
|
| 228 |
+
self.model, input, sigma, c, **kwargs
|
| 229 |
+
)
|
| 230 |
+
samples = self.sampler(denoiser, randn, cond, uc=uc)
|
| 231 |
+
return samples
|
| 232 |
+
|
| 233 |
+
@torch.no_grad()
|
| 234 |
+
def log_conditionings(self, batch: Dict, n: int) -> Dict:
|
| 235 |
+
"""
|
| 236 |
+
Defines heuristics to log different conditionings.
|
| 237 |
+
These can be lists of strings (text-to-image), tensors, ints, ...
|
| 238 |
+
"""
|
| 239 |
+
image_h, image_w = batch[self.input_key].shape[2:]
|
| 240 |
+
log = dict()
|
| 241 |
+
|
| 242 |
+
for embedder in self.conditioner.embedders:
|
| 243 |
+
if (
|
| 244 |
+
(self.log_keys is None) or (embedder.input_key in self.log_keys)
|
| 245 |
+
) and not self.no_cond_log:
|
| 246 |
+
x = batch[embedder.input_key][:n]
|
| 247 |
+
if isinstance(x, torch.Tensor):
|
| 248 |
+
if x.dim() == 1:
|
| 249 |
+
# class-conditional, convert integer to string
|
| 250 |
+
x = [str(x[i].item()) for i in range(x.shape[0])]
|
| 251 |
+
xc = log_txt_as_img((image_h, image_w), x, size=image_h // 4)
|
| 252 |
+
elif x.dim() == 2:
|
| 253 |
+
# size and crop cond and the like
|
| 254 |
+
x = [
|
| 255 |
+
"x".join([str(xx) for xx in x[i].tolist()])
|
| 256 |
+
for i in range(x.shape[0])
|
| 257 |
+
]
|
| 258 |
+
xc = log_txt_as_img((image_h, image_w), x, size=image_h // 20)
|
| 259 |
+
else:
|
| 260 |
+
raise NotImplementedError()
|
| 261 |
+
elif isinstance(x, (List, ListConfig)):
|
| 262 |
+
if isinstance(x[0], str):
|
| 263 |
+
# strings
|
| 264 |
+
xc = log_txt_as_img((image_h, image_w), x, size=image_h // 20)
|
| 265 |
+
else:
|
| 266 |
+
raise NotImplementedError()
|
| 267 |
+
else:
|
| 268 |
+
raise NotImplementedError()
|
| 269 |
+
log[embedder.input_key] = xc
|
| 270 |
+
return log
|
| 271 |
+
|
| 272 |
+
@torch.no_grad()
|
| 273 |
+
def log_images(
|
| 274 |
+
self,
|
| 275 |
+
batch: Dict,
|
| 276 |
+
N: int = 8,
|
| 277 |
+
sample: bool = True,
|
| 278 |
+
ucg_keys: List[str] = None,
|
| 279 |
+
**kwargs,
|
| 280 |
+
) -> Dict:
|
| 281 |
+
conditioner_input_keys = [e.input_key for e in self.conditioner.embedders]
|
| 282 |
+
if ucg_keys:
|
| 283 |
+
assert all(map(lambda x: x in conditioner_input_keys, ucg_keys)), (
|
| 284 |
+
"Each defined ucg key for sampling must be in the provided conditioner input keys,"
|
| 285 |
+
f"but we have {ucg_keys} vs. {conditioner_input_keys}"
|
| 286 |
+
)
|
| 287 |
+
else:
|
| 288 |
+
ucg_keys = conditioner_input_keys
|
| 289 |
+
log = dict()
|
| 290 |
+
|
| 291 |
+
x = self.get_input(batch)
|
| 292 |
+
|
| 293 |
+
c, uc = self.conditioner.get_unconditional_conditioning(
|
| 294 |
+
batch,
|
| 295 |
+
force_uc_zero_embeddings=ucg_keys
|
| 296 |
+
if len(self.conditioner.embedders) > 0
|
| 297 |
+
else [],
|
| 298 |
+
)
|
| 299 |
+
|
| 300 |
+
sampling_kwargs = {}
|
| 301 |
+
|
| 302 |
+
N = min(x.shape[0], N)
|
| 303 |
+
x = x.to(self.device)[:N]
|
| 304 |
+
log["inputs"] = x
|
| 305 |
+
z = self.encode_first_stage(x)
|
| 306 |
+
log["reconstructions"] = self.decode_first_stage(z)
|
| 307 |
+
log.update(self.log_conditionings(batch, N))
|
| 308 |
+
|
| 309 |
+
for k in c:
|
| 310 |
+
if isinstance(c[k], torch.Tensor):
|
| 311 |
+
c[k], uc[k] = map(lambda y: y[k][:N].to(self.device), (c, uc))
|
| 312 |
+
|
| 313 |
+
if sample:
|
| 314 |
+
with self.ema_scope("Plotting"):
|
| 315 |
+
samples = self.sample(
|
| 316 |
+
c, shape=z.shape[1:], uc=uc, batch_size=N, **sampling_kwargs
|
| 317 |
+
)
|
| 318 |
+
samples = self.decode_first_stage(samples)
|
| 319 |
+
log["samples"] = samples
|
| 320 |
+
return log
|
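DiffusionEngine.sample in the listing above hands the sampler a closure that binds the UNet wrapper into the denoiser call. A stripped-down sketch of that calling convention (the helper name is made up, and this is not runnable on its own):

    def make_denoiser(engine, **kwargs):
        # mirrors the lambda built inside DiffusionEngine.sample
        return lambda x, sigma, c: engine.denoiser(engine.model, x, sigma, c, **kwargs)

    # inside the engine: samples = engine.sampler(make_denoiser(engine), randn, cond, uc=uc)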
CCEdit-main/sgm/models/diffusion.py ADDED
@@ -0,0 +1,910 @@
| 1 |
+
from contextlib import contextmanager
|
| 2 |
+
from typing import Any, Dict, List, Tuple, Union
|
| 3 |
+
|
| 4 |
+
import einops
|
| 5 |
+
import pytorch_lightning as pl
|
| 6 |
+
import torch
|
| 7 |
+
from omegaconf import ListConfig, OmegaConf
|
| 8 |
+
from safetensors.torch import load_file as load_safetensors
|
| 9 |
+
from torch.optim.lr_scheduler import LambdaLR
|
| 10 |
+
|
| 11 |
+
from sgm.modules.encoders.modules import VAEEmbedder
|
| 12 |
+
from sgm.modules.encoders.modules import (
|
| 13 |
+
LineartEncoder,
|
| 14 |
+
DepthZoeEncoder,
|
| 15 |
+
DepthMidasEncoder,
|
| 16 |
+
SoftEdgeEncoder,
|
| 17 |
+
NormalBaeEncoder,
|
| 18 |
+
ScribbleHEDEncoder,
|
| 19 |
+
ScribblePidiNetEncoder,
|
| 20 |
+
OpenposeEncoder,
|
| 21 |
+
OutpaintingEncoder,
|
| 22 |
+
InpaintingEncoder,
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
import os
|
| 26 |
+
import numpy as np
|
| 27 |
+
import torch.nn as nn
|
| 28 |
+
|
| 29 |
+
from ..modules import UNCONDITIONAL_CONFIG
|
| 30 |
+
from ..modules.diffusionmodules.wrappers import (
|
| 31 |
+
OPENAIUNETWRAPPER,
|
| 32 |
+
OPENAIUNETWRAPPERRAIG,
|
| 33 |
+
OPENAIUNETWRAPPERCONTROLLDM3D,
|
| 34 |
+
OPENAIUNETWRAPPERCONTROLLDM3DSSN,
|
| 35 |
+
OPENAIUNETWRAPPERCONTROLLDM3DTV2V,
|
| 36 |
+
OPENAIUNETWRAPPERCONTROLLDM3DTV2V_INTERPOLATE,
|
| 37 |
+
)
|
| 38 |
+
from ..modules.ema import LitEma
|
| 39 |
+
from ..util import (
|
| 40 |
+
default,
|
| 41 |
+
disabled_train,
|
| 42 |
+
get_obj_from_str,
|
| 43 |
+
instantiate_from_config,
|
| 44 |
+
log_txt_as_img,
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
class DiffusionEngine(pl.LightningModule):
|
| 48 |
+
def __init__(
|
| 49 |
+
self,
|
| 50 |
+
network_config,
|
| 51 |
+
denoiser_config,
|
| 52 |
+
first_stage_config,
|
| 53 |
+
conditioner_config: Union[None, Dict, ListConfig, OmegaConf] = None,
|
| 54 |
+
sampler_config: Union[None, Dict, ListConfig, OmegaConf] = None,
|
| 55 |
+
optimizer_config: Union[None, Dict, ListConfig, OmegaConf] = None,
|
| 56 |
+
scheduler_config: Union[None, Dict, ListConfig, OmegaConf] = None,
|
| 57 |
+
loss_fn_config: Union[None, Dict, ListConfig, OmegaConf] = None,
|
| 58 |
+
network_wrapper: Union[None, str] = None,
|
| 59 |
+
ckpt_path: Union[None, str] = None,
|
| 60 |
+
use_ema: bool = False,
|
| 61 |
+
ema_decay_rate: float = 0.9999,
|
| 62 |
+
scale_factor: float = 1.0,
|
| 63 |
+
disable_first_stage_autocast=False,
|
| 64 |
+
input_key: str = "jpg",
|
| 65 |
+
log_keys: Union[List, None] = None,
|
| 66 |
+
no_cond_log: bool = False,
|
| 67 |
+
compile_model: bool = False,
|
| 68 |
+
):
|
| 69 |
+
super().__init__()
|
| 70 |
+
self.log_keys = log_keys
|
| 71 |
+
self.input_key = input_key
|
| 72 |
+
self.optimizer_config = default(
|
| 73 |
+
optimizer_config, {"target": "torch.optim.AdamW"}
|
| 74 |
+
)
|
| 75 |
+
model = instantiate_from_config(network_config)
|
| 76 |
+
wrapper_type = (
|
| 77 |
+
self.wrapper_type if hasattr(self, "wrapper_type") else OPENAIUNETWRAPPER
|
| 78 |
+
)
|
| 79 |
+
self.model = get_obj_from_str(default(network_wrapper, wrapper_type))(
|
| 80 |
+
model, compile_model=compile_model
|
| 81 |
+
)
|
| 82 |
+
|
| 83 |
+
self.denoiser = instantiate_from_config(denoiser_config)
|
| 84 |
+
self.sampler = (
|
| 85 |
+
instantiate_from_config(sampler_config)
|
| 86 |
+
if sampler_config is not None
|
| 87 |
+
else None
|
| 88 |
+
)
|
| 89 |
+
self.conditioner = instantiate_from_config(
|
| 90 |
+
default(conditioner_config, UNCONDITIONAL_CONFIG)
|
| 91 |
+
)
|
| 92 |
+
self.scheduler_config = scheduler_config
|
| 93 |
+
self._init_first_stage(first_stage_config)
|
| 94 |
+
|
| 95 |
+
self.loss_fn = (
|
| 96 |
+
instantiate_from_config(loss_fn_config)
|
| 97 |
+
if loss_fn_config is not None
|
| 98 |
+
else None
|
| 99 |
+
)
|
| 100 |
+
|
| 101 |
+
self.use_ema = use_ema
|
| 102 |
+
if self.use_ema:
|
| 103 |
+
self.model_ema = LitEma(self.model, decay=ema_decay_rate)
|
| 104 |
+
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
|
| 105 |
+
|
| 106 |
+
self.scale_factor = scale_factor
|
| 107 |
+
self.disable_first_stage_autocast = disable_first_stage_autocast
|
| 108 |
+
self.no_cond_log = no_cond_log
|
| 109 |
+
|
| 110 |
+
if ckpt_path is not None:
|
| 111 |
+
self.init_from_ckpt(ckpt_path)
|
| 112 |
+
|
| 113 |
+
def init_from_ckpt(
|
| 114 |
+
self,
|
| 115 |
+
path: str,
|
| 116 |
+
) -> None:
|
| 117 |
+
print(f"Loading checkpoint from {path} ... ")
|
| 118 |
+
if path.endswith("ckpt"):
|
| 119 |
+
# sd = torch.load(path, map_location="cpu")["state_dict"]
|
| 120 |
+
if "deepspeed" in path:
|
| 121 |
+
sd = torch.load(path, map_location="cpu")
|
| 122 |
+
sd = {k.replace("_forward_module.", ""): v for k, v in sd.items()}
|
| 123 |
+
else:
|
| 124 |
+
sd = torch.load(path, map_location="cpu")["state_dict"]
|
| 125 |
+
elif path.endswith("safetensors"):
|
| 126 |
+
sd = load_safetensors(path)
|
| 127 |
+
else:
|
| 128 |
+
raise NotImplementedError
|
| 129 |
+
|
| 130 |
+
missing, unexpected = self.load_state_dict(sd, strict=False)
|
| 131 |
+
print(
|
| 132 |
+
f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys"
|
| 133 |
+
)
|
| 134 |
+
if len(missing) > 0:
|
| 135 |
+
print(f"Missing Keys: {missing}")
|
| 136 |
+
if len(unexpected) > 0:
|
| 137 |
+
print(f"Unexpected Keys: {unexpected}")
|
| 138 |
+
|
| 139 |
+
def _init_first_stage(self, config):
|
| 140 |
+
model = instantiate_from_config(config).eval()
|
| 141 |
+
model.train = disabled_train
|
| 142 |
+
for param in model.parameters():
|
| 143 |
+
param.requires_grad = False
|
| 144 |
+
self.first_stage_model = model
|
| 145 |
+
|
| 146 |
+
def get_input(self, batch):
|
| 147 |
+
# assuming unified data format, dataloader returns a dict.
|
| 148 |
+
# image tensors should be scaled to -1 ... 1 and in bchw format
|
| 149 |
+
return batch[self.input_key]
|
| 150 |
+
|
| 151 |
+
@torch.no_grad()
|
| 152 |
+
def decode_first_stage(self, z):
|
| 153 |
+
z = 1.0 / self.scale_factor * z
|
| 154 |
+
with torch.autocast("cuda", enabled=not self.disable_first_stage_autocast):
|
| 155 |
+
out = self.first_stage_model.decode(z)
|
| 156 |
+
return out
|
| 157 |
+
|
| 158 |
+
@torch.no_grad()
|
| 159 |
+
def encode_first_stage(self, x):
|
| 160 |
+
with torch.autocast("cuda", enabled=not self.disable_first_stage_autocast):
|
| 161 |
+
z = self.first_stage_model.encode(x)
|
| 162 |
+
z = self.scale_factor * z
|
| 163 |
+
return z
|
| 164 |
+
|
| 165 |
+
def forward(self, x, batch):
|
| 166 |
+
loss = self.loss_fn(self.model, self.denoiser, self.conditioner, x, batch)
|
| 167 |
+
loss_mean = loss.mean()
|
| 168 |
+
loss_dict = {"loss": loss_mean}
|
| 169 |
+
return loss_mean, loss_dict
|
| 170 |
+
|
| 171 |
+
def shared_step(self, batch: Dict) -> Any:
|
| 172 |
+
x = self.get_input(batch)
|
| 173 |
+
x = self.encode_first_stage(x)
|
| 174 |
+
batch["global_step"] = self.global_step
|
| 175 |
+
loss, loss_dict = self(x, batch)
|
| 176 |
+
return loss, loss_dict
|
| 177 |
+
|
| 178 |
+
def training_step(self, batch, batch_idx):
|
| 179 |
+
loss, loss_dict = self.shared_step(batch)
|
| 180 |
+
|
| 181 |
+
self.log_dict(
|
| 182 |
+
loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=False
|
| 183 |
+
)
|
| 184 |
+
|
| 185 |
+
self.log(
|
| 186 |
+
"global_step",
|
| 187 |
+
self.global_step,
|
| 188 |
+
prog_bar=True,
|
| 189 |
+
logger=True,
|
| 190 |
+
on_step=True,
|
| 191 |
+
on_epoch=False,
|
| 192 |
+
)
|
| 193 |
+
|
| 194 |
+
if self.scheduler_config is not None:
|
| 195 |
+
lr = self.optimizers().param_groups[0]["lr"]
|
| 196 |
+
self.log(
|
| 197 |
+
"lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False
|
| 198 |
+
)
|
| 199 |
+
|
| 200 |
+
return loss
|
| 201 |
+
|
| 202 |
+
def on_train_start(self, *args, **kwargs):
|
| 203 |
+
if self.sampler is None or self.loss_fn is None:
|
| 204 |
+
raise ValueError("Sampler and loss function need to be set for training.")
|
| 205 |
+
|
| 206 |
+
def on_train_batch_end(self, *args, **kwargs):
|
| 207 |
+
if self.use_ema:
|
| 208 |
+
self.model_ema(self.model)
|
| 209 |
+
|
| 210 |
+
@contextmanager
|
| 211 |
+
def ema_scope(self, context=None):
|
| 212 |
+
if self.use_ema:
|
| 213 |
+
self.model_ema.store(self.model.parameters())
|
| 214 |
+
self.model_ema.copy_to(self.model)
|
| 215 |
+
if context is not None:
|
| 216 |
+
print(f"{context}: Switched to EMA weights")
|
| 217 |
+
try:
|
| 218 |
+
yield None
|
| 219 |
+
finally:
|
| 220 |
+
if self.use_ema:
|
| 221 |
+
self.model_ema.restore(self.model.parameters())
|
| 222 |
+
if context is not None:
|
| 223 |
+
print(f"{context}: Restored training weights")
|
| 224 |
+
|
| 225 |
+
def instantiate_optimizer_from_config(self, params, lr, cfg):
|
| 226 |
+
return get_obj_from_str(cfg["target"])(
|
| 227 |
+
params, lr=lr, **cfg.get("params", dict())
|
| 228 |
+
)
|
| 229 |
+
|
| 230 |
+
def configure_optimizers(self):
|
| 231 |
+
lr = self.learning_rate
|
| 232 |
+
params = list(self.model.parameters())
|
| 233 |
+
for embedder in self.conditioner.embedders:
|
| 234 |
+
if embedder.is_trainable:
|
| 235 |
+
params = params + list(embedder.parameters())
|
| 236 |
+
opt = self.instantiate_optimizer_from_config(params, lr, self.optimizer_config)
|
| 237 |
+
if self.scheduler_config is not None:
|
| 238 |
+
scheduler = instantiate_from_config(self.scheduler_config)
|
| 239 |
+
print("Setting up LambdaLR scheduler...")
|
| 240 |
+
scheduler = [
|
| 241 |
+
{
|
| 242 |
+
"scheduler": LambdaLR(opt, lr_lambda=scheduler.schedule),
|
| 243 |
+
"interval": "step",
|
| 244 |
+
"frequency": 1,
|
| 245 |
+
}
|
| 246 |
+
]
|
| 247 |
+
return [opt], scheduler
|
| 248 |
+
return opt
|
| 249 |
+
|
| 250 |
+
@torch.no_grad()
|
| 251 |
+
def sample(
|
| 252 |
+
self,
|
| 253 |
+
cond: Dict,
|
| 254 |
+
uc: Union[Dict, None] = None,
|
| 255 |
+
batch_size: int = 16,
|
| 256 |
+
shape: Union[None, Tuple, List] = None,
|
| 257 |
+
**kwargs,
|
| 258 |
+
):
|
| 259 |
+
randn = torch.randn(batch_size, *shape).to(self.device)
|
| 260 |
+
|
| 261 |
+
denoiser = lambda input, sigma, c: self.denoiser(
|
| 262 |
+
self.model, input, sigma, c, **kwargs
|
| 263 |
+
)
|
| 264 |
+
samples = self.sampler(denoiser, randn, cond, uc=uc)
|
| 265 |
+
return samples
|
| 266 |
+
|
| 267 |
+
@torch.no_grad()
|
| 268 |
+
def log_conditionings(self, batch: Dict, n: int) -> Dict:
|
| 269 |
+
"""
|
| 270 |
+
Defines heuristics to log different conditionings.
|
| 271 |
+
These can be lists of strings (text-to-image), tensors, ints, ...
|
| 272 |
+
"""
|
| 273 |
+
image_h, image_w = batch[self.input_key].shape[2:]
|
| 274 |
+
log = dict()
|
| 275 |
+
|
| 276 |
+
for embedder in self.conditioner.embedders:
|
| 277 |
+
if (
|
| 278 |
+
(self.log_keys is None) or (embedder.input_key in self.log_keys)
|
| 279 |
+
) and not self.no_cond_log:
|
| 280 |
+
x = batch[embedder.input_key][:n]
|
| 281 |
+
if isinstance(x, torch.Tensor):
|
| 282 |
+
if x.dim() == 1:
|
| 283 |
+
# class-conditional, convert integer to string
|
| 284 |
+
x = [str(x[i].item()) for i in range(x.shape[0])]
|
| 285 |
+
xc = log_txt_as_img((image_h, image_w), x, size=image_h // 4)
|
| 286 |
+
elif x.dim() == 2:
|
| 287 |
+
# size and crop cond and the like
|
| 288 |
+
x = [
|
| 289 |
+
"x".join([str(xx) for xx in x[i].tolist()])
|
| 290 |
+
for i in range(x.shape[0])
|
| 291 |
+
]
|
| 292 |
+
xc = log_txt_as_img((image_h, image_w), x, size=image_h // 20)
|
| 293 |
+
else:
|
| 294 |
+
raise NotImplementedError()
|
| 295 |
+
elif isinstance(x, (List, ListConfig)):
|
| 296 |
+
if isinstance(x[0], str):
|
| 297 |
+
# strings
|
| 298 |
+
# xc = log_txt_as_img((image_h, image_w), x, size=image_h // 20)
|
| 299 |
+
# xc = log_txt_as_img((image_w * 2, image_h), x, size=image_h // 15)
|
| 300 |
+
xc = log_txt_as_img(
|
| 301 |
+
(image_w * 2, image_h), x, size=image_h // 25
|
| 302 |
+
)
|
| 303 |
+
else:
|
| 304 |
+
raise NotImplementedError()
|
| 305 |
+
else:
|
| 306 |
+
raise NotImplementedError()
|
| 307 |
+
log[embedder.input_key] = xc
|
| 308 |
+
return log
|
| 309 |
+
|
| 310 |
+
@torch.no_grad()
|
| 311 |
+
def log_images(
|
| 312 |
+
self,
|
| 313 |
+
batch: Dict,
|
| 314 |
+
N: int = 8,
|
| 315 |
+
sample: bool = True,
|
| 316 |
+
ucg_keys: List[str] = None,
|
| 317 |
+
**kwargs,
|
| 318 |
+
) -> Dict:
|
| 319 |
+
conditioner_input_keys = [e.input_key for e in self.conditioner.embedders]
|
| 320 |
+
if ucg_keys:
|
| 321 |
+
assert all(map(lambda x: x in conditioner_input_keys, ucg_keys)), (
|
| 322 |
+
"Each defined ucg key for sampling must be in the provided conditioner input keys,"
|
| 323 |
+
f"but we have {ucg_keys} vs. {conditioner_input_keys}"
|
| 324 |
+
)
|
| 325 |
+
else:
|
| 326 |
+
ucg_keys = conditioner_input_keys
|
| 327 |
+
log = dict()
|
| 328 |
+
|
| 329 |
+
x = self.get_input(batch)
|
| 330 |
+
|
| 331 |
+
c, uc = self.conditioner.get_unconditional_conditioning(
|
| 332 |
+
batch,
|
| 333 |
+
force_uc_zero_embeddings=ucg_keys
|
| 334 |
+
if len(self.conditioner.embedders) > 0
|
| 335 |
+
else [],
|
| 336 |
+
)
|
| 337 |
+
|
| 338 |
+
sampling_kwargs = {}
|
| 339 |
+
|
| 340 |
+
N = min(x.shape[0], N)
|
| 341 |
+
x = x.to(self.device)[:N]
|
| 342 |
+
log["inputs"] = x
|
| 343 |
+
z = self.encode_first_stage(x)
|
| 344 |
+
log["reconstructions"] = self.decode_first_stage(z)
|
| 345 |
+
log.update(self.log_conditionings(batch, N))
|
| 346 |
+
|
| 347 |
+
for k in c:
|
| 348 |
+
if isinstance(c[k], torch.Tensor):
|
| 349 |
+
c[k], uc[k] = map(lambda y: y[k][:N].to(self.device), (c, uc))
|
| 350 |
+
|
| 351 |
+
if sample:
|
| 352 |
+
with self.ema_scope("Plotting"):
|
| 353 |
+
samples = self.sample(
|
| 354 |
+
c, shape=z.shape[1:], uc=uc, batch_size=N, **sampling_kwargs
|
| 355 |
+
)
|
| 356 |
+
samples = self.decode_first_stage(samples)
|
| 357 |
+
log["samples"] = samples
|
| 358 |
+
return log
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
class VideoDiffusionEngine(DiffusionEngine):
|
| 362 |
+
def __init__(
|
| 363 |
+
self,
|
| 364 |
+
freeze_model="none",
|
| 365 |
+
wrapper_type="OPENAIUNETWRAPPERCONTROLLDM3D",
|
| 366 |
+
*args,
|
| 367 |
+
**kwargs,
|
| 368 |
+
):
|
| 369 |
+
self.wrapper_type = eval(wrapper_type)
|
| 370 |
+
super().__init__(*args, **kwargs)
|
| 371 |
+
self.freeze_model = freeze_model
|
| 372 |
+
|
| 373 |
+
self.setup_vaeembedder()
|
| 374 |
+
|
| 375 |
+
def setup_vaeembedder(self):
|
| 376 |
+
for embedder in self.conditioner.embedders:
|
| 377 |
+
if isinstance(embedder, VAEEmbedder):
|
| 378 |
+
embedder.first_stage_model = (
|
| 379 |
+
self.first_stage_model
|
| 380 |
+
) # TODO: should we add .clone()
|
| 381 |
+
embedder.disable_first_stage_autocast = (
|
| 382 |
+
self.disable_first_stage_autocast
|
| 383 |
+
)
|
| 384 |
+
embedder.scale_factor = self.scale_factor
|
| 385 |
+
embedder.freeze()
|
| 386 |
+
|
| 387 |
+
def get_input(self, batch):
|
| 388 |
+
# assuming unified data format, dataloader returns a dict.
|
| 389 |
+
# video tensors should be scaled to -1 ... 1 and in bcthw format
|
| 390 |
+
out_data = batch[self.input_key]
|
| 391 |
+
return out_data
|
| 392 |
+
|
| 393 |
+
def shared_step(self, batch: Dict) -> Any:
|
| 394 |
+
x = self.get_input(batch)
|
| 395 |
+
x = self.encode_first_stage(x)
|
| 396 |
+
batch["global_step"] = self.global_step
|
| 397 |
+
loss, loss_dict = self(x, batch)
|
| 398 |
+
return loss, loss_dict
|
| 399 |
+
|
| 400 |
+
@torch.no_grad()
|
| 401 |
+
def log_conditionings(self, batch: Dict, n: int) -> Dict:
|
| 402 |
+
"""
|
| 403 |
+
Defines heuristics to log different conditionings.
|
| 404 |
+
These can be lists of strings (text-to-image), tensors, ints, ...
|
| 405 |
+
"""
|
| 406 |
+
image_h, image_w = batch[self.input_key].shape[-2:]
|
| 407 |
+
log = dict()
|
| 408 |
+
|
| 409 |
+
for embedder in self.conditioner.embedders:
|
| 410 |
+
if (
|
| 411 |
+
(self.log_keys is None) or (embedder.input_key in self.log_keys)
|
| 412 |
+
) and not self.no_cond_log:
|
| 413 |
+
x = batch[embedder.input_key][:n]
|
| 414 |
+
if isinstance(x, torch.Tensor):
|
| 415 |
+
if x.dim() == 1:
|
| 416 |
+
# class-conditional, convert integer to string
|
| 417 |
+
x = [str(x[i].item()) for i in range(x.shape[0])]
|
| 418 |
+
xc = log_txt_as_img((image_h, image_w), x, size=image_h // 4)
|
| 419 |
+
elif x.dim() == 2:
|
| 420 |
+
# size and crop cond and the like
|
| 421 |
+
x = [
|
| 422 |
+
"x".join([str(xx) for xx in x[i].tolist()])
|
| 423 |
+
for i in range(x.shape[0])
|
| 424 |
+
]
|
| 425 |
+
xc = log_txt_as_img((image_h, image_w), x, size=image_h // 20)
|
| 426 |
+
else:
|
| 427 |
+
raise NotImplementedError()
|
| 428 |
+
elif isinstance(x, (List, ListConfig)):
|
| 429 |
+
if isinstance(x[0], str):
|
| 430 |
+
# strings
|
| 431 |
+
# xc = log_txt_as_img((image_w, image_h), x, size=image_h // 20)
|
| 432 |
+
xc = log_txt_as_img(
|
| 433 |
+
(image_w, image_h), x, size=image_h // 10, split_loc=15
|
| 434 |
+
)
|
| 435 |
+
# xc = log_txt_as_img((image_w * 2, image_h), x, size=image_h // 15, split_loc=20)
|
| 436 |
+
# xc = log_txt_as_img(
|
| 437 |
+
# (image_w * 3, image_h), x, size=image_h // 5, split_loc=15
|
| 438 |
+
# )
|
| 439 |
+
else:
|
| 440 |
+
raise NotImplementedError()
|
| 441 |
+
else:
|
| 442 |
+
raise NotImplementedError()
|
| 443 |
+
log[embedder.input_key] = xc
|
| 444 |
+
return log
|
| 445 |
+
|
| 446 |
+
@torch.no_grad()
|
| 447 |
+
def log_images(
|
| 448 |
+
self,
|
| 449 |
+
batch: Dict,
|
| 450 |
+
N: int = 8,
|
| 451 |
+
sample: bool = True,
|
| 452 |
+
ucg_keys: List[str] = None,
|
| 453 |
+
**kwargs,
|
| 454 |
+
) -> Dict:
|
| 455 |
+
# TODO: refactor this
|
| 456 |
+
conditioner_input_keys = [e.input_key for e in self.conditioner.embedders]
|
| 457 |
+
if ucg_keys:
|
| 458 |
+
assert all(map(lambda x: x in conditioner_input_keys, ucg_keys)), (
|
| 459 |
+
"Each defined ucg key for sampling must be in the provided conditioner input keys,"
|
| 460 |
+
f"but we have {ucg_keys} vs. {conditioner_input_keys}"
|
| 461 |
+
)
|
| 462 |
+
else:
|
| 463 |
+
ucg_keys = conditioner_input_keys
|
| 464 |
+
log = dict()
|
| 465 |
+
|
| 466 |
+
x = self.get_input(batch)
|
| 467 |
+
|
| 468 |
+
c, uc = self.conditioner.get_unconditional_conditioning(
|
| 469 |
+
batch,
|
| 470 |
+
force_uc_zero_embeddings=ucg_keys
|
| 471 |
+
if len(self.conditioner.embedders) > 0
|
| 472 |
+
else [],
|
| 473 |
+
)
|
| 474 |
+
|
| 475 |
+
sampling_kwargs = {
|
| 476 |
+
key: batch[key] for key in self.loss_fn.batch2model_keys.intersection(batch)
|
| 477 |
+
}
|
| 478 |
+
|
| 479 |
+
N = min(x.shape[0], N)
|
| 480 |
+
x = x.to(self.device)[:N]
|
| 481 |
+
log["inputs"] = x
|
| 482 |
+
log["inputs-video"] = x
|
| 483 |
+
log["cond_img"] = batch["cond_img"]
|
| 484 |
+
z = self.encode_first_stage(x)
|
| 485 |
+
log["reconstructions"] = self.decode_first_stage(z)
|
| 486 |
+
log["reconstructions-video"] = self.decode_first_stage(z)
|
| 487 |
+
log.update(self.log_conditionings(batch, N))
|
| 488 |
+
|
| 489 |
+
for k in c:
|
| 490 |
+
if isinstance(c[k], torch.Tensor):
|
| 491 |
+
c[k], uc[k] = map(lambda y: y[k][:N].to(self.device), (c, uc))
|
| 492 |
+
|
| 493 |
+
if sample:
|
| 494 |
+
with self.ema_scope("Plotting"):
|
| 495 |
+
samples = self.sample(
|
| 496 |
+
c, shape=z.shape[1:], uc=uc, batch_size=N, **sampling_kwargs
|
| 497 |
+
)
|
| 498 |
+
samples = self.decode_first_stage(samples)
|
| 499 |
+
log["samples"] = samples
|
| 500 |
+
log["samples-video"] = samples
|
| 501 |
+
|
| 502 |
+
# concat the inputs and outputs for visualization
|
| 503 |
+
log["inputs_samples"] = torch.cat([log["inputs"], log["samples"]], dim=3)
|
| 504 |
+
del log["inputs"]
|
| 505 |
+
del log["samples"]
|
| 506 |
+
# log['inputs_samples-video'] = torch.cat([log['inputs-video'], log['samples-video']], dim=3)
|
| 507 |
+
# del log['inputs-video']
|
| 508 |
+
# del log['samples-video']
|
| 509 |
+
return log
|
| 510 |
+
|
| 511 |
+
def configure_optimizers(self):
|
| 512 |
+
lr = self.learning_rate
|
| 513 |
+
|
| 514 |
+
if self.freeze_model == "none":
|
| 515 |
+
params = list(self.model.diffusion_model.parameters())
|
| 516 |
+
for name, param in self.model.diffusion_model.named_parameters():
|
| 517 |
+
print(f"Setting {name} to trainable")
|
| 518 |
+
param.requires_grad = True # TODO: why this?
|
| 519 |
+
elif self.freeze_model == "spatial":
|
| 520 |
+
params = []
|
| 521 |
+
if hasattr(self.model.diffusion_model, "controlnet"):
|
| 522 |
+
params += list(self.model.diffusion_model.controlnet.parameters())
|
| 523 |
+
for name, param in self.model.diffusion_model.named_parameters():
|
| 524 |
+
if "controlnet" not in name:
|
| 525 |
+
if "temporal" in name:
|
| 526 |
+
params.append(param)
|
| 527 |
+
else:
|
| 528 |
+
param.requires_grad = False
|
| 529 |
+
elif self.freeze_model == "spatial_openlora":
|
| 530 |
+
params = []
|
| 531 |
+
if hasattr(self.model.diffusion_model, "controlnet"):
|
| 532 |
+
params += list(self.model.diffusion_model.controlnet.parameters())
|
| 533 |
+
for name, param in self.model.diffusion_model.named_parameters():
|
| 534 |
+
if "controlnet" not in name:
|
| 535 |
+
if "temporal" in name or "lora" in name:
|
| 536 |
+
params.append(param)
|
| 537 |
+
else:
|
| 538 |
+
param.requires_grad = False
|
| 539 |
+
else:
|
| 540 |
+
raise NotImplementedError
|
| 541 |
+
|
| 542 |
+
for embedder in self.conditioner.embedders:
|
| 543 |
+
if embedder.is_trainable:
|
| 544 |
+
params = params + list(embedder.parameters())
|
| 545 |
+
opt = self.instantiate_optimizer_from_config(params, lr, self.optimizer_config)
|
| 546 |
+
if self.scheduler_config is not None:
|
| 547 |
+
scheduler = instantiate_from_config(self.scheduler_config)
|
| 548 |
+
print("Setting up LambdaLR scheduler...")
|
| 549 |
+
scheduler = [
|
| 550 |
+
{
|
| 551 |
+
"scheduler": LambdaLR(opt, lr_lambda=scheduler.schedule),
|
| 552 |
+
"interval": "step",
|
| 553 |
+
"frequency": 1,
|
| 554 |
+
}
|
| 555 |
+
]
|
| 556 |
+
return [opt], scheduler
|
| 557 |
+
return opt
|
| 558 |
+
|
| 559 |
+
# -----------------------------------------------------
|
| 560 |
+
class VideoDiffusionEngineTV2V(VideoDiffusionEngine):
|
| 561 |
+
def __init__(self, *args, **kwargs):
|
| 562 |
+
# kwargs['wrapper_type'] = OPENAIUNETWRAPPERCONTROLLDM3DTV2V
|
| 563 |
+
kwargs["wrapper_type"] = kwargs.get(
|
| 564 |
+
"wrapper_type", "OPENAIUNETWRAPPERCONTROLLDM3DTV2V"
|
| 565 |
+
)
|
| 566 |
+
super().__init__(*args, **kwargs)
|
| 567 |
+
|
| 568 |
+
# freeze the controlnet (load pre-trained weights, no need to train)
|
| 569 |
+
self.model.diffusion_model.controlnet.eval()
|
| 570 |
+
for name, param in self.model.diffusion_model.controlnet.named_parameters():
|
| 571 |
+
param.requires_grad = False
|
| 572 |
+
|
| 573 |
+
if hasattr(self.model.diffusion_model, "controlnet_img"):
|
| 574 |
+
print('Setting controlnet_img to trainable ... ')
|
| 575 |
+
# open the controlnet_img
|
| 576 |
+
for (
|
| 577 |
+
name,
|
| 578 |
+
param,
|
| 579 |
+
) in self.model.diffusion_model.controlnet_img.named_parameters():
|
| 580 |
+
param.requires_grad = True
|
| 581 |
+
|
| 582 |
+
def init_from_ckpt(
|
| 583 |
+
self,
|
| 584 |
+
path: str,
|
| 585 |
+
) -> None:
|
| 586 |
+
print(f"Loading checkpoint from {path} ... ")
|
| 587 |
+
if path.endswith("ckpt"):
|
| 588 |
+
if "deepspeed" in path:
|
| 589 |
+
sd = torch.load(path, map_location="cpu")
|
| 590 |
+
sd = {k.replace("_forward_module.", ""): v for k, v in sd.items()}
|
| 591 |
+
else:
|
| 592 |
+
sd = torch.load(path, map_location="cpu")["state_dict"]
|
| 593 |
+
elif path.endswith("safetensors"):
|
| 594 |
+
sd = load_safetensors(path)
|
| 595 |
+
else:
|
| 596 |
+
raise NotImplementedError
|
| 597 |
+
|
| 598 |
+
missing, unexpected = self.load_state_dict(sd, strict=False)
|
| 599 |
+
|
| 600 |
+
print(
|
| 601 |
+
f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys"
|
| 602 |
+
)
|
| 603 |
+
if len(missing) > 0:
|
| 604 |
+
print(f"Missing Keys: {missing}")
|
| 605 |
+
if len(unexpected) > 0:
|
| 606 |
+
print(f"Unexpected Keys: {unexpected}")
|
| 607 |
+
|
| 608 |
+
@torch.no_grad()
|
| 609 |
+
def log_images(
|
| 610 |
+
self,
|
| 611 |
+
batch: Dict,
|
| 612 |
+
N: int = 8,
|
| 613 |
+
sample: bool = True,
|
| 614 |
+
ucg_keys: List[str] = None,
|
| 615 |
+
**kwargs,
|
| 616 |
+
) -> Dict:
|
| 617 |
+
conditioner_input_keys = [e.input_key for e in self.conditioner.embedders]
|
| 618 |
+
if ucg_keys:
|
| 619 |
+
assert all(map(lambda x: x in conditioner_input_keys, ucg_keys)), (
|
| 620 |
+
"Each defined ucg key for sampling must be in the provided conditioner input keys,"
|
| 621 |
+
f"but we have {ucg_keys} vs. {conditioner_input_keys}"
|
| 622 |
+
)
|
| 623 |
+
else:
|
| 624 |
+
ucg_keys = conditioner_input_keys
|
| 625 |
+
log = dict()
|
| 626 |
+
|
| 627 |
+
x = self.get_input(batch)
|
| 628 |
+
|
| 629 |
+
negative_prompt = "ugly, low quality"
|
| 630 |
+
batch_uc = {
|
| 631 |
+
"txt": [negative_prompt for i in range(x.shape[0])],
|
| 632 |
+
"control_hint": batch[
|
| 633 |
+
"control_hint"
|
| 634 |
+
].clone(), # to use the pretrained weights, we must use the same control_hint in the batch_uc
|
| 635 |
+
}
|
| 636 |
+
if "cond_img" in batch.keys(): # for TVI2V;
|
| 637 |
+
# TODO: in fact, we can delete this, just use empty tensor as cond_img for batch_uc
|
| 638 |
+
batch_uc["cond_img"] = batch["cond_img"].clone()
|
| 639 |
+
# batch_uc['cond_img'] = torch.zeros_like(batch['cond_img'])
|
| 640 |
+
batch["txt"] = ["masterpiece, best quality, " + each for each in batch["txt"]]
|
| 641 |
+
c, uc = self.conditioner.get_unconditional_conditioning(
|
| 642 |
+
batch_c=batch,
|
| 643 |
+
batch_uc=batch_uc,
|
| 644 |
+
)
|
| 645 |
+
|
| 646 |
+
sampling_kwargs = {
|
| 647 |
+
key: batch[key] for key in self.loss_fn.batch2model_keys.intersection(batch)
|
| 648 |
+
}
|
| 649 |
+
|
| 650 |
+
N = min(x.shape[0], N)
|
| 651 |
+
x = x.to(self.device)[:N]
|
| 652 |
+
log["inputs"] = x
|
| 653 |
+
log["inputs-video"] = x
|
| 654 |
+
if "cond_img" in batch.keys():
|
| 655 |
+
log["cond_img"] = batch["cond_img"]
|
| 656 |
+
z = self.encode_first_stage(x)
|
| 657 |
+
# log["reconstructions"] = self.decode_first_stage(z)
|
| 658 |
+
# log["reconstructions-video"] = self.decode_first_stage(z)
|
| 659 |
+
log.update(self.log_conditionings(batch, N))
|
| 660 |
+
|
| 661 |
+
for k in c:
|
| 662 |
+
if isinstance(c[k], torch.Tensor):
|
| 663 |
+
c[k], uc[k] = map(lambda y: y[k][:N].to(self.device), (c, uc))
|
| 664 |
+
|
| 665 |
+
if sample:
|
| 666 |
+
with self.ema_scope("Plotting"):
|
| 667 |
+
samples = self.sample(
|
| 668 |
+
c, shape=z.shape[1:], uc=uc, batch_size=N, **sampling_kwargs
|
| 669 |
+
)
|
| 670 |
+
samples = self.decode_first_stage(samples)
|
| 671 |
+
log["samples"] = samples
|
| 672 |
+
log["samples-video"] = samples
|
| 673 |
+
|
| 674 |
+
for embedder in self.conditioner.embedders:
|
| 675 |
+
if (
|
| 676 |
+
isinstance(embedder, LineartEncoder)
|
| 677 |
+
or isinstance(embedder, DepthZoeEncoder)
|
| 678 |
+
or isinstance(embedder, DepthMidasEncoder)
|
| 679 |
+
or isinstance(embedder, SoftEdgeEncoder)
|
| 680 |
+
or isinstance(embedder, NormalBaeEncoder)
|
| 681 |
+
or isinstance(embedder, ScribbleHEDEncoder)
|
| 682 |
+
or isinstance(embedder, ScribblePidiNetEncoder)
|
| 683 |
+
or isinstance(embedder, OpenposeEncoder)
|
| 684 |
+
or isinstance(embedder, OutpaintingEncoder)
|
| 685 |
+
or isinstance(embedder, InpaintingEncoder)
|
| 686 |
+
):
|
| 687 |
+
# log['control_hint'] = embedder.encode(batch['control_hint'])
|
| 688 |
+
# log['control_hint-video'] = embedder.encode(batch['control_hint'])
|
| 689 |
+
log["control_hint"] = -embedder.encode(batch["control_hint"])
|
| 690 |
+
log["control_hint-video"] = -embedder.encode(batch["control_hint"])
|
| 691 |
+
break
|
| 692 |
+
|
| 693 |
+
# concat the inputs and outputs for visualization
|
| 694 |
+
log["inputs_samples_hint"] = torch.cat(
|
| 695 |
+
[log["inputs"], log["samples"], log["control_hint"]], dim=3
|
| 696 |
+
)
|
| 697 |
+
del log["inputs"]
|
| 698 |
+
del log["samples"]
|
| 699 |
+
del log["control_hint"]
|
| 700 |
+
|
| 701 |
+
log["inputs_samples_hint-video"] = torch.cat(
|
| 702 |
+
[log["inputs-video"], log["samples-video"], log["control_hint-video"]],
|
| 703 |
+
dim=3,
|
| 704 |
+
)
|
| 705 |
+
del log["inputs-video"]
|
| 706 |
+
del log["samples-video"]
|
| 707 |
+
del log["control_hint-video"]
|
| 708 |
+
return log
|
| 709 |
+
|
| 710 |
+
def configure_optimizers(self):
|
| 711 |
+
lr = self.learning_rate
|
| 712 |
+
|
| 713 |
+
if self.freeze_model == "none":
|
| 714 |
+
params = list(self.model.diffusion_model.parameters())
|
| 715 |
+
for name, param in self.model.diffusion_model.named_parameters():
|
| 716 |
+
print(f"Setting {name} to trainable")
|
| 717 |
+
param.requires_grad = True
|
| 718 |
+
elif self.freeze_model == "spatial":
|
| 719 |
+
params = []
|
| 720 |
+
if hasattr(self.model.diffusion_model, "controlnet"):
|
| 721 |
+
params += list(self.model.diffusion_model.controlnet.parameters())
|
| 722 |
+
if hasattr(self.model.diffusion_model, "controlnet_img"):
|
| 723 |
+
params += list(self.model.diffusion_model.controlnet_img.parameters())
|
| 724 |
+
for name, param in self.model.diffusion_model.named_parameters():
|
| 725 |
+
if "controlnet" not in name:
|
| 726 |
+
if "temporal" in name:
|
| 727 |
+
params.append(param)
|
| 728 |
+
else:
|
| 729 |
+
param.requires_grad = False
|
| 730 |
+
else:
|
| 731 |
+
raise NotImplementedError
|
| 732 |
+
|
| 733 |
+
for embedder in self.conditioner.embedders:
|
| 734 |
+
if embedder.is_trainable:
|
| 735 |
+
params = params + list(embedder.parameters())
|
| 736 |
+
opt = self.instantiate_optimizer_from_config(params, lr, self.optimizer_config)
|
| 737 |
+
if self.scheduler_config is not None:
|
| 738 |
+
scheduler = instantiate_from_config(self.scheduler_config)
|
| 739 |
+
print("Setting up LambdaLR scheduler...")
|
| 740 |
+
scheduler = [
|
| 741 |
+
{
|
| 742 |
+
"scheduler": LambdaLR(opt, lr_lambda=scheduler.schedule),
|
| 743 |
+
"interval": "step",
|
| 744 |
+
"frequency": 1,
|
| 745 |
+
}
|
| 746 |
+
]
|
| 747 |
+
return [opt], scheduler
|
| 748 |
+
return opt
|
| 749 |
+
|
| 750 |
+
|
| 751 |
+
class VideoDiffusionEngineTV2VInterpolate(VideoDiffusionEngineTV2V):
|
| 752 |
+
def __init__(self, *args, **kwargs):
|
| 753 |
+
kwargs["wrapper_type"] = "OPENAIUNETWRAPPERCONTROLLDM3DTV2V_INTERPOLATE"
|
| 754 |
+
super().__init__(*args, **kwargs)
|
| 755 |
+
|
| 756 |
+
@torch.no_grad()
|
| 757 |
+
def log_images(
|
| 758 |
+
self,
|
| 759 |
+
batch: Dict,
|
| 760 |
+
N: int = 8,
|
| 761 |
+
sample: bool = True,
|
| 762 |
+
ucg_keys: List[str] = None,
|
| 763 |
+
**kwargs,
|
| 764 |
+
) -> Dict:
|
| 765 |
+
conditioner_input_keys = [e.input_key for e in self.conditioner.embedders]
|
| 766 |
+
if ucg_keys:
|
| 767 |
+
assert all(map(lambda x: x in conditioner_input_keys, ucg_keys)), (
|
| 768 |
+
"Each defined ucg key for sampling must be in the provided conditioner input keys,"
|
| 769 |
+
f"but we have {ucg_keys} vs. {conditioner_input_keys}"
|
| 770 |
+
)
|
| 771 |
+
else:
|
| 772 |
+
ucg_keys = conditioner_input_keys
|
| 773 |
+
log = dict()
|
| 774 |
+
|
| 775 |
+
x = self.get_input(batch)
|
| 776 |
+
|
| 777 |
+
# negative_prompt = "ugly, low quality"
|
| 778 |
+
negative_prompt = ''
|
| 779 |
+
batch_uc = {
|
| 780 |
+
"txt": [negative_prompt for i in range(x.shape[0])],
|
| 781 |
+
"control_hint": batch["control_hint"].clone(),
|
| 782 |
+
"interpolate_first_last": batch["interpolate_first_last"].clone(),
|
| 783 |
+
}
|
| 784 |
+
# TODO: specify this in the config file
|
| 785 |
+
batch["txt"] = ['' for each in batch["txt"]] # disbale text prompt
|
| 786 |
+
# batch["txt"] = ["masterpiece, best quality, " + each for each in batch["txt"]]
|
| 787 |
+
# batch['txt'] = ['masterpiece, best quality' for each in batch['txt']] # disable the text prompt
|
| 788 |
+
|
| 789 |
+
c, uc = self.conditioner.get_unconditional_conditioning(
|
| 790 |
+
batch_c=batch,
|
| 791 |
+
batch_uc=batch_uc,
|
| 792 |
+
)
|
| 793 |
+
|
| 794 |
+
sampling_kwargs = {
|
| 795 |
+
key: batch[key] for key in self.loss_fn.batch2model_keys.intersection(batch)
|
| 796 |
+
}
|
| 797 |
+
|
| 798 |
+
N = min(x.shape[0], N)
|
| 799 |
+
x = x.to(self.device)[:N]
|
| 800 |
+
log["inputs"] = x
|
| 801 |
+
log["inputs-video"] = x
|
| 802 |
+
# log['interpolate_first_last'] = torch.cat([batch['interpolate_first'], batch['interpolate_last']], dim=2)
|
| 803 |
+
from sgm.modules.encoders.modules import CustomIdentityEncoder, CustomIdentityDownCondEncoder
|
| 804 |
+
|
| 805 |
+
for embedder in self.conditioner.embedders:
|
| 806 |
+
if isinstance(embedder, CustomIdentityEncoder) or isinstance(embedder, CustomIdentityDownCondEncoder):
|
| 807 |
+
log["interpolate_first_last"] = embedder.encode(batch["interpolate_first_last"])[:,:3,:,...] # in case of more than 3
|
| 808 |
+
break
|
| 809 |
+
z = self.encode_first_stage(x)
|
| 810 |
+
# log["reconstructions"] = self.decode_first_stage(z)
|
| 811 |
+
# log["reconstructions-video"] = self.decode_first_stage(z)
|
| 812 |
+
log.update(self.log_conditionings(batch, N))
|
| 813 |
+
|
| 814 |
+
for k in c:
|
| 815 |
+
if isinstance(c[k], torch.Tensor):
|
| 816 |
+
c[k], uc[k] = map(lambda y: y[k][:N].to(self.device), (c, uc))
|
| 817 |
+
|
| 818 |
+
if sample:
|
| 819 |
+
with self.ema_scope("Plotting"):
|
| 820 |
+
samples = self.sample(
|
| 821 |
+
c, shape=z.shape[1:], uc=uc, batch_size=N, **sampling_kwargs
|
| 822 |
+
)
|
| 823 |
+
samples = self.decode_first_stage(samples)
|
| 824 |
+
log["samples"] = samples
|
| 825 |
+
log["samples-video"] = samples
|
| 826 |
+
|
| 827 |
+
for embedder in self.conditioner.embedders:
|
| 828 |
+
if (
|
| 829 |
+
isinstance(embedder, LineartEncoder)
|
| 830 |
+
or isinstance(embedder, DepthZoeEncoder)
|
| 831 |
+
or isinstance(embedder, DepthMidasEncoder)
|
| 832 |
+
or isinstance(embedder, SoftEdgeEncoder)
|
| 833 |
+
or isinstance(embedder, NormalBaeEncoder)
|
| 834 |
+
or isinstance(embedder, ScribbleHEDEncoder)
|
| 835 |
+
or isinstance(embedder, ScribblePidiNetEncoder)
|
| 836 |
+
or isinstance(embedder, OpenposeEncoder)
|
| 837 |
+
or isinstance(embedder, OutpaintingEncoder)
|
| 838 |
+
or isinstance(embedder, InpaintingEncoder)
|
| 839 |
+
):
|
| 840 |
+
log["control_hint"] = -embedder.encode(batch["control_hint"])
|
| 841 |
+
log["control_hint-video"] = -embedder.encode(batch["control_hint"])
|
| 842 |
+
break
|
| 843 |
+
|
| 844 |
+
# concat the inputs and outputs for visualization
|
| 845 |
+
log["inputs_samples_hint"] = torch.cat(
|
| 846 |
+
[log["inputs"], log["samples"], log["control_hint"]], dim=3
|
| 847 |
+
)
|
| 848 |
+
del log["inputs"]
|
| 849 |
+
del log["samples"]
|
| 850 |
+
del log["control_hint"]
|
| 851 |
+
|
| 852 |
+
log["inputs_samples_hint-video"] = torch.cat(
|
| 853 |
+
[log["inputs-video"], log["samples-video"], log["control_hint-video"]],
|
| 854 |
+
dim=3,
|
| 855 |
+
)
|
| 856 |
+
del log["inputs-video"]
|
| 857 |
+
del log["samples-video"]
|
| 858 |
+
del log["control_hint-video"]
|
| 859 |
+
return log
|
| 860 |
+
|
| 861 |
+
|
| 862 |
+
if __name__ == "__main__":
|
| 863 |
+
import logging
|
| 864 |
+
|
| 865 |
+
import yaml
|
| 866 |
+
|
| 867 |
+
open("output.log", "w").close()
|
| 868 |
+
|
| 869 |
+
logging.basicConfig(
|
| 870 |
+
level=logging.DEBUG,
|
| 871 |
+
filename="output.log",
|
| 872 |
+
datefmt="%Y/%m/%d %H:%M:%S",
|
| 873 |
+
format="%(asctime)s - %(name)s - %(levelname)s - %(lineno)d - %(module)s - %(message)s",
|
| 874 |
+
)
|
| 875 |
+
logger = logging.getLogger(__name__)
|
| 876 |
+
# logger.info('This is a log info')
|
| 877 |
+
# logger.debug('Debugging')
|
| 878 |
+
# logger.warning('Warning exists')
|
| 879 |
+
# logger.info('Finish')
|
| 880 |
+
|
| 881 |
+
BS = 2
|
| 882 |
+
frame_length = 17
|
| 883 |
+
# size = [BS, frame_length, 3, 320, 320]
|
| 884 |
+
size = [BS, 3, 320, 320]
|
| 885 |
+
batch = {
|
| 886 |
+
"jpg": torch.randn(size).cuda(),
|
| 887 |
+
"txt": BS * ["text"],
|
| 888 |
+
"original_size_as_tuple": torch.tensor([320, 320]).repeat(BS, 1).cuda(),
|
| 889 |
+
"crop_coords_top_left": torch.tensor([0, 0]).repeat(BS, 1).cuda(),
|
| 890 |
+
"target_size_as_tuple": torch.tensor([320, 320]).repeat(BS, 1).cuda(),
|
| 891 |
+
}
|
| 892 |
+
|
| 893 |
+
model_config = yaml.load(
|
| 894 |
+
open("configs/example_training/sd_xl_base-test.yaml"), Loader=yaml.Loader
|
| 895 |
+
)["model"]
|
| 896 |
+
|
| 897 |
+
learning_rate = model_config.pop("base_learning_rate")
|
| 898 |
+
model = DiffusionEngine(**model_config["params"]).cuda()
|
| 899 |
+
model.learning_rate = learning_rate
|
| 900 |
+
logger.info(model)
|
| 901 |
+
|
| 902 |
+
opt = model.configure_optimizers()
|
| 903 |
+
|
| 904 |
+
while True:
|
| 905 |
+
# out = model.shared_step(batch)
|
| 906 |
+
loss = model.training_step(batch, 1)
|
| 907 |
+
print(f"loss: {loss}")
|
| 908 |
+
loss.backward()
|
| 909 |
+
opt[0][0].step()
|
| 910 |
+
opt[0][0].zero_grad()
|
CCEdit-main/sgm/modules/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
| 1 |
+
from .encoders.modules import GeneralConditioner
|
| 2 |
+
|
| 3 |
+
UNCONDITIONAL_CONFIG = {
|
| 4 |
+
"target": "sgm.modules.GeneralConditioner",
|
| 5 |
+
"params": {"emb_models": []},
|
| 6 |
+
}
|
CCEdit-main/sgm/modules/__pycache__/__init__.cpython-39.pyc
ADDED
|
Binary file (307 Bytes).
|
|
|
CCEdit-main/sgm/modules/__pycache__/attention.cpython-39.pyc
ADDED
|
Binary file (34.9 kB).
|
|
|
CCEdit-main/sgm/modules/__pycache__/ema.cpython-39.pyc
ADDED
|
Binary file (3.19 kB).
|
|
|
CCEdit-main/sgm/modules/attention.py
ADDED
|
@@ -0,0 +1,1663 @@
|
| 1 |
+
import math
|
| 2 |
+
from inspect import isfunction
|
| 3 |
+
from typing import Any, Optional
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
from einops import rearrange, repeat
|
| 8 |
+
from packaging import version
|
| 9 |
+
from torch import nn
|
| 10 |
+
|
| 11 |
+
import loralib as lora
|
| 12 |
+
|
| 13 |
+
if version.parse(torch.__version__) >= version.parse("2.0.0"):
|
| 14 |
+
SDP_IS_AVAILABLE = True
|
| 15 |
+
from torch.backends.cuda import SDPBackend, sdp_kernel
|
| 16 |
+
|
| 17 |
+
BACKEND_MAP = {
|
| 18 |
+
SDPBackend.MATH: {
|
| 19 |
+
"enable_math": True,
|
| 20 |
+
"enable_flash": False,
|
| 21 |
+
"enable_mem_efficient": False,
|
| 22 |
+
},
|
| 23 |
+
SDPBackend.FLASH_ATTENTION: {
|
| 24 |
+
"enable_math": False,
|
| 25 |
+
"enable_flash": True,
|
| 26 |
+
"enable_mem_efficient": False,
|
| 27 |
+
},
|
| 28 |
+
SDPBackend.EFFICIENT_ATTENTION: {
|
| 29 |
+
"enable_math": False,
|
| 30 |
+
"enable_flash": False,
|
| 31 |
+
"enable_mem_efficient": True,
|
| 32 |
+
},
|
| 33 |
+
None: {"enable_math": True, "enable_flash": True, "enable_mem_efficient": True},
|
| 34 |
+
}
|
| 35 |
+
else:
|
| 36 |
+
from contextlib import nullcontext
|
| 37 |
+
|
| 38 |
+
SDP_IS_AVAILABLE = False
|
| 39 |
+
sdp_kernel = nullcontext
|
| 40 |
+
BACKEND_MAP = {}
|
| 41 |
+
print(
|
| 42 |
+
f"No SDP backend available, likely because you are running in pytorch versions < 2.0. In fact, "
|
| 43 |
+
f"you are using PyTorch {torch.__version__}. You might want to consider upgrading."
|
| 44 |
+
)
|
| 45 |
+
|
| 46 |
+
try:
|
| 47 |
+
import xformers
|
| 48 |
+
import xformers.ops
|
| 49 |
+
|
| 50 |
+
XFORMERS_IS_AVAILABLE = True
|
| 51 |
+
except:
|
| 52 |
+
XFORMERS_IS_AVAILABLE = False
|
| 53 |
+
print("no module 'xformers'. Processing without...")
|
| 54 |
+
|
| 55 |
+
from .diffusionmodules.util import checkpoint
|
| 56 |
+
|
| 57 |
+
try:
|
| 58 |
+
from flash_attn.flash_attn_interface import flash_attn_unpadded_func
|
| 59 |
+
from flash_attn.bert_padding import unpad_input
|
| 60 |
+
|
| 61 |
+
use_flash_attention = True
|
| 62 |
+
except ImportError:
|
| 63 |
+
try:
|
| 64 |
+
from flash_attn.flash_attn_interface import (
|
| 65 |
+
flash_attn_varlen_func as flash_attn_unpadded_func,
|
| 66 |
+
)
|
| 67 |
+
from flash_attn.bert_padding import unpad_input
|
| 68 |
+
|
| 69 |
+
use_flash_attention = True
|
| 70 |
+
except ImportError:
|
| 71 |
+
flash_attn_unpadded_func = None
|
| 72 |
+
unpad_input = None
|
| 73 |
+
use_flash_attention = False
|
| 74 |
+
print("Not use flash Attention")
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def exists(val):
|
| 78 |
+
return val is not None
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def uniq(arr):
|
| 82 |
+
return {el: True for el in arr}.keys()
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def default(val, d):
|
| 86 |
+
if exists(val):
|
| 87 |
+
return val
|
| 88 |
+
return d() if isfunction(d) else d
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def max_neg_value(t):
|
| 92 |
+
return -torch.finfo(t.dtype).max
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def init_(tensor):
|
| 96 |
+
dim = tensor.shape[-1]
|
| 97 |
+
std = 1 / math.sqrt(dim)
|
| 98 |
+
tensor.uniform_(-std, std)
|
| 99 |
+
return tensor
|
| 100 |
+
|
| 101 |
+
def get_lora_params(kwargs):
|
| 102 |
+
lora_names = ["q", "k", "v", "o"]
|
| 103 |
+
lora_params = dict()
|
| 104 |
+
for lora_name in lora_names:
|
| 105 |
+
lora_use = lora_name + "_use_lora"
|
| 106 |
+
lora_r = lora_name + "_lora_r"
|
| 107 |
+
lora_alpha = lora_name + "_lora_alpha"
|
| 108 |
+
lora_params[lora_use] = kwargs.get(lora_use, False)
|
| 109 |
+
lora_params[lora_r] = kwargs.get(lora_r, 4)
|
| 110 |
+
lora_params[lora_alpha] = kwargs.get(lora_alpha, 1)
|
| 111 |
+
return lora_params
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
# feedforward
|
| 115 |
+
class GEGLU(nn.Module):
|
| 116 |
+
def __init__(self, dim_in, dim_out):
|
| 117 |
+
super().__init__()
|
| 118 |
+
self.proj = nn.Linear(dim_in, dim_out * 2)
|
| 119 |
+
|
| 120 |
+
def forward(self, x):
|
| 121 |
+
x, gate = self.proj(x).chunk(2, dim=-1)
|
| 122 |
+
return x * F.gelu(gate)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
class FeedForward(nn.Module):
|
| 126 |
+
def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0):
|
| 127 |
+
super().__init__()
|
| 128 |
+
inner_dim = int(dim * mult)
|
| 129 |
+
dim_out = default(dim_out, dim)
|
| 130 |
+
project_in = (
|
| 131 |
+
nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU())
|
| 132 |
+
if not glu
|
| 133 |
+
else GEGLU(dim, inner_dim)
|
| 134 |
+
)
|
| 135 |
+
|
| 136 |
+
self.net = nn.Sequential(
|
| 137 |
+
project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out)
|
| 138 |
+
)
|
| 139 |
+
|
| 140 |
+
def forward(self, x):
|
| 141 |
+
return self.net(x)
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def zero_module(module):
|
| 145 |
+
"""
|
| 146 |
+
Zero out the parameters of a module and return it.
|
| 147 |
+
"""
|
| 148 |
+
for p in module.parameters():
|
| 149 |
+
p.detach().zero_()
|
| 150 |
+
return module
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def Normalize(in_channels):
|
| 154 |
+
return torch.nn.GroupNorm(
|
| 155 |
+
num_groups=32, num_channels=in_channels, eps=1e-6, affine=True
|
| 156 |
+
)
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
class LinearAttention(nn.Module):
|
| 160 |
+
def __init__(self, dim, heads=4, dim_head=32):
|
| 161 |
+
super().__init__()
|
| 162 |
+
self.heads = heads
|
| 163 |
+
hidden_dim = dim_head * heads
|
| 164 |
+
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
|
| 165 |
+
self.to_out = nn.Conv2d(hidden_dim, dim, 1)
|
| 166 |
+
|
| 167 |
+
def forward(self, x):
|
| 168 |
+
b, c, h, w = x.shape
|
| 169 |
+
qkv = self.to_qkv(x)
|
| 170 |
+
q, k, v = rearrange(
|
| 171 |
+
qkv, "b (qkv heads c) h w -> qkv b heads c (h w)", heads=self.heads, qkv=3
|
| 172 |
+
)
|
| 173 |
+
k = k.softmax(dim=-1)
|
| 174 |
+
context = torch.einsum("bhdn,bhen->bhde", k, v)
|
| 175 |
+
out = torch.einsum("bhde,bhdn->bhen", context, q)
|
| 176 |
+
out = rearrange(
|
| 177 |
+
out, "b heads c (h w) -> b (heads c) h w", heads=self.heads, h=h, w=w
|
| 178 |
+
)
|
| 179 |
+
return self.to_out(out)
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
class SpatialSelfAttention(nn.Module):
|
| 183 |
+
def __init__(self, in_channels):
|
| 184 |
+
super().__init__()
|
| 185 |
+
self.in_channels = in_channels
|
| 186 |
+
|
| 187 |
+
self.norm = Normalize(in_channels)
|
| 188 |
+
self.q = torch.nn.Conv2d(
|
| 189 |
+
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
| 190 |
+
)
|
| 191 |
+
self.k = torch.nn.Conv2d(
|
| 192 |
+
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
| 193 |
+
)
|
| 194 |
+
self.v = torch.nn.Conv2d(
|
| 195 |
+
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
| 196 |
+
)
|
| 197 |
+
self.proj_out = torch.nn.Conv2d(
|
| 198 |
+
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
| 199 |
+
)
|
| 200 |
+
|
| 201 |
+
def forward(self, x):
|
| 202 |
+
h_ = x
|
| 203 |
+
h_ = self.norm(h_)
|
| 204 |
+
q = self.q(h_)
|
| 205 |
+
k = self.k(h_)
|
| 206 |
+
v = self.v(h_)
|
| 207 |
+
|
| 208 |
+
# compute attention
|
| 209 |
+
b, c, h, w = q.shape
|
| 210 |
+
q = rearrange(q, "b c h w -> b (h w) c")
|
| 211 |
+
k = rearrange(k, "b c h w -> b c (h w)")
|
| 212 |
+
w_ = torch.einsum("bij,bjk->bik", q, k)
|
| 213 |
+
|
| 214 |
+
w_ = w_ * (int(c) ** (-0.5))
|
| 215 |
+
w_ = torch.nn.functional.softmax(w_, dim=2)
|
| 216 |
+
|
| 217 |
+
# attend to values
|
| 218 |
+
        v = rearrange(v, "b c h w -> b c (h w)")
        w_ = rearrange(w_, "b i j -> b j i")
        h_ = torch.einsum("bij,bjk->bik", v, w_)
        h_ = rearrange(h_, "b c (h w) -> b c h w", h=h)
        h_ = self.proj_out(h_)

        return x + h_


class FlashCrossAttention(nn.Module):
    def __init__(
        self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0, **kwargs
    ):
        super().__init__()
        print(
            f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using "
            f"{heads} heads with a dimension of {dim_head}."
        )
        self.dropout = dropout
        self.query_dim = query_dim
        self.context_dim = context_dim
        self.heads = heads
        self.dim_head = dim_head

        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head**-0.5

        lora_params = get_lora_params(kwargs)

        if lora_params["q_use_lora"]:
            self.to_q = lora.Linear(
                query_dim, inner_dim, r=lora_params["q_lora_r"], bias=False
            )
        else:
            self.to_q = nn.Linear(query_dim, inner_dim, bias=False)

        if lora_params["k_use_lora"]:
            self.to_k = lora.Linear(
                context_dim, inner_dim, r=lora_params["k_lora_r"], bias=False
            )
        else:
            self.to_k = nn.Linear(context_dim, inner_dim, bias=False)

        if lora_params["v_use_lora"]:
            self.to_v = lora.Linear(
                context_dim, inner_dim, r=lora_params["v_lora_r"], bias=False
            )
        else:
            self.to_v = nn.Linear(context_dim, inner_dim, bias=False)

        if lora_params["o_use_lora"]:
            self.to_out = nn.Sequential(
                lora.Linear(inner_dim, query_dim, r=lora_params["o_lora_r"]),
                nn.Dropout(dropout),
            )
        else:
            self.to_out = nn.Sequential(
                nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)
            )

    def get_input(self, x, seqlen, batch_size, nheads, mask=None):
        assert mask is None, "not implemented for mask with flash attention"
        lengths = torch.ones([batch_size, 1], dtype=torch.int, device="cuda") * seqlen
        attention_mask_bool = (
            repeat(torch.arange(seqlen, device="cuda"), "s -> b s", b=batch_size)
            < lengths
        )
        attention_mask = torch.zeros(
            batch_size, seqlen, device="cuda", dtype=torch.float16
        )
        attention_mask[~attention_mask_bool] = -10000.0
        attention_mask = rearrange(attention_mask, "b s -> b 1 1 s")
        x_unpad, indices, cu_seqlens_x, max_seqlen_in_batch_x = unpad_input(
            x, attention_mask_bool
        )
        x_unpad = rearrange(x_unpad, "nnz (h d) -> nnz h d", h=nheads)
        return x_unpad.to(torch.float16), cu_seqlens_x, max_seqlen_in_batch_x

    def forward(
        self,
        x,
        context=None,
        mask=None,
        additional_tokens=None,
        n_times_crossframe_attn_in_self=0,
    ):
        if additional_tokens is not None:
            # get the number of masked tokens at the beginning of the output sequence
            n_tokens_to_mask = additional_tokens.shape[1]
            # add additional token
            x = torch.cat([additional_tokens, x], dim=1)

        h = self.heads
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        v = self.to_v(context)

        if n_times_crossframe_attn_in_self:
            # reprogramming cross-frame attention as in https://arxiv.org/abs/2303.13439
            assert x.shape[0] % n_times_crossframe_attn_in_self == 0
            # n_cp = x.shape[0]//n_times_crossframe_attn_in_self
            k = repeat(
                k[::n_times_crossframe_attn_in_self],
                "b ... -> (b n) ...",
                n=n_times_crossframe_attn_in_self,
            )
            v = repeat(
                v[::n_times_crossframe_attn_in_self],
                "b ... -> (b n) ...",
                n=n_times_crossframe_attn_in_self,
            )

        b, seqlen_q = q.shape[0], q.shape[1]
        seqlen_k = k.shape[1]
        seqlen_v = v.shape[1]
        q, cu_seqlens_q, max_seqlen_in_batch_q = self.get_input(q, seqlen_q, b, h)
        k, cu_seqlens_k, max_seqlen_in_batch_k = self.get_input(k, seqlen_k, b, h)
        v, cu_seqlens_v, max_seqlen_in_batch_v = self.get_input(v, seqlen_v, b, h)

        if self.training:
            dropout_p = self.dropout
        else:
            dropout_p = 0

        out = flash_attn_unpadded_func(
            q,
            k,
            v,
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_in_batch_q,
            max_seqlen_in_batch_k,
            dropout_p,
        )

        out = rearrange(out, "(b n) h d -> b n (h d)", b=b, h=h)
        out = out.to(context.dtype)
        if additional_tokens is not None:
            # remove additional token
            out = out[:, n_tokens_to_mask:]
        out = self.to_out(out)
        return out


class CrossAttention(nn.Module):
    def __init__(
        self,
        query_dim,
        context_dim=None,
        heads=8,
        dim_head=64,
        dropout=0.0,
        backend=None,
        **kwargs,
    ):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head**-0.5
        self.heads = heads

        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)
        )

        self.backend = backend

    def forward(
        self,
        x,
        context=None,
        mask=None,
        additional_tokens=None,
        n_times_crossframe_attn_in_self=0,
    ):
        h = self.heads

        if additional_tokens is not None:
            # get the number of masked tokens at the beginning of the output sequence
            n_tokens_to_mask = additional_tokens.shape[1]
            # add additional token
            x = torch.cat([additional_tokens, x], dim=1)

        q = self.to_q(x)
        context = default(context, x)
        context = context.to(self.to_k.weight.dtype)
        k = self.to_k(context)
        v = self.to_v(context)

        if n_times_crossframe_attn_in_self:
            # reprogramming cross-frame attention as in https://arxiv.org/abs/2303.13439
            assert x.shape[0] % n_times_crossframe_attn_in_self == 0
            n_cp = x.shape[0] // n_times_crossframe_attn_in_self
            k = repeat(
                k[::n_times_crossframe_attn_in_self], "b ... -> (b n) ...", n=n_cp
            )
            v = repeat(
                v[::n_times_crossframe_attn_in_self], "b ... -> (b n) ...", n=n_cp
            )

        q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=h), (q, k, v))

        # old
        """
        sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
        del q, k

        if exists(mask):
            mask = rearrange(mask, 'b ... -> b (...)')
            max_neg_value = -torch.finfo(sim.dtype).max
            mask = repeat(mask, 'b j -> (b h) () j', h=h)
            sim.masked_fill_(~mask, max_neg_value)

        # attention, what we cannot get enough of
        sim = sim.softmax(dim=-1)

        out = einsum('b i j, b j d -> b i d', sim, v)
        """
        # new
        with sdp_kernel(**BACKEND_MAP[self.backend]):
            # print("dispatching into backend", self.backend, "q/k/v shape: ", q.shape, k.shape, v.shape)
            out = F.scaled_dot_product_attention(
                q, k, v, attn_mask=mask
            )  # scale is dim_head ** -0.5 per default
            # if self.to_q.weight.dtype == torch.float16:
            #     q, k, v = q.to(torch.float32), k.to(torch.float32), v.to(torch.float32)
            # elif self.to_q.weight.dtype == torch.bfloat16:
            #     q, k, v = (
            #         q.to(torch.bfloat16),
            #         k.to(torch.bfloat16),
            #         v.to(torch.bfloat16),
            #     )
            # out = F.scaled_dot_product_attention(q, k, v, attn_mask=mask).to(
            #     self.to_q.weight.dtype
            # )

        del q, k, v
        out = rearrange(out, "b h n d -> b n (h d)", h=h)

        if additional_tokens is not None:
            # remove additional token
            out = out[:, n_tokens_to_mask:]
        return self.to_out(out)


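# Hedged usage sketch (editor's addition, not part of the original file): CrossAttention
# expects token sequences of shape (batch, tokens, query_dim) and an optional context of
# shape (batch, ctx_tokens, context_dim); with context=None it degenerates to
# self-attention. The concrete sizes below are illustrative assumptions only.
#
#   attn = CrossAttention(query_dim=320, context_dim=768, heads=8, dim_head=40)
#   x = torch.randn(2, 4096, 320)     # e.g. a 64x64 latent flattened to tokens
#   ctx = torch.randn(2, 77, 768)     # e.g. text-encoder tokens
#   y = attn(x, context=ctx)          # -> (2, 4096, 320)

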
class MemoryEfficientCrossAttention(nn.Module):
    # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
    def __init__(
        self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0, **kwargs
    ):
        super().__init__()
        print(
            f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using "
            f"{heads} heads with a dimension of {dim_head}."
        )
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.heads = heads
        self.dim_head = dim_head

        lora_params = get_lora_params(kwargs)

        if lora_params["q_use_lora"]:
            self.to_q = lora.Linear(
                query_dim, inner_dim, r=lora_params["q_lora_r"], bias=False
            )
        else:
            self.to_q = nn.Linear(query_dim, inner_dim, bias=False)

        if lora_params["k_use_lora"]:
            self.to_k = lora.Linear(
                context_dim, inner_dim, r=lora_params["k_lora_r"], bias=False
            )
        else:
            self.to_k = nn.Linear(context_dim, inner_dim, bias=False)

        if lora_params["v_use_lora"]:
            self.to_v = lora.Linear(
                context_dim, inner_dim, r=lora_params["v_lora_r"], bias=False
            )
        else:
            self.to_v = nn.Linear(context_dim, inner_dim, bias=False)

        if lora_params["o_use_lora"]:
            self.to_out = nn.Sequential(
                lora.Linear(inner_dim, query_dim, r=lora_params["o_lora_r"]),
                nn.Dropout(dropout),
            )
        else:
            self.to_out = nn.Sequential(
                nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)
            )

        self.attention_op: Optional[Any] = None

    def forward(
        self,
        x,
        context=None,
        mask=None,
        additional_tokens=None,
        n_times_crossframe_attn_in_self=0,
    ):
        if additional_tokens is not None:
            # get the number of masked tokens at the beginning of the output sequence
            n_tokens_to_mask = additional_tokens.shape[1]
            # add additional token
            x = torch.cat([additional_tokens, x], dim=1)
        q = self.to_q(x)
        context = default(context, x)
        context = context.to(self.to_k.weight.dtype)
        k = self.to_k(context)
        v = self.to_v(context)

        if n_times_crossframe_attn_in_self:
            # reprogramming cross-frame attention as in https://arxiv.org/abs/2303.13439
            assert x.shape[0] % n_times_crossframe_attn_in_self == 0
            # n_cp = x.shape[0]//n_times_crossframe_attn_in_self
            k = repeat(
                k[::n_times_crossframe_attn_in_self],
                "b ... -> (b n) ...",
                n=n_times_crossframe_attn_in_self,
            )
            v = repeat(
                v[::n_times_crossframe_attn_in_self],
                "b ... -> (b n) ...",
                n=n_times_crossframe_attn_in_self,
            )

        b, _, _ = q.shape
        q, k, v = map(
            lambda t: t.unsqueeze(3)
            .reshape(b, t.shape[1], self.heads, self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b * self.heads, t.shape[1], self.dim_head)
            .contiguous(),
            (q, k, v),
        )

        # actually compute the attention, what we cannot get enough of
        # out = xformers.ops.memory_efficient_attention(
        #     q, k, v, attn_bias=None, op=self.attention_op
        # )

        with torch.autocast(enabled=False, device_type="cuda"):
            if self.to_q.weight.dtype == torch.float16:
                q, k, v = q.to(torch.float32), k.to(torch.float32), v.to(torch.float32)
            elif self.to_q.weight.dtype == torch.bfloat16:
                q, k, v = (
                    q.to(torch.bfloat16),
                    k.to(torch.bfloat16),
                    v.to(torch.bfloat16),
                )
            out = F.scaled_dot_product_attention(q, k, v, is_causal=False).to(
                self.to_q.weight.dtype
            )

        # TODO: Use this directly in the attention operation, as a bias
        if exists(mask):
            raise NotImplementedError
        out = (
            out.unsqueeze(0)
            .reshape(b, self.heads, out.shape[1], self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b, out.shape[1], self.heads * self.dim_head)
        )
        if additional_tokens is not None:
            # remove additional token
            out = out[:, n_tokens_to_mask:]
        return self.to_out(out)


class BasicTransformerBlock(nn.Module):
    ATTENTION_MODES = {
        "softmax": CrossAttention,  # vanilla attention
        "flash": FlashCrossAttention,  # flash attention
        "softmax-xformers": MemoryEfficientCrossAttention,  # ampere
    }

    def __init__(
        self,
        dim,
        n_heads,
        d_head,
        dropout=0.0,
        context_dim=None,
        gated_ff=True,
        checkpoint=True,
        disable_self_attn=False,
        flash_attention=False,
        attn_mode="softmax",
        sdp_backend=None,
        **kwargs,
    ):
        super().__init__()
        assert attn_mode in self.ATTENTION_MODES
        if use_flash_attention and flash_attention:
            attn_mode = "flash"
        else:
            if attn_mode != "softmax" and not XFORMERS_IS_AVAILABLE:
                print(
                    f"Attention mode '{attn_mode}' is not available. Falling back to native attention. "
                    f"This is not a problem in Pytorch >= 2.0. FYI, you are running with PyTorch version {torch.__version__}"
                )
                attn_mode = "softmax"
            elif attn_mode == "softmax" and not SDP_IS_AVAILABLE:
                print(
                    "We do not support vanilla attention anymore, as it is too expensive. Sorry."
                )
                if not XFORMERS_IS_AVAILABLE:
                    assert (
                        False
                    ), "Please install xformers via e.g. 'pip install xformers==0.0.16'"
                else:
                    print("Falling back to xformers efficient attention.")
                    attn_mode = "softmax-xformers"
        attn_cls = self.ATTENTION_MODES[attn_mode]
        if version.parse(torch.__version__) >= version.parse("2.0.0"):
            assert sdp_backend is None or isinstance(sdp_backend, SDPBackend)
        else:
            assert sdp_backend is None
        self.disable_self_attn = disable_self_attn
        self.attn1 = attn_cls(
            query_dim=dim,
            heads=n_heads,
            dim_head=d_head,
            dropout=dropout,
            context_dim=context_dim if self.disable_self_attn else None,
            backend=sdp_backend,
            **kwargs,
        )  # is a self-attention if not self.disable_self_attn
        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
        self.attn2 = attn_cls(
            query_dim=dim,
            context_dim=context_dim,
            heads=n_heads,
            dim_head=d_head,
            dropout=dropout,
            backend=sdp_backend,
            **kwargs,
        )  # is self-attn if context is none
        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)
        self.norm3 = nn.LayerNorm(dim)
        self.checkpoint = checkpoint
        if self.checkpoint:
            print(f"{self.__class__.__name__} is using checkpointing")

    def forward(
        self, x, context=None, additional_tokens=None, n_times_crossframe_attn_in_self=0
    ):
        kwargs = {"x": x}

        if context is not None:
            kwargs.update({"context": context})

        if additional_tokens is not None:
            kwargs.update({"additional_tokens": additional_tokens})

        if n_times_crossframe_attn_in_self:
            kwargs.update(
                {"n_times_crossframe_attn_in_self": n_times_crossframe_attn_in_self}
            )

        # return mixed_checkpoint(self._forward, kwargs, self.parameters(), self.checkpoint)
        return checkpoint(
            self._forward, (x, context), self.parameters(), self.checkpoint
        )

    def _forward(
        self, x, context=None, additional_tokens=None, n_times_crossframe_attn_in_self=0
    ):
        x = (
            self.attn1(
                self.norm1(x),
                context=context if self.disable_self_attn else None,
                additional_tokens=additional_tokens,
                n_times_crossframe_attn_in_self=n_times_crossframe_attn_in_self
                if not self.disable_self_attn
                else 0,
            )
            + x
        )
        x = (
            self.attn2(
                self.norm2(x), context=context, additional_tokens=additional_tokens
            )
            + x
        )
        x = self.ff(self.norm3(x)) + x
        return x


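# Hedged usage sketch (editor's addition, not in the original file): one
# BasicTransformerBlock applies pre-norm self-attention, then cross-attention against
# `context`, then a gated feed-forward, each with a residual connection, so the token
# shape is preserved. The concrete dimensions below are assumptions.
#
#   block = BasicTransformerBlock(dim=320, n_heads=8, d_head=40, context_dim=768)
#   tokens = torch.randn(2, 4096, 320)
#   text = torch.randn(2, 77, 768)
#   tokens = block(tokens, context=text)   # -> (2, 4096, 320)

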
class BasicTransformerSingleLayerBlock(nn.Module):
    ATTENTION_MODES = {
        "softmax": CrossAttention,  # vanilla attention
        # on the A100s not quite as fast as the above version
        "softmax-xformers": MemoryEfficientCrossAttention
        # (todo might depend on head_dim, check, falls back to semi-optimized kernels for dim!=[16,32,64,128])
    }

    def __init__(
        self,
        dim,
        n_heads,
        d_head,
        dropout=0.0,
        context_dim=None,
        gated_ff=True,
        checkpoint=True,
        attn_mode="softmax-xformers",
    ):
        super().__init__()
        assert attn_mode in self.ATTENTION_MODES
        attn_cls = self.ATTENTION_MODES[attn_mode]
        self.attn1 = attn_cls(
            query_dim=dim,
            heads=n_heads,
            dim_head=d_head,
            dropout=dropout,
            context_dim=context_dim,
        )
        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)
        self.checkpoint = checkpoint

    def forward(self, x, context=None):
        return checkpoint(
            self._forward, (x, context), self.parameters(), self.checkpoint
        )

    def _forward(self, x, context=None):
        x = self.attn1(self.norm1(x), context=context) + x
        x = self.ff(self.norm2(x)) + x
        return x


class SpatialTransformer(nn.Module):
    """
    Transformer block for image-like data.
    First, project the input (aka embedding)
    and reshape to b, t, d.
    Then apply standard transformer action.
    Finally, reshape to image
    NEW: use_linear for more efficiency instead of the 1x1 convs
    """

    def __init__(
        self,
        in_channels,
        n_heads,
        d_head,
        depth=1,
        dropout=0.0,
        context_dim=None,
        disable_self_attn=False,
        use_linear=False,
        attn_type="softmax-xformers",
        use_checkpoint=True,
        sdp_backend=None,
        **kwargs,
    ):
        super().__init__()
        print(
            f"constructing {self.__class__.__name__} of depth {depth} w/ {in_channels} channels and {n_heads} heads"
        )
        from omegaconf import ListConfig

        if exists(context_dim) and not isinstance(context_dim, (list, ListConfig)):
            context_dim = [context_dim]
        if exists(context_dim) and isinstance(context_dim, list):
            if depth != len(context_dim):
                print(
                    f"WARNING: {self.__class__.__name__}: Found context dims {context_dim} of depth {len(context_dim)}, "
                    f"which does not match the specified 'depth' of {depth}. Setting context_dim to {depth * [context_dim[0]]} now."
                )
                # depth does not match context dims.
                assert all(
                    map(lambda x: x == context_dim[0], context_dim)
                ), "need homogenous context_dim to match depth automatically"
                context_dim = depth * [context_dim[0]]
        elif context_dim is None:
            context_dim = [None] * depth
        self.in_channels = in_channels
        inner_dim = n_heads * d_head
        self.norm = Normalize(in_channels)
        if not use_linear:
            self.proj_in = nn.Conv2d(
                in_channels, inner_dim, kernel_size=1, stride=1, padding=0
            )
        else:
            self.proj_in = nn.Linear(in_channels, inner_dim)

        disable_text_ca = kwargs.get("disable_text_ca", False)
        self.disable_text_ca = disable_text_ca
        if disable_text_ca:
            self.transformer_blocks = nn.ModuleList(
                [
                    # BasicTransformerBlock( # temporal transformer does not use flash attention
                    BasicTransformerSingleLayerBlock(  # temporal transformer does not use flash attention
                        inner_dim,
                        n_heads,
                        d_head,
                        dropout=dropout,
                        # context_dim=context_dim[d],
                        context_dim=None,
                        attn_mode="softmax",
                        checkpoint=use_checkpoint,
                    )
                    for d in range(depth)
                ]
            )
        else:
            self.transformer_blocks = nn.ModuleList(
                [
                    BasicTransformerBlock(
                        inner_dim,
                        n_heads,
                        d_head,
                        dropout=dropout,
                        context_dim=context_dim[d],
                        disable_self_attn=disable_self_attn,
                        attn_mode=attn_type,
                        checkpoint=use_checkpoint,
                        sdp_backend=sdp_backend,
                    )
                    for d in range(depth)
                ]
            )
        if not use_linear:
            self.proj_out = zero_module(
                nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
            )
        else:
            # self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))
            self.proj_out = zero_module(nn.Linear(inner_dim, in_channels))
        self.use_linear = use_linear

    def forward(self, x, context=None):
        # note: if no context is given, cross-attention defaults to self-attention
        if not isinstance(context, list):
            context = [context]
        b, c, h, w = x.shape
        x_in = x
        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
        x = rearrange(x, "b c h w -> b (h w) c").contiguous()
        if self.use_linear:
            x = self.proj_in(x)
        for i, block in enumerate(self.transformer_blocks):
            if i > 0 and len(context) == 1:
                i = 0  # use same context for each block
            if self.disable_text_ca:
                x = block(x, context=x)
            else:
                x = block(x, context=context[i])
        if self.use_linear:
            x = self.proj_out(x)
        x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w).contiguous()
        if not self.use_linear:
            x = self.proj_out(x)
        return x + x_in


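# Hedged shape sketch (editor's addition, not in the original file): SpatialTransformer
# flattens a feature map to tokens, runs the transformer blocks, and reshapes back, so
# input and output shapes match and a residual is added. Sizes below are assumptions.
#
#   st = SpatialTransformer(in_channels=320, n_heads=8, d_head=40,
#                           depth=1, context_dim=768, use_linear=True)
#   feat = torch.randn(2, 320, 32, 32)
#   text = torch.randn(2, 77, 768)
#   out = st(feat, context=text)           # -> (2, 320, 32, 32)

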
class SpatialTransformerCA(SpatialTransformer):
    """
    This is hacked from SpatialTransformer.
    Conduct additional cross-attention with k,v from the reference feature.
    Thus, the attention order is text cross-attention -> spatial self-attention -> reference cross-attention.
    Note that if the reference feature is not given, this module is equivalent to SpatialTransformer.


    Transformer block for image-like data.
    First, project the input (aka embedding)
    and reshape to b, t, d.
    Then apply standard transformer action.
    Finally, reshape to image
    NEW: use_linear for more efficiency instead of the 1x1 convs
    """

    def __init__(
        self,
        in_channels,
        n_heads,
        d_head,
        depth=1,
        dropout=0.0,
        context_dim=None,
        disable_self_attn=False,
        use_linear=False,
        attn_type="softmax-xformers",
        use_checkpoint=True,
        sdp_backend=None,
        **kwargs,
    ):
        super().__init__(
            in_channels,
            n_heads,
            d_head,
            depth,
            dropout,
            context_dim,
            disable_self_attn,
            use_linear,
            attn_type,
            use_checkpoint,
            sdp_backend,
            **kwargs,
        )
        inner_dim = n_heads * d_head

        # temporal crossattention part
        self.norm_ca = Normalize(in_channels)
        if not use_linear:
            self.proj_in_ca = nn.Conv2d(
                in_channels, inner_dim, kernel_size=1, stride=1, padding=0
            )
        else:
            self.proj_in_ca = nn.Linear(in_channels, inner_dim)
        self.transformer_blocks_ca = nn.ModuleList(
            [
                BasicTransformerSingleLayerBlock(
                    inner_dim,
                    n_heads,
                    d_head,
                    dropout=dropout,
                    context_dim=None,
                    attn_mode=attn_type,
                    checkpoint=use_checkpoint,
                )
                for d in range(depth)
            ]
        )
        if not use_linear:
            self.proj_out_ca = zero_module(
                nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
            )
        else:
            self.proj_out_ca = zero_module(nn.Linear(inner_dim, in_channels))

    def forward(self, x, context=None):
        x = super().forward(x, context)

        assert hasattr(self, "ref_control"), "must have ref_control"
        ref_control = self.ref_control

        b, c, h, w = x.shape
        # cross-frame attention
        # x = rearrange(x, "b c t h w -> (b t) c h w").contiguous()
        x_in = x
        x = self.norm_ca(x)
        if not self.use_linear:
            x = self.proj_in_ca(x)
        x = rearrange(x, "b c h w -> b (h w) c").contiguous()
        if self.use_linear:
            x = self.proj_in_ca(x)

        # flatten the reference feature once; it is reused as k/v context for every block
        # (flattening inside the loop would fail for depth > 1)
        ref_control = rearrange(ref_control, "b c h w -> b (h w) c").contiguous()
        for i, block in enumerate(self.transformer_blocks_ca):
            context_texture = ref_control
            x = block(x, context_texture)

        if self.use_linear:
            x = self.proj_out_ca(x)
        x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w).contiguous()
        if not self.use_linear:
            x = self.proj_out_ca(x)
        x = x + x_in

        return x


class SpatialTransformer3D(nn.Module):
    """
    This is hacked from the 2D version above.

    Transformer block for video-like data.
    First, project the input (aka embedding)
    and reshape to b, t, d.
    Then apply standard transformer action.
    Finally, reshape to image
    NEW: use_linear for more efficiency instead of the 1x1 convs
    """

    def __init__(
        self,
        in_channels,
        n_heads,
        d_head,
        depth=1,
        dropout=0.0,
        context_dim=None,
        disable_self_attn=False,
        use_linear=False,
        attn_type="softmax-xformers",
        use_checkpoint=True,
        sdp_backend=None,
        **kwargs,
    ):
        super().__init__()
        print(
            f"constructing {self.__class__.__name__} of depth {depth} w/ {in_channels} channels and {n_heads} heads"
        )
        from omegaconf import ListConfig

        if exists(context_dim) and not isinstance(context_dim, (list, ListConfig)):
            context_dim = [context_dim]
        if exists(context_dim) and isinstance(context_dim, list):
            if depth != len(context_dim):
                print(
                    f"WARNING: {self.__class__.__name__}: Found context dims {context_dim} of depth {len(context_dim)}, "
                    f"which does not match the specified 'depth' of {depth}. Setting context_dim to {depth * [context_dim[0]]} now."
                )
                # depth does not match context dims.
                assert all(
                    map(lambda x: x == context_dim[0], context_dim)
                ), "need homogenous context_dim to match depth automatically"
                context_dim = depth * [context_dim[0]]
        elif context_dim is None:
            context_dim = [None] * depth
        self.in_channels = in_channels
        inner_dim = n_heads * d_head
        self.norm = Normalize(in_channels)
        if not use_linear:
            self.proj_in = nn.Conv2d(
                in_channels, inner_dim, kernel_size=1, stride=1, padding=0
            )
        else:
            self.proj_in = nn.Linear(in_channels, inner_dim)

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    n_heads,
                    d_head,
                    dropout=dropout,
                    context_dim=context_dim[d],
                    disable_self_attn=disable_self_attn,
                    attn_mode=attn_type,
                    checkpoint=use_checkpoint,
                    sdp_backend=sdp_backend,
                    flash_attention=True,
                    **kwargs,
                )
                for d in range(depth)
            ]
        )
        if not use_linear:
            self.proj_out = zero_module(
                nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
            )
        else:
            self.proj_out = zero_module(nn.Linear(inner_dim, in_channels))
        self.use_linear = use_linear

        # temporal part
        self.norm_temporal = Normalize(in_channels)
        if not use_linear:
            self.proj_in_temporal = zero_module(
                nn.Conv1d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
            )
        else:
            self.proj_in_temporal = zero_module(nn.Linear(in_channels, inner_dim))
        disable_temporal_text_ca = kwargs.get("disable_temporal_text_ca", False)
        self.disable_temporal_text_ca = disable_temporal_text_ca
        if disable_temporal_text_ca:
            self.transformer_blocks_temporal = nn.ModuleList(
                [
                    # BasicTransformerBlock( # temporal transformer does not use flash attention
                    BasicTransformerSingleLayerBlock(  # temporal transformer does not use flash attention
                        inner_dim,
                        n_heads,
                        d_head,
                        dropout=dropout,
                        # context_dim=context_dim[d],
                        context_dim=None,
                        attn_mode="softmax",
                        checkpoint=use_checkpoint,
                    )
                    for d in range(depth)
                ]
            )
        else:
            self.transformer_blocks_temporal = nn.ModuleList(
                [
                    BasicTransformerBlock(  # temporal transformer does not use flash attention
                        inner_dim,
                        n_heads,
                        d_head,
                        dropout=dropout,
                        context_dim=context_dim[d],
                        disable_self_attn=disable_self_attn,
                        attn_mode="softmax",
                        checkpoint=use_checkpoint,
                        sdp_backend=sdp_backend,
                    )
                    for d in range(depth)
                ]
            )
        if not use_linear:
            self.proj_out_temporal = zero_module(
                nn.Conv1d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
            )
        else:
            self.proj_out_temporal = zero_module(nn.Linear(inner_dim, in_channels))

        use_learnable_alpha = kwargs.get("use_learnable_alpha", False)
        if use_learnable_alpha:
            self.alpha_temporal = nn.Parameter(
                torch.ones(1)
            )  # x = alpha * spatial + (1-alpha) * temporal

    def forward(self, x, context=None):
        # note: if no context is given, cross-attention defaults to self-attention
        if not isinstance(context, list):
            context = [context]
        b, c, t, h, w = x.shape
        # spatial attention
        x = rearrange(x, "b c t h w -> (b t) c h w").contiguous()
        x_in = x
        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
        x = rearrange(x, "bt c h w -> bt (h w) c").contiguous()
        if self.use_linear:
            x = self.proj_in(x)

        for i, block in enumerate(self.transformer_blocks):
            if i > 0 and len(context) == 1:
                i = 0
            context_i = (
                repeat(context[i], "b l c -> (b t) l c", t=t).contiguous()
                if context[i] is not None
                else None
            )
            x = block(x, context=context_i)
        if self.use_linear:
            x = self.proj_out(x)
        x = rearrange(x, "bt (h w) c -> bt c h w", h=h, w=w).contiguous()
        if not self.use_linear:
            x = self.proj_out(x)
        x = x + x_in

        x = rearrange(x, "(b t) c h w -> (b h w) c t", t=t).contiguous()
        # temporal attention
        if hasattr(self, "norm_temporal"):  # temporal operation exists
            x_in = x
            x = self.norm_temporal(x)
            if not self.use_linear:
                x = self.proj_in_temporal(x)
            x = rearrange(x, "bhw c t -> bhw t c").contiguous()
            if self.use_linear:
                x = self.proj_in_temporal(x)
            for i, block in enumerate(self.transformer_blocks_temporal):
                if i > 0 and len(context) == 1:
                    i = 0  # use same context for each block
                # if context[i] != None:
                context_i = (
                    repeat(context[i], "b l c -> (b h w) l c", h=h, w=w).contiguous()
                    if context[i] is not None
                    else None
                )
                if self.disable_temporal_text_ca:
                    x = block(x, context=x)
                else:
                    x = block(x, context=context_i)

            if self.use_linear:
                x = self.proj_out_temporal(x)
            x = rearrange(x, "bhw t c -> bhw c t").contiguous()
            if not self.use_linear:
                x = self.proj_out_temporal(x)
            if hasattr(self, "alpha_temporal"):
                x = self.alpha_temporal * x_in + (1 - self.alpha_temporal) * x
            else:
                x = x_in + x
            # x = x_in  # ! DEBUG ONLY

        x = rearrange(x, "(b h w) c t -> b c t h w", h=h, w=w).contiguous()
        return x


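# Hedged shape sketch (editor's addition, not in the original file): SpatialTransformer3D
# factorizes attention over a video tensor (b, c, t, h, w): frames are folded into the
# batch for spatial attention, then spatial positions are folded into the batch for the
# 1D temporal attention. The sizes below are assumptions.
#
#   st3d = SpatialTransformer3D(in_channels=320, n_heads=8, d_head=40,
#                               depth=1, context_dim=768, use_linear=True)
#   video = torch.randn(1, 320, 8, 32, 32)   # 8 frames of 32x32 feature maps
#   text = torch.randn(1, 77, 768)
#   out = st3d(video, context=text)          # -> (1, 320, 8, 32, 32)

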
class SpatialTransformer3DCA(SpatialTransformer3D):
    """
    # -> SpatialTransformer3DCrossAttention
    # Replace the second temporal attention in SpatialTransformer3D with cross-attention
    # Original attention order:
    # 1. spatial self-attention
    # 2. cross-attention with text condition
    # 3. temporal self-attention (1d)
    # 4. cross-attention with text condition
    # Attention order:
    # 1. spatial self-attention
    # 2. cross-attention with text condition
    # 3. temporal self-attention (1d)
    # 4. cross-attention with text condition (maybe not necessary, but... nevermind)
    # 5. cross-attention with anchor frame (usually center frame, or reference image from outside)

    This is hacked from the 2D version above.

    Transformer block for video-like data.
    First, project the input (aka embedding)
    and reshape to b, t, d.
    Then apply standard transformer action.
    Finally, reshape to image
    NEW: use_linear for more efficiency instead of the 1x1 convs
    """

    def __init__(
        self,
        in_channels,
        n_heads,
        d_head,
        depth=1,
        dropout=0.0,
        context_dim=None,
        disable_self_attn=False,
        use_linear=False,
        attn_type="softmax-xformers",
        use_checkpoint=True,
        sdp_backend=None,
        **kwargs,
    ):
        # super().__init__(**kwargs)
        super().__init__(
            in_channels,
            n_heads,
            d_head,
            depth=depth,
            dropout=dropout,
            context_dim=context_dim,
            disable_self_attn=disable_self_attn,
            use_linear=use_linear,
            attn_type=attn_type,
            use_checkpoint=use_checkpoint,
            sdp_backend=sdp_backend,
            **kwargs,
        )

        inner_dim = n_heads * d_head

        # temporal crossattention part
        self.norm_temporal_ca = Normalize(in_channels)
        if not use_linear:
            self.proj_in_temporal_ca = nn.Conv2d(
                in_channels, inner_dim, kernel_size=1, stride=1, padding=0
            )
        else:
            self.proj_in_temporal_ca = nn.Linear(in_channels, inner_dim)
        self.transformer_blocks_temporal_ca = nn.ModuleList(
            [
                BasicTransformerSingleLayerBlock(
                    inner_dim,
                    n_heads,
                    d_head,
                    dropout=dropout,
                    context_dim=None,
                    attn_mode=attn_type,
                    checkpoint=use_checkpoint,
                )
                for d in range(depth)
            ]
        )
        if not use_linear:
            self.proj_out_temporal_ca = zero_module(
                nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
            )
        else:
            self.proj_out_temporal_ca = zero_module(nn.Linear(inner_dim, in_channels))

        self.ST3DCA_ca_type = kwargs.get("ST3DCA_ca_type", "center")
        assert self.ST3DCA_ca_type in ["center", "self", "center_self"]

    def forward(self, x, context=None):
        x = super().forward(x, context)

        # note: if no context is given, cross-attention defaults to self-attention
        if not isinstance(context, list):
            context = [context]
        b, c, t, h, w = x.shape
        # cross-frame attention
        x = rearrange(x, "b c t h w -> (b t) c h w").contiguous()
        x_in = x
        x = self.norm_temporal_ca(x)
        if not self.use_linear:
            x = self.proj_in_temporal_ca(x)
        x = rearrange(x, "bt c h w -> bt (h w) c").contiguous()
        if self.use_linear:
            x = self.proj_in_temporal_ca(x)

        for i, block in enumerate(self.transformer_blocks_temporal_ca):
            if i > 0 and len(context) == 1:
                i = 0
            # center frame as anchor
            x = rearrange(x, "(b t) hw c -> b t hw c", b=b).contiguous()
            attn_anchor_frame_idx = t // 2  # center frame
            anchor_frame = x[:, attn_anchor_frame_idx, :, :].contiguous()
            anchor_frame = repeat(anchor_frame, "b hw c -> b t hw c", t=t).contiguous()
            anchor_frame = rearrange(anchor_frame, "b t hw c -> (b t) hw c").contiguous()
            context_texture = anchor_frame
            x = rearrange(x, "b t hw c -> (b t) hw c", b=b).contiguous()
            if self.ST3DCA_ca_type == "center":
                x = block(x, context=context_texture)
            elif self.ST3DCA_ca_type == "self":
                x = block(x, context=x)
            elif self.ST3DCA_ca_type == "center_self":
                context_texture = torch.cat([context_texture, x], dim=1)
                x = block(x, context=context_texture)
            else:
                raise NotImplementedError
            # x = block(x, context_texture)

        if self.use_linear:
            x = self.proj_out_temporal_ca(x)
        x = rearrange(x, "bt (h w) c -> bt c h w", h=h, w=w).contiguous()
        if not self.use_linear:
            x = self.proj_out_temporal_ca(x)
        x = x + x_in

        x = rearrange(x, "(b t) c h w -> b c t h w", b=b, t=t).contiguous()

        return x


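# Hedged illustration (editor's addition, not in the original file) of the anchor-frame
# selection used above: every frame attends to the center frame, so the keys/values come
# from one shared frame. Shapes are assumptions.
#
#   tokens = torch.randn(2 * 8, 1024, 320)                        # (b*t, h*w, c), t=8
#   per_frame = rearrange(tokens, "(b t) hw c -> b t hw c", t=8)
#   anchor = per_frame[:, 8 // 2]                                 # center frame
#   anchor = repeat(anchor, "b hw c -> (b t) hw c", t=8)          # broadcast to all frames

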
def benchmark_attn():
    # Lets define a helpful benchmarking function:
    # https://pytorch.org/tutorials/intermediate/scaled_dot_product_attention_tutorial.html
    device = "cuda" if torch.cuda.is_available() else "cpu"
    import torch.nn.functional as F
    import torch.utils.benchmark as benchmark

    def benchmark_torch_function_in_microseconds(f, *args, **kwargs):
        t0 = benchmark.Timer(
            stmt="f(*args, **kwargs)", globals={"args": args, "kwargs": kwargs, "f": f}
        )
        return t0.blocked_autorange().mean * 1e6

    # Lets define the hyper-parameters of our input
    batch_size = 32
    max_sequence_len = 1024
    num_heads = 32
    embed_dimension = 32

    dtype = torch.float16

    query = torch.rand(
        batch_size,
        num_heads,
        max_sequence_len,
        embed_dimension,
        device=device,
        dtype=dtype,
    )
    key = torch.rand(
        batch_size,
        num_heads,
        max_sequence_len,
        embed_dimension,
        device=device,
        dtype=dtype,
    )
    value = torch.rand(
        batch_size,
        num_heads,
        max_sequence_len,
        embed_dimension,
        device=device,
        dtype=dtype,
    )

    print(f"q/k/v shape:", query.shape, key.shape, value.shape)

    # Lets explore the speed of each of the 3 implementations
    from torch.backends.cuda import SDPBackend, sdp_kernel

    # Helpful arguments mapper
    backend_map = {
        SDPBackend.MATH: {
            "enable_math": True,
            "enable_flash": False,
            "enable_mem_efficient": False,
        },
        SDPBackend.FLASH_ATTENTION: {
            "enable_math": False,
            "enable_flash": True,
            "enable_mem_efficient": False,
        },
        SDPBackend.EFFICIENT_ATTENTION: {
            "enable_math": False,
            "enable_flash": False,
            "enable_mem_efficient": True,
        },
    }

    from torch.profiler import ProfilerActivity, profile, record_function

    activities = [ProfilerActivity.CPU, ProfilerActivity.CUDA]

    print(
        f"The default implementation runs in {benchmark_torch_function_in_microseconds(F.scaled_dot_product_attention, query, key, value):.3f} microseconds"
    )
    with profile(
        activities=activities, record_shapes=False, profile_memory=True
    ) as prof:
        with record_function("Default detailed stats"):
            for _ in range(25):
                o = F.scaled_dot_product_attention(query, key, value)
        print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))

    print(
        f"The math implementation runs in {benchmark_torch_function_in_microseconds(F.scaled_dot_product_attention, query, key, value):.3f} microseconds"
    )
    with sdp_kernel(**backend_map[SDPBackend.MATH]):
        with profile(
            activities=activities, record_shapes=False, profile_memory=True
        ) as prof:
            with record_function("Math implementation stats"):
                for _ in range(25):
                    o = F.scaled_dot_product_attention(query, key, value)
            print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))

    with sdp_kernel(**backend_map[SDPBackend.FLASH_ATTENTION]):
        try:
            print(
                f"The flash attention implementation runs in {benchmark_torch_function_in_microseconds(F.scaled_dot_product_attention, query, key, value):.3f} microseconds"
            )
        except RuntimeError:
            print("FlashAttention is not supported. See warnings for reasons.")
        with profile(
            activities=activities, record_shapes=False, profile_memory=True
        ) as prof:
            with record_function("FlashAttention stats"):
                for _ in range(25):
                    o = F.scaled_dot_product_attention(query, key, value)
            print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))

    with sdp_kernel(**backend_map[SDPBackend.EFFICIENT_ATTENTION]):
        try:
            print(
                f"The memory efficient implementation runs in {benchmark_torch_function_in_microseconds(F.scaled_dot_product_attention, query, key, value):.3f} microseconds"
            )
        except RuntimeError:
            print("EfficientAttention is not supported. See warnings for reasons.")
        with profile(
            activities=activities, record_shapes=False, profile_memory=True
        ) as prof:
            with record_function("EfficientAttention stats"):
                for _ in range(25):
                    o = F.scaled_dot_product_attention(query, key, value)
            print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))


def run_model(model, x, context):
    return model(x, context)


def benchmark_transformer_blocks():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    import torch.utils.benchmark as benchmark

    def benchmark_torch_function_in_microseconds(f, *args, **kwargs):
        t0 = benchmark.Timer(
            stmt="f(*args, **kwargs)", globals={"args": args, "kwargs": kwargs, "f": f}
        )
        return t0.blocked_autorange().mean * 1e6

    checkpoint = True
    compile = False

    batch_size = 32
    h, w = 64, 64
    context_len = 1024
    embed_dimension = 1024
    context_dim = 1024
    d_head = 64

    transformer_depth = 4

    n_heads = embed_dimension // d_head

    dtype = torch.float16

    model_native = SpatialTransformer(
        embed_dimension,
        n_heads,
        d_head,
        context_dim=context_dim,
        use_linear=True,
        use_checkpoint=checkpoint,
        attn_type="softmax",
        depth=transformer_depth,
        sdp_backend=SDPBackend.FLASH_ATTENTION,
    ).to(device)
    model_efficient_attn = SpatialTransformer(
        embed_dimension,
        n_heads,
        d_head,
        context_dim=context_dim,
        use_linear=True,
        depth=transformer_depth,
        use_checkpoint=checkpoint,
        attn_type="softmax-xformers",
    ).to(device)
    if not checkpoint and compile:
        print("compiling models")
        model_native = torch.compile(model_native)
        model_efficient_attn = torch.compile(model_efficient_attn)

    x = torch.rand(batch_size, embed_dimension, h, w, device=device, dtype=dtype)
    c = torch.rand(batch_size, context_len, context_dim, device=device, dtype=dtype)

    from torch.profiler import ProfilerActivity, profile, record_function

    activities = [ProfilerActivity.CPU, ProfilerActivity.CUDA]

    with torch.autocast("cuda"):
        print(
            f"The native model runs in {benchmark_torch_function_in_microseconds(model_native.forward, x, c):.3f} microseconds"
        )
        print(
            f"The efficientattn model runs in {benchmark_torch_function_in_microseconds(model_efficient_attn.forward, x, c):.3f} microseconds"
        )

        print(75 * "+")
        print("NATIVE")
        print(75 * "+")
        torch.cuda.reset_peak_memory_stats()
        with profile(
            activities=activities, record_shapes=False, profile_memory=True
        ) as prof:
            with record_function("NativeAttention stats"):
                for _ in range(25):
                    model_native(x, c)
        print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))
        print(torch.cuda.max_memory_allocated() * 1e-9, "GB used by native block")

        print(75 * "+")
        print("Xformers")
        print(75 * "+")
        torch.cuda.reset_peak_memory_stats()
        with profile(
            activities=activities, record_shapes=False, profile_memory=True
        ) as prof:
            with record_function("xformers stats"):
                for _ in range(25):
                    model_efficient_attn(x, c)
        print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))
        print(torch.cuda.max_memory_allocated() * 1e-9, "GB used by xformers block")


def test01():
    # conv1x1 vs linear
    from ..util import count_params

    conv = nn.Conv2d(3, 32, kernel_size=1).cuda()
    print(count_params(conv))
    linear = torch.nn.Linear(3, 32).cuda()
    print(count_params(linear))

    print(conv.weight.shape)

    # use same initialization
    linear.weight = torch.nn.Parameter(conv.weight.squeeze(-1).squeeze(-1))
    linear.bias = torch.nn.Parameter(conv.bias)

    print(linear.weight.shape)

    x = torch.randn(11, 3, 64, 64).cuda()

    xr = rearrange(x, "b c h w -> b (h w) c").contiguous()
    print(xr.shape)
    out_linear = linear(xr)
    print(out_linear.mean(), out_linear.shape)

    out_conv = conv(x)
    print(out_conv.mean(), out_conv.shape)
    print("done with test01.\n")


def test02():
    # try cosine flash attention
    import time

    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True
    torch.backends.cudnn.benchmark = True
    print("testing cosine flash attention...")
    DIM = 1024
    SEQLEN = 4096
    BS = 16

    print(" softmax (vanilla) first...")
    model = BasicTransformerBlock(
        dim=DIM,
        n_heads=16,
        d_head=64,
        dropout=0.0,
        context_dim=None,
        attn_mode="softmax",
    ).cuda()
    try:
        x = torch.randn(BS, SEQLEN, DIM).cuda()
        tic = time.time()
        y = model(x)
        toc = time.time()
        print(y.shape, toc - tic)
    except RuntimeError as e:
        # likely oom
        print(str(e))

    print("\n now softmax-xformer ...")
    model = BasicTransformerBlock(
        dim=DIM,
        n_heads=16,
        d_head=64,
        dropout=0.0,
        context_dim=None,
        attn_mode="softmax-xformers",
    ).cuda()
    x = torch.randn(BS, SEQLEN, DIM).cuda()
    tic = time.time()
    y = model(x)
    toc = time.time()
    print(y.shape, toc - tic)
    print("done with test02.\n")


if __name__ == "__main__":
    test01()
    test02()

    benchmark_attn()
    # benchmark_transformer_blocks()

    print("done.")

CCEdit-main/sgm/modules/autoencoding/__init__.py
ADDED
File without changes
CCEdit-main/sgm/modules/autoencoding/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (157 Bytes)
CCEdit-main/sgm/modules/autoencoding/losses/__init__.py
ADDED
@@ -0,0 +1,246 @@
from typing import Any, Union

import torch
import torch.nn as nn
from einops import rearrange
from taming.modules.discriminator.model import NLayerDiscriminator, weights_init
from taming.modules.losses.lpips import LPIPS
from taming.modules.losses.vqperceptual import hinge_d_loss, vanilla_d_loss

from ....util import default, instantiate_from_config


def adopt_weight(weight, global_step, threshold=0, value=0.0):
    if global_step < threshold:
        weight = value
    return weight


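# Hedged example (editor's addition, not in the original file): adopt_weight gates a
# loss weight until a warm-up step is reached, e.g. to keep the discriminator term at
# zero early in training.
#
#   adopt_weight(1.0, global_step=100, threshold=500)   # -> 0.0 (before threshold)
#   adopt_weight(1.0, global_step=800, threshold=500)   # -> 1.0 (after threshold)

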
class LatentLPIPS(nn.Module):
    def __init__(
        self,
        decoder_config,
        perceptual_weight=1.0,
        latent_weight=1.0,
        scale_input_to_tgt_size=False,
        scale_tgt_to_input_size=False,
        perceptual_weight_on_inputs=0.0,
    ):
        super().__init__()
        self.scale_input_to_tgt_size = scale_input_to_tgt_size
        self.scale_tgt_to_input_size = scale_tgt_to_input_size
        self.init_decoder(decoder_config)
        self.perceptual_loss = LPIPS().eval()
        self.perceptual_weight = perceptual_weight
        self.latent_weight = latent_weight
        self.perceptual_weight_on_inputs = perceptual_weight_on_inputs

    def init_decoder(self, config):
        self.decoder = instantiate_from_config(config)
        if hasattr(self.decoder, "encoder"):
            del self.decoder.encoder

    def forward(self, latent_inputs, latent_predictions, image_inputs, split="train"):
        log = dict()
        loss = (latent_inputs - latent_predictions) ** 2
        log[f"{split}/latent_l2_loss"] = loss.mean().detach()
        image_reconstructions = None
        if self.perceptual_weight > 0.0:
            image_reconstructions = self.decoder.decode(latent_predictions)
            image_targets = self.decoder.decode(latent_inputs)
            perceptual_loss = self.perceptual_loss(
                image_targets.contiguous(), image_reconstructions.contiguous()
            )
            loss = (
                self.latent_weight * loss.mean()
                + self.perceptual_weight * perceptual_loss.mean()
            )
            log[f"{split}/perceptual_loss"] = perceptual_loss.mean().detach()

        if self.perceptual_weight_on_inputs > 0.0:
            image_reconstructions = default(
                image_reconstructions, self.decoder.decode(latent_predictions)
            )
            if self.scale_input_to_tgt_size:
                image_inputs = torch.nn.functional.interpolate(
                    image_inputs,
                    image_reconstructions.shape[2:],
                    mode="bicubic",
                    antialias=True,
                )
            elif self.scale_tgt_to_input_size:
                image_reconstructions = torch.nn.functional.interpolate(
                    image_reconstructions,
                    image_inputs.shape[2:],
                    mode="bicubic",
                    antialias=True,
                )

            perceptual_loss2 = self.perceptual_loss(
                image_inputs.contiguous(), image_reconstructions.contiguous()
            )
            loss = loss + self.perceptual_weight_on_inputs * perceptual_loss2.mean()
            log[f"{split}/perceptual_loss_on_inputs"] = perceptual_loss2.mean().detach()
        return loss, log


class GeneralLPIPSWithDiscriminator(nn.Module):
    def __init__(
        self,
        disc_start: int,
        logvar_init: float = 0.0,
        pixelloss_weight=1.0,
        disc_num_layers: int = 3,
        disc_in_channels: int = 3,
        disc_factor: float = 1.0,
        disc_weight: float = 1.0,
        perceptual_weight: float = 1.0,
        disc_loss: str = "hinge",
        scale_input_to_tgt_size: bool = False,
        dims: int = 2,
        learn_logvar: bool = False,
        regularization_weights: Union[None, dict] = None,
    ):
        super().__init__()
        self.dims = dims
        if self.dims > 2:
            print(
                f"running with dims={dims}. This means that for perceptual loss calculation, "
                f"the LPIPS loss will be applied to each frame independently. "
            )
        self.scale_input_to_tgt_size = scale_input_to_tgt_size
        assert disc_loss in ["hinge", "vanilla"]
        self.pixel_weight = pixelloss_weight
        self.perceptual_loss = LPIPS().eval()
        self.perceptual_weight = perceptual_weight
        # output log variance
        self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init)
        self.learn_logvar = learn_logvar

        self.discriminator = NLayerDiscriminator(
            input_nc=disc_in_channels, n_layers=disc_num_layers, use_actnorm=False
        ).apply(weights_init)
        self.discriminator_iter_start = disc_start
        self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss
        self.disc_factor = disc_factor
        self.discriminator_weight = disc_weight
        self.regularization_weights = default(regularization_weights, {})

    def get_trainable_parameters(self) -> Any:
        return self.discriminator.parameters()

    def get_trainable_autoencoder_parameters(self) -> Any:
        if self.learn_logvar:
            yield self.logvar
        yield from ()

    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
        if last_layer is not None:
            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
        else:
            nll_grads = torch.autograd.grad(
                nll_loss, self.last_layer[0], retain_graph=True
            )[0]
            g_grads = torch.autograd.grad(
                g_loss, self.last_layer[0], retain_graph=True
            )[0]

        d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
        d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
        d_weight = d_weight * self.discriminator_weight
        return d_weight

    def forward(
        self,
        regularization_log,
        inputs,
        reconstructions,
        optimizer_idx,
|
| 160 |
+
global_step,
|
| 161 |
+
last_layer=None,
|
| 162 |
+
split="train",
|
| 163 |
+
weights=None,
|
| 164 |
+
):
|
| 165 |
+
if self.scale_input_to_tgt_size:
|
| 166 |
+
inputs = torch.nn.functional.interpolate(
|
| 167 |
+
inputs, reconstructions.shape[2:], mode="bicubic", antialias=True
|
| 168 |
+
)
|
| 169 |
+
|
| 170 |
+
if self.dims > 2:
|
| 171 |
+
inputs, reconstructions = map(
|
| 172 |
+
lambda x: rearrange(x, "b c t h w -> (b t) c h w"),
|
| 173 |
+
(inputs, reconstructions),
|
| 174 |
+
)
|
| 175 |
+
|
| 176 |
+
rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
|
| 177 |
+
if self.perceptual_weight > 0:
|
| 178 |
+
p_loss = self.perceptual_loss(
|
| 179 |
+
inputs.contiguous(), reconstructions.contiguous()
|
| 180 |
+
)
|
| 181 |
+
rec_loss = rec_loss + self.perceptual_weight * p_loss
|
| 182 |
+
|
| 183 |
+
nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
|
| 184 |
+
weighted_nll_loss = nll_loss
|
| 185 |
+
if weights is not None:
|
| 186 |
+
weighted_nll_loss = weights * nll_loss
|
| 187 |
+
weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
|
| 188 |
+
nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
|
| 189 |
+
|
| 190 |
+
# now the GAN part
|
| 191 |
+
if optimizer_idx == 0:
|
| 192 |
+
# generator update
|
| 193 |
+
logits_fake = self.discriminator(reconstructions.contiguous())
|
| 194 |
+
g_loss = -torch.mean(logits_fake)
|
| 195 |
+
|
| 196 |
+
if self.disc_factor > 0.0:
|
| 197 |
+
try:
|
| 198 |
+
d_weight = self.calculate_adaptive_weight(
|
| 199 |
+
nll_loss, g_loss, last_layer=last_layer
|
| 200 |
+
)
|
| 201 |
+
except RuntimeError:
|
| 202 |
+
assert not self.training
|
| 203 |
+
d_weight = torch.tensor(0.0)
|
| 204 |
+
else:
|
| 205 |
+
d_weight = torch.tensor(0.0)
|
| 206 |
+
|
| 207 |
+
disc_factor = adopt_weight(
|
| 208 |
+
self.disc_factor, global_step, threshold=self.discriminator_iter_start
|
| 209 |
+
)
|
| 210 |
+
loss = weighted_nll_loss + d_weight * disc_factor * g_loss
|
| 211 |
+
log = dict()
|
| 212 |
+
for k in regularization_log:
|
| 213 |
+
if k in self.regularization_weights:
|
| 214 |
+
loss = loss + self.regularization_weights[k] * regularization_log[k]
|
| 215 |
+
log[f"{split}/{k}"] = regularization_log[k].detach().mean()
|
| 216 |
+
|
| 217 |
+
log.update(
|
| 218 |
+
{
|
| 219 |
+
"{}/total_loss".format(split): loss.clone().detach().mean(),
|
| 220 |
+
"{}/logvar".format(split): self.logvar.detach(),
|
| 221 |
+
"{}/nll_loss".format(split): nll_loss.detach().mean(),
|
| 222 |
+
"{}/rec_loss".format(split): rec_loss.detach().mean(),
|
| 223 |
+
"{}/d_weight".format(split): d_weight.detach(),
|
| 224 |
+
"{}/disc_factor".format(split): torch.tensor(disc_factor),
|
| 225 |
+
"{}/g_loss".format(split): g_loss.detach().mean(),
|
| 226 |
+
}
|
| 227 |
+
)
|
| 228 |
+
|
| 229 |
+
return loss, log
|
| 230 |
+
|
| 231 |
+
if optimizer_idx == 1:
|
| 232 |
+
# second pass for discriminator update
|
| 233 |
+
logits_real = self.discriminator(inputs.contiguous().detach())
|
| 234 |
+
logits_fake = self.discriminator(reconstructions.contiguous().detach())
|
| 235 |
+
|
| 236 |
+
disc_factor = adopt_weight(
|
| 237 |
+
self.disc_factor, global_step, threshold=self.discriminator_iter_start
|
| 238 |
+
)
|
| 239 |
+
d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
|
| 240 |
+
|
| 241 |
+
log = {
|
| 242 |
+
"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
|
| 243 |
+
"{}/logits_real".format(split): logits_real.detach().mean(),
|
| 244 |
+
"{}/logits_fake".format(split): logits_fake.detach().mean(),
|
| 245 |
+
}
|
| 246 |
+
return d_loss, log
|
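For orientation, GeneralLPIPSWithDiscriminator is written to be called twice per batch: once with optimizer_idx=0 for the autoencoder/generator update (reconstruction, NLL, and adaptively weighted adversarial term) and once with optimizer_idx=1 for the discriminator update. The minimal sketch below illustrates only that calling pattern; the model object, its encode/decode/get_last_layer methods, and the two optimizers are hypothetical stand-ins for this example and are not taken from this commit (the actual wiring belongs to the repository's autoencoder model, not reproduced here).

# Minimal sketch of the two-pass calling pattern; every name here (model, encode,
# decode, get_last_layer, opt_ae, opt_disc) is a hypothetical stand-in, not code
# from this commit.
import torch

def training_step(model, loss_fn, x, global_step, opt_ae, opt_disc):
    # assumed: encode returns (latent, regularization_log), decode maps latents to images
    z, reg_log = model.encode(x, return_reg_log=True)
    xrec = model.decode(z)

    # pass 1: autoencoder / generator update (optimizer_idx=0)
    ae_loss, log_ae = loss_fn(
        reg_log, x, xrec,
        optimizer_idx=0,
        global_step=global_step,
        last_layer=model.get_last_layer(),
        split="train",
    )
    opt_ae.zero_grad()
    ae_loss.backward()
    opt_ae.step()

    # pass 2: discriminator update (optimizer_idx=1); the loss detaches both
    # inputs and reconstructions internally before scoring them
    d_loss, log_disc = loss_fn(
        reg_log, x, xrec.detach(),
        optimizer_idx=1,
        global_step=global_step,
        split="train",
    )
    opt_disc.zero_grad()
    d_loss.backward()
    opt_disc.step()
    return {**log_ae, **log_disc}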
CCEdit-main/sgm/modules/autoencoding/regularizers/__init__.py
ADDED
@@ -0,0 +1,53 @@
from abc import abstractmethod
from typing import Any, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F

from ....modules.distributions.distributions import DiagonalGaussianDistribution


class AbstractRegularizer(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, dict]:
        raise NotImplementedError()

    @abstractmethod
    def get_trainable_parameters(self) -> Any:
        raise NotImplementedError()


class DiagonalGaussianRegularizer(AbstractRegularizer):
    def __init__(self, sample: bool = True):
        super().__init__()
        self.sample = sample

    def get_trainable_parameters(self) -> Any:
        yield from ()

    def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, dict]:
        log = dict()
        posterior = DiagonalGaussianDistribution(z)
        if self.sample:
            z = posterior.sample()
        else:
            z = posterior.mode()
        kl_loss = posterior.kl()
        kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]
        log["kl_loss"] = kl_loss
        return z, log


def measure_perplexity(predicted_indices, num_centroids):
    # src: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py
    # eval cluster perplexity. when perplexity == num_embeddings then all clusters are used exactly equally
    encodings = (
        F.one_hot(predicted_indices, num_centroids).float().reshape(-1, num_centroids)
    )
    avg_probs = encodings.mean(0)
    perplexity = (-(avg_probs * torch.log(avg_probs + 1e-10)).sum()).exp()
    cluster_use = torch.sum(avg_probs > 0)
    return perplexity, cluster_use
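DiagonalGaussianRegularizer expects the encoder's raw output with means and log-variances stacked along the channel axis; it wraps them in a DiagonalGaussianDistribution, samples (or takes the mode of) the latent, and reports a batch-averaged KL term under the key "kl_loss", which GeneralLPIPSWithDiscriminator can pick up through its regularization_weights argument. A small usage sketch follows; the tensor shapes and the 1e-6 KL weight are assumptions for illustration, not values from this commit.

# Usage sketch; shapes and the KL weight are made up for illustration.
import torch

from sgm.modules.autoencoding.regularizers import DiagonalGaussianRegularizer

regularizer = DiagonalGaussianRegularizer(sample=True)

# 2 * z_channels feature maps: the first half is treated as the mean, the second
# half as the log-variance (here z_channels = 4, batch of 2, 32x32 latents).
moments = torch.randn(2, 8, 32, 32)
z, reg_log = regularizer(moments)

print(z.shape)                      # torch.Size([2, 4, 32, 32])
print(reg_log["kl_loss"].item())    # scalar KL term, already averaged over the batch

# The same key can then be weighted in the reconstruction loss, e.g.
# GeneralLPIPSWithDiscriminator(..., regularization_weights={"kl_loss": 1e-6})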
CCEdit-main/sgm/modules/autoencoding/regularizers/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (2.47 kB).
CCEdit-main/sgm/modules/diffusionmodules/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (517 Bytes).
CCEdit-main/sgm/modules/diffusionmodules/__pycache__/denoiser_scaling.cpython-39.pyc
ADDED
Binary file (1.5 kB).
CCEdit-main/sgm/modules/diffusionmodules/__pycache__/model.cpython-39.pyc
ADDED
Binary file (16.9 kB).
CCEdit-main/sgm/modules/diffusionmodules/__pycache__/openaimodel.cpython-39.pyc
ADDED
Binary file (38.5 kB).