diff --git a/CCEdit-main/configs/example_training/autoencoder/kl-f4/imagenet-attnfree-logvar.yaml b/CCEdit-main/configs/example_training/autoencoder/kl-f4/imagenet-attnfree-logvar.yaml new file mode 100644 index 0000000000000000000000000000000000000000..482b25901b749731901aa54a9d07888e8f74a08b --- /dev/null +++ b/CCEdit-main/configs/example_training/autoencoder/kl-f4/imagenet-attnfree-logvar.yaml @@ -0,0 +1,115 @@ +model: + base_learning_rate: 4.5e-6 + target: sgm.models.autoencoder.AutoencodingEngine + params: + input_key: jpg + monitor: val/rec_loss + + loss_config: + target: sgm.modules.autoencoding.losses.GeneralLPIPSWithDiscriminator + params: + perceptual_weight: 0.25 + disc_start: 20001 + disc_weight: 0.5 + learn_logvar: True + + regularization_weights: + kl_loss: 1.0 + + regularizer_config: + target: sgm.modules.autoencoding.regularizers.DiagonalGaussianRegularizer + + encoder_config: + target: sgm.modules.diffusionmodules.model.Encoder + params: + attn_type: none + double_z: True + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [ 1, 2, 4 ] + num_res_blocks: 4 + attn_resolutions: [ ] + dropout: 0.0 + + decoder_config: + target: sgm.modules.diffusionmodules.model.Decoder + params: + attn_type: none + double_z: False + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [ 1, 2, 4 ] + num_res_blocks: 4 + attn_resolutions: [ ] + dropout: 0.0 + +data: + target: sgm.data.dataset.StableDataModuleFromConfig + params: + train: + datapipeline: + urls: + - "DATA-PATH" + pipeline_config: + shardshuffle: 10000 + sample_shuffle: 10000 + + decoders: + - "pil" + + postprocessors: + - target: sdata.mappers.TorchVisionImageTransforms + params: + key: 'jpg' + transforms: + - target: torchvision.transforms.Resize + params: + size: 256 + interpolation: 3 + - target: torchvision.transforms.ToTensor + - target: sdata.mappers.Rescaler + - target: sdata.mappers.AddOriginalImageSizeAsTupleAndCropToSquare + params: + 
h_key: height + w_key: width + + loader: + batch_size: 8 + num_workers: 4 + + +lightning: + strategy: + target: pytorch_lightning.strategies.DDPStrategy + params: + find_unused_parameters: True + + modelcheckpoint: + params: + every_n_train_steps: 5000 + + callbacks: + metrics_over_trainsteps_checkpoint: + params: + every_n_train_steps: 50000 + + image_logger: + target: main.ImageLogger + params: + enable_autocast: False + batch_frequency: 1000 + max_images: 8 + increase_log_steps: True + + trainer: + devices: 0, + limit_val_batches: 50 + benchmark: True + accumulate_grad_batches: 1 + val_check_interval: 10000 \ No newline at end of file diff --git a/CCEdit-main/configs/example_training/imagenet-f8_cond.yaml b/CCEdit-main/configs/example_training/imagenet-f8_cond.yaml new file mode 100644 index 0000000000000000000000000000000000000000..60627331bc57349414a49ac5b9a04893fbe3f2be --- /dev/null +++ b/CCEdit-main/configs/example_training/imagenet-f8_cond.yaml @@ -0,0 +1,188 @@ +model: + base_learning_rate: 1.0e-4 + target: sgm.models.diffusion.DiffusionEngine + params: + scale_factor: 0.13025 + disable_first_stage_autocast: True + log_keys: + - cls + + scheduler_config: + target: sgm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: [10000] + cycle_lengths: [10000000000000] + f_start: [1.e-6] + f_max: [1.] + f_min: [1.] 
+ + denoiser_config: + target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser + params: + num_idx: 1000 + + weighting_config: + target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting + scaling_config: + target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + network_config: + target: sgm.modules.diffusionmodules.openaimodel.UNetModel + params: + use_checkpoint: True + use_fp16: True + in_channels: 4 + out_channels: 4 + model_channels: 256 + attention_resolutions: [1, 2, 4] + num_res_blocks: 2 + channel_mult: [1, 2, 4] + num_head_channels: 64 + num_classes: sequential + adm_in_channels: 1024 + use_spatial_transformer: true + transformer_depth: 1 + context_dim: 1024 + spatial_transformer_attn_type: softmax-xformers + + conditioner_config: + target: sgm.modules.GeneralConditioner + params: + emb_models: + # crossattn cond + - is_trainable: True + input_key: cls + ucg_rate: 0.2 + target: sgm.modules.encoders.modules.ClassEmbedder + params: + add_sequence_dim: True # will be used through crossattn then + embed_dim: 1024 + n_classes: 1000 + # vector cond + - is_trainable: False + ucg_rate: 0.2 + input_key: original_size_as_tuple + target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND + params: + outdim: 256 # multiplied by two + # vector cond + - is_trainable: False + input_key: crop_coords_top_left + ucg_rate: 0.2 + target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND + params: + outdim: 256 # multiplied by two + + first_stage_config: + target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper + params: + ckpt_path: CKPT_PATH + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + attn_type: vanilla-xformers + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [1, 2, 4, 4] + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + 
+ loss_fn_config: + target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss + params: + sigma_sampler_config: + target: sgm.modules.diffusionmodules.sigma_sampling.DiscreteSampling + params: + num_idx: 1000 + + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + sampler_config: + target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler + params: + num_steps: 50 + + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + guider_config: + target: sgm.modules.diffusionmodules.guiders.VanillaCFG + params: + scale: 5.0 + +data: + target: sgm.data.dataset.StableDataModuleFromConfig + params: + train: + datapipeline: + urls: + # USER: adapt this path the root of your custom dataset + - "DATA_PATH" + pipeline_config: + shardshuffle: 10000 + sample_shuffle: 10000 # USER: you might wanna adapt depending on your available RAM + + decoders: + - "pil" + + postprocessors: + - target: sdata.mappers.TorchVisionImageTransforms + params: + key: 'jpg' # USER: you might wanna adapt this for your custom dataset + transforms: + - target: torchvision.transforms.Resize + params: + size: 256 + interpolation: 3 + - target: torchvision.transforms.ToTensor + - target: sdata.mappers.Rescaler + + - target: sdata.mappers.AddOriginalImageSizeAsTupleAndCropToSquare + params: + h_key: height # USER: you might wanna adapt this for your custom dataset + w_key: width # USER: you might wanna adapt this for your custom dataset + + loader: + batch_size: 64 + num_workers: 6 + +lightning: + modelcheckpoint: + params: + every_n_train_steps: 5000 + + callbacks: + metrics_over_trainsteps_checkpoint: + params: + every_n_train_steps: 25000 + + image_logger: + target: main.ImageLogger + params: + disabled: False + enable_autocast: False + batch_frequency: 1000 + max_images: 8 + increase_log_steps: True + log_first_step: False + log_images_kwargs: + use_ema_scope: False + N: 8 + n_rows: 2 + + trainer: + 
devices: 0, + benchmark: True + num_sanity_val_steps: 0 + accumulate_grad_batches: 1 + max_epochs: 1000 \ No newline at end of file diff --git a/CCEdit-main/configs/example_training/sd_1_5_controlldm-test-cp-no2ndca-add-cfca-depthmidas.yaml b/CCEdit-main/configs/example_training/sd_1_5_controlldm-test-cp-no2ndca-add-cfca-depthmidas.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6a752fa913ca119c96a69ee964b51243da8f305c --- /dev/null +++ b/CCEdit-main/configs/example_training/sd_1_5_controlldm-test-cp-no2ndca-add-cfca-depthmidas.yaml @@ -0,0 +1,270 @@ +# tvi2v: condition on the frame, text, and video to generate the video +# cp: copy weights from diffusion_model/unet to controlnet_img. Use VAE to extract the 8x downsampled image. +# no2ndca: no second cross attention (text cross attention) in the temporal layers +# add: add the features of reference image on the features of center frame of the main SD model +# cfca: cross-frame cross-attention, in the main SD model, for each token as query, take the features of center image and current frame as the key and value + + + +InputFPS: &InputFPS 4 +FrameLength: &FrameLength 17 +BatchSize: &BatchSize 1 +NumGPU: &NumGPU 1 +NumNodes: &NumNodes 1 +BaseLearningRate: &BaseLearningRate 5.0e-5 +DataDir: &DataDir /PATH/TO/YOUR/DATA # specify your data dir +MetadataDir: &MetadataDir /PATH/TO/YOUR/METADATA # specify your metadata dir +ResolutionH: &ResolutionH 384 +ResolutionW: &ResolutionW 512 +Split: &Split "val" # * Debug setting +Cut: &Cut "10M" # * Debug setting +CkptPath: &CkptPath /PATH/TO/YOUR/CHECKPOINT # specify your checkpoint dir + +Ckpt_log_every: &Ckpt_log_every 20000 # * Debug setting, 4000 +Image_log_every: &Image_log_every 10 # * Debug setting, 2000 +AccumulateGradBatches: &AccumulateGradBatches 1 +# DEBUG SETTINGS +# Model_channels: &Model_channels 64 +Model_channels: &Model_channels 320 + +model: + base_learning_rate: *BaseLearningRate + target: 
sgm.models.diffusion.VideoDiffusionEngineTV2V + params: + use_ema: False # Default is False + scale_factor: 0.18215 + disable_first_stage_autocast: True + log_keys: + - txt + ckpt_path: *CkptPath + freeze_model: spatial # none indicates no freezing + + scheduler_config: + target: sgm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: [ 100 ] + cycle_lengths: [ 10000000000000 ] + f_start: [ 1.e-6 ] + f_max: [ 1. ] + f_min: [ 1. ] + + denoiser_config: + target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser + params: + num_idx: 1000 + + weighting_config: + target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting + scaling_config: + target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + network_config: + target: sgm.modules.diffusionmodules.controlmodel.ControlledUNetModel3DTV2V + params: + use_checkpoint: True + in_channels: 4 + out_channels: 4 + model_channels: *Model_channels + attention_resolutions: [4, 2, 1] + num_res_blocks: 2 + channel_mult: [1, 2, 4, 4] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + legacy: False + disable_temporal_text_ca: True + # -> use "temporal_ca" modules + enable_attention3d_crossframe: True + ST3DCA_ca_type: 'center_self' + # crossframe_type: 'reference' # not use the reference image as k,v, so comment it. 
+ controlnet_config: + target: sgm.modules.diffusionmodules.controlmodel.ControlNet2D + params: + use_checkpoint: True + in_channels: 4 + hint_channels: 3 + model_channels: *Model_channels + attention_resolutions: [4, 2, 1] + num_res_blocks: 2 + channel_mult: [1, 2, 4, 4] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + legacy: False + control_scales: 1.0 + controlnet_img_config: # process the anchor frame + target: sgm.modules.diffusionmodules.controlmodel.ControlNet2D + params: + use_checkpoint: True + in_channels: 4 + hint_channels: 3 + model_channels: *Model_channels + attention_resolutions: [4, 2, 1] + num_res_blocks: 2 + channel_mult: [1, 2, 4, 4] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + legacy: False + # -> add on center frame, strengthen the control + control_scales: 1.0 + # control_scales: 0.0 # use crossattention, instead of add in controlnet + # -> not add the noised x to controlnet_img + no_add_x: True # no need to add x + set_input_hint_block_as_identity: True # ATTENTION: newly added. 
default: False + # -> disbale the text cross attention in controlnet_img + disable_text_ca: True + + conditioner_config: + target: sgm.modules.GeneralConditioner + params: + emb_models: + # crossattn cond + - is_trainable: False + input_key: txt + ucg_rate: 0.5 + legacy_ucg_value: "" + target: sgm.modules.encoders.modules.FrozenCLIPEmbedder + params: + freeze: true + - is_trainable: False + input_key: control_hint + ucg_rate: 0.0 + target: sgm.modules.encoders.modules.DepthMidasEncoder + - is_trainable: False + input_key: cond_img + ucg_rate: 0.0 + target: sgm.modules.encoders.modules.VAEEmbedder + + first_stage_config: + target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [1, 2, 4, 4] + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + loss_fn_config: + target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss + params: + sigma_sampler_config: + target: sgm.modules.diffusionmodules.sigma_sampling.DiscreteSampling + params: + num_idx: 1000 + + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + offset_noise_level: 0.1 + offset_noise_varying_dim: 3 + + sampler_config: + target: sgm.modules.diffusionmodules.sampling.EulerAncestralSampler # ATTENTION: newly add. Default: EulerEDMSampler + params: + num_steps: 30 # ATTENTION: newly add. 
default: 50 + + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + guider_config: + target: sgm.modules.diffusionmodules.guiders.VanillaCFGTV2V + params: + scale: 7.5 + +data: + target: sgm.data.detaset_webvid.DataModuleFromConfig + params: + batch_size: *BatchSize # TODO need to change batch_size + num_workers: 8 + wrap: False + train: + target: sgm.data.webvid.webvid_dataset.WebVid + params: + dataset_name: WebVid + data_dir: *DataDir # TODO check the data_dir + metadata_dir: *MetadataDir + split: *Split + cut: *Cut + # key: *Key # TODO check data file name, default cleaned + subsample: 1 + text_params: + input: text + video_params: + input_res_h: *ResolutionH + input_res_w: *ResolutionW + tsfm_params: + norm_mean: [0.5, 0.5, 0.5] + norm_std: [0.5, 0.5, 0.5] + randcrop_scale: [0.8, 1.0] # ATTENTION: newly add. + num_frames: *FrameLength + prop_factor: *InputFPS + loading: lax + metadata_folder_name: webvid10m_meta + first_stage_key: jpg + cond_stage_key: txt + skip_missing_files: True + use_control_hint: True + # -> use center frame as the condition image + # random_cond_img: True + +lightning: + modelcheckpoint: + params: + every_n_train_steps: *Ckpt_log_every + + callbacks: + metrics_over_trainsteps_checkpoint: + params: + every_n_train_steps: 25000 + + image_logger: + target: main.ImageLogger + params: + disabled: False + enable_autocast: False + batch_frequency: *Image_log_every + max_images: 32 + increase_log_steps: False # default is True + log_first_step: False + log_images_kwargs: + use_ema_scope: False + N: 8 + n_rows: *FrameLength + video_fps: *InputFPS + + trainer: + precision: 16 + devices: *NumGPU + num_nodes: *NumNodes + benchmark: True + num_sanity_val_steps: 0 + accumulate_grad_batches: *AccumulateGradBatches + max_epochs: 1000 + + strategy: + target: pytorch_lightning.strategies.DDPStrategy + params: + find_unused_parameters: True + + # strategy: + # target: 
pytorch_lightning.strategies.DeepSpeedStrategy + # params: + # stage: 2 + # allgather_bucket_size: 8e8 + # reduce_bucket_size: 8e8 + # load_full_weights: True \ No newline at end of file diff --git a/CCEdit-main/configs/example_training/sd_1_5_controlldm-test-tvi2v-cp-no2ndca-add-cfca-depthmidas.yaml b/CCEdit-main/configs/example_training/sd_1_5_controlldm-test-tvi2v-cp-no2ndca-add-cfca-depthmidas.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8fb848f9539cb12bdef80e347431faa139db6d44 --- /dev/null +++ b/CCEdit-main/configs/example_training/sd_1_5_controlldm-test-tvi2v-cp-no2ndca-add-cfca-depthmidas.yaml @@ -0,0 +1,269 @@ +# tvi2v: condition on the frame, text, and video to generate the video +# cp: copy weights from diffusion_model/unet to controlnet_img. Use VAE to extract the 8x downsampled image. +# no2ndca: no second cross attention (text cross attention) in the temporal layers +# add: add the features of reference image on the features of center frame of the main SD model +# cfca: cross-frame cross-attention, in the main SD model, for each token as query, take the features of center image and current frame as the key and value + + + +InputFPS: &InputFPS 4 +FrameLength: &FrameLength 17 +BatchSize: &BatchSize 1 +NumGPU: &NumGPU 1 +NumNodes: &NumNodes 1 +BaseLearningRate: &BaseLearningRate 5.0e-5 +DataDir: &DataDir /PATH/TO/YOUR/DATA # specify your data dir +MetadataDir: &MetadataDir /PATH/TO/YOUR/METADATA # specify your metadata dir +ResolutionH: &ResolutionH 384 +ResolutionW: &ResolutionW 512 +Split: &Split "val" # * Debug setting +Cut: &Cut "10M" # * Debug setting +CkptPath: &CkptPath /PATH/TO/YOUR/CHECKPOINT # specify your checkpoint dir +Ckpt_log_every: &Ckpt_log_every 20000 # * Debug setting, 4000 +Image_log_every: &Image_log_every 10 # * Debug setting, 2000 +AccumulateGradBatches: &AccumulateGradBatches 1 +# DEBUG SETTINGS +# Model_channels: &Model_channels 64 +Model_channels: &Model_channels 320 + +model: + base_learning_rate: 
*BaseLearningRate + target: sgm.models.diffusion.VideoDiffusionEngineTV2V + params: + use_ema: False # Default is False + scale_factor: 0.18215 + disable_first_stage_autocast: True + log_keys: + - txt + ckpt_path: *CkptPath # TODO: for fast debugging, I comment this line + freeze_model: spatial # none indicates no freezing + + scheduler_config: + target: sgm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: [ 100 ] + cycle_lengths: [ 10000000000000 ] + f_start: [ 1.e-6 ] + f_max: [ 1. ] + f_min: [ 1. ] + + denoiser_config: + target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser + params: + num_idx: 1000 + + weighting_config: + target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting + scaling_config: + target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + network_config: + target: sgm.modules.diffusionmodules.controlmodel.ControlledUNetModel3DTV2V + params: + use_checkpoint: True + in_channels: 4 + out_channels: 4 + model_channels: *Model_channels + attention_resolutions: [4, 2, 1] + num_res_blocks: 2 + channel_mult: [1, 2, 4, 4] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + legacy: False + disable_temporal_text_ca: True + # -> use "temporal_ca" modules + enable_attention3d_crossframe: True + ST3DCA_ca_type: 'center_self' + # crossframe_type: 'reference' # not use the reference image as k,v, so comment it. 
+ controlnet_config: + target: sgm.modules.diffusionmodules.controlmodel.ControlNet2D + params: + use_checkpoint: True + in_channels: 4 + hint_channels: 3 + model_channels: *Model_channels + attention_resolutions: [4, 2, 1] + num_res_blocks: 2 + channel_mult: [1, 2, 4, 4] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + legacy: False + control_scales: 1.0 + controlnet_img_config: # process the anchor frame + target: sgm.modules.diffusionmodules.controlmodel.ControlNet2D + params: + use_checkpoint: True + in_channels: 4 + hint_channels: 3 + model_channels: *Model_channels + attention_resolutions: [4, 2, 1] + num_res_blocks: 2 + channel_mult: [1, 2, 4, 4] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + legacy: False + # -> add on center frame, strengthen the control + control_scales: 1.0 + # control_scales: 0.0 # use crossattention, instead of add in controlnet + # -> not add the noised x to controlnet_img + no_add_x: True # no need to add x + set_input_hint_block_as_identity: True # ATTENTION: newly added. 
default: False + # -> disbale the text cross attention in controlnet_img + disable_text_ca: True + + conditioner_config: + target: sgm.modules.GeneralConditioner + params: + emb_models: + # crossattn cond + - is_trainable: False + input_key: txt + ucg_rate: 0.5 + legacy_ucg_value: "" + target: sgm.modules.encoders.modules.FrozenCLIPEmbedder + params: + freeze: true + - is_trainable: False + input_key: control_hint + ucg_rate: 0.0 + target: sgm.modules.encoders.modules.DepthMidasEncoder + - is_trainable: False + input_key: cond_img + ucg_rate: 0.0 + target: sgm.modules.encoders.modules.VAEEmbedder + + first_stage_config: + target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [1, 2, 4, 4] + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + loss_fn_config: + target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss + params: + sigma_sampler_config: + target: sgm.modules.diffusionmodules.sigma_sampling.DiscreteSampling + params: + num_idx: 1000 + + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + offset_noise_level: 0.1 + offset_noise_varying_dim: 3 + + sampler_config: + target: sgm.modules.diffusionmodules.sampling.EulerAncestralSampler # ATTENTION: newly add. Default: EulerEDMSampler + params: + num_steps: 30 # ATTENTION: newly add. 
default: 50 + + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + guider_config: + target: sgm.modules.diffusionmodules.guiders.VanillaCFGTV2V + params: + scale: 7.5 + +data: + target: sgm.data.detaset_webvid.DataModuleFromConfig + params: + batch_size: *BatchSize # TODO need to change batch_size + num_workers: 8 + wrap: False + train: + target: sgm.data.webvid.webvid_dataset.WebVid + params: + dataset_name: WebVid + data_dir: *DataDir # TODO check the data_dir + metadata_dir: *MetadataDir + split: *Split + cut: *Cut + # key: *Key # TODO check data file name, default cleaned + subsample: 1 + text_params: + input: text + video_params: + input_res_h: *ResolutionH + input_res_w: *ResolutionW + tsfm_params: + norm_mean: [0.5, 0.5, 0.5] + norm_std: [0.5, 0.5, 0.5] + randcrop_scale: [0.8, 1.0] # ATTENTION: newly add. + num_frames: *FrameLength + prop_factor: *InputFPS + loading: lax + metadata_folder_name: webvid10m_meta + first_stage_key: jpg + cond_stage_key: txt + skip_missing_files: True + use_control_hint: True + # -> use center frame as the condition image + # random_cond_img: True + +lightning: + modelcheckpoint: + params: + every_n_train_steps: *Ckpt_log_every + + callbacks: + metrics_over_trainsteps_checkpoint: + params: + every_n_train_steps: 25000 + + image_logger: + target: main.ImageLogger + params: + disabled: False + enable_autocast: False + batch_frequency: *Image_log_every + max_images: 32 + increase_log_steps: False # default is True + log_first_step: False + log_images_kwargs: + use_ema_scope: False + N: 8 + n_rows: *FrameLength + video_fps: *InputFPS + + trainer: + precision: 16 + devices: *NumGPU + num_nodes: *NumNodes + benchmark: True + num_sanity_val_steps: 0 + accumulate_grad_batches: *AccumulateGradBatches + max_epochs: 1000 + + strategy: + target: pytorch_lightning.strategies.DDPStrategy + params: + find_unused_parameters: True + + # strategy: + # target: 
pytorch_lightning.strategies.DeepSpeedStrategy + # params: + # stage: 2 + # allgather_bucket_size: 8e8 + # reduce_bucket_size: 8e8 + # load_full_weights: True \ No newline at end of file diff --git a/CCEdit-main/configs/example_training/toy/cifar10_cond.yaml b/CCEdit-main/configs/example_training/toy/cifar10_cond.yaml new file mode 100644 index 0000000000000000000000000000000000000000..36ba2527a0a8364b91052ef40a3af099996fdff5 --- /dev/null +++ b/CCEdit-main/configs/example_training/toy/cifar10_cond.yaml @@ -0,0 +1,99 @@ +model: + base_learning_rate: 1.0e-4 + target: sgm.models.diffusion.DiffusionEngine + params: + denoiser_config: + target: sgm.modules.diffusionmodules.denoiser.Denoiser + params: + weighting_config: + target: sgm.modules.diffusionmodules.denoiser_weighting.EDMWeighting + params: + sigma_data: 1.0 + scaling_config: + target: sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling + params: + sigma_data: 1.0 + + network_config: + target: sgm.modules.diffusionmodules.openaimodel.UNetModel + params: + use_checkpoint: True + in_channels: 3 + out_channels: 3 + model_channels: 32 + attention_resolutions: [] + num_res_blocks: 4 + channel_mult: [1, 2, 2] + num_head_channels: 32 + num_classes: sequential + adm_in_channels: 128 + + conditioner_config: + target: sgm.modules.GeneralConditioner + params: + emb_models: + - is_trainable: True + input_key: cls + ucg_rate: 0.2 + target: sgm.modules.encoders.modules.ClassEmbedder + params: + embed_dim: 128 + n_classes: 10 + + first_stage_config: + target: sgm.models.autoencoder.IdentityFirstStage + + loss_fn_config: + target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss + params: + sigma_sampler_config: + target: sgm.modules.diffusionmodules.sigma_sampling.EDMSampling + + sampler_config: + target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler + params: + num_steps: 50 + + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization + + guider_config: + target: 
sgm.modules.diffusionmodules.guiders.VanillaCFG + params: + scale: 3.0 + +data: + target: sgm.data.cifar10.CIFAR10Loader + params: + batch_size: 512 + num_workers: 1 + +lightning: + modelcheckpoint: + params: + every_n_train_steps: 5000 + + callbacks: + metrics_over_trainsteps_checkpoint: + params: + every_n_train_steps: 25000 + + image_logger: + target: main.ImageLogger + params: + disabled: False + batch_frequency: 1000 + max_images: 64 + increase_log_steps: True + log_first_step: False + log_images_kwargs: + use_ema_scope: False + N: 64 + n_rows: 8 + + trainer: + devices: 0, + benchmark: True + num_sanity_val_steps: 0 + accumulate_grad_batches: 1 + max_epochs: 20 \ No newline at end of file diff --git a/CCEdit-main/configs/example_training/toy/mnist.yaml b/CCEdit-main/configs/example_training/toy/mnist.yaml new file mode 100644 index 0000000000000000000000000000000000000000..44d8e6fea88155ad9e8bcd9724d0f074f6796798 --- /dev/null +++ b/CCEdit-main/configs/example_training/toy/mnist.yaml @@ -0,0 +1,80 @@ +model: + base_learning_rate: 1.0e-4 + target: sgm.models.diffusion.DiffusionEngine + params: + denoiser_config: + target: sgm.modules.diffusionmodules.denoiser.Denoiser + params: + weighting_config: + target: sgm.modules.diffusionmodules.denoiser_weighting.EDMWeighting + params: + sigma_data: 1.0 + scaling_config: + target: sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling + params: + sigma_data: 1.0 + + network_config: + target: sgm.modules.diffusionmodules.openaimodel.UNetModel + params: + use_checkpoint: True + in_channels: 1 + out_channels: 1 + model_channels: 32 + attention_resolutions: [] + num_res_blocks: 4 + channel_mult: [1, 2, 2] + num_head_channels: 32 + + first_stage_config: + target: sgm.models.autoencoder.IdentityFirstStage + + loss_fn_config: + target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss + params: + sigma_sampler_config: + target: sgm.modules.diffusionmodules.sigma_sampling.EDMSampling + + sampler_config: + target: 
sgm.modules.diffusionmodules.sampling.EulerEDMSampler + params: + num_steps: 50 + + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization + +data: + target: sgm.data.mnist.MNISTLoader + params: + batch_size: 512 + num_workers: 1 + +lightning: + modelcheckpoint: + params: + every_n_train_steps: 5000 + + callbacks: + metrics_over_trainsteps_checkpoint: + params: + every_n_train_steps: 25000 + + image_logger: + target: main.ImageLogger + params: + disabled: False + batch_frequency: 1000 + max_images: 64 + increase_log_steps: False + log_first_step: False + log_images_kwargs: + use_ema_scope: False + N: 64 + n_rows: 8 + + trainer: + devices: 0, + benchmark: True + num_sanity_val_steps: 0 + accumulate_grad_batches: 1 + max_epochs: 10 \ No newline at end of file diff --git a/CCEdit-main/configs/example_training/toy/mnist_cond.yaml b/CCEdit-main/configs/example_training/toy/mnist_cond.yaml new file mode 100644 index 0000000000000000000000000000000000000000..557be128b9493428e3378621173493588069a780 --- /dev/null +++ b/CCEdit-main/configs/example_training/toy/mnist_cond.yaml @@ -0,0 +1,99 @@ +model: + base_learning_rate: 1.0e-4 + target: sgm.models.diffusion.DiffusionEngine + params: + denoiser_config: + target: sgm.modules.diffusionmodules.denoiser.Denoiser + params: + weighting_config: + target: sgm.modules.diffusionmodules.denoiser_weighting.EDMWeighting + params: + sigma_data: 1.0 + scaling_config: + target: sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling + params: + sigma_data: 1.0 + + network_config: + target: sgm.modules.diffusionmodules.openaimodel.UNetModel + params: + use_checkpoint: True + in_channels: 1 + out_channels: 1 + model_channels: 32 + attention_resolutions: [ ] + num_res_blocks: 4 + channel_mult: [ 1, 2, 2 ] + num_head_channels: 32 + num_classes: sequential + adm_in_channels: 128 + + conditioner_config: + target: sgm.modules.GeneralConditioner + params: + emb_models: + - is_trainable: True + input_key: 
"cls" + ucg_rate: 0.2 + target: sgm.modules.encoders.modules.ClassEmbedder + params: + embed_dim: 128 + n_classes: 10 + + first_stage_config: + target: sgm.models.autoencoder.IdentityFirstStage + + loss_fn_config: + target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss + params: + sigma_sampler_config: + target: sgm.modules.diffusionmodules.sigma_sampling.EDMSampling + + sampler_config: + target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler + params: + num_steps: 50 + + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization + + guider_config: + target: sgm.modules.diffusionmodules.guiders.VanillaCFG + params: + scale: 3.0 + +data: + target: sgm.data.mnist.MNISTLoader + params: + batch_size: 512 + num_workers: 1 + +lightning: + modelcheckpoint: + params: + every_n_train_steps: 5000 + + callbacks: + metrics_over_trainsteps_checkpoint: + params: + every_n_train_steps: 25000 + + image_logger: + target: main.ImageLogger + params: + disabled: False + batch_frequency: 1000 + max_images: 16 + increase_log_steps: True + log_first_step: False + log_images_kwargs: + use_ema_scope: False + N: 16 + n_rows: 4 + + trainer: + devices: 0, + benchmark: True + num_sanity_val_steps: 0 + accumulate_grad_batches: 1 + max_epochs: 20 \ No newline at end of file diff --git a/CCEdit-main/configs/example_training/toy/mnist_cond_discrete_eps.yaml b/CCEdit-main/configs/example_training/toy/mnist_cond_discrete_eps.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f92b4cdf08e65d0cdc61f852e079be06b562f2f7 --- /dev/null +++ b/CCEdit-main/configs/example_training/toy/mnist_cond_discrete_eps.yaml @@ -0,0 +1,104 @@ +model: + base_learning_rate: 1.0e-4 + target: sgm.models.diffusion.DiffusionEngine + params: + denoiser_config: + target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser + params: + num_idx: 1000 + + weighting_config: + target: sgm.modules.diffusionmodules.denoiser_weighting.EDMWeighting + 
scaling_config: + target: sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + network_config: + target: sgm.modules.diffusionmodules.openaimodel.UNetModel + params: + use_checkpoint: True + in_channels: 1 + out_channels: 1 + model_channels: 32 + attention_resolutions: [ ] + num_res_blocks: 4 + channel_mult: [ 1, 2, 2 ] + num_head_channels: 32 + num_classes: sequential + adm_in_channels: 128 + + conditioner_config: + target: sgm.modules.GeneralConditioner + params: + emb_models: + - is_trainable: True + input_key: "cls" + ucg_rate: 0.2 + target: sgm.modules.encoders.modules.ClassEmbedder + params: + embed_dim: 128 + n_classes: 10 + + first_stage_config: + target: sgm.models.autoencoder.IdentityFirstStage + + loss_fn_config: + target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss + params: + sigma_sampler_config: + target: sgm.modules.diffusionmodules.sigma_sampling.DiscreteSampling + params: + num_idx: 1000 + + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + sampler_config: + target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler + params: + num_steps: 50 + + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + guider_config: + target: sgm.modules.diffusionmodules.guiders.VanillaCFG + params: + scale: 5.0 + +data: + target: sgm.data.mnist.MNISTLoader + params: + batch_size: 512 + num_workers: 1 + +lightning: + modelcheckpoint: + params: + every_n_train_steps: 5000 + + callbacks: + metrics_over_trainsteps_checkpoint: + params: + every_n_train_steps: 25000 + + image_logger: + target: main.ImageLogger + params: + disabled: False + batch_frequency: 1000 + max_images: 16 + increase_log_steps: True + log_first_step: False + log_images_kwargs: + use_ema_scope: False + N: 16 + n_rows: 4 + + trainer: + devices: 0, + benchmark: True + 
num_sanity_val_steps: 0 + accumulate_grad_batches: 1 + max_epochs: 20 \ No newline at end of file diff --git a/CCEdit-main/configs/example_training/toy/mnist_cond_l1_loss.yaml b/CCEdit-main/configs/example_training/toy/mnist_cond_l1_loss.yaml new file mode 100644 index 0000000000000000000000000000000000000000..42b153004a925aaee147ff35cac574be79db8a80 --- /dev/null +++ b/CCEdit-main/configs/example_training/toy/mnist_cond_l1_loss.yaml @@ -0,0 +1,104 @@ +model: + base_learning_rate: 1.0e-4 + target: sgm.models.diffusion.DiffusionEngine + params: + denoiser_config: + target: sgm.modules.diffusionmodules.denoiser.Denoiser + params: + weighting_config: + target: sgm.modules.diffusionmodules.denoiser_weighting.EDMWeighting + params: + sigma_data: 1.0 + scaling_config: + target: sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling + params: + sigma_data: 1.0 + + network_config: + target: sgm.modules.diffusionmodules.openaimodel.UNetModel + params: + use_checkpoint: True + in_channels: 1 + out_channels: 1 + model_channels: 32 + attention_resolutions: [] + num_res_blocks: 4 + channel_mult: [1, 2, 2] + num_head_channels: 32 + num_classes: "sequential" + adm_in_channels: 128 + + conditioner_config: + target: sgm.modules.GeneralConditioner + params: + emb_models: + - is_trainable: True + input_key: "cls" + ucg_rate: 0.2 + target: sgm.modules.encoders.modules.ClassEmbedder + params: + embed_dim: 128 + n_classes: 10 + + first_stage_config: + target: sgm.models.autoencoder.IdentityFirstStage + + loss_fn_config: + target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss + params: + sigma_sampler_config: + target: sgm.modules.diffusionmodules.sigma_sampling.EDMSampling + + sampler_config: + target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler + params: + num_steps: 50 + + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization + + guider_config: + target: sgm.modules.diffusionmodules.guiders.VanillaCFG + params: + scale: 3.0 + 
+ loss_config: + target: sgm.modules.diffusionmodules.StandardDiffusionLoss + params: + type: l1 + +data: + target: sgm.data.mnist.MNISTLoader + params: + batch_size: 512 + num_workers: 1 + +lightning: + modelcheckpoint: + params: + every_n_train_steps: 5000 + + callbacks: + metrics_over_trainsteps_checkpoint: + params: + every_n_train_steps: 25000 + + image_logger: + target: main.ImageLogger + params: + disabled: False + batch_frequency: 1000 + max_images: 64 + increase_log_steps: True + log_first_step: False + log_images_kwargs: + use_ema_scope: False + N: 64 + n_rows: 8 + + trainer: + devices: 0, + benchmark: True + num_sanity_val_steps: 0 + accumulate_grad_batches: 1 + max_epochs: 20 \ No newline at end of file diff --git a/CCEdit-main/configs/example_training/toy/mnist_cond_with_ema.yaml b/CCEdit-main/configs/example_training/toy/mnist_cond_with_ema.yaml new file mode 100644 index 0000000000000000000000000000000000000000..632e8b420b6a61d69e53911b70fd61b6304ec2dc --- /dev/null +++ b/CCEdit-main/configs/example_training/toy/mnist_cond_with_ema.yaml @@ -0,0 +1,101 @@ +model: + base_learning_rate: 1.0e-4 + target: sgm.models.diffusion.DiffusionEngine + params: + use_ema: True + + denoiser_config: + target: sgm.modules.diffusionmodules.denoiser.Denoiser + params: + weighting_config: + target: sgm.modules.diffusionmodules.denoiser_weighting.EDMWeighting + params: + sigma_data: 1.0 + scaling_config: + target: sgm.modules.diffusionmodules.denoiser_scaling.EDMScaling + params: + sigma_data: 1.0 + + network_config: + target: sgm.modules.diffusionmodules.openaimodel.UNetModel + params: + use_checkpoint: True + in_channels: 1 + out_channels: 1 + model_channels: 32 + attention_resolutions: [] + num_res_blocks: 4 + channel_mult: [1, 2, 2] + num_head_channels: 32 + num_classes: sequential + adm_in_channels: 128 + + conditioner_config: + target: sgm.modules.GeneralConditioner + params: + emb_models: + - is_trainable: True + input_key: cls + ucg_rate: 0.2 + target: 
sgm.modules.encoders.modules.ClassEmbedder + params: + embed_dim: 128 + n_classes: 10 + + first_stage_config: + target: sgm.models.autoencoder.IdentityFirstStage + + loss_fn_config: + target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss + params: + sigma_sampler_config: + target: sgm.modules.diffusionmodules.sigma_sampling.EDMSampling + + sampler_config: + target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler + params: + num_steps: 50 + + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization + + guider_config: + target: sgm.modules.diffusionmodules.guiders.VanillaCFG + params: + scale: 3.0 + +data: + target: sgm.data.mnist.MNISTLoader + params: + batch_size: 512 + num_workers: 1 + +lightning: + modelcheckpoint: + params: + every_n_train_steps: 5000 + + callbacks: + metrics_over_trainsteps_checkpoint: + params: + every_n_train_steps: 25000 + + image_logger: + target: main.ImageLogger + params: + disabled: False + batch_frequency: 1000 + max_images: 64 + increase_log_steps: True + log_first_step: False + log_images_kwargs: + use_ema_scope: False + N: 64 + n_rows: 8 + + trainer: + devices: 0, + benchmark: True + num_sanity_val_steps: 0 + accumulate_grad_batches: 1 + max_epochs: 20 \ No newline at end of file diff --git a/CCEdit-main/configs/inference/sd_2_1.yaml b/CCEdit-main/configs/inference/sd_2_1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..22bb63d197ed1427a4e41b5508c8b2a1a99760ff --- /dev/null +++ b/CCEdit-main/configs/inference/sd_2_1.yaml @@ -0,0 +1,66 @@ +model: + target: sgm.models.diffusion.DiffusionEngine + params: + scale_factor: 0.18215 + disable_first_stage_autocast: True + + denoiser_config: + target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser + params: + num_idx: 1000 + + weighting_config: + target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting + scaling_config: + target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling + 
discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + network_config: + target: sgm.modules.diffusionmodules.openaimodel.UNetModel + params: + use_checkpoint: True + use_fp16: True + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [4, 2, 1] + num_res_blocks: 2 + channel_mult: [1, 2, 4, 4] + num_head_channels: 64 + use_spatial_transformer: True + use_linear_in_transformer: True + transformer_depth: 1 + context_dim: 1024 + legacy: False + + conditioner_config: + target: sgm.modules.GeneralConditioner + params: + emb_models: + # crossattn cond + - is_trainable: False + input_key: txt + target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder + params: + freeze: true + layer: penultimate + + first_stage_config: + target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [1, 2, 4, 4] + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity \ No newline at end of file diff --git a/CCEdit-main/configs/inference/sd_2_1_768.yaml b/CCEdit-main/configs/inference/sd_2_1_768.yaml new file mode 100644 index 0000000000000000000000000000000000000000..71a0a121f74c406bda32490bae19e4438d5a1018 --- /dev/null +++ b/CCEdit-main/configs/inference/sd_2_1_768.yaml @@ -0,0 +1,66 @@ +model: + target: sgm.models.diffusion.DiffusionEngine + params: + scale_factor: 0.18215 + disable_first_stage_autocast: True + + denoiser_config: + target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser + params: + num_idx: 1000 + + weighting_config: + target: sgm.modules.diffusionmodules.denoiser_weighting.VWeighting + scaling_config: + target: sgm.modules.diffusionmodules.denoiser_scaling.VScaling + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + network_config: 
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel + params: + use_checkpoint: True + use_fp16: True + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [4, 2, 1] + num_res_blocks: 2 + channel_mult: [1, 2, 4, 4] + num_head_channels: 64 + use_spatial_transformer: True + use_linear_in_transformer: True + transformer_depth: 1 + context_dim: 1024 + legacy: False + + conditioner_config: + target: sgm.modules.GeneralConditioner + params: + emb_models: + # crossattn cond + - is_trainable: False + input_key: txt + target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder + params: + freeze: true + layer: penultimate + + first_stage_config: + target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [1, 2, 4, 4] + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity \ No newline at end of file diff --git a/CCEdit-main/configs/inference/sd_xl_refiner.yaml b/CCEdit-main/configs/inference/sd_xl_refiner.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cab5fe283d77bf86e0f29e99f3ed0d3c7d9c752f --- /dev/null +++ b/CCEdit-main/configs/inference/sd_xl_refiner.yaml @@ -0,0 +1,91 @@ +model: + target: sgm.models.diffusion.DiffusionEngine + params: + scale_factor: 0.13025 + disable_first_stage_autocast: True + + denoiser_config: + target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser + params: + num_idx: 1000 + + weighting_config: + target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting + scaling_config: + target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + network_config: + target: sgm.modules.diffusionmodules.openaimodel.UNetModel + params: + adm_in_channels: 2560 + 
num_classes: sequential + use_checkpoint: True + in_channels: 4 + out_channels: 4 + model_channels: 384 + attention_resolutions: [4, 2] + num_res_blocks: 2 + channel_mult: [1, 2, 4, 4] + num_head_channels: 64 + use_spatial_transformer: True + use_linear_in_transformer: True + transformer_depth: 4 + context_dim: [1280, 1280, 1280, 1280] # 1280 + spatial_transformer_attn_type: softmax-xformers + legacy: False + + conditioner_config: + target: sgm.modules.GeneralConditioner + params: + emb_models: + # crossattn and vector cond + - is_trainable: False + input_key: txt + target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2 + params: + arch: ViT-bigG-14 + version: laion2b_s39b_b160k + legacy: False + freeze: True + layer: penultimate + always_return_pooled: True + # vector cond + - is_trainable: False + input_key: original_size_as_tuple + target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND + params: + outdim: 256 # multiplied by two + # vector cond + - is_trainable: False + input_key: crop_coords_top_left + target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND + params: + outdim: 256 # multiplied by two + # vector cond + - is_trainable: False + input_key: aesthetic_score + target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND + params: + outdim: 256 # multiplied by one + + first_stage_config: + target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + attn_type: vanilla-xformers + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [1, 2, 4, 4] + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity diff --git a/CCEdit-main/configs/inference_ccedit/keyframe_no2ndca_depthmidas.yaml b/CCEdit-main/configs/inference_ccedit/keyframe_no2ndca_depthmidas.yaml new file mode 100644 index 0000000000000000000000000000000000000000..37be042508e454538c645f4aa3def47e35fe24e0 --- /dev/null +++ 
b/CCEdit-main/configs/inference_ccedit/keyframe_no2ndca_depthmidas.yaml @@ -0,0 +1,93 @@ +NumSteps: &NumSteps 30 + +model: + target: sgm.models.diffusion.VideoDiffusionEngineTV2V + params: + use_ema: False # Default is False + scale_factor: 0.18215 + disable_first_stage_autocast: True + log_keys: + - txt + freeze_model: spatial + + denoiser_config: + target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser + params: + num_idx: 1000 + + weighting_config: + target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting + scaling_config: + target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + network_config: + target: sgm.modules.diffusionmodules.controlmodel.ControlledUNetModel3DTV2V + params: + use_checkpoint: False + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [4, 2, 1] + num_res_blocks: 2 + channel_mult: [1, 2, 4, 4] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + legacy: False + disable_temporal_text_ca: True + controlnet_config: + target: sgm.modules.diffusionmodules.controlmodel.ControlNet2D + params: + use_checkpoint: False + in_channels: 4 + hint_channels: 3 + model_channels: 320 + attention_resolutions: [4, 2, 1] + num_res_blocks: 2 + channel_mult: [1, 2, 4, 4] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + legacy: False + control_scales: 1.0 + + conditioner_config: + target: sgm.modules.GeneralConditioner + params: + emb_models: + # crossattn cond + - is_trainable: False + input_key: txt + ucg_rate: 0.5 + legacy_ucg_value: "" + target: sgm.modules.encoders.modules.FrozenCLIPEmbedder + params: + freeze: true + - is_trainable: False + input_key: control_hint + ucg_rate: 0.01 + target: sgm.modules.encoders.modules.DepthMidasEncoder + + first_stage_config: + target: 
sgm.models.autoencoder.AutoencoderKLInferenceWrapper + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [1, 2, 4, 4] + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + diff --git a/CCEdit-main/configs/inference_ccedit/keyframe_ref_cp_no2ndca_add_cfca_depthzoe.yaml b/CCEdit-main/configs/inference_ccedit/keyframe_ref_cp_no2ndca_add_cfca_depthzoe.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1477e243d41c43d328c3cfd62c733e15363fad3f --- /dev/null +++ b/CCEdit-main/configs/inference_ccedit/keyframe_ref_cp_no2ndca_add_cfca_depthzoe.yaml @@ -0,0 +1,130 @@ +model: + target: sgm.models.diffusion.VideoDiffusionEngineTV2V + params: + use_ema: False # Default is False + scale_factor: 0.18215 + disable_first_stage_autocast: True + log_keys: + - txt + freeze_model: spatial + + scheduler_config: + target: sgm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: [ 1000 ] + cycle_lengths: [ 10000000000000 ] + f_start: [ 1.e-6 ] + f_max: [ 1. ] + f_min: [ 1. 
] + + denoiser_config: + target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser + params: + num_idx: 1000 + + weighting_config: + target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting + scaling_config: + target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + network_config: + target: sgm.modules.diffusionmodules.controlmodel.ControlledUNetModel3DTV2V + params: + use_checkpoint: True + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [4, 2, 1] + num_res_blocks: 2 + channel_mult: [1, 2, 4, 4] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + legacy: False + disable_temporal_text_ca: True + # -> use "temporal_ca" modules + enable_attention3d_crossframe: True + ST3DCA_ca_type: 'center_self' + # crossframe_type: 'reference' # not use the reference image as k,v, so comment it. + controlnet_config: + target: sgm.modules.diffusionmodules.controlmodel.ControlNet2D + params: + use_checkpoint: True + in_channels: 4 + hint_channels: 3 + model_channels: 320 + attention_resolutions: [4, 2, 1] + num_res_blocks: 2 + channel_mult: [1, 2, 4, 4] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + legacy: False + control_scales: 1.0 + controlnet_img_config: # process the anchor frame + target: sgm.modules.diffusionmodules.controlmodel.ControlNet2D + params: + use_checkpoint: True + in_channels: 4 + hint_channels: 3 + model_channels: 320 + attention_resolutions: [4, 2, 1] + num_res_blocks: 2 + channel_mult: [1, 2, 4, 4] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + legacy: False + # -> add on center frame, strengthen the control + control_scales: 1.0 + # control_scales: 0.0 # use crossattention, instead of add in controlnet + # -> not add the noised x to controlnet_img + no_add_x: True # no need 
to add x + set_input_hint_block_as_identity: True # ATTENTION: newly added. default: False + # -> disbale the text cross attention in controlnet_img + disable_text_ca: True + + conditioner_config: + target: sgm.modules.GeneralConditioner + params: + emb_models: + # crossattn cond + - is_trainable: False + input_key: txt + ucg_rate: 0.5 + legacy_ucg_value: "" + target: sgm.modules.encoders.modules.FrozenCLIPEmbedder + params: + freeze: true + - is_trainable: False + input_key: control_hint + ucg_rate: 0.0 + target: sgm.modules.encoders.modules.DepthZoeEncoder + - is_trainable: False + input_key: cond_img + ucg_rate: 0.0 + target: sgm.modules.encoders.modules.VAEEmbedder + + first_stage_config: + target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [1, 2, 4, 4] + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity \ No newline at end of file diff --git a/CCEdit-main/scripts/__init__.py b/CCEdit-main/scripts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/CCEdit-main/sgm/__init__.py b/CCEdit-main/sgm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cc9c7dc57106df294e97880913ef57471d587292 --- /dev/null +++ b/CCEdit-main/sgm/__init__.py @@ -0,0 +1,3 @@ +from .data import StableDataModuleFromConfig +from .models import AutoencodingEngine, DiffusionEngine +from .util import instantiate_from_config diff --git a/CCEdit-main/sgm/__pycache__/__init__.cpython-39.pyc b/CCEdit-main/sgm/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc87c03dd5eca4c77e5b5dde2fde56753d9b7a6d Binary files /dev/null and b/CCEdit-main/sgm/__pycache__/__init__.cpython-39.pyc differ diff --git 
a/CCEdit-main/sgm/__pycache__/util.cpython-39.pyc b/CCEdit-main/sgm/__pycache__/util.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e853af6f48c346748526dc385a39fb0d64029c63 Binary files /dev/null and b/CCEdit-main/sgm/__pycache__/util.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/data/__init__.py b/CCEdit-main/sgm/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7664a25c655c376bd1a7b0ccbaca7b983a2bf9ad --- /dev/null +++ b/CCEdit-main/sgm/data/__init__.py @@ -0,0 +1 @@ +from .dataset import StableDataModuleFromConfig diff --git a/CCEdit-main/sgm/data/__pycache__/__init__.cpython-39.pyc b/CCEdit-main/sgm/data/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28d490ad173c87af04cdfb574b9d4ed7b65d9ca9 Binary files /dev/null and b/CCEdit-main/sgm/data/__pycache__/__init__.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/data/__pycache__/dataset.cpython-39.pyc b/CCEdit-main/sgm/data/__pycache__/dataset.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cae74fabd1d7936bf03563136e54df9029f73c4e Binary files /dev/null and b/CCEdit-main/sgm/data/__pycache__/dataset.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/data/cifar10.py b/CCEdit-main/sgm/data/cifar10.py new file mode 100644 index 0000000000000000000000000000000000000000..aa3ae67772e751164f2f72fb65805be1e90a02dd --- /dev/null +++ b/CCEdit-main/sgm/data/cifar10.py @@ -0,0 +1,67 @@ +import torchvision +import pytorch_lightning as pl +from torchvision import transforms +from torch.utils.data import DataLoader, Dataset + + +class CIFAR10DataDictWrapper(Dataset): + def __init__(self, dset): + super().__init__() + self.dset = dset + + def __getitem__(self, i): + x, y = self.dset[i] + return {"jpg": x, "cls": y} + + def __len__(self): + return len(self.dset) + + +class CIFAR10Loader(pl.LightningDataModule): + def __init__(self, batch_size, num_workers=0, 
shuffle=True): + super().__init__() + + transform = transforms.Compose( + [transforms.ToTensor(), transforms.Lambda(lambda x: x * 2.0 - 1.0)] + ) + + self.batch_size = batch_size + self.num_workers = num_workers + self.shuffle = shuffle + self.train_dataset = CIFAR10DataDictWrapper( + torchvision.datasets.CIFAR10( + root=".data/", train=True, download=True, transform=transform + ) + ) + self.test_dataset = CIFAR10DataDictWrapper( + torchvision.datasets.CIFAR10( + root=".data/", train=False, download=True, transform=transform + ) + ) + + def prepare_data(self): + pass + + def train_dataloader(self): + return DataLoader( + self.train_dataset, + batch_size=self.batch_size, + shuffle=self.shuffle, + num_workers=self.num_workers, + ) + + def test_dataloader(self): + return DataLoader( + self.test_dataset, + batch_size=self.batch_size, + shuffle=self.shuffle, + num_workers=self.num_workers, + ) + + def val_dataloader(self): + return DataLoader( + self.test_dataset, + batch_size=self.batch_size, + shuffle=self.shuffle, + num_workers=self.num_workers, + ) diff --git a/CCEdit-main/sgm/data/dataset.py b/CCEdit-main/sgm/data/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..b726149996591c6c3db69230e1bb68c07d2faa12 --- /dev/null +++ b/CCEdit-main/sgm/data/dataset.py @@ -0,0 +1,80 @@ +from typing import Optional + +import torchdata.datapipes.iter +import webdataset as wds +from omegaconf import DictConfig +from pytorch_lightning import LightningDataModule + +try: + from sdata import create_dataset, create_dummy_dataset, create_loader +except ImportError as e: + print("#" * 100) + print("Datasets not yet available") + print("to enable, we need to add stable-datasets as a submodule") + print("please use ``git submodule update --init --recursive``") + print("and do ``pip install -e stable-datasets/`` from the root of this repo") + print("#" * 100) + exit(1) + + +class StableDataModuleFromConfig(LightningDataModule): + def __init__( + self, + train: 
DictConfig, + validation: Optional[DictConfig] = None, + test: Optional[DictConfig] = None, + skip_val_loader: bool = False, + dummy: bool = False, + ): + super().__init__() + self.train_config = train + assert ( + "datapipeline" in self.train_config and "loader" in self.train_config + ), "train config requires the fields `datapipeline` and `loader`" + + self.val_config = validation + if not skip_val_loader: + if self.val_config is not None: + assert ( + "datapipeline" in self.val_config and "loader" in self.val_config + ), "validation config requires the fields `datapipeline` and `loader`" + else: + print( + "Warning: No Validation datapipeline defined, using that one from training" + ) + self.val_config = train + + self.test_config = test + if self.test_config is not None: + assert ( + "datapipeline" in self.test_config and "loader" in self.test_config + ), "test config requires the fields `datapipeline` and `loader`" + + self.dummy = dummy + if self.dummy: + print("#" * 100) + print("USING DUMMY DATASET: HOPE YOU'RE DEBUGGING ;)") + print("#" * 100) + + def setup(self, stage: str) -> None: + print("Preparing datasets") + if self.dummy: + data_fn = create_dummy_dataset + else: + data_fn = create_dataset + + self.train_datapipeline = data_fn(**self.train_config.datapipeline) + if self.val_config: + self.val_datapipeline = data_fn(**self.val_config.datapipeline) + if self.test_config: + self.test_datapipeline = data_fn(**self.test_config.datapipeline) + + def train_dataloader(self) -> torchdata.datapipes.iter.IterDataPipe: + loader = create_loader(self.train_datapipeline, **self.train_config.loader) + return loader + + def val_dataloader(self) -> wds.DataPipeline: + return create_loader(self.val_datapipeline, **self.val_config.loader) + + def test_dataloader(self) -> wds.DataPipeline: + return create_loader(self.test_datapipeline, **self.test_config.loader) diff --git a/CCEdit-main/sgm/data/detaset_webvid.py b/CCEdit-main/sgm/data/detaset_webvid.py new file mode 
100644 index 0000000000000000000000000000000000000000..985593b8cdbcd7625a4cacce1db55ff012f672c2 --- /dev/null +++ b/CCEdit-main/sgm/data/detaset_webvid.py @@ -0,0 +1,182 @@ +import pytorch_lightning as pl +from functools import partial +from sgm.util import ( + exists, + instantiate_from_config, + isheatmap, +) +import torch +import numpy as np +from torch.utils.data import random_split, DataLoader, Dataset, Subset, IterableDataset +from abc import abstractmethod + + +class Txt2ImgIterableBaseDataset(IterableDataset): + """ + Define an interface to make the IterableDatasets for text2img data chainable + """ + + def __init__(self, num_records=0, valid_ids=None, size=256): + super().__init__() + self.num_records = num_records + self.valid_ids = valid_ids + self.sample_ids = valid_ids + self.size = size + + print(f"{self.__class__.__name__} dataset contains {self.__len__()} examples.") + + def __len__(self): + return self.num_records + + @abstractmethod + def __iter__(self): + pass + + +class WrappedDataset(Dataset): + """Wraps an arbitrary object with __len__ and __getitem__ into a pytorch dataset""" + + def __init__(self, dataset): + self.data = dataset + + def __len__(self): + return len(self.data) + + def __getitem__(self, idx): + return self.data[idx] + + +def worker_init_fn(_): + worker_info = torch.utils.data.get_worker_info() + + dataset = worker_info.dataset + worker_id = worker_info.id + + if isinstance(dataset, Txt2ImgIterableBaseDataset): + split_size = dataset.num_records // worker_info.num_workers + # reset num_records to the true number to retain reliable length information + dataset.sample_ids = dataset.valid_ids[ + worker_id * split_size : (worker_id + 1) * split_size + ] + current_id = np.random.choice(len(np.random.get_state()[1]), 1) + return np.random.seed(np.random.get_state()[1][current_id] + worker_id) + else: + return np.random.seed(np.random.get_state()[1][0] + worker_id) + + +class DataModuleFromConfig(pl.LightningDataModule): + def 
__init__( + self, + batch_size, + train=None, + validation=None, + test=None, + predict=None, + wrap=False, + num_workers=None, + shuffle_test_loader=False, + use_worker_init_fn=False, + shuffle_val_dataloader=False, + ): + super().__init__() + self.batch_size = batch_size + self.dataset_configs = dict() + self.num_workers = num_workers if num_workers is not None else batch_size * 2 + self.use_worker_init_fn = use_worker_init_fn + if train is not None: + self.dataset_configs["train"] = train + self.train_dataloader = self._train_dataloader + if validation is not None: + self.dataset_configs["validation"] = validation + self.val_dataloader = partial( + self._val_dataloader, shuffle=shuffle_val_dataloader + ) + if test is not None: + self.dataset_configs["test"] = test + self.test_dataloader = partial( + self._test_dataloader, shuffle=shuffle_test_loader + ) + if predict is not None: + self.dataset_configs["predict"] = predict + self.predict_dataloader = self._predict_dataloader + self.wrap = wrap + + def prepare_data(self): + for data_cfg in self.dataset_configs.values(): + instantiate_from_config(data_cfg) + + def setup(self, stage=None): + self.datasets = dict( + (k, instantiate_from_config(self.dataset_configs[k])) + for k in self.dataset_configs + ) + if self.wrap: + for k in self.datasets: + self.datasets[k] = WrappedDataset(self.datasets[k]) + + def _train_dataloader(self): + is_iterable_dataset = isinstance( + self.datasets["train"], Txt2ImgIterableBaseDataset + ) + if is_iterable_dataset or self.use_worker_init_fn: + init_fn = worker_init_fn + else: + init_fn = None + return DataLoader( + self.datasets["train"], + batch_size=self.batch_size, + num_workers=self.num_workers, + shuffle=False if is_iterable_dataset else True, + worker_init_fn=init_fn, + ) + + def _val_dataloader(self, shuffle=False): + if ( + isinstance(self.datasets["validation"], Txt2ImgIterableBaseDataset) + or self.use_worker_init_fn + ): + init_fn = worker_init_fn + else: + init_fn = None + 
return DataLoader( + self.datasets["validation"], + batch_size=self.batch_size, + num_workers=self.num_workers, + worker_init_fn=init_fn, + shuffle=shuffle, + ) + + def _test_dataloader(self, shuffle=False): + is_iterable_dataset = isinstance( + self.datasets["train"], Txt2ImgIterableBaseDataset + ) + if is_iterable_dataset or self.use_worker_init_fn: + init_fn = worker_init_fn + else: + init_fn = None + + # do not shuffle dataloader for iterable dataset + shuffle = shuffle and (not is_iterable_dataset) + + return DataLoader( + self.datasets["test"], + batch_size=self.batch_size, + num_workers=self.num_workers, + worker_init_fn=init_fn, + shuffle=shuffle, + ) + + def _predict_dataloader(self, shuffle=False): + if ( + isinstance(self.datasets["predict"], Txt2ImgIterableBaseDataset) + or self.use_worker_init_fn + ): + init_fn = worker_init_fn + else: + init_fn = None + return DataLoader( + self.datasets["predict"], + batch_size=self.batch_size, + num_workers=self.num_workers, + worker_init_fn=init_fn, + ) diff --git a/CCEdit-main/sgm/data/mnist.py b/CCEdit-main/sgm/data/mnist.py new file mode 100644 index 0000000000000000000000000000000000000000..ab7478f40580fe3a69017c316d5dd9407b15ecd5 --- /dev/null +++ b/CCEdit-main/sgm/data/mnist.py @@ -0,0 +1,85 @@ +import torchvision +import pytorch_lightning as pl +from torchvision import transforms +from torch.utils.data import DataLoader, Dataset + + +class MNISTDataDictWrapper(Dataset): + def __init__(self, dset): + super().__init__() + self.dset = dset + + def __getitem__(self, i): + x, y = self.dset[i] + return {"jpg": x, "cls": y} + + def __len__(self): + return len(self.dset) + + +class MNISTLoader(pl.LightningDataModule): + def __init__(self, batch_size, num_workers=0, prefetch_factor=2, shuffle=True): + super().__init__() + + transform = transforms.Compose( + [transforms.ToTensor(), transforms.Lambda(lambda x: x * 2.0 - 1.0)] + ) + + self.batch_size = batch_size + self.num_workers = num_workers + self.prefetch_factor = 
prefetch_factor if num_workers > 0 else 0 + self.shuffle = shuffle + self.train_dataset = MNISTDataDictWrapper( + torchvision.datasets.MNIST( + root=".data/", train=True, download=True, transform=transform + ) + ) + self.test_dataset = MNISTDataDictWrapper( + torchvision.datasets.MNIST( + root=".data/", train=False, download=True, transform=transform + ) + ) + + def prepare_data(self): + pass + + def train_dataloader(self): + return DataLoader( + self.train_dataset, + batch_size=self.batch_size, + shuffle=self.shuffle, + num_workers=self.num_workers, + prefetch_factor=self.prefetch_factor, + ) + + def test_dataloader(self): + return DataLoader( + self.test_dataset, + batch_size=self.batch_size, + shuffle=self.shuffle, + num_workers=self.num_workers, + prefetch_factor=self.prefetch_factor, + ) + + def val_dataloader(self): + return DataLoader( + self.test_dataset, + batch_size=self.batch_size, + shuffle=self.shuffle, + num_workers=self.num_workers, + prefetch_factor=self.prefetch_factor, + ) + + +if __name__ == "__main__": + dset = MNISTDataDictWrapper( + torchvision.datasets.MNIST( + root=".data/", + train=False, + download=True, + transform=transforms.Compose( + [transforms.ToTensor(), transforms.Lambda(lambda x: x * 2.0 - 1.0)] + ), + ) + ) + ex = dset[0] diff --git a/CCEdit-main/sgm/data/webvid/base_video_dataset.py b/CCEdit-main/sgm/data/webvid/base_video_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..b76c1cce31fa1bfa2bd33fcdd80f57bfdc7d6a38 --- /dev/null +++ b/CCEdit-main/sgm/data/webvid/base_video_dataset.py @@ -0,0 +1,521 @@ +import torch +import os +import random +from abc import abstractmethod + +import av +import cv2 +import decord +import numpy as np +from PIL import Image +from torch.utils.data import Dataset, get_worker_info +from torchvision import transforms + + +# def init_transform_dict( +# input_res_h=224, +# input_res_w=224, +# randcrop_scale=(0.5, 1.0), +# color_jitter=(0, 0, 0), +# norm_mean=(0.5, 0.5, 0.5), +# 
def init_transform_dict(
    input_res_h=224,
    input_res_w=224,
    randcrop_scale=(0.5, 1.0),
    color_jitter=(0, 0, 0),
    norm_mean=(0.5, 0.5, 0.5),
    norm_std=(0.5, 0.5, 0.5),
):
    """Return a {'train'|'val'|'test': transform} dict.

    All three splits currently share the same resize -> center-crop ->
    normalize pipeline; `randcrop_scale` and `color_jitter` are accepted for
    interface compatibility but unused here.
    """
    # todo: this implementation might cause bug sometimes.
    # todo: make it safer, please.
    def _pipeline():
        return transforms.Compose(
            [
                transforms.Resize(input_res_h, antialias=True),
                transforms.CenterCrop((input_res_h, input_res_w)),
                transforms.Normalize(mean=norm_mean, std=norm_std),
            ]
        )

    return {split: _pipeline() for split in ("train", "val", "test")}
text_params, video_params) + # WebVid {'input': 'text'} {'input_res': 224, 'num_frames': 1, 'loading': 'lax'} + self.dataset_name = dataset_name + self.text_params = text_params + self.video_params = video_params + # check for environment variables + self.data_dir = os.path.expandvars(data_dir) + if metadata_dir is not None: + self.metadata_dir = os.path.expandvars(metadata_dir) + else: + self.metadata_dir = self.data_dir + # added parameters + self.metadata_folder_name = metadata_folder_name + self.first_stage_key = first_stage_key + self.cond_stage_key = cond_stage_key + self.skip = skip_missing_files + self.lack_files = [] + self.split = split + self.key = key + tsfm_params = ( + {} + if "tsfm_params" not in video_params.keys() + else video_params["tsfm_params"] + ) + # tsfm_params['input_res'] = video_params['input_res'] + tsfm_params["input_res_h"] = video_params["input_res_h"] + tsfm_params["input_res_w"] = video_params["input_res_w"] + tsfm_dict = init_transform_dict(**tsfm_params) + + if split not in ["train", "val", "test"]: + print( + 'Warning: split is not in ["train", "val", "test"], ' + 'what you set is "{}", ' + 'set it to "train"'.format(split) + ) + split = "train" + + tsfms = tsfm_dict[split] + + self.transforms = tsfms + self.cut = cut + self.subsample = subsample + self.sliding_window_stride = sliding_window_stride + self.video_reader = video_reader[reader] + self.label_type = "caption" + self.frame_sample = video_params.get("frame_sample", "proportional") + self._load_metadata() + if self.sliding_window_stride != -1: + if self.split != "test": + raise ValueError( + "Fixing frame sampling is for test time only. can remove but..." 
+ ) + self._fix_temporal_samples() + self.use_control_hint = use_control_hint + self.random_cond_img = random_cond_img + + @abstractmethod + def _load_metadata(self): + raise NotImplementedError("Metadata loading must be implemented by subclass") + + @abstractmethod + def _get_video_path(self, sample): + raise NotImplementedError( + "Get video path function must be implemented by subclass" + ) + + def _get_caption(self, sample): + raise NotImplementedError( + "Get caption function must be implemented by subclass" + ) + + def _get_video_lens(self): + vlen_li = [] + for idx, row in self.metadata.iterrows(): + video_path = self._get_video_path(row)[0] + vlen_li.append(get_video_len(video_path)) + + return vlen_li + + def _fix_temporal_samples(self): + self.metadata["vlen"] = self._get_video_lens() + self.metadata["frame_intervals"] = self.metadata["vlen"].apply( + lambda x: np.linspace( + start=0, stop=x, num=min(x, self.video_params["num_frames"]) + 1 + ).astype(int) + ) + self.metadata["fix_start"] = self.metadata["frame_intervals"].apply( + lambda x: np.arange(0, int(x[-1] / len(x - 1)), self.sliding_window_stride) + ) + self.metadata = self.metadata.explode("fix_start") + + def __len__(self): + return len(self.metadata) + + def __getitem__(self, item): + item = item % len(self.metadata) + sample = self.metadata.iloc[item] + video_fp, rel_fp = self._get_video_path(sample) + # if not os.path.exists(video_fp): + # return self.__getitem__(np.random.choice(self.__len__())) + caption = self._get_caption(sample) + + video_loading = self.video_params.get("loading", "strict") # + # frame_sample = 'rand' + fix_start = None + # if self.split == 'test': + # frame_sample = 'uniform' + if self.sliding_window_stride != -1: + fix_start = sample["fix_start"] + + try: + if os.path.isfile(video_fp): + # imgs, idxs = self.video_reader(video_fp, self.video_params['num_frames'], frame_sample, + # fix_start=fix_start) + if self.frame_sample == "equally spaced": + sample_factor = 
self.video_params.get("es_interval", 10) + elif self.frame_sample == "proportional": + sample_factor = self.video_params.get("prop_factor", 3) + imgs, idxs = self.video_reader( + video_fp, + self.video_params["num_frames"], + self.frame_sample, + fix_start=fix_start, + sample_factor=sample_factor, + ) + if self.random_cond_img: + random_cond_img, _ = self.video_reader( + video_fp, + 1, + self.frame_sample, + fix_start=fix_start, + sample_factor=sample_factor, + ) + else: + print_str = f"Warning: missing video file {video_fp}." + if video_fp not in self.lack_files: + self.lack_files.append(video_fp) + if self.skip: + print_str += " Resampling another video." + print(print_str) + return self.__getitem__(np.random.choice(self.__len__())) + else: + print(print_str) + assert False + + except Exception as e: + if video_loading == "strict": + raise ValueError( + f"Video loading failed for {video_fp}, video loading for this dataset is strict." + ) from e + else: + print("Warning: using the pure black image as the frame sample") + # imgs = Image.new('RGB', (self.video_params['input_res'], self.video_params['input_res']), (0, 0, 0)) + imgs = Image.new( + "RGB", + ( + self.video_params["input_res_w"], + self.video_params["input_res_h"], + ), + (0, 0, 0), + ) + imgs = transforms.ToTensor()(imgs).unsqueeze(0) + if self.random_cond_img: + random_cond_img = Image.new( + "RGB", + ( + self.video_params["input_res_w"], + self.video_params["input_res_h"], + ), + (0, 0, 0), + ) + random_cond_img = transforms.ToTensor()(random_cond_img).unsqueeze(0) + + if self.transforms is not None: + imgs = self.transforms(imgs) # normalize or 2 * x - 1 ? 
+ + # final = torch.zeros([self.video_params['num_frames'], 3, self.video_params['input_res'], + # self.video_params['input_res']]) + final = torch.zeros( + [ + self.video_params["num_frames"], + 3, + self.video_params["input_res_h"], + self.video_params["input_res_w"], + ] + ) + + final[: imgs.shape[0]] = imgs + if self.random_cond_img: + # import pdb; pdb.set_trace() + # import torchvision + # torchvision.utils.save_image(random_cond_img, 'debug_random_cond_img.png', normalize=True) + # torchvision.utils.save_image(imgs, 'debug_imgs.png', normalize=True) + cond_img = self.transforms(random_cond_img).squeeze(0) + else: + cond_img = final[final.shape[0] // 2, ...] + final = final.permute(1, 0, 2, 3) # (C, T, H, W) + interpolate_first_last = final[:, [0, -1], ...] + + meta_arr = { + "raw_captions": caption, + "paths": rel_fp, + "dataset": self.dataset_name, + } + data = { + self.first_stage_key: final, + self.cond_stage_key: caption, + "cond_img": cond_img, + 'interpolate_first_last': interpolate_first_last, + "original_size_as_tuple": torch.tensor( + [self.video_params["input_res_w"], self.video_params["input_res_h"]] + ), # TODO only for debug + "target_size_as_tuple": torch.tensor( + [self.video_params["input_res_w"], self.video_params["input_res_h"]] + ), # TODO only for debug + "crop_coords_top_left": torch.tensor([0, 0]), # TODO only for debug + "meta": meta_arr, + } + if self.use_control_hint: + data["control_hint"] = final + return data + + +class TextImageDataset(TextVideoDataset): + def __getitem__(self, item): + item = item % len(self.metadata) + sample = self.metadata.iloc[item] + video_fp, rel_fp = self._get_video_path(sample) + caption = self._get_caption(sample) + + video_loading = self.video_params.get("loading", "strict") + + try: + img = Image.open(video_fp).convert("RGB") + except: + if video_loading == "strict": + raise ValueError( + f"Image loading failed for {video_fp}, image loading for this dataset is strict." 
+ ) + else: + # img = Image.new('RGB', (self.video_params['input_res'], self.video_params['input_res']), (0, 0, 0)) + img = Image.new( + "RGB", + ( + self.video_params["input_res_w"], + self.video_params["input_res_h"], + ), + (0, 0, 0), + ) + + # convert to tensor because video transforms don't, expand such that its a 1-frame video. + img = transforms.ToTensor()(img).unsqueeze(0) + if self.transforms is not None: + img = self.transforms(img) + meta_arr = { + "raw_captions": caption, + "paths": rel_fp, + "dataset": self.dataset_name, + } + data = {"video": img, "text": caption, "meta": meta_arr} + return data + + +def sample_frames( + num_frames, vlen, sample="rand", fix_start=None, **kwargs +): # TBD, what do you need + """ + num_frames: The number of frames to sample. + vlen: The length of the video. + sample: The sampling method. + choices of frame_sample: + - 'equally spaced': sample frames equally spaced + e.g.,1s video has 30 frames, when 'es_interval'=8, we sample frames with spacing of 8 + - 'proportional': sample frames proportional to the length of the frames in one second + e.g., 1s video has 30 frames, when 'prop_factor'=3, we sample frames with spacing of 30/3=10 + - 'random': sample frames randomly (not recommended) + - 'uniform': sample frames uniformly (not recommended) + fix_start: The starting frame index. If it is not None, then it will be used as the starting frame index. 
+ """ + acc_samples = min(num_frames, vlen) + if sample in ["rand", "uniform"]: + intervals = np.linspace(start=0, stop=vlen, num=acc_samples + 1).astype(int) + ranges = [] + for idx, interv in enumerate(intervals[:-1]): + ranges.append((interv, intervals[idx + 1] - 1)) + if sample == "rand": + frame_idxs = [random.choice(range(x[0], x[1])) for x in ranges] + elif fix_start is not None: + frame_idxs = [x[0] + fix_start for x in ranges] + elif sample == "uniform": + frame_idxs = [(x[0] + x[1]) // 2 for x in ranges] + elif sample in ["equally spaced", "proportional"]: + if sample == "equally spaced": + raise NotImplementedError # need to pass in the corresponding parameters + else: + interval = round(kwargs["fps"] / kwargs["sample_factor"]) + needed_frames = (acc_samples - 1) * interval + + if fix_start is not None: + start = fix_start + else: + if vlen - needed_frames - 1 < 0: + start = 0 + else: + start = random.randint(0, vlen - needed_frames - 1) + frame_idxs = np.linspace( + start=start, stop=min(vlen - 1, start + needed_frames), num=acc_samples + ).astype(int) + else: + raise NotImplementedError + + return frame_idxs + + +def read_frames_cv2(video_path, num_frames, sample="rand", fix_start=None, **kwargs): + cap = cv2.VideoCapture(video_path) + assert cap.isOpened() + vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + # frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start) + # get indexes of sampled frames + fps = cap.get(cv2.CAP_PROP_FPS) # not verified yet, might cause bug. 
def read_frames_av(video_path, num_frames, sample="rand", fix_start=None, **kwargs):
    """Decode a video with PyAV and sample frames from it.

    Returns (frames, frame_idxs) where frames is a float tensor
    (N, C, H, W) in [0, 1]. kwargs must provide 'sample_factor' (forwarded
    to sample_frames together with the stream's average fps).
    """
    reader = av.open(video_path)
    try:
        try:
            # Decode the whole stream up front; indices are sampled afterwards.
            frames = [
                torch.from_numpy(f.to_rgb().to_ndarray())
                for f in reader.decode(video=0)
            ]
        except (RuntimeError, ZeroDivisionError) as exception:
            print(
                "{}: WEBM reader cannot open {}. Empty "
                "list returned.".format(type(exception).__name__, video_path)
            )
            frames = []
        # average_rate is the container-reported mean fps; presumably a
        # Fraction — fine for the division in sample_frames. TODO confirm.
        fps = reader.streams.video[0].average_rate
    finally:
        # FIX: the original never closed the container, leaking one file
        # handle per call; dataloader workers eventually hit the fd limit.
        reader.close()

    if not frames:
        # FIX: torch.stack on an empty list raises an opaque RuntimeError;
        # fail with context instead (callers' except-handlers still apply).
        raise ValueError(f"No frames decoded from {video_path}")

    vlen = len(frames)
    frame_idxs = sample_frames(
        num_frames,
        vlen,
        sample=sample,
        fix_start=fix_start,
        fps=fps,
        sample_factor=kwargs["sample_factor"],
    )
    frames = torch.stack([frames[idx] for idx in frame_idxs]).float() / 255
    frames = frames.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W)
    return frames, frame_idxs
def get_video_len(video_path):
    """Return the frame count of `video_path`, or 0 if it cannot be opened.

    The original returned `False` on failure; 0 compares equal to False for
    existing callers but is always usable as an integer length (e.g. by
    np.linspace in _fix_temporal_samples).
    """
    cap = cv2.VideoCapture(video_path)
    try:
        if not cap.isOpened():
            return 0
        return int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    finally:
        # FIX: the original leaked the capture handle on the failure path.
        cap.release()
+ """ + + def _load_metadata(self): + assert self.metadata_folder_name is not None + assert self.cut is not None + metadata_dir = os.path.join(self.metadata_dir, self.metadata_folder_name) + if self.key is None: + metadata_fp = os.path.join( + metadata_dir, f"results_{self.cut}_{self.split}.csv" + ) + else: + metadata_fp = os.path.join( + metadata_dir, f"results_{self.cut}_{self.split}_{self.key}.csv" + ) + print(metadata_fp) + metadata = pd.read_csv( + metadata_fp, + on_bad_lines="skip", + encoding="ISO-8859-1", + engine="python", + sep=",", + ) + + if self.subsample < 1: + metadata = metadata.sample(frac=self.subsample) + elif self.split == "val": + try: + metadata = metadata.sample(1000, random_state=0) + except: + print( + "there are less than 1000 samples in the val set, thus no downsampling is done" + ) + pass + + metadata["caption"] = metadata["name"] + del metadata["name"] + self.metadata = metadata + self.metadata.dropna(inplace=True) + + def _get_video_path(self, sample): + rel_video_fp = str(sample["videoid"]) + ".mp4" + full_video_fp = os.path.join(self.data_dir, rel_video_fp) + if not os.path.exists(full_video_fp): + full_video_fp = os.path.join(self.data_dir, "videos", rel_video_fp) + return full_video_fp, rel_video_fp + + def _get_caption(self, sample): + return sample["caption"] + + +if __name__ == "__main__": + from tqdm import tqdm + import imageio + import argparse + import numpy as np + + parser = argparse.ArgumentParser() + parser.add_argument("--out_path", type=str, default=None) + parser.add_argument("--num_frames", type=int, default=17) + parser.add_argument("--motion_scale", type=int, default=4) + opt = parser.parse_known_args()[0] + + def write_text_to_file(text, file_path): + with open(file_path, "w") as file: + file.write(text) + + config = { + "dataset_name": "WebVid", + "data_dir": "/msra_data/videos_rmwm", + "metadata_dir": "/msra_data", + "split": "val", + "cut": "2M", + "key": "wmrm_all", + "subsample": 1, + "text_params": {"input": 
"text"}, + "video_params": { + "input_res_h": 320, # todo: check the input_res_h + "input_res_w": 320, # todo: check the input_res_w + "tsfm_params": { + "norm_mean": [0.5, 0.5, 0.5], + "norm_std": [0.5, 0.5, 0.5], + }, + "num_frames": opt.num_frames, + "prop_factor": 30, + "loading": "lax", + }, + "metadata_folder_name": "webvid10m_meta", + "first_stage_key": "jpg", + "cond_stage_key": "txt", + "skip_missing_files": False, + } + + dataset = WebVid(**config) + length = dataset.__len__() + + txt_out_path = os.path.join( + opt.out_path, f"num{opt.num_frames}_ms{opt.motion_scale}", "txt" + ) + video_out_high_path = os.path.join( + opt.out_path, f"num{opt.num_frames}_ms{opt.motion_scale}", "videoHigh" + ) + video_out_low_path = os.path.join( + opt.out_path, f"num{opt.num_frames}_ms{opt.motion_scale}", "videoLow" + ) + os.makedirs(txt_out_path, exist_ok=True) + os.makedirs(video_out_high_path, exist_ok=True) + os.makedirs(video_out_low_path, exist_ok=True) + + for idx in tqdm(range(length)): + print(idx) + item = dataset.__getitem__(idx) + video = item["jpg"] + txt = item["txt"] + + video_new = ( + ((video.transpose(3, 1) * 0.5 + 0.5).clamp(0, 1) * 255.0) + .numpy() + .astype(np.uint8) + ) + video_list = [img for img in video_new] + imageio.mimsave( + os.path.join(video_out_high_path, f"{idx:09d}.gif"), + video_list, + duration=1, + loops=1, + ) + imageio.mimsave( + os.path.join(video_out_low_path, f"{idx:09d}.gif"), + video_list[:: opt.motion_scale], + duration=1 * opt.motion_scale, + loops=1, + ) + + write_text_to_file(txt, os.path.join(txt_out_path, f"{idx:09d}.txt")) diff --git a/CCEdit-main/sgm/lr_scheduler.py b/CCEdit-main/sgm/lr_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..b2f4d384c1fcaff0df13e0564450d3fa972ace42 --- /dev/null +++ b/CCEdit-main/sgm/lr_scheduler.py @@ -0,0 +1,135 @@ +import numpy as np + + +class LambdaWarmUpCosineScheduler: + """ + note: use with a base_lr of 1.0 + """ + + def __init__( + self, + 
class LambdaWarmUpCosineScheduler:
    """Linear warm-up followed by a half-cosine decay.

    Produces an LR *multiplier*; use with a base_lr of 1.0.
    """

    def __init__(
        self,
        warm_up_steps,
        lr_min,
        lr_max,
        lr_start,
        max_decay_steps,
        verbosity_interval=0,
    ):
        self.lr_warm_up_steps = warm_up_steps
        self.lr_start = lr_start
        self.lr_min = lr_min
        self.lr_max = lr_max
        self.lr_max_decay_steps = max_decay_steps
        self.last_lr = 0.0
        self.verbosity_interval = verbosity_interval

    def schedule(self, n, **kwargs):
        """Return the multiplier for global step n (remembered in last_lr)."""
        if self.verbosity_interval > 0 and n % self.verbosity_interval == 0:
            print(f"current step: {n}, recent lr-multiplier: {self.last_lr}")
        if n < self.lr_warm_up_steps:
            # Linear ramp from lr_start up to lr_max over the warm-up window.
            lr = (
                self.lr_max - self.lr_start
            ) / self.lr_warm_up_steps * n + self.lr_start
        else:
            # Cosine decay from lr_max down to lr_min; progress is clamped so
            # steps beyond max_decay_steps stay at lr_min.
            progress = (n - self.lr_warm_up_steps) / (
                self.lr_max_decay_steps - self.lr_warm_up_steps
            )
            progress = min(progress, 1.0)
            lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * (
                1 + np.cos(progress * np.pi)
            )
        self.last_lr = lr
        return lr

    def __call__(self, n, **kwargs):
        return self.schedule(n, **kwargs)
+ """ + + def __init__( + self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0 + ): + assert ( + len(warm_up_steps) + == len(f_min) + == len(f_max) + == len(f_start) + == len(cycle_lengths) + ) + self.lr_warm_up_steps = warm_up_steps + self.f_start = f_start + self.f_min = f_min + self.f_max = f_max + self.cycle_lengths = cycle_lengths + self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths)) + self.last_f = 0.0 + self.verbosity_interval = verbosity_interval + + def find_in_interval(self, n): + interval = 0 + for cl in self.cum_cycles[1:]: + if n <= cl: + return interval + interval += 1 + + def schedule(self, n, **kwargs): + cycle = self.find_in_interval(n) + n = n - self.cum_cycles[cycle] + if self.verbosity_interval > 0: + if n % self.verbosity_interval == 0: + print( + f"current step: {n}, recent lr-multiplier: {self.last_f}, " + f"current cycle {cycle}" + ) + if n < self.lr_warm_up_steps[cycle]: + f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[ + cycle + ] * n + self.f_start[cycle] + self.last_f = f + return f + else: + t = (n - self.lr_warm_up_steps[cycle]) / ( + self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle] + ) + t = min(t, 1.0) + f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * ( + 1 + np.cos(t * np.pi) + ) + self.last_f = f + return f + + def __call__(self, n, **kwargs): + return self.schedule(n, **kwargs) + + +class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2): + def schedule(self, n, **kwargs): + cycle = self.find_in_interval(n) + n = n - self.cum_cycles[cycle] + if self.verbosity_interval > 0: + if n % self.verbosity_interval == 0: + print( + f"current step: {n}, recent lr-multiplier: {self.last_f}, " + f"current cycle {cycle}" + ) + + if n < self.lr_warm_up_steps[cycle]: + f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[ + cycle + ] * n + self.f_start[cycle] + self.last_f = f + return f + else: + f = self.f_min[cycle] + (self.f_max[cycle] 
class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2):
    """Same per-cycle warm-up as the parent, but with linear decay to f_min."""

    def schedule(self, n, **kwargs):
        cycle = self.find_in_interval(n)
        n = n - self.cum_cycles[cycle]  # step index within the cycle
        if self.verbosity_interval > 0 and n % self.verbosity_interval == 0:
            print(
                f"current step: {n}, recent lr-multiplier: {self.last_f}, "
                f"current cycle {cycle}"
            )

        warm_up = self.lr_warm_up_steps[cycle]
        if n < warm_up:
            # Linear warm-up from f_start to f_max.
            f = (self.f_max[cycle] - self.f_start[cycle]) / warm_up * n + self.f_start[
                cycle
            ]
        else:
            # Linear decay toward f_min over the remainder of the cycle.
            f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (
                self.cycle_lengths[cycle] - n
            ) / (self.cycle_lengths[cycle])
        self.last_f = f
        return f
class AbstractAutoencoder(pl.LightningModule):
    """
    This is the base class for all autoencoders, including image autoencoders, image autoencoders with discriminators,
    unCLIP models, etc. Hence, it is fairly general, and specific features
    (e.g. discriminator training, encoding, decoding) must be implemented in subclasses.
    """

    def __init__(
        self,
        ema_decay: Union[None, float] = None,  # EMA is enabled iff this is set
        monitor: Union[None, str] = None,  # metric name for checkpointing
        input_key: str = "jpg",  # batch-dict key holding the input tensor
        ckpt_path: Union[None, str] = None,  # optional .ckpt/.safetensors to restore
        ignore_keys: Union[Tuple, list, ListConfig] = (),  # regexes of keys to drop
    ):
        super().__init__()
        self.input_key = input_key
        self.use_ema = ema_decay is not None
        if monitor is not None:
            self.monitor = monitor

        if self.use_ema:
            # Shadow copy of all parameters, updated after every train batch.
            self.model_ema = LitEma(self, decay=ema_decay)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)

        # NOTE(review): gates on torch's version, presumably as a proxy for
        # the pytorch-lightning 2.x API (manual optimization) — confirm.
        if version.parse(torch.__version__) >= version.parse("2.0.0"):
            self.automatic_optimization = False

    def init_from_ckpt(
        self, path: str, ignore_keys: Union[Tuple, list, ListConfig] = tuple()
    ) -> None:
        """Restore weights from a Lightning .ckpt or a .safetensors file.

        Keys matching any regex in `ignore_keys` are dropped before loading;
        loading is non-strict, and missing/unexpected keys are printed.
        """
        if path.endswith("ckpt"):
            sd = torch.load(path, map_location="cpu")["state_dict"]
        elif path.endswith("safetensors"):
            sd = load_safetensors(path)
        else:
            raise NotImplementedError

        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                # NOTE(review): if two ignore patterns match the same key,
                # the second `del` would raise KeyError — confirm patterns
                # are mutually exclusive in configs.
                if re.match(ik, k):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        missing, unexpected = self.load_state_dict(sd, strict=False)
        print(
            f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys"
        )
        if len(missing) > 0:
            print(f"Missing Keys: {missing}")
        if len(unexpected) > 0:
            print(f"Unexpected Keys: {unexpected}")

    @abstractmethod
    def get_input(self, batch) -> Any:
        # Subclasses extract the model input tensor from the batch dict.
        raise NotImplementedError()

    def on_train_batch_end(self, *args, **kwargs):
        # for EMA computation
        if self.use_ema:
            self.model_ema(self)

    @contextmanager
    def ema_scope(self, context=None):
        """Temporarily swap in the EMA weights; restores training weights on exit.

        A no-op when EMA is disabled. `context` is only a label for logging.
        """
        if self.use_ema:
            self.model_ema.store(self.parameters())
            self.model_ema.copy_to(self)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    @abstractmethod
    def encode(self, *args, **kwargs) -> torch.Tensor:
        raise NotImplementedError("encode()-method of abstract base class called")

    @abstractmethod
    def decode(self, *args, **kwargs) -> torch.Tensor:
        raise NotImplementedError("decode()-method of abstract base class called")

    def instantiate_optimizer_from_config(self, params, lr, cfg):
        """Build the optimizer named by cfg['target'] over `params` at `lr`."""
        print(f"loading >>> {cfg['target']} <<< optimizer from config")
        return get_obj_from_str(cfg["target"])(
            params, lr=lr, **cfg.get("params", dict())
        )

    def configure_optimizers(self) -> Any:
        # Subclasses decide how many optimizers (e.g. AE + discriminator).
        raise NotImplementedError()
+ """ + + def __init__( + self, + *args, + encoder_config: Dict, + decoder_config: Dict, + loss_config: Dict, + regularizer_config: Dict, + optimizer_config: Union[Dict, None] = None, + lr_g_factor: float = 1.0, + **kwargs, + ): + super().__init__(*args, **kwargs) + # todo: add options to freeze encoder/decoder + self.encoder = instantiate_from_config(encoder_config) + self.decoder = instantiate_from_config(decoder_config) + self.loss = instantiate_from_config(loss_config) + self.regularization = instantiate_from_config(regularizer_config) + self.optimizer_config = default( + optimizer_config, {"target": "torch.optim.Adam"} + ) + self.lr_g_factor = lr_g_factor + + def get_input(self, batch: Dict) -> torch.Tensor: + # assuming unified data format, dataloader returns a dict. + # image tensors should be scaled to -1 ... 1 and in channels-first format (e.g., bchw instead if bhwc) + return batch[self.input_key] + + def get_autoencoder_params(self) -> list: + params = ( + list(self.encoder.parameters()) + + list(self.decoder.parameters()) + + list(self.regularization.get_trainable_parameters()) + + list(self.loss.get_trainable_autoencoder_parameters()) + ) + return params + + def get_discriminator_params(self) -> list: + params = list(self.loss.get_trainable_parameters()) # e.g., discriminator + return params + + def get_last_layer(self): + return self.decoder.get_last_layer() + + def encode(self, x: Any, return_reg_log: bool = False) -> Any: + z = self.encoder(x) + z, reg_log = self.regularization(z) + if return_reg_log: + return z, reg_log + return z + + def decode(self, z: Any) -> torch.Tensor: + x = self.decoder(z) + return x + + def forward(self, x: Any) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + z, reg_log = self.encode(x, return_reg_log=True) + dec = self.decode(z) + return z, dec, reg_log + + def training_step(self, batch, batch_idx, optimizer_idx) -> Any: + x = self.get_input(batch) + z, xrec, regularization_log = self(x) + + if optimizer_idx == 0: + 
# autoencode + aeloss, log_dict_ae = self.loss( + regularization_log, + x, + xrec, + optimizer_idx, + self.global_step, + last_layer=self.get_last_layer(), + split="train", + ) + + self.log_dict( + log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True + ) + return aeloss + + if optimizer_idx == 1: + # discriminator + discloss, log_dict_disc = self.loss( + regularization_log, + x, + xrec, + optimizer_idx, + self.global_step, + last_layer=self.get_last_layer(), + split="train", + ) + self.log_dict( + log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True + ) + return discloss + + def validation_step(self, batch, batch_idx) -> Dict: + log_dict = self._validation_step(batch, batch_idx) + with self.ema_scope(): + log_dict_ema = self._validation_step(batch, batch_idx, postfix="_ema") + log_dict.update(log_dict_ema) + return log_dict + + def _validation_step(self, batch, batch_idx, postfix="") -> Dict: + x = self.get_input(batch) + + z, xrec, regularization_log = self(x) + aeloss, log_dict_ae = self.loss( + regularization_log, + x, + xrec, + 0, + self.global_step, + last_layer=self.get_last_layer(), + split="val" + postfix, + ) + + discloss, log_dict_disc = self.loss( + regularization_log, + x, + xrec, + 1, + self.global_step, + last_layer=self.get_last_layer(), + split="val" + postfix, + ) + self.log(f"val{postfix}/rec_loss", log_dict_ae[f"val{postfix}/rec_loss"]) + log_dict_ae.update(log_dict_disc) + self.log_dict(log_dict_ae) + return log_dict_ae + + def configure_optimizers(self) -> Any: + ae_params = self.get_autoencoder_params() + disc_params = self.get_discriminator_params() + + opt_ae = self.instantiate_optimizer_from_config( + ae_params, + default(self.lr_g_factor, 1.0) * self.learning_rate, + self.optimizer_config, + ) + opt_disc = self.instantiate_optimizer_from_config( + disc_params, self.learning_rate, self.optimizer_config + ) + + return [opt_ae, opt_disc], [] + + @torch.no_grad() + def log_images(self, batch: Dict, **kwargs) 
class AutoencoderKL(AutoencodingEngine):
    """KL-regularized autoencoder built on AutoencodingEngine.

    The engine base is configured with identity stubs; the concrete
    Encoder/Decoder pair and the quant convolutions are attached here
    from ``ddconfig``.
    """

    def __init__(self, embed_dim: int, **kwargs):
        ddconfig = kwargs.pop("ddconfig")
        ckpt_path = kwargs.pop("ckpt_path", None)
        ignore_keys = kwargs.pop("ignore_keys", ())
        # Identity stubs satisfy the base class; real modules are built below.
        super().__init__(
            encoder_config={"target": "torch.nn.Identity"},
            decoder_config={"target": "torch.nn.Identity"},
            regularizer_config={"target": "torch.nn.Identity"},
            loss_config=kwargs.pop("lossconfig"),
            **kwargs,
        )
        assert ddconfig["double_z"]
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        z_ch = ddconfig["z_channels"]
        # double_z: encoder emits mean and logvar channels, hence the factor 2.
        self.quant_conv = torch.nn.Conv2d(2 * z_ch, 2 * embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, z_ch, 1)
        self.embed_dim = embed_dim

        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)

    def encode(self, x):
        """Encode a (B, C, H, W) batch into a DiagonalGaussianDistribution."""
        assert (
            not self.training
        ), f"{self.__class__.__name__} only supports inference currently"
        assert x.dim() == 4
        moments = self.quant_conv(self.encoder(x))
        return DiagonalGaussianDistribution(moments)

    def decode(self, z, **decoder_kwargs):
        """Map a latent ``z`` back to image space."""
        return self.decoder(self.post_quant_conv(z), **decoder_kwargs)
class IdentityFirstStage(AbstractAutoencoder):
    """No-op first stage: inputs pass through encode/decode unchanged.

    Useful when the diffusion model should operate directly in data space
    rather than in a learned latent space.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def get_input(self, x: Any) -> Any:
        # The batch is already in the target space; nothing to extract.
        return x

    def encode(self, x: Any, *args, **kwargs) -> Any:
        # Identity mapping; extra args accepted for interface compatibility.
        return x

    def decode(self, x: Any, *args, **kwargs) -> Any:
        # Identity mapping; extra args accepted for interface compatibility.
        return x
disable_first_stage_autocast=False, + input_key: str = "jpg", + log_keys: Union[List, None] = None, + no_cond_log: bool = False, + compile_model: bool = False, + ): + super().__init__() + self.log_keys = log_keys + self.input_key = input_key + self.optimizer_config = default( + optimizer_config, {"target": "torch.optim.AdamW"} + ) + model = instantiate_from_config(network_config) + self.model = get_obj_from_str(default(network_wrapper, OPENAIUNETWRAPPER))( + model, compile_model=compile_model + ) + + self.denoiser = instantiate_from_config(denoiser_config) + self.sampler = ( + instantiate_from_config(sampler_config) + if sampler_config is not None + else None + ) + self.conditioner = instantiate_from_config( + default(conditioner_config, UNCONDITIONAL_CONFIG) + ) + self.scheduler_config = scheduler_config + self._init_first_stage(first_stage_config) + + self.loss_fn = ( + instantiate_from_config(loss_fn_config) + if loss_fn_config is not None + else None + ) + + self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self.model, decay=ema_decay_rate) + print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + self.scale_factor = scale_factor + self.disable_first_stage_autocast = disable_first_stage_autocast + self.no_cond_log = no_cond_log + + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path) + + def init_from_ckpt( + self, + path: str, + ) -> None: + if path.endswith("ckpt"): + sd = torch.load(path, map_location="cpu")["state_dict"] + elif path.endswith("safetensors"): + sd = load_safetensors(path) + else: + raise NotImplementedError + + missing, unexpected = self.load_state_dict(sd, strict=False) + print( + f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" + ) + if len(missing) > 0: + print(f"Missing Keys: {missing}") + if len(unexpected) > 0: + print(f"Unexpected Keys: {unexpected}") + + def _init_first_stage(self, config): + model = instantiate_from_config(config).eval() + model.train = 
disabled_train + for param in model.parameters(): + param.requires_grad = False + self.first_stage_model = model + + def get_input(self, batch): + # assuming unified data format, dataloader returns a dict. + # image tensors should be scaled to -1 ... 1 and in bchw format + return batch[self.input_key] + + @torch.no_grad() + def decode_first_stage(self, z): + z = 1.0 / self.scale_factor * z + with torch.autocast("cuda", enabled=not self.disable_first_stage_autocast): + out = self.first_stage_model.decode(z) + return out + + @torch.no_grad() + def encode_first_stage(self, x): + with torch.autocast("cuda", enabled=not self.disable_first_stage_autocast): + z = self.first_stage_model.encode(x) + z = self.scale_factor * z + return z + + def forward(self, x, batch): + loss = self.loss_fn(self.model, self.denoiser, self.conditioner, x, batch) + loss_mean = loss.mean() + loss_dict = {"loss": loss_mean} + return loss_mean, loss_dict + + def shared_step(self, batch: Dict) -> Any: + x = self.get_input(batch) + x = self.encode_first_stage(x) + batch["global_step"] = self.global_step + loss, loss_dict = self(x, batch) + return loss, loss_dict + + def training_step(self, batch, batch_idx): + loss, loss_dict = self.shared_step(batch) + + self.log_dict( + loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=False + ) + + self.log( + "global_step", + self.global_step, + prog_bar=True, + logger=True, + on_step=True, + on_epoch=False, + ) + + if self.scheduler_config is not None: + lr = self.optimizers().param_groups[0]["lr"] + self.log( + "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False + ) + + return loss + + def on_train_start(self, *args, **kwargs): + if self.sampler is None or self.loss_fn is None: + raise ValueError("Sampler and loss function need to be set for training.") + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self.model) + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + 
self.model_ema.store(self.model.parameters()) + self.model_ema.copy_to(self.model) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.model.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + def instantiate_optimizer_from_config(self, params, lr, cfg): + return get_obj_from_str(cfg["target"])( + params, lr=lr, **cfg.get("params", dict()) + ) + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + for embedder in self.conditioner.embedders: + if embedder.is_trainable: + params = params + list(embedder.parameters()) + opt = self.instantiate_optimizer_from_config(params, lr, self.optimizer_config) + if self.scheduler_config is not None: + scheduler = instantiate_from_config(self.scheduler_config) + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + "scheduler": LambdaLR(opt, lr_lambda=scheduler.schedule), + "interval": "step", + "frequency": 1, + } + ] + return [opt], scheduler + return opt + + @torch.no_grad() + def sample( + self, + cond: Dict, + uc: Union[Dict, None] = None, + batch_size: int = 16, + shape: Union[None, Tuple, List] = None, + **kwargs, + ): + randn = torch.randn(batch_size, *shape).to(self.device) + + denoiser = lambda input, sigma, c: self.denoiser( + self.model, input, sigma, c, **kwargs + ) + samples = self.sampler(denoiser, randn, cond, uc=uc) + return samples + + @torch.no_grad() + def log_conditionings(self, batch: Dict, n: int) -> Dict: + """ + Defines heuristics to log different conditionings. + These can be lists of strings (text-to-image), tensors, ints, ... 
+ """ + image_h, image_w = batch[self.input_key].shape[2:] + log = dict() + + for embedder in self.conditioner.embedders: + if ( + (self.log_keys is None) or (embedder.input_key in self.log_keys) + ) and not self.no_cond_log: + x = batch[embedder.input_key][:n] + if isinstance(x, torch.Tensor): + if x.dim() == 1: + # class-conditional, convert integer to string + x = [str(x[i].item()) for i in range(x.shape[0])] + xc = log_txt_as_img((image_h, image_w), x, size=image_h // 4) + elif x.dim() == 2: + # size and crop cond and the like + x = [ + "x".join([str(xx) for xx in x[i].tolist()]) + for i in range(x.shape[0]) + ] + xc = log_txt_as_img((image_h, image_w), x, size=image_h // 20) + else: + raise NotImplementedError() + elif isinstance(x, (List, ListConfig)): + if isinstance(x[0], str): + # strings + xc = log_txt_as_img((image_h, image_w), x, size=image_h // 20) + else: + raise NotImplementedError() + else: + raise NotImplementedError() + log[embedder.input_key] = xc + return log + + @torch.no_grad() + def log_images( + self, + batch: Dict, + N: int = 8, + sample: bool = True, + ucg_keys: List[str] = None, + **kwargs, + ) -> Dict: + conditioner_input_keys = [e.input_key for e in self.conditioner.embedders] + if ucg_keys: + assert all(map(lambda x: x in conditioner_input_keys, ucg_keys)), ( + "Each defined ucg key for sampling must be in the provided conditioner input keys," + f"but we have {ucg_keys} vs. 
{conditioner_input_keys}" + ) + else: + ucg_keys = conditioner_input_keys + log = dict() + + x = self.get_input(batch) + + c, uc = self.conditioner.get_unconditional_conditioning( + batch, + force_uc_zero_embeddings=ucg_keys + if len(self.conditioner.embedders) > 0 + else [], + ) + + sampling_kwargs = {} + + N = min(x.shape[0], N) + x = x.to(self.device)[:N] + log["inputs"] = x + z = self.encode_first_stage(x) + log["reconstructions"] = self.decode_first_stage(z) + log.update(self.log_conditionings(batch, N)) + + for k in c: + if isinstance(c[k], torch.Tensor): + c[k], uc[k] = map(lambda y: y[k][:N].to(self.device), (c, uc)) + + if sample: + with self.ema_scope("Plotting"): + samples = self.sample( + c, shape=z.shape[1:], uc=uc, batch_size=N, **sampling_kwargs + ) + samples = self.decode_first_stage(samples) + log["samples"] = samples + return log diff --git a/CCEdit-main/sgm/models/diffusion.py b/CCEdit-main/sgm/models/diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..f5bf3617d0fd8d70c7cea3c64eba20a30f3b0402 --- /dev/null +++ b/CCEdit-main/sgm/models/diffusion.py @@ -0,0 +1,910 @@ +from contextlib import contextmanager +from typing import Any, Dict, List, Tuple, Union + +import einops +import pytorch_lightning as pl +import torch +from omegaconf import ListConfig, OmegaConf +from safetensors.torch import load_file as load_safetensors +from torch.optim.lr_scheduler import LambdaLR + +from sgm.modules.encoders.modules import VAEEmbedder +from sgm.modules.encoders.modules import ( + LineartEncoder, + DepthZoeEncoder, + DepthMidasEncoder, + SoftEdgeEncoder, + NormalBaeEncoder, + ScribbleHEDEncoder, + ScribblePidiNetEncoder, + OpenposeEncoder, + OutpaintingEncoder, + InpaintingEncoder, +) + +import os +import numpy as np +import torch.nn as nn + +from ..modules import UNCONDITIONAL_CONFIG +from ..modules.diffusionmodules.wrappers import ( + OPENAIUNETWRAPPER, + OPENAIUNETWRAPPERRAIG, + OPENAIUNETWRAPPERCONTROLLDM3D, + 
OPENAIUNETWRAPPERCONTROLLDM3DSSN, + OPENAIUNETWRAPPERCONTROLLDM3DTV2V, + OPENAIUNETWRAPPERCONTROLLDM3DTV2V_INTERPOLATE, +) +from ..modules.ema import LitEma +from ..util import ( + default, + disabled_train, + get_obj_from_str, + instantiate_from_config, + log_txt_as_img, +) + +class DiffusionEngine(pl.LightningModule): + def __init__( + self, + network_config, + denoiser_config, + first_stage_config, + conditioner_config: Union[None, Dict, ListConfig, OmegaConf] = None, + sampler_config: Union[None, Dict, ListConfig, OmegaConf] = None, + optimizer_config: Union[None, Dict, ListConfig, OmegaConf] = None, + scheduler_config: Union[None, Dict, ListConfig, OmegaConf] = None, + loss_fn_config: Union[None, Dict, ListConfig, OmegaConf] = None, + network_wrapper: Union[None, str] = None, + ckpt_path: Union[None, str] = None, + use_ema: bool = False, + ema_decay_rate: float = 0.9999, + scale_factor: float = 1.0, + disable_first_stage_autocast=False, + input_key: str = "jpg", + log_keys: Union[List, None] = None, + no_cond_log: bool = False, + compile_model: bool = False, + ): + super().__init__() + self.log_keys = log_keys + self.input_key = input_key + self.optimizer_config = default( + optimizer_config, {"target": "torch.optim.AdamW"} + ) + model = instantiate_from_config(network_config) + wrapper_type = ( + self.wrapper_type if hasattr(self, "wrapper_type") else OPENAIUNETWRAPPER + ) + self.model = get_obj_from_str(default(network_wrapper, wrapper_type))( + model, compile_model=compile_model + ) + + self.denoiser = instantiate_from_config(denoiser_config) + self.sampler = ( + instantiate_from_config(sampler_config) + if sampler_config is not None + else None + ) + self.conditioner = instantiate_from_config( + default(conditioner_config, UNCONDITIONAL_CONFIG) + ) + self.scheduler_config = scheduler_config + self._init_first_stage(first_stage_config) + + self.loss_fn = ( + instantiate_from_config(loss_fn_config) + if loss_fn_config is not None + else None + ) + + 
self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self.model, decay=ema_decay_rate) + print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + self.scale_factor = scale_factor + self.disable_first_stage_autocast = disable_first_stage_autocast + self.no_cond_log = no_cond_log + + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path) + + def init_from_ckpt( + self, + path: str, + ) -> None: + print(f"Loading checkpoint from {path} ... ") + if path.endswith("ckpt"): + # sd = torch.load(path, map_location="cpu")["state_dict"] + if "deepspeed" in path: + sd = torch.load(path, map_location="cpu") + sd = {k.replace("_forward_module.", ""): v for k, v in sd.items()} + else: + sd = torch.load(path, map_location="cpu")["state_dict"] + elif path.endswith("safetensors"): + sd = load_safetensors(path) + else: + raise NotImplementedError + + missing, unexpected = self.load_state_dict(sd, strict=False) + print( + f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" + ) + if len(missing) > 0: + print(f"Missing Keys: {missing}") + if len(unexpected) > 0: + print(f"Unexpected Keys: {unexpected}") + + def _init_first_stage(self, config): + model = instantiate_from_config(config).eval() + model.train = disabled_train + for param in model.parameters(): + param.requires_grad = False + self.first_stage_model = model + + def get_input(self, batch): + # assuming unified data format, dataloader returns a dict. + # image tensors should be scaled to -1 ... 
1 and in bchw format + return batch[self.input_key] + + @torch.no_grad() + def decode_first_stage(self, z): + z = 1.0 / self.scale_factor * z + with torch.autocast("cuda", enabled=not self.disable_first_stage_autocast): + out = self.first_stage_model.decode(z) + return out + + @torch.no_grad() + def encode_first_stage(self, x): + with torch.autocast("cuda", enabled=not self.disable_first_stage_autocast): + z = self.first_stage_model.encode(x) + z = self.scale_factor * z + return z + + def forward(self, x, batch): + loss = self.loss_fn(self.model, self.denoiser, self.conditioner, x, batch) + loss_mean = loss.mean() + loss_dict = {"loss": loss_mean} + return loss_mean, loss_dict + + def shared_step(self, batch: Dict) -> Any: + x = self.get_input(batch) + x = self.encode_first_stage(x) + batch["global_step"] = self.global_step + loss, loss_dict = self(x, batch) + return loss, loss_dict + + def training_step(self, batch, batch_idx): + loss, loss_dict = self.shared_step(batch) + + self.log_dict( + loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=False + ) + + self.log( + "global_step", + self.global_step, + prog_bar=True, + logger=True, + on_step=True, + on_epoch=False, + ) + + if self.scheduler_config is not None: + lr = self.optimizers().param_groups[0]["lr"] + self.log( + "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False + ) + + return loss + + def on_train_start(self, *args, **kwargs): + if self.sampler is None or self.loss_fn is None: + raise ValueError("Sampler and loss function need to be set for training.") + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self.model) + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.model.parameters()) + self.model_ema.copy_to(self.model) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + 
self.model_ema.restore(self.model.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + def instantiate_optimizer_from_config(self, params, lr, cfg): + return get_obj_from_str(cfg["target"])( + params, lr=lr, **cfg.get("params", dict()) + ) + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + for embedder in self.conditioner.embedders: + if embedder.is_trainable: + params = params + list(embedder.parameters()) + opt = self.instantiate_optimizer_from_config(params, lr, self.optimizer_config) + if self.scheduler_config is not None: + scheduler = instantiate_from_config(self.scheduler_config) + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + "scheduler": LambdaLR(opt, lr_lambda=scheduler.schedule), + "interval": "step", + "frequency": 1, + } + ] + return [opt], scheduler + return opt + + @torch.no_grad() + def sample( + self, + cond: Dict, + uc: Union[Dict, None] = None, + batch_size: int = 16, + shape: Union[None, Tuple, List] = None, + **kwargs, + ): + randn = torch.randn(batch_size, *shape).to(self.device) + + denoiser = lambda input, sigma, c: self.denoiser( + self.model, input, sigma, c, **kwargs + ) + samples = self.sampler(denoiser, randn, cond, uc=uc) + return samples + + @torch.no_grad() + def log_conditionings(self, batch: Dict, n: int) -> Dict: + """ + Defines heuristics to log different conditionings. + These can be lists of strings (text-to-image), tensors, ints, ... 
+ """ + image_h, image_w = batch[self.input_key].shape[2:] + log = dict() + + for embedder in self.conditioner.embedders: + if ( + (self.log_keys is None) or (embedder.input_key in self.log_keys) + ) and not self.no_cond_log: + x = batch[embedder.input_key][:n] + if isinstance(x, torch.Tensor): + if x.dim() == 1: + # class-conditional, convert integer to string + x = [str(x[i].item()) for i in range(x.shape[0])] + xc = log_txt_as_img((image_h, image_w), x, size=image_h // 4) + elif x.dim() == 2: + # size and crop cond and the like + x = [ + "x".join([str(xx) for xx in x[i].tolist()]) + for i in range(x.shape[0]) + ] + xc = log_txt_as_img((image_h, image_w), x, size=image_h // 20) + else: + raise NotImplementedError() + elif isinstance(x, (List, ListConfig)): + if isinstance(x[0], str): + # strings + # xc = log_txt_as_img((image_h, image_w), x, size=image_h // 20) + # xc = log_txt_as_img((image_w * 2, image_h), x, size=image_h // 15) + xc = log_txt_as_img( + (image_w * 2, image_h), x, size=image_h // 25 + ) + else: + raise NotImplementedError() + else: + raise NotImplementedError() + log[embedder.input_key] = xc + return log + + @torch.no_grad() + def log_images( + self, + batch: Dict, + N: int = 8, + sample: bool = True, + ucg_keys: List[str] = None, + **kwargs, + ) -> Dict: + conditioner_input_keys = [e.input_key for e in self.conditioner.embedders] + if ucg_keys: + assert all(map(lambda x: x in conditioner_input_keys, ucg_keys)), ( + "Each defined ucg key for sampling must be in the provided conditioner input keys," + f"but we have {ucg_keys} vs. 
{conditioner_input_keys}" + ) + else: + ucg_keys = conditioner_input_keys + log = dict() + + x = self.get_input(batch) + + c, uc = self.conditioner.get_unconditional_conditioning( + batch, + force_uc_zero_embeddings=ucg_keys + if len(self.conditioner.embedders) > 0 + else [], + ) + + sampling_kwargs = {} + + N = min(x.shape[0], N) + x = x.to(self.device)[:N] + log["inputs"] = x + z = self.encode_first_stage(x) + log["reconstructions"] = self.decode_first_stage(z) + log.update(self.log_conditionings(batch, N)) + + for k in c: + if isinstance(c[k], torch.Tensor): + c[k], uc[k] = map(lambda y: y[k][:N].to(self.device), (c, uc)) + + if sample: + with self.ema_scope("Plotting"): + samples = self.sample( + c, shape=z.shape[1:], uc=uc, batch_size=N, **sampling_kwargs + ) + samples = self.decode_first_stage(samples) + log["samples"] = samples + return log + + +class VideoDiffusionEngine(DiffusionEngine): + def __init__( + self, + freeze_model="none", + wrapper_type="OPENAIUNETWRAPPERCONTROLLDM3D", + *args, + **kwargs, + ): + self.wrapper_type = eval(wrapper_type) + super().__init__(*args, **kwargs) + self.freeze_model = freeze_model + + self.setup_vaeembedder() + + def setup_vaeembedder(self): + for embedder in self.conditioner.embedders: + if isinstance(embedder, VAEEmbedder): + embedder.first_stage_model = ( + self.first_stage_model + ) # TODO: should we add .clone() + embedder.disable_first_stage_autocast = ( + self.disable_first_stage_autocast + ) + embedder.scale_factor = self.scale_factor + embedder.freeze() + + def get_input(self, batch): + # assuming unified data format, dataloader returns a dict. + # video tensors should be scaled to -1 ... 
1 and in bcthw format + out_data = batch[self.input_key] + return out_data + + def shared_step(self, batch: Dict) -> Any: + x = self.get_input(batch) + x = self.encode_first_stage(x) + batch["global_step"] = self.global_step + loss, loss_dict = self(x, batch) + return loss, loss_dict + + @torch.no_grad() + def log_conditionings(self, batch: Dict, n: int) -> Dict: + """ + Defines heuristics to log different conditionings. + These can be lists of strings (text-to-image), tensors, ints, ... + """ + image_h, image_w = batch[self.input_key].shape[-2:] + log = dict() + + for embedder in self.conditioner.embedders: + if ( + (self.log_keys is None) or (embedder.input_key in self.log_keys) + ) and not self.no_cond_log: + x = batch[embedder.input_key][:n] + if isinstance(x, torch.Tensor): + if x.dim() == 1: + # class-conditional, convert integer to string + x = [str(x[i].item()) for i in range(x.shape[0])] + xc = log_txt_as_img((image_h, image_w), x, size=image_h // 4) + elif x.dim() == 2: + # size and crop cond and the like + x = [ + "x".join([str(xx) for xx in x[i].tolist()]) + for i in range(x.shape[0]) + ] + xc = log_txt_as_img((image_h, image_w), x, size=image_h // 20) + else: + raise NotImplementedError() + elif isinstance(x, (List, ListConfig)): + if isinstance(x[0], str): + # strings + # xc = log_txt_as_img((image_w, image_h), x, size=image_h // 20) + xc = log_txt_as_img( + (image_w, image_h), x, size=image_h // 10, split_loc=15 + ) + # xc = log_txt_as_img((image_w * 2, image_h), x, size=image_h // 15, split_loc=20) + # xc = log_txt_as_img( + # (image_w * 3, image_h), x, size=image_h // 5, split_loc=15 + # ) + else: + raise NotImplementedError() + else: + raise NotImplementedError() + log[embedder.input_key] = xc + return log + + @torch.no_grad() + def log_images( + self, + batch: Dict, + N: int = 8, + sample: bool = True, + ucg_keys: List[str] = None, + **kwargs, + ) -> Dict: + # TODO: refactor this + conditioner_input_keys = [e.input_key for e in 
self.conditioner.embedders] + if ucg_keys: + assert all(map(lambda x: x in conditioner_input_keys, ucg_keys)), ( + "Each defined ucg key for sampling must be in the provided conditioner input keys," + f"but we have {ucg_keys} vs. {conditioner_input_keys}" + ) + else: + ucg_keys = conditioner_input_keys + log = dict() + + x = self.get_input(batch) + + c, uc = self.conditioner.get_unconditional_conditioning( + batch, + force_uc_zero_embeddings=ucg_keys + if len(self.conditioner.embedders) > 0 + else [], + ) + + sampling_kwargs = { + key: batch[key] for key in self.loss_fn.batch2model_keys.intersection(batch) + } + + N = min(x.shape[0], N) + x = x.to(self.device)[:N] + log["inputs"] = x + log["inputs-video"] = x + log["cond_img"] = batch["cond_img"] + z = self.encode_first_stage(x) + log["reconstructions"] = self.decode_first_stage(z) + log["reconstructions-video"] = self.decode_first_stage(z) + log.update(self.log_conditionings(batch, N)) + + for k in c: + if isinstance(c[k], torch.Tensor): + c[k], uc[k] = map(lambda y: y[k][:N].to(self.device), (c, uc)) + + if sample: + with self.ema_scope("Plotting"): + samples = self.sample( + c, shape=z.shape[1:], uc=uc, batch_size=N, **sampling_kwargs + ) + samples = self.decode_first_stage(samples) + log["samples"] = samples + log["samples-video"] = samples + + # concat the inputs and outputs for visualization + log["inputs_samples"] = torch.cat([log["inputs"], log["samples"]], dim=3) + del log["inputs"] + del log["samples"] + # log['inputs_samples-video'] = torch.cat([log['inputs-video'], log['samples-video']], dim=3) + # del log['inputs-video'] + # del log['samples-video'] + return log + + def configure_optimizers(self): + lr = self.learning_rate + + if self.freeze_model == "none": + params = list(self.model.diffusion_model.parameters()) + for name, param in self.model.diffusion_model.named_parameters(): + print(f"Setting {name} to trainable") + param.requires_grad = True # TODO: why this? 
+ elif self.freeze_model == "spatial": + params = [] + if hasattr(self.model.diffusion_model, "controlnet"): + params += list(self.model.diffusion_model.controlnet.parameters()) + for name, param in self.model.diffusion_model.named_parameters(): + if "controlnet" not in name: + if "temporal" in name: + params.append(param) + else: + param.requires_grad = False + elif self.freeze_model == "spatial_openlora": + params = [] + if hasattr(self.model.diffusion_model, "controlnet"): + params += list(self.model.diffusion_model.controlnet.parameters()) + for name, param in self.model.diffusion_model.named_parameters(): + if "controlnet" not in name: + if "temporal" in name or "lora" in name: + params.append(param) + else: + param.requires_grad = False + else: + raise NotImplementedError + + for embedder in self.conditioner.embedders: + if embedder.is_trainable: + params = params + list(embedder.parameters()) + opt = self.instantiate_optimizer_from_config(params, lr, self.optimizer_config) + if self.scheduler_config is not None: + scheduler = instantiate_from_config(self.scheduler_config) + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + "scheduler": LambdaLR(opt, lr_lambda=scheduler.schedule), + "interval": "step", + "frequency": 1, + } + ] + return [opt], scheduler + return opt + +# ----------------------------------------------------- +class VideoDiffusionEngineTV2V(VideoDiffusionEngine): + def __init__(self, *args, **kwargs): + # kwargs['wrapper_type'] = OPENAIUNETWRAPPERCONTROLLDM3DTV2V + kwargs["wrapper_type"] = kwargs.get( + "wrapper_type", "OPENAIUNETWRAPPERCONTROLLDM3DTV2V" + ) + super().__init__(*args, **kwargs) + + # freeze the controlnet (load pre-trained weights, no need to train) + self.model.diffusion_model.controlnet.eval() + for name, param in self.model.diffusion_model.controlnet.named_parameters(): + param.requires_grad = False + + if hasattr(self.model.diffusion_model, "controlnet_img"): + print('Setting controlnet_img to trainable ... 
') + # open the controlnet_img + for ( + name, + param, + ) in self.model.diffusion_model.controlnet_img.named_parameters(): + param.requires_grad = True + + def init_from_ckpt( + self, + path: str, + ) -> None: + print(f"Loading checkpoint from {path} ... ") + if path.endswith("ckpt"): + if "deepspeed" in path: + sd = torch.load(path, map_location="cpu") + sd = {k.replace("_forward_module.", ""): v for k, v in sd.items()} + else: + sd = torch.load(path, map_location="cpu")["state_dict"] + elif path.endswith("safetensors"): + sd = load_safetensors(path) + else: + raise NotImplementedError + + missing, unexpected = self.load_state_dict(sd, strict=False) + + print( + f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" + ) + if len(missing) > 0: + print(f"Missing Keys: {missing}") + if len(unexpected) > 0: + print(f"Unexpected Keys: {unexpected}") + + @torch.no_grad() + def log_images( + self, + batch: Dict, + N: int = 8, + sample: bool = True, + ucg_keys: List[str] = None, + **kwargs, + ) -> Dict: + conditioner_input_keys = [e.input_key for e in self.conditioner.embedders] + if ucg_keys: + assert all(map(lambda x: x in conditioner_input_keys, ucg_keys)), ( + "Each defined ucg key for sampling must be in the provided conditioner input keys," + f"but we have {ucg_keys} vs. 
{conditioner_input_keys}" + ) + else: + ucg_keys = conditioner_input_keys + log = dict() + + x = self.get_input(batch) + + negative_prompt = "ugly, low quality" + batch_uc = { + "txt": [negative_prompt for i in range(x.shape[0])], + "control_hint": batch[ + "control_hint" + ].clone(), # to use the pretrained weights, we must use the same control_hint in the batch_uc + } + if "cond_img" in batch.keys(): # for TVI2V; + # TODO: in fact, we can delete this, just use empty tensor as cond_img for batch_uc + batch_uc["cond_img"] = batch["cond_img"].clone() + # batch_uc['cond_img'] = torch.zeros_like(batch['cond_img']) + batch["txt"] = ["masterpiece, best quality, " + each for each in batch["txt"]] + c, uc = self.conditioner.get_unconditional_conditioning( + batch_c=batch, + batch_uc=batch_uc, + ) + + sampling_kwargs = { + key: batch[key] for key in self.loss_fn.batch2model_keys.intersection(batch) + } + + N = min(x.shape[0], N) + x = x.to(self.device)[:N] + log["inputs"] = x + log["inputs-video"] = x + if "cond_img" in batch.keys(): + log["cond_img"] = batch["cond_img"] + z = self.encode_first_stage(x) + # log["reconstructions"] = self.decode_first_stage(z) + # log["reconstructions-video"] = self.decode_first_stage(z) + log.update(self.log_conditionings(batch, N)) + + for k in c: + if isinstance(c[k], torch.Tensor): + c[k], uc[k] = map(lambda y: y[k][:N].to(self.device), (c, uc)) + + if sample: + with self.ema_scope("Plotting"): + samples = self.sample( + c, shape=z.shape[1:], uc=uc, batch_size=N, **sampling_kwargs + ) + samples = self.decode_first_stage(samples) + log["samples"] = samples + log["samples-video"] = samples + + for embedder in self.conditioner.embedders: + if ( + isinstance(embedder, LineartEncoder) + or isinstance(embedder, DepthZoeEncoder) + or isinstance(embedder, DepthMidasEncoder) + or isinstance(embedder, SoftEdgeEncoder) + or isinstance(embedder, NormalBaeEncoder) + or isinstance(embedder, ScribbleHEDEncoder) + or isinstance(embedder, 
ScribblePidiNetEncoder) + or isinstance(embedder, OpenposeEncoder) + or isinstance(embedder, OutpaintingEncoder) + or isinstance(embedder, InpaintingEncoder) + ): + # log['control_hint'] = embedder.encode(batch['control_hint']) + # log['control_hint-video'] = embedder.encode(batch['control_hint']) + log["control_hint"] = -embedder.encode(batch["control_hint"]) + log["control_hint-video"] = -embedder.encode(batch["control_hint"]) + break + + # concat the inputs and outputs for visualization + log["inputs_samples_hint"] = torch.cat( + [log["inputs"], log["samples"], log["control_hint"]], dim=3 + ) + del log["inputs"] + del log["samples"] + del log["control_hint"] + + log["inputs_samples_hint-video"] = torch.cat( + [log["inputs-video"], log["samples-video"], log["control_hint-video"]], + dim=3, + ) + del log["inputs-video"] + del log["samples-video"] + del log["control_hint-video"] + return log + + def configure_optimizers(self): + lr = self.learning_rate + + if self.freeze_model == "none": + params = list(self.model.diffusion_model.parameters()) + for name, param in self.model.diffusion_model.named_parameters(): + print(f"Setting {name} to trainable") + param.requires_grad = True + elif self.freeze_model == "spatial": + params = [] + if hasattr(self.model.diffusion_model, "controlnet"): + params += list(self.model.diffusion_model.controlnet.parameters()) + if hasattr(self.model.diffusion_model, "controlnet_img"): + params += list(self.model.diffusion_model.controlnet_img.parameters()) + for name, param in self.model.diffusion_model.named_parameters(): + if "controlnet" not in name: + if "temporal" in name: + params.append(param) + else: + param.requires_grad = False + else: + raise NotImplementedError + + for embedder in self.conditioner.embedders: + if embedder.is_trainable: + params = params + list(embedder.parameters()) + opt = self.instantiate_optimizer_from_config(params, lr, self.optimizer_config) + if self.scheduler_config is not None: + scheduler = 
class VideoDiffusionEngineTV2VInterpolate(VideoDiffusionEngineTV2V):
    """TV2V engine variant conditioned on first/last frames for interpolation."""

    def __init__(self, *args, **kwargs):
        # Force the wrapper that routes the interpolation conditioning.
        kwargs["wrapper_type"] = "OPENAIUNETWRAPPERCONTROLLDM3DTV2V_INTERPOLATE"
        super().__init__(*args, **kwargs)

    @torch.no_grad()
    def log_images(
        self,
        batch: Dict,
        N: int = 8,
        sample: bool = True,
        ucg_keys: Optional[List[str]] = None,
        **kwargs,
    ) -> Dict:
        """Visualization logging; same as the parent, but additionally logs the
        first/last-frame interpolation conditioning and uses an empty
        negative/positive prompt (text conditioning disabled)."""
        conditioner_input_keys = [e.input_key for e in self.conditioner.embedders]
        if ucg_keys:
            assert all(map(lambda x: x in conditioner_input_keys, ucg_keys)), (
                "Each defined ucg key for sampling must be in the provided conditioner input keys,"
                f"but we have {ucg_keys} vs. {conditioner_input_keys}"
            )
        else:
            ucg_keys = conditioner_input_keys
        log = dict()

        x = self.get_input(batch)

        negative_prompt = ''
        batch_uc = {
            "txt": [negative_prompt for i in range(x.shape[0])],
            "control_hint": batch["control_hint"].clone(),
            "interpolate_first_last": batch["interpolate_first_last"].clone(),
        }
        # TODO: specify this in the config file
        batch["txt"] = ['' for each in batch["txt"]]  # disable text prompt

        c, uc = self.conditioner.get_unconditional_conditioning(
            batch_c=batch,
            batch_uc=batch_uc,
        )

        sampling_kwargs = {
            key: batch[key] for key in self.loss_fn.batch2model_keys.intersection(batch)
        }

        N = min(x.shape[0], N)
        x = x.to(self.device)[:N]
        log["inputs"] = x
        log["inputs-video"] = x
        from sgm.modules.encoders.modules import CustomIdentityEncoder, CustomIdentityDownCondEncoder

        for embedder in self.conditioner.embedders:
            if isinstance(embedder, (CustomIdentityEncoder, CustomIdentityDownCondEncoder)):
                # keep only the first 3 channels, in case the encoder returns more
                log["interpolate_first_last"] = embedder.encode(
                    batch["interpolate_first_last"]
                )[:, :3, :, ...]
                break
        z = self.encode_first_stage(x)
        log.update(self.log_conditionings(batch, N))

        for k in c:
            if isinstance(c[k], torch.Tensor):
                c[k], uc[k] = map(lambda y: y[k][:N].to(self.device), (c, uc))

        if sample:
            with self.ema_scope("Plotting"):
                samples = self.sample(
                    c, shape=z.shape[1:], uc=uc, batch_size=N, **sampling_kwargs
                )
            samples = self.decode_first_stage(samples)
            log["samples"] = samples
            log["samples-video"] = samples

        for embedder in self.conditioner.embedders:
            if isinstance(
                embedder,
                (
                    LineartEncoder,
                    DepthZoeEncoder,
                    DepthMidasEncoder,
                    SoftEdgeEncoder,
                    NormalBaeEncoder,
                    ScribbleHEDEncoder,
                    ScribblePidiNetEncoder,
                    OpenposeEncoder,
                    OutpaintingEncoder,
                    InpaintingEncoder,
                ),
            ):
                log["control_hint"] = -embedder.encode(batch["control_hint"])
                log["control_hint-video"] = -embedder.encode(batch["control_hint"])
                break

        # concat the inputs and outputs for visualization (width axis)
        log["inputs_samples_hint"] = torch.cat(
            [log["inputs"], log["samples"], log["control_hint"]], dim=3
        )
        del log["inputs"]
        del log["samples"]
        del log["control_hint"]

        log["inputs_samples_hint-video"] = torch.cat(
            [log["inputs-video"], log["samples-video"], log["control_hint-video"]],
            dim=3,
        )
        del log["inputs-video"]
        del log["samples-video"]
        del log["control_hint-video"]
        return log


if __name__ == "__main__":
    # Debug harness: builds a DiffusionEngine from the test config and runs an
    # endless overfit loop on a random batch. Requires a CUDA device.
    import logging

    import yaml

    open("output.log", "w").close()  # truncate the previous run's log

    logging.basicConfig(
        level=logging.DEBUG,
        filename="output.log",
        datefmt="%Y/%m/%d %H:%M:%S",
        format="%(asctime)s - %(name)s - %(levelname)s - %(lineno)d - %(module)s - %(message)s",
    )
    logger = logging.getLogger(__name__)

    BS = 2
    frame_length = 17
    size = [BS, 3, 320, 320]
    batch = {
        "jpg": torch.randn(size).cuda(),
        "txt": BS * ["text"],
        "original_size_as_tuple": torch.tensor([320, 320]).repeat(BS, 1).cuda(),
        "crop_coords_top_left": torch.tensor([0, 0]).repeat(BS, 1).cuda(),
        "target_size_as_tuple": torch.tensor([320, 320]).repeat(BS, 1).cuda(),
    }

    # close the file handle deterministically (was a bare open() before)
    with open("configs/example_training/sd_xl_base-test.yaml") as cfg_file:
        model_config = yaml.load(cfg_file, Loader=yaml.Loader)["model"]

    learning_rate = model_config.pop("base_learning_rate")
    model = DiffusionEngine(**model_config["params"]).cuda()
    model.learning_rate = learning_rate
    logger.info(model)

    opt = model.configure_optimizers()

    while True:
        loss = model.training_step(batch, 1)
        print(f"loss: {loss}")
        loss.backward()
        opt[0][0].step()
        opt[0][0].zero_grad()
b/CCEdit-main/sgm/modules/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ad2f3078bd88d043de11d9a7f7694b7a519fd47 Binary files /dev/null and b/CCEdit-main/sgm/modules/__pycache__/__init__.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/modules/__pycache__/attention.cpython-39.pyc b/CCEdit-main/sgm/modules/__pycache__/attention.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..570a0e649da180d8cfa7cf0a059215f87dd9fda5 Binary files /dev/null and b/CCEdit-main/sgm/modules/__pycache__/attention.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/modules/__pycache__/ema.cpython-39.pyc b/CCEdit-main/sgm/modules/__pycache__/ema.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af4f7a83de2e7050488d35949e56c7e69a79d709 Binary files /dev/null and b/CCEdit-main/sgm/modules/__pycache__/ema.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/modules/attention.py b/CCEdit-main/sgm/modules/attention.py new file mode 100644 index 0000000000000000000000000000000000000000..eb959e4b0827338a7f82e3fb26f13a090c185342 --- /dev/null +++ b/CCEdit-main/sgm/modules/attention.py @@ -0,0 +1,1663 @@ +import math +from inspect import isfunction +from typing import Any, Optional + +import torch +import torch.nn.functional as F +from einops import rearrange, repeat +from packaging import version +from torch import nn + +import loralib as lora + +if version.parse(torch.__version__) >= version.parse("2.0.0"): + SDP_IS_AVAILABLE = True + from torch.backends.cuda import SDPBackend, sdp_kernel + + BACKEND_MAP = { + SDPBackend.MATH: { + "enable_math": True, + "enable_flash": False, + "enable_mem_efficient": False, + }, + SDPBackend.FLASH_ATTENTION: { + "enable_math": False, + "enable_flash": True, + "enable_mem_efficient": False, + }, + SDPBackend.EFFICIENT_ATTENTION: { + "enable_math": False, + "enable_flash": False, + "enable_mem_efficient": True, + }, + None: {"enable_math": 
def exists(val):
    """Return True iff *val* is not None."""
    return val is not None


def uniq(arr):
    """Order-preserving de-duplication; returns a dict-keys view of unique items."""
    return {el: True for el in arr}.keys()


def default(val, d):
    """Return *val* if it is not None, else the fallback *d* (called if it is a function)."""
    if exists(val):
        return val
    return d() if isfunction(d) else d


def max_neg_value(t):
    """Most negative finite value representable in *t*'s dtype (for masking)."""
    return -torch.finfo(t.dtype).max


def init_(tensor):
    """In-place uniform init with std 1/sqrt(last dim); returns *tensor*."""
    dim = tensor.shape[-1]
    std = 1 / math.sqrt(dim)
    tensor.uniform_(-std, std)
    return tensor


def get_lora_params(kwargs):
    """Collect q/k/v/o LoRA settings from **kwargs.

    For each projection P in {q, k, v, o}, reads ``P_use_lora`` (default
    False), ``P_lora_r`` (default 4) and ``P_lora_alpha`` (default 1) and
    returns them in a flat dict keyed by those same names.
    """
    defaults = (("use_lora", False), ("lora_r", 4), ("lora_alpha", 1))
    lora_params = dict()
    for prefix in ("q", "k", "v", "o"):
        for suffix, fallback in defaults:
            key = f"{prefix}_{suffix}"
            lora_params[key] = kwargs.get(key, fallback)
    return lora_params
# feedforward
class GEGLU(nn.Module):
    """Gated-GELU projection: maps dim_in -> 2*dim_out, gates one half by GELU of the other."""

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)


class FeedForward(nn.Module):
    """Transformer MLP block; ``glu=True`` swaps the input projection for GEGLU."""

    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = default(dim_out, dim)
        project_in = (
            nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU())
            if not glu
            else GEGLU(dim, inner_dim)
        )

        self.net = nn.Sequential(
            project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out)
        )

    def forward(self, x):
        return self.net(x)


def zero_module(module):
    """Zero out the parameters of a module (in place) and return it."""
    for p in module.parameters():
        p.detach().zero_()
    return module


def Normalize(in_channels):
    """GroupNorm(32 groups, eps=1e-6, affine) used as the shared norm in this file."""
    return torch.nn.GroupNorm(
        num_groups=32, num_channels=in_channels, eps=1e-6, affine=True
    )


class LinearAttention(nn.Module):
    """Linear (kernelized) attention over 2-D feature maps; O(N) in spatial size."""

    def __init__(self, dim, heads=4, dim_head=32):
        super().__init__()
        self.heads = heads
        hidden_dim = dim_head * heads
        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
        self.to_out = nn.Conv2d(hidden_dim, dim, 1)

    def forward(self, x):
        b, c, h, w = x.shape
        qkv = self.to_qkv(x)
        q, k, v = rearrange(
            qkv, "b (qkv heads c) h w -> qkv b heads c (h w)", heads=self.heads, qkv=3
        )
        # softmax over keys, then contract k·v first — avoids the NxN matrix
        k = k.softmax(dim=-1)
        context = torch.einsum("bhdn,bhen->bhde", k, v)
        out = torch.einsum("bhde,bhdn->bhen", context, q)
        out = rearrange(
            out, "b heads c (h w) -> b (heads c) h w", heads=self.heads, h=h, w=w
        )
        return self.to_out(out)
class SpatialSelfAttention(nn.Module):
    """Single-head self-attention over a 2-D feature map with residual output."""

    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        # 1x1 convs act as per-pixel linear projections for q/k/v and output
        self.q = torch.nn.Conv2d(
            in_channels, in_channels, kernel_size=1, stride=1, padding=0
        )
        self.k = torch.nn.Conv2d(
            in_channels, in_channels, kernel_size=1, stride=1, padding=0
        )
        self.v = torch.nn.Conv2d(
            in_channels, in_channels, kernel_size=1, stride=1, padding=0
        )
        self.proj_out = torch.nn.Conv2d(
            in_channels, in_channels, kernel_size=1, stride=1, padding=0
        )

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention weights: (h*w) x (h*w), scaled by 1/sqrt(c)
        b, c, h, w = q.shape
        q = rearrange(q, "b c h w -> b (h w) c")
        k = rearrange(k, "b c h w -> b c (h w)")
        w_ = torch.einsum("bij,bjk->bik", q, k)

        w_ = w_ * (int(c) ** (-0.5))
        w_ = torch.nn.functional.softmax(w_, dim=2)

        # attend to values
        v = rearrange(v, "b c h w -> b c (h w)")
        w_ = rearrange(w_, "b i j -> b j i")
        h_ = torch.einsum("bij,bjk->bik", v, w_)
        h_ = rearrange(h_, "b c (h w) -> b c h w", h=h)
        h_ = self.proj_out(h_)

        # residual connection
        return x + h_
class FlashCrossAttention(nn.Module):
    """Cross-attention using the flash-attn varlen kernel (fp16 path).

    Interface mirrors CrossAttention; optional LoRA adapters on the q/k/v/o
    projections are enabled through **kwargs (see ``get_lora_params``).
    """

    def __init__(
        self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0, **kwargs
    ):
        super().__init__()
        print(
            f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using "
            f"{heads} heads with a dimension of {dim_head}."
        )
        self.dropout = dropout
        self.query_dim = query_dim
        self.context_dim = context_dim
        self.heads = heads
        self.dim_head = dim_head

        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head**-0.5

        lora_params = get_lora_params(kwargs)

        if lora_params["q_use_lora"]:
            self.to_q = lora.Linear(
                query_dim, inner_dim, r=lora_params["q_lora_r"], bias=False
            )
        else:
            self.to_q = nn.Linear(query_dim, inner_dim, bias=False)

        if lora_params["k_use_lora"]:
            self.to_k = lora.Linear(
                context_dim, inner_dim, r=lora_params["k_lora_r"], bias=False
            )
        else:
            self.to_k = nn.Linear(context_dim, inner_dim, bias=False)

        if lora_params["v_use_lora"]:
            self.to_v = lora.Linear(
                context_dim, inner_dim, r=lora_params["v_lora_r"], bias=False
            )
        else:
            self.to_v = nn.Linear(context_dim, inner_dim, bias=False)

        if lora_params["o_use_lora"]:
            self.to_out = nn.Sequential(
                lora.Linear(inner_dim, query_dim, r=lora_params["o_lora_r"]),
                nn.Dropout(dropout),
            )
        else:
            self.to_out = nn.Sequential(
                nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)
            )

    def get_input(self, x, seqlen, batch_size, nheads, mask=None):
        """Unpad *x* into the (nnz, h, d) fp16 layout flash-attn expects.

        All sequences are full-length here (no padding), so the mask is a
        formality; real masks are not supported on this path.
        """
        assert mask is None, "not implemented for mask with flash attention"
        lengths = torch.ones([batch_size, 1], dtype=torch.int, device="cuda") * seqlen
        attention_mask_bool = (
            repeat(torch.arange(seqlen, device="cuda"), "s -> b s", b=batch_size)
            < lengths
        )
        attention_mask = torch.zeros(
            batch_size, seqlen, device="cuda", dtype=torch.float16
        )
        attention_mask[~attention_mask_bool] = -10000.0
        attention_mask = rearrange(attention_mask, "b s -> b 1 1 s")
        x_unpad, indices, cu_seqlens_x, max_seqlen_in_batch_x = unpad_input(
            x, attention_mask_bool
        )
        x_unpad = rearrange(x_unpad, "nnz (h d) -> nnz h d", h=nheads)
        return x_unpad.to(torch.float16), cu_seqlens_x, max_seqlen_in_batch_x

    def forward(
        self,
        x,
        context=None,
        mask=None,
        additional_tokens=None,
        n_times_crossframe_attn_in_self=0,
    ):
        if additional_tokens is not None:
            # get the number of masked tokens at the beginning of the output sequence
            n_tokens_to_mask = additional_tokens.shape[1]
            # add additional token
            x = torch.cat([additional_tokens, x], dim=1)

        h = self.heads
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        v = self.to_v(context)

        if n_times_crossframe_attn_in_self:
            # reprogramming cross-frame attention as in https://arxiv.org/abs/2303.13439
            assert x.shape[0] % n_times_crossframe_attn_in_self == 0
            k = repeat(
                k[::n_times_crossframe_attn_in_self],
                "b ... -> (b n) ...",
                n=n_times_crossframe_attn_in_self,
            )
            v = repeat(
                v[::n_times_crossframe_attn_in_self],
                "b ... -> (b n) ...",
                n=n_times_crossframe_attn_in_self,
            )

        b, seqlen_q = q.shape[0], q.shape[1]
        seqlen_k = k.shape[1]
        seqlen_v = v.shape[1]
        q, cu_seqlens_q, max_seqlen_in_batch_q = self.get_input(q, seqlen_q, b, h)
        k, cu_seqlens_k, max_seqlen_in_batch_k = self.get_input(k, seqlen_k, b, h)
        v, cu_seqlens_v, max_seqlen_in_batch_v = self.get_input(v, seqlen_v, b, h)

        # dropout only during training
        dropout_p = self.dropout if self.training else 0

        out = flash_attn_unpadded_func(
            q,
            k,
            v,
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_in_batch_q,
            max_seqlen_in_batch_k,
            dropout_p,
        )

        out = rearrange(out, "(b n) h d -> b n (h d)", b=b, h=h)
        out = out.to(context.dtype)
        if additional_tokens is not None:
            # remove additional token
            out = out[:, n_tokens_to_mask:]
        out = self.to_out(out)
        return out
class CrossAttention(nn.Module):
    """Vanilla cross-attention backed by torch.nn.functional.scaled_dot_product_attention.

    With ``context=None`` this is self-attention. ``backend`` selects the SDP
    kernel via the BACKEND_MAP / sdp_kernel context manager.
    """

    def __init__(
        self,
        query_dim,
        context_dim=None,
        heads=8,
        dim_head=64,
        dropout=0.0,
        backend=None,
        **kwargs,
    ):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head**-0.5
        self.heads = heads

        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)
        )

        self.backend = backend

    def forward(
        self,
        x,
        context=None,
        mask=None,
        additional_tokens=None,
        n_times_crossframe_attn_in_self=0,
    ):
        h = self.heads

        if additional_tokens is not None:
            # get the number of masked tokens at the beginning of the output sequence
            n_tokens_to_mask = additional_tokens.shape[1]
            # add additional token
            x = torch.cat([additional_tokens, x], dim=1)

        q = self.to_q(x)
        context = default(context, x)
        # align context dtype with the projection weights (mixed-precision safety)
        context = context.to(self.to_k.weight.dtype)
        k = self.to_k(context)
        v = self.to_v(context)

        if n_times_crossframe_attn_in_self:
            # reprogramming cross-frame attention as in https://arxiv.org/abs/2303.13439
            assert x.shape[0] % n_times_crossframe_attn_in_self == 0
            n_cp = x.shape[0] // n_times_crossframe_attn_in_self
            k = repeat(
                k[::n_times_crossframe_attn_in_self], "b ... -> (b n) ...", n=n_cp
            )
            v = repeat(
                v[::n_times_crossframe_attn_in_self], "b ... -> (b n) ...", n=n_cp
            )

        q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=h), (q, k, v))

        with sdp_kernel(**BACKEND_MAP[self.backend]):
            # scale is dim_head ** -0.5 per default inside SDPA
            out = F.scaled_dot_product_attention(q, k, v, attn_mask=mask)

        del q, k, v
        out = rearrange(out, "b h n d -> b n (h d)", h=h)

        if additional_tokens is not None:
            # remove additional token
            out = out[:, n_tokens_to_mask:]
        return self.to_out(out)
class MemoryEfficientCrossAttention(nn.Module):
    # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
    """Memory-efficient cross-attention; optional LoRA on q/k/v/o via **kwargs."""

    def __init__(
        self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0, **kwargs
    ):
        super().__init__()
        print(
            f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using "
            f"{heads} heads with a dimension of {dim_head}."
        )
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.heads = heads
        self.dim_head = dim_head

        lora_params = get_lora_params(kwargs)

        if lora_params["q_use_lora"]:
            self.to_q = lora.Linear(
                query_dim, inner_dim, r=lora_params["q_lora_r"], bias=False
            )
        else:
            self.to_q = nn.Linear(query_dim, inner_dim, bias=False)

        if lora_params["k_use_lora"]:
            self.to_k = lora.Linear(
                context_dim, inner_dim, r=lora_params["k_lora_r"], bias=False
            )
        else:
            self.to_k = nn.Linear(context_dim, inner_dim, bias=False)

        if lora_params["v_use_lora"]:
            self.to_v = lora.Linear(
                context_dim, inner_dim, r=lora_params["v_lora_r"], bias=False
            )
        else:
            self.to_v = nn.Linear(context_dim, inner_dim, bias=False)

        if lora_params["o_use_lora"]:
            self.to_out = nn.Sequential(
                lora.Linear(inner_dim, query_dim, r=lora_params["o_lora_r"]),
                nn.Dropout(dropout),
            )
        else:
            self.to_out = nn.Sequential(
                nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)
            )

        self.attention_op: Optional[Any] = None

    def forward(
        self,
        x,
        context=None,
        mask=None,
        additional_tokens=None,
        n_times_crossframe_attn_in_self=0,
    ):
        if additional_tokens is not None:
            # get the number of masked tokens at the beginning of the output sequence
            n_tokens_to_mask = additional_tokens.shape[1]
            # add additional token
            x = torch.cat([additional_tokens, x], dim=1)
        q = self.to_q(x)
        context = default(context, x)
        context = context.to(self.to_k.weight.dtype)
        k = self.to_k(context)
        v = self.to_v(context)

        if n_times_crossframe_attn_in_self:
            # reprogramming cross-frame attention as in https://arxiv.org/abs/2303.13439
            assert x.shape[0] % n_times_crossframe_attn_in_self == 0
            k = repeat(
                k[::n_times_crossframe_attn_in_self],
                "b ... -> (b n) ...",
                n=n_times_crossframe_attn_in_self,
            )
            v = repeat(
                v[::n_times_crossframe_attn_in_self],
                "b ... -> (b n) ...",
                n=n_times_crossframe_attn_in_self,
            )

        b, _, _ = q.shape
        # fold heads into the batch dimension: (b, n, h*d) -> (b*h, n, d)
        q, k, v = map(
            lambda t: t.unsqueeze(3)
            .reshape(b, t.shape[1], self.heads, self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b * self.heads, t.shape[1], self.dim_head)
            .contiguous(),
            (q, k, v),
        )

        # run SDPA outside autocast, in the projection weights' dtype
        with torch.autocast(enabled=False, device_type="cuda"):
            if self.to_q.weight.dtype == torch.float16:
                q, k, v = q.to(torch.float32), k.to(torch.float32), v.to(torch.float32)
            elif self.to_q.weight.dtype == torch.bfloat16:
                q, k, v = (
                    q.to(torch.bfloat16),
                    k.to(torch.bfloat16),
                    v.to(torch.bfloat16),
                )
            out = F.scaled_dot_product_attention(q, k, v, is_causal=False).to(
                self.to_q.weight.dtype
            )

        # TODO: Use this directly in the attention operation, as a bias
        if exists(mask):
            raise NotImplementedError
        # unfold heads: (b*h, n, d) -> (b, n, h*d)
        out = (
            out.unsqueeze(0)
            .reshape(b, self.heads, out.shape[1], self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b, out.shape[1], self.heads * self.dim_head)
        )
        if additional_tokens is not None:
            # remove additional token
            out = out[:, n_tokens_to_mask:]
        return self.to_out(out)
class BasicTransformerBlock(nn.Module):
    """Self-attn -> cross-attn -> feed-forward, each with pre-LayerNorm and residual."""

    ATTENTION_MODES = {
        "softmax": CrossAttention,  # vanilla attention
        "flash": FlashCrossAttention,  # flash attention
        "softmax-xformers": MemoryEfficientCrossAttention,  # ampere
    }

    def __init__(
        self,
        dim,
        n_heads,
        d_head,
        dropout=0.0,
        context_dim=None,
        gated_ff=True,
        checkpoint=True,
        disable_self_attn=False,
        flash_attention=False,
        attn_mode="softmax",
        sdp_backend=None,
        **kwargs,
    ):
        super().__init__()
        assert attn_mode in self.ATTENTION_MODES
        # resolve the attention implementation against what is installed
        if use_flash_attention and flash_attention:
            attn_mode = "flash"
        else:
            if attn_mode != "softmax" and not XFORMERS_IS_AVAILABLE:
                print(
                    f"Attention mode '{attn_mode}' is not available. Falling back to native attention. "
                    f"This is not a problem in Pytorch >= 2.0. FYI, you are running with PyTorch version {torch.__version__}"
                )
                attn_mode = "softmax"
            elif attn_mode == "softmax" and not SDP_IS_AVAILABLE:
                print(
                    "We do not support vanilla attention anymore, as it is too expensive. Sorry."
                )
                if not XFORMERS_IS_AVAILABLE:
                    assert (
                        False
                    ), "Please install xformers via e.g. 'pip install xformers==0.0.16'"
                else:
                    print("Falling back to xformers efficient attention.")
                    attn_mode = "softmax-xformers"
        attn_cls = self.ATTENTION_MODES[attn_mode]
        if version.parse(torch.__version__) >= version.parse("2.0.0"):
            assert sdp_backend is None or isinstance(sdp_backend, SDPBackend)
        else:
            assert sdp_backend is None
        self.disable_self_attn = disable_self_attn
        self.attn1 = attn_cls(
            query_dim=dim,
            heads=n_heads,
            dim_head=d_head,
            dropout=dropout,
            context_dim=context_dim if self.disable_self_attn else None,
            backend=sdp_backend,
            **kwargs,
        )  # is a self-attention if not self.disable_self_attn
        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
        self.attn2 = attn_cls(
            query_dim=dim,
            context_dim=context_dim,
            heads=n_heads,
            dim_head=d_head,
            dropout=dropout,
            backend=sdp_backend,
            **kwargs,
        )  # is self-attn if context is none
        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)
        self.norm3 = nn.LayerNorm(dim)
        self.checkpoint = checkpoint
        if self.checkpoint:
            print(f"{self.__class__.__name__} is using checkpointing")

    def forward(
        self, x, context=None, additional_tokens=None, n_times_crossframe_attn_in_self=0
    ):
        kwargs = {"x": x}

        if context is not None:
            kwargs.update({"context": context})

        if additional_tokens is not None:
            kwargs.update({"additional_tokens": additional_tokens})

        if n_times_crossframe_attn_in_self:
            kwargs.update(
                {"n_times_crossframe_attn_in_self": n_times_crossframe_attn_in_self}
            )

        # NOTE(review): kwargs is assembled but never forwarded — checkpoint()
        # only passes (x, context), so additional_tokens and the cross-frame
        # option are silently dropped on this path. Kept as-is to preserve
        # behavior; confirm upstream intent before wiring them through.
        return checkpoint(
            self._forward, (x, context), self.parameters(), self.checkpoint
        )

    def _forward(
        self, x, context=None, additional_tokens=None, n_times_crossframe_attn_in_self=0
    ):
        x = (
            self.attn1(
                self.norm1(x),
                context=context if self.disable_self_attn else None,
                additional_tokens=additional_tokens,
                n_times_crossframe_attn_in_self=n_times_crossframe_attn_in_self
                if not self.disable_self_attn
                else 0,
            )
            + x
        )
        x = (
            self.attn2(
                self.norm2(x), context=context, additional_tokens=additional_tokens
            )
            + x
        )
        x = self.ff(self.norm3(x)) + x
        return x


class BasicTransformerSingleLayerBlock(nn.Module):
    """One attention layer + feed-forward (no second cross-attention)."""

    ATTENTION_MODES = {
        "softmax": CrossAttention,  # vanilla attention
        # on the A100s not quite as fast as the above version
        "softmax-xformers": MemoryEfficientCrossAttention
        # (todo might depend on head_dim, check, falls back to semi-optimized kernels for dim!=[16,32,64,128])
    }

    def __init__(
        self,
        dim,
        n_heads,
        d_head,
        dropout=0.0,
        context_dim=None,
        gated_ff=True,
        checkpoint=True,
        attn_mode="softmax-xformers",
    ):
        super().__init__()
        assert attn_mode in self.ATTENTION_MODES
        attn_cls = self.ATTENTION_MODES[attn_mode]
        self.attn1 = attn_cls(
            query_dim=dim,
            heads=n_heads,
            dim_head=d_head,
            dropout=dropout,
            context_dim=context_dim,
        )
        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)
        self.checkpoint = checkpoint

    def forward(self, x, context=None):
        return checkpoint(
            self._forward, (x, context), self.parameters(), self.checkpoint
        )

    def _forward(self, x, context=None):
        x = self.attn1(self.norm1(x), context=context) + x
        x = self.ff(self.norm2(x)) + x
        return x
class SpatialTransformer(nn.Module):
    """
    Transformer block for image-like data.
    First, project the input (aka embedding)
    and reshape to b, t, d.
    Then apply standard transformer action.
    Finally, reshape to image
    NEW: use_linear for more efficiency instead of the 1x1 convs
    """

    def __init__(
        self,
        in_channels,
        n_heads,
        d_head,
        depth=1,
        dropout=0.0,
        context_dim=None,
        disable_self_attn=False,
        use_linear=False,
        attn_type="softmax-xformers",
        use_checkpoint=True,
        sdp_backend=None,
        **kwargs,
    ):
        super().__init__()
        print(
            f"constructing {self.__class__.__name__} of depth {depth} w/ {in_channels} channels and {n_heads} heads"
        )
        from omegaconf import ListConfig

        # normalize context_dim into a list of length `depth`
        if exists(context_dim) and not isinstance(context_dim, (list, ListConfig)):
            context_dim = [context_dim]
        if exists(context_dim) and isinstance(context_dim, list):
            if depth != len(context_dim):
                print(
                    f"WARNING: {self.__class__.__name__}: Found context dims {context_dim} of depth {len(context_dim)}, "
                    f"which does not match the specified 'depth' of {depth}. Setting context_dim to {depth * [context_dim[0]]} now."
                )
                # depth does not match context dims.
                assert all(
                    map(lambda x: x == context_dim[0], context_dim)
                ), "need homogenous context_dim to match depth automatically"
                context_dim = depth * [context_dim[0]]
        elif context_dim is None:
            context_dim = [None] * depth
        self.in_channels = in_channels
        inner_dim = n_heads * d_head
        self.norm = Normalize(in_channels)
        if not use_linear:
            self.proj_in = nn.Conv2d(
                in_channels, inner_dim, kernel_size=1, stride=1, padding=0
            )
        else:
            self.proj_in = nn.Linear(in_channels, inner_dim)

        # when text cross-attention is disabled, use single-layer blocks that
        # attend to the features themselves instead
        disable_text_ca = kwargs.get("disable_text_ca", False)
        self.disable_text_ca = disable_text_ca
        if disable_text_ca:
            self.transformer_blocks = nn.ModuleList(
                [
                    BasicTransformerSingleLayerBlock(
                        inner_dim,
                        n_heads,
                        d_head,
                        dropout=dropout,
                        context_dim=None,
                        attn_mode="softmax",
                        checkpoint=use_checkpoint,
                    )
                    for d in range(depth)
                ]
            )
        else:
            self.transformer_blocks = nn.ModuleList(
                [
                    BasicTransformerBlock(
                        inner_dim,
                        n_heads,
                        d_head,
                        dropout=dropout,
                        context_dim=context_dim[d],
                        disable_self_attn=disable_self_attn,
                        attn_mode=attn_type,
                        checkpoint=use_checkpoint,
                        sdp_backend=sdp_backend,
                    )
                    for d in range(depth)
                ]
            )
        if not use_linear:
            self.proj_out = zero_module(
                nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
            )
        else:
            self.proj_out = zero_module(nn.Linear(inner_dim, in_channels))
        self.use_linear = use_linear

    def forward(self, x, context=None):
        # note: if no context is given, cross-attention defaults to self-attention
        if not isinstance(context, list):
            context = [context]
        b, c, h, w = x.shape
        x_in = x
        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
        x = rearrange(x, "b c h w -> b (h w) c").contiguous()
        if self.use_linear:
            x = self.proj_in(x)
        for i, block in enumerate(self.transformer_blocks):
            if i > 0 and len(context) == 1:
                i = 0  # use same context for each block
            if self.disable_text_ca:
                x = block(x, context=x)
            else:
                x = block(x, context=context[i])
        if self.use_linear:
            x = self.proj_out(x)
        x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w).contiguous()
        if not self.use_linear:
            x = self.proj_out(x)
        return x + x_in
checkpoint=use_checkpoint, + ) + for d in range(depth) + ] + ) + if not use_linear: + self.proj_out_ca = zero_module( + nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0) + ) + else: + self.proj_out_ca = zero_module(nn.Linear(inner_dim, in_channels)) + + def forward(self, x, context=None): + x = super().forward(x, context) + + assert hasattr(self, 'ref_control'), "must have ref_control" + ref_control = self.ref_control + + b, c, h, w = x.shape + # cross-frame attention + # x = rearrange(x, "b c t h w -> (b t) c h w").contiguous() + x_in = x + x = self.norm_ca(x) + if not self.use_linear: + x = self.proj_in_ca(x) + x = rearrange(x, "b c h w -> b (h w) c").contiguous() + if self.use_linear: + x = self.proj_in_ca(x) + + for i, block in enumerate(self.transformer_blocks_ca): + ref_control = rearrange(ref_control, "b c h w -> b (h w) c").contiguous() + context_texture = ref_control + x = block(x, context_texture) + + if self.use_linear: + x = self.proj_out_ca(x) + x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w).contiguous() + if not self.use_linear: + x = self.proj_out_ca(x) + x = x + x_in + + return x + + +class SpatialTransformer3D(nn.Module): + """ + This is hacked from the 2D version above. + + Transformer block for video-like data. + First, project the input (aka embedding) + and reshape to b, t, d. + Then apply standard transformer action. 
+ Finally, reshape to image + NEW: use_linear for more efficiency instead of the 1x1 convs + """ + + def __init__( + self, + in_channels, + n_heads, + d_head, + depth=1, + dropout=0.0, + context_dim=None, + disable_self_attn=False, + use_linear=False, + attn_type="softmax-xformers", + use_checkpoint=True, + sdp_backend=None, + **kwargs, + ): + super().__init__() + print( + f"constructing {self.__class__.__name__} of depth {depth} w/ {in_channels} channels and {n_heads} heads" + ) + from omegaconf import ListConfig + + if exists(context_dim) and not isinstance(context_dim, (list, ListConfig)): + context_dim = [context_dim] + if exists(context_dim) and isinstance(context_dim, list): + if depth != len(context_dim): + print( + f"WARNING: {self.__class__.__name__}: Found context dims {context_dim} of depth {len(context_dim)}, " + f"which does not match the specified 'depth' of {depth}. Setting context_dim to {depth * [context_dim[0]]} now." + ) + # depth does not match context dims. + assert all( + map(lambda x: x == context_dim[0], context_dim) + ), "need homogenous context_dim to match depth automatically" + context_dim = depth * [context_dim[0]] + elif context_dim is None: + context_dim = [None] * depth + self.in_channels = in_channels + inner_dim = n_heads * d_head + self.norm = Normalize(in_channels) + if not use_linear: + self.proj_in = nn.Conv2d( + in_channels, inner_dim, kernel_size=1, stride=1, padding=0 + ) + else: + self.proj_in = nn.Linear(in_channels, inner_dim) + + self.transformer_blocks = nn.ModuleList( + [ + BasicTransformerBlock( + inner_dim, + n_heads, + d_head, + dropout=dropout, + context_dim=context_dim[d], + disable_self_attn=disable_self_attn, + attn_mode=attn_type, + checkpoint=use_checkpoint, + sdp_backend=sdp_backend, + flash_attention=True, + **kwargs, + ) + for d in range(depth) + ] + ) + if not use_linear: + self.proj_out = zero_module( + nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0) + ) + else: + self.proj_out = 
zero_module(nn.Linear(inner_dim, in_channels)) + self.use_linear = use_linear + + # temporal part + self.norm_temporal = Normalize(in_channels) + if not use_linear: + self.proj_in_temporal = zero_module( + nn.Conv1d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0) + ) + else: + self.proj_in_temporal = zero_module(nn.Linear(in_channels, inner_dim)) + disable_temporal_text_ca = kwargs.get("disable_temporal_text_ca", False) + self.disable_temporal_text_ca = disable_temporal_text_ca + if disable_temporal_text_ca: + self.transformer_blocks_temporal = nn.ModuleList( + [ + # BasicTransformerBlock( # temporal transformer does not use flash attention + BasicTransformerSingleLayerBlock( # temporal transformer does not use flash attention + inner_dim, + n_heads, + d_head, + dropout=dropout, + # context_dim=context_dim[d], + context_dim=None, + attn_mode="softmax", + checkpoint=use_checkpoint, + ) + for d in range(depth) + ] + ) + else: + self.transformer_blocks_temporal = nn.ModuleList( + [ + BasicTransformerBlock( # temporal transformer does not use flash attention + inner_dim, + n_heads, + d_head, + dropout=dropout, + context_dim=context_dim[d], + disable_self_attn=disable_self_attn, + attn_mode="softmax", + checkpoint=use_checkpoint, + sdp_backend=sdp_backend, + ) + for d in range(depth) + ] + ) + if not use_linear: + self.proj_out_temporal = zero_module( + nn.Conv1d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0) + ) + else: + self.proj_out_temporal = zero_module(nn.Linear(inner_dim, in_channels)) + + use_learnable_alpha = kwargs.get("use_learnable_alpha", False) + if use_learnable_alpha: + self.alpha_temporal = nn.Parameter( + torch.ones(1) + ) # x = alpha * spatial + (1-alpha) * temporal + + def forward(self, x, context=None): + # note: if no context is given, cross-attention defaults to self-attention + if not isinstance(context, list): + context = [context] + b, c, t, h, w = x.shape + # spatial attention + x = rearrange(x, "b c t h w -> (b t) c 
h w").contiguous() + x_in = x + x = self.norm(x) + if not self.use_linear: + x = self.proj_in(x) + x = rearrange(x, "bt c h w -> bt (h w) c").contiguous() + if self.use_linear: + x = self.proj_in(x) + + for i, block in enumerate(self.transformer_blocks): + if i > 0 and len(context) == 1: + i = 0 + context_i = ( + repeat(context[i], "b l c -> (b t) l c", t=t).contiguous() + if context[i] is not None + else None + ) + x = block(x, context=context_i) + if self.use_linear: + x = self.proj_out(x) + x = rearrange(x, "bt (h w) c -> bt c h w", h=h, w=w).contiguous() + if not self.use_linear: + x = self.proj_out(x) + x = x + x_in + + x = rearrange(x, "(b t) c h w -> (b h w) c t", t=t).contiguous() + # temporal attention + if hasattr(self, "norm_temporal"): # temporal operation exist + x_in = x + x = self.norm_temporal(x) + if not self.use_linear: + x = self.proj_in_temporal(x) + x = rearrange(x, "bhw c t->bhw t c").contiguous() + if self.use_linear: + x = self.proj_in_temporal(x) + for i, block in enumerate(self.transformer_blocks_temporal): + if i > 0 and len(context) == 1: + i = 0 # use same context for each block + # if context[i] != None: + context_i = ( + repeat(context[i], "b l c -> (b h w) l c", h=h, w=w).contiguous() + if context[i] is not None + else None + ) + if self.disable_temporal_text_ca: + x = block(x, context=x) + else: + x = block(x, context=context_i) + + if self.use_linear: + x = self.proj_out_temporal(x) + x = rearrange(x, "bhw t c -> bhw c t").contiguous() + if not self.use_linear: + x = self.proj_out_temporal(x) + if hasattr(self, "alpha_temporal"): + x = self.alpha_temporal * x_in + (1 - self.alpha_temporal) * x + else: + x = x_in + x + # x = x_in # ! 
DEBUG ONLY + + x = rearrange(x, "(b h w) c t -> b c t h w", h=h, w=w).contiguous() + return x + + +class SpatialTransformer3DCA(SpatialTransformer3D): + """ + # -> SpatialTransformer3DCrossAttention + # Replace the second temporal attention in SpatialTransformer3D with cross-attention + # Original attention order: + # 1. spatial self-attention + # 2. cross-attention with text condition + # 3. temporal self-attention (1d) + # 4. cross-attention with text condition + # Attention order: + # 1. spatial self-attention + # 2. cross-attention with text condition + # 3. temporal self-attention (1d) + # 4. cross-attention with text condition (maybe not necessary, but... nevermind) + # 5. cross-attention with anchor frame (usually center frame, or reference image from outside) + + This is hacked from the 2D version above. + + Transformer block for video-like data. + First, project the input (aka embedding) + and reshape to b, t, d. + Then apply standard transformer action. + Finally, reshape to image + NEW: use_linear for more efficiency instead of the 1x1 convs + """ + + def __init__( + self, + in_channels, + n_heads, + d_head, + depth=1, + dropout=0.0, + context_dim=None, + disable_self_attn=False, + use_linear=False, + attn_type="softmax-xformers", + use_checkpoint=True, + sdp_backend=None, + **kwargs, + ): + # super().__init__(**kwargs) + super().__init__( + in_channels, + n_heads, + d_head, + depth=depth, + dropout=dropout, + context_dim=context_dim, + disable_self_attn=disable_self_attn, + use_linear=use_linear, + attn_type=attn_type, + use_checkpoint=use_checkpoint, + sdp_backend=sdp_backend, + **kwargs, + ) + + inner_dim = n_heads * d_head + + # temporal crossattention part + self.norm_temporal_ca = Normalize(in_channels) + if not use_linear: + self.proj_in_temporal_ca = nn.Conv2d( + in_channels, inner_dim, kernel_size=1, stride=1, padding=0 + ) + else: + self.proj_in_temporal_ca = nn.Linear(in_channels, inner_dim) + self.transformer_blocks_temporal_ca = 
nn.ModuleList( + [ + BasicTransformerSingleLayerBlock( + inner_dim, + n_heads, + d_head, + dropout=dropout, + context_dim=None, + attn_mode=attn_type, + checkpoint=use_checkpoint, + ) + for d in range(depth) + ] + ) + if not use_linear: + self.proj_out_temporal_ca = zero_module( + nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0) + ) + else: + self.proj_out_temporal_ca = zero_module(nn.Linear(inner_dim, in_channels)) + + self.ST3DCA_ca_type = kwargs.get("ST3DCA_ca_type", "center") + assert self.ST3DCA_ca_type in ["center", 'self', 'center_self'] + + def forward(self, x, context=None): + x = super().forward(x, context) + + # note: if no context is given, cross-attention defaults to self-attention + if not isinstance(context, list): + context = [context] + b, c, t, h, w = x.shape + # cross-frame attention + x = rearrange(x, "b c t h w -> (b t) c h w").contiguous() + x_in = x + x = self.norm_temporal_ca(x) + if not self.use_linear: + x = self.proj_in_temporal_ca(x) + x = rearrange(x, "bt c h w -> bt (h w) c").contiguous() + if self.use_linear: + x = self.proj_in_temporal_ca(x) + + for i, block in enumerate(self.transformer_blocks_temporal_ca): + if i > 0 and len(context) == 1: + i = 0 + # # center frame as anchor + x = rearrange(x, "(b t) hw c -> b t hw c", b=b).contiguous() + attn_anchor_frame_idx = t // 2 # center frame + anchor_frame = x[:, attn_anchor_frame_idx, :, :].contiguous() + anchor_frame = repeat(anchor_frame, "b hw c -> b t hw c", t=t).contiguous() + anchor_frame = rearrange(anchor_frame, "b t hw c -> (b t) hw c").contiguous() + context_texture = anchor_frame + x = rearrange(x, "b t hw c -> (b t) hw c", b=b).contiguous() + if self.ST3DCA_ca_type == 'center': + x = block(x, context=context_texture) + elif self.ST3DCA_ca_type == 'self': + x = block(x, context=x) + elif self.ST3DCA_ca_type == 'center_self': + context_texture = torch.cat([context_texture, x], dim=1) + x = block(x, context=context_texture) + else: + raise 
NotImplementedError + # x = block(x, context_texture) + + if self.use_linear: + x = self.proj_out_temporal_ca(x) + x = rearrange(x, "bt (h w) c -> bt c h w", h=h, w=w).contiguous() + if not self.use_linear: + x = self.proj_out_temporal_ca(x) + x = x + x_in + + x = rearrange(x, "(b t) c h w -> b c t h w", b=b, t=t).contiguous() + + return x + + +def benchmark_attn(): + # Lets define a helpful benchmarking function: + # https://pytorch.org/tutorials/intermediate/scaled_dot_product_attention_tutorial.html + device = "cuda" if torch.cuda.is_available() else "cpu" + import torch.nn.functional as F + import torch.utils.benchmark as benchmark + + def benchmark_torch_function_in_microseconds(f, *args, **kwargs): + t0 = benchmark.Timer( + stmt="f(*args, **kwargs)", globals={"args": args, "kwargs": kwargs, "f": f} + ) + return t0.blocked_autorange().mean * 1e6 + + # Lets define the hyper-parameters of our input + batch_size = 32 + max_sequence_len = 1024 + num_heads = 32 + embed_dimension = 32 + + dtype = torch.float16 + + query = torch.rand( + batch_size, + num_heads, + max_sequence_len, + embed_dimension, + device=device, + dtype=dtype, + ) + key = torch.rand( + batch_size, + num_heads, + max_sequence_len, + embed_dimension, + device=device, + dtype=dtype, + ) + value = torch.rand( + batch_size, + num_heads, + max_sequence_len, + embed_dimension, + device=device, + dtype=dtype, + ) + + print(f"q/k/v shape:", query.shape, key.shape, value.shape) + + # Lets explore the speed of each of the 3 implementations + from torch.backends.cuda import SDPBackend, sdp_kernel + + # Helpful arguments mapper + backend_map = { + SDPBackend.MATH: { + "enable_math": True, + "enable_flash": False, + "enable_mem_efficient": False, + }, + SDPBackend.FLASH_ATTENTION: { + "enable_math": False, + "enable_flash": True, + "enable_mem_efficient": False, + }, + SDPBackend.EFFICIENT_ATTENTION: { + "enable_math": False, + "enable_flash": False, + "enable_mem_efficient": True, + }, + } + + from 
torch.profiler import ProfilerActivity, profile, record_function + + activities = [ProfilerActivity.CPU, ProfilerActivity.CUDA] + + print( + f"The default implementation runs in {benchmark_torch_function_in_microseconds(F.scaled_dot_product_attention, query, key, value):.3f} microseconds" + ) + with profile( + activities=activities, record_shapes=False, profile_memory=True + ) as prof: + with record_function("Default detailed stats"): + for _ in range(25): + o = F.scaled_dot_product_attention(query, key, value) + print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10)) + + print( + f"The math implementation runs in {benchmark_torch_function_in_microseconds(F.scaled_dot_product_attention, query, key, value):.3f} microseconds" + ) + with sdp_kernel(**backend_map[SDPBackend.MATH]): + with profile( + activities=activities, record_shapes=False, profile_memory=True + ) as prof: + with record_function("Math implmentation stats"): + for _ in range(25): + o = F.scaled_dot_product_attention(query, key, value) + print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10)) + + with sdp_kernel(**backend_map[SDPBackend.FLASH_ATTENTION]): + try: + print( + f"The flash attention implementation runs in {benchmark_torch_function_in_microseconds(F.scaled_dot_product_attention, query, key, value):.3f} microseconds" + ) + except RuntimeError: + print("FlashAttention is not supported. 
See warnings for reasons.") + with profile( + activities=activities, record_shapes=False, profile_memory=True + ) as prof: + with record_function("FlashAttention stats"): + for _ in range(25): + o = F.scaled_dot_product_attention(query, key, value) + print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10)) + + with sdp_kernel(**backend_map[SDPBackend.EFFICIENT_ATTENTION]): + try: + print( + f"The memory efficient implementation runs in {benchmark_torch_function_in_microseconds(F.scaled_dot_product_attention, query, key, value):.3f} microseconds" + ) + except RuntimeError: + print("EfficientAttention is not supported. See warnings for reasons.") + with profile( + activities=activities, record_shapes=False, profile_memory=True + ) as prof: + with record_function("EfficientAttention stats"): + for _ in range(25): + o = F.scaled_dot_product_attention(query, key, value) + print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10)) + + +def run_model(model, x, context): + return model(x, context) + + +def benchmark_transformer_blocks(): + device = "cuda" if torch.cuda.is_available() else "cpu" + import torch.utils.benchmark as benchmark + + def benchmark_torch_function_in_microseconds(f, *args, **kwargs): + t0 = benchmark.Timer( + stmt="f(*args, **kwargs)", globals={"args": args, "kwargs": kwargs, "f": f} + ) + return t0.blocked_autorange().mean * 1e6 + + checkpoint = True + compile = False + + batch_size = 32 + h, w = 64, 64 + context_len = 1024 + embed_dimension = 1024 + context_dim = 1024 + d_head = 64 + + transformer_depth = 4 + + n_heads = embed_dimension // d_head + + dtype = torch.float16 + + model_native = SpatialTransformer( + embed_dimension, + n_heads, + d_head, + context_dim=context_dim, + use_linear=True, + use_checkpoint=checkpoint, + attn_type="softmax", + depth=transformer_depth, + sdp_backend=SDPBackend.FLASH_ATTENTION, + ).to(device) + model_efficient_attn = SpatialTransformer( + embed_dimension, + n_heads, + d_head, + 
context_dim=context_dim, + use_linear=True, + depth=transformer_depth, + use_checkpoint=checkpoint, + attn_type="softmax-xformers", + ).to(device) + if not checkpoint and compile: + print("compiling models") + model_native = torch.compile(model_native) + model_efficient_attn = torch.compile(model_efficient_attn) + + x = torch.rand(batch_size, embed_dimension, h, w, device=device, dtype=dtype) + c = torch.rand(batch_size, context_len, context_dim, device=device, dtype=dtype) + + from torch.profiler import ProfilerActivity, profile, record_function + + activities = [ProfilerActivity.CPU, ProfilerActivity.CUDA] + + with torch.autocast("cuda"): + print( + f"The native model runs in {benchmark_torch_function_in_microseconds(model_native.forward, x, c):.3f} microseconds" + ) + print( + f"The efficientattn model runs in {benchmark_torch_function_in_microseconds(model_efficient_attn.forward, x, c):.3f} microseconds" + ) + + print(75 * "+") + print("NATIVE") + print(75 * "+") + torch.cuda.reset_peak_memory_stats() + with profile( + activities=activities, record_shapes=False, profile_memory=True + ) as prof: + with record_function("NativeAttention stats"): + for _ in range(25): + model_native(x, c) + print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10)) + print(torch.cuda.max_memory_allocated() * 1e-9, "GB used by native block") + + print(75 * "+") + print("Xformers") + print(75 * "+") + torch.cuda.reset_peak_memory_stats() + with profile( + activities=activities, record_shapes=False, profile_memory=True + ) as prof: + with record_function("xformers stats"): + for _ in range(25): + model_efficient_attn(x, c) + print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10)) + print(torch.cuda.max_memory_allocated() * 1e-9, "GB used by xformers block") + + +def test01(): + # conv1x1 vs linear + from ..util import count_params + + conv = nn.Conv2d(3, 32, kernel_size=1).cuda() + print(count_params(conv)) + linear = torch.nn.Linear(3, 32).cuda() + 
print(count_params(linear)) + + print(conv.weight.shape) + + # use same initialization + linear.weight = torch.nn.Parameter(conv.weight.squeeze(-1).squeeze(-1)) + linear.bias = torch.nn.Parameter(conv.bias) + + print(linear.weight.shape) + + x = torch.randn(11, 3, 64, 64).cuda() + + xr = rearrange(x, "b c h w -> b (h w) c").contiguous() + print(xr.shape) + out_linear = linear(xr) + print(out_linear.mean(), out_linear.shape) + + out_conv = conv(x) + print(out_conv.mean(), out_conv.shape) + print("done with test01.\n") + + +def test02(): + # try cosine flash attention + import time + + torch.backends.cuda.matmul.allow_tf32 = True + torch.backends.cudnn.allow_tf32 = True + torch.backends.cudnn.benchmark = True + print("testing cosine flash attention...") + DIM = 1024 + SEQLEN = 4096 + BS = 16 + + print(" softmax (vanilla) first...") + model = BasicTransformerBlock( + dim=DIM, + n_heads=16, + d_head=64, + dropout=0.0, + context_dim=None, + attn_mode="softmax", + ).cuda() + try: + x = torch.randn(BS, SEQLEN, DIM).cuda() + tic = time.time() + y = model(x) + toc = time.time() + print(y.shape, toc - tic) + except RuntimeError as e: + # likely oom + print(str(e)) + + print("\n now softmax-xformer ...") + model = BasicTransformerBlock( + dim=DIM, + n_heads=16, + d_head=64, + dropout=0.0, + context_dim=None, + attn_mode="softmax-xformers", + ).cuda() + x = torch.randn(BS, SEQLEN, DIM).cuda() + tic = time.time() + y = model(x) + toc = time.time() + print(y.shape, toc - tic) + print("done with test02.\n") + + +if __name__ == "__main__": + test01() + test02() + + benchmark_attn() + # benchmark_transformer_blocks() + + print("done.") diff --git a/CCEdit-main/sgm/modules/autoencoding/__init__.py b/CCEdit-main/sgm/modules/autoencoding/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/CCEdit-main/sgm/modules/autoencoding/__pycache__/__init__.cpython-39.pyc 
b/CCEdit-main/sgm/modules/autoencoding/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1d72b14109925aa84ee60cbba9a31b60fe0295c Binary files /dev/null and b/CCEdit-main/sgm/modules/autoencoding/__pycache__/__init__.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/modules/autoencoding/losses/__init__.py b/CCEdit-main/sgm/modules/autoencoding/losses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6a3b54f7284ae1be6a23b425f6c296efc1881a5c --- /dev/null +++ b/CCEdit-main/sgm/modules/autoencoding/losses/__init__.py @@ -0,0 +1,246 @@ +from typing import Any, Union + +import torch +import torch.nn as nn +from einops import rearrange +from taming.modules.discriminator.model import NLayerDiscriminator, weights_init +from taming.modules.losses.lpips import LPIPS +from taming.modules.losses.vqperceptual import hinge_d_loss, vanilla_d_loss + +from ....util import default, instantiate_from_config + + +def adopt_weight(weight, global_step, threshold=0, value=0.0): + if global_step < threshold: + weight = value + return weight + + +class LatentLPIPS(nn.Module): + def __init__( + self, + decoder_config, + perceptual_weight=1.0, + latent_weight=1.0, + scale_input_to_tgt_size=False, + scale_tgt_to_input_size=False, + perceptual_weight_on_inputs=0.0, + ): + super().__init__() + self.scale_input_to_tgt_size = scale_input_to_tgt_size + self.scale_tgt_to_input_size = scale_tgt_to_input_size + self.init_decoder(decoder_config) + self.perceptual_loss = LPIPS().eval() + self.perceptual_weight = perceptual_weight + self.latent_weight = latent_weight + self.perceptual_weight_on_inputs = perceptual_weight_on_inputs + + def init_decoder(self, config): + self.decoder = instantiate_from_config(config) + if hasattr(self.decoder, "encoder"): + del self.decoder.encoder + + def forward(self, latent_inputs, latent_predictions, image_inputs, split="train"): + log = dict() + loss = (latent_inputs - 
latent_predictions) ** 2 + log[f"{split}/latent_l2_loss"] = loss.mean().detach() + image_reconstructions = None + if self.perceptual_weight > 0.0: + image_reconstructions = self.decoder.decode(latent_predictions) + image_targets = self.decoder.decode(latent_inputs) + perceptual_loss = self.perceptual_loss( + image_targets.contiguous(), image_reconstructions.contiguous() + ) + loss = ( + self.latent_weight * loss.mean() + + self.perceptual_weight * perceptual_loss.mean() + ) + log[f"{split}/perceptual_loss"] = perceptual_loss.mean().detach() + + if self.perceptual_weight_on_inputs > 0.0: + image_reconstructions = default( + image_reconstructions, self.decoder.decode(latent_predictions) + ) + if self.scale_input_to_tgt_size: + image_inputs = torch.nn.functional.interpolate( + image_inputs, + image_reconstructions.shape[2:], + mode="bicubic", + antialias=True, + ) + elif self.scale_tgt_to_input_size: + image_reconstructions = torch.nn.functional.interpolate( + image_reconstructions, + image_inputs.shape[2:], + mode="bicubic", + antialias=True, + ) + + perceptual_loss2 = self.perceptual_loss( + image_inputs.contiguous(), image_reconstructions.contiguous() + ) + loss = loss + self.perceptual_weight_on_inputs * perceptual_loss2.mean() + log[f"{split}/perceptual_loss_on_inputs"] = perceptual_loss2.mean().detach() + return loss, log + + +class GeneralLPIPSWithDiscriminator(nn.Module): + def __init__( + self, + disc_start: int, + logvar_init: float = 0.0, + pixelloss_weight=1.0, + disc_num_layers: int = 3, + disc_in_channels: int = 3, + disc_factor: float = 1.0, + disc_weight: float = 1.0, + perceptual_weight: float = 1.0, + disc_loss: str = "hinge", + scale_input_to_tgt_size: bool = False, + dims: int = 2, + learn_logvar: bool = False, + regularization_weights: Union[None, dict] = None, + ): + super().__init__() + self.dims = dims + if self.dims > 2: + print( + f"running with dims={dims}. 
This means that for perceptual loss calculation, " + f"the LPIPS loss will be applied to each frame independently. " + ) + self.scale_input_to_tgt_size = scale_input_to_tgt_size + assert disc_loss in ["hinge", "vanilla"] + self.pixel_weight = pixelloss_weight + self.perceptual_loss = LPIPS().eval() + self.perceptual_weight = perceptual_weight + # output log variance + self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init) + self.learn_logvar = learn_logvar + + self.discriminator = NLayerDiscriminator( + input_nc=disc_in_channels, n_layers=disc_num_layers, use_actnorm=False + ).apply(weights_init) + self.discriminator_iter_start = disc_start + self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss + self.disc_factor = disc_factor + self.discriminator_weight = disc_weight + self.regularization_weights = default(regularization_weights, {}) + + def get_trainable_parameters(self) -> Any: + return self.discriminator.parameters() + + def get_trainable_autoencoder_parameters(self) -> Any: + if self.learn_logvar: + yield self.logvar + yield from () + + def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): + if last_layer is not None: + nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] + else: + nll_grads = torch.autograd.grad( + nll_loss, self.last_layer[0], retain_graph=True + )[0] + g_grads = torch.autograd.grad( + g_loss, self.last_layer[0], retain_graph=True + )[0] + + d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) + d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() + d_weight = d_weight * self.discriminator_weight + return d_weight + + def forward( + self, + regularization_log, + inputs, + reconstructions, + optimizer_idx, + global_step, + last_layer=None, + split="train", + weights=None, + ): + if self.scale_input_to_tgt_size: + inputs = torch.nn.functional.interpolate( + inputs, reconstructions.shape[2:], 
mode="bicubic", antialias=True + ) + + if self.dims > 2: + inputs, reconstructions = map( + lambda x: rearrange(x, "b c t h w -> (b t) c h w"), + (inputs, reconstructions), + ) + + rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) + if self.perceptual_weight > 0: + p_loss = self.perceptual_loss( + inputs.contiguous(), reconstructions.contiguous() + ) + rec_loss = rec_loss + self.perceptual_weight * p_loss + + nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar + weighted_nll_loss = nll_loss + if weights is not None: + weighted_nll_loss = weights * nll_loss + weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0] + nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] + + # now the GAN part + if optimizer_idx == 0: + # generator update + logits_fake = self.discriminator(reconstructions.contiguous()) + g_loss = -torch.mean(logits_fake) + + if self.disc_factor > 0.0: + try: + d_weight = self.calculate_adaptive_weight( + nll_loss, g_loss, last_layer=last_layer + ) + except RuntimeError: + assert not self.training + d_weight = torch.tensor(0.0) + else: + d_weight = torch.tensor(0.0) + + disc_factor = adopt_weight( + self.disc_factor, global_step, threshold=self.discriminator_iter_start + ) + loss = weighted_nll_loss + d_weight * disc_factor * g_loss + log = dict() + for k in regularization_log: + if k in self.regularization_weights: + loss = loss + self.regularization_weights[k] * regularization_log[k] + log[f"{split}/{k}"] = regularization_log[k].detach().mean() + + log.update( + { + "{}/total_loss".format(split): loss.clone().detach().mean(), + "{}/logvar".format(split): self.logvar.detach(), + "{}/nll_loss".format(split): nll_loss.detach().mean(), + "{}/rec_loss".format(split): rec_loss.detach().mean(), + "{}/d_weight".format(split): d_weight.detach(), + "{}/disc_factor".format(split): torch.tensor(disc_factor), + "{}/g_loss".format(split): g_loss.detach().mean(), + } + ) + + return loss, log + + if optimizer_idx == 
1: + # second pass for discriminator update + logits_real = self.discriminator(inputs.contiguous().detach()) + logits_fake = self.discriminator(reconstructions.contiguous().detach()) + + disc_factor = adopt_weight( + self.disc_factor, global_step, threshold=self.discriminator_iter_start + ) + d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) + + log = { + "{}/disc_loss".format(split): d_loss.clone().detach().mean(), + "{}/logits_real".format(split): logits_real.detach().mean(), + "{}/logits_fake".format(split): logits_fake.detach().mean(), + } + return d_loss, log diff --git a/CCEdit-main/sgm/modules/autoencoding/regularizers/__init__.py b/CCEdit-main/sgm/modules/autoencoding/regularizers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8de3212d3be4f58e621e8caa6e31dd8dc32b6929 --- /dev/null +++ b/CCEdit-main/sgm/modules/autoencoding/regularizers/__init__.py @@ -0,0 +1,53 @@ +from abc import abstractmethod +from typing import Any, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ....modules.distributions.distributions import DiagonalGaussianDistribution + + +class AbstractRegularizer(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, dict]: + raise NotImplementedError() + + @abstractmethod + def get_trainable_parameters(self) -> Any: + raise NotImplementedError() + + +class DiagonalGaussianRegularizer(AbstractRegularizer): + def __init__(self, sample: bool = True): + super().__init__() + self.sample = sample + + def get_trainable_parameters(self) -> Any: + yield from () + + def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, dict]: + log = dict() + posterior = DiagonalGaussianDistribution(z) + if self.sample: + z = posterior.sample() + else: + z = posterior.mode() + kl_loss = posterior.kl() + kl_loss = torch.sum(kl_loss) / kl_loss.shape[0] + log["kl_loss"] = kl_loss + return z, log + + +def 
measure_perplexity(predicted_indices, num_centroids): + # src: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py + # eval cluster perplexity. when perplexity == num_embeddings then all clusters are used exactly equally + encodings = ( + F.one_hot(predicted_indices, num_centroids).float().reshape(-1, num_centroids) + ) + avg_probs = encodings.mean(0) + perplexity = (-(avg_probs * torch.log(avg_probs + 1e-10)).sum()).exp() + cluster_use = torch.sum(avg_probs > 0) + return perplexity, cluster_use diff --git a/CCEdit-main/sgm/modules/autoencoding/regularizers/__pycache__/__init__.cpython-39.pyc b/CCEdit-main/sgm/modules/autoencoding/regularizers/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9bd5ab1334853ab9d49e98f4b8dbaee838ec6ee3 Binary files /dev/null and b/CCEdit-main/sgm/modules/autoencoding/regularizers/__pycache__/__init__.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/__init__.cpython-39.pyc b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b31c81f6b4cd0455e1de68062cfe27fd59774466 Binary files /dev/null and b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/__init__.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/denoiser_scaling.cpython-39.pyc b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/denoiser_scaling.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..311b1f3ce11ccd966b5f6ff053657782a9e1dbb2 Binary files /dev/null and b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/denoiser_scaling.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/model.cpython-39.pyc b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/model.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..4ba87ae5bad4055996ac7185a792b4e1e3f07be1 Binary files /dev/null and b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/model.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/openaimodel.cpython-39.pyc b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/openaimodel.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55ea7f47ae9f94132f92eebe64159f54b964ccaf Binary files /dev/null and b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/openaimodel.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/sampling_utils.cpython-39.pyc b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/sampling_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..004568e9e920df40f40d7d9af8fe2c12d1e70858 Binary files /dev/null and b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/sampling_utils.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/modules/diffusionmodules/controlmodel.py b/CCEdit-main/sgm/modules/diffusionmodules/controlmodel.py new file mode 100644 index 0000000000000000000000000000000000000000..adfdde5c951b5cbd7f6f6f953f33b0ebafd73c77 --- /dev/null +++ b/CCEdit-main/sgm/modules/diffusionmodules/controlmodel.py @@ -0,0 +1,727 @@ +import torch +import torch as th +import torch.nn as nn +import torch.nn.functional as F +from einops import repeat, rearrange +import einops +from ...modules.diffusionmodules.util import ( + conv_nd, + timestep_embedding, + zero_module, +) +from ...util import default, exists, instantiate_from_config + +from sgm.modules.attention import BasicTransformerBlock, SpatialTransformer, SpatialTransformerCA, SpatialTransformer3DCA +from sgm.modules.diffusionmodules.openaimodel import ( + spatial_temporal_forward, + TimestepEmbedSequential, + UNetModel, + UNetModel3D, +) + + +class ControlNet3D(UNetModel3D): + """A locked copy branch of UNetModel3D that 
processes task-specific conditions. + The model weights are initilized from the weights of the pretrained UNetModel3D. + The additional input_hint_block is used to transform the input condition into the + same dimension as the output of the vae-encoder + """ + + def __init__( + self, hint_channels, control_scales, disable_temporal=False, *args, **kwargs + ): + kwargs["out_channels"] = kwargs["in_channels"] # this is unused actually + self.control_scales = control_scales + # Note: disable_temporal means only conduct 2d operation on the center frame + self.disable_temporal = disable_temporal + super().__init__(*args, **kwargs) + + model_channels = kwargs["model_channels"] + channel_mult = kwargs["channel_mult"] + del self.output_blocks + del self.out + del self.out_temporal + if hasattr(self, "id_predictor"): + del self.id_predictor + del self.id_predictor_temporal + + self.input_hint_block = TimestepEmbedSequential( + conv_nd(2, hint_channels, 16, 3, padding=1), + nn.SiLU(), + conv_nd(2, 16, 16, 3, padding=1), + nn.SiLU(), + conv_nd(2, 16, 32, 3, padding=1), + nn.SiLU(), + conv_nd(2, 32, 32, 3, padding=1), + nn.SiLU(), + conv_nd(2, 32, 96, 3, padding=1), + nn.SiLU(), + conv_nd(2, 96, 96, 3, padding=1), + nn.SiLU(), + conv_nd(2, 96, 256, 3, padding=1), + nn.SiLU(), + zero_module(conv_nd(2, 256, model_channels, 3, padding=1)), + ) + + # this is for the transformation of hint + self.zero_convs = nn.ModuleList([self.make_zero_conv(model_channels)]) + if disable_temporal: + self.zero_convs_temporal = [None] + else: + self.zero_convs_temporal = nn.ModuleList( + [self.make_zero_conv(model_channels, dims=1)] + ) + + input_block_chans = [model_channels] + ch = model_channels + for level, mult in enumerate(channel_mult): + for nr in range(self.num_res_blocks[level]): + ch = mult * model_channels + self.zero_convs.append(self.make_zero_conv(ch)) + if disable_temporal: + self.zero_convs_temporal.append(None) + else: + self.zero_convs_temporal.append(self.make_zero_conv(ch, 
dims=1)) + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + self.zero_convs.append(self.make_zero_conv(ch)) + if disable_temporal: + self.zero_convs_temporal.append(None) + else: + self.zero_convs_temporal.append(self.make_zero_conv(ch, dims=1)) + + self.middle_block_out = self.make_zero_conv(ch) + if disable_temporal: + self.middle_block_out_temporal = None + else: + self.middle_block_out_temporal = self.make_zero_conv(ch, dims=1) + + if disable_temporal: + self.setup_disbale_temporal() + + def setup_disbale_temporal(self): + from sgm.util import torch_dfs + from sgm.modules.diffusionmodules.openaimodel import ( + ResBlock3D, + Upsample3D, + Downsample3D, + ) + from sgm.modules.attention import SpatialTransformer3D + + self.input_blocks_temporal = None + all_modules = torch_dfs(self) + for module in all_modules: + if isinstance(module, ResBlock3D): + module.in_layers_temporal = None + module.out_layers_temporal = None + if hasattr(module, "skip_connection_temporal"): + module.skip_connection_temporal = None + if hasattr(module, "alpha_temporal1"): + module.alpha_temporal1 = None + if hasattr(module, "alpha_temporal2"): + module.alpha_temporal2 = None + if isinstance(module, SpatialTransformer3D): + del module.norm_temporal + del module.proj_in_temporal + del module.transformer_blocks_temporal + del module.proj_out_temporal + if hasattr(module, "alpha_temporal"): + del module.alpha_temporal + if isinstance(module, Downsample3D) or isinstance(module, Upsample3D): + if hasattr(module, "conv_temporal"): + module.conv_temporal = None + return + + def make_zero_conv(self, channels, dims=2): + return TimestepEmbedSequential( + zero_module(conv_nd(dims, channels, channels, 1, padding=0)) + ) + + def forward(self, x, hint, timesteps=None, context=None, y=None, **kwargs): + if self.disable_temporal: + x = x[:, :, x.shape[2] // 2, :, :].unsqueeze(2) + assert (y is not None) == ( + self.num_classes is not None + ), "must specify y if and only if the model 
is class-conditional" + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + t_emb = t_emb.to(self.input_hint_block[0].weight.dtype) + emb = self.time_embed(t_emb) + + if self.num_classes is not None: + assert y.shape[0] == x.shape[0] + emb = emb + self.label_emb(y) + + context = ( + context.to(self.input_hint_block[0].weight.dtype) + if context is not None + else None + ) + guided_hint = self.input_hint_block(hint, emb, context) + outs = [] + + h = x + for module, zero_conv, zero_conv_temporal in zip( + self.input_blocks, self.zero_convs, self.zero_convs_temporal + ): + if guided_hint is not None: + h = spatial_temporal_forward( + h, module, self.input_blocks_temporal, emb=emb, context=context + ) + frame_length = h.shape[2] + guided_hint = repeat( + guided_hint, "b c h w -> b c t h w", t=frame_length + ) + h += guided_hint + guided_hint = None + else: + h = module(h, emb, context) + outs.append(spatial_temporal_forward(h, zero_conv, zero_conv_temporal)) + h = self.middle_block(h, emb, context) + outs.append( + spatial_temporal_forward( + h, self.middle_block_out, self.middle_block_out_temporal + ) + ) + control_scales = [self.control_scales for _ in range(len(outs))] + control = [ + c * scale for c, scale in zip(outs, control_scales) + ] # Adjusting the strength of control + + return control + + +# ----------------------------------------------------- +# This is used for TV2V (text-video-to-video) generation +class ControlNet2D(UNetModel): + def __init__(self, hint_channels, control_scales, no_add_x=False, set_input_hint_block_as_identity=False, *args, **kwargs): + kwargs["out_channels"] = kwargs["in_channels"] # this is unused actually + super().__init__(*args, **kwargs) + + self.control_scales = control_scales + model_channels = kwargs["model_channels"] + channel_mult = kwargs["channel_mult"] + del self.output_blocks + del self.out + if hasattr(self, "id_predictor"): + del self.id_predictor + + self.set_input_hint_block_as_identity = 
set_input_hint_block_as_identity + if set_input_hint_block_as_identity: + self.input_hint_block = TimestepEmbedSequential( + nn.Identity() + ) + # though set input_hint_block as identity, + else: + self.input_hint_block = TimestepEmbedSequential( + conv_nd(2, hint_channels, 16, 3, padding=1), + nn.SiLU(), + conv_nd(2, 16, 16, 3, padding=1), + nn.SiLU(), + conv_nd(2, 16, 32, 3, padding=1, stride=2), + nn.SiLU(), + conv_nd(2, 32, 32, 3, padding=1), + nn.SiLU(), + conv_nd(2, 32, 96, 3, padding=1, stride=2), + nn.SiLU(), + conv_nd(2, 96, 96, 3, padding=1), + nn.SiLU(), + conv_nd(2, 96, 256, 3, padding=1, stride=2), + nn.SiLU(), + zero_module(conv_nd(2, 256, model_channels, 3, padding=1)) + ) + + # this is for the transformation of hint + self.zero_convs = nn.ModuleList([self.make_zero_conv(model_channels)]) + + input_block_chans = [model_channels] + ch = model_channels + for level, mult in enumerate(channel_mult): + for nr in range(self.num_res_blocks[level]): + ch = mult * model_channels + self.zero_convs.append(self.make_zero_conv(ch)) + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + self.zero_convs.append(self.make_zero_conv(ch)) + + self.middle_block_out = self.make_zero_conv(ch) + self.no_add_x = no_add_x + + def make_zero_conv(self, channels, dims=2): + return TimestepEmbedSequential(zero_module(conv_nd(dims, channels, channels, 1, padding=0))) + + def forward(self, x, hint, timesteps=None, context=None, y=None, **kwargs): + assert (y is not None) == (self.num_classes is not None), \ + "must specify y if and only if the model is class-conditional" + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + # t_emb = t_emb.to(self.input_hint_block[0].weight.dtype) + t_emb = t_emb.to(self.input_blocks[0][0].weight.dtype) + emb = self.time_embed(t_emb) + + if x.dim() == 5: + is_video = True + n_frames = x.shape[2] + x = einops.rearrange(x, 'b c t h w -> (b t) c h w') + hint = einops.rearrange(hint, 'b c t h w -> (b t) c h 
w') + emb = einops.repeat(emb, 'b d -> (b t) d', t=n_frames) + context = einops.repeat(context, 'b n d -> (b t) n d', t=n_frames) if context is not None else None + else: + is_video = False + + if self.num_classes is not None: + if is_video: + raise NotImplementedError("class-conditional video generation is not supported yet") + assert y.shape[0] == x.shape[0] + emb = emb + self.label_emb(y) + + hint = hint.to(self.input_blocks[0][0].weight.dtype) + emb = emb.to(self.input_blocks[0][0].weight.dtype) + context = context.to(self.input_blocks[0][0].weight.dtype) if context is not None else None + # hint = hint.to(self.input_hint_block[0].weight.dtype) + # emb = emb.to(self.input_hint_block[0].weight.dtype) + # context = context.to(self.input_hint_block[0].weight.dtype) if context is not None else None + guided_hint = self.input_hint_block(hint, emb, context) + if self.set_input_hint_block_as_identity: + guided_hint = self.input_blocks[0](guided_hint, emb, context) + outs = [] + + # h = x.type(self.dtype) + h = x + # if self.no_add_x: + # h = torch.zeros_like(x) + # else: + # h = x + for module, zero_conv in zip(self.input_blocks, self.zero_convs): + if guided_hint is not None: + # h = module(h, emb, context) + # h += guided_hint + if self.no_add_x: + h = guided_hint + else: + h = module(h, emb, context) + h += guided_hint + guided_hint = None + else: + h = module(h, emb, context) + outs.append(zero_conv(h, emb, context)) + + h = self.middle_block(h, emb, context) + outs.append(self.middle_block_out(h, emb, context)) + + # Adjusting the strength of control + control_scales = [self.control_scales for _ in range(len(outs))] + control = [c * scale for c, scale in zip(outs, control_scales)] + + if is_video: + control = [einops.rearrange(each, '(b t) c h w -> b c t h w', t=n_frames) for each in control] + + return control + + +class ControlledUNetModel3DTV2V(UNetModel3D): + """A trainable copy branch of UNetModel3D that processes the video inputs. 
+ The model weights are initilized from the weights of the pretrained UNetModel3D. + """ + + def __init__(self, controlnet_config, *args, **kwargs): + super().__init__(*args, **kwargs) + self.controlnet = instantiate_from_config(controlnet_config) + + controlnet_img_config = kwargs.get("controlnet_img_config", None) + if controlnet_img_config is not None: + self.controlnet_img = instantiate_from_config(controlnet_img_config) + + # reference-aware condition + crossframe_type = kwargs.get("crossframe_type", None) + if crossframe_type is not None: + assert hasattr(self, 'controlnet_img'), "must have controlnet_img if crossframe_type is not None" + assert crossframe_type == 'reference', "only support reference-aware condition" + self.crossframe_type = crossframe_type + # register hook in controlnet_img + self.bank_attn = [] + for name, module in self.controlnet_img.named_modules(): + if isinstance(module, SpatialTransformer): + print('registering attention hook for', name) + module.register_forward_hook(self._get_attn_hook) + + # hack the attention function in unet + def hacked_spatialtransformer_inner_forward(self, x, context=None): + assert hasattr(self, 'm_control'), "must have m_control if crossframe_type is not None" + anchor_frame = self.m_control + + # x = super().forward(x, context) + # note: if no context is given, cross-attention defaults to self-attention + if not isinstance(context, list): + context = [context] + b, c, t, h, w = x.shape + # spatial attention + x = rearrange(x, "b c t h w -> (b t) c h w").contiguous() + x_in = x + x = self.norm(x) + if not self.use_linear: + x = self.proj_in(x) + x = rearrange(x, "bt c h w -> bt (h w) c").contiguous() + if self.use_linear: + x = self.proj_in(x) + + for i, block in enumerate(self.transformer_blocks): + if i > 0 and len(context) == 1: + i = 0 + context_i = ( + repeat(context[i], "b l c -> (b t) l c", t=t).contiguous() + if context[i] is not None + else None + ) + x = block(x, context=context_i) + if 
self.use_linear: + x = self.proj_out(x) + x = rearrange(x, "bt (h w) c -> bt c h w", h=h, w=w).contiguous() + if not self.use_linear: + x = self.proj_out(x) + x = x + x_in + + x = rearrange(x, "(b t) c h w -> (b h w) c t", t=t).contiguous() + # temporal attention + if hasattr(self, "norm_temporal"): # temporal operation exist + x_in = x + x = self.norm_temporal(x) + if not self.use_linear: + x = self.proj_in_temporal(x) + x = rearrange(x, "bhw c t->bhw t c").contiguous() + if self.use_linear: + x = self.proj_in_temporal(x) + for i, block in enumerate(self.transformer_blocks_temporal): + if i > 0 and len(context) == 1: + i = 0 # use same context for each block + # if context[i] != None: + context_i = ( + repeat(context[i], "b l c -> (b h w) l c", h=h, w=w).contiguous() + if context[i] is not None + else None + ) + # x = block(x, context=context_i) + if self.disable_temporal_text_ca: + x = block(x, context=x) + else: + x = block(x, context=context_i) + if self.use_linear: + x = self.proj_out_temporal(x) + x = rearrange(x, "bhw t c -> bhw c t").contiguous() + if not self.use_linear: + x = self.proj_out_temporal(x) + if hasattr(self, "alpha_temporal"): + x = self.alpha_temporal * x_in + (1 - self.alpha_temporal) * x + else: + x = x_in + x + + x = rearrange(x, "(b h w) c t -> b c t h w", h=h, w=w).contiguous() + + # note: if no context is given, cross-attention defaults to self-attention + if not isinstance(context, list): + context = [context] + b, c, t, h, w = x.shape + # cross-frame attention + x = rearrange(x, "b c t h w -> (b t) c h w").contiguous() + x_in = x + x = self.norm_temporal_ca(x) + if not self.use_linear: + x = self.proj_in_temporal_ca(x) + x = rearrange(x, "bt c h w -> bt (h w) c").contiguous() + if self.use_linear: + x = self.proj_in_temporal_ca(x) + + for i, block in enumerate(self.transformer_blocks_temporal_ca): + if i > 0 and len(context) == 1: + i = 0 + # # center frame as anchor + x = rearrange(x, "(b t) hw c -> b t hw c", b=b).contiguous() + # 
attn_anchor_frame_idx = t // 2 # center frame + # anchor_frame = x[:, attn_anchor_frame_idx, :, :].contiguous() + # anchor_frame = repeat(anchor_frame, "b hw c -> b t hw c", t=t).contiguous() + + # anchor_frame = repeat(anchor_frame, "b c h w -> b t (h w) c", t=t).contiguous() + anchor_frame = repeat(anchor_frame, "b c h w -> b t h w c", t=t).contiguous() + anchor_frame = rearrange(anchor_frame, "b t h w c -> b t (h w) c").contiguous() + anchor_frame = rearrange(anchor_frame, "b t hw c -> (b t) hw c").contiguous() + context_texture = anchor_frame + x = rearrange(x, "b t hw c -> (b t) hw c", b=b).contiguous() + x = block(x, context_texture) + + if self.use_linear: + x = self.proj_out_temporal_ca(x) + x = rearrange(x, "bt (h w) c -> bt c h w", h=h, w=w).contiguous() + if not self.use_linear: + x = self.proj_out_temporal_ca(x) + # print(x.min(), x.max()) #! debug + x = x + x_in + + x = rearrange(x, "(b t) c h w -> b c t h w", b=b, t=t).contiguous() + + return x + + all_modules = torch_dfs(self) + st_modules = [module for module in all_modules if isinstance(module, SpatialTransformer3DCA)] # st = spatialtransformer + + # hard code, the first 7 st modules are used for reference aware cross-frame attention + for i, module in enumerate(st_modules[:7]): + if getattr(module, 'original_inner_forward', None) is None: + module.original_inner_forward = module.forward + module.forward = hacked_spatialtransformer_inner_forward.__get__(module, SpatialTransformer3DCA) + # module.attn1_type = spatial_transformer_attn1_type + + def forward( + self, + x, + timesteps=None, + context=None, + y=None, + control=None, + img_control=None, + only_mid_control=False, + **kwargs + ): + # 1. If img_control is not None, img_control would be added on the center frame of the video. + # 2. Note that control (lineart maps or something) would conduct on the whole video, + # which controls the global motion or structure. 
+ # But img_control would only conduct on the center frame, which controls the local texture. + # The texture introduced from img_control would spread to the whole video through the temporal blocks. + # 3. Note that control is added in the decoder, while img_control is added in the encoder. + + if hasattr(self, 'crossframe_type') and self.crossframe_type == 'reference': + all_modules = torch_dfs(self) + st_modules = [module for module in all_modules if isinstance(module, SpatialTransformer3DCA)] # st = spatialtransformer + + # control_attn = self.bank_attn + # st_modules = st_modules[:len(control_attn)] + # for (module, m_control) in zip(st_modules, control_attn): + # module.m_control = m_control + st_modules = st_modules[:7] + assert len(self.bank_attn) == 7, "hard code, the first 7 st modules are used for reference aware cross-frame attention" \ + "and the number in self.bank_attn is {} now".format(len(self.bank_attn)) + # for (module, m_control) in zip(st_modules, self.bank_attn): + for module in st_modules: + module.m_control = self.bank_attn.pop(0) + assert len(self.bank_attn) == 0, "self.bank_attn should be empty now" + + assert (y is not None) == ( + self.num_classes is not None + ), "must specify y if and only if the model is class-conditional" + hs = [] + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + t_emb = t_emb.to(self.input_blocks_temporal[0].weight.dtype) + emb = self.time_embed(t_emb) + + if self.num_classes is not None: + assert y.shape[0] == x.shape[0] + emb = emb + self.label_emb(y) + context = ( + context.to(self.input_blocks_temporal[0].weight.dtype) + if context is not None + else None + ) + h = x + for layer, module in enumerate(self.input_blocks): + if layer == 0: + h = spatial_temporal_forward( + h, module, self.input_blocks_temporal, emb=emb, context=context + ) + else: + h = module(h, emb, context) + if (not only_mid_control) and (img_control is not None): + h[:,:,h.shape[2]//2,:,:] += img_control.pop(0) + 
hs.append(h) + + h = self.middle_block(h, emb, context) + if img_control is not None: + h[:,:,h.shape[2]//2,:,:] += img_control.pop(0) + if control is not None: + h = h + control.pop() # B C T H W + + for i, module in enumerate(self.output_blocks): + if only_mid_control or control is None: + h = th.cat([h, hs.pop()], dim=1) + else: + h = th.cat([h, hs.pop() + control.pop()], dim=1) + h = module(h, emb, context) + + h = h.type(x.dtype) + if self.predict_codebook_ids: + assert False, "not supported anymore. what the f*** are you doing?" # niubi + else: + return spatial_temporal_forward(h, self.out, self.out_temporal) + + def _get_attn_hook(self, module, input, output): + self.bank_attn.append(output) + + +class ControlledUNetModel3DTV2VInterpolate(ControlledUNetModel3DTV2V): + def forward( + self, + x, + timesteps=None, + context=None, + y=None, + control=None, + interpolate_control=None, + only_mid_control=False, + **kwargs + ): + assert control is not None + assert interpolate_control is not None + interpolate_control_first, interpolate_control_last = interpolate_control + + assert (y is not None) == ( + self.num_classes is not None + ), "must specify y if and only if the model is class-conditional" + hs = [] + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + t_emb = t_emb.to(self.input_blocks_temporal[0].weight.dtype) + emb = self.time_embed(t_emb) + + if self.num_classes is not None: + assert y.shape[0] == x.shape[0] + emb = emb + self.label_emb(y) + context = ( + context.to(self.input_blocks_temporal[0].weight.dtype) + if context is not None + else None + ) + h = x + for layer, module in enumerate(self.input_blocks): + if layer == 0: + h = spatial_temporal_forward( + h, module, self.input_blocks_temporal, emb=emb, context=context + ) + else: + h = module(h, emb, context) + if (not only_mid_control): + h[:,:,0,:,:] += interpolate_control_first.pop(0) + h[:,:,-1,:,:] += interpolate_control_last.pop(0) + hs.append(h) + + h = 
self.middle_block(h, emb, context) + h[:,:,0,:,:] += interpolate_control_first.pop(0) + h[:,:,-1,:,:] += interpolate_control_last.pop(0) + h = h + control.pop() # B C T H W + + for i, module in enumerate(self.output_blocks): + if only_mid_control or control is None: + h = th.cat([h, hs.pop()], dim=1) + else: + h = th.cat([h, hs.pop() + control.pop()], dim=1) + h = module(h, emb, context) + + h = h.type(x.dtype) + if self.predict_codebook_ids: + assert False, "not supported anymore. what the f*** are you doing?" # niubi + else: + return spatial_temporal_forward(h, self.out, self.out_temporal) + + +class ControlledUNetModel2DRAIG(UNetModel): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + controlnet_img_config = kwargs.get("controlnet_img_config", None) + if controlnet_img_config is not None: + self.controlnet_img = instantiate_from_config(controlnet_img_config) + + # reference-aware condition + enable_ref_attn = kwargs.get("enable_ref_attn", False) + self.enable_ref_attn = enable_ref_attn + if enable_ref_attn: + assert hasattr(self, 'controlnet_img'), "must have controlnet_img if crossframe_type is not None" + # register hook in controlnet_img + self.bank_attn = [] + for name, module in self.controlnet_img.named_modules(): + if isinstance(module, SpatialTransformer): + print('registering attention hook for', name) + module.register_forward_hook(self._get_attn_hook) + + def forward( + self, + x, + timesteps=None, + context=None, + y=None, + control=None, + img_control=None, + only_mid_control=False, + **kwargs + ): + assert img_control == None, 'img_control should not shown here, features needed are hooked during the forward process' + + if self.enable_ref_attn: + all_modules = torch_dfs(self) + st_modules = [module for module in all_modules if isinstance(module, SpatialTransformerCA)] # st = spatialtransformer + + # hard code, might be changed later + mapping_dict = { + 0: 0, + 1: 1, + 2: 2, + 3: 3, + 4: 4, + 5: 5, + 6: 6, + 7: 5, + 8: 
5, + 9: 4, + 10: 3, + 11: 3, + 12: 2, + 13: 1, + 14: 1, + 15: 0, + } + + for idx, module in enumerate(st_modules): + module.ref_control = self.bank_attn[mapping_dict[idx]] + + self.bank_attn = [] + + assert (y is not None) == (self.num_classes is not None), "must specify y if and only if the model is class-conditional" + hs = [] + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + t_emb = t_emb.to(self.output_blocks[0][0].in_layers[0].weight.dtype) + emb = self.time_embed(t_emb) + + if self.num_classes is not None: + assert y.shape[0] == x.shape[0] + emb = emb + self.label_emb(y) + context = ( + context.to(self.output_blocks[0][0].in_layers[0].weight.dtype) + if context is not None + else None + ) + h = x + for layer, module in enumerate(self.input_blocks): + h = module(h, emb, context) + hs.append(h) + + h = self.middle_block(h, emb, context) + if control is not None: + h = h + control.pop() # B C T H W + + for i, module in enumerate(self.output_blocks): + if only_mid_control or control is None: + h = th.cat([h, hs.pop()], dim=1) + else: + h = th.cat([h, hs.pop() + control.pop()], dim=1) + h = module(h, emb, context) + + h = h.type(x.dtype) + if self.predict_codebook_ids: + assert False, "not supported anymore. what the f*** are you doing?" 
+ else: + return self.out(h) + + def _get_attn_hook(self, module, input, output): + self.bank_attn.append(output) + + +# DFS Search for Torch.nn.Module, Written by Lvmin +def torch_dfs(model: torch.nn.Module): + result = [model] + for child in model.children(): + result += torch_dfs(child) + return result \ No newline at end of file diff --git a/CCEdit-main/sgm/modules/diffusionmodules/denoiser.py b/CCEdit-main/sgm/modules/diffusionmodules/denoiser.py new file mode 100644 index 0000000000000000000000000000000000000000..4e452b12781e9e9ed8a99f0f0cbb169cf46937b3 --- /dev/null +++ b/CCEdit-main/sgm/modules/diffusionmodules/denoiser.py @@ -0,0 +1,75 @@ +import torch.nn as nn + +from ...util import append_dims, instantiate_from_config + + +class Denoiser(nn.Module): + def __init__(self, weighting_config, scaling_config): + super().__init__() + + self.weighting = instantiate_from_config(weighting_config) + self.scaling = instantiate_from_config(scaling_config) + + def possibly_quantize_sigma(self, sigma): + return sigma + + def possibly_quantize_c_noise(self, c_noise): + return c_noise + + def w(self, sigma): + return self.weighting(sigma) + + def __call__(self, network, input, sigma, cond): + sigma = self.possibly_quantize_sigma(sigma) + sigma_shape = sigma.shape + sigma = append_dims(sigma, input.ndim) + c_skip, c_out, c_in, c_noise = self.scaling(sigma) + c_noise = self.possibly_quantize_c_noise(c_noise.reshape(sigma_shape)) + # import pdb; pdb.set_trace() + # import torchvision, einops + # tmp = einops.rearrange(input, 'b c t h w -> (b t) c h w') + # torchvision.utils.save_image(tmp[:,:3], 'tmp.png', normalize=True) + ''' + input * c_in: noised_input multiplied by the coefficient of the corresponding t. (not sure) + c_in: torch.Size([2, 1, 1, 1, 1]); 0.0683, 0.0683 + c_noise: the step t. e.g., tensor([451], device='cuda:0') + cond: the condition. e.g., cond['crossattn']: [1, 77, 1024] + c_out: e.g., -1.3762. Don't know why multiply this and why it's negative. 
+ c_skip: e.g., 1.0. Don't know why multiply this. + ''' + return network(input * c_in, c_noise, cond) * c_out + input * c_skip + + +class DiscreteDenoiser(Denoiser): + def __init__( + self, + weighting_config, + scaling_config, + num_idx, + discretization_config, + do_append_zero=False, + quantize_c_noise=True, + flip=True, + ): + super().__init__(weighting_config, scaling_config) + sigmas = instantiate_from_config(discretization_config)( + num_idx, do_append_zero=do_append_zero, flip=flip + ) + self.register_buffer("sigmas", sigmas) + self.quantize_c_noise = quantize_c_noise + + def sigma_to_idx(self, sigma): + dists = sigma - self.sigmas[:, None] + return dists.abs().argmin(dim=0).view(sigma.shape) + + def idx_to_sigma(self, idx): + return self.sigmas[idx] + + def possibly_quantize_sigma(self, sigma): + return self.idx_to_sigma(self.sigma_to_idx(sigma)) + + def possibly_quantize_c_noise(self, c_noise): + if self.quantize_c_noise: + return self.sigma_to_idx(c_noise) + else: + return c_noise \ No newline at end of file diff --git a/CCEdit-main/sgm/modules/diffusionmodules/denoiser_scaling.py b/CCEdit-main/sgm/modules/diffusionmodules/denoiser_scaling.py new file mode 100644 index 0000000000000000000000000000000000000000..f8a2ac6732ea78f1030b21bebd14063d52ac2a82 --- /dev/null +++ b/CCEdit-main/sgm/modules/diffusionmodules/denoiser_scaling.py @@ -0,0 +1,31 @@ +import torch + + +class EDMScaling: + def __init__(self, sigma_data=0.5): + self.sigma_data = sigma_data + + def __call__(self, sigma): + c_skip = self.sigma_data**2 / (sigma**2 + self.sigma_data**2) + c_out = sigma * self.sigma_data / (sigma**2 + self.sigma_data**2) ** 0.5 + c_in = 1 / (sigma**2 + self.sigma_data**2) ** 0.5 + c_noise = 0.25 * sigma.log() + return c_skip, c_out, c_in, c_noise + + +class EpsScaling: + def __call__(self, sigma): + c_skip = torch.ones_like(sigma, device=sigma.device) + c_out = -sigma + c_in = 1 / (sigma**2 + 1.0) ** 0.5 + c_noise = sigma.clone() + return c_skip, c_out, c_in, 
c_noise + + +class VScaling: + def __call__(self, sigma): + c_skip = 1.0 / (sigma**2 + 1.0) + c_out = -sigma / (sigma**2 + 1.0) ** 0.5 + c_in = 1.0 / (sigma**2 + 1.0) ** 0.5 + c_noise = sigma.clone() + return c_skip, c_out, c_in, c_noise diff --git a/CCEdit-main/sgm/modules/diffusionmodules/denoiser_weighting.py b/CCEdit-main/sgm/modules/diffusionmodules/denoiser_weighting.py new file mode 100644 index 0000000000000000000000000000000000000000..b8b03ca58f17ea3d7374f4bbb7bf1d2994755e00 --- /dev/null +++ b/CCEdit-main/sgm/modules/diffusionmodules/denoiser_weighting.py @@ -0,0 +1,24 @@ +import torch + + +class UnitWeighting: + def __call__(self, sigma): + return torch.ones_like(sigma, device=sigma.device) + + +class EDMWeighting: + def __init__(self, sigma_data=0.5): + self.sigma_data = sigma_data + + def __call__(self, sigma): + return (sigma**2 + self.sigma_data**2) / (sigma * self.sigma_data) ** 2 + + +class VWeighting(EDMWeighting): + def __init__(self): + super().__init__(sigma_data=1.0) + + +class EpsWeighting: + def __call__(self, sigma): + return sigma**-2.0 diff --git a/CCEdit-main/sgm/modules/diffusionmodules/discretizer.py b/CCEdit-main/sgm/modules/diffusionmodules/discretizer.py new file mode 100644 index 0000000000000000000000000000000000000000..384f7ebddd40267e221ddf6a5875b59de004f6fe --- /dev/null +++ b/CCEdit-main/sgm/modules/diffusionmodules/discretizer.py @@ -0,0 +1,69 @@ +from abc import abstractmethod +from functools import partial + +import numpy as np +import torch + +from ...modules.diffusionmodules.util import make_beta_schedule +from ...util import append_zero + + +def generate_roughly_equally_spaced_steps( + num_substeps: int, max_step: int +) -> np.ndarray: + return np.linspace(max_step - 1, 0, num_substeps, endpoint=False).astype(int)[::-1] + + +class Discretization: + def __call__(self, n, do_append_zero=True, device="cpu", flip=False): + sigmas = self.get_sigmas(n, device=device) + sigmas = append_zero(sigmas) if do_append_zero else sigmas 
+ return sigmas if not flip else torch.flip(sigmas, (0,)) + + @abstractmethod + def get_sigmas(self, n, device): + pass + + +class EDMDiscretization(Discretization): + def __init__(self, sigma_min=0.02, sigma_max=80.0, rho=7.0): + self.sigma_min = sigma_min + self.sigma_max = sigma_max + self.rho = rho + + def get_sigmas(self, n, device="cpu"): + ramp = torch.linspace(0, 1, n, device=device) + min_inv_rho = self.sigma_min ** (1 / self.rho) + max_inv_rho = self.sigma_max ** (1 / self.rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** self.rho + return sigmas + + +class LegacyDDPMDiscretization(Discretization): + def __init__( + self, + linear_start=0.00085, + linear_end=0.0120, + num_timesteps=1000, + ): + super().__init__() + self.num_timesteps = num_timesteps + betas = make_beta_schedule( + "linear", num_timesteps, linear_start=linear_start, linear_end=linear_end + ) + alphas = 1.0 - betas + self.alphas_cumprod = np.cumprod(alphas, axis=0) + self.to_torch = partial(torch.tensor, dtype=torch.float32) + + def get_sigmas(self, n, device="cpu"): + if n < self.num_timesteps: + timesteps = generate_roughly_equally_spaced_steps(n, self.num_timesteps) + alphas_cumprod = self.alphas_cumprod[timesteps] + elif n == self.num_timesteps: + alphas_cumprod = self.alphas_cumprod + else: + raise ValueError + + to_torch = partial(torch.tensor, dtype=torch.float32, device=device) + sigmas = to_torch((1 - alphas_cumprod) / alphas_cumprod) ** 0.5 + return torch.flip(sigmas, (0,)) \ No newline at end of file diff --git a/CCEdit-main/sgm/modules/diffusionmodules/guiders.py b/CCEdit-main/sgm/modules/diffusionmodules/guiders.py new file mode 100644 index 0000000000000000000000000000000000000000..fa092a501aded6a491ef32bd578a675f2e9e4ff2 --- /dev/null +++ b/CCEdit-main/sgm/modules/diffusionmodules/guiders.py @@ -0,0 +1,67 @@ +from functools import partial + +import torch + +from ...util import default, instantiate_from_config + + +class VanillaCFG: + """ + implements 
parallelized CFG + """ + + def __init__(self, scale, dyn_thresh_config=None): + scale_schedule = lambda scale, sigma: scale # independent of step + self.scale_schedule = partial(scale_schedule, scale) + self.dyn_thresh = instantiate_from_config( + default( + dyn_thresh_config, + { + "target": "sgm.modules.diffusionmodules.sampling_utils.NoDynamicThresholding" + }, + ) + ) + + def __call__(self, x, sigma): + x_u, x_c = x.chunk(2) + scale_value = self.scale_schedule(sigma) + x_pred = self.dyn_thresh(x_u, x_c, scale_value) + return x_pred + + def prepare_inputs(self, x, s, c, uc): + c_out = dict() + + for k in c: + if k in ["vector", "crossattn", "concat", "cond_feat"]: + c_out[k] = torch.cat((uc[k], c[k]), 0) + else: + assert c[k] == uc[k] + c_out[k] = c[k] + return torch.cat([x] * 2), torch.cat([s] * 2), c_out + + +class IdentityGuider: + def __call__(self, x, sigma): + return x + + def prepare_inputs(self, x, s, c, uc): + c_out = dict() + + for k in c: + c_out[k] = c[k] + + return x, s, c_out + + +class VanillaCFGTV2V(VanillaCFG): + def prepare_inputs(self, x, s, c, uc): + c_out = dict() + + for k in c: + # if k in ["vector", "crossattn", "concat", "cond_feat", 'control_hint']: + if k in ["vector", "crossattn", "concat", "cond_feat", 'control_hint', 'interpolate_first', 'interpolate_last', 'interpolate_first_last']: + c_out[k] = torch.cat((uc[k], c[k]), 0) + else: + assert c[k] == uc[k] + c_out[k] = c[k] + return torch.cat([x] * 2), torch.cat([s] * 2), c_out \ No newline at end of file diff --git a/CCEdit-main/sgm/modules/diffusionmodules/loss.py b/CCEdit-main/sgm/modules/diffusionmodules/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..ba2920b8bff3c14260a67909b9b1502daea9386d --- /dev/null +++ b/CCEdit-main/sgm/modules/diffusionmodules/loss.py @@ -0,0 +1,84 @@ +from typing import List, Optional, Union + +import torch +import torch.nn as nn +from omegaconf import ListConfig +from taming.modules.losses.lpips import LPIPS + +from ...util 
import append_dims, instantiate_from_config + + +class StandardDiffusionLoss(nn.Module): + def __init__( + self, + sigma_sampler_config, + type="l2", + offset_noise_level=0.0, + offset_noise_varying_dim = 1, + batch2model_keys: Optional[Union[str, List[str], ListConfig]] = None, + ): + super().__init__() + + assert type in ["l2", "l1", "lpips"] + + self.sigma_sampler = instantiate_from_config(sigma_sampler_config) + + self.type = type + self.offset_noise_level = offset_noise_level + self.offset_noise_varying_dim = offset_noise_varying_dim + + if type == "lpips": + self.lpips = LPIPS().eval() + + if not batch2model_keys: + batch2model_keys = [] + + if isinstance(batch2model_keys, str): + batch2model_keys = [batch2model_keys] + + self.batch2model_keys = set(batch2model_keys) + + def __call__(self, network, denoiser, conditioner, input, batch): + cond = conditioner(batch) + additional_model_inputs = { + key: batch[key] for key in self.batch2model_keys.intersection(batch) + } + + sigmas = self.sigma_sampler(input.shape[0]).to(input.device) + noise = torch.randn_like(input) + if self.offset_noise_level > 0.0: + # noise = noise + self.offset_noise_level * append_dims( + # torch.randn(input.shape[0], device=input.device), input.ndim + # ) + assert input.ndim > self.offset_noise_varying_dim, 'input.ndim should be larger than self.offset_noise_varying_dim' + noise = noise + self.offset_noise_level * append_dims( + torch.randn(input.shape[:self.offset_noise_varying_dim], device=input.device), input.ndim + ) + + noised_input = input + noise * append_dims(sigmas, input.ndim) + # noised_input: [1, 4, 9, 40, 40] + # cond['crossattn']: [1, 77, 1024] + # sigmas: the coefficient of the corresponding t. 
+ # import torchvision, einops + # vis = einops.rearrange(input, '1 c t h w -> t c h w')[:,:3] + # torchvision.utils.save_image(vis, 'input.png', normalize=True) + # import pdb; pdb.set_trace() + # model_output = denoiser( + # network, noised_input, sigmas, cond, **additional_model_inputs + # ) + model_output = denoiser(network, noised_input, sigmas, cond) + w = append_dims(denoiser.w(sigmas), input.ndim) + return self.get_loss(model_output, input, w) + + def get_loss(self, model_output, target, w): + if self.type == "l2": + return torch.mean( + (w * (model_output - target) ** 2).reshape(target.shape[0], -1), 1 + ) + elif self.type == "l1": + return torch.mean( + (w * (model_output - target).abs()).reshape(target.shape[0], -1), 1 + ) + elif self.type == "lpips": + loss = self.lpips(model_output, target).reshape(-1) + return loss \ No newline at end of file diff --git a/CCEdit-main/sgm/modules/diffusionmodules/model.py b/CCEdit-main/sgm/modules/diffusionmodules/model.py new file mode 100644 index 0000000000000000000000000000000000000000..7547c75fe501037b7e1963888ea988d7e9cef22d --- /dev/null +++ b/CCEdit-main/sgm/modules/diffusionmodules/model.py @@ -0,0 +1,761 @@ +# pytorch_diffusion + derived encoder decoder +import math +from typing import Any, Callable, Optional + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange +from packaging import version + +try: + import xformers + import xformers.opsControlledUNetModel3DTV2V + + XFORMERS_IS_AVAILABLE = True +except: + XFORMERS_IS_AVAILABLE = False + print("no module 'xformers'. Processing without...") + +from ...modules.attention import LinearAttention, MemoryEfficientCrossAttention + + +def get_timestep_embedding(timesteps, embedding_dim): + """ + This matches the implementation in Denoising Diffusion Probabilistic Models: + From Fairseq. + Build sinusoidal embeddings. 
+ This matches the implementation in tensor2tensor, but differs slightly + from the description in Section 3.5 of "Attention Is All You Need". + """ + assert len(timesteps.shape) == 1 + + half_dim = embedding_dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) + emb = emb.to(device=timesteps.device) + emb = timesteps.float()[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1, 0, 0)) + return emb + + +def nonlinearity(x): + # swish + return x * torch.sigmoid(x) + + +def Normalize(in_channels, num_groups=32): + return torch.nn.GroupNorm( + num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True + ) + + +class Upsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + self.conv = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=3, stride=1, padding=1 + ) + + def forward(self, x): + x = torch.nn.functional.interpolate( + x.to(torch.float32), scale_factor=2.0, mode="nearest" + ).to(x.dtype) + if self.with_conv: + x = self.conv(x) + return x + + +class Downsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + # no asymmetric padding in torch conv, must do it ourselves + self.conv = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=3, stride=2, padding=0 + ) + + def forward(self, x): + if self.with_conv: + pad = (0, 1, 0, 1) + x = torch.nn.functional.pad(x, pad, mode="constant", value=0) + x = self.conv(x) + else: + x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) + return x + + +class ResnetBlock(nn.Module): + def __init__( + self, + *, + in_channels, + out_channels=None, + conv_shortcut=False, + dropout, + temb_channels=512, + ): + super().__init__() + self.in_channels = in_channels + 
out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.use_conv_shortcut = conv_shortcut + + self.norm1 = Normalize(in_channels) + self.conv1 = torch.nn.Conv2d( + in_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) + if temb_channels > 0: + self.temb_proj = torch.nn.Linear(temb_channels, out_channels) + self.norm2 = Normalize(out_channels) + self.dropout = torch.nn.Dropout(dropout) + self.conv2 = torch.nn.Conv2d( + out_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + self.conv_shortcut = torch.nn.Conv2d( + in_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) + else: + self.nin_shortcut = torch.nn.Conv2d( + in_channels, out_channels, kernel_size=1, stride=1, padding=0 + ) + + def forward(self, x, temb): + h = x + h = self.norm1(h) + h = nonlinearity(h) + h = self.conv1(h) + + if temb is not None: + h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None] + + h = self.norm2(h) + h = nonlinearity(h) + h = self.dropout(h) + h = self.conv2(h) + + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + x = self.conv_shortcut(x) + else: + x = self.nin_shortcut(x) + + return x + h + + +class LinAttnBlock(LinearAttention): + """to match AttnBlock usage""" + + def __init__(self, in_channels): + super().__init__(dim=in_channels, heads=1, dim_head=in_channels) + + +class AttnBlock(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.k = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.v = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.proj_out = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, 
padding=0 + ) + + def attention(self, h_: torch.Tensor) -> torch.Tensor: + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + b, c, h, w = q.shape + q, k, v = map( + lambda x: rearrange(x, "b c h w -> b 1 (h w) c").contiguous(), (q, k, v) + ) + h_ = torch.nn.functional.scaled_dot_product_attention( + q, k, v + ) # scale is dim ** -0.5 per default + # compute attention + + return rearrange(h_, "b 1 (h w) c -> b c h w", h=h, w=w, c=c, b=b) + + def forward(self, x, **kwargs): + h_ = x + h_ = self.attention(h_) + h_ = self.proj_out(h_) + return x + h_ + + +class MemoryEfficientAttnBlock(nn.Module): + """ + Uses xformers efficient implementation, + see https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223 + Note: this is a single-head self-attention operation + """ + + # + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.k = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.v = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.proj_out = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.attention_op: Optional[Any] = None + + def attention(self, h_: torch.Tensor) -> torch.Tensor: + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + B, C, H, W = q.shape + q, k, v = map(lambda x: rearrange(x, "b c h w -> b (h w) c"), (q, k, v)) + + q, k, v = map( + lambda t: t.unsqueeze(3) + .reshape(B, t.shape[1], 1, C) + .permute(0, 2, 1, 3) + .reshape(B * 1, t.shape[1], C) + .contiguous(), + (q, k, v), + ) + # out = xformers.ops.memory_efficient_attention( + # q, k, v, attn_bias=None, op=self.attention_op + # ) + + with torch.autocast(enabled=False, 
device_type="cuda"): + if self.proj_out.weight.dtype == torch.float16: + q, k, v = q.to(torch.float32), k.to(torch.float32), v.to(torch.float32) + elif self.proj_out.weight.dtype == torch.bfloat16: + q, k, v = ( + q.to(torch.bfloat16), + k.to(torch.bfloat16), + v.to(torch.bfloat16), + ) + out = F.scaled_dot_product_attention(q, k, v, is_causal=False).to( + self.proj_out.weight.dtype + ) + + out = ( + out.unsqueeze(0) + .reshape(B, 1, out.shape[1], C) + .permute(0, 2, 1, 3) + .reshape(B, out.shape[1], C) + ) + return rearrange(out, "b (h w) c -> b c h w", b=B, h=H, w=W, c=C) + + def forward(self, x, **kwargs): + h_ = x + h_ = self.attention(h_) + h_ = self.proj_out(h_) + return x + h_ + + +class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention): + def forward(self, x, context=None, mask=None, **unused_kwargs): + b, c, h, w = x.shape + x = rearrange(x, "b c h w -> b (h w) c") + out = super().forward(x, context=context, mask=mask) + out = rearrange(out, "b (h w) c -> b c h w", h=h, w=w, c=c) + return x + out + + +def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None): + assert attn_type in [ + "vanilla", + "vanilla-xformers", + "memory-efficient-cross-attn", + "linear", + "none", + ], f"attn_type {attn_type} unknown" + if ( + version.parse(torch.__version__) < version.parse("2.0.0") + and attn_type != "none" + ): + assert XFORMERS_IS_AVAILABLE, ( + f"We do not support vanilla attention in {torch.__version__} anymore, " + f"as it is too expensive. Please install xformers via e.g. 
'pip install xformers==0.0.16'" + ) + attn_type = "vanilla-xformers" + print(f"making attention of type '{attn_type}' with {in_channels} in_channels") + if attn_type == "vanilla": + assert attn_kwargs is None + return AttnBlock(in_channels) + elif attn_type == "vanilla-xformers": + print(f"building MemoryEfficientAttnBlock with {in_channels} in_channels...") + return MemoryEfficientAttnBlock(in_channels) + elif type == "memory-efficient-cross-attn": + attn_kwargs["query_dim"] = in_channels + return MemoryEfficientCrossAttentionWrapper(**attn_kwargs) + elif attn_type == "none": + return nn.Identity(in_channels) + else: + return LinAttnBlock(in_channels) + + +class Model(nn.Module): + def __init__( + self, + *, + ch, + out_ch, + ch_mult=(1, 2, 4, 8), + num_res_blocks, + attn_resolutions, + dropout=0.0, + resamp_with_conv=True, + in_channels, + resolution, + use_timestep=True, + use_linear_attn=False, + attn_type="vanilla", + ): + super().__init__() + if use_linear_attn: + attn_type = "linear" + self.ch = ch + self.temb_ch = self.ch * 4 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + + self.use_timestep = use_timestep + if self.use_timestep: + # timestep embedding + self.temb = nn.Module() + self.temb.dense = nn.ModuleList( + [ + torch.nn.Linear(self.ch, self.temb_ch), + torch.nn.Linear(self.temb_ch, self.temb_ch), + ] + ) + + # downsampling + self.conv_in = torch.nn.Conv2d( + in_channels, self.ch, kernel_size=3, stride=1, padding=1 + ) + + curr_res = resolution + in_ch_mult = (1,) + tuple(ch_mult) + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = ch * in_ch_mult[i_level] + block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) 
+ block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions - 1: + down.downsample = Downsample(block_in, resamp_with_conv) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = ch * ch_mult[i_level] + skip_in = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + if i_block == self.num_res_blocks: + skip_in = ch * in_ch_mult[i_level] + block.append( + ResnetBlock( + in_channels=block_in + skip_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in, resamp_with_conv) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d( + block_in, out_ch, kernel_size=3, stride=1, padding=1 + ) + + def forward(self, x, t=None, context=None): + # assert x.shape[2] == x.shape[3] == self.resolution + if context is not None: + # assume aligned context, cat along channel axis + x = torch.cat((x, context), dim=1) + if self.use_timestep: + # timestep embedding + assert t is not None + temb = get_timestep_embedding(t, self.ch) + temb = 
self.temb.dense[0](temb) + temb = nonlinearity(temb) + temb = self.temb.dense[1](temb) + else: + temb = None + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1], temb) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions - 1: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + h = self.up[i_level].block[i_block]( + torch.cat([h, hs.pop()], dim=1), temb + ) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + def get_last_layer(self): + return self.conv_out.weight + + +class Encoder(nn.Module): + def __init__( + self, + *, + ch, + out_ch, + ch_mult=(1, 2, 4, 8), + num_res_blocks, + attn_resolutions, + dropout=0.0, + resamp_with_conv=True, + in_channels, + resolution, + z_channels, + double_z=True, + use_linear_attn=False, + attn_type="vanilla", + **ignore_kwargs, + ): + super().__init__() + if use_linear_attn: + attn_type = "linear" + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + + # downsampling + self.conv_in = torch.nn.Conv2d( + in_channels, self.ch, kernel_size=3, stride=1, padding=1 + ) + + curr_res = resolution + in_ch_mult = (1,) + tuple(ch_mult) + self.in_ch_mult = in_ch_mult + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = ch * 
in_ch_mult[i_level] + block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions - 1: + down.downsample = Downsample(block_in, resamp_with_conv) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d( + block_in, + 2 * z_channels if double_z else z_channels, + kernel_size=3, + stride=1, + padding=1, + ) + + def forward(self, x): + # timestep embedding + temb = None + + # downsampling + if x.shape[1] == 4 and self.conv_in.in_channels == 3: + raise ValueError("Mismatched number of input channels") + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1], temb) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions - 1: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class Decoder(nn.Module): + def __init__( + self, + *, + ch, + out_ch, + ch_mult=(1, 2, 4, 8), + 
num_res_blocks, + attn_resolutions, + dropout=0.0, + resamp_with_conv=True, + in_channels, + resolution, + z_channels, + give_pre_end=False, + tanh_out=False, + use_linear_attn=False, + attn_type="vanilla", + **ignorekwargs, + ): + super().__init__() + if use_linear_attn: + attn_type = "linear" + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + self.give_pre_end = give_pre_end + self.tanh_out = tanh_out + + # compute in_ch_mult, block_in and curr_res at lowest res + in_ch_mult = (1,) + tuple(ch_mult) + block_in = ch * ch_mult[self.num_resolutions - 1] + curr_res = resolution // 2 ** (self.num_resolutions - 1) + self.z_shape = (1, z_channels, curr_res, curr_res) + print( + "Working with z of shape {} = {} dimensions.".format( + self.z_shape, np.prod(self.z_shape) + ) + ) + + make_attn_cls = self._make_attn() + make_resblock_cls = self._make_resblock() + make_conv_cls = self._make_conv() + # z to block_in + self.conv_in = torch.nn.Conv2d( + z_channels, block_in, kernel_size=3, stride=1, padding=1 + ) + + # middle + self.mid = nn.Module() + self.mid.block_1 = make_resblock_cls( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + self.mid.attn_1 = make_attn_cls(block_in, attn_type=attn_type) + self.mid.block_2 = make_resblock_cls( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + block.append( + make_resblock_cls( + in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + 
attn.append(make_attn_cls(block_in, attn_type=attn_type)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in, resamp_with_conv) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = Normalize(block_in) + self.conv_out = make_conv_cls( + block_in, out_ch, kernel_size=3, stride=1, padding=1 + ) + + def _make_attn(self) -> Callable: + return make_attn + + def _make_resblock(self) -> Callable: + return ResnetBlock + + def _make_conv(self) -> Callable: + return torch.nn.Conv2d + + def get_last_layer(self, **kwargs): + return self.conv_out.weight + + def forward(self, z, **kwargs): + # assert z.shape[1:] == self.z_shape[1:] + self.last_z_shape = z.shape + + # timestep embedding + temb = None + + # z to block_in + h = self.conv_in(z) + + # middle + h = self.mid.block_1(h, temb, **kwargs) + h = self.mid.attn_1(h, **kwargs) + h = self.mid.block_2(h, temb, **kwargs) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + h = self.up[i_level].block[i_block](h, temb, **kwargs) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h, **kwargs) + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + if self.give_pre_end: + return h + + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h, **kwargs) + if self.tanh_out: + h = torch.tanh(h) + return h diff --git a/CCEdit-main/sgm/modules/diffusionmodules/openaimodel.py b/CCEdit-main/sgm/modules/diffusionmodules/openaimodel.py new file mode 100644 index 0000000000000000000000000000000000000000..f46b598c5056a46a5c0b9741177201b6ad639b5d --- /dev/null +++ b/CCEdit-main/sgm/modules/diffusionmodules/openaimodel.py @@ -0,0 +1,1898 @@ +import math +from abc import abstractmethod +from functools import partial +from typing import Iterable + +import numpy as np +import torch as th +import torch.nn as nn +import 
torch.nn.functional as F +from einops import rearrange, repeat +import warnings + +from ...modules.attention import ( + SpatialTransformer, + SpatialTransformerCA, + SpatialTransformer3D, + SpatialTransformer3DCA, +) +from ...modules.diffusionmodules.util import ( + avg_pool_nd, + checkpoint, + checkpoint_new, + conv_nd, + linear, + normalization, + timestep_embedding, + zero_module, +) +from ...util import default, exists + + +# dummy replace +def convert_module_to_f16(x): + pass + + +def convert_module_to_f32(x): + pass + + +class AttentionPool2d(nn.Module): + """ + Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py + """ + + def __init__( + self, + spacial_dim: int, + embed_dim: int, + num_heads_channels: int, + output_dim: int = None, + ): + super().__init__() + self.positional_embedding = nn.Parameter( + th.randn(embed_dim, spacial_dim**2 + 1) / embed_dim**0.5 + ) + self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) + self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) + self.num_heads = embed_dim // num_heads_channels + self.attention = QKVAttention(self.num_heads) + + def forward(self, x): + b, c, *_spatial = x.shape + x = x.reshape(b, c, -1) # NC(HW) + x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) + x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) + x = self.qkv_proj(x) + x = self.attention(x) + x = self.c_proj(x) + return x[:, :, 0] + + +class TimestepBlock(nn.Module): + """ + Any module where forward() takes timestep embeddings as a second argument. + """ + + @abstractmethod + def forward(self, x, emb): + """ + Apply the module to `x` given `emb` timestep embeddings. + """ + + +class TimestepEmbedSequential(nn.Sequential, TimestepBlock): + """ + A sequential module that passes timestep embeddings to the children that + support it as an extra input. 
+ """ + + def forward( + self, + x, + emb, + context=None, + skip_time_mix=False, + time_context=None, + num_video_frames=None, + time_context_cat=None, + use_crossframe_attention_in_spatial_layers=False, + img_emb=None, + return_inject=False, + ): + inject = None + for layer in self: + if isinstance(layer, TimestepBlock): + if isinstance(layer, ResBlock3DSSN): + if return_inject: + x, inject = layer(x, emb, img_emb, return_inject) + else: + x = layer(x, emb, img_emb, return_inject) + else: + x = layer(x, emb) + elif ( + isinstance(layer, SpatialTransformer) + or isinstance(layer, SpatialTransformer3D) + or isinstance(layer, SpatialTransformer3DCA) + ): + x = layer(x, context) + else: + x = layer(x) + # return x + if inject is not None and return_inject: + return x, inject + else: + return x + + +def spatial_temporal_forward( + x, + spatial_layers, + temporal_layers, + identity_layer=nn.Identity(), + emb=None, + context=None, + alpha=None, +): + """ + pseudo 3D = 2D + 1D + + first do spatial forward + then do temporal forward + have skip connection + in the temporal layers not change channel + """ + b, c, *_, h, w = x.shape + x = rearrange(x, "b c t h w -> (b t) c h w").contiguous() + if isinstance(spatial_layers, nn.Module): + if isinstance(spatial_layers, TimestepEmbedSequential): + x = spatial_layers(x, emb, context) + else: + x = spatial_layers(x) + else: + raise TypeError("spatial_layers argument must be a nn.Module object") + # rearrange x to do temporal forward + bt, c, h, w = x.shape + x = rearrange(x, "(b t) c h w -> (b h w) c t", b=b).contiguous() + + identity = identity_layer(x) # skip connection + + if isinstance(temporal_layers, nn.Module): + if isinstance(temporal_layers, TimestepEmbedSequential): + x = temporal_layers(x, emb, context) + else: + x = temporal_layers(x) + + elif temporal_layers is None: + x = th.zeros_like(identity, device=identity.device, dtype=identity.dtype) + else: + raise TypeError("temporal_layers must be a nn.Module object or 
None") + # x = x + identity + if alpha is not None: + x = alpha * identity + (1 - alpha) * x + else: + x = x + identity + # x = identity # ! DEBUG ONLY + x = rearrange(x, "(b h w) c t -> b c t h w", h=h, w=w).contiguous() + return x + + +class Upsample(nn.Module): + """ + An upsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + upsampling occurs in the inner-two dimensions. + """ + + def __init__( + self, channels, use_conv, dims=2, out_channels=None, padding=1, third_up=False + ): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + self.third_up = third_up + if use_conv: + self.conv = conv_nd( + dims, self.channels, self.out_channels, 3, padding=padding + ) + + def forward(self, x): + assert x.shape[1] == self.channels + if self.dims == 3: + t_factor = 1 if not self.third_up else 2 + x = F.interpolate( + x, + (t_factor * x.shape[2], x.shape[3] * 2, x.shape[4] * 2), + mode="nearest", + ) + else: + x = F.interpolate(x, scale_factor=2, mode="nearest") + if self.use_conv: + x = self.conv(x) + return x + + +class Upsample3D(nn.Module): + """ + This is hacked from Upsample, pseudo 3D convolutions (2D+1D) are used. + + An upsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + upsampling occurs in the inner-two dimensions. 
+ """ + + def __init__( + self, + channels, + use_conv, + dims=2, + out_channels=None, + padding=1, + third_up=False, + timeupscale=1, + ): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + self.third_up = third_up + self.timeupscale = timeupscale + if use_conv: + self.conv = conv_nd(2, self.channels, self.out_channels, 3, padding=padding) + self.conv_temporal = zero_module( + conv_nd(1, self.out_channels, self.out_channels, 3, padding=padding) + ) + + def forward(self, x): + assert x.shape[1] == self.channels + x = F.interpolate( + x.to(th.float32), + (x.shape[2] * self.timeupscale, x.shape[3] * 2, x.shape[4] * 2), + mode="nearest", + ).to(x.dtype) + if self.use_conv: + x = spatial_temporal_forward(x, self.conv, self.conv_temporal) + return x + + +class TransposedUpsample(nn.Module): + "Learned 2x upsampling without padding" + + def __init__(self, channels, out_channels=None, ks=5): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + + self.up = nn.ConvTranspose2d( + self.channels, self.out_channels, kernel_size=ks, stride=2 + ) + + def forward(self, x): + return self.up(x) + + +class Downsample(nn.Module): + """ + A downsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + downsampling occurs in the inner-two dimensions. 
+ """ + + def __init__( + self, channels, use_conv, dims=2, out_channels=None, padding=1, third_down=False + ): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + stride = 2 if dims != 3 else ((1, 2, 2) if not third_down else (2, 2, 2)) + if use_conv: + print(f"Building a Downsample layer with {dims} dims.") + print( + f" --> settings are: \n in-chn: {self.channels}, out-chn: {self.out_channels}, " + f"kernel-size: 3, stride: {stride}, padding: {padding}" + ) + if dims == 3: + print(f" --> Downsampling third axis (time): {third_down}") + self.op = conv_nd( + dims, + self.channels, + self.out_channels, + 3, + stride=stride, + padding=padding, + ) + else: + assert self.channels == self.out_channels + self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) + + def forward(self, x): + assert x.shape[1] == self.channels + return self.op(x) + + +class Downsample3D(nn.Module): + """ + This is hacked from Downsample, pseudo 3D convolutions (2D+1D) are used. + + A downsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + downsampling occurs in the inner-two dimensions. 
+ """ + + def __init__( + self, + channels, + use_conv, + dims=2, + out_channels=None, + padding=1, + third_down=False, + timedownscale=1, + ): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + stride = 2 if dims != 3 else ((1, 2, 2) if not third_down else (2, 2, 2)) + self.timedownscale = timedownscale + if use_conv: + print(f"Building a Downsample layer with {dims} dims.") + print( + f" --> settings are: \n in-chn: {self.channels}, out-chn: {self.out_channels}, " + f"kernel-size: 3, stride: {stride}, padding: {padding}" + ) + if dims == 3: + print(f" --> Downsampling third axis (time): {third_down}") + self.op = conv_nd( + 2, + self.channels, + self.out_channels, + 3, + stride=stride, + padding=padding, + ) + self.conv_temporal = zero_module( + conv_nd( + 1, + self.out_channels, + self.out_channels, + 3, + stride=self.timedownscale, + padding=padding, + ) + ) + if self.timedownscale == 1: + self.identity = nn.Identity() + elif self.timedownscale == 2: + self.identity = avg_pool_nd(1, kernel_size=stride, stride=stride) + else: + self.op = avg_pool_nd(2, kernel_size=2, stride=2) + stride = (self.timedownscale, 2, 2) + self.op_3d = avg_pool_nd(3, kernel_size=stride, stride=stride) + + def forward(self, x): + assert x.shape[1] == self.channels + if self.use_conv: + x = spatial_temporal_forward(x, self.op, self.conv_temporal, self.identity) + else: + x = self.op_3d(x) + return x + + +class ResBlock(TimestepBlock): + """ + A residual block that can optionally change the number of channels. + :param channels: the number of input channels. + :param emb_channels: the number of timestep embedding channels. + :param dropout: the rate of dropout. + :param out_channels: if specified, the number of out channels. + :param use_conv: if True and out_channels is specified, use a spatial + convolution instead of a smaller 1x1 convolution to change the + channels in the skip connection. 
class ResBlock(TimestepBlock):
    """
    A residual block that can optionally change the number of channels.
    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param use_checkpoint: if True, use gradient checkpointing on this module.
    :param up: if True, use this block for upsampling.
    :param down: if True, use this block for downsampling.
    :param kernel_size: convolution kernel size; an int or a per-dimension
        iterable.
    :param exchange_temb_dims: if True, transpose the (t, c) axes of the
        embedding output before adding it to the hidden state.
    :param skip_t_emb: if True, ignore the timestep embedding entirely.
    """

    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        use_checkpoint=False,
        up=False,
        down=False,
        kernel_size=3,
        exchange_temb_dims=False,
        skip_t_emb=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm
        self.exchange_temb_dims = exchange_temb_dims

        # "Same" padding, per-dimension when the kernel size is iterable.
        if isinstance(kernel_size, Iterable):
            padding = [k // 2 for k in kernel_size]
        else:
            padding = kernel_size // 2

        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, kernel_size, padding=padding),
        )

        self.updown = up or down

        # Optional resampling applied to both the hidden and skip branches.
        if up:
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        self.skip_t_emb = skip_t_emb
        # scale-shift norm needs two chunks (scale, shift) from the embedding.
        self.emb_out_channels = (
            2 * self.out_channels if use_scale_shift_norm else self.out_channels
        )
        if self.skip_t_emb:
            print(f"Skipping timestep embedding in {self.__class__.__name__}")
            assert not self.use_scale_shift_norm
            self.emb_layers = None
            self.exchange_temb_dims = False
        else:
            self.emb_layers = nn.Sequential(
                nn.SiLU(),
                linear(
                    emb_channels,
                    self.emb_out_channels,
                ),
            )

        # The output conv is zero-initialised so the whole block starts out
        # as (approximately) the skip connection alone.
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(
                conv_nd(
                    dims,
                    self.out_channels,
                    self.out_channels,
                    kernel_size,
                    padding=padding,
                )
            ),
        )

        # Skip connection: identity when channels match, otherwise a conv.
        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, kernel_size, padding=padding
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)

    def forward(self, x, emb):
        """
        Apply the block to a Tensor, conditioned on a timestep embedding.
        :param x: an [N x C x ...] Tensor of features.
        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
        :return: an [N x C x ...] Tensor of outputs.
        """
        return checkpoint(
            self._forward, (x, emb), self.parameters(), self.use_checkpoint
        )

    def _forward(self, x, emb):
        if self.updown:
            # Resample between normalisation/activation and the convolution.
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)

        if self.skip_t_emb:
            emb_out = th.zeros_like(h)
        else:
            emb_out = self.emb_layers(emb).type(h.dtype)
        # Broadcast the embedding over the remaining (spatial) dimensions.
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            # FiLM-style conditioning: h = norm(h) * (1 + scale) + shift.
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = th.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            if self.exchange_temb_dims:
                emb_out = rearrange(emb_out, "b t c ... -> b c t ...")
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h
class ResBlock3D(TimestepBlock):
    """
    This is hacked from ResBlock, pseudo 3D convolutions (2D+1D) are used.
    The dims parameter in ResBlock is no longer used.
    TODO: should support pure 3D resblock?

    A residual block that can optionally change the number of channels.
    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param use_checkpoint: if True, use gradient checkpointing on this module.
    :param up: if True, use this block for upsampling.
    :param down: if True, use this block for downsampling.
    :param temporal_kernel_size: kernel size of the 1D temporal convolutions
        (defaults to 3).
    :param kwargs: may carry ``use_learnable_alpha`` to enable a learnable
        blend between the temporal and skip branches.
    """

    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,  # not used
        use_checkpoint=False,
        up=False,
        down=False,
        kernel_size=3,
        exchange_temb_dims=False,
        skip_t_emb=False,
        temporal_kernel_size=None,
        **kwargs,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm
        self.exchange_temb_dims = exchange_temb_dims

        self.temporal_kernel_size = default(temporal_kernel_size, 3)

        # "Same" padding, per-dimension when the kernel size is iterable.
        if isinstance(kernel_size, Iterable):
            padding = [k // 2 for k in kernel_size]
        else:
            padding = kernel_size // 2

        # Spatial (2D) input convolution ...
        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            conv_nd(2, channels, self.out_channels, kernel_size, padding=padding),
        )
        # ... followed by a zero-initialised temporal (1D) convolution so the
        # temporal path starts as a no-op.
        self.in_layers_temporal = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            zero_module(
                conv_nd(
                    1,
                    self.out_channels,
                    self.out_channels,
                    self.temporal_kernel_size,
                    padding=self.temporal_kernel_size // 2,
                )
            ),
        )

        self.updown = up or down

        # Resampling uses the true-3D Upsample/Downsample (dims=3).
        if up:
            self.h_upd = Upsample(channels, False, 3)
            self.x_upd = Upsample(channels, False, 3)
        elif down:
            self.h_upd = Downsample(channels, False, 3)
            self.x_upd = Downsample(channels, False, 3)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        self.skip_t_emb = skip_t_emb
        # scale-shift norm needs two chunks (scale, shift) from the embedding.
        self.emb_out_channels = (
            2 * self.out_channels if use_scale_shift_norm else self.out_channels
        )
        if self.skip_t_emb:
            print(f"Skipping timestep embedding in {self.__class__.__name__}")
            assert not self.use_scale_shift_norm
            self.emb_layers = None
            self.exchange_temb_dims = False
        else:
            self.emb_layers = nn.Sequential(
                nn.SiLU(),
                linear(
                    emb_channels,
                    self.emb_out_channels,
                ),
            )

        # Output stack, again factorised as 2D + zero-initialised 1D.
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(
                conv_nd(
                    2,
                    self.out_channels,
                    self.out_channels,
                    kernel_size,
                    padding=padding,
                )
            ),
        )
        self.out_layers_temporal = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(
                conv_nd(
                    1,
                    self.out_channels,
                    self.out_channels,
                    self.temporal_kernel_size,
                    padding=self.temporal_kernel_size // 2,
                )
            ),
        )

        # Skip connection, also factorised; the temporal half is None when the
        # skip is a plain identity.
        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
            self.skip_connection_temporal = None
        elif use_conv:
            self.skip_connection = conv_nd(
                2, channels, self.out_channels, kernel_size, padding=padding
            )
            self.skip_connection_temporal = zero_module(
                conv_nd(
                    1,
                    self.out_channels,
                    self.out_channels,
                    self.temporal_kernel_size,
                    padding=self.temporal_kernel_size // 2,
                )
            )
        else:
            self.skip_connection = conv_nd(2, channels, self.out_channels, 1)
            self.skip_connection_temporal = zero_module(
                conv_nd(1, self.out_channels, self.out_channels, 1)
            )

        # Optional learnable blend factors passed to spatial_temporal_forward.
        use_learnable_alpha = kwargs.get("use_learnable_alpha", False)
        if use_learnable_alpha:
            self.alpha_temporal1 = nn.Parameter(th.tensor(1.0))
            self.alpha_temporal2 = nn.Parameter(th.tensor(1.0))
        else:
            self.alpha_temporal1 = None
            self.alpha_temporal2 = None

    def forward(self, x, emb):
        """
        Apply the block to a Tensor, conditioned on a timestep embedding.
        :param x: an [N x C x ...] Tensor of features.
        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
        :return: an [N x C x ...] Tensor of outputs.
        """
        return checkpoint(
            self._forward, (x, emb), self.parameters(), self.use_checkpoint
        )

    def _forward(self, x, emb):
        identity = x
        if self.updown:
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            # 3d
            x = in_rest(x)
            x = self.h_upd(x)
            # 2d+1d
            x = spatial_temporal_forward(
                x, in_conv, self.in_layers_temporal, alpha=self.alpha_temporal1
            )
            # 3d
            identity = self.x_upd(identity)
        else:
            x = spatial_temporal_forward(
                x, self.in_layers, self.in_layers_temporal, alpha=self.alpha_temporal1
            )

        if self.skip_t_emb:
            emb_out = th.zeros_like(x)
        else:
            emb_out = self.emb_layers(emb).type(x.dtype)
        # Broadcast the embedding over the temporal and spatial dimensions.
        while len(emb_out.shape) < len(x.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = th.chunk(emb_out, 2, dim=1)
            # 3d
            x = out_norm(x) * (1 + scale) + shift
            # 2d+1d
            x = spatial_temporal_forward(
                x, out_rest, self.out_layers_temporal, alpha=self.alpha_temporal2
            )
        else:
            if self.exchange_temb_dims:
                emb_out = rearrange(emb_out, "b t c ... -> b c t ...")
            x = x + emb_out
            # 2d + 1d
            x = spatial_temporal_forward(
                x, self.out_layers, self.out_layers_temporal, alpha=self.alpha_temporal2
            )
        identity = spatial_temporal_forward(
            identity, self.skip_connection, self.skip_connection_temporal
        )
        x = identity + x
        return x
class ResBlock3DSSN(ResBlock3D):
    """
    ResBlock3D variant with "spatial scale-shift norm": an additional image
    embedding (``img_emb``) supplies a second (scale, shift) pair that
    modulates the normalised activation alongside the timestep embedding.
    The normalised pre-modulation activation (``inject``) can optionally be
    returned to the caller.
    """

    # TODO: better to use hook to get the img_emb, instead of return.
    def forward(self, x, emb, img_emb=None, return_inject=False):
        """
        Apply the block to a Tensor, conditioned on a timestep embedding.
        :param x: an [N x C x ...] Tensor of features.
        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
        :param img_emb: optional image embedding; chunked on dim 1 into a
            second (scale, shift) pair.
        :param return_inject: if True, also return the normalised activation
            before modulation.
        :return: an [N x C x ...] Tensor of outputs, plus ``inject`` when
            ``return_inject`` is True.
        """
        # TODO: Not sure about this, need crosscheck
        # return checkpoint(
        #     self._forward, (x, emb, img_emb, return_inject), self.parameters(), self.use_checkpoint
        # )
        return checkpoint_new(
            self._forward, (x, emb, img_emb, return_inject), self.use_checkpoint
        )

    def _forward(self, x, emb, img_emb=None, return_inject=False):
        identity = x
        if self.updown:
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            # 3d
            x = in_rest(x)
            x = self.h_upd(x)
            # 2d+1d
            x = spatial_temporal_forward(
                x, in_conv, self.in_layers_temporal, alpha=self.alpha_temporal1
            )
            # 3d
            identity = self.x_upd(identity)
        else:
            x = spatial_temporal_forward(
                x, self.in_layers, self.in_layers_temporal, alpha=self.alpha_temporal1
            )

        if self.skip_t_emb:
            emb_out = th.zeros_like(x)
        else:
            emb_out = self.emb_layers(emb).type(x.dtype)
        # Broadcast the embedding over the temporal and spatial dimensions.
        while len(emb_out.shape) < len(x.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale1, shift1 = th.chunk(emb_out, 2, dim=1)
            if img_emb is not None:
                scale2, shift2 = th.chunk(img_emb, 2, dim=1)
            else:
                # Neutral modulation when no image embedding is supplied.
                scale2, shift2 = 0.0, 0.0

            # Both (scale, shift) pairs modulate the same normalised
            # activation; this pre-modulation tensor is what gets "injected".
            inject = out_norm(x)
            x = inject * (1 + scale1) * (1 + scale2) + shift1 + shift2
            x = spatial_temporal_forward(
                x, out_rest, self.out_layers_temporal, alpha=self.alpha_temporal2
            )
        else:
            if self.exchange_temb_dims:
                emb_out = rearrange(emb_out, "b t c ... -> b c t ...")
            x = x + emb_out
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            if img_emb is not None:
                scale2, shift2 = th.chunk(img_emb, 2, dim=1)
            else:
                scale2, shift2 = 0.0, 0.0
            inject = out_norm(x)
            x = inject * (1 + scale2) + shift2
            x = spatial_temporal_forward(
                x, out_rest, self.out_layers_temporal, alpha=self.alpha_temporal2
            )

        identity = spatial_temporal_forward(
            identity, self.skip_connection, self.skip_connection_temporal
        )
        x = identity + x
        if return_inject:
            return x, inject
        else:
            return x


class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.
    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        self.channels = channels
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels)
        # One 1x1 conv produces q, k and v in a single tensor.
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        if use_new_attention_order:
            # split qkv before split heads
            self.attention = QKVAttention(self.num_heads)
        else:
            # split heads before split qkv
            self.attention = QKVAttentionLegacy(self.num_heads)

        # Zero-initialised projection: the block starts as an identity.
        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x, **kwargs):
        # TODO add crossframe attention and use mixed checkpoint
        # NOTE(review): checkpointing is unconditionally enabled here (True),
        # ignoring self.use_checkpoint.
        return checkpoint(
            self._forward, (x,), self.parameters(), True
        )  # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!

    def _forward(self, x):
        # Flatten all spatial dims into one token axis, attend, then restore.
        b, c, *spatial = x.shape
        x = x.reshape(b, c, -1)
        qkv = self.qkv(self.norm(x))
        h = self.attention(qkv)
        h = self.proj_out(h)
        return (x + h).reshape(b, c, *spatial)
+ + def _forward(self, x): + b, c, *spatial = x.shape + x = x.reshape(b, c, -1) + qkv = self.qkv(self.norm(x)) + h = self.attention(qkv) + h = self.proj_out(h) + return (x + h).reshape(b, c, *spatial) + + +def count_flops_attn(model, _x, y): + """ + A counter for the `thop` package to count the operations in an + attention operation. + Meant to be used like: + macs, params = thop.profile( + model, + inputs=(inputs, timestamps), + custom_ops={QKVAttention: QKVAttention.count_flops}, + ) + """ + b, c, *spatial = y[0].shape + num_spatial = int(np.prod(spatial)) + # We perform two matmuls with the same number of ops. + # The first computes the weight matrix, the second computes + # the combination of the value vectors. + matmul_ops = 2 * b * (num_spatial**2) * c + model.total_ops += th.DoubleTensor([matmul_ops]) + + +class QKVAttentionLegacy(nn.Module): + """ + A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping + """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv): + """ + Apply QKV attention. + :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. + """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum( + "bct,bcs->bts", q * scale, k * scale + ) # More stable with f16 than dividing afterwards + weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) + a = th.einsum("bts,bcs->bct", weight, v) + return a.reshape(bs, -1, length) + + @staticmethod + def count_flops(model, _x, y): + return count_flops_attn(model, _x, y) + + +class QKVAttention(nn.Module): + """ + A module which performs QKV attention and splits in a different order. 
+ """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv): + """ + Apply QKV attention. + :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. + """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + q, k, v = qkv.chunk(3, dim=1) + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum( + "bct,bcs->bts", + (q * scale).view(bs * self.n_heads, ch, length), + (k * scale).view(bs * self.n_heads, ch, length), + ) # More stable with f16 than dividing afterwards + weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) + a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) + return a.reshape(bs, -1, length) + + @staticmethod + def count_flops(model, _x, y): + return count_flops_attn(model, _x, y) + + +class Timestep(nn.Module): + def __init__(self, dim): + super().__init__() + self.dim = dim + + def forward(self, t): + return timestep_embedding(t, self.dim) + + +class UNetModel(nn.Module): + """ + The full UNet model with attention and timestep embedding. + :param in_channels: channels in the input Tensor. + :param model_channels: base channel count for the model. + :param out_channels: channels in the output Tensor. + :param num_res_blocks: number of residual blocks per downsample. + :param attention_resolutions: a collection of downsample rates at which + attention will take place. May be a set, list, or tuple. + For example, if this contains 4, then at 4x downsampling, attention + will be used. + :param dropout: the dropout probability. + :param channel_mult: channel multiplier for each level of the UNet. + :param conv_resample: if True, use learned convolutions for upsampling and + downsampling. + :param dims: determines if the signal is 1D, 2D, or 3D. 
+ :param num_classes: if specified (as an int), then this model will be + class-conditional with `num_classes` classes. + :param use_checkpoint: use gradient checkpointing to reduce memory usage. + :param num_heads: the number of attention heads in each attention layer. + :param num_heads_channels: if specified, ignore num_heads and instead use + a fixed channel width per attention head. + :param num_heads_upsample: works with num_heads to set a different number + of heads for upsampling. Deprecated. + :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. + :param resblock_updown: use residual blocks for up/downsampling. + :param use_new_attention_order: use a different attention pattern for potentially + increased efficiency. + """ + + def __init__( + self, + in_channels, + model_channels, + out_channels, + num_res_blocks, + attention_resolutions, + dropout=0, + channel_mult=(1, 2, 4, 8), + conv_resample=True, + dims=2, + num_classes=None, + use_checkpoint=False, + use_fp16=False, + num_heads=-1, + num_head_channels=-1, + num_heads_upsample=-1, + use_scale_shift_norm=False, + resblock_updown=False, + use_new_attention_order=False, + use_spatial_transformer=False, # custom transformer support + transformer_depth=1, # custom transformer support + context_dim=None, # custom transformer support + n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model + legacy=True, + disable_self_attentions=None, + num_attention_blocks=None, + disable_middle_self_attn=False, + use_linear_in_transformer=False, + spatial_transformer_attn_type="softmax", + # spatial_transformer_attn_type="softmax-xformers", + adm_in_channels=None, + use_fairscale_checkpoint=False, + offload_to_cpu=False, + transformer_depth_middle=None, + unet_type=None, + enable_attention3d_crossframe=False, + disable_text_ca=False, + disable_temporal_text_ca=False, + **kwargs, + ): + super().__init__() + from omegaconf.listconfig import ListConfig + + 
resblk_kwargs = dict() + transformer_kwargs = dict() + + # lora params + for _key, _val in kwargs.items(): + if "lora" in _key: + transformer_kwargs[_key] = _val + + if unet_type == None or unet_type == "2d": + resblk_class = ResBlock + transformer_class = SpatialTransformer + downsample_class = Downsample + upsample_class = Upsample + transformer_kwargs['disable_text_ca'] = disable_text_ca + enable_ref_attn = kwargs.get('enable_ref_attn', False) + if enable_ref_attn: + transformer_class = SpatialTransformerCA + elif unet_type == "pseudo-3d": + resblk_class = ResBlock3D + if enable_attention3d_crossframe: + transformer_class = SpatialTransformer3DCA + else: + transformer_class = SpatialTransformer3D + downsample_class = Downsample3D + upsample_class = Upsample3D + resblk_kwargs["use_learnable_alpha"] = kwargs.get( + "use_learnable_alpha", False + ) + transformer_kwargs["use_learnable_alpha"] = kwargs.get( + "use_learnable_alpha", False + ) + transformer_kwargs['disable_temporal_text_ca'] = disable_temporal_text_ca + ST3DCA_ca_type = kwargs.get('ST3DCA_ca_type', None) + if ST3DCA_ca_type: + transformer_kwargs['ST3DCA_ca_type'] = ST3DCA_ca_type + elif unet_type == "pseudo-3d-ssn": + resblk_class = ResBlock3DSSN + assert not enable_attention3d_crossframe, "Not supported yet" + transformer_class = SpatialTransformer3D + downsample_class = Downsample3D + upsample_class = Upsample3D + resblk_kwargs["use_learnable_alpha"] = kwargs.get( + "use_learnable_alpha", False + ) + transformer_kwargs["use_learnable_alpha"] = kwargs.get( + "use_learnable_alpha", False + ) + else: + raise ValueError(f"Unknown resblk_type: {unet_type}") + + if use_spatial_transformer: + assert ( + context_dim is not None + ), "Fool!! You forgot to include the dimension of your cross-attention conditioning..." + + if context_dim is not None: + assert ( + use_spatial_transformer + ), "Fool!! You forgot to use the spatial transformer for your cross-attention conditioning..." 
+ if type(context_dim) == ListConfig: + context_dim = list(context_dim) + + if num_heads_upsample == -1: + num_heads_upsample = num_heads + + if num_heads == -1: + assert ( + num_head_channels != -1 + ), "Either num_heads or num_head_channels has to be set" + + if num_head_channels == -1: + assert ( + num_heads != -1 + ), "Either num_heads or num_head_channels has to be set" + + self.in_channels = in_channels + self.model_channels = model_channels + self.out_channels = out_channels + if isinstance(transformer_depth, int): + transformer_depth = len(channel_mult) * [transformer_depth] + elif isinstance(transformer_depth, ListConfig): + transformer_depth = list(transformer_depth) + transformer_depth_middle = default( + transformer_depth_middle, transformer_depth[-1] + ) + + if isinstance(num_res_blocks, int): + self.num_res_blocks = len(channel_mult) * [num_res_blocks] + else: + if len(num_res_blocks) != len(channel_mult): + raise ValueError( + "provide num_res_blocks either as an int (globally constant) or " + "as a list/tuple (per-level) with the same length as channel_mult" + ) + self.num_res_blocks = num_res_blocks + # self.num_res_blocks = num_res_blocks + if disable_self_attentions is not None: + # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not + assert len(disable_self_attentions) == len(channel_mult) + if num_attention_blocks is not None: + assert len(num_attention_blocks) == len(self.num_res_blocks) + assert all( + map( + lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], + range(len(num_attention_blocks)), + ) + ) + print( + f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " + f"This option has LESS priority than attention_resolutions {attention_resolutions}, " + f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " + f"attention will still not be set." 
+ ) # todo: convert to warning + + self.attention_resolutions = attention_resolutions + self.dropout = dropout + self.channel_mult = channel_mult + self.conv_resample = conv_resample + self.num_classes = num_classes + self.use_checkpoint = use_checkpoint + if use_fp16: + print("WARNING: use_fp16 was dropped and has no effect anymore.") + # self.dtype = th.float16 if use_fp16 else th.float32 + self.num_heads = num_heads + self.num_head_channels = num_head_channels + self.num_heads_upsample = num_heads_upsample + self.predict_codebook_ids = n_embed is not None + + assert use_fairscale_checkpoint != use_checkpoint or not ( + use_checkpoint or use_fairscale_checkpoint + ) + + self.use_fairscale_checkpoint = False + checkpoint_wrapper_fn = ( + partial(checkpoint_wrapper, offload_to_cpu=offload_to_cpu) + if self.use_fairscale_checkpoint + else lambda x: x + ) + + time_embed_dim = model_channels * 4 + self.time_embed = checkpoint_wrapper_fn( + nn.Sequential( + linear(model_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ) + ) + + if self.num_classes is not None: + if isinstance(self.num_classes, int): + self.label_emb = nn.Embedding(num_classes, time_embed_dim) + elif self.num_classes == "continuous": + print("setting up linear c_adm embedding layer") + self.label_emb = nn.Linear(1, time_embed_dim) + elif self.num_classes == "timestep": + self.label_emb = checkpoint_wrapper_fn( + nn.Sequential( + Timestep(model_channels), + nn.Sequential( + linear(model_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ), + ) + ) + elif self.num_classes == "sequential": + assert adm_in_channels is not None + self.label_emb = nn.Sequential( + nn.Sequential( + linear(adm_in_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ) + ) + else: + raise ValueError() + + self.input_blocks = nn.ModuleList( + [ + TimestepEmbedSequential( + conv_nd(dims, in_channels, model_channels, 3, padding=1) + ) 
+ ] + ) + self._feature_size = model_channels + input_block_chans = [model_channels] + ch = model_channels + ds = 1 + for level, mult in enumerate(channel_mult): + for nr in range(self.num_res_blocks[level]): + layers = [ + checkpoint_wrapper_fn( + resblk_class( + ch, + time_embed_dim, + dropout, + out_channels=mult * model_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + **resblk_kwargs, + ) + ) + ] + ch = mult * model_channels + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + # num_heads = 1 + dim_head = ( + ch // num_heads + if use_spatial_transformer + else num_head_channels + ) + if exists(disable_self_attentions): + disabled_sa = disable_self_attentions[level] + else: + disabled_sa = False + + if ( + not exists(num_attention_blocks) + or nr < num_attention_blocks[level] + ): + layers.append( + checkpoint_wrapper_fn( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) + ) + if not use_spatial_transformer + else checkpoint_wrapper_fn( + transformer_class( + ch, + num_heads, + dim_head, + depth=transformer_depth[level], + context_dim=context_dim, + disable_self_attn=disabled_sa, + use_linear=use_linear_in_transformer, + attn_type=spatial_transformer_attn_type, + use_checkpoint=use_checkpoint, + **transformer_kwargs, + ) + ) + ) + self.input_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + out_ch = ch + self.input_blocks.append( + TimestepEmbedSequential( + checkpoint_wrapper_fn( + resblk_class( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=True, + **resblk_kwargs, 
+ ) + ) + if resblock_updown + # else Downsample( + else downsample_class( + ch, conv_resample, dims=dims, out_channels=out_ch + ) + ) + ) + ch = out_ch + input_block_chans.append(ch) + ds *= 2 + self._feature_size += ch + + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + # num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + self.middle_block = TimestepEmbedSequential( + checkpoint_wrapper_fn( + resblk_class( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + **resblk_kwargs, + ) + ), + checkpoint_wrapper_fn( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) + ) + if not use_spatial_transformer + else checkpoint_wrapper_fn( + transformer_class( # always uses a self-attn + ch, + num_heads, + dim_head, + depth=transformer_depth_middle, + context_dim=context_dim, + disable_self_attn=disable_middle_self_attn, + use_linear=use_linear_in_transformer, + attn_type=spatial_transformer_attn_type, + use_checkpoint=use_checkpoint, + **transformer_kwargs, + ) + ), + checkpoint_wrapper_fn( + resblk_class( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + **resblk_kwargs, + ) + ), + ) + self._feature_size += ch + + self.output_blocks = nn.ModuleList([]) + for level, mult in list(enumerate(channel_mult))[::-1]: + for i in range(self.num_res_blocks[level] + 1): + ich = input_block_chans.pop() + layers = [ + checkpoint_wrapper_fn( + resblk_class( + ch + ich, + time_embed_dim, + dropout, + out_channels=model_channels * mult, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + **resblk_kwargs, + ) + ) + ] + ch = model_channels * mult + if 
ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + # num_heads = 1 + dim_head = ( + ch // num_heads + if use_spatial_transformer + else num_head_channels + ) + if exists(disable_self_attentions): + disabled_sa = disable_self_attentions[level] + else: + disabled_sa = False + + if ( + not exists(num_attention_blocks) + or i < num_attention_blocks[level] + ): + layers.append( + checkpoint_wrapper_fn( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads_upsample, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) + ) + if not use_spatial_transformer + else checkpoint_wrapper_fn( + transformer_class( + ch, + num_heads, + dim_head, + depth=transformer_depth[level], + context_dim=context_dim, + disable_self_attn=disabled_sa, + use_linear=use_linear_in_transformer, + attn_type=spatial_transformer_attn_type, + use_checkpoint=use_checkpoint, + **transformer_kwargs, + ) + ) + ) + if level and i == self.num_res_blocks[level]: + out_ch = ch + layers.append( + checkpoint_wrapper_fn( + resblk_class( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + up=True, + **resblk_kwargs, + ) + ) + if resblock_updown + # else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) + else upsample_class( + ch, conv_resample, dims=dims, out_channels=out_ch + ) + ) + ds //= 2 + self.output_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + + self.out = checkpoint_wrapper_fn( + nn.Sequential( + normalization(ch), + nn.SiLU(), + zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), + ) + ) + if self.predict_codebook_ids: + self.id_predictor = checkpoint_wrapper_fn( + nn.Sequential( + normalization(ch), + conv_nd(dims, model_channels, n_embed, 1), + # 
nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits + ) + ) + + def convert_to_fp16(self): + """ + Convert the torso of the model to float16. + """ + self.input_blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + self.output_blocks.apply(convert_module_to_f16) + + def convert_to_fp32(self): + """ + Convert the torso of the model to float32. + """ + self.input_blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + self.output_blocks.apply(convert_module_to_f32) + + def forward(self, x, timesteps=None, context=None, y=None, **kwargs): + """ + Apply the model to an input batch. + :param x: an [N x C x ...] Tensor of inputs. + :param timesteps: a 1-D batch of timesteps. + :param context: conditioning plugged in via crossattn + :param y: an [N] Tensor of labels, if class-conditional. + :return: an [N x C x ...] Tensor of outputs. + """ + assert (y is not None) == ( + self.num_classes is not None + ), "must specify y if and only if the model is class-conditional" + hs = [] + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + emb = self.time_embed(t_emb) + + if self.num_classes is not None: + assert y.shape[0] == x.shape[0] + emb = emb + self.label_emb(y) + + # h = x.type(self.dtype) + h = x + for module in self.input_blocks: + h = module(h, emb, context) + hs.append(h) + h = self.middle_block(h, emb, context) + for module in self.output_blocks: + h = th.cat([h, hs.pop()], dim=1) + h = module(h, emb, context) + h = h.type(x.dtype) + if self.predict_codebook_ids: + assert False, "not supported anymore. what the f*** are you doing?" + else: + return self.out(h) + + +class UNetModel3D(UNetModel): + """ + Extends the UNetModel to 3D inputs. 
+ """ + + def __init__( + self, + temporal_kernel_size=None, # add to adjust temporal conv kernel size, default is 3 + offload_to_cpu=False, + n_embed=None, + use_learnable_alpha=False, + *args, + **kwargs, + ): + kwargs["dims"] = 2 + kwargs["unet_type"] = kwargs.get( + "unet_type", "pseudo-3d" + ) # default to pseudo-3d + kwargs["offload_to_cpu"] = offload_to_cpu + kwargs["n_embed"] = n_embed + kwargs["use_learnable_alpha"] = use_learnable_alpha + assert ( + kwargs["use_spatial_transformer"] == True + ), "not implemented for use_spatial_transformer is False" + super().__init__(*args, **kwargs) + + model_channels = kwargs["model_channels"] + out_channels = kwargs["out_channels"] + self.temporal_kernel_size = default(temporal_kernel_size, 3) + + self.input_blocks_temporal = TimestepEmbedSequential( + zero_module( + conv_nd( + 1, + model_channels, + model_channels, + self.temporal_kernel_size, + padding=self.temporal_kernel_size // 2, + ) + ) + ) + checkpoint_wrapper_fn = ( + partial(checkpoint_wrapper, offload_to_cpu=offload_to_cpu) + if self.use_fairscale_checkpoint + else lambda x: x + ) + self.out_temporal = checkpoint_wrapper_fn( + nn.Sequential( + nn.SiLU(), + zero_module(conv_nd(1, out_channels, out_channels, 3, padding=1)), + ) + ) + + if self.predict_codebook_ids: + self.id_predictor_temporal = checkpoint_wrapper_fn( + nn.Sequential( + zero_module(conv_nd(1, n_embed, n_embed, 1)), + ) + ) + + +class NoTimeUNetModel(UNetModel): + def forward(self, x, timesteps=None, context=None, y=None, **kwargs): + timesteps = th.zeros_like(timesteps) + return super().forward(x, timesteps, context, y, **kwargs) + + +class EncoderUNetModel(nn.Module): + """ + The half UNet model with attention and timestep embedding. + For usage, see UNet. 
+ """ + + def __init__( + self, + image_size, + in_channels, + model_channels, + out_channels, + num_res_blocks, + attention_resolutions, + dropout=0, + channel_mult=(1, 2, 4, 8), + conv_resample=True, + dims=2, + use_checkpoint=False, + use_fp16=False, + num_heads=1, + num_head_channels=-1, + num_heads_upsample=-1, + use_scale_shift_norm=False, + resblock_updown=False, + use_new_attention_order=False, + pool="adaptive", + *args, + **kwargs, + ): + super().__init__() + + if num_heads_upsample == -1: + num_heads_upsample = num_heads + + self.in_channels = in_channels + self.model_channels = model_channels + self.out_channels = out_channels + self.num_res_blocks = num_res_blocks + self.attention_resolutions = attention_resolutions + self.dropout = dropout + self.channel_mult = channel_mult + self.conv_resample = conv_resample + self.use_checkpoint = use_checkpoint + self.dtype = th.float16 if use_fp16 else th.float32 + self.num_heads = num_heads + self.num_head_channels = num_head_channels + self.num_heads_upsample = num_heads_upsample + + time_embed_dim = model_channels * 4 + self.time_embed = nn.Sequential( + linear(model_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ) + + self.input_blocks = nn.ModuleList( + [ + TimestepEmbedSequential( + conv_nd(dims, in_channels, model_channels, 3, padding=1) + ) + ] + ) + self._feature_size = model_channels + input_block_chans = [model_channels] + ch = model_channels + ds = 1 + for level, mult in enumerate(channel_mult): + for _ in range(num_res_blocks): + layers = [ + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=mult * model_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = mult * model_channels + if ds in attention_resolutions: + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=num_head_channels, + 
use_new_attention_order=use_new_attention_order, + ) + ) + self.input_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + out_ch = ch + self.input_blocks.append( + TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=True, + ) + if resblock_updown + else Downsample( + ch, conv_resample, dims=dims, out_channels=out_ch + ) + ) + ) + ch = out_ch + input_block_chans.append(ch) + ds *= 2 + self._feature_size += ch + + self.middle_block = TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=num_head_channels, + use_new_attention_order=use_new_attention_order, + ), + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + ) + self._feature_size += ch + self.pool = pool + if pool == "adaptive": + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + nn.AdaptiveAvgPool2d((1, 1)), + zero_module(conv_nd(dims, ch, out_channels, 1)), + nn.Flatten(), + ) + elif pool == "attention": + assert num_head_channels != -1 + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + AttentionPool2d( + (image_size // ds), ch, num_head_channels, out_channels + ), + ) + elif pool == "spatial": + self.out = nn.Sequential( + nn.Linear(self._feature_size, 2048), + nn.ReLU(), + nn.Linear(2048, self.out_channels), + ) + elif pool == "spatial_v2": + self.out = nn.Sequential( + nn.Linear(self._feature_size, 2048), + normalization(2048), + nn.SiLU(), + nn.Linear(2048, self.out_channels), + ) + else: + raise NotImplementedError(f"Unexpected {pool} pooling") + + 
def convert_to_fp16(self): + """ + Convert the torso of the model to float16. + """ + self.input_blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + + def convert_to_fp32(self): + """ + Convert the torso of the model to float32. + """ + self.input_blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + + def forward(self, x, timesteps): + """ + Apply the model to an input batch. + :param x: an [N x C x ...] Tensor of inputs. + :param timesteps: a 1-D batch of timesteps. + :return: an [N x K] Tensor of outputs. + """ + emb = self.time_embed(timestep_embedding(timesteps, self.model_channels)) + + results = [] + # h = x.type(self.dtype) + h = x + for module in self.input_blocks: + h = module(h, emb) + if self.pool.startswith("spatial"): + results.append(h.type(x.dtype).mean(dim=(2, 3))) + h = self.middle_block(h, emb) + if self.pool.startswith("spatial"): + results.append(h.type(x.dtype).mean(dim=(2, 3))) + h = th.cat(results, axis=-1) + return self.out(h) + else: + h = h.type(x.dtype) + return self.out(h) + + +if __name__ == "__main__": + + class Dummy(nn.Module): + def __init__(self, in_channels=3, model_channels=64): + super().__init__() + self.input_blocks = nn.ModuleList( + [ + TimestepEmbedSequential( + conv_nd(2, in_channels, model_channels, 3, padding=1) + ) + ] + ) + + model = UNetModel( + use_checkpoint=True, + image_size=64, + in_channels=4, + out_channels=4, + model_channels=128, + attention_resolutions=[4, 2], + num_res_blocks=2, + channel_mult=[1, 2, 4], + num_head_channels=64, + use_spatial_transformer=False, + use_linear_in_transformer=True, + transformer_depth=1, + legacy=False, + ).cuda() + x = th.randn(11, 4, 64, 64).cuda() + t = th.randint(low=0, high=10, size=(11,), device="cuda") + o = model(x, t) + print("done.") diff --git a/CCEdit-main/sgm/modules/diffusionmodules/sampling.py b/CCEdit-main/sgm/modules/diffusionmodules/sampling.py new file mode 100644 index 
0000000000000000000000000000000000000000..8b9ad1951897d4556ab98d93019f9a9770fcc372 --- /dev/null +++ b/CCEdit-main/sgm/modules/diffusionmodules/sampling.py @@ -0,0 +1,485 @@ +""" + Partially ported from https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/sampling.py +""" + + +from typing import Dict, Union + +import torch +from omegaconf import ListConfig, OmegaConf +from tqdm import tqdm + +from ...modules.diffusionmodules.sampling_utils import ( + get_ancestral_step, + linear_multistep_coeff, + to_d, + to_neg_log_sigma, + to_sigma, +) +from ...util import append_dims, default, instantiate_from_config + +DEFAULT_GUIDER = {"target": "sgm.modules.diffusionmodules.guiders.IdentityGuider"} + + +class BaseDiffusionSampler: + def __init__( + self, + discretization_config: Union[Dict, ListConfig, OmegaConf], + num_steps: Union[int, None] = None, + guider_config: Union[Dict, ListConfig, OmegaConf, None] = None, + verbose: bool = False, + device: str = "cuda", + ): + self.num_steps = num_steps + self.discretization = instantiate_from_config(discretization_config) + self.guider = instantiate_from_config( + default( + guider_config, + DEFAULT_GUIDER, + ) + ) + self.verbose = verbose + self.device = device + + def prepare_sampling_loop(self, x, cond, uc=None, num_steps=None): + sigmas = self.discretization( + self.num_steps if num_steps is None else num_steps, device=self.device + ) + uc = default(uc, cond) + + x *= torch.sqrt(1.0 + sigmas[0] ** 2.0) + num_sigmas = len(sigmas) + + s_in = x.new_ones([x.shape[0]]) + + return x, s_in, sigmas, num_sigmas, cond, uc + + def denoise(self, x, denoiser, sigma, cond, uc): + denoised = denoiser(*self.guider.prepare_inputs(x, sigma, cond, uc)) + denoised = self.guider(denoised, sigma) + return denoised + + def get_sigma_gen(self, num_sigmas): + sigma_generator = range(num_sigmas - 1) + if self.verbose: + print("#" * 30, " Sampling setting ", "#" * 30) + print(f"Sampler: {self.__class__.__name__}") + print(f"Discretization: 
{self.discretization.__class__.__name__}") + print(f"Guider: {self.guider.__class__.__name__}") + sigma_generator = tqdm( + sigma_generator, + total=num_sigmas, + desc=f"Sampling with {self.__class__.__name__} for {num_sigmas} steps", + ) + return sigma_generator + + def sdedit(self, denoise_steps, denoiser, x, cond, uc=None, num_steps=None): + raise NotImplementedError + + +class SingleStepDiffusionSampler(BaseDiffusionSampler): + def sampler_step(self, sigma, next_sigma, denoiser, x, cond, uc, *args, **kwargs): + raise NotImplementedError + + def euler_step(self, x, d, dt): + return x + dt * d + + +class EDMSampler(SingleStepDiffusionSampler): + def __init__( + self, s_churn=0.0, s_tmin=0.0, s_tmax=float("inf"), s_noise=1.0, *args, **kwargs + ): + super().__init__(*args, **kwargs) + + self.s_churn = s_churn + self.s_tmin = s_tmin + self.s_tmax = s_tmax + self.s_noise = s_noise + + def sampler_step(self, sigma, next_sigma, denoiser, x, cond, uc=None, gamma=0.0): + sigma_hat = sigma * (gamma + 1.0) + if gamma > 0: + eps = torch.randn_like(x) * self.s_noise + x = x + eps * append_dims(sigma_hat**2 - sigma**2, x.ndim) ** 0.5 + + denoised = self.denoise(x, denoiser, sigma_hat, cond, uc) + d = to_d(x, sigma_hat, denoised) + dt = append_dims(next_sigma - sigma_hat, x.ndim) + + euler_step = self.euler_step(x, d, dt) + x = self.possible_correction_step( + euler_step, x, d, dt, next_sigma, denoiser, cond, uc + ) + return x + + def __call__(self, denoiser, x, cond, uc=None, num_steps=None): + x, s_in, sigmas, num_sigmas, cond, uc = self.prepare_sampling_loop( + x, cond, uc, num_steps + ) + + for i in self.get_sigma_gen(num_sigmas): + gamma = ( + min(self.s_churn / (num_sigmas - 1), 2**0.5 - 1) + if self.s_tmin <= sigmas[i] <= self.s_tmax + else 0.0 + ) + x = self.sampler_step( + s_in * sigmas[i], + s_in * sigmas[i + 1], + denoiser, + x, + cond, + uc, + gamma, + ) + + return x + + def sample_inpainting(self, denoiser, x, cond, x0, mask, uc=None, num_steps=None): + x, s_in, 
sigmas, num_sigmas, cond, uc = self.prepare_sampling_loop( + x, cond, uc, num_steps + ) + + for i in self.get_sigma_gen(num_sigmas): + gamma = ( + min(self.s_churn / (num_sigmas - 1), 2**0.5 - 1) + if self.s_tmin <= sigmas[i] <= self.s_tmax + else 0.0 + ) + + noise = torch.randn_like(x) + img_orig = x0 + noise * append_dims(sigmas[i], x.ndim) + img_orig = img_orig / torch.sqrt(1 + sigmas[i] ** 2) # TODO: make sure if right to comment this line + x = x * mask + img_orig * (1 - mask) + + x = self.sampler_step( + s_in * sigmas[i], + s_in * sigmas[i + 1], + denoiser, + x, + cond, + uc, + gamma, + ) + + return x + + +class AncestralSampler(SingleStepDiffusionSampler): + def __init__(self, eta=1.0, s_noise=1.0, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.eta = eta + self.s_noise = s_noise + self.noise_sampler = lambda x: torch.randn_like(x) + + def ancestral_euler_step(self, x, denoised, sigma, sigma_down): + d = to_d(x, sigma, denoised) + dt = append_dims(sigma_down - sigma, x.ndim) + + return self.euler_step(x, d, dt) + + def ancestral_step(self, x, sigma, next_sigma, sigma_up): + x = torch.where( + append_dims(next_sigma, x.ndim) > 0.0, + x + self.noise_sampler(x) * self.s_noise * append_dims(sigma_up, x.ndim), + x, + ) + return x + + def __call__(self, denoiser, x, cond, uc=None, num_steps=None): + x, s_in, sigmas, num_sigmas, cond, uc = self.prepare_sampling_loop( + x, cond, uc, num_steps + ) + + for i in self.get_sigma_gen(num_sigmas): + x = self.sampler_step( + s_in * sigmas[i], + s_in * sigmas[i + 1], + denoiser, + x, + cond, + uc, + ) + + return x + + def sample_inpainting(self, denoiser, x, cond, x0, mask, uc=None, num_steps=None): + x, s_in, sigmas, num_sigmas, cond, uc = self.prepare_sampling_loop( + x, cond, uc, num_steps + ) + for i in self.get_sigma_gen(num_sigmas): + # img_orig = self.model.q_sample(x0, ts) + noise = torch.randn_like(x) + img_orig = x0 + noise * append_dims(sigmas[i], x.ndim) + img_orig = img_orig / torch.sqrt(1 + 
sigmas[i] ** 2) # TODO: make sure if right to comment this line + x = x * mask + img_orig * (1 - mask) + # import pdb; pdb.set_trace() + # import torchvision, einops + # torchvision.utils.save_image(einops.rearrange(mask, 'b c t h w -> (b t) c h w'), 'debug/inpainting/mask.png', normalize=True) + # torchvision.utils.save_image(einops.rearrange(x0, 'b c t h w -> (b t) c h w'), 'debug/inpainting/x0.png', normalize=True) + # torchvision.utils.save_image(einops.rearrange(img_orig, 'b c t h w -> (b t) c h w'), 'debug/inpainting/img_orig.png', normalize=True) + # torchvision.utils.save_image(einops.rearrange(x, 'b c t h w -> (b t) c h w'), 'debug/inpainting/x.png', normalize=True) + + x = self.sampler_step( + s_in * sigmas[i], + s_in * sigmas[i + 1], + denoiser, + x, + cond, + uc, + ) + + return x + + def sampling_blending(self, denoiser, x, cond, x0, uc=None, num_steps=None): + x, s_in, sigmas, num_sigmas, cond, uc = self.prepare_sampling_loop( + x, cond, uc, num_steps + ) + + b, c, t, h, w = x.shape + # mask = torch.zeros((b, 1, t, 1, 1), device=x.device) + # mask[:, :, t//2+1:, :, :] = 1 + + for i in self.get_sigma_gen(num_sigmas): + noise = torch.randn_like(x) + img_orig = x0 + noise * append_dims(sigmas[i], x.ndim) + img_orig = img_orig / torch.sqrt(1 + sigmas[i] ** 2) # TODO: make sure if right to comment this line + x[:,:,:t//2,:,:] = img_orig[:,:,t//2+1:,:,:] + # x[:,:,:t//2+1,:,:] = img_orig[:,:,t//2:,:,:] + # x = x * mask + img_orig * (1 - mask) + + # import pdb; pdb.set_trace() + # import torchvision, einops + # torchvision.utils.save_image(einops.rearrange(mask, 'b c t h w -> (b t) c h w'), 'debug/inpainting/mask.png', normalize=True) + # torchvision.utils.save_image(einops.rearrange(x0, 'b c t h w -> (b t) c h w'), 'debug/inpainting/x0.png', normalize=True) + # torchvision.utils.save_image(einops.rearrange(img_orig, 'b c t h w -> (b t) c h w'), 'debug/inpainting/img_orig.png', normalize=True) + # torchvision.utils.save_image(einops.rearrange(x, 'b c t h w -> 
(b t) c h w'), 'debug/inpainting/x.png', normalize=True) + + x = self.sampler_step( + s_in * sigmas[i], + s_in * sigmas[i + 1], + denoiser, + x, + cond, + uc, + ) + + return x + + def sdedit(self, denoise_steps, denoiser, x, cond, uc=None, num_steps=None): + x, s_in, sigmas, num_sigmas, cond, uc = self.prepare_sampling_loop( + x, cond, uc, num_steps + ) + ''' + x.shape: torch.Size([1, 4, 9, 48, 72]) + s_in: [1.] + sigmas.shape: torch.Size([31]) + cond.keys(): dict_keys(['crossattn', 'control_hint']) + uc.keys(): dict_keys(['crossattn', 'control_hint']) + ''' + for i in self.get_sigma_gen(num_sigmas): + if i < num_sigmas-1 - denoise_steps: + continue + x = self.sampler_step( + s_in * sigmas[i], + s_in * sigmas[i + 1], + denoiser, + x, + cond, + uc, + ) + + return x + + +class LinearMultistepSampler(BaseDiffusionSampler): + def __init__( + self, + order=4, + *args, + **kwargs, + ): + super().__init__(*args, **kwargs) + + self.order = order + + def __call__(self, denoiser, x, cond, uc=None, num_steps=None, **kwargs): + x, s_in, sigmas, num_sigmas, cond, uc = self.prepare_sampling_loop( + x, cond, uc, num_steps + ) + + ds = [] + sigmas_cpu = sigmas.detach().cpu().numpy() + for i in self.get_sigma_gen(num_sigmas): + sigma = s_in * sigmas[i] + denoised = denoiser( + *self.guider.prepare_inputs(x, sigma, cond, uc), **kwargs + ) + denoised = self.guider(denoised, sigma) + d = to_d(x, sigma, denoised) + ds.append(d) + if len(ds) > self.order: + ds.pop(0) + cur_order = min(i + 1, self.order) + coeffs = [ + linear_multistep_coeff(cur_order, sigmas_cpu, i, j) + for j in range(cur_order) + ] + x = x + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds))) + + return x + + +class EulerEDMSampler(EDMSampler): + def possible_correction_step( + self, euler_step, x, d, dt, next_sigma, denoiser, cond, uc + ): + return euler_step + + +class HeunEDMSampler(EDMSampler): + def possible_correction_step( + self, euler_step, x, d, dt, next_sigma, denoiser, cond, uc + ): + if 
torch.sum(next_sigma) < 1e-14: + # Save a network evaluation if all noise levels are 0 + return euler_step + else: + denoised = self.denoise(euler_step, denoiser, next_sigma, cond, uc) + d_new = to_d(euler_step, next_sigma, denoised) + d_prime = (d + d_new) / 2.0 + + # apply correction if noise level is not 0 + x = torch.where( + append_dims(next_sigma, x.ndim) > 0.0, x + d_prime * dt, euler_step + ) + return x + + +class EulerAncestralSampler(AncestralSampler): + def sampler_step(self, sigma, next_sigma, denoiser, x, cond, uc): + sigma_down, sigma_up = get_ancestral_step(sigma, next_sigma, eta=self.eta) + denoised = self.denoise(x, denoiser, sigma, cond, uc) + x = self.ancestral_euler_step(x, denoised, sigma, sigma_down) + x = self.ancestral_step(x, sigma, next_sigma, sigma_up) + + return x + + +class DPMPP2SAncestralSampler(AncestralSampler): + def get_variables(self, sigma, sigma_down): + t, t_next = [to_neg_log_sigma(s) for s in (sigma, sigma_down)] + h = t_next - t + s = t + 0.5 * h + return h, s, t, t_next + + def get_mult(self, h, s, t, t_next): + mult1 = to_sigma(s) / to_sigma(t) + mult2 = (-0.5 * h).expm1() + mult3 = to_sigma(t_next) / to_sigma(t) + mult4 = (-h).expm1() + + return mult1, mult2, mult3, mult4 + + def sampler_step(self, sigma, next_sigma, denoiser, x, cond, uc=None, **kwargs): + sigma_down, sigma_up = get_ancestral_step(sigma, next_sigma, eta=self.eta) + denoised = self.denoise(x, denoiser, sigma, cond, uc) + x_euler = self.ancestral_euler_step(x, denoised, sigma, sigma_down) + + if torch.sum(sigma_down) < 1e-14: + # Save a network evaluation if all noise levels are 0 + x = x_euler + else: + h, s, t, t_next = self.get_variables(sigma, sigma_down) + mult = [ + append_dims(mult, x.ndim) for mult in self.get_mult(h, s, t, t_next) + ] + + x2 = mult[0] * x - mult[1] * denoised + denoised2 = self.denoise(x2, denoiser, to_sigma(s), cond, uc) + x_dpmpp2s = mult[2] * x - mult[3] * denoised2 + + # apply correction if noise level is not 0 + x = 
torch.where(append_dims(sigma_down, x.ndim) > 0.0, x_dpmpp2s, x_euler) + + x = self.ancestral_step(x, sigma, next_sigma, sigma_up) + return x + + +class DPMPP2MSampler(BaseDiffusionSampler): + def get_variables(self, sigma, next_sigma, previous_sigma=None): + t, t_next = [to_neg_log_sigma(s) for s in (sigma, next_sigma)] + h = t_next - t + + if previous_sigma is not None: + h_last = t - to_neg_log_sigma(previous_sigma) + r = h_last / h + return h, r, t, t_next + else: + return h, None, t, t_next + + def get_mult(self, h, r, t, t_next, previous_sigma): + mult1 = to_sigma(t_next) / to_sigma(t) + mult2 = (-h).expm1() + + if previous_sigma is not None: + mult3 = 1 + 1 / (2 * r) + mult4 = 1 / (2 * r) + return mult1, mult2, mult3, mult4 + else: + return mult1, mult2 + + def sampler_step( + self, + old_denoised, + previous_sigma, + sigma, + next_sigma, + denoiser, + x, + cond, + uc=None, + ): + denoised = self.denoise(x, denoiser, sigma, cond, uc) + + h, r, t, t_next = self.get_variables(sigma, next_sigma, previous_sigma) + mult = [ + append_dims(mult, x.ndim) + for mult in self.get_mult(h, r, t, t_next, previous_sigma) + ] + + x_standard = mult[0] * x - mult[1] * denoised + if old_denoised is None or torch.sum(next_sigma) < 1e-14: + # Save a network evaluation if all noise levels are 0 or on the first step + return x_standard, denoised + else: + denoised_d = mult[2] * denoised - mult[3] * old_denoised + x_advanced = mult[0] * x - mult[1] * denoised_d + + # apply correction if noise level is not 0 and not first step + x = torch.where( + append_dims(next_sigma, x.ndim) > 0.0, x_advanced, x_standard + ) + + return x, denoised + + def __call__(self, denoiser, x, cond, uc=None, num_steps=None, **kwargs): + x, s_in, sigmas, num_sigmas, cond, uc = self.prepare_sampling_loop( + x, cond, uc, num_steps + ) + + old_denoised = None + for i in self.get_sigma_gen(num_sigmas): + x, old_denoised = self.sampler_step( + old_denoised, + None if i == 0 else s_in * sigmas[i - 1], + s_in * 
# NOTE(review): this span was mangled by extraction.  It begins inside the
# argument list of DPMPP2MSampler.__call__'s sampler_step invocation; the
# truncated fragment is preserved below as a comment.
#             sigmas[i],
#             s_in * sigmas[i + 1],
#             denoiser,
#             x,
#             cond,
#             uc=uc,
#         )
#
#     return x

# --- begin sgm/modules/diffusionmodules/sampling_utils.py ---
import torch
from scipy import integrate


class NoDynamicThresholding:
    """Plain classifier-free guidance combination; no dynamic thresholding."""

    def __call__(self, uncond, cond, scale):
        # Standard CFG: move from the unconditional prediction toward the
        # conditional one by `scale`.
        return uncond + scale * (cond - uncond)


def linear_multistep_coeff(order, t, i, j, epsrel=1e-4):
    """Coefficient of the j-th stored derivative for a linear multistep step.

    Numerically integrates the j-th Lagrange basis polynomial (built on the
    last `order` nodes t[i], t[i-1], ...) over [t[i], t[i + 1]].

    :param order: number of history points used by the multistep rule.
    :param t: 1-D sequence of sigma/time values (indexable by int).
    :param i: index of the current step.
    :param j: which stored derivative (0 = most recent) this coefficient weights.
    :param epsrel: relative tolerance forwarded to scipy.integrate.quad.
    :raises ValueError: if fewer than `order` history points exist at step i.
    """
    if order - 1 > i:
        raise ValueError(f"Order {order} too high for step {i}")

    def fn(tau):
        # Lagrange basis polynomial l_j(tau) over the nodes t[i - k].
        prod = 1.0
        for k in range(order):
            if j == k:
                continue
            prod *= (tau - t[i - k]) / (t[i - j] - t[i - k])
        return prod

    return integrate.quad(fn, t[i], t[i + 1], epsrel=epsrel)[0]


def get_ancestral_step(sigma_from, sigma_to, eta=1.0):
    """Split an ancestral step into deterministic and stochastic parts.

    Returns (sigma_down, sigma_up) such that stepping deterministically to
    sigma_down and then adding fresh noise with std sigma_up lands at noise
    level sigma_to.  With falsy `eta` the step is fully deterministic
    (sigma_up == 0.0).
    """
    if not eta:
        return sigma_to, 0.0
    sigma_up = torch.minimum(
        sigma_to,
        eta
        * (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5,
    )
    sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5
    return sigma_down, sigma_up


def to_d(x, sigma, denoised):
    """Convert a denoiser output into the ODE derivative d(x)/d(sigma)."""
    # Imported lazily so this utility module stays importable on its own;
    # the original module-level `from ...util import append_dims` made any
    # standalone import of this file fail.
    from ...util import append_dims

    return (x - denoised) / append_dims(sigma, x.ndim)


def to_neg_log_sigma(sigma):
    """Map sigma -> -log(sigma) (the 't' domain used by DPM-Solver++)."""
    return sigma.log().neg()


def to_sigma(neg_log_sigma):
    """Inverse of to_neg_log_sigma: map -log(sigma) back to sigma."""
    return neg_log_sigma.neg().exp()


# --- begin sgm/modules/diffusionmodules/sigma_sampling.py (truncated head) ---
# The remainder of this span is the head of the next extracted file; the
# class definition continues in the next span of the file.
# import torch
# from ...util import default, instantiate_from_config
#
# class EDMSampling:
#     def __init__(self, p_mean=-1.2, p_std=1.2):
#         self.p_mean = p_mean
# NOTE(review): this span was mangled by extraction.  It begins inside
# EDMSampling.__init__ (the class header sits in the previous span); the
# class is reconstructed here in full so the definition is self-contained.


class EDMSampling:
    """Draw training sigmas from the log-normal proposal of EDM (Karras et al., 2022)."""

    def __init__(self, p_mean=-1.2, p_std=1.2):
        # Parameters of the normal distribution over log-sigma.
        self.p_mean = p_mean
        self.p_std = p_std

    def __call__(self, n_samples, rand=None):
        # `rand` overrides the standard-normal draw (useful for testing /
        # deterministic runs).
        log_sigma = self.p_mean + self.p_std * default(rand, torch.randn((n_samples,)))
        return log_sigma.exp()


class DiscreteSampling:
    """Draw training sigmas uniformly from a fixed discretization table."""

    def __init__(self, discretization_config, num_idx, do_append_zero=False, flip=True):
        self.num_idx = num_idx
        # Precompute the sigma table once from the configured discretization.
        self.sigmas = instantiate_from_config(discretization_config)(
            num_idx, do_append_zero=do_append_zero, flip=flip
        )

    def idx_to_sigma(self, idx):
        # Table lookup; `idx` may be an int or an index tensor.
        return self.sigmas[idx]

    def __call__(self, n_samples, rand=None):
        # `rand` overrides the uniform index draw (useful for testing).
        idx = default(
            rand,
            torch.randint(0, self.num_idx, (n_samples,)),
        )
        return self.idx_to_sigma(idx)


# --- begin sgm/modules/diffusionmodules/util.py (truncated head) ---
# The module docstring below is cut off by the extraction and continues in
# the next span of the file.
# """
# adopted from
# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
# and
# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
# and
# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
#
# thanks!
+""" + +import math +import os +import numpy as np + +import torch +import torch.nn as nn +from einops import repeat, rearrange + +from torch.utils.checkpoint import checkpoint as cp + +import deepspeed + +def make_beta_schedule( + schedule, + n_timestep, + linear_start=1e-4, + linear_end=2e-2, +): + if schedule == "linear": + betas = ( + torch.linspace( + linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64 + ) + ** 2 + ) + return betas.numpy() + + +def extract_into_tensor(a, t, x_shape): + b, *_ = t.shape + out = a.gather(-1, t) + return out.reshape(b, *((1,) * (len(x_shape) - 1))) + + +def mixed_checkpoint(func, inputs: dict, params, flag): + """ + Evaluate a function without caching intermediate activations, allowing for + reduced memory at the expense of extra compute in the backward pass. This differs from the original checkpoint function + borrowed from https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py in that + it also works with non-tensor inputs + :param func: the function to evaluate. + :param inputs: the argument dictionary to pass to `func`. + :param params: a sequence of parameters `func` depends on but does not + explicitly take as arguments. + :param flag: if False, disable gradient checkpointing. 
+ """ + if flag: + tensor_keys = [key for key in inputs if isinstance(inputs[key], torch.Tensor)] + tensor_inputs = [ + inputs[key] for key in inputs if isinstance(inputs[key], torch.Tensor) + ] + non_tensor_keys = [ + key for key in inputs if not isinstance(inputs[key], torch.Tensor) + ] + non_tensor_inputs = [ + inputs[key] for key in inputs if not isinstance(inputs[key], torch.Tensor) + ] + args = tuple(tensor_inputs) + tuple(non_tensor_inputs) + tuple(params) + return MixedCheckpointFunction.apply( + func, + len(tensor_inputs), + len(non_tensor_inputs), + tensor_keys, + non_tensor_keys, + *args, + ) + else: + return func(**inputs) + + +class MixedCheckpointFunction(torch.autograd.Function): + @staticmethod + def forward( + ctx, + run_function, + length_tensors, + length_non_tensors, + tensor_keys, + non_tensor_keys, + *args, + ): + ctx.end_tensors = length_tensors + ctx.end_non_tensors = length_tensors + length_non_tensors + ctx.gpu_autocast_kwargs = { + "enabled": torch.is_autocast_enabled(), + "dtype": torch.get_autocast_gpu_dtype(), + "cache_enabled": torch.is_autocast_cache_enabled(), + } + assert ( + len(tensor_keys) == length_tensors + and len(non_tensor_keys) == length_non_tensors + ) + + ctx.input_tensors = { + key: val for (key, val) in zip(tensor_keys, list(args[: ctx.end_tensors])) + } + ctx.input_non_tensors = { + key: val + for (key, val) in zip( + non_tensor_keys, list(args[ctx.end_tensors : ctx.end_non_tensors]) + ) + } + ctx.run_function = run_function + ctx.input_params = list(args[ctx.end_non_tensors :]) + + with torch.no_grad(): + output_tensors = ctx.run_function( + **ctx.input_tensors, **ctx.input_non_tensors + ) + return output_tensors + + @staticmethod + def backward(ctx, *output_grads): + # additional_args = {key: ctx.input_tensors[key] for key in ctx.input_tensors if not isinstance(ctx.input_tensors[key],torch.Tensor)} + ctx.input_tensors = { + key: ctx.input_tensors[key].detach().requires_grad_(True) + for key in ctx.input_tensors + } 
def checkpoint_new(func, input, flag=False):
    """Run ``func(*input)``, optionally under torch's gradient checkpointing.

    :param func: the callable to evaluate.
    :param input: the argument sequence unpacked into ``func``.
    :param flag: when True, route the call through
        ``torch.utils.checkpoint.checkpoint`` so intermediate activations are
        recomputed during backward instead of being stored.
    """
    if not flag:
        return func(*input)
    return cp(func, *input)
+ """ + if flag: + args = tuple(inputs) + tuple(params) + return CheckpointFunction.apply(func, len(inputs), *args) + else: + return func(*inputs) + + +# def checkpoint_new(func, input, flag): +# """ +# Custom checkpoint function. +# Evaluate a function without caching intermediate activations, allowing for +# reduced memory at the expense of extra compute in the backward pass. +# :param func: the function to evaluate. +# :param input: the argument sequence to pass to `func`. +# :param flag: if False, disable gradient checkpointing. +# """ +# if flag: +# return deepspeed.checkpointing.checkpoint(func, *input) +# else: +# return func(*input) + + +class CheckpointFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, run_function, length, *args): + ctx.run_function = run_function + ctx.input_tensors = list(args[:length]) + ctx.input_params = list(args[length:]) + ctx.gpu_autocast_kwargs = { + "enabled": torch.is_autocast_enabled(), + "dtype": torch.get_autocast_gpu_dtype(), + "cache_enabled": torch.is_autocast_cache_enabled(), + } + with torch.no_grad(): + output_tensors = ctx.run_function(*ctx.input_tensors) + return output_tensors + + @staticmethod + def backward(ctx, *output_grads): + ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] + # ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors if x is not None] + # Ensure all tensors have requires_grad set to True + ctx.input_params = [p.requires_grad_(True) for p in ctx.input_params] + with torch.enable_grad(), torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs): + # Fixes a bug where the first op in run_function modifies the + # Tensor storage in place, which is not allowed for detach()'d + # Tensors. 
def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
    """
    Create sinusoidal timestep embeddings.
    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :param repeat_only: if True, skip the sinusoids and tile the raw timestep
                        value across the embedding dimension.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    if repeat_only:
        return repeat(timesteps, "b -> b d", d=dim)

    half = dim // 2
    # Geometric frequency ladder from 1 down to ~1/max_period.
    exponents = torch.arange(start=0, end=half, dtype=torch.float32) / half
    freqs = torch.exp(-math.log(max_period) * exponents).to(device=timesteps.device)
    angles = timesteps[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(angles), torch.sin(angles)], dim=-1)
    if dim % 2:
        # Odd dim: pad one zero column so the output is exactly [N, dim].
        embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    return embedding
def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    conv_cls = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}.get(dims)
    if conv_cls is None:
        raise ValueError(f"unsupported dimensions: {dims}")
    return conv_cls(*args, **kwargs)
+ + # Downsampling + model1 = [] + in_features = 64 + out_features = in_features*2 + for _ in range(2): + model1 += [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1), + norm_layer(out_features), + nn.ReLU(inplace=True) ] + in_features = out_features + out_features = in_features*2 + self.model1 = nn.Sequential(*model1) + + model2 = [] + # Residual blocks + for _ in range(n_residual_blocks): + model2 += [ResidualBlock(in_features)] + self.model2 = nn.Sequential(*model2) + + # Upsampling + model3 = [] + out_features = in_features//2 + for _ in range(2): + model3 += [ nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1), + norm_layer(out_features), + nn.ReLU(inplace=True) ] + in_features = out_features + out_features = in_features//2 + self.model3 = nn.Sequential(*model3) + + # Output layer + model4 = [ nn.ReflectionPad2d(3), + nn.Conv2d(64, output_nc, 7)] + if sigmoid: + model4 += [nn.Sigmoid()] + + self.model4 = nn.Sequential(*model4) + + def forward(self, x, cond=None): + out = self.model0(x) + out = self.model1(out) + out = self.model2(out) + out = self.model3(out) + out = self.model4(out) + + return out + + +class LineartDetector(nn.Module): + # hacked from controlnet1.1, find differences from the official repo + def __init__(self): + super(LineartDetector, self).__init__() + self.model = self.load_model('sk_model.pth') + self.model_coarse = self.load_model('sk_model2.pth') + + def load_model(self, name): + remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/" + name + modelpath = os.path.join(annotator_ckpts_path, name) + if not os.path.exists(modelpath): + from basicsr.utils.download_util import load_file_from_url + load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path) + model = Generator(3, 1, 3) + model.load_state_dict(torch.load(modelpath, map_location=torch.device('cpu'))) + model.eval() + # model = model.cuda() + return model + + def forward(self, input_image, 
coarse): + model = self.model_coarse if coarse else self.model + # if numpy + if isinstance(input_image, np.ndarray): + assert input_image.ndim == 3 + image = input_image + with torch.no_grad(): + image = torch.from_numpy(image).float().cuda() + image = image / 255.0 + image = rearrange(image, 'h w c -> 1 c h w') + line = model(image)[0][0] + + line = line.cpu().numpy() + line = (line * 255.0).clip(0, 255).astype(np.uint8) + + return line + # or tensor + elif isinstance(input_image, torch.Tensor): + assert input_image.ndim == 4 + image = input_image + with torch.no_grad(): + image = (image + 1) / 2.0 # 0 ~ 1 + line = model(image) + line = line * 2.0 - 1.0 + line = line.clip(-1, 1) + return line # b c h w + else: + raise ValueError('input_image should be numpy or tensor') \ No newline at end of file diff --git a/CCEdit-main/sgm/modules/diffusionmodules/wrappers.py b/CCEdit-main/sgm/modules/diffusionmodules/wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..71a5205d6ff9576d7d1c5fd6dd7b4fdaa3f25871 --- /dev/null +++ b/CCEdit-main/sgm/modules/diffusionmodules/wrappers.py @@ -0,0 +1,265 @@ +import einops +import torch +import torch.nn as nn +from packaging import version + +OPENAIUNETWRAPPER = "sgm.modules.diffusionmodules.wrappers.OpenAIWrapper" +OPENAIUNETWRAPPERRAIG = "sgm.modules.diffusionmodules.wrappers.OpenAIWrapperRAIG" +OPENAIUNETWRAPPERCONTROLLDM3D = "sgm.modules.diffusionmodules.wrappers.OpenAIWrapperControlLDM3D" +OPENAIUNETWRAPPERCONTROLLDM3DSSN = "sgm.modules.diffusionmodules.wrappers.OpenAIWrapperControlLDM3DSSN" +OPENAIUNETWRAPPERCONTROLLDM3DTV2V = "sgm.modules.diffusionmodules.wrappers.OpenAIWrapperControlLDM3DTV2V" +OPENAIUNETWRAPPERCONTROLLDM3DTV2V_INTERPOLATE = "sgm.modules.diffusionmodules.wrappers.OpenAIWrapperControlLDM3DTV2VInterpolate" + +class IdentityWrapper(nn.Module): + def __init__(self, diffusion_model, compile_model: bool = False): + super().__init__() + compile = ( + torch.compile + if 
class OpenAIWrapper(IdentityWrapper):
    """Adapter mapping the conditioning dict onto the UNet's keyword API."""

    def forward(
        self, x: torch.Tensor, t: torch.Tensor, c: dict, **kwargs
    ) -> torch.Tensor:
        # Channel-concat conditioning; an empty tensor when absent is a no-op cat.
        concat = c.get("concat", torch.Tensor([]).type_as(x))
        x = torch.cat((x, concat), dim=1)

        # Match the UNet's parameter dtype (e.g. fp16 checkpoints).
        model_dtype = self.diffusion_model.time_embed[0].weight.dtype
        x = x.to(model_dtype)

        vector = c.get("vector", None)
        vector = vector if vector is None else vector.to(model_dtype)

        return self.diffusion_model(
            x,
            timesteps=t,
            context=c.get("crossattn", None),
            y=vector,
            **kwargs
        )
class OpenAIWrapperControlLDM3D(IdentityWrapper):
    """Runs the ControlNet branch first, then feeds its residuals into the UNet."""

    def forward(
        self, x: torch.Tensor, t: torch.Tensor, c: dict, **kwargs
    ) -> torch.Tensor:
        x = torch.cat((x, c.get("concat", torch.Tensor([]).type_as(x))), dim=1)
        cond_feat = c["cond_feat"]  # control image B C H W; required by this wrapper

        # Cast inputs to the ControlNet's parameter dtype.
        model_dtype = self.diffusion_model.controlnet.input_hint_block[0].weight.dtype
        x = x.to(model_dtype)
        cond_feat = cond_feat.to(model_dtype)

        crossattn = c.get("crossattn", None)
        vector = c.get("vector", None)

        # Control residuals from the auxiliary network; whether the noisy x or
        # the text context are actually used depends on the control model style.
        control = self.diffusion_model.controlnet(
            x=x,
            hint=cond_feat,
            timesteps=t,
            context=crossattn,
            y=vector,
            **kwargs
        )

        out = self.diffusion_model(
            x,
            timesteps=t,
            context=crossattn,
            y=vector,
            control=control,
            only_mid_control=False,
            **kwargs
        )

        return out
img_emb=img_emb, + only_mid_control=False, + **kwargs + ) + + return out + + +# ----------------------------------------------------- +# This is used for TV2V (text-video-to-video) generation +class OpenAIWrapperControlLDM3DTV2V(IdentityWrapper): + def forward( + self, x: torch.Tensor, t: torch.Tensor, c: dict, **kwargs + ) -> torch.Tensor: + x = torch.cat((x, c.get("concat", torch.Tensor([]).type_as(x))), dim=1) + control_hint = c["control_hint"] # -1 ~ 1 + control_hint = (control_hint + 1) / 2.0 # 0 ~ 1 + control_hint = 1.0 - control_hint # this follow the official controlNet (refer control 1.1 in the gradio_lineart.py) + + model_dtype = self.diffusion_model.controlnet.input_hint_block[0].weight.dtype + x = x.to(model_dtype) + control_hint = control_hint.to(model_dtype) + + control = self.diffusion_model.controlnet( + x=x, + hint=control_hint, + timesteps=t, + context=c.get( + "crossattn", None + ), + y=c.get("vector", None), + **kwargs + ) + cond_feat = c.get("cond_feat", None) + if cond_feat is not None: + cond_feat = cond_feat.to(model_dtype) + img_control = self.diffusion_model.controlnet_img( + x=x[:,:,x.shape[2]//2,:,:], + hint=cond_feat, + timesteps=t, + context=c.get( + "crossattn", None + ), + y=c.get("vector", None), + **kwargs + ) + else: + img_control = None + # control = [each * 0.5 for each in control] + # control = [each * 0. for each in control] # !!!!!! 
this is for test, remove it later + + out = self.diffusion_model( + x, + timesteps=t, + context=c.get("crossattn", None), + y=c.get("vector", None), + control=control, + img_control=img_control, + only_mid_control=False, + **kwargs + ) + + return out + + +class OpenAIWrapperControlLDM3DTV2VInterpolate(IdentityWrapper): + def forward( + self, x: torch.Tensor, t: torch.Tensor, c: dict, **kwargs + ) -> torch.Tensor: + x = torch.cat((x, c.get("concat", torch.Tensor([]).type_as(x))), dim=1) + control_hint = c["control_hint"] # -1 ~ 1 + control_hint = (control_hint + 1) / 2.0 # 0 ~ 1 + control_hint = 1.0 - control_hint # this follow the official controlNet (refer control 1.1 in the gradio_lineart.py) + + model_dtype = self.diffusion_model.controlnet.input_hint_block[0].weight.dtype + x = x.to(model_dtype) + control_hint = control_hint.to(model_dtype) + + control = self.diffusion_model.controlnet( + x=x, + hint=control_hint, + timesteps=t, + context=c.get( + "crossattn", None + ), + y=c.get("vector", None), + **kwargs + ) + assert 'interpolate_first_last' in c + interpolate_first = c['interpolate_first_last'][:,:,0,:,:] + interpolate_last = c['interpolate_first_last'][:,:,1,:,:] + x_tmp = torch.cat((x[:,:,0,:,:], x[:,:,-1,:,:]), dim=0) + interpolate_tmp = torch.cat((interpolate_first, interpolate_last), dim=0) + t_tmp = torch.cat((t, t), dim=0) + context_tmp = torch.cat((c['crossattn'], c['crossattn']), dim=0) if 'crossattn' in c else None + y_tmp = torch.cat([c['vector'], c['vector']], dim=0) if 'vector' in c else None + interpolate_control = self.diffusion_model.controlnet_img( + x=x_tmp, + hint=interpolate_tmp, + timesteps=t_tmp, + context=context_tmp, + y=y_tmp, + **kwargs + ) + interpolate_control = [each.chunk(2) for each in interpolate_control] + interpolate_control_first, interpolate_control_last = zip(*interpolate_control) + interpolate_control_first = list(interpolate_control_first) + interpolate_control_last = list(interpolate_control_last) + out = 
class DiagonalGaussianDistribution(object):
    """Diagonal Gaussian over image-shaped latents, parameterized by a tensor
    holding mean and log-variance stacked along channel dim 1."""

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        # Clamp for numerical stability of exp().
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            # Degenerate distribution: zero spread, so sample() == mode().
            self.var = self.std = torch.zeros_like(self.mean).to(
                device=self.parameters.device
            )

    def sample(self):
        noise = torch.randn(self.mean.shape).to(device=self.parameters.device)
        return self.mean + self.std * noise

    def kl(self, other=None):
        """KL to a standard normal (other=None) or to another diagonal Gaussian,
        summed over all non-batch dims."""
        if self.deterministic:
            return torch.Tensor([0.0])
        if other is None:
            return 0.5 * torch.sum(
                torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,
                dim=[1, 2, 3],
            )
        return 0.5 * torch.sum(
            torch.pow(self.mean - other.mean, 2) / other.var
            + self.var / other.var
            - 1.0
            - self.logvar
            + other.logvar,
            dim=[1, 2, 3],
        )

    def nll(self, sample, dims=[1, 2, 3]):
        """Negative log-likelihood of `sample` under this Gaussian, summed over `dims`."""
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(
            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
            dim=dims,
        )

    def mode(self):
        return self.mean
class LitEma(nn.Module):
    """Exponential moving average of a model's trainable parameters.

    Shadow copies are stored as buffers (dots stripped from parameter names,
    since '.' is not allowed in buffer names). Call the instance with the live
    model to update the shadows; use copy_to / store / restore to swap EMA
    weights in and out around validation or checkpointing.
    """

    def __init__(self, model, decay=0.9999, use_num_upates=True):
        super().__init__()
        if decay < 0.0 or decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.m_name2s_name = {}
        self.register_buffer("decay", torch.tensor(decay, dtype=torch.float32))
        # num_updates == -1 disables the warm-up schedule in forward().
        self.register_buffer(
            "num_updates",
            torch.tensor(0, dtype=torch.int)
            if use_num_upates
            else torch.tensor(-1, dtype=torch.int),
        )

        for name, p in model.named_parameters():
            if p.requires_grad:
                # remove as '.'-character is not allowed in buffers
                s_name = name.replace(".", "")
                self.m_name2s_name.update({name: s_name})
                self.register_buffer(s_name, p.clone().detach().data)

        self.collected_params = []

    def reset_num_updates(self):
        """Restart the warm-up counter (e.g. after loading a checkpoint)."""
        del self.num_updates
        self.register_buffer("num_updates", torch.tensor(0, dtype=torch.int))

    def forward(self, model):
        decay = self.decay

        # Warm-up: while counting updates, use a smaller effective decay so
        # the EMA tracks the model quickly early in training.
        if self.num_updates >= 0:
            self.num_updates += 1
            decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))

        one_minus_decay = 1.0 - decay

        with torch.no_grad():
            m_param = dict(model.named_parameters())
            shadow_params = dict(self.named_buffers())

            for key in m_param:
                if m_param[key].requires_grad:
                    sname = self.m_name2s_name[key]
                    # In-place EMA step: shadow <- shadow - (1-d) * (shadow - param)
                    shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
                    shadow_params[sname].sub_(
                        one_minus_decay * (shadow_params[sname] - m_param[key])
                    )
                else:
                    assert not key in self.m_name2s_name

    def copy_to(self, model):
        """Overwrite the model's trainable parameters with the EMA shadows."""
        m_param = dict(model.named_parameters())
        shadow_params = dict(self.named_buffers())
        for key in m_param:
            if m_param[key].requires_grad:
                m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
            else:
                assert not key in self.m_name2s_name

    def store(self, parameters):
        """
        Save the current parameters for restoring later.
        Args:
            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                temporarily stored.
        """
        self.collected_params = [param.clone() for param in parameters]

    def restore(self, parameters):
        """
        Restore the parameters stored with the `store` method.
        Useful to validate the model with EMA parameters without affecting the
        original optimization process. Store the parameters before the
        `copy_to` method. After validation (or model saving), use this to
        restore the former parameters.
        Args:
            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                updated with the stored parameters.
        """
        for c_param, param in zip(self.collected_params, parameters):
            param.data.copy_(c_param.data)
import Encoder +from ...modules.diffusionmodules.openaimodel import Timestep +from ...modules.diffusionmodules.util import extract_into_tensor, make_beta_schedule +from ...modules.distributions.distributions import DiagonalGaussianDistribution +from ...util import ( + autocast, + count_params, + default, + disabled_train, + expand_dims_like, + instantiate_from_config, +) + + +class AbstractEmbModel(nn.Module): + def __init__(self): + super().__init__() + self._is_trainable = None + self._ucg_rate = None + self._input_key = None + + @property + def is_trainable(self) -> bool: + return self._is_trainable + + @property + def ucg_rate(self) -> Union[float, torch.Tensor]: + return self._ucg_rate + + @property + def input_key(self) -> str: + return self._input_key + + @is_trainable.setter + def is_trainable(self, value: bool): + self._is_trainable = value + + @ucg_rate.setter + def ucg_rate(self, value: Union[float, torch.Tensor]): + self._ucg_rate = value + + @input_key.setter + def input_key(self, value: str): + self._input_key = value + + @is_trainable.deleter + def is_trainable(self): + del self._is_trainable + + @ucg_rate.deleter + def ucg_rate(self): + del self._ucg_rate + + @input_key.deleter + def input_key(self): + del self._input_key + + +class GeneralConditioner(nn.Module): + OUTPUT_DIM2KEYS = {2: "vector", 3: "crossattn", 4: "concat", 5: "concat"} + KEY2CATDIM = {"vector": 1, "crossattn": 2, "concat": 1} + + def __init__(self, emb_models: Union[List, ListConfig]): + super().__init__() + embedders = [] + for n, embconfig in enumerate(emb_models): + embedder = instantiate_from_config(embconfig) + assert isinstance( + embedder, AbstractEmbModel + ), f"embedder model {embedder.__class__.__name__} has to inherit from AbstractEmbModel" + embedder.is_trainable = embconfig.get("is_trainable", False) + embedder.ucg_rate = embconfig.get("ucg_rate", 0.0) + if not embedder.is_trainable: + embedder.train = disabled_train + for param in embedder.parameters(): + 
param.requires_grad = False + embedder.eval() + print( + f"Initialized embedder #{n}: {embedder.__class__.__name__} " + f"with {count_params(embedder, False)} params. Trainable: {embedder.is_trainable}" + ) + + if "input_key" in embconfig: + embedder.input_key = embconfig["input_key"] + elif "input_keys" in embconfig: + embedder.input_keys = embconfig["input_keys"] + else: + raise KeyError( + f"need either 'input_key' or 'input_keys' for embedder {embedder.__class__.__name__}" + ) + + embedder.legacy_ucg_val = embconfig.get("legacy_ucg_value", None) + if embedder.legacy_ucg_val is not None: + embedder.ucg_prng = np.random.RandomState() + + embedders.append(embedder) + self.embedders = nn.ModuleList(embedders) + + def possibly_get_ucg_val(self, embedder: AbstractEmbModel, batch: Dict) -> Dict: + assert embedder.legacy_ucg_val is not None + p = embedder.ucg_rate + val = embedder.legacy_ucg_val + for i in range(len(batch[embedder.input_key])): + if embedder.ucg_prng.choice(2, p=[1 - p, p]): + batch[embedder.input_key][i] = val + return batch + + def forward( + self, batch: Dict, force_zero_embeddings: Optional[List] = None + ) -> Dict: + output = dict() + if force_zero_embeddings is None: + force_zero_embeddings = [] + for embedder in self.embedders: + embedding_context = nullcontext if embedder.is_trainable else torch.no_grad + with embedding_context(): + if hasattr(embedder, "input_key") and (embedder.input_key is not None): + if embedder.legacy_ucg_val is not None: + batch = self.possibly_get_ucg_val(embedder, batch) + emb_out = embedder(batch[embedder.input_key]) + elif hasattr(embedder, "input_keys"): + emb_out = embedder(*[batch[k] for k in embedder.input_keys]) + assert isinstance( + emb_out, (torch.Tensor, list, tuple) + ), f"encoder outputs must be tensors or a sequence, but got {type(emb_out)}" + if not isinstance(emb_out, (list, tuple)): + emb_out = [emb_out] + for emb in emb_out: + if hasattr(embedder, "input_key") and (embedder.input_key == "cond_img"): + 
out_key = "cond_feat" + elif hasattr(embedder, "input_key") and (embedder.input_key == "interpolate_first"): + out_key = "interpolate_first" + elif hasattr(embedder, "input_key") and (embedder.input_key == "interpolate_last"): + out_key = "interpolate_last" + elif hasattr(embedder, "input_key") and (embedder.input_key == "interpolate_first_last"): + out_key = "interpolate_first_last" + elif hasattr(embedder, 'input_key') and (embedder.input_key == 'control_hint'): + out_key = 'control_hint' + else: + out_key = self.OUTPUT_DIM2KEYS[emb.dim()] + if embedder.ucg_rate > 0.0 and embedder.legacy_ucg_val is None: # zeros out embeddings with probability ucg_rate + emb = ( + expand_dims_like( + torch.bernoulli( + (1.0 - embedder.ucg_rate) + * torch.ones(emb.shape[0], device=emb.device) + ), + emb, + ) + * emb + ) + if ( + hasattr(embedder, "input_key") + and embedder.input_key in force_zero_embeddings + ): + emb = torch.zeros_like(emb) + if out_key in output: + output[out_key] = torch.cat( + (output[out_key], emb), self.KEY2CATDIM[out_key] + ) + else: + output[out_key] = emb + return output + + def get_unconditional_conditioning( + self, batch_c, batch_uc=None, force_uc_zero_embeddings=None + ): + if force_uc_zero_embeddings is None: + force_uc_zero_embeddings = [] + ucg_rates = list() + for embedder in self.embedders: + ucg_rates.append(embedder.ucg_rate) + embedder.ucg_rate = 0.0 + c = self(batch_c) + uc = self(batch_c if batch_uc is None else batch_uc, force_uc_zero_embeddings) + + for embedder, rate in zip(self.embedders, ucg_rates): + embedder.ucg_rate = rate + return c, uc + + +class InceptionV3(nn.Module): + """Wrapper around the https://github.com/mseitzer/pytorch-fid inception + port with an additional squeeze at the end""" + + def __init__(self, normalize_input=False, **kwargs): + super().__init__() + from pytorch_fid import inception + + kwargs["resize_input"] = True + self.model = inception.InceptionV3(normalize_input=normalize_input, **kwargs) + + def 
forward(self, inp): + # inp = kornia.geometry.resize(inp, (299, 299), + # interpolation='bicubic', + # align_corners=False, + # antialias=True) + # inp = inp.clamp(min=-1, max=1) + + outp = self.model(inp) + + if len(outp) == 1: + return outp[0].squeeze() + + return outp + + +class IdentityEncoder(AbstractEmbModel): + def encode(self, x): + return x + + def forward(self, x): + return x + + +class ClassEmbedder(AbstractEmbModel): + def __init__(self, embed_dim, n_classes=1000, add_sequence_dim=False): + super().__init__() + self.embedding = nn.Embedding(n_classes, embed_dim) + self.n_classes = n_classes + self.add_sequence_dim = add_sequence_dim + + def forward(self, c): + c = self.embedding(c) + if self.add_sequence_dim: + c = c[:, None, :] + return c + + def get_unconditional_conditioning(self, bs, device="cuda"): + uc_class = ( + self.n_classes - 1 + ) # 1000 classes --> 0 ... 999, one extra class for ucg (class 1000) + uc = torch.ones((bs,), device=device) * uc_class + uc = {self.key: uc.long()} + return uc + + +class ClassEmbedderForMultiCond(ClassEmbedder): + def forward(self, batch, key=None, disable_dropout=False): + out = batch + key = default(key, self.key) + islist = isinstance(batch[key], list) + if islist: + batch[key] = batch[key][0] + c_out = super().forward(batch, key, disable_dropout) + out[key] = [c_out] if islist else c_out + return out + + +class FrozenT5Embedder(AbstractEmbModel): + """Uses the T5 transformer encoder for text""" + + def __init__( + self, version="google/t5-v1_1-xxl", device="cuda", max_length=77, freeze=True + ): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl + super().__init__() + self.tokenizer = T5Tokenizer.from_pretrained(version) + self.transformer = T5EncoderModel.from_pretrained(version) + self.device = device + self.max_length = max_length + if freeze: + self.freeze() + + def freeze(self): + self.transformer = self.transformer.eval() + + for param in self.parameters(): + param.requires_grad = False + + # @autocast 
+ def forward(self, text): + batch_encoding = self.tokenizer( + text, + truncation=True, + max_length=self.max_length, + return_length=True, + return_overflowing_tokens=False, + padding="max_length", + return_tensors="pt", + ) + tokens = batch_encoding["input_ids"].to(self.device) + with torch.autocast("cuda", enabled=False): + outputs = self.transformer(input_ids=tokens) + z = outputs.last_hidden_state + return z + + def encode(self, text): + return self(text) + + +class FrozenByT5Embedder(AbstractEmbModel): + """ + Uses the ByT5 transformer encoder for text. Is character-aware. + """ + + def __init__( + self, version="google/byt5-base", device="cuda", max_length=77, freeze=True + ): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl + super().__init__() + self.tokenizer = ByT5Tokenizer.from_pretrained(version) + self.transformer = T5EncoderModel.from_pretrained(version) + self.device = device + self.max_length = max_length + if freeze: + self.freeze() + + def freeze(self): + self.transformer = self.transformer.eval() + + for param in self.parameters(): + param.requires_grad = False + + def forward(self, text): + batch_encoding = self.tokenizer( + text, + truncation=True, + max_length=self.max_length, + return_length=True, + return_overflowing_tokens=False, + padding="max_length", + return_tensors="pt", + ) + tokens = batch_encoding["input_ids"].to(self.device) + with torch.autocast("cuda", enabled=False): + outputs = self.transformer(input_ids=tokens) + z = outputs.last_hidden_state + return z + + def encode(self, text): + return self(text) + + +class FrozenCLIPEmbedder(AbstractEmbModel): + """Uses the CLIP transformer encoder for text (from huggingface)""" + + LAYERS = ["last", "pooled", "hidden"] + + def __init__( + self, + version="openai/clip-vit-large-patch14", + device="cuda", + max_length=77, + freeze=True, + layer="last", + layer_idx=None, + always_return_pooled=False, + ): # clip-vit-base-patch32 + super().__init__() + assert layer in self.LAYERS + 
self.tokenizer = CLIPTokenizer.from_pretrained(version) + self.transformer = CLIPTextModel.from_pretrained(version) + self.device = device + self.max_length = max_length + if freeze: + self.freeze() + self.layer = layer + self.layer_idx = layer_idx + self.return_pooled = always_return_pooled + if layer == "hidden": + assert layer_idx is not None + assert 0 <= abs(layer_idx) <= 12 + + def freeze(self): + self.transformer = self.transformer.eval() + + for param in self.parameters(): + param.requires_grad = False + + @autocast + def forward(self, text): + batch_encoding = self.tokenizer( + text, + truncation=True, + max_length=self.max_length, + return_length=True, + return_overflowing_tokens=False, + padding="max_length", + return_tensors="pt", + ) + tokens = batch_encoding["input_ids"].to(self.device) + outputs = self.transformer( + input_ids=tokens, output_hidden_states=self.layer == "hidden" + ) + if self.layer == "last": + z = outputs.last_hidden_state + elif self.layer == "pooled": + z = outputs.pooler_output[:, None, :] + else: + z = outputs.hidden_states[self.layer_idx] + if self.return_pooled: + return z, outputs.pooler_output + return z + + def encode(self, text): + return self(text) + + +class FrozenOpenCLIPEmbedder2(AbstractEmbModel): + """ + Uses the OpenCLIP transformer encoder for text + """ + + LAYERS = ["pooled", "last", "penultimate"] + + def __init__( + self, + arch="ViT-H-14", + version="laion2b_s32b_b79k", + device="cuda", + max_length=77, + freeze=True, + layer="last", + always_return_pooled=False, + legacy=True, + ): + super().__init__() + assert layer in self.LAYERS + model, _, _ = open_clip.create_model_and_transforms( + arch, + device=torch.device("cpu"), + pretrained=version, + ) + del model.visual + self.model = model + + self.device = device + self.max_length = max_length + self.return_pooled = always_return_pooled + if freeze: + self.freeze() + self.layer = layer + if self.layer == "last": + self.layer_idx = 0 + elif self.layer == 
"penultimate": + self.layer_idx = 1 + else: + raise NotImplementedError() + self.legacy = legacy + + def freeze(self): + self.model = self.model.eval() + for param in self.parameters(): + param.requires_grad = False + + @autocast + def forward(self, text): + tokens = open_clip.tokenize(text) + z = self.encode_with_transformer(tokens.to(self.device)) + if not self.return_pooled and self.legacy: + return z + if self.return_pooled: + assert not self.legacy + return z[self.layer], z["pooled"] + return z[self.layer] + + def encode_with_transformer(self, text): + x = self.model.token_embedding(text) # [batch_size, n_ctx, d_model] + x = x + self.model.positional_embedding + x = x.permute(1, 0, 2) # NLD -> LND + x = self.text_transformer_forward(x, attn_mask=self.model.attn_mask) + if self.legacy: + x = x[self.layer] + x = self.model.ln_final(x) + return x + else: + # x is a dict and will stay a dict + o = x["last"] + o = self.model.ln_final(o) + pooled = self.pool(o, text) + x["pooled"] = pooled + return x + + def pool(self, x, text): + # take features from the eot embedding (eot_token is the highest number in each sequence) + x = ( + x[torch.arange(x.shape[0]), text.argmax(dim=-1)] + @ self.model.text_projection + ) + return x + + def text_transformer_forward(self, x: torch.Tensor, attn_mask=None): + outputs = {} + for i, r in enumerate(self.model.transformer.resblocks): + if i == len(self.model.transformer.resblocks) - 1: + outputs["penultimate"] = x.permute(1, 0, 2) # LND -> NLD + if ( + self.model.transformer.grad_checkpointing + and not torch.jit.is_scripting() + ): + x = checkpoint(r, x, attn_mask) + else: + x = r(x, attn_mask=attn_mask) + outputs["last"] = x.permute(1, 0, 2) # LND -> NLD + return outputs + + def encode(self, text): + return self(text) + + +class FrozenOpenCLIPEmbedder(AbstractEmbModel): + LAYERS = [ + # "pooled", + "last", + "penultimate", + ] + + def __init__( + self, + arch="ViT-H-14", + version="laion2b_s32b_b79k", + device="cuda", + 
max_length=77, + freeze=True, + layer="last", + use_bf16=False, + ): + super().__init__() + assert layer in self.LAYERS + model, _, _ = open_clip.create_model_and_transforms( + arch, + device=torch.device("cpu"), + pretrained=version, + precision=torch.bfloat16 if use_bf16 else torch.float32, + ) + del model.visual + self.model = model + + self.device = device + self.max_length = max_length + if freeze: + self.freeze() + self.layer = layer + if self.layer == "last": + self.layer_idx = 0 + elif self.layer == "penultimate": + self.layer_idx = 1 + else: + raise NotImplementedError() + + def freeze(self): + self.model = self.model.eval() + for param in self.parameters(): + param.requires_grad = False + + def forward(self, text): + tokens = open_clip.tokenize(text) + z = self.encode_with_transformer(tokens.to(self.device)) + return z + + def encode_with_transformer(self, text): + x = self.model.token_embedding(text) # [batch_size, n_ctx, d_model] + x = x + self.model.positional_embedding + x = x.permute(1, 0, 2) # NLD -> LND + x = self.text_transformer_forward(x, attn_mask=self.model.attn_mask) + x = x.permute(1, 0, 2) # LND -> NLD + x = self.model.ln_final(x) + return x + + def text_transformer_forward(self, x: torch.Tensor, attn_mask=None): + for i, r in enumerate(self.model.transformer.resblocks): + if i == len(self.model.transformer.resblocks) - self.layer_idx: + break + if ( + self.model.transformer.grad_checkpointing + and not torch.jit.is_scripting() + ): + x = checkpoint(r, x, attn_mask) + else: + x = r(x, attn_mask=attn_mask) + return x + + def encode(self, text): + return self(text) + + +class FrozenOpenCLIPImageEmbedder(AbstractEmbModel): + """ + Uses the OpenCLIP vision transformer encoder for images + """ + + def __init__( + self, + arch="ViT-H-14", + version="laion2b_s32b_b79k", + device="cuda", + max_length=77, + freeze=True, + antialias=True, + ucg_rate=0.0, + unsqueeze_dim=False, + repeat_to_max_len=False, + num_image_crops=0, + output_tokens=False, + 
): + super().__init__() + model, _, _ = open_clip.create_model_and_transforms( + arch, + device=torch.device("cpu"), + pretrained=version, + ) + del model.transformer + self.model = model + self.max_crops = num_image_crops + self.pad_to_max_len = self.max_crops > 0 + self.repeat_to_max_len = repeat_to_max_len and (not self.pad_to_max_len) + self.device = device + self.max_length = max_length + if freeze: + self.freeze() + + self.antialias = antialias + + self.register_buffer( + "mean", torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False + ) + self.register_buffer( + "std", torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False + ) + self.ucg_rate = ucg_rate + self.unsqueeze_dim = unsqueeze_dim + self.stored_batch = None + self.model.visual.output_tokens = output_tokens + self.output_tokens = output_tokens + + def preprocess(self, x): + # normalize to [0,1] + x = kornia.geometry.resize( + x, + (224, 224), + interpolation="bicubic", + align_corners=True, + antialias=self.antialias, + ) + x = (x + 1.0) / 2.0 + # renormalize according to clip + x = kornia.enhance.normalize(x, self.mean, self.std) + return x + + def freeze(self): + self.model = self.model.eval() + for param in self.parameters(): + param.requires_grad = False + + @autocast + def forward(self, image, no_dropout=False): + z = self.encode_with_vision_transformer(image) + tokens = None + if self.output_tokens: + z, tokens = z[0], z[1] + z = z.to(image.dtype) + if self.ucg_rate > 0.0 and not no_dropout and not (self.max_crops > 0): + z = ( + torch.bernoulli( + (1.0 - self.ucg_rate) * torch.ones(z.shape[0], device=z.device) + )[:, None] + * z + ) + if tokens is not None: + tokens = ( + expand_dims_like( + torch.bernoulli( + (1.0 - self.ucg_rate) + * torch.ones(tokens.shape[0], device=tokens.device) + ), + tokens, + ) + * tokens + ) + if self.unsqueeze_dim: + z = z[:, None, :] + if self.output_tokens: + assert not self.repeat_to_max_len + assert not self.pad_to_max_len + return 
tokens, z + if self.repeat_to_max_len: + if z.dim() == 2: + z_ = z[:, None, :] + else: + z_ = z + return repeat(z_, "b 1 d -> b n d", n=self.max_length), z + elif self.pad_to_max_len: + assert z.dim() == 3 + z_pad = torch.cat( + ( + z, + torch.zeros( + z.shape[0], + self.max_length - z.shape[1], + z.shape[2], + device=z.device, + ), + ), + 1, + ) + return z_pad, z_pad[:, 0, ...] + return z + + def encode_with_vision_transformer(self, img): + # if self.max_crops > 0: + # img = self.preprocess_by_cropping(img) + if img.dim() == 5: + assert self.max_crops == img.shape[1] + img = rearrange(img, "b n c h w -> (b n) c h w") + img = self.preprocess(img) + if not self.output_tokens: + assert not self.model.visual.output_tokens + x = self.model.visual(img) + tokens = None + else: + assert self.model.visual.output_tokens + x, tokens = self.model.visual(img) + if self.max_crops > 0: + x = rearrange(x, "(b n) d -> b n d", n=self.max_crops) + # drop out between 0 and all along the sequence axis + x = ( + torch.bernoulli( + (1.0 - self.ucg_rate) + * torch.ones(x.shape[0], x.shape[1], 1, device=x.device) + ) + * x + ) + if tokens is not None: + tokens = rearrange(tokens, "(b n) t d -> b t (n d)", n=self.max_crops) + print( + f"You are running very experimental token-concat in {self.__class__.__name__}. " + f"Check what you are doing, and then remove this message." 
+ ) + if self.output_tokens: + return x, tokens + return x + + def encode(self, text): + return self(text) + + +class FrozenCLIPT5Encoder(AbstractEmbModel): + def __init__( + self, + clip_version="openai/clip-vit-large-patch14", + t5_version="google/t5-v1_1-xl", + device="cuda", + clip_max_length=77, + t5_max_length=77, + ): + super().__init__() + self.clip_encoder = FrozenCLIPEmbedder( + clip_version, device, max_length=clip_max_length + ) + self.t5_encoder = FrozenT5Embedder(t5_version, device, max_length=t5_max_length) + print( + f"{self.clip_encoder.__class__.__name__} has {count_params(self.clip_encoder) * 1.e-6:.2f} M parameters, " + f"{self.t5_encoder.__class__.__name__} comes with {count_params(self.t5_encoder) * 1.e-6:.2f} M params." + ) + + def encode(self, text): + return self(text) + + def forward(self, text): + clip_z = self.clip_encoder.encode(text) + t5_z = self.t5_encoder.encode(text) + return [clip_z, t5_z] + + +class SpatialRescaler(nn.Module): + def __init__( + self, + n_stages=1, + method="bilinear", + multiplier=0.5, + in_channels=3, + out_channels=None, + bias=False, + wrap_video=False, + kernel_size=1, + remap_output=False, + ): + super().__init__() + self.n_stages = n_stages + assert self.n_stages >= 0 + assert method in [ + "nearest", + "linear", + "bilinear", + "trilinear", + "bicubic", + "area", + ] + self.multiplier = multiplier + self.interpolator = partial(torch.nn.functional.interpolate, mode=method) + self.remap_output = out_channels is not None or remap_output + if self.remap_output: + print( + f"Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing." 
+ ) + self.channel_mapper = nn.Conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + bias=bias, + padding=kernel_size // 2, + ) + self.wrap_video = wrap_video + + def forward(self, x): + if self.wrap_video and x.ndim == 5: + B, C, T, H, W = x.shape + x = rearrange(x, "b c t h w -> b t c h w") + x = rearrange(x, "b t c h w -> (b t) c h w") + + for stage in range(self.n_stages): + x = self.interpolator(x, scale_factor=self.multiplier) + + if self.wrap_video: + x = rearrange(x, "(b t) c h w -> b t c h w", b=B, t=T, c=C) + x = rearrange(x, "b t c h w -> b c t h w") + if self.remap_output: + x = self.channel_mapper(x) + return x + + def encode(self, x): + return self(x) + + +class LowScaleEncoder(nn.Module): + def __init__( + self, + model_config, + linear_start, + linear_end, + timesteps=1000, + max_noise_level=250, + output_size=64, + scale_factor=1.0, + ): + super().__init__() + self.max_noise_level = max_noise_level + self.model = instantiate_from_config(model_config) + self.augmentation_schedule = self.register_schedule( + timesteps=timesteps, linear_start=linear_start, linear_end=linear_end + ) + self.out_size = output_size + self.scale_factor = scale_factor + + def register_schedule( + self, + beta_schedule="linear", + timesteps=1000, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + ): + betas = make_beta_schedule( + beta_schedule, + timesteps, + linear_start=linear_start, + linear_end=linear_end, + cosine_s=cosine_s, + ) + alphas = 1.0 - betas + alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) + + (timesteps,) = betas.shape + self.num_timesteps = int(timesteps) + self.linear_start = linear_start + self.linear_end = linear_end + assert ( + alphas_cumprod.shape[0] == self.num_timesteps + ), "alphas have to be defined for each timestep" + + to_torch = partial(torch.tensor, dtype=torch.float32) + + self.register_buffer("betas", to_torch(betas)) + self.register_buffer("alphas_cumprod", 
to_torch(alphas_cumprod)) + self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) + self.register_buffer( + "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) + ) + self.register_buffer( + "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) + ) + self.register_buffer( + "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) + ) + self.register_buffer( + "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) + ) + + def q_sample(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + return ( + extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) + * noise + ) + + def forward(self, x): + z = self.model.encode(x) + if isinstance(z, DiagonalGaussianDistribution): + z = z.sample() + z = z * self.scale_factor + noise_level = torch.randint( + 0, self.max_noise_level, (x.shape[0],), device=x.device + ).long() + z = self.q_sample(z, noise_level) + if self.out_size is not None: + z = torch.nn.functional.interpolate(z, size=self.out_size, mode="nearest") + # z = z.repeat_interleave(2, -2).repeat_interleave(2, -1) + return z, noise_level + + def decode(self, z): + z = z / self.scale_factor + return self.model.decode(z) + + +class ConcatTimestepEmbedderND(AbstractEmbModel): + """embeds each dimension independently and concatenates them""" + + def __init__(self, outdim): + super().__init__() + self.timestep = Timestep(outdim) + self.outdim = outdim + + def forward(self, x): + if x.ndim == 1: + x = x[:, None] + assert len(x.shape) == 2 + b, dims = x.shape[0], x.shape[1] + x = rearrange(x, "b d -> (b d)") + emb = self.timestep(x) + emb = rearrange(emb, "(b d) d2 -> b (d d2)", b=b, d=dims, d2=self.outdim) + return 
emb + + +class GaussianEncoder(Encoder, AbstractEmbModel): + def __init__( + self, weight: float = 1.0, flatten_output: bool = True, *args, **kwargs + ): + super().__init__(*args, **kwargs) + self.posterior = DiagonalGaussianRegularizer() + self.weight = weight + self.flatten_output = flatten_output + + def forward(self, x) -> Tuple[Dict, torch.Tensor]: + z = super().forward(x) + z, log = self.posterior(z) + log["loss"] = log["kl_loss"] + log["weight"] = self.weight + if self.flatten_output: + z = rearrange(z, "b c h w -> b (h w ) c") + return log, z + + +class VAEEmbedder(AbstractEmbModel): + def __init__(self, down_blur_factor=1, *args, **kwargs): + super().__init__(*args, **kwargs) + self.down_blur_factor = down_blur_factor + assert down_blur_factor >= 1, "down_blur_factor must be >= 1" + + def freeze(self): + self.eval() + for param in self.parameters(): + param.requires_grad = False + + def forward(self, x): + # return x + assert hasattr(self, "first_stage_model"), "first_stage_model not defined" + assert hasattr( + self, "disable_first_stage_autocast" + ), "disable_first_stage_autocast not defined" + assert hasattr(self, "scale_factor"), "scale_factor not defined" + + if self.down_blur_factor > 1: + hx, wx = x.shape[-2:] + # downsample + x = torch.nn.functional.interpolate( + x, + scale_factor=1.0 / self.down_blur_factor, + mode="bilinear", + align_corners=False, + ) + # upsample back + x = torch.nn.functional.interpolate( + x, + size=(hx, wx), + mode="bilinear", + align_corners=False, + ) + with torch.autocast("cuda", enabled=not self.disable_first_stage_autocast): + z = self.first_stage_model.encode(x) + z = self.scale_factor * z + return z + + def encode(self, x): + return self(x) + + +class CustomIdentityEncoder(AbstractEmbModel): + def __init__(self, down_blur_factor=None, down_blur_probs=None, *args, **kwargs): + super().__init__(*args, **kwargs) + self.down_blur_factor = down_blur_factor + if down_blur_factor: + assert down_blur_factor.__class__ == 
omegaconf.listconfig.ListConfig, "down_blur_factor must be a list" + assert min(down_blur_factor) >= 1, "down_blur_factor must be >= 1" + if down_blur_probs: + assert down_blur_probs.__class__ == omegaconf.listconfig.ListConfig, "probs must be a list" + assert len(down_blur_probs) == len(down_blur_factor), "probs must have the same length as down_blur_factor" + assert sum(down_blur_probs) == 1, "probs must sum to 1" + self.down_blur_probs = down_blur_probs + else: + self.down_blur_probs = [1.0/len(down_blur_factor) for _ in range(len(down_blur_factor))] + + def encode(self, x): + return self(x) + + def forward(self, x): + if self.down_blur_factor: + factor = np.random.choice(self.down_blur_factor, p=self.down_blur_probs) + + hx, wx = x.shape[-2:] + if x.dim() == 4: + mode = "bilinear" + size_down = int(hx / factor), int(wx / factor) + size_ori = hx, wx + elif x.dim() == 5: + nframe = x.shape[2] + mode = "trilinear" + size_down = nframe, int(hx / factor), int(wx / factor) + size_ori = nframe, hx, wx + else: + raise NotImplementedError("CustomIdentityEncoder only support 4D and 5D input") + + # downsample + x = torch.nn.functional.interpolate( + x, + size=size_down, + mode=mode, + align_corners=False, + ) + # upsample back + x = torch.nn.functional.interpolate( + x, + size=size_ori, + mode=mode, + align_corners=False, + ) + + # if x.dim() == 4: + # hx, wx = x.shape[-2:] + # # downsample + # x = torch.nn.functional.interpolate( + # x, + # scale_factor=1.0 / factor, + # mode="bilinear", + # align_corners=False, + # ) + # # upsample back + # x = torch.nn.functional.interpolate( + # x, + # size=(hx, wx), + # mode="bilinear", + # align_corners=False, + # ) + # elif x.dim() == 5: + # hx, wx = x.shape[-2:] + # nframe = x.shape[2] + # # downsample + # x = torch.nn.functional.interpolate( + # x, + # size=(nframe, int(hx / factor), int(wx / factor)), + # mode="trilinear", + # align_corners=False, + # ) + # # upsample back + # x = torch.nn.functional.interpolate( + # x, + # 
size=(nframe, hx, wx), + # mode="trilinear", + # align_corners=False, + # ) + # else: + # raise NotImplementedError("CustomIdentityEncoder only support 4D and 5D input") + + return x + + +class CustomIdentityDownCondEncoder(CustomIdentityEncoder): + def __init__(self, outdim, *args, **kwargs): + super().__init__(*args, **kwargs) + self.timestep = Timestep(outdim) + self.outdim = outdim + + def forward(self, x): + if self.down_blur_factor: + factor = np.random.choice(self.down_blur_factor, p=self.down_blur_probs) + + hx, wx = x.shape[-2:] + if x.dim() == 4: + mode = "bilinear" + size_down = int(hx / factor), int(wx / factor) + size_ori = hx, wx + elif x.dim() == 5: + nframe = x.shape[2] + mode = "trilinear" + size_down = nframe, int(hx / factor), int(wx / factor) + size_ori = nframe, hx, wx + else: + raise NotImplementedError("CustomIdentityEncoder only support 4D and 5D input") + + # downsample + x = torch.nn.functional.interpolate( + x, + size=size_down, + mode=mode, + align_corners=False, + ) + # upsample back + x = torch.nn.functional.interpolate( + x, + size=size_ori, + mode=mode, + align_corners=False, + ) + + factor = torch.tensor(factor).to(x.device).unsqueeze(0).float() + factor = einops.repeat(factor, 'n -> b n', b=x.shape[0]) + assert len(factor.shape) == 2 + b, dims = factor.shape[0], factor.shape[1] + factor = rearrange(factor, "b d -> (b d)") + emb = self.timestep(factor) + emb = rearrange(emb, "(b d) d2 -> b (d d2)", b=b, d=dims, d2=self.outdim) + emb = emb[:,:,None,None,None] + emb = emb.expand(-1, -1, x.shape[2], x.shape[3], x.shape[4]) + x = torch.cat([x, emb], dim=1) + + return x + +# ----------------------------------------------------- +# This is used for TV2V (text-video-to-video) generation + +def safer_memory(x): + # Fix many MAC/AMD problems + return np.ascontiguousarray(x.copy()).copy() + +def pad64(x): + return int(np.ceil(float(x) / 64.0) * 64 - x) + +def HWC3(x): + assert x.dtype == np.uint8 + if x.ndim == 2: + x = x[:, :, None] + assert 
x.ndim == 3 + H, W, C = x.shape + assert C == 1 or C == 3 or C == 4 + if C == 3: + return x + if C == 1: + return np.concatenate([x, x, x], axis=2) + if C == 4: + color = x[:, :, 0:3].astype(np.float32) + alpha = x[:, :, 3:4].astype(np.float32) / 255.0 + y = color * alpha + 255.0 * (1.0 - alpha) + y = y.clip(0, 255).astype(np.uint8) + return y + +def resize_image_with_pad(input_image, resolution, skip_hwc3=False): + if skip_hwc3: + img = input_image + else: + img = HWC3(input_image) + H_raw, W_raw, _ = img.shape + k = float(resolution) / float(min(H_raw, W_raw)) + interpolation = cv2.INTER_CUBIC if k > 1 else cv2.INTER_AREA + H_target = int(np.round(float(H_raw) * k)) + W_target = int(np.round(float(W_raw) * k)) + img = cv2.resize(img, (W_target, H_target), interpolation=interpolation) + H_pad, W_pad = pad64(H_target), pad64(W_target) + img_padded = np.pad(img, [[0, H_pad], [0, W_pad], [0, 0]], mode='edge') + + def remove_pad(x): + return safer_memory(x[:H_target, :W_target]) + + return safer_memory(img_padded), remove_pad + +def lineart_standard(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + x = img.astype(np.float32) + g = cv2.GaussianBlur(x, (0, 0), 6.0) + intensity = np.min(g - x, axis=2).clip(0, 255) + intensity /= max(16, np.median(intensity[intensity > 8])) + intensity *= 127 + result = intensity.clip(0, 255).astype(np.uint8) + return remove_pad(result), True + +class LineartEncoder(AbstractEmbModel): + def __init__(self, lineart_coarse=False, lineart_standard=False, *args, **kwargs): + # def __init__(self, lineart_coarse=False, lineart_standard=True, *args, **kwargs): + super().__init__(*args, **kwargs) + from sgm.modules.diffusionmodules.util import LineartDetector + self.lineart_coarse = lineart_coarse + self.lineart_standard = lineart_standard + self.lineart_detector = LineartDetector() + # freeze the lineart detector + self.lineart_detector.eval() + for param in self.lineart_detector.parameters(): + param.requires_grad = 
False + + def forward(self, x): + assert x.ndim == 5, "input must be 5D tensor" + n_frames = x.shape[2] + x = einops.rearrange(x, 'b c t h w -> (b t) c h w') + # with torch.no_grad(): + # x = self.lineart_detector(x, coarse=self.lineart_coarse) # -1 ~ 1 + if self.lineart_standard: + b,c,h,w = x.shape + x_bef = x + x = einops.rearrange(x, 'b c h w -> b h w c') + x = (x.cpu().numpy()+1)/2*255. + x = x.astype(np.uint8) + xs = [e for e in x] + for i in range(len(xs)): + xs[i], _ = lineart_standard(xs[i]) + x = np.stack(xs) + x = torch.from_numpy(x).cuda() + x = x.float()/255*2-1 + x = -x + x = x.unsqueeze(3) + x = einops.rearrange(x, 'b h w c -> b c h w') + x = torch.nn.functional.interpolate( + x, + size=(h, w), + mode="bilinear", + align_corners=False, + ) + # import torchvision + # torchvision.utils.save_image(x_bef, 'debug_lineart_standard_bef.png', normalize=True) + # torchvision.utils.save_image(x, 'debug_lineart_standard.png', normalize=True) + else: + with torch.no_grad(): + x = self.lineart_detector(x, coarse=self.lineart_coarse) # -1 ~ 1 + x = einops.rearrange(x, '(b t) c h w -> b c t h w', t=n_frames) + out_data = einops.repeat(x, 'b c t h w -> b (3 c) t h w') + return out_data + + def encode(self, x): + return self(x) + +import sys +sys.path.append('./src/controlnet11') +# from src.controlnet11.annotator.zoe import ZoeDetector +import os +import cv2 +import numpy as np +import torch + +from einops import rearrange +from src.controlnet11.annotator.zoe.zoedepth.models.zoedepth.zoedepth_v1 import ZoeDepth +from src.controlnet11.annotator.zoe.zoedepth.utils.config import get_config +from src.controlnet11.annotator.util import annotator_ckpts_path + +class DepthZoeEncoder(AbstractEmbModel): # TODO: Support more depth encoder + def __init__(self): + super().__init__() + remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/ZoeD_M12_N.pt" + modelpath = os.path.join(annotator_ckpts_path, "ZoeD_M12_N.pt") + if not os.path.exists(modelpath): 
+ from basicsr.utils.download_util import load_file_from_url + load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path) + conf = get_config("zoedepth", "infer") + model = ZoeDepth.build_from_config(conf) + model.load_state_dict(torch.load(modelpath)['model']) + # model = model.cuda() + # model.device = 'cuda' + model.eval() + self.model = model + + for param in self.parameters(): + param.requires_grad = False + self.model.float() # this model must use float32, or nan will occur, don't know why + + def forward(self, input_image): + dtype_ = input_image.dtype + if self.model.state_dict()['conditional_log_binomial.mlp.2.weight'].dtype != torch.float32: + print('converting depth model to torch.float32') + self.model.float() + # assert input_image.ndim == 3 + assert input_image.ndim == 5, "input must be 5D tensor" # range -1 ~ 1 + n_frames = input_image.shape[2] + input_image = einops.rearrange(input_image, 'b c t h w -> (b t) c h w') + input_image = (input_image + 1) / 2 # 0 ~ 1 + + image_depth = input_image.float() + with torch.no_grad(): + depth = self.model.infer(image_depth) + depth = einops.rearrange(depth, '(b t) c h w -> b c t h w', t=n_frames) + # TODO: not sure whether conduct on THW or HW + # calculate the 2nd percentile (vmin) along the CTHW dimension + percentile_2 = int(0.02 * depth[0].numel()) + vmin = torch.kthvalue(depth.view(depth.shape[0], -1), percentile_2, dim=1).values + # Calculate the 85th percentile (vmax) along the CTHW dimension + percentile_85 = int(0.85 * depth[0].numel()) + vmax = torch.kthvalue(depth.view(depth.shape[0], -1), percentile_85, dim=1).values + + depth -= vmin[:,None,None,None,None] + depth /= (vmax - vmin)[:,None,None,None,None] + depth = torch.clamp(depth, 0, 1) + depth = depth * 2 - 1 # -1 ~ 1 + depth = einops.repeat(depth, 'b c t h w -> b (3 c) t h w') + + depth = depth.to(dtype_) + return depth + + def encode(self, x): + return self(x) + + +from src.controlnet11.annotator.midas.api import MiDaSInference +class 
DepthMidasEncoder(AbstractEmbModel): + def __init__(self): + super().__init__() + self.model = MiDaSInference(model_type="dpt_hybrid").cuda() + for param in self.parameters(): + param.requires_grad = False + + def __call__(self, input_image): + dtype_ = input_image.dtype + if self.model.state_dict()['model.pretrained.model.cls_token'].dtype != torch.float32: + print('converting depthmidas model to torch.float32') + self.model.float() + assert input_image.ndim == 5, "input must be 5D tensor" # range -1 ~ 1 + # assert input_image.ndim == 3 + # image_depth = input_image + n_frames = input_image.shape[2] + input_image = einops.rearrange(input_image, 'b c t h w -> (b t) c h w') + # input_image = (input_image + 1) / 2 # 0 ~ 1 + image_depth = input_image.float() + with torch.no_grad(): + # image_depth = torch.from_numpy(image_depth).float().cuda() + # image_depth = image_depth / 127.5 - 1.0 + # image_depth = rearrange(image_depth, 'h w c -> 1 c h w') + # depth = self.model(image_depth)[0] + depth = self.model(image_depth) + depth = depth.unsqueeze(1) + # import pdb; pdb.set_trace() + # import torchvision + # torchvision.utils.save_image(depth, 'debug.png', normalize=True, nrow=12) + + depth -= torch.min(depth) + depth /= torch.max(depth) + # depth = depth.cpu().numpy() + # depth_image = (depth * 255.0).clip(0, 255).astype(np.uint8) + + # return depth_image.to(dtype_) + depth = torch.clamp(depth, 0, 1) + depth = depth * 2 - 1 # -1 ~ 1 + depth = - depth + depth = einops.rearrange(depth, '(b t) c h w -> b c t h w', t=n_frames) + depth = einops.repeat(depth, 'b c t h w -> b (3 c) t h w') + + depth = depth.to(dtype_) + return depth + + def encode(self, x): + return self(x) + +# Pidinet +# https://github.com/hellozhuo/pidinet + +import os +import torch +import numpy as np +from einops import rearrange +from src.controlnet11.annotator.pidinet.model import pidinet +from src.controlnet11.annotator.util import annotator_ckpts_path, safe_step + + +# class 
PidiNetDetector(AbstractEmbModel): +class SoftEdgeEncoder(AbstractEmbModel): # TODO: currently, PidiNet is used to generate soft edge, support more softedge encoder + def __init__(self): + super().__init__() + remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/table5_pidinet.pth" + modelpath = os.path.join(annotator_ckpts_path, "table5_pidinet.pth") + if not os.path.exists(modelpath): + from basicsr.utils.download_util import load_file_from_url + load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path) + self.netNetwork = pidinet() + self.netNetwork.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(modelpath)['state_dict'].items()}) + # self.netNetwork = self.netNetwork.cuda() + self.netNetwork.eval() + for param in self.parameters(): + param.requires_grad = False + + def __call__(self, input_image, safe=False): + dtype_ = input_image.dtype + if self.netNetwork.state_dict()['classifier.weight'].dtype != torch.float32: + print('converting softedge model to torch.float32') + self.netNetwork.float() + assert input_image.ndim == 5, "input must be 5D tensor" # range -1 ~ 1 + n_frames = input_image.shape[2] + input_image = einops.rearrange(input_image, 'b c t h w -> (b t) c h w') + input_image = (input_image + 1) / 2 # 0 ~ 1 + input_image = input_image[:,[2,1,0],:,:] + input_image = input_image.float() + with torch.no_grad(): + edge = self.netNetwork(input_image)[-1] + if safe: + edge = safe_step(edge) + edge = torch.clamp(edge, 0, 1) + edge = 1 - edge # + edge = edge * 2 - 1 # -1 ~ 1 + edge = einops.rearrange(edge, '(b t) c h w -> b c t h w', t=n_frames) + edge = einops.repeat(edge, 'b c t h w -> b (3 c) t h w') + + edge = edge.to(dtype_) + return edge + + def encode(self, x): + return self(x) + + +# Estimating and Exploiting the Aleatoric Uncertainty in Surface Normal Estimation +# https://github.com/baegwangbin/surface_normal_uncertainty + +import os +import types +import torch +import numpy as np + +from 
einops import rearrange +from src.controlnet11.annotator.normalbae.models.NNET import NNET +from src.controlnet11.annotator.normalbae.utils import utils +from src.controlnet11.annotator.util import annotator_ckpts_path +import torchvision.transforms as transforms + + +# class NormalBaeDetector: +class NormalBaeEncoder(AbstractEmbModel): + def __init__(self): + super().__init__() + remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/scannet.pt" + modelpath = os.path.join(annotator_ckpts_path, "scannet.pt") + if not os.path.exists(modelpath): + from basicsr.utils.download_util import load_file_from_url + load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path) + args = types.SimpleNamespace() + args.mode = 'client' + args.architecture = 'BN' + args.pretrained = 'scannet' + args.sampling_ratio = 0.4 + args.importance_ratio = 0.7 + model = NNET(args) + model = utils.load_checkpoint(modelpath, model) + model = model.cuda() + model.eval() + self.model = model + self.norm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + + for param in self.parameters(): + param.requires_grad = False + + def __call__(self, input_image): + # assert input_image.ndim == 3 + # image_normal = input_image + dtype_ = input_image.dtype + # TODO: + if self.model.state_dict()['decoder.out_conv_res1.6.bias'].dtype != torch.float32: + print('converting normalbae model to torch.float32') + self.model.float() + # assert input_image.ndim == 3 + assert input_image.ndim == 5, "input must be 5D tensor" # range -1 ~ 1 + n_frames = input_image.shape[2] + input_image = einops.rearrange(input_image, 'b c t h w -> (b t) c h w') + input_image = (input_image + 1) / 2 # 0 ~ 1 + # image_normal = input_image.float() + image_normal = input_image.float() + with torch.no_grad(): + # image_normal = torch.from_numpy(image_normal).float().cuda() + # image_normal = image_normal / 255.0 + # image_normal = rearrange(image_normal, 'h w c -> 1 c h w') + # 
TODO + image_normal = self.norm(image_normal) + normal = self.model(image_normal) + + normal = normal[0][-1][:, :3] + # import torchvision + # torchvision.utils.save_image(input_image, 'debug_img.png', nrow=12) + # torchvision.utils.save_image(normal, 'debug_normal.png', normalize=True, nrow=12) + # d = torch.sum(normal ** 2.0, dim=1, keepdim=True) ** 0.5 + # d = torch.maximum(d, torch.ones_like(d) * 1e-5) + # normal /= d + + normal = einops.rearrange(normal, '(b t) c h w -> b c t h w', t=n_frames) + normal = - normal # todo : not elegant + normal = torch.clamp(normal, -1, 1) + # normal = ((normal + 1) * 0.5).clip(0, 1) + + # normal = rearrange(normal[0], 'c h w -> h w c').cpu().numpy() + # normal_image = (normal * 255.0).clip(0, 255).astype(np.uint8) + + # return normal_image + normal = normal.to(dtype_) + return normal + + def encode(self, x): + return self(x) + +# Scribble +class DoubleConvBlock(torch.nn.Module): + def __init__(self, input_channel, output_channel, layer_number): + super().__init__() + self.convs = torch.nn.Sequential() + self.convs.append(torch.nn.Conv2d(in_channels=input_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1)) + for i in range(1, layer_number): + self.convs.append(torch.nn.Conv2d(in_channels=output_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1)) + self.projection = torch.nn.Conv2d(in_channels=output_channel, out_channels=1, kernel_size=(1, 1), stride=(1, 1), padding=0) + + def __call__(self, x, down_sampling=False): + h = x + if down_sampling: + h = torch.nn.functional.max_pool2d(h, kernel_size=(2, 2), stride=(2, 2)) + for conv in self.convs: + h = conv(h) + h = torch.nn.functional.relu(h) + return h, self.projection(h) + + +class ControlNetHED_Apache2(torch.nn.Module): + def __init__(self): + super().__init__() + self.norm = torch.nn.Parameter(torch.zeros(size=(1, 3, 1, 1))) + self.block1 = DoubleConvBlock(input_channel=3, output_channel=64, layer_number=2) + 
self.block2 = DoubleConvBlock(input_channel=64, output_channel=128, layer_number=2) + self.block3 = DoubleConvBlock(input_channel=128, output_channel=256, layer_number=3) + self.block4 = DoubleConvBlock(input_channel=256, output_channel=512, layer_number=3) + self.block5 = DoubleConvBlock(input_channel=512, output_channel=512, layer_number=3) + + def __call__(self, x): + h = x - self.norm + h, projection1 = self.block1(h) + h, projection2 = self.block2(h, down_sampling=True) + h, projection3 = self.block3(h, down_sampling=True) + h, projection4 = self.block4(h, down_sampling=True) + h, projection5 = self.block5(h, down_sampling=True) + return projection1, projection2, projection3, projection4, projection5 + + +class ScribbleHEDEncoder(AbstractEmbModel): + def __init__(self, lineart_coarse=False, *args, **kwargs): + super().__init__(*args, **kwargs) + from sgm.modules.diffusionmodules.util import LineartDetector + remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/ControlNetHED.pth" + modelpath = os.path.join(annotator_ckpts_path, "ControlNetHED.pth") + if not os.path.exists(modelpath): + from basicsr.utils.download_util import load_file_from_url + load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path) + self.netNetwork = ControlNetHED_Apache2().float().cuda().eval() + self.netNetwork.load_state_dict(torch.load(modelpath)) + for param in self.netNetwork.parameters(): + param.requires_grad = False + + def forward(self, x, safe=False): + dtype_ = x.dtype + if self.netNetwork.state_dict()['block1.convs.0.weight'].dtype != torch.float32: + print('converting softedge model to torch.float32') + self.netNetwork.float() + x = x.float() + assert x.ndim == 5, "input must be 5D tensor" # range -1 ~ 1 + B, C, n_frames, Hh, Ww = x.shape + + x = einops.rearrange(x, 'b c t h w -> (b t) c h w') + x = (x + 1) / 2 # 0 ~ 1 + with torch.no_grad(): + edges = self.netNetwork(x) + edges = [e.detach().cpu().numpy().astype(np.float32)[0, 0] for e 
in edges] + edges = [cv2.resize(e, (Ww, Hh), interpolation=cv2.INTER_LINEAR) for e in edges] + import pdb; pdb.set_trace() + raise NotImplementedError + edges = np.stack(edges, axis=2) + edge = 1 / (1 + np.exp(-np.mean(edges, axis=2).astype(np.float64))) + + + edges = [e.detach().float() for e in edges] + edges = [torch.nn.functional.interpolate(e, size=(Hh, Ww), mode='bilinear', align_corners=False) for e in edges] + edges = torch.cat(edges, dim=1) + + # TODO: keep on developing, seems bug here + edge = 1 / (1 + torch.exp(-torch.mean(edges, axis=1).to(torch.float64))) + if safe: + edge = safe_step(edge) + edge = edge.unsqueeze(1) + import pdb; pdb.set_trace() + + edge = 1 - edge # + edge = edge * 2 - 1 # -1 ~ 1 + edge = einops.rearrange(edge, '(b t) c h w -> b c t h w', t=n_frames) + edge = einops.repeat(edge, 'b c t h w -> b (3 c) t h w') + return edges.to(dtype_) + + def encode(self, x): + return self(x) + + +def nms(x, t, s): + x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s) + + f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8) + f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8) + f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8) + f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8) + + y = np.zeros_like(x) + + for f in [f1, f2, f3, f4]: + np.putmask(y, cv2.dilate(x, kernel=f) == x, x) + + z = np.zeros_like(y, dtype=np.uint8) + z[y > t] = 255 + return z + +import cv2 + +class ScribblePidiNetEncoder(AbstractEmbModel): + def __init__(self): + super().__init__() + remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/table5_pidinet.pth" + modelpath = os.path.join(annotator_ckpts_path, "table5_pidinet.pth") + if not os.path.exists(modelpath): + from basicsr.utils.download_util import load_file_from_url + load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path) + self.netNetwork = pidinet() + self.netNetwork.load_state_dict({k.replace('module.', ''): v for k, v in 
torch.load(modelpath)['state_dict'].items()}) + self.netNetwork = self.netNetwork.cuda() + self.netNetwork.eval() + for param in self.netNetwork.parameters(): + param.requires_grad = False + + def __call__(self, input_image, safe=False): + dtype_ = input_image.dtype + if self.netNetwork.state_dict()['block1_1.conv2.weight'].dtype != torch.float32: + print('converting softedge model to torch.float32') + self.netNetwork.float() + input_image = input_image.float() + assert input_image.ndim == 5, "input must be 5D tensor" # range -1 ~ 1 + B, C, n_frames, H, W = input_image.shape + + input_image = einops.rearrange(input_image, 'b c t h w -> (b t) c h w') + input_image = (input_image + 1) / 2 # 0 ~ 1 + # input_image = input_image[:, :, ::-1].copy() + input_image = input_image[:,[2,1,0],:,:].clone().float() + with torch.no_grad(): + # image_pidi = torch.from_numpy(input_image).float().cuda() + # image_pidi = image_pidi / 255.0 + # image_pidi = rearrange(image_pidi, 'h w c -> 1 c h w') + # edge = self.netNetwork(image_pidi)[-1] + edge = self.netNetwork(input_image)[-1] + + edge = torch.clamp(edge * 255., 0, 255) + edge = edge.cpu().squeeze(1).numpy().astype(np.uint8) + edge_ = [] + for e in edge: + e = nms(e, 127, 3.0) + e = cv2.GaussianBlur(e, (0, 0), 3.0) + e[e > 4] = 255 + e[e < 255] = 0 + edge_.append(e) + edge = np.stack(edge_, axis=0) + edge = torch.from_numpy(edge).float().cuda().unsqueeze(1) + edge = edge / 255. 
+ + if safe: + edge = safe_step(edge) + + edge = 1 - edge # + edge = edge * 2 - 1 # -1 ~ 1 + edge = einops.rearrange(edge, '(b t) c h w -> b c t h w', t=n_frames) + edge = einops.repeat(edge, 'b c t h w -> b (3 c) t h w') + return edge.to(dtype_) + + def encode(self, x): + return self(x) + + +# openpose +import src.controlnet11.annotator.util +import src.controlnet11.annotator.openpose +from src.controlnet11.annotator.openpose.body import Body +from src.controlnet11.annotator.openpose.hand import Hand +from src.controlnet11.annotator.openpose.face import Face +from scipy.ndimage.filters import gaussian_filter +import math +from math import exp + +body_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/body_pose_model.pth" +hand_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/hand_pose_model.pth" +face_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/facenet.pth" + + +def draw_pose(pose, H, W, draw_body=True, draw_hand=True, draw_face=True): + bodies = pose['bodies'] + faces = pose['faces'] + hands = pose['hands'] + candidate = bodies['candidate'] + subset = bodies['subset'] + canvas = np.zeros(shape=(H, W, 3), dtype=np.uint8) + + if draw_body: + # canvas = util.draw_bodypose(canvas, candidate, subset) + canvas = src.controlnet11.annotator.openpose.util.draw_bodypose(canvas, candidate, subset) + + if draw_hand: + # canvas = util.draw_handpose(canvas, hands) + canvas = src.controlnet11.annotator.openpose.util.draw_handpose(canvas, hands) + + if draw_face: + # canvas = util.draw_facepose(canvas, faces) + canvas = src.controlnet11.annotator.openpose.util.draw_facepose(canvas, faces) + + return canvas + + +def smart_resize_torch(x, s): + Ht, Wt = s + if len(x.shape) == 2: + Ho, Wo = x.shape + Co = 1 + x = x.unsqueeze(0).unsqueeze(0) # Convert to (1, 1, Ho, Wo) + elif len(x.shape) == 3: + Ho, Wo, Co = x.shape + x = x.permute(2, 0, 1).unsqueeze(0) # Convert to (1, Co, Ho, Wo) + elif len(x.shape) == 4: + x 
= x.permute(0, 3, 1, 2) # Convert to (B, Co, Ho, Wo) + else: + raise ValueError("Unsupported shape for x") + + k = float(Ht + Wt) / float(Ho + Wo) + + mode = 'area' if k < 1 else 'bicubic' + + if mode == 'bicubic': + resized_x = F.interpolate(x, size=(Ht, Wt), mode=mode, align_corners=True) + else: + resized_x = F.interpolate(x, size=(Ht, Wt), mode=mode) + + if Co == 1: + return resized_x.squeeze(0).squeeze(0) + elif Co == 3: + return resized_x.squeeze(0).permute(1, 2, 0) + else: + return torch.stack([smart_resize_torch(resized_x[0, i], s) for i in range(Co)], dim=2) + + +def smart_resize_k_torch(x, fx, fy): + """ + Resize the input tensor `x` using the scaling factors `fx` and `fy`. + + Args: + x (torch.Tensor): The input tensor to be resized. + fx (float): The scaling factor for the width dimension. + fy (float): The scaling factor for the height dimension. + + Returns: + torch.Tensor: The resized tensor. + + Raises: + ValueError: If the shape of `x` is not supported. + + """ + + if len(x.shape) == 2: + Ho, Wo = x.shape + Co = 1 + x = x.unsqueeze(0).unsqueeze(0) # Convert to (1, 1, Ho, Wo) + elif len(x.shape) == 3: + Ho, Wo, Co = x.shape + x = x.permute(2, 0, 1).unsqueeze(0) # Convert to (1, Co, Ho, Wo) + elif len(x.shape) == 4: + B, Ho, Wo, Co = x.shape + x = x.permute(0, 3, 1, 2) # Convert to (B, Co, Ho, Wo) + else: + raise ValueError("Unsupported shape for x") + + Ht, Wt = int(Ho * fy), int(Wo * fx) + k = float(Ht + Wt) / float(Ho + Wo) + + mode = 'area' if k < 1 else 'bicubic' + + if mode == 'bicubic': + resized_x = F.interpolate(x, size=(Ht, Wt), mode=mode, align_corners=True) + else: + resized_x = F.interpolate(x, size=(Ht, Wt), mode=mode) + + if Co == 1: + return resized_x.squeeze(0).squeeze(0) + elif Co == 3: + return resized_x.squeeze(0).permute(1, 2, 0) + else: + return torch.stack([smart_resize_k_torch(resized_x[0, i], fx, fy) for i in range(Co)], dim=2) + + +def padRightDownCorner_torch(img, stride, padValue): + h, w = img.shape[0], img.shape[1] + + 
pad = 4 * [None] + pad[0] = 0 # up + pad[1] = 0 # left + pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down + pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right + + img_padded = img.clone() + + if pad[0] > 0: + pad_up = torch.zeros((pad[0], w, img.shape[2]), dtype=img.dtype, device=img.device) + padValue + img_padded = torch.cat((pad_up, img_padded), dim=0) + + if pad[1] > 0: + pad_left = torch.zeros((h + pad[0], pad[1], img.shape[2]), dtype=img.dtype, device=img.device) + padValue + img_padded = torch.cat((pad_left, img_padded), dim=1) + + if pad[2] > 0: + pad_down = torch.zeros((pad[2], w + pad[1], img.shape[2]), dtype=img.dtype, device=img.device) + padValue + img_padded = torch.cat((img_padded, pad_down), dim=0) + + if pad[3] > 0: + pad_right = torch.zeros((h + pad[0] + pad[2], pad[3], img.shape[2]), dtype=img.dtype, device=img.device) + padValue + img_padded = torch.cat((img_padded, pad_right), dim=1) + + return img_padded, pad + + +def gaussian_kernel(size: int, sigma: float): + kernel = torch.tensor([exp(-(x - size // 2)**2 / float(2 * sigma**2)) for x in range(size)]) + return kernel / kernel.sum() + + +def apply_gaussian_filter(input, sigma): + kernel = gaussian_kernel(3 * int(sigma), sigma) + kernel = kernel.view(1, 1, -1, 1) + padding = kernel.shape[2] // 2 + + if len(input.shape) == 2: + input = input.unsqueeze(0).unsqueeze(0) + elif len(input.shape) == 3: + input = input.unsqueeze(0) + kernel = kernel.to(input.device) + input = F.conv2d(input, kernel, padding=(padding, padding), stride=(1, 1)) + + return input.squeeze(0).squeeze(0) + + +class OpenposeEncoder(AbstractEmbModel): + def __init__(self): + super().__init__() + os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE" # move to here, not influence other modules + + body_modelpath = os.path.join(annotator_ckpts_path, "body_pose_model.pth") + hand_modelpath = os.path.join(annotator_ckpts_path, "hand_pose_model.pth") + face_modelpath = os.path.join(annotator_ckpts_path, 
"facenet.pth") + + if not os.path.exists(body_modelpath): + from basicsr.utils.download_util import load_file_from_url + load_file_from_url(body_model_path, model_dir=annotator_ckpts_path) + + if not os.path.exists(hand_modelpath): + from basicsr.utils.download_util import load_file_from_url + load_file_from_url(hand_model_path, model_dir=annotator_ckpts_path) + + if not os.path.exists(face_modelpath): + from basicsr.utils.download_util import load_file_from_url + load_file_from_url(face_model_path, model_dir=annotator_ckpts_path) + + self.body_estimation = HackedBody(body_modelpath) + # TODO + self.hand_estimation = Hand(hand_modelpath) + self.face_estimation = Face(face_modelpath) + + + for param in self.body_estimation.model.parameters(): + param.requires_grad = False + for param in self.hand_estimation.model.parameters(): + param.requires_grad = False + for param in self.face_estimation.model.parameters(): + param.requires_grad = False + + def __call__(self, oriImg, hand_and_face=False, return_is_index=False): + dtype_ = oriImg.dtype + device = oriImg.device + if self.body_estimation.model.state_dict()['model0.conv1_1.weight'].dtype != torch.float32: + print('converting body_estimation model to torch.float32') + self.body_estimation.model.float() + if self.hand_estimation.model.state_dict()['model1_0.conv1_1.weight'].dtype != torch.float32: + print('converting hand_estimation model to torch.float32') + self.hand_estimation.model.float() + if self.face_estimation.model.state_dict()['conv1_1.weight'].dtype != torch.float32: + print('converting face_estimation model to torch.float32') + self.hand_estimation.model.float() + oriImg = oriImg.float() + assert oriImg.ndim == 5, "input must be 5D tensor" # range -1 ~ 1 + _, _, n_frames, _, _ = oriImg.shape + oriImg = einops.rearrange(oriImg, 'b c t h w -> (b t) h w c') + oriImg = (oriImg + 1) / 2 # 0 ~ 1 + oriImg = oriImg * 255. 
# 0 ~ 255 + oriImg = torch.clamp(oriImg, 0, 255) + # oriImg = oriImg.cpu().numpy().astype(np.uint8) + + # oriImg = oriImg[:, :, ::-1].copy() + oriImgs = torch.flip(oriImg, dims=[3]).clone() + del oriImg + B, H, W, C = oriImgs.shape + poses = [] + with torch.no_grad(): + # TODO: optimize speed + # TODO: the operation on device change is not elegant. + for i in range(B): + oriImg = oriImgs[i] + with torch.autocast("cuda", enabled=False): + candidate, subset = self.body_estimation(oriImg, device) + + hands = [] + faces = [] + if hand_and_face: + assert False, "not implemented" + # Hand + # hands_list = util.handDetect(candidate, subset, oriImg) + hands_list = src.controlnet11.annotator.openpose.util.handDetect(candidate, subset, oriImg) + for x, y, w, is_left in hands_list: + with torch.autocast("cuda", enabled=False): + peaks = self.hand_estimation(oriImg[y:y+w, x:x+w, :]).astype(np.float32) + if peaks.ndim == 2 and peaks.shape[1] == 2: + peaks[:, 0] = np.where(peaks[:, 0] < 1e-6, -1, peaks[:, 0] + x) / float(W) + peaks[:, 1] = np.where(peaks[:, 1] < 1e-6, -1, peaks[:, 1] + y) / float(H) + hands.append(peaks.tolist()) + # Face + # faces_list = util.faceDetect(candidate, subset, oriImg) + faces_list = src.controlnet11.annotator.openpose.util.faceDetect(candidate, subset, oriImg) + for x, y, w in faces_list: + with torch.autocast("cuda", enabled=False): + heatmaps = self.face_estimation(oriImg[y:y+w, x:x+w, :]) + peaks = self.face_estimation.compute_peaks_from_heatmaps(heatmaps).astype(np.float32) + if peaks.ndim == 2 and peaks.shape[1] == 2: + peaks[:, 0] = np.where(peaks[:, 0] < 1e-6, -1, peaks[:, 0] + x) / float(W) + peaks[:, 1] = np.where(peaks[:, 1] < 1e-6, -1, peaks[:, 1] + y) / float(H) + faces.append(peaks.tolist()) + if candidate.ndim == 2 and candidate.shape[1] == 4: + candidate = candidate[:, :2] + candidate[:, 0] /= float(W) + candidate[:, 1] /= float(H) + bodies = dict(candidate=candidate.tolist(), subset=subset.tolist()) + pose = dict(bodies=bodies, 
hands=hands, faces=faces) + + if return_is_index: + poses.append(pose) + else: + poses.append(draw_pose(pose, H, W)) + + poses = np.stack(poses, axis=0) + poses = torch.from_numpy(poses).float().to(device) + poses = poses / 255. # 0 ~ 1 + poses = 1 - poses + poses = poses * 2 - 1 # -1 ~ 1 + # import pdb; pdb.set_trace() + # import torchvision + # oriImgs = torch.from_numpy(oriImgs).float().cuda() + # oriImgs = oriImgs / 255. + # oriImgs = einops.rearrange(oriImgs, 'bt h w c -> bt c h w') + # poses_vis = einops.rearrange(poses, 'bt h w c -> bt c h w') + # torchvision.utils.save_image(oriImgs, 'debug_oriImgs.png', normalize=True, nrow=12) + # torchvision.utils.save_image(poses_vis, 'debug_poses.png', normalize=True, nrow=12) + + poses = einops.rearrange(poses, '(b t) h w c -> b c t h w', t=n_frames) + return poses.to(dtype_) + + def encode(self, x): + return self(x) + + +class HackedBody(object): + def __init__(self, model_path): + from src.controlnet11.annotator.openpose.model import bodypose_model + # self.model = bodypose_model().cuda() + self.model = bodypose_model() + # if torch.cuda.is_available(): + # self.model = self.model.cuda() + # print('cuda') + model_dict = src.controlnet11.annotator.openpose.util.transfer(self.model, torch.load(model_path)) + self.model.load_state_dict(model_dict) + self.model.eval() + + def __call__(self, oriImg, device): + # import time + + # scale_search = [0.5, 1.0, 1.5, 2.0] + scale_search = [0.5] + boxsize = 368 + stride = 8 + padValue = 128 + thre1 = 0.1 + thre2 = 0.05 + multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search] + # heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19)) + heatmap_avg = torch.zeros((oriImg.shape[0], oriImg.shape[1], 19)).to(device) + # paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38)) + paf_avg = torch.zeros((oriImg.shape[0], oriImg.shape[1], 38)).to(device) + + # t_ = time.time() + + # oriImg = torch.from_numpy(oriImg).float().to(device) + for m in 
range(len(multiplier)): + scale = multiplier[m] + # time_head = time.time() + # imageToTest = src.controlnet11.annotator.openpose.util.smart_resize_k(oriImg, fx=scale, fy=scale) + # imageToTest_padded, pad = src.controlnet11.annotator.openpose.util.padRightDownCorner(imageToTest, stride, padValue) + # im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5 + # im = np.ascontiguousarray(im) + imageToTest = smart_resize_k_torch(oriImg, fx=scale, fy=scale) + imageToTest_padded, pad = padRightDownCorner_torch(imageToTest, stride, padValue) + im = imageToTest_padded.unsqueeze(0).permute(0, 3, 1, 2) / 256 - 0.5 + + # data = torch.from_numpy(im).float() + data = im + # if torch.cuda.is_available(): + # data = data.cuda() + # print('time_head:{}'.format(time.time() - time_head), end='\t') + # data = data.permute([2, 0, 1]).unsqueeze(0).float() + with torch.no_grad(): + # t_model = time.time() + device = data.device + self.model = self.model.to(device) + Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data) + # print('time_model_forward:{}'.format(time.time() - t_model), end='\t') + # time_post = time.time() + # Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy() + # Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy() + + # extract outputs, resize, and remove padding + # heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0)) # output 1 is heatmaps + # heatmap = src.controlnet11.annotator.openpose.util.smart_resize_k(heatmap, fx=stride, fy=stride) + # heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :] + # heatmap = src.controlnet11.annotator.openpose.util.smart_resize(heatmap, (oriImg.shape[0], oriImg.shape[1])) + heatmap = torch.nn.functional.interpolate(Mconv7_stage6_L2, scale_factor=stride, mode='bilinear', align_corners=False) + heatmap = heatmap[:,:,:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3]] + heatmap = torch.nn.functional.interpolate(heatmap, 
size=(oriImg.shape[0], oriImg.shape[1]), mode='bilinear', align_corners=False) + # heatmap = heatmap.squeeze(0).permute(1, 2, 0).cpu().numpy() + heatmap = heatmap.squeeze(0).permute(1, 2, 0) + + # paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0)) # output 0 is PAFs + # paf = src.controlnet11.annotator.openpose.util.smart_resize_k(paf, fx=stride, fy=stride) + # paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :] + # paf = src.controlnet11.annotator.openpose.util.smart_resize(paf, (oriImg.shape[0], oriImg.shape[1])) + paf = torch.nn.functional.interpolate(Mconv7_stage6_L1, scale_factor=stride, mode='bilinear', align_corners=False) + paf = paf[:,:,:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3]] + paf = torch.nn.functional.interpolate(paf, size=(oriImg.shape[0], oriImg.shape[1]), mode='bilinear', align_corners=False) + # paf = paf.squeeze(0).permute(1, 2, 0).cpu().numpy() + paf = paf.squeeze(0).permute(1, 2, 0) + + heatmap_avg += heatmap_avg + heatmap / len(multiplier) + paf_avg += + paf / len(multiplier) + # print('time_post:{}'.format(time.time() - time_post), end='\t') + + # print('time1:{}'.format(time.time() - t_), end='\t') + # t_ = time.time() + + all_peaks = [] + peak_counter = 0 + + # heatmap_avg = torch.from_numpy(heatmap_avg).float().cuda() + heatmap_avg = heatmap_avg.float() + + for part in range(18): + map_ori = heatmap_avg[:, :, part] + one_heatmap = apply_gaussian_filter(map_ori, sigma=3) + + map_left = torch.zeros_like(one_heatmap) + map_left[1:, :] = one_heatmap[:-1, :] + map_right = torch.zeros_like(one_heatmap) + map_right[:-1, :] = one_heatmap[1:, :] + map_up = torch.zeros_like(one_heatmap) + map_up[:, 1:] = one_heatmap[:, :-1] + map_down = torch.zeros_like(one_heatmap) + map_down[:, :-1] = one_heatmap[:, 1:] + + peaks_binary = (one_heatmap >= map_left) & (one_heatmap >= map_right) & \ + (one_heatmap >= map_up) & (one_heatmap >= map_down) & (one_heatmap > thre1) + + peaks 
= torch.nonzero(peaks_binary, as_tuple=False) + peaks[:,0] = torch.clamp(peaks[:,0], 0, heatmap_avg.shape[0]-1) + peaks[:,1] = torch.clamp(peaks[:,1], 0, heatmap_avg.shape[1]-1) + peaks_with_score = [(x[1].item(), x[0].item(), map_ori[x[0], x[1]].item()) for x in peaks] + peak_id = range(peak_counter, peak_counter + len(peaks)) + peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))] + + all_peaks.append(peaks_with_score_and_id) + peak_counter += len(peaks) + + + # print('time2:{}'.format(time.time() - t_), end='\t') + # t_ = time.time() + + # find connection in the specified sequence, center 29 is in the position 15 + limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \ + [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \ + [1, 16], [16, 18], [3, 17], [6, 18]] + # the middle joints heatmap correpondence + mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22], \ + [23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52], \ + [55, 56], [37, 38], [45, 46]] + + connection_all = [] + special_k = [] + mid_num = 10 + + # for k in range(len(mapIdx)): + # score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]] + # candA = all_peaks[limbSeq[k][0] - 1] + # candB = all_peaks[limbSeq[k][1] - 1] + # nA = len(candA) + # nB = len(candB) + # indexA, indexB = limbSeq[k] + # if (nA != 0 and nB != 0): + # connection_candidate = [] + # for i in range(nA): + # for j in range(nB): + # vec = np.subtract(candB[j][:2], candA[i][:2]) + # norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1]) + # norm = max(0.001, norm) + # vec = np.divide(vec, norm) + + # startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \ + # np.linspace(candA[i][1], candB[j][1], num=mid_num))) + + # vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \ + # for I in range(len(startend))]) + # vec_y = 
np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \ + # for I in range(len(startend))]) + + # score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1]) + # score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min( + # 0.5 * oriImg.shape[0] / norm - 1, 0) + # criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts) + # criterion2 = score_with_dist_prior > 0 + # if criterion1 and criterion2: + # connection_candidate.append( + # [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]]) + + # connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True) + # connection = np.zeros((0, 5)) + # for c in range(len(connection_candidate)): + # i, j, s = connection_candidate[c][0:3] + # if (i not in connection[:, 3] and j not in connection[:, 4]): + # connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]]) + # if (len(connection) >= min(nA, nB)): + # break + + # connection_all.append(connection) + # else: + # special_k.append(k) + # connection_all.append([]) + + for k in range(len(mapIdx)): + score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]] + candA = all_peaks[limbSeq[k][0] - 1] + candB = all_peaks[limbSeq[k][1] - 1] + nA = len(candA) + nB = len(candB) + indexA, indexB = limbSeq[k] + if nA != 0 and nB != 0: + connection_candidate = [] + for i in range(nA): + for j in range(nB): + vec = torch.tensor(candB[j][:2]) - torch.tensor(candA[i][:2]) + norm = torch.sqrt(vec[0] * vec[0] + vec[1] * vec[1]) + norm = max(0.001, norm) + vec = vec / norm + + startend = [torch.linspace(candA[i][0], candB[j][0], steps=mid_num), + torch.linspace(candA[i][1], candB[j][1], steps=mid_num)] + + vec_x = torch.tensor([score_mid[int(torch.round(startend[1][I])), int(torch.round(startend[0][I])), 0] for I in range(len(startend[0]))]) + vec_y = torch.tensor([score_mid[int(torch.round(startend[1][I])), int(torch.round(startend[0][I])), 1] for I in 
range(len(startend[0]))]) + + score_midpts = vec_x * vec[0] + vec_y * vec[1] + score_with_dist_prior = torch.sum(score_midpts) / len(score_midpts) + min( + 0.5 * oriImg.shape[0] / norm - 1, 0) + criterion1 = len(torch.nonzero(score_midpts > thre2)) > 0.8 * len(score_midpts) + criterion2 = score_with_dist_prior > 0 + + if criterion1 and criterion2: + connection_candidate.append( + [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]]) + + connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True) + connection = torch.zeros((0, 5)) + + for c in range(len(connection_candidate)): + i, j, s = connection_candidate[c][0:3] + if i not in connection[:, 3] and j not in connection[:, 4]: + connection = torch.cat([connection, torch.tensor([[candA[i][3], candB[j][3], s, i, j]])], dim=0) + if len(connection) >= min(nA, nB): + break + + # connection_all.append(connection) + connection_all.append(connection.cpu().numpy()) + else: + special_k.append(k) + connection_all.append([]) + + + # print('time3:{}'.format(time.time() - t_), end='\t') + # t_ = time.time() + + # last number in each row is the total parts number of that person + # the second last number in each row is the score of the overall configuration + subset = -1 * np.ones((0, 20)) + candidate = np.array([item for sublist in all_peaks for item in sublist]) + + for k in range(len(mapIdx)): + if k not in special_k: + partAs = connection_all[k][:, 0] + partBs = connection_all[k][:, 1] + indexA, indexB = np.array(limbSeq[k]) - 1 + + for i in range(len(connection_all[k])): # = 1:size(temp,1) + found = 0 + subset_idx = [-1, -1] + for j in range(len(subset)): # 1:size(subset,1): + if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]: + subset_idx[found] = j + found += 1 + + if found == 1: + j = subset_idx[0] + if subset[j][indexB] != partBs[i]: + subset[j][indexB] = partBs[i] + subset[j][-1] += 1 + subset[j][-2] += candidate[partBs[i].astype(int), 2] + 
def _sample_mask_box(ratio_min, ratio_span, height, width):
    """Draw a random axis-aligned box inside a (height, width) frame.

    Exactly four RNG draws are made, in the order h-start, h-end, w-start,
    w-end (kept in this order so random state evolves identically to the
    previous implementation). Each start is sampled in
    [ratio_min, ratio_min + ratio_span) of the extent; each end mirrors the
    same range from the opposite edge. Returns four 1-element int tensors
    usable directly as slice bounds.
    """
    h_lo = ((torch.rand(1) * ratio_span + ratio_min) * height).int()
    h_hi = ((1 - (torch.rand(1) * ratio_span + ratio_min)) * height).int()
    w_lo = ((torch.rand(1) * ratio_span + ratio_min) * width).int()
    w_hi = ((1 - (torch.rand(1) * ratio_span + ratio_min)) * width).int()
    return h_lo, h_hi, w_lo, w_hi


# Inpainting
class InpaintingEncoder(AbstractEmbModel):
    """Conditioning encoder that blanks a random interior box of the input.

    Input is a 5D video tensor (B, C, T, H, W) in [-1, 1]. The output is the
    input rescaled to [0, 1], with the random box overwritten by -1, and the
    whole tensor negated. NOTE(review): the final negation appears deliberate
    (matches OutpaintingEncoder below) — confirm against the consumer.
    """

    def __init__(self, mask_ratio_min=0.3, mask_ratio_max=0.5):
        super().__init__()
        # Ratios bound how far the box edges may sit from the frame border.
        assert 0 <= mask_ratio_min < mask_ratio_max <= 0.5
        self.mask_ratio_max = mask_ratio_max
        self.mask_ratio_min = mask_ratio_min

    def __call__(self, x):
        # Masking is a data transform, not a learned op: no grads, no autocast.
        with torch.no_grad():
            with torch.autocast("cuda", enabled=False):
                b, c, t, h, w = x.shape
                ratio_span = self.mask_ratio_max - self.mask_ratio_min
                assert ratio_span > 0

                h_lo, h_hi, w_lo, w_hi = _sample_mask_box(
                    self.mask_ratio_min, ratio_span, h, w
                )

                box = torch.zeros((b, c, t, h, w), dtype=torch.float32, device=x.device)
                box[:, :, :, h_lo:h_hi, w_lo:w_hi] = 1

                x = (x + 1) / 2          # [-1, 1] -> [0, 1]
                x[box == 1] = -1          # blank the box region
                return -x

    def encode(self, x):
        return self(x)


# Outpainting
class OutpaintingEncoder(AbstractEmbModel):
    """Conditioning encoder that keeps only a random interior box.

    Input is a 5D video tensor (B, C, T, H, W) in [-1, 1]. Everything outside
    the random box is zeroed (in [0, 1] space), the result is mapped back to
    [-1, 1], and the whole tensor negated — mirroring InpaintingEncoder.
    """

    def __init__(self, mask_ratio_min=0.0, mask_ratio_max=0.4):
        super().__init__()
        assert 0 <= mask_ratio_min < mask_ratio_max <= 0.5
        self.mask_ratio_max = mask_ratio_max
        self.mask_ratio_min = mask_ratio_min

    def __call__(self, x):
        with torch.no_grad():
            with torch.autocast("cuda", enabled=False):
                b, c, t, h, w = x.shape
                ratio_span = self.mask_ratio_max - self.mask_ratio_min
                assert ratio_span > 0

                h_lo, h_hi, w_lo, w_hi = _sample_mask_box(
                    self.mask_ratio_min, ratio_span, h, w
                )

                box = torch.zeros((b, c, t, h, w), dtype=torch.float32, device=x.device)
                box[:, :, :, h_lo:h_hi, w_lo:w_hi] = 1

                x = (x + 1) / 2    # [-1, 1] -> [0, 1]
                x = x * box        # zero everything outside the box
                x = x * 2 - 1      # back to [-1, 1]
                return -x

    def encode(self, x):
        return self(x)
import functools
import importlib
import os
from functools import partial
from inspect import isfunction

import numpy as np
import torch

# NOTE: fsspec, PIL and safetensors are imported lazily inside the functions
# that need them, so importing this module does not require those optional
# dependencies to be installed.


def disabled_train(self, mode=True):
    """Overwrite model.train with this function to make sure train/eval mode
    does not change anymore."""
    return self


def get_string_from_tuple(s):
    """If *s* is a stringified tuple like ``"(a, b)"``, return its first
    element; otherwise return *s* unchanged.

    NOTE(review): this uses ``eval`` on the input string — only call it on
    trusted strings (e.g. values from local config files), never on
    untrusted external input.
    """
    try:
        # Only attempt parsing when the string is parenthesized.
        if s[0] == "(" and s[-1] == ")":
            t = eval(s)
            if type(t) == tuple:
                return t[0]
    except Exception:
        # Best-effort by design: any parse failure falls through to `return s`.
        pass
    return s


def is_power_of_two(n):
    """Return True iff integer *n* is a power of two.

    Non-positive numbers are never powers of two. A positive power of two
    has exactly one set bit, so ``n & (n - 1)`` is zero exactly then.
    """
    if n <= 0:
        return False
    return (n & (n - 1)) == 0


def autocast(f, enabled=True):
    """Wrap callable *f* so it always runs under CUDA autocast, inheriting
    the current autocast dtype and cache settings."""

    @functools.wraps(f)
    def do_autocast(*args, **kwargs):
        with torch.cuda.amp.autocast(
            enabled=enabled,
            dtype=torch.get_autocast_gpu_dtype(),
            cache_enabled=torch.is_autocast_cache_enabled(),
        ):
            return f(*args, **kwargs)

    return do_autocast


def load_partial_from_config(config):
    """Return a ``functools.partial`` of the configured ``target`` with its
    ``params`` pre-bound (instantiation is deferred to the caller)."""
    return partial(get_obj_from_str(config["target"]), **config.get("params", dict()))


def log_txt_as_img(wh, xc, size=10, split_loc=40):
    """Render a batch of captions as images for logging.

    Args:
        wh: (width, height) of each rendered image.
        xc: list of captions; a caption may itself be a single-element list.
        size: font size in points.
        split_loc: wrap width in characters at a 256-px reference width
            (scaled by the actual image width).

    Returns:
        Float tensor of shape (B, 3, H, W), values in [-1, 1].
    """
    from PIL import Image, ImageDraw, ImageFont  # lazy optional dependency

    b = len(xc)
    txts = list()
    for bi in range(b):
        txt = Image.new("RGB", wh, color="white")
        draw = ImageDraw.Draw(txt)
        font = ImageFont.truetype("data/DejaVuSans.ttf", size=size)
        nc = int(split_loc * (wh[0] / 256))
        if isinstance(xc[bi], list):
            text_seq = xc[bi][0]
        else:
            text_seq = xc[bi]
        # Hard-wrap the caption every `nc` characters.
        lines = "\n".join(
            text_seq[start : start + nc] for start in range(0, len(text_seq), nc)
        )

        try:
            draw.text((0, 0), lines, fill="black", font=font)
        except UnicodeEncodeError:
            print("Cant encode string for logging. Skipping.")

        txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
        txts.append(txt)
    txts = np.stack(txts)
    txts = torch.tensor(txts)
    return txts


def partialclass(cls, *args, **kwargs):
    """Return a subclass of *cls* whose ``__init__`` has *args*/*kwargs*
    pre-bound (class-level analogue of ``functools.partial``)."""

    class NewCls(cls):
        __init__ = functools.partialmethod(cls.__init__, *args, **kwargs)

    return NewCls


def make_path_absolute(path):
    """Resolve *path* to an absolute local path; non-file protocols
    (s3://, gs://, ...) are returned unchanged."""
    import fsspec  # lazy optional dependency

    fs, p = fsspec.core.url_to_fs(path)
    if fs.protocol == "file":
        return os.path.abspath(p)
    return path


def ismap(x):
    """True for a 4D tensor with more than 3 channels (a feature map)."""
    if not isinstance(x, torch.Tensor):
        return False
    return (len(x.shape) == 4) and (x.shape[1] > 3)


def isimage(x):
    """True for a 4D tensor with 1 or 3 channels (an image batch)."""
    if not isinstance(x, torch.Tensor):
        return False
    return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)


def isheatmap(x):
    """True for a 2D tensor (a single-channel heatmap)."""
    if not isinstance(x, torch.Tensor):
        return False
    return x.ndim == 2


def isneighbors(x):
    """True for a 5D tensor whose third dim is 1 or 3 (neighbor frames)."""
    if not isinstance(x, torch.Tensor):
        return False
    return x.ndim == 5 and (x.shape[2] == 3 or x.shape[2] == 1)


def exists(x):
    """True iff *x* is not None."""
    return x is not None


def expand_dims_like(x, y):
    """Append trailing singleton dims to *x* until it matches *y*'s rank."""
    while x.dim() != y.dim():
        x = x.unsqueeze(-1)
    return x


def default(val, d):
    """Return *val* if it is not None, else *d* (calling it if callable)."""
    if exists(val):
        return val
    return d() if isfunction(d) else d


def mean_flat(tensor):
    """
    https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
    Take the mean over all non-batch dimensions.
    """
    return tensor.mean(dim=list(range(1, len(tensor.shape))))


def count_params(model, verbose=False):
    """Return the total number of parameters of *model*; optionally print
    the count in millions."""
    total_params = sum(p.numel() for p in model.parameters())
    if verbose:
        print(f"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.")
    return total_params


def instantiate_from_config(config):
    """Instantiate ``config['target']`` with ``config['params']``.

    The sentinel strings ``"__is_first_stage__"`` / ``"__is_unconditional__"``
    yield None; any other config without a ``target`` key raises KeyError.
    """
    if "target" not in config:
        if config == "__is_first_stage__":
            return None
        elif config == "__is_unconditional__":
            return None
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", dict()))


def get_obj_from_str(string, reload=False, invalidate_cache=True):
    """Resolve a dotted path ``"pkg.module.Attr"`` to the attribute object,
    optionally reloading the module first."""
    module, cls = string.rsplit(".", 1)
    if invalidate_cache:
        importlib.invalidate_caches()
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def append_zero(x):
    """Append a single zero to 1D tensor *x* (same dtype/device)."""
    return torch.cat([x, x.new_zeros([1])])


def append_dims(x, target_dims):
    """Appends dimensions to the end of a tensor until it has target_dims dimensions."""
    dims_to_append = target_dims - x.ndim
    if dims_to_append < 0:
        raise ValueError(
            f"input has {x.ndim} dims but target_dims is {target_dims}, which is less"
        )
    return x[(...,) + (None,) * dims_to_append]


def load_model_from_config(config, ckpt, verbose=True, freeze=True):
    """Instantiate ``config.model`` and load weights from *ckpt*.

    Supports PyTorch Lightning ``.ckpt`` checkpoints and ``.safetensors``
    files; anything else raises NotImplementedError. The state dict is loaded
    non-strictly, with missing/unexpected keys printed when *verbose*. With
    *freeze*, all parameters get ``requires_grad = False`` and the model is
    put in eval mode.

    NOTE(review): ``torch.load`` unpickles arbitrary code — only load
    trusted checkpoints.
    """
    print(f"Loading model from {ckpt}")
    if ckpt.endswith("ckpt"):
        pl_sd = torch.load(ckpt, map_location="cpu")
        if "global_step" in pl_sd:
            print(f"Global Step: {pl_sd['global_step']}")
        sd = pl_sd["state_dict"]
    elif ckpt.endswith("safetensors"):
        from safetensors.torch import load_file as load_safetensors  # lazy

        sd = load_safetensors(ckpt)
    else:
        raise NotImplementedError

    model = instantiate_from_config(config.model)
    # BUGFIX: a stray `sd = pl_sd["state_dict"]` used to sit here, which
    # raised NameError for safetensors checkpoints (pl_sd never bound) and
    # was redundant for .ckpt; `sd` is already set in both branches above.

    m, u = model.load_state_dict(sd, strict=False)

    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)

    if freeze:
        for param in model.parameters():
            param.requires_grad = False

    model.eval()
    return model


# DFS Search for Torch.nn.Module, Written by Lvmin
def torch_dfs(model: torch.nn.Module):
    """Return *model* and all its descendant modules in DFS pre-order."""
    result = [model]
    for child in model.children():
        result += torch_dfs(child)
    return result