| | """ |
| | Convert a CogView4 checkpoint from SAT(https://github.com/THUDM/SwissArmyTransformer) to the Diffusers format. |
| | (deprecated Since 2025-02-07 and will remove it in later CogView4 version) |
| | |
| | This script converts a CogView4 checkpoint to the Diffusers format, which can then be used |
| | with the Diffusers library. |
| | |
| | Example usage: |
| | python scripts/convert_cogview4_to_diffusers.py \ |
| | --transformer_checkpoint_path 'your path/cogview4_6b/1/mp_rank_00_model_states.pt' \ |
| | --vae_checkpoint_path 'your path/cogview4_6b/imagekl_ch16.pt' \ |
| | --output_path "THUDM/CogView4-6B" \ |
| | --dtype "bf16" |
| | |
| | Arguments: |
| | --transformer_checkpoint_path: Path to Transformer state dict. |
| | --vae_checkpoint_path: Path to VAE state dict. |
| | --output_path: The path to save the converted model. |
| | --push_to_hub: Whether to push the converted checkpoint to the HF Hub or not. Defaults to `False`. |
| | --text_encoder_cache_dir: Cache directory where text encoder is located. Defaults to None, which means HF_HOME will be used |
| | --dtype: The dtype to save the model in (default: "bf16", options: "fp16", "bf16", "fp32"). If None, the dtype of the state dict is considered. |
| | |
| | Default is "bf16" because CogView4 uses bfloat16 for Training. |
| | |
| | Note: You must provide either --original_state_dict_repo_id or --checkpoint_path. |
| | """ |

import argparse
from contextlib import nullcontext

import torch
from transformers import GlmForCausalLM, PreTrainedTokenizerFast

from diffusers import AutoencoderKL, CogView4Pipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler
from diffusers.loaders.single_file_utils import convert_ldm_vae_checkpoint
from diffusers.utils.import_utils import is_accelerate_available


# Import accelerate only when it is available, so the script does not fail at import time without it.
if is_accelerate_available():
    from accelerate import init_empty_weights

    CTX = init_empty_weights
else:
    CTX = nullcontext

parser = argparse.ArgumentParser()
parser.add_argument("--transformer_checkpoint_path", default=None, type=str)
parser.add_argument("--vae_checkpoint_path", default=None, type=str)
parser.add_argument("--output_path", required=True, type=str)
parser.add_argument("--push_to_hub", action="store_true", default=False, help="Whether to push to HF Hub after saving")
parser.add_argument("--text_encoder_cache_dir", type=str, default=None, help="Path to text encoder cache directory")
parser.add_argument("--dtype", type=str, default="bf16", help='One of "fp16", "bf16", or "fp32"')

args = parser.parse_args()
|
| |
|
| | |
| | |
| | def swap_scale_shift(weight, dim): |
| | """ |
| | Swap the scale and shift components in the weight tensor. |
| | |
| | Args: |
| | weight (torch.Tensor): The original weight tensor. |
| | dim (int): The dimension along which to split. |
| | |
| | Returns: |
| | torch.Tensor: The modified weight tensor with scale and shift swapped. |
| | """ |
| | shift, scale = weight.chunk(2, dim=dim) |
| | new_weight = torch.cat([scale, shift], dim=dim) |
| | return new_weight |
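

# A minimal illustration of swap_scale_shift on a toy tensor (hypothetical values, not part of the conversion):
#     w = torch.arange(4.0)        # chunks into shift=[0., 1.] and scale=[2., 3.]
#     swap_scale_shift(w, dim=0)   # tensor([2., 3., 0., 1.]) -- scale now comes first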


def convert_cogview4_transformer_checkpoint_to_diffusers(ckpt_path):
    original_state_dict = torch.load(ckpt_path, map_location="cpu")
    original_state_dict = original_state_dict["module"]
    original_state_dict = {k.replace("model.diffusion_model.", ""): v for k, v in original_state_dict.items()}

    new_state_dict = {}

    # Patch embedding: image patch projection and text projection.
    new_state_dict["patch_embed.proj.weight"] = original_state_dict.pop("mixins.patch_embed.proj.weight")
    new_state_dict["patch_embed.proj.bias"] = original_state_dict.pop("mixins.patch_embed.proj.bias")
    new_state_dict["patch_embed.text_proj.weight"] = original_state_dict.pop("mixins.patch_embed.text_proj.weight")
    new_state_dict["patch_embed.text_proj.bias"] = original_state_dict.pop("mixins.patch_embed.text_proj.bias")

    # Timestep and condition embedders.
    new_state_dict["time_condition_embed.timestep_embedder.linear_1.weight"] = original_state_dict.pop(
        "time_embed.0.weight"
    )
    new_state_dict["time_condition_embed.timestep_embedder.linear_1.bias"] = original_state_dict.pop(
        "time_embed.0.bias"
    )
    new_state_dict["time_condition_embed.timestep_embedder.linear_2.weight"] = original_state_dict.pop(
        "time_embed.2.weight"
    )
    new_state_dict["time_condition_embed.timestep_embedder.linear_2.bias"] = original_state_dict.pop(
        "time_embed.2.bias"
    )
    new_state_dict["time_condition_embed.condition_embedder.linear_1.weight"] = original_state_dict.pop(
        "label_emb.0.0.weight"
    )
    new_state_dict["time_condition_embed.condition_embedder.linear_1.bias"] = original_state_dict.pop(
        "label_emb.0.0.bias"
    )
    new_state_dict["time_condition_embed.condition_embedder.linear_2.weight"] = original_state_dict.pop(
        "label_emb.0.2.weight"
    )
    new_state_dict["time_condition_embed.condition_embedder.linear_2.bias"] = original_state_dict.pop(
        "label_emb.0.2.bias"
    )

    # Transformer blocks (the CogView4-6B transformer has 28 layers; see num_layers below).
    for i in range(28):
        block_prefix = f"transformer_blocks.{i}."
        old_prefix = f"transformer.layers.{i}."
        adaln_prefix = f"mixins.adaln.adaln_modules.{i}."
        new_state_dict[block_prefix + "norm1.linear.weight"] = original_state_dict.pop(adaln_prefix + "1.weight")
        new_state_dict[block_prefix + "norm1.linear.bias"] = original_state_dict.pop(adaln_prefix + "1.bias")

        # Split the fused query/key/value projection into separate Q, K, V projections.
        qkv_weight = original_state_dict.pop(old_prefix + "attention.query_key_value.weight")
        qkv_bias = original_state_dict.pop(old_prefix + "attention.query_key_value.bias")
        q, k, v = qkv_weight.chunk(3, dim=0)
        q_bias, k_bias, v_bias = qkv_bias.chunk(3, dim=0)

        new_state_dict[block_prefix + "attn1.to_q.weight"] = q
        new_state_dict[block_prefix + "attn1.to_q.bias"] = q_bias
        new_state_dict[block_prefix + "attn1.to_k.weight"] = k
        new_state_dict[block_prefix + "attn1.to_k.bias"] = k_bias
        new_state_dict[block_prefix + "attn1.to_v.weight"] = v
        new_state_dict[block_prefix + "attn1.to_v.bias"] = v_bias

        new_state_dict[block_prefix + "attn1.to_out.0.weight"] = original_state_dict.pop(
            old_prefix + "attention.dense.weight"
        )
        new_state_dict[block_prefix + "attn1.to_out.0.bias"] = original_state_dict.pop(
            old_prefix + "attention.dense.bias"
        )

        new_state_dict[block_prefix + "ff.net.0.proj.weight"] = original_state_dict.pop(
            old_prefix + "mlp.dense_h_to_4h.weight"
        )
        new_state_dict[block_prefix + "ff.net.0.proj.bias"] = original_state_dict.pop(
            old_prefix + "mlp.dense_h_to_4h.bias"
        )
        new_state_dict[block_prefix + "ff.net.2.weight"] = original_state_dict.pop(
            old_prefix + "mlp.dense_4h_to_h.weight"
        )
        new_state_dict[block_prefix + "ff.net.2.bias"] = original_state_dict.pop(old_prefix + "mlp.dense_4h_to_h.bias")

    # Final AdaLN and output projection; the SAT checkpoint stores [shift, scale] while Diffusers expects
    # [scale, shift], hence the swap.
    new_state_dict["norm_out.linear.weight"] = swap_scale_shift(
        original_state_dict.pop("mixins.final_layer.adaln.1.weight"), dim=0
    )
    new_state_dict["norm_out.linear.bias"] = swap_scale_shift(
        original_state_dict.pop("mixins.final_layer.adaln.1.bias"), dim=0
    )
    new_state_dict["proj_out.weight"] = original_state_dict.pop("mixins.final_layer.linear.weight")
    new_state_dict["proj_out.bias"] = original_state_dict.pop("mixins.final_layer.linear.bias")

    return new_state_dict
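

# Optional debugging aid (an addition, not part of the original script): keys still left in
# `original_state_dict` after the pops above can be inspected just before the `return`, e.g.:
#     if original_state_dict:
#         print(f"Keys not converted: {sorted(original_state_dict)}")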


def convert_cogview4_vae_checkpoint_to_diffusers(ckpt_path, vae_config):
    # The CogView4 VAE follows the LDM layout, so the generic LDM -> Diffusers converter can be reused.
    original_state_dict = torch.load(ckpt_path, map_location="cpu")["state_dict"]
    return convert_ldm_vae_checkpoint(original_state_dict, vae_config)
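

# For reference (a description of the imported helper, not logic added here): convert_ldm_vae_checkpoint
# renames LDM-style VAE keys onto the Diffusers AutoencoderKL naming, e.g.
#     "encoder.down.0.block.0.norm1.weight" -> "encoder.down_blocks.0.resnets.0.norm1.weight"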


def main(args):
    if args.dtype == "fp16":
        dtype = torch.float16
    elif args.dtype == "bf16":
        dtype = torch.bfloat16
    elif args.dtype == "fp32":
        dtype = torch.float32
    else:
        raise ValueError(f"Unsupported dtype: {args.dtype}")

    transformer = None
    vae = None

    if args.transformer_checkpoint_path is not None:
        converted_transformer_state_dict = convert_cogview4_transformer_checkpoint_to_diffusers(
            args.transformer_checkpoint_path
        )
        transformer = CogView4Transformer2DModel(
            patch_size=2,
            in_channels=16,
            num_layers=28,
            attention_head_dim=128,
            num_attention_heads=32,
            out_channels=16,
            text_embed_dim=4096,
            time_embed_dim=512,
            condition_dim=256,
            pos_embed_max_size=128,
        )
        transformer.load_state_dict(converted_transformer_state_dict, strict=True)
        if dtype is not None:
            # Cast the converted transformer to the requested dtype.
            transformer = transformer.to(dtype=dtype)
|
| | if args.vae_checkpoint_path is not None: |
| | vae_config = { |
| | "in_channels": 3, |
| | "out_channels": 3, |
| | "down_block_types": ("DownEncoderBlock2D",) * 4, |
| | "up_block_types": ("UpDecoderBlock2D",) * 4, |
| | "block_out_channels": (128, 512, 1024, 1024), |
| | "layers_per_block": 3, |
| | "act_fn": "silu", |
| | "latent_channels": 16, |
| | "norm_num_groups": 32, |
| | "sample_size": 1024, |
| | "scaling_factor": 1.0, |
| | "shift_factor": 0.0, |
| | "force_upcast": True, |
| | "use_quant_conv": False, |
| | "use_post_quant_conv": False, |
| | "mid_block_add_attention": False, |
| | } |
| | converted_vae_state_dict = convert_cogview4_vae_checkpoint_to_diffusers(args.vae_checkpoint_path, vae_config) |
| | vae = AutoencoderKL(**vae_config) |
| | vae.load_state_dict(converted_vae_state_dict, strict=True) |
| | if dtype is not None: |
| | vae = vae.to(dtype=dtype) |
| |
|
| | text_encoder_id = "THUDM/glm-4-9b-hf" |
| | tokenizer = PreTrainedTokenizerFast.from_pretrained(text_encoder_id) |
| | text_encoder = GlmForCausalLM.from_pretrained( |
| | text_encoder_id, |
| | cache_dir=args.text_encoder_cache_dir, |
| | torch_dtype=torch.bfloat16 if args.dtype == "bf16" else torch.float32, |
| | ) |
| |
|
| | for param in text_encoder.parameters(): |
| | param.data = param.data.contiguous() |
| |
|
| | scheduler = FlowMatchEulerDiscreteScheduler( |
| | base_shift=0.25, max_shift=0.75, base_image_seq_len=256, use_dynamic_shifting=True, time_shift_type="linear" |
| | ) |
| |
|
| | pipe = CogView4Pipeline( |
| | tokenizer=tokenizer, |
| | text_encoder=text_encoder, |
| | vae=vae, |
| | transformer=transformer, |
| | scheduler=scheduler, |
| | ) |
| |
|
| | |
| | |
| | pipe.save_pretrained(args.output_path, safe_serialization=True, max_shard_size="5GB", push_to_hub=args.push_to_hub) |
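
    # A quick round-trip check (a sketch, not part of the original script):
    #     pipe = CogView4Pipeline.from_pretrained(args.output_path, torch_dtype=torch.bfloat16)
    #     image = pipe("a photo of a cat").images[0]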


if __name__ == "__main__":
    main(args)