| import argparse |
| import re |
|
|
| import torch |
| import yaml |
| from transformers import ( |
| CLIPProcessor, |
| CLIPTextModel, |
| CLIPTokenizer, |
| CLIPVisionModelWithProjection, |
| ) |
|
|
| from diffusers import ( |
| AutoencoderKL, |
| DDIMScheduler, |
| StableDiffusionGLIGENPipeline, |
| StableDiffusionGLIGENTextImagePipeline, |
| UNet2DConditionModel, |
| ) |
| from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( |
| assign_to_checkpoint, |
| conv_attn_to_linear, |
| protected, |
| renew_attention_paths, |
| renew_resnet_paths, |
| renew_vae_attention_paths, |
| renew_vae_resnet_paths, |
| shave_segments, |
| textenc_conversion_map, |
| textenc_pattern, |
| ) |
|
|
|
|
| def convert_open_clip_checkpoint(checkpoint): |
| checkpoint = checkpoint["text_encoder"] |
| text_model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14") |
|
|
| keys = list(checkpoint.keys()) |
|
|
| text_model_dict = {} |
|
|
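| # Width of the text model's hidden states, used below to split the fused in_proj weights into q/k/v; fall back to 1024 when the projection weight is absent. |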
| if "cond_stage_model.model.text_projection" in checkpoint: |
| d_model = int(checkpoint["cond_stage_model.model.text_projection"].shape[0]) |
| else: |
| d_model = 1024 |
|
|
| for key in keys: |
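| # Skip the final transformer block (resblocks.23); its weights are not carried over to the converted text encoder. |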
| if "resblocks.23" in key: |
| continue |
| if key in textenc_conversion_map: |
| text_model_dict[textenc_conversion_map[key]] = checkpoint[key] |
| |
| new_key = key[len("transformer.") :] |
| if new_key.endswith(".in_proj_weight"): |
| new_key = new_key[: -len(".in_proj_weight")] |
| new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) |
| text_model_dict[new_key + ".q_proj.weight"] = checkpoint[key][:d_model, :] |
| text_model_dict[new_key + ".k_proj.weight"] = checkpoint[key][d_model : d_model * 2, :] |
| text_model_dict[new_key + ".v_proj.weight"] = checkpoint[key][d_model * 2 :, :] |
| elif new_key.endswith(".in_proj_bias"): |
| new_key = new_key[: -len(".in_proj_bias")] |
| new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) |
| text_model_dict[new_key + ".q_proj.bias"] = checkpoint[key][:d_model] |
| text_model_dict[new_key + ".k_proj.bias"] = checkpoint[key][d_model : d_model * 2] |
| text_model_dict[new_key + ".v_proj.bias"] = checkpoint[key][d_model * 2 :] |
| else: |
| if key != "transformer.text_model.embeddings.position_ids": |
| new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) |
|
|
| text_model_dict[new_key] = checkpoint[key] |
|
|
| if key == "transformer.text_model.embeddings.token_embedding.weight": |
| text_model_dict["text_model.embeddings.token_embedding.weight"] = checkpoint[key] |
|
|
| text_model_dict.pop("text_model.embeddings.transformer.text_model.embeddings.token_embedding.weight") |
|
|
| text_model.load_state_dict(text_model_dict) |
|
|
| return text_model |
|
|
|
|
| def convert_gligen_vae_checkpoint(checkpoint, config): |
| checkpoint = checkpoint["autoencoder"] |
| vae_state_dict = {} |
| vae_key = "first_stage_model." |
| keys = list(checkpoint.keys()) |
| for key in keys: |
| vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key) |
|
|
| new_checkpoint = {} |
|
|
| new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"] |
| new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"] |
| new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"] |
| new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"] |
| new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"] |
| new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"] |
|
|
| new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"] |
| new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"] |
| new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"] |
| new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"] |
| new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"] |
| new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"] |
|
|
| new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"] |
| new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"] |
| new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"] |
| new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"] |
|
|
| |
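| # Gather the encoder down-block keys, grouped by block index. |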
| num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer}) |
| down_blocks = { |
| layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) |
| } |
|
|
| |
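| # Gather the decoder up-block keys, grouped by block index. |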
| num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer}) |
| up_blocks = { |
| layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks) |
| } |
|
|
| for i in range(num_down_blocks): |
| resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] |
|
|
| if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: |
| new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop( |
| f"encoder.down.{i}.downsample.conv.weight" |
| ) |
| new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop( |
| f"encoder.down.{i}.downsample.conv.bias" |
| ) |
|
|
| paths = renew_vae_resnet_paths(resnets) |
| meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"} |
| assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) |
|
|
| mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key] |
| num_mid_res_blocks = 2 |
| for i in range(1, num_mid_res_blocks + 1): |
| resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key] |
|
|
| paths = renew_vae_resnet_paths(resnets) |
| meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} |
| assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) |
|
|
| mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key] |
| paths = renew_vae_attention_paths(mid_attentions) |
| meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} |
| assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) |
| conv_attn_to_linear(new_checkpoint) |
|
|
| for i in range(num_up_blocks): |
| block_id = num_up_blocks - 1 - i |
| resnets = [ |
| key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key |
| ] |
|
|
| if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: |
| new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[ |
| f"decoder.up.{block_id}.upsample.conv.weight" |
| ] |
| new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[ |
| f"decoder.up.{block_id}.upsample.conv.bias" |
| ] |
|
|
| paths = renew_vae_resnet_paths(resnets) |
| meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"} |
| assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) |
|
|
| mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key] |
| num_mid_res_blocks = 2 |
| for i in range(1, num_mid_res_blocks + 1): |
| resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key] |
|
|
| paths = renew_vae_resnet_paths(resnets) |
| meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} |
| assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) |
|
|
| mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key] |
| paths = renew_vae_attention_paths(mid_attentions) |
| meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} |
| assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) |
| conv_attn_to_linear(new_checkpoint) |
|
|
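| # Rename the legacy attention parameter names in the mid-block attentions to the diffusers to_q/to_k/to_v/to_out.0 naming. |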
| for key in list(new_checkpoint.keys()): |
| if "encoder.mid_block.attentions.0" in key or "decoder.mid_block.attentions.0" in key: |
| if "query" in key: |
| new_checkpoint[key.replace("query", "to_q")] = new_checkpoint.pop(key) |
| if "value" in key: |
| new_checkpoint[key.replace("value", "to_v")] = new_checkpoint.pop(key) |
| if "key" in key: |
| new_checkpoint[key.replace("key", "to_k")] = new_checkpoint.pop(key) |
| if "proj_attn" in key: |
| new_checkpoint[key.replace("proj_attn", "to_out.0")] = new_checkpoint.pop(key) |
|
|
| return new_checkpoint |
|
|
|
|
| def convert_gligen_unet_checkpoint(checkpoint, config, path=None, extract_ema=False): |
| unet_state_dict = {} |
| checkpoint = checkpoint["model"] |
| keys = list(checkpoint.keys()) |
|
|
| unet_key = "model.diffusion_model." |
|
|
| if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema: |
| print(f"Checkpoint {path} has both EMA and non-EMA weights.") |
| print( |
| "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA" |
| " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag." |
| ) |
| for key in keys: |
| if key.startswith("model.diffusion_model"): |
| flat_ema_key = "model_ema." + "".join(key.split(".")[1:]) |
| unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) |
| else: |
| if sum(k.startswith("model_ema") for k in keys) > 100: |
| print( |
| "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA" |
| " weights (usually better for inference), please make sure to add the `--extract_ema` flag." |
| ) |
| for key in keys: |
| unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key) |
|
|
| new_checkpoint = {} |
|
|
| new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"] |
| new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"] |
| new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"] |
| new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"] |
|
|
| new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"] |
| new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"] |
|
|
| new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"] |
| new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"] |
| new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"] |
| new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"] |
|
|
| |
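| # Gather the UNet input_blocks keys, grouped by block index. |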
| num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer}) |
| input_blocks = { |
| layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key] |
| for layer_id in range(num_input_blocks) |
| } |
|
|
| |
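| # Gather the middle_block keys, grouped by block index. |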
| num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer}) |
| middle_blocks = { |
| layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key] |
| for layer_id in range(num_middle_blocks) |
| } |
|
|
| |
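| # Gather the output_blocks keys, grouped by block index. |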
| num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer}) |
| output_blocks = { |
| layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key] |
| for layer_id in range(num_output_blocks) |
| } |
|
|
| for i in range(1, num_input_blocks): |
| block_id = (i - 1) // (config["layers_per_block"] + 1) |
| layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) |
|
|
| resnets = [ |
| key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key |
| ] |
| attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] |
|
|
| if f"input_blocks.{i}.0.op.weight" in unet_state_dict: |
| new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop( |
| f"input_blocks.{i}.0.op.weight" |
| ) |
| new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop( |
| f"input_blocks.{i}.0.op.bias" |
| ) |
|
|
| paths = renew_resnet_paths(resnets) |
| meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"} |
| assign_to_checkpoint( |
| paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config |
| ) |
|
|
| if len(attentions): |
| paths = renew_attention_paths(attentions) |
| meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"} |
| assign_to_checkpoint( |
| paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config |
| ) |
|
|
| resnet_0 = middle_blocks[0] |
| attentions = middle_blocks[1] |
| resnet_1 = middle_blocks[2] |
|
|
| resnet_0_paths = renew_resnet_paths(resnet_0) |
| assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config) |
|
|
| resnet_1_paths = renew_resnet_paths(resnet_1) |
| assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config) |
|
|
| attentions_paths = renew_attention_paths(attentions) |
| meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"} |
| assign_to_checkpoint( |
| attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config |
| ) |
|
|
| for i in range(num_output_blocks): |
| block_id = i // (config["layers_per_block"] + 1) |
| layer_in_block_id = i % (config["layers_per_block"] + 1) |
| output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]] |
| output_block_list = {} |
|
|
| for layer in output_block_layers: |
| layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1) |
| if layer_id in output_block_list: |
| output_block_list[layer_id].append(layer_name) |
| else: |
| output_block_list[layer_id] = [layer_name] |
|
|
| if len(output_block_list) > 1: |
| resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key] |
| attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key] |
|
|
| paths = renew_resnet_paths(resnets) |
|
|
| meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"} |
| assign_to_checkpoint( |
| paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config |
| ) |
|
|
| output_block_list = {k: sorted(v) for k, v in output_block_list.items()} |
| if ["conv.bias", "conv.weight"] in output_block_list.values(): |
| index = list(output_block_list.values()).index(["conv.bias", "conv.weight"]) |
| new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ |
| f"output_blocks.{i}.{index}.conv.weight" |
| ] |
| new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ |
| f"output_blocks.{i}.{index}.conv.bias" |
| ] |
|
|
| |
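| # If only the two upsampler conv tensors (handled above) sit at index 1, there is no attention block to convert. |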
| if len(attentions) == 2: |
| attentions = [] |
|
|
| if len(attentions): |
| paths = renew_attention_paths(attentions) |
| meta_path = { |
| "old": f"output_blocks.{i}.1", |
| "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}", |
| } |
| assign_to_checkpoint( |
| paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config |
| ) |
| else: |
| resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1) |
| for path in resnet_0_paths: |
| old_path = ".".join(["output_blocks", str(i), path["old"]]) |
| new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]]) |
|
|
| new_checkpoint[new_path] = unet_state_dict[old_path] |
|
|
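| # Carry over GLIGEN's position_net (grounding embedding) weights unchanged. |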
| for key in keys: |
| if "position_net" in key: |
| new_checkpoint[key] = unet_state_dict[key] |
|
|
| return new_checkpoint |
|
|
|
|
| def create_vae_config(original_config, image_size: int): |
| vae_params = original_config["autoencoder"]["params"]["ddconfig"] |
| _ = original_config["autoencoder"]["params"]["embed_dim"] |
|
|
| block_out_channels = [vae_params["ch"] * mult for mult in vae_params["ch_mult"]] |
| down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels) |
| up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels) |
|
|
| config = { |
| "sample_size": image_size, |
| "in_channels": vae_params["in_channels"], |
| "out_channels": vae_params["out_ch"], |
| "down_block_types": tuple(down_block_types), |
| "up_block_types": tuple(up_block_types), |
| "block_out_channels": tuple(block_out_channels), |
| "latent_channels": vae_params["z_channels"], |
| "layers_per_block": vae_params["num_res_blocks"], |
| } |
|
|
| return config |
|
|
|
|
| def create_unet_config(original_config, image_size: int, attention_type): |
| unet_params = original_config["model"]["params"] |
| vae_params = original_config["autoencoder"]["params"]["ddconfig"] |
|
|
| block_out_channels = [unet_params["model_channels"] * mult for mult in unet_params["channel_mult"]] |
|
|
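| # Use cross-attention down blocks at the resolutions where the original UNet applies attention. |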
| down_block_types = [] |
| resolution = 1 |
| for i in range(len(block_out_channels)): |
| block_type = "CrossAttnDownBlock2D" if resolution in unet_params["attention_resolutions"] else "DownBlock2D" |
| down_block_types.append(block_type) |
| if i != len(block_out_channels) - 1: |
| resolution *= 2 |
|
|
| up_block_types = [] |
| for i in range(len(block_out_channels)): |
| block_type = "CrossAttnUpBlock2D" if resolution in unet_params["attention_resolutions"] else "UpBlock2D" |
| up_block_types.append(block_type) |
| resolution //= 2 |
|
|
| vae_scale_factor = 2 ** (len(vae_params["ch_mult"]) - 1) |
|
|
| head_dim = unet_params["num_heads"] if "num_heads" in unet_params else None |
| use_linear_projection = ( |
| unet_params["use_linear_in_transformer"] if "use_linear_in_transformer" in unet_params else False |
| ) |
| if use_linear_projection: |
| if head_dim is None: |
| head_dim = [5, 10, 20, 20] |
|
|
| config = { |
| "sample_size": image_size // vae_scale_factor, |
| "in_channels": unet_params["in_channels"], |
| "down_block_types": tuple(down_block_types), |
| "block_out_channels": tuple(block_out_channels), |
| "layers_per_block": unet_params["num_res_blocks"], |
| "cross_attention_dim": unet_params["context_dim"], |
| "attention_head_dim": head_dim, |
| "use_linear_projection": use_linear_projection, |
| "attention_type": attention_type, |
| } |
|
|
| return config |
|
|
|
|
| def convert_gligen_to_diffusers( |
| checkpoint_path: str, |
| original_config_file: str, |
| attention_type: str, |
| image_size: int = 512, |
| extract_ema: bool = False, |
| num_in_channels: int = None, |
| device: str = None, |
| ): |
| if device is None: |
| device = "cuda" if torch.cuda.is_available() else "cpu" |
| checkpoint = torch.load(checkpoint_path, map_location=device) |
|
|
| if "global_step" not in checkpoint: |
| print("global_step key not found in model") |
|
|
| with open(original_config_file, "r") as f: |
| original_config = yaml.safe_load(f) |
|
|
| if num_in_channels is not None: |
| original_config["model"]["params"]["in_channels"] = num_in_channels |
|
|
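| # Rebuild the DDIM noise scheduler from the diffusion parameters in the original config. |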
| num_train_timesteps = original_config["diffusion"]["params"]["timesteps"] |
| beta_start = original_config["diffusion"]["params"]["linear_start"] |
| beta_end = original_config["diffusion"]["params"]["linear_end"] |
|
|
| scheduler = DDIMScheduler( |
| beta_end=beta_end, |
| beta_schedule="scaled_linear", |
| beta_start=beta_start, |
| num_train_timesteps=num_train_timesteps, |
| steps_offset=1, |
| clip_sample=False, |
| set_alpha_to_one=False, |
| prediction_type="epsilon", |
| ) |
|
|
| |
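| # Convert the UNet. |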
| unet_config = create_unet_config(original_config, image_size, attention_type) |
| unet = UNet2DConditionModel(**unet_config) |
|
|
| converted_unet_checkpoint = convert_gligen_unet_checkpoint( |
| checkpoint, unet_config, path=checkpoint_path, extract_ema=extract_ema |
| ) |
|
|
| unet.load_state_dict(converted_unet_checkpoint) |
|
|
| |
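| # Convert the VAE. |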
| vae_config = create_vae_config(original_config, image_size) |
| converted_vae_checkpoint = convert_gligen_vae_checkpoint(checkpoint, vae_config) |
|
|
| vae = AutoencoderKL(**vae_config) |
| vae.load_state_dict(converted_vae_checkpoint) |
|
|
| |
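| # Convert the CLIP text encoder. |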
| text_encoder = convert_open_clip_checkpoint(checkpoint) |
| tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14") |
|
|
| if attention_type == "gated-text-image": |
| image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14") |
| processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14") |
|
|
| pipe = StableDiffusionGLIGENTextImagePipeline( |
| vae=vae, |
| text_encoder=text_encoder, |
| tokenizer=tokenizer, |
| image_encoder=image_encoder, |
| processor=processor, |
| unet=unet, |
| scheduler=scheduler, |
| safety_checker=None, |
| feature_extractor=None, |
| ) |
| elif attention_type == "gated": |
| pipe = StableDiffusionGLIGENPipeline( |
| vae=vae, |
| text_encoder=text_encoder, |
| tokenizer=tokenizer, |
| unet=unet, |
| scheduler=scheduler, |
| safety_checker=None, |
| feature_extractor=None, |
| ) |
| else: |
| raise ValueError(f"Unknown attention_type: {attention_type!r}; expected 'gated' or 'gated-text-image'.") |
|
|
| return pipe |
|
|
|
|
| if __name__ == "__main__": |
| parser = argparse.ArgumentParser() |
|
|
| parser.add_argument( |
| "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." |
| ) |
| parser.add_argument( |
| "--original_config_file", |
| default=None, |
| type=str, |
| required=True, |
| help="The YAML config file corresponding to the GLIGEN architecture.", |
| ) |
| parser.add_argument( |
| "--num_in_channels", |
| default=None, |
| type=int, |
| help="The number of input channels. If `None`, the number of input channels will be inferred automatically.", |
| ) |
| parser.add_argument( |
| "--extract_ema", |
| action="store_true", |
| help=( |
| "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights" |
| " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield" |
| " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning." |
| ), |
| ) |
| parser.add_argument( |
| "--attention_type", |
| default=None, |
| type=str, |
| required=True, |
| help="Type of attention to use, e.g. `gated` or `gated-text-image`.", |
| ) |
| parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") |
| parser.add_argument("--device", type=str, help="Device to use.") |
| parser.add_argument("--half", action="store_true", help="Save weights in half precision.") |
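| # Example invocation (illustrative script name and paths; adjust to your setup): |
| #   python convert_gligen_to_diffusers.py \ |
| #     --checkpoint_path /path/to/gligen.ckpt \ |
| #     --original_config_file /path/to/gligen.yaml \ |
| #     --attention_type gated \ |
| #     --dump_path ./gligen-diffusers |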
|
|
| args = parser.parse_args() |
|
|
| pipe = convert_gligen_to_diffusers( |
| checkpoint_path=args.checkpoint_path, |
| original_config_file=args.original_config_file, |
| attention_type=args.attention_type, |
| extract_ema=args.extract_ema, |
| num_in_channels=args.num_in_channels, |
| device=args.device, |
| ) |
|
|
| if args.half: |
| pipe.to(dtype=torch.float16) |
|
|
| pipe.save_pretrained(args.dump_path) |
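| # The saved pipeline can later be reloaded for inference, e.g. (illustrative path; use the TextImage variant for gated-text-image): |
| #   pipe = StableDiffusionGLIGENPipeline.from_pretrained("./gligen-diffusers") |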
|
|