import argparse
import copy
import logging
import math
import os
import random
import shutil
from contextlib import nullcontext
from pathlib import Path

import accelerate
import numpy as np
import torch
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import DistributedType, ProjectConfiguration, set_seed
from datasets import load_dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from PIL import Image
from torchvision import transforms
from tqdm.auto import tqdm

import diffusers
from diffusers import (
    AutoencoderKL,
    CogView4ControlPipeline,
    CogView4Transformer2DModel,
    FlowMatchEulerDiscreteScheduler,
)
from diffusers.optimization import get_scheduler
from diffusers.training_utils import (
    compute_density_for_timestep_sampling,
    compute_loss_weighting_for_sd3,
    free_memory,
)
from diffusers.utils import check_min_version, is_wandb_available, load_image, make_image_grid
from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
from diffusers.utils.torch_utils import is_compiled_module


if is_wandb_available():
    import wandb

# Will raise an error if the minimal version of diffusers is not installed.
check_min_version("0.37.0.dev0")

logger = get_logger(__name__)

NORM_LAYER_PREFIXES = ["norm_q", "norm_k", "norm_added_q", "norm_added_k"]


def encode_images(pixels: torch.Tensor, vae: torch.nn.Module, weight_dtype):
    pixel_latents = vae.encode(pixels.to(vae.dtype)).latent_dist.sample()
    pixel_latents = (pixel_latents - vae.config.shift_factor) * vae.config.scaling_factor
    return pixel_latents.to(weight_dtype)
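
# The VAE latents are normalized with the model's `shift_factor`/`scaling_factor` so the
# transformer sees roughly unit-variance inputs. A minimal sketch of the inverse (as done
# before decoding at inference time), assuming `latents` came from `encode_images` above:
#
#   latents = latents / vae.config.scaling_factor + vae.config.shift_factor
#   image = vae.decode(latents).sample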


def log_validation(cogview4_transformer, args, accelerator, weight_dtype, step, is_final_validation=False):
    logger.info("Running validation...")

    if not is_final_validation:
        cogview4_transformer = accelerator.unwrap_model(cogview4_transformer)
        pipeline = CogView4ControlPipeline.from_pretrained(
            args.pretrained_model_name_or_path,
            transformer=cogview4_transformer,
            torch_dtype=weight_dtype,
        )
    else:
        transformer = CogView4Transformer2DModel.from_pretrained(args.output_dir, torch_dtype=weight_dtype)
        pipeline = CogView4ControlPipeline.from_pretrained(
            args.pretrained_model_name_or_path,
            transformer=transformer,
            torch_dtype=weight_dtype,
        )

    pipeline.to(accelerator.device)
    pipeline.set_progress_bar_config(disable=True)

    if args.seed is None:
        generator = None
    else:
        generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)

    if len(args.validation_image) == len(args.validation_prompt):
        validation_images = args.validation_image
        validation_prompts = args.validation_prompt
    elif len(args.validation_image) == 1:
        validation_images = args.validation_image * len(args.validation_prompt)
        validation_prompts = args.validation_prompt
    elif len(args.validation_prompt) == 1:
        validation_images = args.validation_image
        validation_prompts = args.validation_prompt * len(args.validation_image)
    else:
        raise ValueError(
            "number of `args.validation_image` and `args.validation_prompt` should be checked in `parse_args`"
        )

    image_logs = []
    if is_final_validation or torch.backends.mps.is_available():
        autocast_ctx = nullcontext()
    else:
        autocast_ctx = torch.autocast(accelerator.device.type, weight_dtype)

    for validation_prompt, validation_image in zip(validation_prompts, validation_images):
        validation_image = load_image(validation_image)
        validation_image = validation_image.resize((args.resolution, args.resolution))

        images = []

        for _ in range(args.num_validation_images):
            with autocast_ctx:
                image = pipeline(
                    prompt=validation_prompt,
                    control_image=validation_image,
                    num_inference_steps=50,
                    guidance_scale=args.guidance_scale,
                    max_sequence_length=args.max_sequence_length,
                    generator=generator,
                    height=args.resolution,
                    width=args.resolution,
                ).images[0]
            image = image.resize((args.resolution, args.resolution))
            images.append(image)
        image_logs.append(
            {"validation_image": validation_image, "images": images, "validation_prompt": validation_prompt}
        )

    tracker_key = "test" if is_final_validation else "validation"
    for tracker in accelerator.trackers:
        if tracker.name == "tensorboard":
            for log in image_logs:
                images = log["images"]
                validation_prompt = log["validation_prompt"]
                validation_image = log["validation_image"]
                formatted_images = []
                formatted_images.append(np.asarray(validation_image))
                for image in images:
                    formatted_images.append(np.asarray(image))
                formatted_images = np.stack(formatted_images)
                tracker.writer.add_images(validation_prompt, formatted_images, step, dataformats="NHWC")
        elif tracker.name == "wandb":
            formatted_images = []
            for log in image_logs:
                images = log["images"]
                validation_prompt = log["validation_prompt"]
                validation_image = log["validation_image"]
                formatted_images.append(wandb.Image(validation_image, caption="Conditioning"))
                for image in images:
                    image = wandb.Image(image, caption=validation_prompt)
                    formatted_images.append(image)

            tracker.log({tracker_key: formatted_images})
        else:
            logger.warning(f"image logging not implemented for {tracker.name}")

    del pipeline
    free_memory()
    return image_logs


def save_model_card(repo_id: str, image_logs=None, base_model: str = None, repo_folder=None):
    img_str = ""
    if image_logs is not None:
        img_str = "You can find some example images below.\n\n"
        for i, log in enumerate(image_logs):
            images = log["images"]
            validation_prompt = log["validation_prompt"]
            validation_image = log["validation_image"]
            validation_image.save(os.path.join(repo_folder, "image_control.png"))
            img_str += f"prompt: {validation_prompt}\n"
            images = [validation_image] + images
            make_image_grid(images, 1, len(images)).save(os.path.join(repo_folder, f"images_{i}.png"))
            img_str += f"![images_{i}](./images_{i}.png)\n"

    model_description = f"""
# cogview4-control-{repo_id}

These are Control weights trained on {base_model} with a new type of conditioning.
{img_str}

## License

Please adhere to the licensing terms as described [here](https://huggingface.co/THUDM/CogView4-6b/blob/main/LICENSE.md).
"""

    model_card = load_or_create_model_card(
        repo_id_or_path=repo_id,
        from_training=True,
        license="other",
        base_model=base_model,
        model_description=model_description,
        inference=True,
    )

    tags = [
        "cogview4",
        "cogview4-diffusers",
        "text-to-image",
        "diffusers",
        "control",
        "diffusers-training",
    ]
    model_card = populate_model_card(model_card, tags=tags)

    model_card.save(os.path.join(repo_folder, "README.md"))


def parse_args(input_args=None):
    parser = argparse.ArgumentParser(description="Simple example of a CogView4 Control training script.")
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--variant",
        type=str,
        default=None,
        help="Variant of the model files of the pretrained model identifier from huggingface.co/models, e.g. fp16",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="cogview4-control",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--resolution",
        type=int,
        default=1024,
        help=(
            "The resolution for input images; all the images in the train/validation dataset will be resized to this"
            " resolution."
        ),
    )
    parser.add_argument(
        "--max_sequence_length", type=int, default=128, help="The maximum sequence length for the prompt."
    )
    parser.add_argument(
        "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument("--num_train_epochs", type=int, default=1)
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=500,
        help=(
            "Save a checkpoint of the training state every X updates. Checkpoints can be used for resuming training via `--resume_from_checkpoint`. "
            "In the case that the checkpoint is better than the final trained model, the checkpoint can also be used for inference. "
            "Using a checkpoint for inference requires separate loading of the original pipeline and the individual checkpointed model components. "
            "See https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint for step-by-step "
            "instructions."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=None,
        help="Max number of checkpoints to store.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--proportion_empty_prompts",
        type=float,
        default=0,
        help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="Whether or not to use gradient checkpointing to save memory at the expense of a slower backward pass.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=5e-6,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=False,
        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--lr_num_cycles",
        type=int,
        default=1,
        help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
    )
    parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
    parser.add_argument(
        "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
    )

    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help="Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process.",
    )
    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--allow_tf32",
        action="store_true",
        help=(
            "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
            " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). bf16 requires PyTorch >="
            " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default=None,
        help=(
            "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
            " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
            " or to a folder containing files that 🤗 Datasets can understand."
        ),
    )
    parser.add_argument(
        "--dataset_config_name",
        type=str,
        default=None,
        help="The config of the Dataset, leave as None if there's only one config.",
    )
    parser.add_argument(
        "--image_column", type=str, default="image", help="The column of the dataset containing the target image."
    )
    parser.add_argument(
        "--conditioning_image_column",
        type=str,
        default="conditioning_image",
        help="The column of the dataset containing the control conditioning image.",
    )
    parser.add_argument(
        "--caption_column",
        type=str,
        default="text",
        help="The column of the dataset containing a caption or a list of captions.",
    )
    parser.add_argument("--log_dataset_samples", action="store_true", help="Whether to log some dataset samples.")
    parser.add_argument(
        "--max_train_samples",
        type=int,
        default=None,
        help=(
            "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        ),
    )
    parser.add_argument(
        "--validation_prompt",
        type=str,
        default=None,
        nargs="+",
        help=(
            "A set of prompts evaluated every `--validation_steps` and logged to `--report_to`."
            " Provide either a matching number of `--validation_image`s, a single `--validation_image`"
            " to be used with all prompts, or a single prompt that will be used with all `--validation_image`s."
        ),
    )
    parser.add_argument(
        "--validation_image",
        type=str,
        default=None,
        nargs="+",
        help=(
            "A set of paths to the control conditioning images to be evaluated every `--validation_steps`"
            " and logged to `--report_to`. Provide either a matching number of `--validation_prompt`s,"
            " a single `--validation_prompt` to be used with all `--validation_image`s, or a single"
            " `--validation_image` that will be used with all `--validation_prompt`s."
        ),
    )
    parser.add_argument(
        "--num_validation_images",
        type=int,
        default=1,
        help="Number of images to be generated for each `--validation_image`, `--validation_prompt` pair.",
    )
    parser.add_argument(
        "--validation_steps",
        type=int,
        default=100,
        help=(
            "Run validation every X steps. Validation consists of running each `--validation_prompt`"
            " `--num_validation_images` times and logging the resulting images."
        ),
    )
    parser.add_argument(
        "--tracker_project_name",
        type=str,
        default="cogview4_train_control",
        help=(
            "The `project_name` argument passed to `Accelerator.init_trackers`. For more information, see"
            " https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator"
        ),
    )
    parser.add_argument(
        "--jsonl_for_train",
        type=str,
        default=None,
        help="Path to the jsonl file containing the training data.",
    )
    parser.add_argument(
        "--only_target_transformer_blocks",
        action="store_true",
        help="Whether to train only the transformer blocks, along with the input layer (`patch_embed.proj`).",
    )
    parser.add_argument(
        "--guidance_scale",
        type=float,
        default=3.5,
        help="The guidance scale used when running validation inference.",
    )

    parser.add_argument(
        "--upcast_before_saving",
        action="store_true",
        help=(
            "Whether to upcast the trained transformer layers to float32 before saving (at the end of training). "
            "Defaults to the precision dtype used for training, to save memory."
        ),
    )

    parser.add_argument(
        "--weighting_scheme",
        type=str,
        default="none",
        choices=["sigma_sqrt", "logit_normal", "mode", "cosmap", "none"],
        help='We default to the "none" weighting scheme for uniform sampling and uniform loss.',
    )
    parser.add_argument(
        "--logit_mean", type=float, default=0.0, help="Mean to use when using the `'logit_normal'` weighting scheme."
    )
    parser.add_argument(
        "--logit_std", type=float, default=1.0, help="Std to use when using the `'logit_normal'` weighting scheme."
    )
    parser.add_argument(
        "--mode_scale",
        type=float,
        default=1.29,
        help="Scale of mode weighting scheme. Only effective when using `'mode'` as the `weighting_scheme`.",
    )
    parser.add_argument(
        "--offload",
        action="store_true",
        help="Whether to offload the VAE and the text encoders to CPU when they are not used.",
    )

    if input_args is not None:
        args = parser.parse_args(input_args)
    else:
        args = parser.parse_args()

    if args.dataset_name is None and args.jsonl_for_train is None:
        raise ValueError("Specify either `--dataset_name` or `--jsonl_for_train`.")

    if args.dataset_name is not None and args.jsonl_for_train is not None:
        raise ValueError("Specify only one of `--dataset_name` or `--jsonl_for_train`.")

    if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1:
        raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].")

    if args.validation_prompt is not None and args.validation_image is None:
        raise ValueError("`--validation_image` must be set if `--validation_prompt` is set.")

    if args.validation_prompt is None and args.validation_image is not None:
        raise ValueError("`--validation_prompt` must be set if `--validation_image` is set.")

    if (
        args.validation_image is not None
        and args.validation_prompt is not None
        and len(args.validation_image) != 1
        and len(args.validation_prompt) != 1
        and len(args.validation_image) != len(args.validation_prompt)
    ):
        raise ValueError(
            "Must provide either 1 `--validation_image`, 1 `--validation_prompt`,"
            " or the same number of `--validation_prompt`s and `--validation_image`s."
        )

    if args.resolution % 8 != 0:
        raise ValueError(
            "`--resolution` must be divisible by 8 for consistently sized encoded images between the VAE and the CogView4 transformer."
        )

    return args
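
# A hypothetical invocation for reference (the script filename, dataset name and
# hyperparameters below are illustrative placeholders, not recommendations):
#
#   accelerate launch train_control_cogview4.py \
#     --pretrained_model_name_or_path="THUDM/CogView4-6B" \
#     --dataset_name="<your-control-dataset>" \
#     --output_dir="cogview4-control" \
#     --mixed_precision="bf16" \
#     --resolution=1024 \
#     --train_batch_size=1 \
#     --gradient_accumulation_steps=4 \
#     --checkpointing_steps=500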


def get_train_dataset(args, accelerator):
    dataset = None
    if args.dataset_name is not None:
        # Downloading and loading a dataset from the hub (or a local path).
        dataset = load_dataset(
            args.dataset_name,
            args.dataset_config_name,
            cache_dir=args.cache_dir,
        )
    if args.jsonl_for_train is not None:
        # Loading from a local JSONL file.
        dataset = load_dataset("json", data_files=args.jsonl_for_train, cache_dir=args.cache_dir)
        dataset = dataset.flatten_indices()

    column_names = dataset["train"].column_names

    # Resolve the column names for input/target.
    if args.image_column is None:
        image_column = column_names[0]
        logger.info(f"image column defaulting to {image_column}")
    else:
        image_column = args.image_column
        if image_column not in column_names:
            raise ValueError(
                f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
            )

    if args.caption_column is None:
        caption_column = column_names[1]
        logger.info(f"caption column defaulting to {caption_column}")
    else:
        caption_column = args.caption_column
        if caption_column not in column_names:
            raise ValueError(
                f"`--caption_column` value '{args.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
            )

    if args.conditioning_image_column is None:
        conditioning_image_column = column_names[2]
        logger.info(f"conditioning image column defaulting to {conditioning_image_column}")
    else:
        conditioning_image_column = args.conditioning_image_column
        if conditioning_image_column not in column_names:
            raise ValueError(
                f"`--conditioning_image_column` value '{args.conditioning_image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
            )

    with accelerator.main_process_first():
        train_dataset = dataset["train"].shuffle(seed=args.seed)
        if args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(args.max_train_samples))
    return train_dataset
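
# With `--jsonl_for_train`, each line is expected to provide the three columns the script
# reads (names configurable via `--image_column`, `--conditioning_image_column` and
# `--caption_column`). A hypothetical record, assuming the default column names:
#
#   {"image": "renders/0001.png", "conditioning_image": "canny/0001.png", "text": "a red sports car"}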


def prepare_train_dataset(dataset, accelerator):
    # NOTE: relies on the module-level `args` defined in the `__main__` block.
    image_transforms = transforms.Compose(
        [
            transforms.Resize((args.resolution, args.resolution), interpolation=transforms.InterpolationMode.BILINEAR),
            transforms.ToTensor(),
            transforms.Lambda(lambda x: x * 2 - 1),
        ]
    )

    def preprocess_train(examples):
        images = [
            (image.convert("RGB") if not isinstance(image, str) else Image.open(image).convert("RGB"))
            for image in examples[args.image_column]
        ]
        images = [image_transforms(image) for image in images]

        conditioning_images = [
            (image.convert("RGB") if not isinstance(image, str) else Image.open(image).convert("RGB"))
            for image in examples[args.conditioning_image_column]
        ]
        conditioning_images = [image_transforms(image) for image in conditioning_images]
        examples["pixel_values"] = images
        examples["conditioning_pixel_values"] = conditioning_images

        # When a sample carries multiple captions, keep the longest one.
        is_caption_list = isinstance(examples[args.caption_column][0], list)
        if is_caption_list:
            examples["captions"] = [max(example, key=len) for example in examples[args.caption_column]]
        else:
            examples["captions"] = list(examples[args.caption_column])

        return examples

    with accelerator.main_process_first():
        dataset = dataset.with_transform(preprocess_train)

    return dataset


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
    conditioning_pixel_values = torch.stack([example["conditioning_pixel_values"] for example in examples])
    conditioning_pixel_values = conditioning_pixel_values.to(memory_format=torch.contiguous_format).float()
    captions = [example["captions"] for example in examples]
    return {"pixel_values": pixel_values, "conditioning_pixel_values": conditioning_pixel_values, "captions": captions}
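
# Each batch produced by `collate_fn` therefore looks like (shapes shown for the default
# `--resolution=1024`; illustrative, not enforced):
#
#   pixel_values:              float32 tensor of shape (B, 3, 1024, 1024), in [-1, 1]
#   conditioning_pixel_values: float32 tensor of shape (B, 3, 1024, 1024), in [-1, 1]
#   captions:                  list of B strings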


def main(args):
    if args.report_to == "wandb" and args.hub_token is not None:
        raise ValueError(
            "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
            " Please use `hf auth login` to authenticate with the Hub."
        )

    logging_out_dir = Path(args.output_dir, args.logging_dir)

    if torch.backends.mps.is_available() and args.mixed_precision == "bf16":
        # bf16 is not yet supported on MPS.
        raise ValueError(
            "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead."
        )

    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=str(logging_out_dir))

    accelerator = Accelerator(
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        mixed_precision=args.mixed_precision,
        log_with=args.report_to,
        project_config=accelerator_project_config,
    )

    # Disable AMP for MPS.
    if torch.backends.mps.is_available():
        logger.info("MPS is enabled. Disabling AMP.")
        accelerator.native_amp = False

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)

    if accelerator.is_local_main_process:
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    # Handle the repository creation.
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)

        if args.push_to_hub:
            repo_id = create_repo(
                repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
            ).repo_id

    # Load the models. The text encoders are loaded later inside a pipeline so prompts
    # can be encoded on the fly.
    vae = AutoencoderKL.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="vae",
        revision=args.revision,
        variant=args.variant,
    )
    cogview4_transformer = CogView4Transformer2DModel.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="transformer",
        revision=args.revision,
        variant=args.variant,
    )
    logger.info("All models loaded successfully")

    noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="scheduler",
    )
    noise_scheduler_copy = copy.deepcopy(noise_scheduler)
    if not args.only_target_transformer_blocks:
        cogview4_transformer.requires_grad_(True)
    vae.requires_grad_(False)

    # For mixed precision training we cast non-trainable weights to half-precision, as
    # these weights are only used for inference and full precision is not required.
    weight_dtype = torch.float32
    if accelerator.mixed_precision == "fp16":
        weight_dtype = torch.float16
    elif accelerator.mixed_precision == "bf16":
        weight_dtype = torch.bfloat16

    # Keep the VAE in float32 for numerically stable encoding.
    vae.to(dtype=torch.float32)

    # Expand the input projection so the transformer accepts the control latents
    # concatenated with the noisy latents along the channel dimension.
    with torch.no_grad():
        patch_size = cogview4_transformer.config.patch_size
        initial_input_channels = cogview4_transformer.config.in_channels * patch_size**2
        new_linear = torch.nn.Linear(
            cogview4_transformer.patch_embed.proj.in_features * 2,
            cogview4_transformer.patch_embed.proj.out_features,
            bias=cogview4_transformer.patch_embed.proj.bias is not None,
            dtype=cogview4_transformer.dtype,
            device=cogview4_transformer.device,
        )
        new_linear.weight.zero_()
        new_linear.weight[:, :initial_input_channels].copy_(cogview4_transformer.patch_embed.proj.weight)
        if cogview4_transformer.patch_embed.proj.bias is not None:
            new_linear.bias.copy_(cogview4_transformer.patch_embed.proj.bias)
        cogview4_transformer.patch_embed.proj = new_linear

    assert torch.all(cogview4_transformer.patch_embed.proj.weight[:, initial_input_channels:].data == 0)
    cogview4_transformer.register_to_config(
        in_channels=cogview4_transformer.config.in_channels * 2, out_channels=cogview4_transformer.config.in_channels
    )
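
    # The zero-init on the new control columns means that, at step 0, the expanded
    # projection is functionally identical to the pretrained one. A minimal sketch of
    # the idea (hypothetical shapes, not taken from the CogView4 config):
    #
    #   old: proj(x)        with x of shape (..., C)
    #   new: proj2([x, c])  with weight [W_old | 0], so proj2([x, c]) == proj(x)
    #
    # Training then gradually learns non-zero weights for the control columns `c`.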

    if args.only_target_transformer_blocks:
        for name, module in cogview4_transformer.named_modules():
            if "transformer_blocks" in name:
                module.requires_grad_(True)
            else:
                module.requires_grad_(False)
        # Re-enable the input projection last so it is trained together with the
        # transformer blocks (the loop above would otherwise freeze it again).
        cogview4_transformer.patch_embed.proj.requires_grad_(True)

    def unwrap_model(model):
        model = accelerator.unwrap_model(model)
        model = model._orig_mod if is_compiled_module(model) else model
        return model

    # Create custom saving & loading hooks so that `accelerator.save_state(...)`
    # serializes the transformer in the diffusers format.
    if version.parse(accelerate.__version__) >= version.parse("0.16.0"):

        def save_model_hook(models, weights, output_dir):
            if accelerator.is_main_process:
                for model in models:
                    if isinstance(unwrap_model(model), type(unwrap_model(cogview4_transformer))):
                        model = unwrap_model(model)
                        model.save_pretrained(os.path.join(output_dir, "transformer"))
                    else:
                        raise ValueError(f"unexpected save model: {model.__class__}")

                    # Pop the weight so that the corresponding model is not saved again.
                    if weights:
                        weights.pop()

        def load_model_hook(models, input_dir):
            transformer_ = None

            if not accelerator.distributed_type == DistributedType.DEEPSPEED:
                while len(models) > 0:
                    model = models.pop()

                    if isinstance(unwrap_model(model), type(unwrap_model(cogview4_transformer))):
                        transformer_ = model
                    else:
                        raise ValueError(f"unexpected save model: {unwrap_model(model).__class__}")
            else:
                transformer_ = CogView4Transformer2DModel.from_pretrained(input_dir, subfolder="transformer")

        accelerator.register_save_state_pre_hook(save_model_hook)
        accelerator.register_load_state_pre_hook(load_model_hook)

    if args.gradient_checkpointing:
        cogview4_transformer.enable_gradient_checkpointing()

    # Enable TF32 for faster training on Ampere GPUs,
    # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
    if args.allow_tf32:
        torch.backends.cuda.matmul.allow_tf32 = True

    if args.scale_lr:
        args.learning_rate = (
            args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
        )

    # Use 8-bit Adam for lower memory usage.
    if args.use_8bit_adam:
        try:
            import bitsandbytes as bnb
        except ImportError:
            raise ImportError(
                "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
            )

        optimizer_class = bnb.optim.AdamW8bit
    else:
        optimizer_class = torch.optim.AdamW

    # Optimization parameters.
    optimizer = optimizer_class(
        cogview4_transformer.parameters(),
        lr=args.learning_rate,
        betas=(args.adam_beta1, args.adam_beta2),
        weight_decay=args.adam_weight_decay,
        eps=args.adam_epsilon,
    )

    # Prepare dataset and dataloader.
    train_dataset = get_train_dataset(args, accelerator)
    train_dataset = prepare_train_dataset(train_dataset, accelerator)
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        shuffle=True,
        collate_fn=collate_fn,
        batch_size=args.train_batch_size,
        num_workers=args.dataloader_num_workers,
    )

    # Scheduler and math around the number of training steps.
    if args.max_train_steps is None:
        len_train_dataloader_after_sharding = math.ceil(len(train_dataloader) / accelerator.num_processes)
        num_update_steps_per_epoch = math.ceil(len_train_dataloader_after_sharding / args.gradient_accumulation_steps)
        num_training_steps_for_scheduler = (
            args.num_train_epochs * num_update_steps_per_epoch * accelerator.num_processes
        )
    else:
        num_training_steps_for_scheduler = args.max_train_steps * accelerator.num_processes

    lr_scheduler = get_scheduler(
        args.lr_scheduler,
        optimizer=optimizer,
        num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
        # Use the pre-computed value: `args.max_train_steps` may still be None at this
        # point, so `args.max_train_steps * accelerator.num_processes` would fail here.
        num_training_steps=num_training_steps_for_scheduler,
        num_cycles=args.lr_num_cycles,
        power=args.lr_power,
    )

    # Prepare everything with our `accelerator`.
    cogview4_transformer, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
        cogview4_transformer, optimizer, train_dataloader, lr_scheduler
    )

    # We need to recalculate our total training steps as the size of the training dataloader may have changed.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
        if num_training_steps_for_scheduler != args.max_train_steps * accelerator.num_processes:
            logger.warning(
                f"The length of the 'train_dataloader' after 'accelerator.prepare' ({len(train_dataloader)}) does not match "
                f"the expected length ({len_train_dataloader_after_sharding}) when the learning rate scheduler was created. "
                f"This inconsistency may result in the learning rate scheduler not functioning properly."
            )
    # Afterwards we recalculate our number of training epochs.
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
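
    # Worked example of the step accounting above (illustrative numbers only): with
    # 1,000 samples, `--train_batch_size=4` and `--gradient_accumulation_steps=2` on a
    # single process, one epoch has ceil(250 / 2) = 125 optimizer updates, so
    # `--num_train_epochs=3` yields `max_train_steps = 375` unless overridden.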

    # We need to initialize the trackers we use, and also store our configuration.
    # The trackers initialize automatically on the main process.
    if accelerator.is_main_process:
        tracker_config = dict(vars(args))

        # Tensorboard cannot handle list types for config.
        tracker_config.pop("validation_prompt")
        tracker_config.pop("validation_image")

        accelerator.init_trackers(args.tracker_project_name, config=tracker_config)

    # Train!
    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num batches each epoch = {len(train_dataloader)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")
    global_step = 0
    first_epoch = 0

    # Load the text encoders inside a pipeline so prompts can be encoded on the fly.
    text_encoding_pipeline = CogView4ControlPipeline.from_pretrained(
        args.pretrained_model_name_or_path, transformer=None, vae=None, torch_dtype=weight_dtype
    )
    tokenizer = text_encoding_pipeline.tokenizer

    # Potentially load in the weights and states from a previous save.
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint != "latest":
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint.
            dirs = os.listdir(args.output_dir)
            dirs = [d for d in dirs if d.startswith("checkpoint")]
            dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
            path = dirs[-1] if len(dirs) > 0 else None

        if path is None:
            logger.info(f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run.")
            args.resume_from_checkpoint = None
            initial_global_step = 0
        else:
            logger.info(f"Resuming from checkpoint {path}")
            accelerator.load_state(os.path.join(args.output_dir, path))
            global_step = int(path.split("-")[1])

            initial_global_step = global_step
            first_epoch = global_step // num_update_steps_per_epoch
    else:
        initial_global_step = 0

    if accelerator.is_main_process and args.report_to == "wandb" and args.log_dataset_samples:
        logger.info("Logging some dataset samples.")
        formatted_images = []
        formatted_control_images = []
        all_prompts = []
        for i, batch in enumerate(train_dataloader):
            images = (batch["pixel_values"] + 1) / 2
            control_images = (batch["conditioning_pixel_values"] + 1) / 2
            prompts = batch["captions"]

            if len(formatted_images) > 10:
                break

            for img, control_img, prompt in zip(images, control_images, prompts):
                formatted_images.append(img)
                formatted_control_images.append(control_img)
                all_prompts.append(prompt)

        logged_artifacts = []
        for img, control_img, prompt in zip(formatted_images, formatted_control_images, all_prompts):
            logged_artifacts.append(wandb.Image(control_img, caption="Conditioning"))
            logged_artifacts.append(wandb.Image(img, caption=prompt))

        wandb_tracker = [tracker for tracker in accelerator.trackers if tracker.name == "wandb"]
        wandb_tracker[0].log({"dataset_samples": logged_artifacts})

    progress_bar = tqdm(
        range(0, args.max_train_steps),
        initial=initial_global_step,
        desc="Steps",
        # Only show the progress bar once on each machine.
        disable=not accelerator.is_local_main_process,
    )

    for epoch in range(first_epoch, args.num_train_epochs):
        cogview4_transformer.train()
        for step, batch in enumerate(train_dataloader):
            with accelerator.accumulate(cogview4_transformer):
                # Tokenize the captions to build the attention mask for the text embeddings.
                prompts = batch["captions"]
                attention_mask = tokenizer(
                    prompts,
                    padding="longest",
                    max_length=args.max_sequence_length,
                    truncation=True,
                    add_special_tokens=True,
                    return_tensors="pt",
                ).attention_mask.float()

                # Encode target and control images into VAE latents.
                pixel_latents = encode_images(batch["pixel_values"], vae.to(accelerator.device), weight_dtype)
                control_latents = encode_images(
                    batch["conditioning_pixel_values"], vae.to(accelerator.device), weight_dtype
                )
                if args.offload:
                    vae.cpu()

                # Sample a random timestep for each image, following the flow-matching
                # density given by `--weighting_scheme`.
                bsz = pixel_latents.shape[0]
                noise = torch.randn_like(pixel_latents, device=accelerator.device, dtype=weight_dtype)
                u = compute_density_for_timestep_sampling(
                    weighting_scheme=args.weighting_scheme,
                    batch_size=bsz,
                    logit_mean=args.logit_mean,
                    logit_std=args.logit_std,
                    mode_scale=args.mode_scale,
                )

                indices = (u * noise_scheduler_copy.config.num_train_timesteps).long()
                timesteps = noise_scheduler_copy.timesteps[indices].to(device=pixel_latents.device)
                sigmas = noise_scheduler_copy.sigmas[indices].to(device=pixel_latents.device)
                captions = batch["captions"]
                image_seq_lens = torch.tensor(
                    pixel_latents.shape[2] * pixel_latents.shape[3] // patch_size**2,
                    dtype=pixel_latents.dtype,
                    device=pixel_latents.device,
                )
                # Resolution-dependent timestep shift, as in the CogView4 pipeline.
                mu = torch.sqrt(image_seq_lens / 256)
                mu = mu * 0.75 + 0.25
                scale_factors = mu / (mu + (1 / sigmas - 1) ** 1.0).to(
                    dtype=pixel_latents.dtype, device=pixel_latents.device
                )
                scale_factors = scale_factors.view(len(batch["captions"]), 1, 1, 1)
                noisy_model_input = (1.0 - scale_factors) * pixel_latents + scale_factors * noise
                concatenated_noisy_model_input = torch.cat([noisy_model_input, control_latents], dim=1)
                text_encoding_pipeline = text_encoding_pipeline.to(accelerator.device)
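
                # In flow matching, the "noisy" input is a straight-line interpolation
                # x_t = (1 - s) * x_0 + s * noise, where s is the (shifted) sigma computed
                # above; larger resolutions get a larger `mu`, pushing training toward
                # noisier timesteps. The control latents are not noised; they are simply
                # concatenated on the channel dimension for the expanded `patch_embed.proj`.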

                with torch.no_grad():
                    (
                        prompt_embeds,
                        pooled_prompt_embeds,
                    ) = text_encoding_pipeline.encode_prompt(captions, "")
                original_size = (args.resolution, args.resolution)
                original_size = torch.tensor([original_size], dtype=prompt_embeds.dtype, device=prompt_embeds.device)

                target_size = (args.resolution, args.resolution)
                target_size = torch.tensor([target_size], dtype=prompt_embeds.dtype, device=prompt_embeds.device)

                target_size = target_size.repeat(len(batch["captions"]), 1)
                original_size = original_size.repeat(len(batch["captions"]), 1)
                crops_coords_top_left = torch.tensor([(0, 0)], dtype=prompt_embeds.dtype, device=prompt_embeds.device)
                crops_coords_top_left = crops_coords_top_left.repeat(len(batch["captions"]), 1)

                # Randomly drop the caption: `pooled_prompt_embeds` holds the embedding
                # of the empty prompt passed as the second argument to `encode_prompt`.
                if args.proportion_empty_prompts and random.random() < args.proportion_empty_prompts:
                    prompt_embeds = pooled_prompt_embeds
                if args.offload:
                    text_encoding_pipeline = text_encoding_pipeline.to("cpu")

                # Predict.
                noise_pred_cond = cogview4_transformer(
                    hidden_states=concatenated_noisy_model_input,
                    encoder_hidden_states=prompt_embeds,
                    timestep=timesteps,
                    original_size=original_size,
                    target_size=target_size,
                    crop_coords=crops_coords_top_left,
                    return_dict=False,
                    attention_mask=attention_mask,
                )[0]

                # These weighting schemes use a uniform timestep sampling and instead
                # post-weight the loss.
                weighting = compute_loss_weighting_for_sd3(weighting_scheme=args.weighting_scheme, sigmas=sigmas)
                # Flow-matching velocity target.
                target = noise - pixel_latents

                weighting = weighting.view(len(batch["captions"]), 1, 1, 1)
                loss = torch.mean(
                    (weighting.float() * (noise_pred_cond.float() - target.float()) ** 2).reshape(target.shape[0], -1),
                    1,
                )
                loss = loss.mean()
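
                # Equivalently, the objective is E[ w(sigma) * || v_theta(x_t) - (noise - x_0) ||^2 ],
                # i.e. the model regresses the constant velocity of the straight path
                # from x_0 to pure noise; with `--weighting_scheme none`, w(sigma) == 1.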
                accelerator.backward(loss)

                if accelerator.sync_gradients:
                    params_to_clip = cogview4_transformer.parameters()
                    accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            # Checks if the accelerator has performed an optimization step behind the scenes.
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1

                # DeepSpeed requires saving weights on every device; saving weights only on the main process would cause issues.
                if accelerator.distributed_type == DistributedType.DEEPSPEED or accelerator.is_main_process:
                    if global_step % args.checkpointing_steps == 0:
                        # Before saving a state, check if this save would set us over the `checkpoints_total_limit`.
                        if args.checkpoints_total_limit is not None:
                            checkpoints = os.listdir(args.output_dir)
                            checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
                            checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))

                            # Before we save the new checkpoint, we need to have at most `checkpoints_total_limit - 1` checkpoints.
                            if len(checkpoints) >= args.checkpoints_total_limit:
                                num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
                                removing_checkpoints = checkpoints[0:num_to_remove]

                                logger.info(
                                    f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
                                )
                                logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")

                                for removing_checkpoint in removing_checkpoints:
                                    removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
                                    shutil.rmtree(removing_checkpoint)

                        save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
                        accelerator.save_state(save_path)
                        logger.info(f"Saved state to {save_path}")

                if args.validation_prompt is not None and global_step % args.validation_steps == 0:
                    image_logs = log_validation(
                        cogview4_transformer=cogview4_transformer,
                        args=args,
                        accelerator=accelerator,
                        weight_dtype=weight_dtype,
                        step=global_step,
                    )

            logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)
            accelerator.log(logs, step=global_step)

            if global_step >= args.max_train_steps:
                break

    # Save the final model.
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        cogview4_transformer = unwrap_model(cogview4_transformer)
        if args.upcast_before_saving:
            cogview4_transformer.to(torch.float32)
        cogview4_transformer.save_pretrained(args.output_dir)

        del cogview4_transformer
        del text_encoding_pipeline
        del vae
        free_memory()

        # Run a final round of validation.
        image_logs = None
        if args.validation_prompt is not None:
            image_logs = log_validation(
                cogview4_transformer=None,
                args=args,
                accelerator=accelerator,
                weight_dtype=weight_dtype,
                step=global_step,
                is_final_validation=True,
            )

        if args.push_to_hub:
            save_model_card(
                repo_id,
                image_logs=image_logs,
                base_model=args.pretrained_model_name_or_path,
                repo_folder=args.output_dir,
            )
            upload_folder(
                repo_id=repo_id,
                folder_path=args.output_dir,
                commit_message="End of training",
                ignore_patterns=["step_*", "epoch_*", "checkpoint-*"],
            )

    accelerator.end_training()


if __name__ == "__main__":
    args = parse_args()
    main(args)