| # coding=utf-8 |
| # Copyright 2025 The HuggingFace Inc. team. All rights reserved. |
| # |
| # Licensed under the Apache License, Version 2.0 (the "License"); |
| # you may not use this file except in compliance with the License. |
| # You may obtain a copy of the License at |
| # |
| #     http://www.apache.org/licenses/LICENSE-2.0 |
| # |
| # Unless required by applicable law or agreed to in writing, software |
| # distributed under the License is distributed on an "AS IS" BASIS, |
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| # See the License for the specific language governing permissions and |
| # limitations under the License. |
| """Fine-tuning script for Stable Diffusion XL for text2image.""" |
|
|
| import argparse |
| import functools |
| import gc |
| import logging |
| import math |
| import os |
| import random |
| import shutil |
| from contextlib import nullcontext |
| from pathlib import Path |
|
|
| import accelerate |
| import datasets |
| import numpy as np |
| import torch |
| import torch.nn.functional as F |
| import torch.utils.checkpoint |
| import transformers |
| from accelerate import Accelerator |
| from accelerate.logging import get_logger |
| from accelerate.utils import DistributedType, ProjectConfiguration, set_seed |
| from datasets import concatenate_datasets, load_dataset |
| from huggingface_hub import create_repo, upload_folder |
| from packaging import version |
| from torchvision import transforms |
| from torchvision.transforms.functional import crop |
| from tqdm.auto import tqdm |
| from transformers import AutoTokenizer, PretrainedConfig |
|
|
| import diffusers |
| from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionXLPipeline, UNet2DConditionModel |
| from diffusers.optimization import get_scheduler |
| from diffusers.training_utils import EMAModel, compute_snr |
| from diffusers.utils import check_min_version, is_wandb_available |
| from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card |
| from diffusers.utils.import_utils import is_torch_npu_available, is_xformers_available |
| from diffusers.utils.torch_utils import is_compiled_module |
|
|
|
|
| # Will error if the minimal version of diffusers is not installed. Remove at your own risks. |
| check_min_version("0.35.0.dev0") |
|
|
| logger = get_logger(__name__) |
| if is_torch_npu_available(): |
| import torch_npu |
|
|
| torch.npu.config.allow_internal_format = False |
|
|
| DATASET_NAME_MAPPING = { |
| "lambdalabs/naruto-blip-captions": ("image", "text"), |
| } |
|
|
|
|
| def save_model_card( |
| repo_id: str, |
| images: list = None, |
| validation_prompt: str = None, |
| base_model: str = None, |
| dataset_name: str = None, |
| repo_folder: str = None, |
| vae_path: str = None, |
| ): |
| img_str = "" |
| if images is not None: |
| for i, image in enumerate(images): |
| image.save(os.path.join(repo_folder, f"image_{i}.png")) |
|             img_str += f"![img_{i}](./image_{i}.png)\n" |
|
|
| model_description = f""" |
| # Text-to-image finetuning - {repo_id} |
| |
| This pipeline was finetuned from **{base_model}** on the **{dataset_name}** dataset. Below are some example images generated with the finetuned pipeline using the prompt "{validation_prompt}":\n |
| {img_str} |
| |
| Special VAE used for training: {vae_path}. |
| """ |
|
|
| model_card = load_or_create_model_card( |
| repo_id_or_path=repo_id, |
| from_training=True, |
| license="creativeml-openrail-m", |
| base_model=base_model, |
| model_description=model_description, |
| inference=True, |
| ) |
|
|
| tags = [ |
| "stable-diffusion-xl", |
| "stable-diffusion-xl-diffusers", |
| "text-to-image", |
| "diffusers-training", |
| "diffusers", |
| ] |
| model_card = populate_model_card(model_card, tags=tags) |
|
|
| model_card.save(os.path.join(repo_folder, "README.md")) |
|
|
|
|
| def import_model_class_from_model_name_or_path( |
| pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" |
| ): |
| text_encoder_config = PretrainedConfig.from_pretrained( |
| pretrained_model_name_or_path, subfolder=subfolder, revision=revision |
| ) |
| model_class = text_encoder_config.architectures[0] |
|
|
| if model_class == "CLIPTextModel": |
| from transformers import CLIPTextModel |
|
|
| return CLIPTextModel |
| elif model_class == "CLIPTextModelWithProjection": |
| from transformers import CLIPTextModelWithProjection |
|
|
| return CLIPTextModelWithProjection |
| else: |
| raise ValueError(f"{model_class} is not supported.") |
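|
| # For the reference SDXL checkpoint layout (an assumption about the checkpoint, |
| # not something this script verifies up front), the "text_encoder" subfolder |
| # resolves to CLIPTextModel and "text_encoder_2" to CLIPTextModelWithProjection, |
| # which is why both branches above are needed. |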
|
|
|
|
| def parse_args(input_args=None): |
| parser = argparse.ArgumentParser(description="Simple example of a training script.") |
| parser.add_argument( |
| "--pretrained_model_name_or_path", |
| type=str, |
| default=None, |
| required=True, |
| help="Path to pretrained model or model identifier from huggingface.co/models.", |
| ) |
| parser.add_argument( |
| "--pretrained_vae_model_name_or_path", |
| type=str, |
| default=None, |
| help="Path to pretrained VAE model with better numerical stability. More details: https://github.com/huggingface/diffusers/pull/4038.", |
| ) |
| parser.add_argument( |
| "--revision", |
| type=str, |
| default=None, |
| required=False, |
| help="Revision of pretrained model identifier from huggingface.co/models.", |
| ) |
| parser.add_argument( |
| "--variant", |
| type=str, |
| default=None, |
|         help="Variant of the model files of the pretrained model identifier from huggingface.co/models, e.g. fp16", |
| ) |
| parser.add_argument( |
| "--dataset_name", |
| type=str, |
| default=None, |
| help=( |
| "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," |
| " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," |
| " or to a folder containing files that 🤗 Datasets can understand." |
| ), |
| ) |
| parser.add_argument( |
| "--dataset_config_name", |
| type=str, |
| default=None, |
| help="The config of the Dataset, leave as None if there's only one config.", |
| ) |
| parser.add_argument( |
| "--train_data_dir", |
| type=str, |
| default=None, |
| help=( |
| "A folder containing the training data. Folder contents must follow the structure described in" |
| " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" |
| " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." |
| ), |
| ) |
| parser.add_argument( |
| "--image_column", type=str, default="image", help="The column of the dataset containing an image." |
| ) |
| parser.add_argument( |
| "--caption_column", |
| type=str, |
| default="text", |
| help="The column of the dataset containing a caption or a list of captions.", |
| ) |
| parser.add_argument( |
| "--validation_prompt", |
| type=str, |
| default=None, |
| help="A prompt that is used during validation to verify that the model is learning.", |
| ) |
| parser.add_argument( |
| "--num_validation_images", |
| type=int, |
| default=4, |
| help="Number of images that should be generated during validation with `validation_prompt`.", |
| ) |
| parser.add_argument( |
| "--validation_epochs", |
| type=int, |
| default=1, |
| help=( |
| "Run fine-tuning validation every X epochs. The validation process consists of running the prompt" |
|             " `args.validation_prompt` a total of `args.num_validation_images` times." |
| ), |
| ) |
| parser.add_argument( |
| "--max_train_samples", |
| type=int, |
| default=None, |
| help=( |
| "For debugging purposes or quicker training, truncate the number of training examples to this " |
| "value if set." |
| ), |
| ) |
| parser.add_argument( |
| "--proportion_empty_prompts", |
| type=float, |
| default=0, |
| help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).", |
| ) |
| parser.add_argument( |
| "--output_dir", |
| type=str, |
| default="sdxl-model-finetuned", |
| help="The output directory where the model predictions and checkpoints will be written.", |
| ) |
| parser.add_argument( |
| "--cache_dir", |
| type=str, |
| default=None, |
| help="The directory where the downloaded models and datasets will be stored.", |
| ) |
| parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") |
| parser.add_argument( |
| "--resolution", |
| type=int, |
| default=1024, |
| help=( |
| "The resolution for input images, all the images in the train/validation dataset will be resized to this" |
| " resolution" |
| ), |
| ) |
| parser.add_argument( |
| "--center_crop", |
| default=False, |
| action="store_true", |
| help=( |
| "Whether to center crop the input images to the resolution. If not set, the images will be randomly" |
| " cropped. The images will be resized to the resolution first before cropping." |
| ), |
| ) |
| parser.add_argument( |
| "--random_flip", |
| action="store_true", |
|         help="Whether to randomly flip images horizontally.", |
| ) |
| parser.add_argument( |
| "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." |
| ) |
| parser.add_argument("--num_train_epochs", type=int, default=100) |
| parser.add_argument( |
| "--max_train_steps", |
| type=int, |
| default=None, |
| help="Total number of training steps to perform. If provided, overrides num_train_epochs.", |
| ) |
| parser.add_argument( |
| "--checkpointing_steps", |
| type=int, |
| default=500, |
| help=( |
| "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" |
| " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" |
| " training using `--resume_from_checkpoint`." |
| ), |
| ) |
| parser.add_argument( |
| "--checkpoints_total_limit", |
| type=int, |
| default=None, |
| help=("Max number of checkpoints to store."), |
| ) |
| parser.add_argument( |
| "--resume_from_checkpoint", |
| type=str, |
| default=None, |
| help=( |
| "Whether training should be resumed from a previous checkpoint. Use a path saved by" |
| ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' |
| ), |
| ) |
| parser.add_argument( |
| "--gradient_accumulation_steps", |
| type=int, |
| default=1, |
| help="Number of updates steps to accumulate before performing a backward/update pass.", |
| ) |
| parser.add_argument( |
| "--gradient_checkpointing", |
| action="store_true", |
| help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", |
| ) |
| parser.add_argument( |
| "--learning_rate", |
| type=float, |
| default=1e-4, |
| help="Initial learning rate (after the potential warmup period) to use.", |
| ) |
| parser.add_argument( |
| "--scale_lr", |
| action="store_true", |
| default=False, |
| help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", |
| ) |
| parser.add_argument( |
| "--lr_scheduler", |
| type=str, |
| default="constant", |
| help=( |
| 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' |
| ' "constant", "constant_with_warmup"]' |
| ), |
| ) |
| parser.add_argument( |
| "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." |
| ) |
| parser.add_argument( |
| "--timestep_bias_strategy", |
| type=str, |
| default="none", |
| choices=["earlier", "later", "range", "none"], |
| help=( |
| "The timestep bias strategy, which may help direct the model toward learning low or high frequency details." |
| " Choices: ['earlier', 'later', 'range', 'none']." |
| " The default is 'none', which means no bias is applied, and training proceeds normally." |
| " The value of 'later' will increase the frequency of the model's final training timesteps." |
| ), |
| ) |
| parser.add_argument( |
| "--timestep_bias_multiplier", |
| type=float, |
| default=1.0, |
| help=( |
| "The multiplier for the bias. Defaults to 1.0, which means no bias is applied." |
| " A value of 2.0 will double the weight of the bias, and a value of 0.5 will halve it." |
| ), |
| ) |
| parser.add_argument( |
| "--timestep_bias_begin", |
| type=int, |
| default=0, |
| help=( |
| "When using `--timestep_bias_strategy=range`, the beginning (inclusive) timestep to bias." |
| " Defaults to zero, which equates to having no specific bias." |
| ), |
| ) |
| parser.add_argument( |
| "--timestep_bias_end", |
| type=int, |
| default=1000, |
| help=( |
| "When using `--timestep_bias_strategy=range`, the final timestep (inclusive) to bias." |
| " Defaults to 1000, which is the number of timesteps that Stable Diffusion is trained on." |
| ), |
| ) |
| parser.add_argument( |
| "--timestep_bias_portion", |
| type=float, |
| default=0.25, |
| help=( |
|             "The portion of timesteps to bias. Defaults to 0.25, meaning 25% of timesteps will be biased." |
| " A value of 0.5 will bias one half of the timesteps. The value provided for `--timestep_bias_strategy` determines" |
| " whether the biased portions are in the earlier or later timesteps." |
| ), |
| ) |
| parser.add_argument( |
| "--snr_gamma", |
| type=float, |
| default=None, |
| help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. " |
| "More details here: https://huggingface.co/papers/2303.09556.", |
| ) |
| parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") |
| parser.add_argument( |
| "--allow_tf32", |
| action="store_true", |
| help=( |
| "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" |
| " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" |
| ), |
| ) |
| parser.add_argument( |
| "--dataloader_num_workers", |
| type=int, |
| default=0, |
| help=( |
| "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." |
| ), |
| ) |
| parser.add_argument( |
| "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." |
| ) |
| parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") |
| parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") |
| parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") |
| parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") |
| parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") |
| parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") |
| parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") |
| parser.add_argument( |
| "--prediction_type", |
| type=str, |
| default=None, |
|         help="The prediction_type to use for training. Choose between 'epsilon' and 'v_prediction', or leave it as `None`, in which case the scheduler's default prediction type (`noise_scheduler.config.prediction_type`) is used.", |
| ) |
| parser.add_argument( |
| "--hub_model_id", |
| type=str, |
| default=None, |
| help="The name of the repository to keep in sync with the local `output_dir`.", |
| ) |
| parser.add_argument( |
| "--logging_dir", |
| type=str, |
| default="logs", |
| help=( |
| "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" |
| " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." |
| ), |
| ) |
| parser.add_argument( |
| "--report_to", |
| type=str, |
| default="tensorboard", |
| help=( |
| 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' |
| ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' |
| ), |
| ) |
| parser.add_argument( |
| "--mixed_precision", |
| type=str, |
| default=None, |
| choices=["no", "fp16", "bf16"], |
| help=( |
| "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" |
|             " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the" |
| " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." |
| ), |
| ) |
| parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") |
| parser.add_argument( |
| "--enable_npu_flash_attention", action="store_true", help="Whether or not to use npu flash attention." |
| ) |
| parser.add_argument( |
| "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." |
| ) |
| parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.") |
| parser.add_argument( |
| "--image_interpolation_mode", |
| type=str, |
| default="lanczos", |
| choices=[ |
| f.lower() for f in dir(transforms.InterpolationMode) if not f.startswith("__") and not f.endswith("__") |
| ], |
| help="The image interpolation method to use for resizing images.", |
| ) |
|
|
| if input_args is not None: |
| args = parser.parse_args(input_args) |
| else: |
| args = parser.parse_args() |
|
|
| env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) |
| if env_local_rank != -1 and env_local_rank != args.local_rank: |
| args.local_rank = env_local_rank |
|
|
|     # Sanity checks |
| if args.dataset_name is None and args.train_data_dir is None: |
| raise ValueError("Need either a dataset name or a training folder.") |
| if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1: |
| raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].") |
|
|
| return args |
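|
| # parse_args also accepts an explicit argv-style list, which is convenient for |
| # notebooks and tests (a sketch; the flag value is a placeholder): |
| # |
| #   args = parse_args(["--pretrained_model_name_or_path", "stabilityai/stable-diffusion-xl-base-1.0"]) |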
|
|
|
|
| # Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt |
| def encode_prompt(batch, text_encoders, tokenizers, proportion_empty_prompts, caption_column, is_train=True): |
| prompt_embeds_list = [] |
| prompt_batch = batch[caption_column] |
|
|
| captions = [] |
| for caption in prompt_batch: |
| if random.random() < proportion_empty_prompts: |
| captions.append("") |
| elif isinstance(caption, str): |
| captions.append(caption) |
| elif isinstance(caption, (list, np.ndarray)): |
|             # take a random caption if there are multiple |
| captions.append(random.choice(caption) if is_train else caption[0]) |
|
|
| with torch.no_grad(): |
| for tokenizer, text_encoder in zip(tokenizers, text_encoders): |
| text_inputs = tokenizer( |
| captions, |
| padding="max_length", |
| max_length=tokenizer.model_max_length, |
| truncation=True, |
| return_tensors="pt", |
| ) |
| text_input_ids = text_inputs.input_ids |
| prompt_embeds = text_encoder( |
| text_input_ids.to(text_encoder.device), |
| output_hidden_states=True, |
| return_dict=False, |
| ) |
|
|
|             # We are only ALWAYS interested in the pooled output of the final text encoder |
| pooled_prompt_embeds = prompt_embeds[0] |
| prompt_embeds = prompt_embeds[-1][-2] |
| bs_embed, seq_len, _ = prompt_embeds.shape |
| prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1) |
| prompt_embeds_list.append(prompt_embeds) |
|
|
| prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) |
| pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1) |
| return {"prompt_embeds": prompt_embeds.cpu(), "pooled_prompt_embeds": pooled_prompt_embeds.cpu()} |
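|
| # Shape sketch for the two SDXL text encoders (assuming the base SDXL |
| # configuration with 77-token padding): the encoders emit hidden states of |
| # (bs, 77, 768) and (bs, 77, 1280); concatenating along the last dim gives |
| # prompt_embeds of shape (bs, 77, 2048), while pooled_prompt_embeds (bs, 1280) |
| # comes from the final (projection) text encoder. |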
|
|
|
|
| def compute_vae_encodings(batch, vae): |
| images = batch.pop("pixel_values") |
| pixel_values = torch.stack(list(images)) |
| pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() |
| pixel_values = pixel_values.to(vae.device, dtype=vae.dtype) |
|
|
| with torch.no_grad(): |
| model_input = vae.encode(pixel_values).latent_dist.sample() |
| model_input = model_input * vae.config.scaling_factor |
|
|
|     # There might be a slight performance improvement by changing |
|     # model_input.cpu() to accelerator.gather(model_input). |
| return {"model_input": model_input.cpu()} |
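|
| # With the stock SDXL VAE (an assumption; other VAEs may differ), the latents |
| # returned above have shape (bs, 4, resolution // 8, resolution // 8) and are |
| # already multiplied by vae.config.scaling_factor (0.13025 for SDXL), which is |
| # the scale the UNet expects as input. |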
|
|
|
|
| def generate_timestep_weights(args, num_timesteps): |
| weights = torch.ones(num_timesteps) |
|
|
|     # Determine the indices to bias. |
| num_to_bias = int(args.timestep_bias_portion * num_timesteps) |
|
|
| if args.timestep_bias_strategy == "later": |
| bias_indices = slice(-num_to_bias, None) |
| elif args.timestep_bias_strategy == "earlier": |
| bias_indices = slice(0, num_to_bias) |
| elif args.timestep_bias_strategy == "range": |
|         # Bias timesteps within a specific range. |
| range_begin = args.timestep_bias_begin |
| range_end = args.timestep_bias_end |
| if range_begin < 0: |
| raise ValueError( |
|                 "When using the range strategy for timestep bias, you must provide a beginning timestep greater than or equal to zero." |
| ) |
| if range_end > num_timesteps: |
| raise ValueError( |
| "When using the range strategy for timestep bias, you must provide an ending timestep smaller than the number of timesteps." |
| ) |
| bias_indices = slice(range_begin, range_end) |
| else: |
| return weights |
| if args.timestep_bias_multiplier <= 0: |
|         raise ValueError( |
| "The parameter --timestep_bias_multiplier is not intended to be used to disable the training of specific timesteps." |
| " If it was intended to disable timestep bias, use `--timestep_bias_strategy none` instead." |
| " A timestep bias multiplier less than or equal to 0 is not allowed." |
| ) |
|
|
|     # Apply the bias to the selected indices. |
| weights[bias_indices] *= args.timestep_bias_multiplier |
|
|
|     # Normalize the weights so they form a sampling distribution. |
| weights /= weights.sum() |
|
|
| return weights |
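|
| # Intuition sketch (hypothetical values, nothing here is executed): with |
| # num_timesteps=1000, strategy="later", portion=0.25 and multiplier=2.0, |
| # timesteps 750-999 get twice the unnormalized weight, so after normalization |
| # they are sampled twice as often by torch.multinomial in the training loop. |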
|
|
|
|
| def main(args): |
| if args.report_to == "wandb" and args.hub_token is not None: |
| raise ValueError( |
| "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." |
| " Please use `huggingface-cli login` to authenticate with the Hub." |
| ) |
|
|
| logging_dir = Path(args.output_dir, args.logging_dir) |
|
|
| accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) |
|
|
| if torch.backends.mps.is_available() and args.mixed_precision == "bf16": |
|         # due to pytorch#99272, MPS does not yet support bfloat16. |
| raise ValueError( |
| "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead." |
| ) |
|
|
| accelerator = Accelerator( |
| gradient_accumulation_steps=args.gradient_accumulation_steps, |
| mixed_precision=args.mixed_precision, |
| log_with=args.report_to, |
| project_config=accelerator_project_config, |
| ) |
|
|
|     # Disable AMP for MPS. |
| if torch.backends.mps.is_available(): |
| accelerator.native_amp = False |
|
|
| if args.report_to == "wandb": |
| if not is_wandb_available(): |
| raise ImportError("Make sure to install wandb if you want to use it for logging during training.") |
| import wandb |
|
|
|     # Make one log on every process with the configuration for debugging. |
| logging.basicConfig( |
| format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", |
| datefmt="%m/%d/%Y %H:%M:%S", |
| level=logging.INFO, |
| ) |
| logger.info(accelerator.state, main_process_only=False) |
| if accelerator.is_local_main_process: |
| datasets.utils.logging.set_verbosity_warning() |
| transformers.utils.logging.set_verbosity_warning() |
| diffusers.utils.logging.set_verbosity_info() |
| else: |
| datasets.utils.logging.set_verbosity_error() |
| transformers.utils.logging.set_verbosity_error() |
| diffusers.utils.logging.set_verbosity_error() |
|
|
|     # If passed along, set the training seed now. |
| if args.seed is not None: |
| set_seed(args.seed) |
|
|
|     # Handle the repository creation |
| if accelerator.is_main_process: |
| if args.output_dir is not None: |
| os.makedirs(args.output_dir, exist_ok=True) |
|
|
| if args.push_to_hub: |
| repo_id = create_repo( |
| repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token |
| ).repo_id |
|
|
|     # Load the tokenizers |
| tokenizer_one = AutoTokenizer.from_pretrained( |
| args.pretrained_model_name_or_path, |
| subfolder="tokenizer", |
| revision=args.revision, |
| use_fast=False, |
| ) |
| tokenizer_two = AutoTokenizer.from_pretrained( |
| args.pretrained_model_name_or_path, |
| subfolder="tokenizer_2", |
| revision=args.revision, |
| use_fast=False, |
| ) |
|
|
|     # import correct text encoder classes |
| text_encoder_cls_one = import_model_class_from_model_name_or_path( |
| args.pretrained_model_name_or_path, args.revision |
| ) |
| text_encoder_cls_two = import_model_class_from_model_name_or_path( |
| args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2" |
| ) |
|
|
|     # Load scheduler and models |
| noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") |
| |
| text_encoder_one = text_encoder_cls_one.from_pretrained( |
| args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant |
| ) |
| text_encoder_two = text_encoder_cls_two.from_pretrained( |
| args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant |
| ) |
| vae_path = ( |
| args.pretrained_model_name_or_path |
| if args.pretrained_vae_model_name_or_path is None |
| else args.pretrained_vae_model_name_or_path |
| ) |
| vae = AutoencoderKL.from_pretrained( |
| vae_path, |
| subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, |
| revision=args.revision, |
| variant=args.variant, |
| ) |
| unet = UNet2DConditionModel.from_pretrained( |
| args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant |
| ) |
|
|
|     # Freeze vae and text encoders. |
| vae.requires_grad_(False) |
| text_encoder_one.requires_grad_(False) |
| text_encoder_two.requires_grad_(False) |
|     # Set unet as trainable. |
| unet.train() |
|
|
|     # For mixed precision training we cast all non-trainable weights to half-precision |
|     # as these weights are only used for inference, keeping weights in full precision is not required. |
| weight_dtype = torch.float32 |
| if accelerator.mixed_precision == "fp16": |
| weight_dtype = torch.float16 |
| elif accelerator.mixed_precision == "bf16": |
| weight_dtype = torch.bfloat16 |
|
|
|     # The VAE is kept in float32 to avoid NaN losses. |
| vae.to(accelerator.device, dtype=torch.float32) |
| text_encoder_one.to(accelerator.device, dtype=weight_dtype) |
| text_encoder_two.to(accelerator.device, dtype=weight_dtype) |
|
|
|     # Create EMA for the unet. |
| if args.use_ema: |
| ema_unet = UNet2DConditionModel.from_pretrained( |
| args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant |
| ) |
| ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config) |
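|
|         # Sketch of the EMA lifecycle used further below (a map, not new logic): |
|         # ema_unet.step(unet.parameters()) after each optimizer step, |
|         # store()/copy_to() around validation plus restore() afterwards, and a |
|         # final copy_to() before the pipeline is serialized. |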
| if args.enable_npu_flash_attention: |
| if is_torch_npu_available(): |
| logger.info("npu flash attention enabled.") |
| unet.enable_npu_flash_attention() |
| else: |
| raise ValueError("npu flash attention requires torch_npu extensions and is supported only on npu devices.") |
| if args.enable_xformers_memory_efficient_attention: |
| if is_xformers_available(): |
| import xformers |
|
|
| xformers_version = version.parse(xformers.__version__) |
| if xformers_version == version.parse("0.0.16"): |
| logger.warning( |
| "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." |
| ) |
| unet.enable_xformers_memory_efficient_attention() |
| else: |
| raise ValueError("xformers is not available. Make sure it is installed correctly") |
|
|
|     # `accelerate` 0.16.0 will have better support for customized saving |
| if version.parse(accelerate.__version__) >= version.parse("0.16.0"): |
|         # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format |
| def save_model_hook(models, weights, output_dir): |
| if accelerator.is_main_process: |
| if args.use_ema: |
| ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) |
|
|
| for i, model in enumerate(models): |
| model.save_pretrained(os.path.join(output_dir, "unet")) |
|
|
|                     # make sure to pop weight so that corresponding model is not saved again |
| if weights: |
| weights.pop() |
|
|
| def load_model_hook(models, input_dir): |
| if args.use_ema: |
| load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) |
| ema_unet.load_state_dict(load_model.state_dict()) |
| ema_unet.to(accelerator.device) |
| del load_model |
|
|
| for _ in range(len(models)): |
|                 # pop models so that they are not loaded again |
| model = models.pop() |
|
|
|                 # load diffusers style into model |
| load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") |
| model.register_to_config(**load_model.config) |
|
|
| model.load_state_dict(load_model.state_dict()) |
| del load_model |
|
|
| accelerator.register_save_state_pre_hook(save_model_hook) |
| accelerator.register_load_state_pre_hook(load_model_hook) |
|
|
| if args.gradient_checkpointing: |
| unet.enable_gradient_checkpointing() |
|
|
|     # Enable TF32 for faster training on Ampere GPUs, |
|     # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices |
| if args.allow_tf32: |
| torch.backends.cuda.matmul.allow_tf32 = True |
|
|
| if args.scale_lr: |
| args.learning_rate = ( |
| args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes |
| ) |
|
|
|     # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs |
| if args.use_8bit_adam: |
| try: |
| import bitsandbytes as bnb |
| except ImportError: |
| raise ImportError( |
| "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." |
| ) |
|
|
| optimizer_class = bnb.optim.AdamW8bit |
| else: |
| optimizer_class = torch.optim.AdamW |
|
|
|     # Optimizer creation |
| params_to_optimize = unet.parameters() |
| optimizer = optimizer_class( |
| params_to_optimize, |
| lr=args.learning_rate, |
| betas=(args.adam_beta1, args.adam_beta2), |
| weight_decay=args.adam_weight_decay, |
| eps=args.adam_epsilon, |
| ) |
|
|
|     # Get the datasets: you can either provide your own training files (see below) |
|     # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). |
|
|
|     # In distributed training, the load_dataset function guarantees that only one local process can |
|     # concurrently download the dataset. |
| if args.dataset_name is not None: |
|         # Downloading and loading a dataset from the hub. |
| dataset = load_dataset( |
| args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, data_dir=args.train_data_dir |
| ) |
| else: |
| data_files = {} |
| if args.train_data_dir is not None: |
| data_files["train"] = os.path.join(args.train_data_dir, "**") |
| dataset = load_dataset( |
| "imagefolder", |
| data_files=data_files, |
| cache_dir=args.cache_dir, |
| ) |
|         # See more about loading custom images at |
|         # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder |
|
|
|     # Preprocessing the datasets. |
|     # First, figure out which dataset columns hold the images and the captions. |
| column_names = dataset["train"].column_names |
|
|
|     # Get the column names for input/target. |
| dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) |
| if args.image_column is None: |
| image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] |
| else: |
| image_column = args.image_column |
| if image_column not in column_names: |
| raise ValueError( |
|                 f"`--image_column` value '{args.image_column}' needs to be one of: {', '.join(column_names)}" |
| ) |
| if args.caption_column is None: |
| caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] |
| else: |
| caption_column = args.caption_column |
| if caption_column not in column_names: |
| raise ValueError( |
|                 f"`--caption_column` value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" |
| ) |
|
|
|     # Image preprocessing: resize, crop, (optional) flip, normalize. |
| interpolation = getattr(transforms.InterpolationMode, args.image_interpolation_mode.upper(), None) |
| if interpolation is None: |
|         raise ValueError(f"Unsupported interpolation mode {args.image_interpolation_mode}.") |
| train_resize = transforms.Resize(args.resolution, interpolation=interpolation) |
| train_crop = transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution) |
| train_flip = transforms.RandomHorizontalFlip(p=1.0) |
| train_transforms = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]) |
|
|
| def preprocess_train(examples): |
| images = [image.convert("RGB") for image in examples[image_column]] |
|         # Image augmentation; track original sizes and crop coordinates for SDXL's micro-conditioning. |
| original_sizes = [] |
| all_images = [] |
| crop_top_lefts = [] |
| for image in images: |
| original_sizes.append((image.height, image.width)) |
| image = train_resize(image) |
| if args.random_flip and random.random() < 0.5: |
|                 # horizontal flip |
| image = train_flip(image) |
| if args.center_crop: |
| y1 = max(0, int(round((image.height - args.resolution) / 2.0))) |
| x1 = max(0, int(round((image.width - args.resolution) / 2.0))) |
| image = train_crop(image) |
| else: |
| y1, x1, h, w = train_crop.get_params(image, (args.resolution, args.resolution)) |
| image = crop(image, y1, x1, h, w) |
| crop_top_left = (y1, x1) |
| crop_top_lefts.append(crop_top_left) |
| image = train_transforms(image) |
| all_images.append(image) |
|
|
| examples["original_sizes"] = original_sizes |
| examples["crop_top_lefts"] = crop_top_lefts |
| examples["pixel_values"] = all_images |
| return examples |
|
|
| with accelerator.main_process_first(): |
| if args.max_train_samples is not None: |
| dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) |
|         # Set the training transforms |
| train_dataset = dataset["train"].with_transform(preprocess_train) |
|
|
|     # Let's first compute all the embeddings so that we can free up the text encoders |
|     # from memory. We will pre-compute the VAE encodings too. |
| text_encoders = [text_encoder_one, text_encoder_two] |
| tokenizers = [tokenizer_one, tokenizer_two] |
| compute_embeddings_fn = functools.partial( |
| encode_prompt, |
| text_encoders=text_encoders, |
| tokenizers=tokenizers, |
| proportion_empty_prompts=args.proportion_empty_prompts, |
| caption_column=args.caption_column, |
| ) |
| compute_vae_encodings_fn = functools.partial(compute_vae_encodings, vae=vae) |
| with accelerator.main_process_first(): |
| from datasets.fingerprint import Hasher |
|
|
|         # The fingerprint is used by the cache for the other processes to load the result. |
|         # Details: https://github.com/huggingface/diffusers/pull/4038#discussion_r1266078401 |
| new_fingerprint = Hasher.hash(args) |
| new_fingerprint_for_vae = Hasher.hash((vae_path, args)) |
| train_dataset_with_embeddings = train_dataset.map( |
| compute_embeddings_fn, batched=True, new_fingerprint=new_fingerprint |
| ) |
| train_dataset_with_vae = train_dataset.map( |
| compute_vae_encodings_fn, |
| batched=True, |
| batch_size=args.train_batch_size, |
| new_fingerprint=new_fingerprint_for_vae, |
| ) |
| precomputed_dataset = concatenate_datasets( |
| [train_dataset_with_embeddings, train_dataset_with_vae.remove_columns(["image", "text"])], axis=1 |
| ) |
| precomputed_dataset = precomputed_dataset.with_transform(preprocess_train) |
|
|
| del compute_vae_encodings_fn, compute_embeddings_fn, text_encoder_one, text_encoder_two |
| del text_encoders, tokenizers, vae |
| gc.collect() |
| if is_torch_npu_available(): |
| torch_npu.npu.empty_cache() |
| elif torch.cuda.is_available(): |
| torch.cuda.empty_cache() |
|
|
| def collate_fn(examples): |
| model_input = torch.stack([torch.tensor(example["model_input"]) for example in examples]) |
| original_sizes = [example["original_sizes"] for example in examples] |
| crop_top_lefts = [example["crop_top_lefts"] for example in examples] |
| prompt_embeds = torch.stack([torch.tensor(example["prompt_embeds"]) for example in examples]) |
| pooled_prompt_embeds = torch.stack([torch.tensor(example["pooled_prompt_embeds"]) for example in examples]) |
|
|
| return { |
| "model_input": model_input, |
| "prompt_embeds": prompt_embeds, |
| "pooled_prompt_embeds": pooled_prompt_embeds, |
| "original_sizes": original_sizes, |
| "crop_top_lefts": crop_top_lefts, |
| } |
|
|
|     # DataLoaders creation: |
| train_dataloader = torch.utils.data.DataLoader( |
| precomputed_dataset, |
| shuffle=True, |
| collate_fn=collate_fn, |
| batch_size=args.train_batch_size, |
| num_workers=args.dataloader_num_workers, |
| ) |
|
|
|     # Scheduler and math around the number of training steps. |
| overrode_max_train_steps = False |
| num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) |
| if args.max_train_steps is None: |
| args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch |
| overrode_max_train_steps = True |
|
|
| lr_scheduler = get_scheduler( |
| args.lr_scheduler, |
| optimizer=optimizer, |
| num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, |
| num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, |
| ) |
|
|
|     # Prepare everything with our `accelerator`. |
| unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( |
| unet, optimizer, train_dataloader, lr_scheduler |
| ) |
|
|
| if args.use_ema: |
| ema_unet.to(accelerator.device) |
|
|
|     # We need to recalculate our total training steps as the size of the training dataloader may have changed. |
| num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) |
| if overrode_max_train_steps: |
| args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch |
|     # Afterwards we recalculate our number of training epochs |
| args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) |
|
|
|     # We need to initialize the trackers we use, and also store our configuration. |
|     # The trackers initialize automatically on the main process. |
| if accelerator.is_main_process: |
| accelerator.init_trackers("text2image-fine-tune-sdxl", config=vars(args)) |
|
|
|     # Function for unwrapping if torch.compile() was used in accelerate. |
| def unwrap_model(model): |
| model = accelerator.unwrap_model(model) |
| model = model._orig_mod if is_compiled_module(model) else model |
| return model |
|
|
| if torch.backends.mps.is_available() or "playground" in args.pretrained_model_name_or_path: |
| autocast_ctx = nullcontext() |
| else: |
| autocast_ctx = torch.autocast(accelerator.device.type) |
|
|
|     # Train! |
| total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps |
|
|
| logger.info("***** Running training *****") |
| logger.info(f" Num examples = {len(precomputed_dataset)}") |
| logger.info(f" Num Epochs = {args.num_train_epochs}") |
| logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") |
| logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") |
| logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") |
| logger.info(f" Total optimization steps = {args.max_train_steps}") |
| global_step = 0 |
| first_epoch = 0 |
|
|
|     # Potentially load in the weights and states from a previous save |
| if args.resume_from_checkpoint: |
| if args.resume_from_checkpoint != "latest": |
| path = os.path.basename(args.resume_from_checkpoint) |
| else: |
|             # Get the most recent checkpoint |
| dirs = os.listdir(args.output_dir) |
| dirs = [d for d in dirs if d.startswith("checkpoint")] |
| dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) |
| path = dirs[-1] if len(dirs) > 0 else None |
|
|
| if path is None: |
| accelerator.print( |
| f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." |
| ) |
| args.resume_from_checkpoint = None |
| initial_global_step = 0 |
| else: |
| accelerator.print(f"Resuming from checkpoint {path}") |
| accelerator.load_state(os.path.join(args.output_dir, path)) |
| global_step = int(path.split("-")[1]) |
|
|
| initial_global_step = global_step |
| first_epoch = global_step // num_update_steps_per_epoch |
|
|
| else: |
| initial_global_step = 0 |
|
|
| progress_bar = tqdm( |
| range(0, args.max_train_steps), |
| initial=initial_global_step, |
| desc="Steps", |
|         # Only show the progress bar once on each machine. |
| disable=not accelerator.is_local_main_process, |
| ) |
|
|
| for epoch in range(first_epoch, args.num_train_epochs): |
| train_loss = 0.0 |
| for step, batch in enumerate(train_dataloader): |
| with accelerator.accumulate(unet): |
|                 # Sample noise that we'll add to the latents |
| model_input = batch["model_input"].to(accelerator.device) |
| noise = torch.randn_like(model_input) |
| if args.noise_offset: |
|                     # https://www.crosslabs.org/blog/diffusion-with-offset-noise |
| noise += args.noise_offset * torch.randn( |
| (model_input.shape[0], model_input.shape[1], 1, 1), device=model_input.device |
| ) |
|
|
| bsz = model_input.shape[0] |
| if args.timestep_bias_strategy == "none": |
|                     # Sample a random timestep for each image without bias. |
| timesteps = torch.randint( |
| 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device |
| ) |
| else: |
|                     # Sample a random timestep for each image, potentially biased by the timestep weights. |
|                     # Biasing the timestep weights allows us to spend less time training irrelevant timesteps. |
| weights = generate_timestep_weights(args, noise_scheduler.config.num_train_timesteps).to( |
| model_input.device |
| ) |
| timesteps = torch.multinomial(weights, bsz, replacement=True).long() |
|
|
|                 # Add noise to the model input according to the noise magnitude at each timestep |
|                 # (this is the forward diffusion process) |
| noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps).to(dtype=weight_dtype) |
|
|
|                 # time ids |
| def compute_time_ids(original_size, crops_coords_top_left): |
|                     # Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids |
| target_size = (args.resolution, args.resolution) |
| add_time_ids = list(original_size + crops_coords_top_left + target_size) |
| add_time_ids = torch.tensor([add_time_ids], device=accelerator.device, dtype=weight_dtype) |
| return add_time_ids |
|
|
| add_time_ids = torch.cat( |
| [compute_time_ids(s, c) for s, c in zip(batch["original_sizes"], batch["crop_top_lefts"])] |
| ) |
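|
|                 # Worked example (hypothetical numbers): a source image of height |
|                 # 768 and width 1024, cropped at top-left (0, 128) and trained at |
|                 # 1024x1024, yields time ids (768, 1024, 0, 128, 1024, 1024), |
|                 # i.e. original size + crop top-left + target size, which are |
|                 # SDXL's micro-conditioning inputs. |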
|
|
|                 # Predict the noise residual |
| unet_added_conditions = {"time_ids": add_time_ids} |
| prompt_embeds = batch["prompt_embeds"].to(accelerator.device, dtype=weight_dtype) |
| pooled_prompt_embeds = batch["pooled_prompt_embeds"].to(accelerator.device) |
| unet_added_conditions.update({"text_embeds": pooled_prompt_embeds}) |
| model_pred = unet( |
| noisy_model_input, |
| timesteps, |
| prompt_embeds, |
| added_cond_kwargs=unet_added_conditions, |
| return_dict=False, |
| )[0] |
|
|
|                 # Get the target for loss depending on the prediction type |
| if args.prediction_type is not None: |
|                     # set prediction_type of scheduler if defined |
| noise_scheduler.register_to_config(prediction_type=args.prediction_type) |
|
|
| if noise_scheduler.config.prediction_type == "epsilon": |
| target = noise |
| elif noise_scheduler.config.prediction_type == "v_prediction": |
| target = noise_scheduler.get_velocity(model_input, noise, timesteps) |
| elif noise_scheduler.config.prediction_type == "sample": |
|                     # We set the target to latents here, but the model will return the noise of the sample instead of the latents. |
| target = model_input |
|                     # We will have to subtract the noise residual from the prediction to get the target sample. |
| model_pred = model_pred - noise |
| else: |
| raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") |
|
|
| if args.snr_gamma is None: |
| loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") |
| else: |
|                     # Compute loss-weights as per Section 3.4 of https://huggingface.co/papers/2303.09556. |
|                     # Since we predict the noise instead of x_0, the original formulation is slightly changed. |
|                     # This is discussed in Section 4.2 of the same paper. |
| snr = compute_snr(noise_scheduler, timesteps) |
| mse_loss_weights = torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min( |
| dim=1 |
| )[0] |
| if noise_scheduler.config.prediction_type == "epsilon": |
| mse_loss_weights = mse_loss_weights / snr |
| elif noise_scheduler.config.prediction_type == "v_prediction": |
| mse_loss_weights = mse_loss_weights / (snr + 1) |
|
|
| loss = F.mse_loss(model_pred.float(), target.float(), reduction="none") |
| loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights |
| loss = loss.mean() |
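|
|                     # Net effect for epsilon prediction (a summary, assuming the |
|                     # branch above): each sample is weighted by |
|                     # min(SNR(t), snr_gamma) / SNR(t), so low-noise (high-SNR) |
|                     # timesteps are down-weighted while timesteps with |
|                     # SNR <= snr_gamma keep weight 1. |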
|
|
|                 # Gather the losses across all processes for logging (if we use distributed training). |
| avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() |
| train_loss += avg_loss.item() / args.gradient_accumulation_steps |
|
|
|                 # Backpropagate |
| accelerator.backward(loss) |
| if accelerator.sync_gradients: |
| params_to_clip = unet.parameters() |
| accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) |
| optimizer.step() |
| lr_scheduler.step() |
| optimizer.zero_grad() |
|
|
|             # Checks if the accelerator has performed an optimization step behind the scenes |
| if accelerator.sync_gradients: |
| if args.use_ema: |
| ema_unet.step(unet.parameters()) |
| progress_bar.update(1) |
| global_step += 1 |
| accelerator.log({"train_loss": train_loss}, step=global_step) |
| train_loss = 0.0 |
|
|
|                 # DeepSpeed requires saving weights on every device; saving weights only on the main process would cause issues. |
| if accelerator.distributed_type == DistributedType.DEEPSPEED or accelerator.is_main_process: |
| if global_step % args.checkpointing_steps == 0: |
|                         # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` |
| if args.checkpoints_total_limit is not None: |
| checkpoints = os.listdir(args.output_dir) |
| checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] |
| checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) |
|
|
|                             # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints |
| if len(checkpoints) >= args.checkpoints_total_limit: |
| num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 |
| removing_checkpoints = checkpoints[0:num_to_remove] |
|
|
| logger.info( |
| f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" |
| ) |
| logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") |
|
|
| for removing_checkpoint in removing_checkpoints: |
| removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) |
| shutil.rmtree(removing_checkpoint) |
|
|
| save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") |
| accelerator.save_state(save_path) |
| logger.info(f"Saved state to {save_path}") |
|
|
| logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} |
| progress_bar.set_postfix(**logs) |
|
|
| if global_step >= args.max_train_steps: |
| break |
|
|
| if accelerator.is_main_process: |
| if args.validation_prompt is not None and epoch % args.validation_epochs == 0: |
| logger.info( |
| f"Running validation... \n Generating {args.num_validation_images} images with prompt:" |
| f" {args.validation_prompt}." |
| ) |
| if args.use_ema: |
|                     # Store the UNet parameters temporarily and load the EMA parameters to perform inference. |
| ema_unet.store(unet.parameters()) |
| ema_unet.copy_to(unet.parameters()) |
|
|
|                 # create pipeline |
| vae = AutoencoderKL.from_pretrained( |
| vae_path, |
| subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, |
| revision=args.revision, |
| variant=args.variant, |
| ) |
| pipeline = StableDiffusionXLPipeline.from_pretrained( |
| args.pretrained_model_name_or_path, |
| vae=vae, |
| unet=accelerator.unwrap_model(unet), |
| revision=args.revision, |
| variant=args.variant, |
| torch_dtype=weight_dtype, |
| ) |
| if args.prediction_type is not None: |
| scheduler_args = {"prediction_type": args.prediction_type} |
| pipeline.scheduler = pipeline.scheduler.from_config(pipeline.scheduler.config, **scheduler_args) |
|
|
| pipeline = pipeline.to(accelerator.device) |
| pipeline.set_progress_bar_config(disable=True) |
|
|
|                 # run inference |
| generator = ( |
| torch.Generator(device=accelerator.device).manual_seed(args.seed) |
| if args.seed is not None |
| else None |
| ) |
| pipeline_args = {"prompt": args.validation_prompt} |
|
|
| with autocast_ctx: |
| images = [ |
| pipeline(**pipeline_args, generator=generator, num_inference_steps=25).images[0] |
| for _ in range(args.num_validation_images) |
| ] |
|
|
| for tracker in accelerator.trackers: |
| if tracker.name == "tensorboard": |
| np_images = np.stack([np.asarray(img) for img in images]) |
| tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") |
| if tracker.name == "wandb": |
| tracker.log( |
| { |
| "validation": [ |
| wandb.Image(image, caption=f"{i}: {args.validation_prompt}") |
| for i, image in enumerate(images) |
| ] |
| } |
| ) |
|
|
| del pipeline |
| if is_torch_npu_available(): |
| torch_npu.npu.empty_cache() |
| elif torch.cuda.is_available(): |
| torch.cuda.empty_cache() |
|
|
| if args.use_ema: |
|                     # Switch back to the original UNet parameters. |
| ema_unet.restore(unet.parameters()) |
|
|
| accelerator.wait_for_everyone() |
| if accelerator.is_main_process: |
| unet = unwrap_model(unet) |
| if args.use_ema: |
| ema_unet.copy_to(unet.parameters()) |
|
|
|         # Serialize pipeline. |
| vae = AutoencoderKL.from_pretrained( |
| vae_path, |
| subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, |
| revision=args.revision, |
| variant=args.variant, |
| torch_dtype=weight_dtype, |
| ) |
| pipeline = StableDiffusionXLPipeline.from_pretrained( |
| args.pretrained_model_name_or_path, |
| unet=unet, |
| vae=vae, |
| revision=args.revision, |
| variant=args.variant, |
| torch_dtype=weight_dtype, |
| ) |
| if args.prediction_type is not None: |
| scheduler_args = {"prediction_type": args.prediction_type} |
| pipeline.scheduler = pipeline.scheduler.from_config(pipeline.scheduler.config, **scheduler_args) |
| pipeline.save_pretrained(args.output_dir) |
|
|
|         # run inference |
| images = [] |
| if args.validation_prompt and args.num_validation_images > 0: |
| pipeline = pipeline.to(accelerator.device) |
| generator = ( |
| torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed is not None else None |
| ) |
|
|
| with autocast_ctx: |
| images = [ |
| pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0] |
| for _ in range(args.num_validation_images) |
| ] |
|
|
| for tracker in accelerator.trackers: |
| if tracker.name == "tensorboard": |
| np_images = np.stack([np.asarray(img) for img in images]) |
| tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC") |
| if tracker.name == "wandb": |
| tracker.log( |
| { |
| "test": [ |
| wandb.Image(image, caption=f"{i}: {args.validation_prompt}") |
| for i, image in enumerate(images) |
| ] |
| } |
| ) |
|
|
| if args.push_to_hub: |
| save_model_card( |
| repo_id=repo_id, |
| images=images, |
| validation_prompt=args.validation_prompt, |
| base_model=args.pretrained_model_name_or_path, |
| dataset_name=args.dataset_name, |
| repo_folder=args.output_dir, |
| vae_path=args.pretrained_vae_model_name_or_path, |
| ) |
| upload_folder( |
| repo_id=repo_id, |
| folder_path=args.output_dir, |
| commit_message="End of training", |
| ignore_patterns=["step_*", "epoch_*"], |
| ) |
|
|
| accelerator.end_training() |
|
|
|
|
| if __name__ == "__main__": |
| args = parse_args() |
| main(args) |
|
|