|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import argparse |
|
|
import copy |
|
|
import itertools |
|
|
import logging |
|
|
import math |
|
|
import os |
|
|
import random |
|
|
import shutil |
|
|
import warnings |
|
|
from contextlib import nullcontext |
|
|
from pathlib import Path |
|
|
import pandas as pd |
|
|
from collections import defaultdict |
|
|
|
|
|
import numpy as np |
|
|
import torch |
|
|
import torch.utils.checkpoint |
|
|
import transformers |
|
|
from accelerate import Accelerator, DistributedType |
|
|
from accelerate.logging import get_logger |
|
|
from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed |
|
|
from huggingface_hub import create_repo, upload_folder |
|
|
from huggingface_hub.utils import insecure_hashlib |
|
|
from peft import LoraConfig, set_peft_model_state_dict |
|
|
from peft.utils import get_peft_model_state_dict |
|
|
from PIL import Image |
|
|
from PIL.ImageOps import exif_transpose |
|
|
from torch.utils.data import Dataset |
|
|
from torchvision import transforms |
|
|
from torchvision.transforms.functional import crop |
|
|
from tqdm.auto import tqdm |
|
|
from transformers import CLIPTokenizer, PretrainedConfig, T5TokenizerFast |
|
|
import torch.nn.functional as F |
|
|
|
|
|
import diffusers |
|
|
from diffusers import ( |
|
|
AutoencoderKL, |
|
|
FlowMatchEulerDiscreteScheduler, |
|
|
SD3Transformer2DModel, |
|
|
StableDiffusion3Pipeline, |
|
|
) |
|
|
from diffusers.optimization import get_scheduler |
|
|
from diffusers.training_utils import ( |
|
|
_set_state_dict_into_text_encoder, |
|
|
cast_training_params, |
|
|
compute_density_for_timestep_sampling, |
|
|
compute_loss_weighting_for_sd3, |
|
|
free_memory, |
|
|
) |
|
|
from diffusers.utils import ( |
|
|
check_min_version, |
|
|
convert_unet_state_dict_to_peft, |
|
|
is_wandb_available, |
|
|
) |
|
|
from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card |
|
|
from diffusers.utils.torch_utils import is_compiled_module |
|
|
|
|
|
|
|
|
if is_wandb_available(): |
|
|
import wandb |
|
|
|
|
|
os.environ["WANDB_API_KEY"] = 'c3c7dc2e7a43cc9e0b4cc8e913d363077af04ab2' |
|
|
os.environ["WANDB_MODE"] = "offline" |
|
|
|
|
|
check_min_version("0.33.0.dev0") |
|
|
|
|
|
logger = get_logger(__name__) |
|
|
|
|
|
DATASET_NAME_MAPPING = { |
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
def save_model_card( |
|
|
repo_id: str, |
|
|
images=None, |
|
|
base_model: str = None, |
|
|
train_text_encoder=False, |
|
|
instance_prompt=None, |
|
|
validation_prompts=None, |
|
|
repo_folder=None, |
|
|
): |
|
|
if "large" in base_model: |
|
|
model_variant = "SD3.5-Large" |
|
|
license_url = "https://huggingface.co/stabilityai/stable-diffusion-3.5-large/blob/main/LICENSE.md" |
|
|
variant_tags = ["sd3.5-large", "sd3.5", "sd3.5-diffusers"] |
|
|
else: |
|
|
model_variant = "SD3" |
|
|
license_url = "https://huggingface.co/stabilityai/stable-diffusion-3-medium/blob/main/LICENSE.md" |
|
|
variant_tags = ["sd3", "sd3-diffusers"] |
|
|
|
|
|
widget_dict = [] |
|
|
if images is not None: |
|
|
for i, image in enumerate(images): |
|
|
image.save(os.path.join(repo_folder, f"image_{i}.png")) |
|
|
widget_dict.append( |
|
|
{"text": validation_prompts if validation_prompts else " ", "output": {"url": f"image_{i}.png"}} |
|
|
) |
|
|
|
|
|
model_description = f""" |
|
|
# {model_variant} DreamBooth LoRA - {repo_id} |
|
|
|
|
|
<Gallery /> |
|
|
|
|
|
## Model description |
|
|
|
|
|
These are {repo_id} DreamBooth LoRA weights for {base_model}. |
|
|
|
|
|
The weights were trained using [DreamBooth](https://dreambooth.github.io/) with the [SD3 diffusers trainer](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/README_sd3.md). |
|
|
|
|
|
Was LoRA for the text encoder enabled? {train_text_encoder}. |
|
|
|
|
|
## Trigger words |
|
|
|
|
|
You should use `{instance_prompt}` to trigger the image generation. |
|
|
|
|
|
## Download model |
|
|
|
|
|
[Download the *.safetensors LoRA]({repo_id}/tree/main) in the Files & versions tab. |
|
|
|
|
|
## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers) |
|
|
|
|
|
```py |
|
|
from diffusers import AutoPipelineForText2Image |
|
|
import torch |
|
|
pipeline = AutoPipelineForText2Image.from_pretrained('{base_model}', torch_dtype=torch.float16).to('cuda')
|
|
pipeline.load_lora_weights('{repo_id}', weight_name='pytorch_lora_weights.safetensors') |
|
|
image = pipeline('{validation_prompts}').images[0]
|
|
``` |
|
|
|
|
|
### Use it with UIs such as AUTOMATIC1111, Comfy UI, SD.Next, Invoke |
|
|
|
|
|
- **LoRA**: download **[`diffusers_lora_weights.safetensors` here 💾](/{repo_id}/blob/main/diffusers_lora_weights.safetensors)**. |
|
|
    - Rename it and place it in your `models/Lora` folder.
|
|
- On AUTOMATIC1111, load the LoRA by adding `<lora:your_new_name:1>` to your prompt. On ComfyUI just [load it as a regular LoRA](https://comfyanonymous.github.io/ComfyUI_examples/lora/). |
|
|
|
|
|
For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters) |
|
|
|
|
|
## License |
|
|
|
|
|
Please adhere to the licensing terms as described [here]({license_url}). |
|
|
""" |
|
|
model_card = load_or_create_model_card( |
|
|
repo_id_or_path=repo_id, |
|
|
from_training=True, |
|
|
license="other", |
|
|
base_model=base_model, |
|
|
model_description=model_description, |
|
|
widget=widget_dict, |
|
|
) |
|
|
tags = [ |
|
|
"text-to-image", |
|
|
"diffusers-training", |
|
|
"diffusers", |
|
|
"lora", |
|
|
"template:sd-lora", |
|
|
] |
|
|
|
|
|
tags += variant_tags |
|
|
|
|
|
model_card = populate_model_card(model_card, tags=tags) |
|
|
model_card.save(os.path.join(repo_folder, "README.md")) |
|
|
|
|
|
|
|
|
def load_text_encoders(class_one, class_two, class_three): |
|
|
text_encoder_one = class_one.from_pretrained( |
|
|
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant |
|
|
) |
|
|
text_encoder_two = class_two.from_pretrained( |
|
|
args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant |
|
|
) |
|
|
text_encoder_three = class_three.from_pretrained( |
|
|
args.pretrained_model_name_or_path, subfolder="text_encoder_3", revision=args.revision, variant=args.variant |
|
|
) |
|
|
return text_encoder_one, text_encoder_two, text_encoder_three |
|
|
|
|
|
|
|
|
def log_validation( |
|
|
pipeline, |
|
|
args, |
|
|
accelerator, |
|
|
pipeline_args, |
|
|
global_step, |
|
|
torch_dtype, |
|
|
is_final_validation=False, |
|
|
): |
|
|
logger.info( |
|
|
f"Running validation... \n Generating {len(pipeline_args)} images with prompts:" |
|
|
f" {[args['prompt'] for args in pipeline_args]}." |
|
|
) |
|
|
pipeline = pipeline.to(accelerator.device) |
|
|
pipeline.set_progress_bar_config(disable=True) |
|
|
|
|
|
autocast_ctx = nullcontext() |
|
|
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed is not None else None |
|
|
|
|
|
|
|
|
images = [] |
|
|
with autocast_ctx: |
|
|
for single_args in pipeline_args: |
|
|
image = pipeline(**single_args, generator=generator).images[0] |
|
|
images.append(image) |
|
|
|
|
|
|
|
|
for tracker in accelerator.trackers: |
|
|
phase_name = "test" if is_final_validation else "validation" |
|
|
if tracker.name == "tensorboard": |
|
|
np_images = np.stack([np.asarray(img) for img in images]) |
|
|
tracker.writer.add_images(phase_name, np_images, global_step, dataformats="NHWC") |
|
|
elif tracker.name == "wandb": |
|
|
tracker.log({ |
|
|
phase_name: [ |
|
|
wandb.Image(image, caption=f"{i}: {args['prompt']}") |
|
|
for i, (image, args) in enumerate(zip(images, pipeline_args)) |
|
|
] |
|
|
}) |
|
|
|
|
|
del pipeline |
|
|
free_memory() |
|
|
return images |
|
|
|
|
|
|
|
|
|
|
|
def import_model_class_from_model_name_or_path( |
|
|
pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" |
|
|
): |
|
|
text_encoder_config = PretrainedConfig.from_pretrained( |
|
|
pretrained_model_name_or_path, subfolder=subfolder, revision=revision |
|
|
) |
|
|
model_class = text_encoder_config.architectures[0] |
|
|
if model_class == "CLIPTextModelWithProjection": |
|
|
from transformers import CLIPTextModelWithProjection |
|
|
|
|
|
return CLIPTextModelWithProjection |
|
|
elif model_class == "T5EncoderModel": |
|
|
from transformers import T5EncoderModel |
|
|
|
|
|
return T5EncoderModel |
|
|
else: |
|
|
raise ValueError(f"{model_class} is not supported.") |
|
|
|
|
|
|
|
|
|
|
|
def parse_args(input_args=None): |
|
|
parser = argparse.ArgumentParser(description="Simple example of a training script.") |
|
|
parser.add_argument( |
|
|
"--pretrained_model_name_or_path", |
|
|
type=str, |
|
|
default=None, |
|
|
required=True, |
|
|
help="Path to pretrained model or model identifier from huggingface.co/models.", |
|
|
) |
|
|
parser.add_argument( |
|
|
"--revision", |
|
|
type=str, |
|
|
default=None, |
|
|
required=False, |
|
|
help="Revision of pretrained model identifier from huggingface.co/models.", |
|
|
) |
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--variant", |
|
|
type=str, |
|
|
default=None, |
|
|
help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--dataset_name", |
|
|
type=str, |
|
|
default=None, |
|
|
help=( |
|
|
"The name of the Dataset (from the HuggingFace hub) containing the training data of instance images (could be your own, possibly private," |
|
|
" dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," |
|
|
" or to a folder containing files that 🤗 Datasets can understand." |
|
|
), |
|
|
) |
|
|
parser.add_argument( |
|
|
"--dataset_config_name", |
|
|
type=str, |
|
|
default=None, |
|
|
help="The config of the Dataset, leave as None if there's only one config.", |
|
|
) |
|
|
|
|
|
parser.add_argument( |
|
|
"--cache_dir", |
|
|
type=str, |
|
|
default=None, |
|
|
help="The directory where the downloaded models and datasets will be stored.", |
|
|
) |
|
|
parser.add_argument("--repeats", type=int, default=1, help="How many times to repeat the training data.") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--image_column", |
|
|
type=str, |
|
|
default="image", |
|
|
help="The column of the dataset containing the target image. By " |
|
|
"default, the standard Image Dataset maps out 'file_name' " |
|
|
"to 'image'.", |
|
|
) |
|
|
parser.add_argument( |
|
|
"--caption_column", |
|
|
type=str, |
|
|
default=None, |
|
|
help="The column of the dataset containing the instance prompt for each image", |
|
|
) |
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--max_sequence_length", |
|
|
type=int, |
|
|
default=512, |
|
|
help="Maximum sequence length to use with with the T5 text encoder", |
|
|
) |
|
|
parser.add_argument( |
|
|
"--validation_prompts", |
|
|
type=str, |
|
|
nargs="+", |
|
|
default=None, |
|
|
help="A prompt that is used during validation to verify that the model is learning. The validation is happening at each `--checkpointing_steps`." |
|
|
) |
|
|
parser.add_argument( |
|
|
"--num_validation_images", |
|
|
type=int, |
|
|
default=4, |
|
|
help="Number of images that should be generated during validation with `validation_prompts`.", |
|
|
) |
|
|
parser.add_argument( |
|
|
"--validation_steps", |
|
|
type=int, |
|
|
default=1, |
|
|
help=( |
|
|
"Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt" |
|
|
" `args.validation_prompts` multiple times: `args.num_validation_images`." |
|
|
), |
|
|
) |
|
|
parser.add_argument( |
|
|
"--rank", |
|
|
type=int, |
|
|
default=768, |
|
|
help=("The dimension of the LoRA update matrices."), |
|
|
) |
|
|
parser.add_argument( |
|
|
"--beta_dpo", |
|
|
type=int, |
|
|
default=2500, |
|
|
help="DPO KL Divergence penalty.", |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--output_dir", |
|
|
type=str, |
|
|
default="sd3-dreambooth", |
|
|
help="The output directory where the model predictions and checkpoints will be written.", |
|
|
) |
|
|
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") |
|
|
parser.add_argument( |
|
|
"--resolution", |
|
|
type=int, |
|
|
default=1024, |
|
|
help=( |
|
|
"The resolution for input images, all the images in the train/validation dataset will be resized to this" |
|
|
" resolution" |
|
|
), |
|
|
) |
|
|
parser.add_argument( |
|
|
"--center_crop", |
|
|
default=True, |
|
|
action="store_true", |
|
|
help=( |
|
|
"Whether to center crop the input images to the resolution. If not set, the images will be randomly" |
|
|
" cropped. The images will be resized to the resolution first before cropping." |
|
|
), |
|
|
) |
|
|
parser.add_argument( |
|
|
"--random_flip", |
|
|
action="store_true", |
|
|
help="whether to randomly flip images horizontally", |
|
|
) |
|
|
parser.add_argument( |
|
|
"--train_text_encoder", |
|
|
action="store_true", |
|
|
help="Whether to train the text encoder (clip text encoders only). If set, the text encoder should be float32 precision.", |
|
|
) |
|
|
|
|
|
parser.add_argument( |
|
|
"--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." |
|
|
) |
|
|
parser.add_argument( |
|
|
"--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." |
|
|
) |
|
|
|
|
|
parser.add_argument("--num_train_epochs", type=int, default=100) |
|
|
parser.add_argument( |
|
|
"--max_train_steps", |
|
|
type=int, |
|
|
default=None, |
|
|
help="Total number of training steps to perform. If provided, overrides num_train_epochs.", |
|
|
) |
|
|
parser.add_argument( |
|
|
"--checkpointing_steps", |
|
|
type=int, |
|
|
default=500, |
|
|
help=( |
|
|
"Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" |
|
|
" checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" |
|
|
" training using `--resume_from_checkpoint`." |
|
|
), |
|
|
) |
|
|
parser.add_argument( |
|
|
"--checkpoints_total_limit", |
|
|
type=int, |
|
|
default=None, |
|
|
help=("Max number of checkpoints to store."), |
|
|
) |
|
|
parser.add_argument( |
|
|
"--resume_from_checkpoint", |
|
|
type=str, |
|
|
default=None, |
|
|
help=( |
|
|
"Whether training should be resumed from a previous checkpoint. Use a path saved by" |
|
|
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' |
|
|
), |
|
|
) |
|
|
parser.add_argument( |
|
|
"--gradient_accumulation_steps", |
|
|
type=int, |
|
|
default=1, |
|
|
help="Number of updates steps to accumulate before performing a backward/update pass.", |
|
|
) |
|
|
parser.add_argument( |
|
|
"--gradient_checkpointing", |
|
|
action="store_true", |
|
|
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", |
|
|
) |
|
|
parser.add_argument( |
|
|
"--learning_rate", |
|
|
type=float, |
|
|
default=1e-4, |
|
|
help="Initial learning rate (after the potential warmup period) to use.", |
|
|
) |
|
|
|
|
|
parser.add_argument( |
|
|
"--text_encoder_lr", |
|
|
type=float, |
|
|
default=5e-6, |
|
|
help="Text encoder learning rate to use.", |
|
|
) |
|
|
parser.add_argument( |
|
|
"--scale_lr", |
|
|
action="store_true", |
|
|
default=False, |
|
|
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", |
|
|
) |
|
|
parser.add_argument( |
|
|
"--lr_scheduler", |
|
|
type=str, |
|
|
default="constant", |
|
|
help=( |
|
|
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' |
|
|
' "constant", "constant_with_warmup"]' |
|
|
), |
|
|
) |
|
|
parser.add_argument( |
|
|
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." |
|
|
) |
|
|
parser.add_argument( |
|
|
"--lr_num_cycles", |
|
|
type=int, |
|
|
default=1, |
|
|
help="Number of hard resets of the lr in cosine_with_restarts scheduler.", |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--dataloader_num_workers", |
|
|
type=int, |
|
|
default=0, |
|
|
help=( |
|
|
"Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." |
|
|
), |
|
|
) |
|
|
parser.add_argument( |
|
|
"--weighting_scheme", |
|
|
type=str, |
|
|
default="logit_normal", |
|
|
choices=["sigma_sqrt", "logit_normal", "mode", "cosmap"], |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--logit_mean", type=float, default=0.0, help="mean to use when using the `'logit_normal'` weighting scheme." |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--logit_std", type=float, default=1.0, help="std to use when using the `'logit_normal'` weighting scheme." |
|
|
) |
|
|
parser.add_argument( |
|
|
"--mode_scale", |
|
|
type=float, |
|
|
default=1.29, |
|
|
help="Scale of mode weighting scheme. Only effective when using the `'mode'` as the `weighting_scheme`.", |
|
|
) |
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--precondition_outputs", |
|
|
type=int, |
|
|
default=1, |
|
|
help="Flag indicating if we are preconditioning the model outputs or not as done in EDM. This affects how " |
|
|
"model `target` is calculated.", |
|
|
) |
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--optimizer", |
|
|
type=str, |
|
|
default="AdamW", |
|
|
help=('The optimizer type to use. Choose between ["AdamW", "prodigy"]'), |
|
|
) |
|
|
|
|
|
parser.add_argument( |
|
|
"--use_8bit_adam", |
|
|
action="store_true", |
|
|
help="Whether or not to use 8-bit Adam from bitsandbytes. Ignored if optimizer is not set to AdamW", |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam and Prodigy optimizers.") |
|
|
|
|
|
|
|
|
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam and Prodigy optimizers.") |
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--prodigy_beta3", |
|
|
type=float, |
|
|
default=None, |
|
|
help="coefficients for computing the Prodigy stepsize using running averages. If set to None, " |
|
|
"uses the value of square root of beta2. Ignored if optimizer is adamW", |
|
|
) |
|
|
|
|
|
|
|
|
parser.add_argument("--prodigy_decouple", type=bool, default=True, help="Use AdamW style decoupled weight decay") |
|
|
|
|
|
|
|
|
parser.add_argument("--adam_weight_decay", type=float, default=1e-04, help="Weight decay to use for unet params") |
|
|
|
|
|
|
|
|
parser.add_argument("--adam_weight_decay_text_encoder", type=float, default=1e-03, help="Weight decay to use for text_encoder") |
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--lora_layers", |
|
|
type=str, |
|
|
default=None, |
|
|
help=( |
|
|
"The transformer block layers to apply LoRA training on. Please specify the layers in a comma seperated string." |
|
|
"For examples refer to https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/README_SD3.md" |
|
|
), |
|
|
) |
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--lora_blocks", |
|
|
type=str, |
|
|
default=None, |
|
|
help=( |
|
|
"The transformer blocks to apply LoRA training on. Please specify the block numbers in a comma seperated manner." |
|
|
'E.g. - "--lora_blocks 12,30" will result in lora training of transformer blocks 12 and 30. For more examples refer to https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/README_SD3.md' |
|
|
), |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--adam_epsilon", |
|
|
type=float, |
|
|
default=1e-08, |
|
|
help="Epsilon value for the Adam optimizer and Prodigy optimizers.", |
|
|
) |
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--prodigy_use_bias_correction", |
|
|
type=bool, |
|
|
default=True, |
|
|
help="Turn on Adam's bias correction. True by default. Ignored if optimizer is adamW", |
|
|
) |
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--prodigy_safeguard_warmup", |
|
|
type=bool, |
|
|
default=True, |
|
|
help="Remove lr from the denominator of D estimate to avoid issues during warm-up stage. True by default. " |
|
|
"Ignored if optimizer is adamW", |
|
|
) |
|
|
|
|
|
|
|
|
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") |
|
|
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") |
|
|
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") |
|
|
parser.add_argument( |
|
|
"--hub_model_id", |
|
|
type=str, |
|
|
default=None, |
|
|
help="The name of the repository to keep in sync with the local `output_dir`.", |
|
|
) |
|
|
parser.add_argument( |
|
|
"--logging_dir", |
|
|
type=str, |
|
|
default="logs", |
|
|
help=( |
|
|
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" |
|
|
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." |
|
|
), |
|
|
) |
|
|
parser.add_argument( |
|
|
"--allow_tf32", |
|
|
action="store_true", |
|
|
help=( |
|
|
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" |
|
|
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" |
|
|
), |
|
|
) |
|
|
parser.add_argument( |
|
|
"--cache_latents", |
|
|
action="store_true", |
|
|
default=False, |
|
|
help="Cache the VAE latents", |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--report_to", |
|
|
type=str, |
|
|
default="tensorboard", |
|
|
help=( |
|
|
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' |
|
|
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' |
|
|
), |
|
|
) |
|
|
parser.add_argument( |
|
|
"--mixed_precision", |
|
|
type=str, |
|
|
default=None, |
|
|
choices=["no", "fp16", "bf16"], |
|
|
help=( |
|
|
"Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" |
|
|
" 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" |
|
|
" flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." |
|
|
), |
|
|
) |
|
|
parser.add_argument( |
|
|
"--upcast_before_saving", |
|
|
action="store_true", |
|
|
default=False, |
|
|
help=( |
|
|
"Whether to upcast the trained transformer layers to float32 before saving (at the end of training). " |
|
|
"Defaults to precision dtype used for training to save memory" |
|
|
), |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--apply_pre_loss", action="store_true", help="Whether or not to apply pretrained loss." |
|
|
) |
|
|
parser.add_argument( |
|
|
"--apply_reward_loss", action="store_true", help="Whether or not to apply reward loss." |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
parser.add_argument( |
|
|
"--save_only_one_ckpt", |
|
|
action="store_true", |
|
|
help="If given, then it only stores one checkpoint through the whole training, the one with the best training loss." |
|
|
"This is useful to manage the storage." |
|
|
) |
|
|
parser.add_argument( |
|
|
"--image_base_dir", |
|
|
        type=str,
|
|
default="", |
|
|
help="The base directory where stored all the images." |
|
|
) |
|
|
parser.add_argument( |
|
|
"--num_images", |
|
|
type=int, |
|
|
default=0, |
|
|
help = "The number of images from image base dictionary" |
|
|
|
|
|
) |
|
|
parser.add_argument( |
|
|
"--train_data_file", |
|
|
        type=str,
|
|
default=None, |
|
|
help="The train data directory where stored all the train data,document is .json or .csv" |
|
|
) |
|
|
|
|
|
|
|
|
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") |
|
|
|
|
|
|
|
|
if input_args is not None: |
|
|
args = parser.parse_args(input_args) |
|
|
else: |
|
|
args = parser.parse_args() |
|
|
|
|
|
if args.dataset_name is None and args.train_data_file is None: |
|
|
raise ValueError("Specify either `--dataset_name` or `--train_data_file`") |
|
|
|
|
|
if args.dataset_name is not None and args.train_data_file is not None: |
|
|
raise ValueError("Specify only one of `--dataset_name` or `--train_data_file`") |
|
|
|
|
|
|
|
|
env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) |
|
|
|
|
|
|
|
|
if env_local_rank != -1 and env_local_rank != args.local_rank: |
|
|
args.local_rank = env_local_rank |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
return args |
|
|
|
|
|
class ImageDataset(Dataset): |
|
|
|
|
|
def __init__( |
|
|
self, |
|
|
image_base_dir, |
|
|
train_data_file, |
|
|
size=1024, |
|
|
repeats=1, |
|
|
center_crop=True, |
|
|
): |
|
|
self.size = size |
|
|
self.center_crop = center_crop |
|
|
|
|
|
|
|
|
|
|
|
if args.dataset_name is not None: |
|
|
try: |
|
|
from datasets import load_dataset |
|
|
except ImportError: |
|
|
raise ImportError( |
|
|
"You are trying to load your data using the datasets library. If you wish to train using custom " |
|
|
"captions please install the datasets library: `pip install datasets`. If you wish to load a " |
|
|
"local folder containing images only, specify --instance_data_dir instead." |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
dataset = load_dataset( |
|
|
args.dataset_name, |
|
|
args.dataset_config_name, |
|
|
cache_dir=args.cache_dir, |
|
|
) |
|
|
|
|
|
|
|
|
column_names = dataset["train"].column_names |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if args.image_column is None: |
|
|
image_column = column_names[0] |
|
|
logger.info(f"image column defaulting to {image_column}") |
|
|
else: |
|
|
image_column = args.image_column |
|
|
if image_column not in column_names: |
|
|
raise ValueError( |
|
|
f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}" |
|
|
) |
|
|
images = dataset["train"][image_column] |
|
|
|
|
|
if args.caption_column is None: |
|
|
caption_column = column_names[1] |
|
|
logger.info(f"caption column defaulting to {caption_column}") |
|
|
prompts = dataset["train"][caption_column] |
|
|
|
|
|
else: |
|
|
if args.caption_column not in column_names: |
|
|
raise ValueError( |
|
|
f"`--caption_column` value '{args.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}" |
|
|
) |
|
|
prompts = dataset["train"][args.caption_column] |
|
|
|
|
|
else: |
|
|
df = pd.read_excel(train_data_file) |
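            # The training file is loaded with pandas' Excel reader; columns are used positionally:
            # [image path, caption, mos1, mos1_pred, mos2, mos2_pred].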
|
|
column_names = df.columns.tolist() |
|
|
|
|
|
image_column = column_names[0] |
|
|
caption_column = column_names[1] |
|
|
mos1_column = column_names[2] |
|
|
mos1_pred_column = column_names[3] |
|
|
mos2_column = column_names[4] |
|
|
mos2_pred_column = column_names[5] |
|
|
|
|
|
prompts = df[caption_column] |
|
|
mos1_pred = df[mos1_pred_column] |
|
|
mos2_pred = df[mos2_pred_column] |
|
|
|
|
|
self.image_base_dir = Path(image_base_dir) |
|
|
self.train_data_file = Path(train_data_file) |
|
|
|
|
|
if not self.image_base_dir.exists(): |
|
|
raise ValueError("Image base dir doesn't exists.") |
|
|
|
|
|
images_path = df[image_column] |
|
|
|
|
|
|
|
|
images_file = [image_path.split('_', 1)[-1] for image_path in images_path] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
p=0.5 |
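            # `p` weights the two predicted MOS scores when they are combined into a single reward below.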
|
|
|
|
|
|
|
|
|
|
|
|
|
|
images_file_final = [] |
|
|
prompts_final = [] |
|
|
reward_final = [] |
|
|
for i,image_file in enumerate(images_file): |
|
|
score = p*mos1_pred[i] + (1-p)*mos2_pred[i] |
|
|
|
|
|
images_file_final.append(image_file) |
|
|
prompts_final.append(prompts[i]) |
|
|
reward_final.append(score) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def extract_prompt_index(image_path): |
|
|
return image_path.split("/")[-1].split(".")[0] |
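            # Build DPO preference pairs: group images that share the same prompt index, shuffle each
            # group, and pop images two at a time; the image with the higher reward becomes the "win"
            # sample and the other the "loss" sample. Ties are skipped.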
|
|
|
|
|
|
|
|
prompt_groups = defaultdict(list) |
|
|
for img_path, reward, prompt in zip(images_file, reward_final, prompts_final): |
|
|
idx = extract_prompt_index(img_path) |
|
|
prompt_groups[idx].append((img_path, reward, prompt)) |
|
|
|
|
|
|
|
|
|
|
|
dpo_data = { |
|
|
"prompt": [], |
|
|
"win": [], |
|
|
"loss": [], |
|
|
} |
|
|
|
|
|
for idx, items in prompt_groups.items(): |
|
|
if len(items) < 2: |
|
|
continue |
|
|
|
|
|
|
|
|
random.shuffle(items) |
|
|
|
|
|
|
|
|
while len(items) >= 2: |
|
|
sample1 = items.pop() |
|
|
sample2 = items.pop() |
|
|
|
|
|
img1, reward1, prompt1 = sample1 |
|
|
img2, reward2, prompt2 = sample2 |
|
|
|
|
|
|
|
|
if reward1 == reward2: |
|
|
continue |
|
|
|
|
|
if reward1 > reward2: |
|
|
winner, loser, prompt = img1, img2, prompt1.strip() |
|
|
else: |
|
|
winner, loser, prompt = img2, img1, prompt2.strip() |
|
|
|
|
|
dpo_data["prompt"].append(prompt) |
|
|
dpo_data["win"].append(winner) |
|
|
dpo_data["loss"].append(loser) |
|
|
|
|
|
print(f"构造完成,共获得 {len(dpo_data['loss'])} 条 DPO 微调数据。") |
|
|
|
|
|
|
|
|
df = pd.DataFrame(dpo_data) |
|
|
|
|
|
|
|
|
csv_save_path = "dpo_dataset.csv" |
|
|
df.to_csv(csv_save_path, index=False, encoding='utf-8') |
|
|
|
|
|
print(f"DPO 微调数据已保存为 CSV 文件:{csv_save_path}") |
|
|
|
|
|
images_win = dpo_data["win"] |
|
|
images_loss = dpo_data["loss"] |
|
|
prompts = dpo_data["prompt"] |
|
|
|
|
|
|
|
|
|
|
|
self.prompts = [] |
|
|
for caption in prompts: |
|
|
self.prompts.extend(itertools.repeat(caption, repeats)) |
|
|
|
|
|
self.images_win = [] |
|
|
for img in images_win: |
|
|
self.images_win.extend(itertools.repeat(img, repeats)) |
|
|
|
|
|
self.images_loss = [] |
|
|
for img in images_loss: |
|
|
self.images_loss.extend(itertools.repeat(img, repeats)) |
|
|
|
|
|
train_resize = transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR) |
|
|
train_crop = transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size) |
|
|
train_flip = transforms.RandomHorizontalFlip(p=1.0) |
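        # Note: the flip transform uses p=1.0 and is only added to the pipeline with 50% probability
        # below, at dataset-construction time, so either every image in the epoch is flipped or none
        # are (rather than flipping each sample independently).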
|
|
|
|
|
|
|
|
img_transforms = [] |
|
|
img_transforms.append(train_resize) |
|
|
|
|
|
if args.random_flip and random.random() < 0.5: |
|
|
img_transforms.append(train_flip) |
|
|
if args.center_crop: |
|
|
img_transforms.append(train_crop) |
|
|
self.image_transforms = transforms.Compose( |
|
|
[*img_transforms, transforms.ToTensor(), transforms.Normalize([0.5], [0.5])] |
|
|
) |
|
|
|
|
|
self.num_images = len(self.images_win) |
|
|
self._length = self.num_images |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def __len__(self): |
|
|
return self._length |
|
|
|
|
|
|
|
|
def __getitem__(self, index): |
|
|
example = {} |
|
|
all_images_input = [] |
|
|
images = [] |
|
|
image_win = self.images_win[index % self.num_images] |
|
|
image_loss = self.images_loss[index % self.num_images] |
|
|
images.append(image_win) |
|
|
images.append(image_loss) |
|
|
|
|
|
for image in images: |
|
|
image_input = Image.open(os.path.join(args.image_base_dir, image)) |
|
|
if not image_input.mode == "RGB": |
|
|
image_input = image_input.convert("RGB") |
|
|
image_input = exif_transpose(image_input) |
|
|
image_input_tr = self.image_transforms(image_input) |
|
|
all_images_input.append(image_input_tr) |
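        # Concatenate the "win" and "loss" images along the channel dimension (3 + 3 = 6 channels)
        # so a single tensor carries both; the training loop splits them back with chunk(2, dim=1).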
|
|
combined_im = torch.cat(all_images_input, dim=0) |
|
|
|
|
|
example["images_input"] = combined_im |
|
|
|
|
|
caption = self.prompts[index % self.num_images] |
|
|
example["prompts_input"] = caption |
|
|
|
|
|
return example |
|
|
|
|
|
def collate_fn(examples): |
|
|
pixel_values = [example["images_input"] for example in examples] |
|
|
prompts = [example["prompts_input"] for example in examples] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
pixel_values = torch.stack(pixel_values) |
|
|
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() |
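    # pixel_values has shape [B, 6, H, W]: for each example the preferred ("win") and rejected
    # ("loss") images are stacked along the channel axis.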
|
|
|
|
|
batch = {"pixel_values": pixel_values, "prompts": prompts} |
|
|
|
|
|
return batch |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def tokenize_prompt(tokenizer, prompt): |
|
|
text_inputs = tokenizer( |
|
|
prompt, |
|
|
padding="max_length", |
|
|
max_length=tokenizer.model_max_length, |
|
|
truncation=True, |
|
|
return_tensors="pt", |
|
|
) |
|
|
text_input_ids = text_inputs.input_ids |
|
|
return text_input_ids |
|
|
|
|
|
|
|
|
def _encode_prompt_with_t5( |
|
|
text_encoder, |
|
|
tokenizer, |
|
|
max_sequence_length, |
|
|
prompt=None, |
|
|
num_images_per_prompt=1, |
|
|
device=None, |
|
|
text_input_ids=None, |
|
|
): |
|
|
prompt = [prompt] if isinstance(prompt, str) else prompt |
|
|
batch_size = len(prompt) |
|
|
|
|
|
if tokenizer is not None: |
|
|
text_inputs = tokenizer( |
|
|
prompt, |
|
|
padding="max_length", |
|
|
max_length=max_sequence_length, |
|
|
truncation=True, |
|
|
add_special_tokens=True, |
|
|
return_tensors="pt", |
|
|
) |
|
|
text_input_ids = text_inputs.input_ids |
|
|
else: |
|
|
if text_input_ids is None: |
|
|
raise ValueError("text_input_ids must be provided when the tokenizer is not specified") |
|
|
|
|
|
prompt_embeds = text_encoder(text_input_ids.to(device))[0] |
|
|
|
|
|
dtype = text_encoder.dtype |
|
|
|
|
|
|
|
|
|
|
|
prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) |
|
|
|
|
|
_, seq_len, _ = prompt_embeds.shape |
|
|
|
|
|
|
|
|
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) |
|
|
prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) |
|
|
|
|
|
return prompt_embeds |
|
|
|
|
|
|
|
|
def _encode_prompt_with_clip( |
|
|
text_encoder, |
|
|
tokenizer, |
|
|
prompt: str, |
|
|
device=None, |
|
|
text_input_ids=None, |
|
|
num_images_per_prompt: int = 1, |
|
|
): |
|
|
prompt = [prompt] if isinstance(prompt, str) else prompt |
|
|
batch_size = len(prompt) |
|
|
|
|
|
if tokenizer is not None: |
|
|
text_inputs = tokenizer( |
|
|
prompt, |
|
|
padding="max_length", |
|
|
max_length=77, |
|
|
truncation=True, |
|
|
return_tensors="pt", |
|
|
) |
|
|
|
|
|
text_input_ids = text_inputs.input_ids |
|
|
else: |
|
|
if text_input_ids is None: |
|
|
raise ValueError("text_input_ids must be provided when the tokenizer is not specified") |
|
|
|
|
|
prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) |
|
|
|
|
|
pooled_prompt_embeds = prompt_embeds[0] |
|
|
prompt_embeds = prompt_embeds.hidden_states[-2] |
|
|
|
|
|
dtype = text_encoder.dtype |
|
|
|
|
|
prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) |
|
|
|
|
|
_, seq_len, _ = prompt_embeds.shape |
|
|
|
|
|
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) |
|
|
prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) |
|
|
|
|
|
return prompt_embeds, pooled_prompt_embeds |
|
|
|
|
|
|
|
|
def encode_prompt( |
|
|
text_encoders, |
|
|
tokenizers, |
|
|
prompt: str, |
|
|
max_sequence_length, |
|
|
device=None, |
|
|
num_images_per_prompt: int = 1, |
|
|
text_input_ids_list=None, |
|
|
): |
|
|
prompt = [prompt] if isinstance(prompt, str) else prompt |
|
|
|
|
|
clip_tokenizers = tokenizers[:2] |
|
|
clip_text_encoders = text_encoders[:2] |
|
|
|
|
|
clip_prompt_embeds_list = [] |
|
|
clip_pooled_prompt_embeds_list = [] |
|
|
for i, (tokenizer, text_encoder) in enumerate(zip(clip_tokenizers, clip_text_encoders)): |
|
|
prompt_embeds, pooled_prompt_embeds = _encode_prompt_with_clip( |
|
|
text_encoder=text_encoder, |
|
|
tokenizer=tokenizer, |
|
|
prompt=prompt, |
|
|
device=device if device is not None else text_encoder.device, |
|
|
num_images_per_prompt=num_images_per_prompt, |
|
|
text_input_ids=text_input_ids_list[i] if text_input_ids_list else None, |
|
|
) |
|
|
clip_prompt_embeds_list.append(prompt_embeds) |
|
|
clip_pooled_prompt_embeds_list.append(pooled_prompt_embeds) |
|
|
|
|
|
clip_prompt_embeds = torch.cat(clip_prompt_embeds_list, dim=-1) |
|
|
pooled_prompt_embeds = torch.cat(clip_pooled_prompt_embeds_list, dim=-1) |
|
|
|
|
|
t5_prompt_embed = _encode_prompt_with_t5( |
|
|
text_encoders[-1], |
|
|
tokenizers[-1], |
|
|
max_sequence_length, |
|
|
prompt=prompt, |
|
|
num_images_per_prompt=num_images_per_prompt, |
|
|
text_input_ids=text_input_ids_list[-1] if text_input_ids_list else None, |
|
|
device=device if device is not None else text_encoders[-1].device, |
|
|
) |
|
|
|
|
|
clip_prompt_embeds = torch.nn.functional.pad( |
|
|
clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1]) |
|
|
) |
|
|
prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2) |
|
|
|
|
|
return prompt_embeds, pooled_prompt_embeds |
|
|
|
|
|
|
|
|
def main(args): |
|
|
if args.report_to == "wandb" and args.hub_token is not None: |
|
|
raise ValueError( |
|
|
"You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." |
|
|
" Please use `huggingface-cli login` to authenticate with the Hub." |
|
|
) |
|
|
|
|
|
if torch.backends.mps.is_available() and args.mixed_precision == "bf16": |
|
|
|
|
|
raise ValueError( |
|
|
"Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead." |
|
|
) |
|
|
|
|
|
logging_dir = Path(args.output_dir, args.logging_dir) |
|
|
|
|
|
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) |
|
|
kwargs = DistributedDataParallelKwargs(find_unused_parameters=True) |
|
|
accelerator = Accelerator( |
|
|
gradient_accumulation_steps=args.gradient_accumulation_steps, |
|
|
mixed_precision=args.mixed_precision, |
|
|
log_with=args.report_to, |
|
|
project_config=accelerator_project_config, |
|
|
kwargs_handlers=[kwargs], |
|
|
) |
|
|
|
|
|
|
|
|
if torch.backends.mps.is_available(): |
|
|
accelerator.native_amp = False |
|
|
|
|
|
if args.report_to == "wandb": |
|
|
if not is_wandb_available(): |
|
|
raise ImportError("Make sure to install wandb if you want to use it for logging during training.") |
|
|
|
|
|
|
|
|
logging.basicConfig( |
|
|
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", |
|
|
datefmt="%m/%d/%Y %H:%M:%S", |
|
|
level=logging.INFO, |
|
|
) |
|
|
logger.info(accelerator.state, main_process_only=False) |
|
|
if accelerator.is_local_main_process: |
|
|
transformers.utils.logging.set_verbosity_warning() |
|
|
diffusers.utils.logging.set_verbosity_info() |
|
|
else: |
|
|
transformers.utils.logging.set_verbosity_error() |
|
|
diffusers.utils.logging.set_verbosity_error() |
|
|
|
|
|
|
|
|
if args.seed is not None: |
|
|
set_seed(args.seed) |
|
|
|
|
|
|
|
|
|
|
|
if accelerator.is_main_process: |
|
|
if args.output_dir is not None: |
|
|
os.makedirs(args.output_dir, exist_ok=True) |
|
|
|
|
|
if args.push_to_hub: |
|
|
repo_id = create_repo( |
|
|
repo_id=args.hub_model_id or Path(args.output_dir).name, |
|
|
exist_ok=True, |
|
|
).repo_id |
|
|
|
|
|
|
|
|
tokenizer_one = CLIPTokenizer.from_pretrained( |
|
|
args.pretrained_model_name_or_path, |
|
|
subfolder="tokenizer", |
|
|
revision=args.revision, |
|
|
) |
|
|
tokenizer_two = CLIPTokenizer.from_pretrained( |
|
|
args.pretrained_model_name_or_path, |
|
|
subfolder="tokenizer_2", |
|
|
revision=args.revision, |
|
|
) |
|
|
tokenizer_three = T5TokenizerFast.from_pretrained( |
|
|
args.pretrained_model_name_or_path, |
|
|
subfolder="tokenizer_3", |
|
|
revision=args.revision, |
|
|
) |
|
|
|
|
|
|
|
|
text_encoder_cls_one = import_model_class_from_model_name_or_path( |
|
|
args.pretrained_model_name_or_path, args.revision |
|
|
) |
|
|
text_encoder_cls_two = import_model_class_from_model_name_or_path( |
|
|
args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2" |
|
|
) |
|
|
text_encoder_cls_three = import_model_class_from_model_name_or_path( |
|
|
args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_3" |
|
|
) |
|
|
|
|
|
|
|
|
noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained( |
|
|
args.pretrained_model_name_or_path, subfolder="scheduler" |
|
|
) |
|
|
noise_scheduler_copy = copy.deepcopy(noise_scheduler) |
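    # Keep an unmodified copy of the noise scheduler for sigma/timestep lookups during training
    # (see `get_sigmas` below).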
|
|
text_encoder_one, text_encoder_two, text_encoder_three = load_text_encoders( |
|
|
text_encoder_cls_one, text_encoder_cls_two, text_encoder_cls_three |
|
|
) |
|
|
vae = AutoencoderKL.from_pretrained( |
|
|
args.pretrained_model_name_or_path, |
|
|
subfolder="vae", |
|
|
revision=args.revision, |
|
|
variant=args.variant, |
|
|
) |
|
|
transformer = SD3Transformer2DModel.from_pretrained( |
|
|
args.pretrained_model_name_or_path, subfolder="transformer", revision=args.revision, variant=args.variant |
|
|
) |
|
|
|
|
|
transformer.requires_grad_(False) |
|
|
vae.requires_grad_(False) |
|
|
text_encoder_one.requires_grad_(False) |
|
|
text_encoder_two.requires_grad_(False) |
|
|
text_encoder_three.requires_grad_(False) |
|
|
|
|
|
|
|
|
|
|
|
weight_dtype = torch.float32 |
|
|
if accelerator.mixed_precision == "fp16": |
|
|
weight_dtype = torch.float16 |
|
|
elif accelerator.mixed_precision == "bf16": |
|
|
weight_dtype = torch.bfloat16 |
|
|
|
|
|
if torch.backends.mps.is_available() and weight_dtype == torch.bfloat16: |
|
|
|
|
|
raise ValueError( |
|
|
"Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead." |
|
|
) |
|
|
|
|
|
vae.to(accelerator.device, dtype=torch.float32) |
|
|
transformer.to(accelerator.device, dtype=weight_dtype) |
|
|
text_encoder_one.to(accelerator.device, dtype=weight_dtype) |
|
|
text_encoder_two.to(accelerator.device, dtype=weight_dtype) |
|
|
text_encoder_three.to(accelerator.device, dtype=weight_dtype) |
|
|
|
|
|
if args.gradient_checkpointing: |
|
|
transformer.enable_gradient_checkpointing() |
|
|
if args.train_text_encoder: |
|
|
text_encoder_one.gradient_checkpointing_enable() |
|
|
text_encoder_two.gradient_checkpointing_enable() |
|
|
if args.lora_layers is not None: |
|
|
target_modules = [layer.strip() for layer in args.lora_layers.split(",")] |
|
|
else: |
|
|
target_modules = [ |
|
|
"attn.add_k_proj", |
|
|
"attn.add_q_proj", |
|
|
"attn.add_v_proj", |
|
|
"attn.to_add_out", |
|
|
"attn.to_k", |
|
|
"attn.to_out.0", |
|
|
"attn.to_q", |
|
|
"attn.to_v", |
|
|
] |
|
|
if args.lora_blocks is not None: |
|
|
target_blocks = [int(block.strip()) for block in args.lora_blocks.split(",")] |
|
|
target_modules = [ |
|
|
f"transformer_blocks.{block}.{module}" for block in target_blocks for module in target_modules |
|
|
] |
|
|
|
|
|
|
|
|
transformer_lora_config = LoraConfig( |
|
|
r=args.rank, |
|
|
lora_alpha=args.rank, |
|
|
init_lora_weights="gaussian", |
|
|
target_modules=target_modules, |
|
|
) |
|
|
transformer.add_adapter(transformer_lora_config) |
|
|
|
|
|
if args.train_text_encoder: |
|
|
text_lora_config = LoraConfig( |
|
|
r=args.rank, |
|
|
lora_alpha=args.rank, |
|
|
init_lora_weights="gaussian", |
|
|
target_modules=["q_proj", "k_proj", "v_proj", "out_proj"], |
|
|
) |
|
|
text_encoder_one.add_adapter(text_lora_config) |
|
|
text_encoder_two.add_adapter(text_lora_config) |
|
|
|
|
|
def unwrap_model(model): |
|
|
model = accelerator.unwrap_model(model) |
|
|
model = model._orig_mod if is_compiled_module(model) else model |
|
|
return model |
|
|
|
|
|
|
|
|
def save_model_hook(models, weights, output_dir): |
|
|
if accelerator.is_main_process: |
|
|
transformer_lora_layers_to_save = None |
|
|
text_encoder_one_lora_layers_to_save = None |
|
|
text_encoder_two_lora_layers_to_save = None |
|
|
|
|
|
for model in models: |
|
|
if isinstance(unwrap_model(model), type(unwrap_model(transformer))): |
|
|
model = unwrap_model(model) |
|
|
if args.upcast_before_saving: |
|
|
model = model.to(torch.float32) |
|
|
transformer_lora_layers_to_save = get_peft_model_state_dict(model) |
|
|
elif args.train_text_encoder and isinstance( |
|
|
unwrap_model(model), type(unwrap_model(text_encoder_one)) |
|
|
): |
|
|
|
|
|
model = unwrap_model(model) |
|
|
hidden_size = model.config.hidden_size |
|
|
if hidden_size == 768: |
|
|
text_encoder_one_lora_layers_to_save = get_peft_model_state_dict(model) |
|
|
elif hidden_size == 1280: |
|
|
text_encoder_two_lora_layers_to_save = get_peft_model_state_dict(model) |
|
|
else: |
|
|
raise ValueError(f"unexpected save model: {model.__class__}") |
|
|
|
|
|
|
|
|
if weights: |
|
|
weights.pop() |
|
|
|
|
|
StableDiffusion3Pipeline.save_lora_weights( |
|
|
output_dir, |
|
|
transformer_lora_layers=transformer_lora_layers_to_save, |
|
|
text_encoder_lora_layers=text_encoder_one_lora_layers_to_save, |
|
|
text_encoder_2_lora_layers=text_encoder_two_lora_layers_to_save, |
|
|
) |
|
|
|
|
|
def load_model_hook(models, input_dir): |
|
|
transformer_ = None |
|
|
text_encoder_one_ = None |
|
|
text_encoder_two_ = None |
|
|
|
|
|
if not accelerator.distributed_type == DistributedType.DEEPSPEED: |
|
|
print("不是DistributedType.DEEPSPEED!!!!!!!!!!!!") |
|
|
while len(models) > 0: |
|
|
model = models.pop() |
|
|
|
|
|
if isinstance(unwrap_model(model), type(unwrap_model(transformer))): |
|
|
transformer_ = unwrap_model(model) |
|
|
elif isinstance(unwrap_model(model), type(unwrap_model(text_encoder_one))): |
|
|
text_encoder_one_ = unwrap_model(model) |
|
|
elif isinstance(unwrap_model(model), type(unwrap_model(text_encoder_two))): |
|
|
text_encoder_two_ = unwrap_model(model) |
|
|
else: |
|
|
raise ValueError(f"unexpected save model: {model.__class__}") |
|
|
print([type(unwrap_model(m)) for m in models]) |
|
|
|
|
|
|
|
|
else: |
|
|
transformer_ = SD3Transformer2DModel.from_pretrained( |
|
|
args.pretrained_model_name_or_path, subfolder="transformer" |
|
|
) |
|
|
transformer_.add_adapter(transformer_lora_config) |
|
|
if args.train_text_encoder: |
|
|
text_encoder_one_ = text_encoder_cls_one.from_pretrained( |
|
|
args.pretrained_model_name_or_path, subfolder="text_encoder" |
|
|
) |
|
|
text_encoder_two_ = text_encoder_cls_two.from_pretrained( |
|
|
args.pretrained_model_name_or_path, subfolder="text_encoder_2" |
|
|
) |
|
|
|
|
|
lora_state_dict = StableDiffusion3Pipeline.lora_state_dict(input_dir) |
|
|
|
|
|
transformer_state_dict = { |
|
|
f'{k.replace("transformer.", "")}': v for k, v in lora_state_dict.items() if k.startswith("transformer.") |
|
|
} |
|
|
transformer_state_dict = convert_unet_state_dict_to_peft(transformer_state_dict) |
|
|
incompatible_keys = set_peft_model_state_dict(transformer_, transformer_state_dict, adapter_name="default") |
|
|
if incompatible_keys is not None: |
|
|
|
|
|
unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) |
|
|
if unexpected_keys: |
|
|
logger.warning( |
|
|
f"Loading adapter weights from state_dict led to unexpected keys not found in the model: " |
|
|
f" {unexpected_keys}. " |
|
|
) |
|
|
if args.train_text_encoder: |
|
|
|
|
|
_set_state_dict_into_text_encoder(lora_state_dict, prefix="text_encoder.", text_encoder=text_encoder_one_) |
|
|
|
|
|
_set_state_dict_into_text_encoder( |
|
|
lora_state_dict, prefix="text_encoder_2.", text_encoder=text_encoder_two_ |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if args.mixed_precision == "fp16": |
|
|
models = [transformer_] |
|
|
if args.train_text_encoder: |
|
|
models.extend([text_encoder_one_, text_encoder_two_]) |
|
|
|
|
|
cast_training_params(models) |
|
|
|
|
|
accelerator.register_save_state_pre_hook(save_model_hook) |
|
|
accelerator.register_load_state_pre_hook(load_model_hook) |
|
|
|
|
|
|
|
|
|
|
|
if args.allow_tf32 and torch.cuda.is_available(): |
|
|
torch.backends.cuda.matmul.allow_tf32 = True |
|
|
|
|
|
if args.scale_lr: |
|
|
args.learning_rate = ( |
|
|
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes |
|
|
) |
|
|
|
|
|
|
|
|
if args.mixed_precision == "fp16": |
|
|
models = [transformer] |
|
|
if args.train_text_encoder: |
|
|
models.extend([text_encoder_one, text_encoder_two]) |
|
|
|
|
|
cast_training_params(models, dtype=torch.float32) |
|
|
|
|
|
transformer_lora_parameters = list(filter(lambda p: p.requires_grad, transformer.parameters())) |
|
|
if args.train_text_encoder: |
|
|
text_lora_parameters_one = list(filter(lambda p: p.requires_grad, text_encoder_one.parameters())) |
|
|
text_lora_parameters_two = list(filter(lambda p: p.requires_grad, text_encoder_two.parameters())) |
|
|
|
|
|
|
|
|
transformer_parameters_with_lr = {"params": transformer_lora_parameters, "lr": args.learning_rate} |
|
|
if args.train_text_encoder: |
|
|
|
|
|
text_lora_parameters_one_with_lr = { |
|
|
"params": text_lora_parameters_one, |
|
|
"weight_decay": args.adam_weight_decay_text_encoder, |
|
|
"lr": args.text_encoder_lr if args.text_encoder_lr else args.learning_rate, |
|
|
} |
|
|
text_lora_parameters_two_with_lr = { |
|
|
"params": text_lora_parameters_two, |
|
|
"weight_decay": args.adam_weight_decay_text_encoder, |
|
|
"lr": args.text_encoder_lr if args.text_encoder_lr else args.learning_rate, |
|
|
} |
|
|
params_to_optimize = [ |
|
|
transformer_parameters_with_lr, |
|
|
text_lora_parameters_one_with_lr, |
|
|
text_lora_parameters_two_with_lr, |
|
|
] |
|
|
else: |
|
|
params_to_optimize = [transformer_parameters_with_lr] |
|
|
|
|
|
|
|
|
if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"): |
|
|
logger.warning( |
|
|
f"Unsupported choice of optimizer: {args.optimizer}.Supported optimizers include [adamW, prodigy]." |
|
|
"Defaulting to adamW" |
|
|
) |
|
|
args.optimizer = "adamw" |
|
|
|
|
|
if args.use_8bit_adam and not args.optimizer.lower() == "adamw": |
|
|
logger.warning( |
|
|
f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was " |
|
|
f"set to {args.optimizer.lower()}" |
|
|
) |
|
|
|
|
|
if args.optimizer.lower() == "adamw": |
|
|
if args.use_8bit_adam: |
|
|
try: |
|
|
import bitsandbytes as bnb |
|
|
except ImportError: |
|
|
raise ImportError( |
|
|
"To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." |
|
|
) |
|
|
|
|
|
optimizer_class = bnb.optim.AdamW8bit |
|
|
else: |
|
|
optimizer_class = torch.optim.AdamW |
|
|
|
|
|
optimizer = optimizer_class( |
|
|
params_to_optimize, |
|
|
betas=(args.adam_beta1, args.adam_beta2), |
|
|
weight_decay=args.adam_weight_decay, |
|
|
eps=args.adam_epsilon, |
|
|
) |
|
|
|
|
|
if args.optimizer.lower() == "prodigy": |
|
|
try: |
|
|
import prodigyopt |
|
|
except ImportError: |
|
|
raise ImportError("To use Prodigy, please install the prodigyopt library: `pip install prodigyopt`") |
|
|
|
|
|
optimizer_class = prodigyopt.Prodigy |
|
|
|
|
|
if args.learning_rate <= 0.1: |
|
|
logger.warning( |
|
|
"Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0" |
|
|
) |
|
|
if args.train_text_encoder and args.text_encoder_lr: |
|
|
logger.warning( |
|
|
f"Learning rates were provided both for the transformer and the text encoder- e.g. text_encoder_lr:" |
|
|
f" {args.text_encoder_lr} and learning_rate: {args.learning_rate}. " |
|
|
f"When using prodigy only learning_rate is used as the initial learning rate." |
|
|
) |
|
|
|
|
|
|
|
|
params_to_optimize[1]["lr"] = args.learning_rate |
|
|
params_to_optimize[2]["lr"] = args.learning_rate |
|
|
|
|
|
optimizer = optimizer_class( |
|
|
params_to_optimize, |
|
|
betas=(args.adam_beta1, args.adam_beta2), |
|
|
beta3=args.prodigy_beta3, |
|
|
weight_decay=args.adam_weight_decay, |
|
|
eps=args.adam_epsilon, |
|
|
decouple=args.prodigy_decouple, |
|
|
use_bias_correction=args.prodigy_use_bias_correction, |
|
|
safeguard_warmup=args.prodigy_safeguard_warmup, |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
train_dataset = ImageDataset( |
|
|
image_base_dir=args.image_base_dir, |
|
|
train_data_file=args.train_data_file, |
|
|
size=args.resolution, |
|
|
repeats=args.repeats, |
|
|
center_crop=args.center_crop, |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
train_dataloader = torch.utils.data.DataLoader( |
|
|
train_dataset, |
|
|
batch_size=args.train_batch_size, |
|
|
shuffle=True, |
|
|
collate_fn=lambda examples: collate_fn(examples), |
|
|
num_workers=args.dataloader_num_workers, |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if not args.train_text_encoder: |
|
|
tokenizers = [tokenizer_one, tokenizer_two, tokenizer_three] |
|
|
text_encoders = [text_encoder_one, text_encoder_two, text_encoder_three] |
|
|
|
|
|
def compute_text_embeddings(prompt, text_encoders, tokenizers): |
|
|
with torch.no_grad(): |
|
|
prompt_embeds, pooled_prompt_embeds = encode_prompt( |
|
|
text_encoders, tokenizers, prompt, args.max_sequence_length |
|
|
) |
|
|
prompt_embeds = prompt_embeds.to(accelerator.device) |
|
|
|
|
|
pooled_prompt_embeds = pooled_prompt_embeds.to(accelerator.device) |
|
|
|
|
|
return prompt_embeds, pooled_prompt_embeds |
|
|
|
|
|
|
|
|
vae_config_shift_factor = vae.config.shift_factor |
|
|
vae_config_scaling_factor = vae.config.scaling_factor |
|
|
|
|
|
|
|
|
|
|
|
overrode_max_train_steps = False |
|
|
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) |
|
|
if args.max_train_steps is None: |
|
|
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch |
|
|
overrode_max_train_steps = True |
|
|
|
|
|
lr_scheduler = get_scheduler( |
|
|
args.lr_scheduler, |
|
|
optimizer=optimizer, |
|
|
num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, |
|
|
num_training_steps=args.max_train_steps * accelerator.num_processes, |
|
|
num_cycles=args.lr_num_cycles, |
|
|
power=args.lr_power, |
|
|
) |
|
|
|
|
|
|
|
|
if args.train_text_encoder: |
|
|
( |
|
|
transformer, |
|
|
text_encoder_one, |
|
|
text_encoder_two, |
|
|
optimizer, |
|
|
train_dataloader, |
|
|
lr_scheduler, |
|
|
) = accelerator.prepare( |
|
|
transformer, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler |
|
|
) |
|
|
assert text_encoder_one is not None |
|
|
assert text_encoder_two is not None |
|
|
assert text_encoder_three is not None |
|
|
else: |
|
|
transformer, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( |
|
|
transformer, optimizer, train_dataloader, lr_scheduler |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) |
|
|
print(f"每周期训练步骤是{num_update_steps_per_epoch}!!!!!!!!") |
|
|
if overrode_max_train_steps: |
|
|
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch |
|
|
|
|
|
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) |
|
|
|
|
|
|
|
|
|
|
|
if accelerator.is_main_process: |
|
|
tracker_name = "gors-sd3-lora" |
|
|
accelerator.init_trackers(tracker_name, config=vars(args)) |
|
|
|
|
|
|
|
|
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps |
|
|
|
|
|
logger.info("***** Running training *****") |
|
|
logger.info(f" Num examples = {len(train_dataset)}") |
|
|
logger.info(f" Num batches each epoch = {len(train_dataloader)}") |
|
|
logger.info(f" Num Epochs = {args.num_train_epochs}") |
|
|
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") |
|
|
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") |
|
|
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") |
|
|
logger.info(f" Total optimization steps = {args.max_train_steps}") |
|
|
global_step = 0 |
|
|
first_epoch = 0 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if args.resume_from_checkpoint: |
|
|
if args.resume_from_checkpoint != "latest": |
|
|
path = os.path.basename(args.resume_from_checkpoint) |
|
|
else: |
|
|
|
|
|
dirs = os.listdir(args.output_dir) |
|
|
dirs = [d for d in dirs if d.startswith("checkpoint")] |
|
|
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) |
|
|
path = dirs[-1] if len(dirs) > 0 else None |
|
|
|
|
|
if path is None: |
|
|
accelerator.print( |
|
|
f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." |
|
|
) |
|
|
args.resume_from_checkpoint = None |
|
|
initial_global_step = 0 |
|
|
else: |
|
|
accelerator.print(f"Resuming from checkpoint {path}") |
|
|
accelerator.load_state(os.path.join(args.output_dir, path)) |
|
|
global_step = int(path.split("-")[1]) |
|
|
|
|
|
initial_global_step = global_step |
|
|
resume_global_step = global_step * args.gradient_accumulation_steps |
|
|
first_epoch = global_step // num_update_steps_per_epoch |
|
|
resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps) |
|
|
|
|
|
|
|
|
else: |
|
|
initial_global_step = 0 |
|
|
|
|
|
progress_bar = tqdm( |
|
|
range(0, args.max_train_steps), |
|
|
initial=initial_global_step, |
|
|
desc="Steps", |
|
|
|
|
|
disable=not accelerator.is_local_main_process, |
|
|
) |
|
|
|
|
|
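# Map each sampled timestep to its flow-matching sigma and reshape it so it broadcasts
# over the latent dimensions.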
def get_sigmas(timesteps, n_dim=4, dtype=torch.float32): |
|
|
sigmas = noise_scheduler_copy.sigmas.to(device=accelerator.device, dtype=dtype) |
|
|
schedule_timesteps = noise_scheduler_copy.timesteps.to(accelerator.device) |
|
|
timesteps = timesteps.to(accelerator.device) |
|
|
step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps] |
|
|
|
|
|
sigma = sigmas[step_indices].flatten() |
|
|
while len(sigma.shape) < n_dim: |
|
|
sigma = sigma.unsqueeze(-1) |
|
|
return sigma |
|
|
|
|
|
|
|
|
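# Main training loop over epochs and preference batches.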
for epoch in range(first_epoch, args.num_train_epochs): |
|
|
|
|
|
|
|
|
transformer.train() |
|
|
if args.train_text_encoder: |
|
|
text_encoder_one.train() |
|
|
text_encoder_two.train() |
|
|
|
|
|
|
|
|
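# Re-enable gradients on the token embeddings each epoch (as in the upstream SD3 DreamBooth
# LoRA script, this keeps gradients flowing when the text encoders are fine-tuned).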
accelerator.unwrap_model(text_encoder_one).text_model.embeddings.requires_grad_(True) |
|
|
accelerator.unwrap_model(text_encoder_two).text_model.embeddings.requires_grad_(True) |
|
|
|
|
|
for step, batch in enumerate(train_dataloader): |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
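# When resuming mid-epoch, skip the batches that were already consumed before the checkpoint was written.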
if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: |
|
|
continue |
|
|
models_to_accumulate = [transformer] |
|
|
if args.train_text_encoder: |
|
|
models_to_accumulate.extend([text_encoder_one, text_encoder_two]) |
|
|
with accelerator.accumulate(models_to_accumulate): |
|
|
prompts = batch["prompts"] |
|
|
|
|
|
|
|
|
|
|
|
if not args.train_text_encoder: |
|
|
prompt_embeds, pooled_prompt_embeds = compute_text_embeddings( |
|
|
prompts, text_encoders, tokenizers |
|
|
) |
|
|
else: |
|
|
tokens_one = tokenize_prompt(tokenizer_one, prompts) |
|
|
tokens_two = tokenize_prompt(tokenizer_two, prompts) |
|
|
tokens_three = tokenize_prompt(tokenizer_three, prompts) |
|
|
text_encoders = [accelerator.unwrap_model(text_encoder_one), accelerator.unwrap_model(text_encoder_two), text_encoder_three]
|
|
|
|
|
|
|
|
prompt_embeds, pooled_prompt_embeds = encode_prompt( |
|
|
text_encoders, |
|
|
tokenizers=[None, None, None], |
|
|
prompt=prompts, |
|
|
max_sequence_length=args.max_sequence_length, |
|
|
text_input_ids_list=[tokens_one, tokens_two, tokens_three], |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
pixel_values = batch["pixel_values"].to(dtype=vae.dtype) |
|
|
|
|
|
|
|
|
|
|
|
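# Each sample stores the preferred ("win") and dispreferred ("lose") image concatenated along
# the channel dim; split them and stack along the batch dim, so the first half of the batch
# holds win images and the second half lose images.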
feed_pixel_values = torch.cat(pixel_values.chunk(2, dim=1)) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
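# Encode pixels to VAE latents in chunks of train_batch_size to bound peak memory.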
model_input = [] |
|
|
|
|
|
for i in range(0, feed_pixel_values.shape[0], args.train_batch_size): |
|
|
model_input.append( |
|
|
vae.encode(feed_pixel_values[i : i + args.train_batch_size]).latent_dist.sample() |
|
|
) |
|
|
model_input = torch.cat(model_input, dim=0) |
|
|
model_input = (model_input - vae_config_shift_factor) * vae.config.scaling_factor
|
|
|
|
|
model_input = model_input.to(dtype=weight_dtype) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
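# Draw one noise tensor per preference pair and reuse it for both halves, so win and lose
# samples are compared under identical noise.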
noise = torch.randn_like(model_input).chunk(2)[0].repeat(2, 1, 1, 1) |
|
|
bsz = model_input.shape[0] // 2 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
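# Sample one timestep per pair from the chosen density, then repeat it so the win and lose
# samples share the same timestep.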
u = compute_density_for_timestep_sampling( |
|
|
weighting_scheme=args.weighting_scheme, |
|
|
batch_size=bsz, |
|
|
logit_mean=args.logit_mean, |
|
|
logit_std=args.logit_std, |
|
|
mode_scale=args.mode_scale, |
|
|
) |
|
|
indices = (u * noise_scheduler_copy.config.num_train_timesteps).long() |
|
|
timesteps = noise_scheduler_copy.timesteps[indices].to(device=model_input.device).repeat(2) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
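# Flow-matching forward process: z_t = (1 - sigma) * x_0 + sigma * noise, with sigma broadcast per sample.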
sigmas = get_sigmas(timesteps, n_dim=model_input.ndim, dtype=model_input.dtype) |
|
|
noisy_model_input = (1.0 - sigmas) * model_input + sigmas * noise |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
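# Duplicate the prompt embeddings so they align with the win/lose halves of the batch.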
prompt_embeds = prompt_embeds.repeat(2, 1, 1) |
|
|
pooled_prompt_embeds = pooled_prompt_embeds.repeat(2, 1) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
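# Forward pass of the LoRA-adapted transformer (the "policy" being trained).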
model_pred = transformer( |
|
|
hidden_states=noisy_model_input, |
|
|
timestep=timesteps, |
|
|
encoder_hidden_states=prompt_embeds, |
|
|
pooled_projections=pooled_prompt_embeds, |
|
|
return_dict=False, |
|
|
)[0] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
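# With precondition_outputs the prediction is converted to x0 space, and the regression
# target below is chosen to match.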
if args.precondition_outputs: |
|
|
model_pred = model_pred * (-sigmas) + noisy_model_input |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
weighting = compute_loss_weighting_for_sd3(weighting_scheme=args.weighting_scheme, sigmas=sigmas) |
|
|
|
|
|
|
|
|
if args.precondition_outputs: |
|
|
target = model_input |
|
|
else: |
|
|
target = noise - model_input |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
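# Weighted per-sample MSE, reduced over all non-batch dims and split into win/lose halves;
# model_diff measures how much worse the model fits the win sample than the lose sample.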
per_pixel_loss = (weighting.float() * (model_pred.float() - target.float()) ** 2) |
|
|
|
|
|
model_losses = per_pixel_loss.mean(dim=list(range(1, len(per_pixel_loss.shape)))) |
|
|
model_losses_w, model_losses_l = model_losses.chunk(2) |
|
|
raw_model_loss = 0.5 * (model_losses_w.mean() + model_losses_l.mean()) |
|
|
model_diff = model_losses_w - model_losses_l |
|
|
|
|
|
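# Reference pass: disable the LoRA adapters so the frozen base transformer scores the same
# noisy inputs, then re-enable them afterwards.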
accelerator.unwrap_model(transformer).disable_adapters() |
|
|
with torch.no_grad(): |
|
|
ref_pred = transformer( |
|
|
hidden_states=noisy_model_input, |
|
|
timestep=timesteps, |
|
|
encoder_hidden_states=prompt_embeds, |
|
|
pooled_projections=pooled_prompt_embeds, |
|
|
return_dict=False, |
|
|
)[0].detach() |
|
|
|
|
|
if args.precondition_outputs: |
|
|
ref_pred = ref_pred * (-sigmas) + noisy_model_input |
|
|
|
|
|
ref_per_pixel_loss = (weighting.float() * (ref_pred.float() - target.float()) ** 2) |
|
|
|
|
|
ref_losses = ref_per_pixel_loss.mean(dim=list(range(1, len(ref_per_pixel_loss.shape)))) |
|
|
ref_losses_w, ref_losses_l = ref_losses.chunk(2) |
|
|
raw_ref_loss = 0.5 * (ref_losses_w.mean() + ref_losses_l.mean()) |
|
|
ref_diff = ref_losses_w - ref_losses_l |
|
|
|
|
|
|
|
|
|
|
|
accelerator.unwrap_model(transformer).enable_adapters() |
|
|
|
|
|
|
|
|
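# Diffusion-DPO objective: logits = (ref_w - ref_l) - (model_w - model_l); minimizing
# -log(sigmoid(beta/2 * logits)) pushes the adapted model to fit the preferred sample better
# than the dispreferred one, relative to the frozen reference model.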
logits = ref_diff - model_diff |
|
|
|
|
|
loss = -F.logsigmoid(0.5 * args.beta_dpo * logits).mean()
|
|
|
|
|
|
|
|
accelerator.backward(loss) |
|
|
|
|
|
|
|
|
|
|
|
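# Once gradients are synchronized, clip all trainable LoRA parameters to max_grad_norm.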
if accelerator.sync_gradients: |
|
|
params_to_clip = ( |
|
|
itertools.chain( |
|
|
transformer_lora_parameters, text_lora_parameters_one, text_lora_parameters_two |
|
|
) |
|
|
if args.train_text_encoder |
|
|
else transformer_lora_parameters |
|
|
) |
|
|
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) |
|
|
|
|
|
optimizer.step() |
|
|
lr_scheduler.step() |
|
|
optimizer.zero_grad() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if accelerator.sync_gradients: |
|
|
progress_bar.update(1) |
|
|
global_step += 1 |
|
|
|
|
|
if accelerator.is_main_process or accelerator.distributed_type == DistributedType.DEEPSPEED: |
|
|
|
|
|
if global_step % args.checkpointing_steps == 0: |
|
|
|
|
|
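# Rotate old checkpoints so at most checkpoints_total_limit directories remain before saving a new one.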
if args.checkpoints_total_limit is not None: |
|
|
checkpoints = os.listdir(args.output_dir) |
|
|
checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] |
|
|
checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) |
|
|
|
|
|
|
|
|
if len(checkpoints) >= args.checkpoints_total_limit: |
|
|
num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 |
|
|
removing_checkpoints = checkpoints[0:num_to_remove] |
|
|
|
|
|
logger.info( |
|
|
f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" |
|
|
) |
|
|
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") |
|
|
|
|
|
for removing_checkpoint in removing_checkpoints: |
|
|
removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) |
|
|
shutil.rmtree(removing_checkpoint) |
|
|
|
|
|
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") |
|
|
accelerator.save_state(save_path) |
|
|
logger.info(f"Saved state to {save_path}") |
|
|
if accelerator.is_main_process: |
|
|
if args.validation_prompts is not None and global_step % args.validation_steps == 0: |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
pipeline = StableDiffusion3Pipeline.from_pretrained( |
|
|
args.pretrained_model_name_or_path, |
|
|
vae=vae, |
|
|
text_encoder=accelerator.unwrap_model(text_encoder_one), |
|
|
text_encoder_2=accelerator.unwrap_model(text_encoder_two), |
|
|
text_encoder_3=accelerator.unwrap_model(text_encoder_three), |
|
|
transformer=accelerator.unwrap_model(transformer), |
|
|
revision=args.revision, |
|
|
variant=args.variant, |
|
|
torch_dtype=weight_dtype, |
|
|
) |
|
|
images = [] |
|
|
if args.validation_prompts and args.num_validation_images > 0: |
|
|
pipeline_args = [{"prompt": prompt} for prompt in args.validation_prompts] |
|
|
images = log_validation( |
|
|
pipeline=pipeline, |
|
|
args=args, |
|
|
accelerator=accelerator, |
|
|
pipeline_args=pipeline_args, |
|
|
global_step=global_step, |
|
|
torch_dtype=weight_dtype, |
|
|
) |
|
|
for i, image in enumerate(images):
|
|
|
|
|
|
|
|
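# NOTE: this assumes a checkpoint was just written at this step, so save_path points at the
# current checkpoint-<global_step> directory; if validation_steps is not a multiple of
# checkpointing_steps, save_path may be stale or undefined.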
image_path = os.path.join(save_path, f"validation_image_{i}.png") |
|
|
image.save(image_path) |
|
|
images = None |
|
|
del pipeline |
|
|
|
|
|
logs = { |
|
|
"loss": loss.detach().item(), |
|
|
"raw_model_loss": raw_model_loss.detach().item(), |
|
|
"ref_loss": raw_ref_loss.detach().item(), |
|
|
"lr": lr_scheduler.get_last_lr()[0], |
|
|
} |
|
|
progress_bar.set_postfix(**logs) |
|
|
accelerator.log(logs, step=global_step) |
|
|
|
|
|
if global_step >= args.max_train_steps: |
|
|
break |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
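# Training finished: synchronize processes, unwrap the models, extract the LoRA state dicts
# and save them in the StableDiffusion3Pipeline LoRA format.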
accelerator.wait_for_everyone() |
|
|
if accelerator.is_main_process: |
|
|
transformer = unwrap_model(transformer) |
|
|
if args.upcast_before_saving: |
|
|
transformer.to(torch.float32) |
|
|
else: |
|
|
transformer = transformer.to(weight_dtype) |
|
|
transformer_lora_layers = get_peft_model_state_dict(transformer) |
|
|
|
|
|
if args.train_text_encoder: |
|
|
text_encoder_one = unwrap_model(text_encoder_one) |
|
|
text_encoder_lora_layers = get_peft_model_state_dict(text_encoder_one.to(torch.float32)) |
|
|
text_encoder_two = unwrap_model(text_encoder_two) |
|
|
text_encoder_2_lora_layers = get_peft_model_state_dict(text_encoder_two.to(torch.float32)) |
|
|
else: |
|
|
text_encoder_lora_layers = None |
|
|
text_encoder_2_lora_layers = None |
|
|
|
|
|
StableDiffusion3Pipeline.save_lora_weights( |
|
|
save_directory=args.output_dir, |
|
|
transformer_lora_layers=transformer_lora_layers, |
|
|
text_encoder_lora_layers=text_encoder_lora_layers, |
|
|
text_encoder_2_lora_layers=text_encoder_2_lora_layers, |
|
|
) |
|
|
|
|
|
del transformer, text_encoder_one, text_encoder_two, text_encoder_three, vae |
|
|
torch.cuda.empty_cache() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if args.push_to_hub: |
|
|
save_model_card( |
|
|
repo_id, |
|
|
images=images, |
|
|
base_model=args.pretrained_model_name_or_path, |
|
|
instance_prompt=args.instance_prompt, |
|
|
validation_prompts=args.validation_prompts, |
|
|
train_text_encoder=args.train_text_encoder, |
|
|
repo_folder=args.output_dir, |
|
|
) |
|
|
upload_folder( |
|
|
repo_id=repo_id, |
|
|
folder_path=args.output_dir, |
|
|
commit_message="End of training", |
|
|
ignore_patterns=["step_*", "epoch_*"], |
|
|
) |
|
|
|
|
|
accelerator.end_training() |
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
args = parse_args() |
|
|
main(args) |
|
|
|