#!/usr/bin/env python
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
import itertools
import logging
import math
import os
import random
import shutil
import warnings
from contextlib import nullcontext
from pathlib import Path
import pandas as pd
from collections import defaultdict
import numpy as np
import torch
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator, DistributedType
from accelerate.logging import get_logger
from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
from huggingface_hub import create_repo, upload_folder
from huggingface_hub.utils import insecure_hashlib
from peft import LoraConfig, set_peft_model_state_dict
from peft.utils import get_peft_model_state_dict
from PIL import Image
from PIL.ImageOps import exif_transpose
from torch.utils.data import Dataset
from torchvision import transforms
from torchvision.transforms.functional import crop
from tqdm.auto import tqdm
from transformers import CLIPTokenizer, PretrainedConfig, T5TokenizerFast
import torch.nn.functional as F
import diffusers
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
SD3Transformer2DModel,
StableDiffusion3Pipeline,
)
from diffusers.optimization import get_scheduler
from diffusers.training_utils import (
_set_state_dict_into_text_encoder,
cast_training_params,
compute_density_for_timestep_sampling,
compute_loss_weighting_for_sd3,
free_memory,
)
from diffusers.utils import (
check_min_version,
convert_unet_state_dict_to_peft,
is_wandb_available,
)
from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
from diffusers.utils.torch_utils import is_compiled_module
if is_wandb_available():
import wandb
os.environ["WANDB_API_KEY"] = 'c3c7dc2e7a43cc9e0b4cc8e913d363077af04ab2'
os.environ["WANDB_MODE"] = "offline"
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.33.0.dev0")
logger = get_logger(__name__)
DATASET_NAME_MAPPING = {
# "refl": ("image", "text"),
}
# Save the model's metadata to the README.md file
def save_model_card(
repo_id: str,
images=None,
base_model: str = None,
train_text_encoder=False,
instance_prompt=None,
validation_prompts=None,
repo_folder=None,
):
if "large" in base_model:
model_variant = "SD3.5-Large"
license_url = "https://huggingface.co/stabilityai/stable-diffusion-3.5-large/blob/main/LICENSE.md"
variant_tags = ["sd3.5-large", "sd3.5", "sd3.5-diffusers"]
else:
model_variant = "SD3"
license_url = "https://huggingface.co/stabilityai/stable-diffusion-3-medium/blob/main/LICENSE.md"
variant_tags = ["sd3", "sd3-diffusers"]
widget_dict = []
if images is not None:
for i, image in enumerate(images):
image.save(os.path.join(repo_folder, f"image_{i}.png"))
widget_dict.append(
{"text": validation_prompts if validation_prompts else " ", "output": {"url": f"image_{i}.png"}}
            )  # save each validation prompt together with its generated image
model_description = f"""
# {model_variant} DreamBooth LoRA - {repo_id}
<Gallery />
## Model description
These are {repo_id} DreamBooth LoRA weights for {base_model}.
The weights were trained using [DreamBooth](https://dreambooth.github.io/) with the [SD3 diffusers trainer](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/README_sd3.md).
Was LoRA for the text encoder enabled? {train_text_encoder}.
## Trigger words
You should use `{instance_prompt}` to trigger the image generation.
## Download model
[Download the *.safetensors LoRA]({repo_id}/tree/main) in the Files & versions tab.
## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)
```py
from diffusers import AutoPipelineForText2Image
import torch
pipeline = AutoPipelineForText2Image.from_pretrained('{base_model}', torch_dtype=torch.float16).to('cuda')
pipeline.load_lora_weights('{repo_id}', weight_name='pytorch_lora_weights.safetensors')
image = pipeline('{validation_prompts}').images[0]
```
### Use it with UIs such as AUTOMATIC1111, Comfy UI, SD.Next, Invoke
- **LoRA**: download **[`diffusers_lora_weights.safetensors` here 💾](/{repo_id}/blob/main/diffusers_lora_weights.safetensors)**.
- Rename it and place it on your `models/Lora` folder.
- On AUTOMATIC1111, load the LoRA by adding `<lora:your_new_name:1>` to your prompt. On ComfyUI just [load it as a regular LoRA](https://comfyanonymous.github.io/ComfyUI_examples/lora/).
For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
## License
Please adhere to the licensing terms as described [here]({license_url}).
"""
model_card = load_or_create_model_card(
repo_id_or_path=repo_id,
from_training=True,
license="other",
base_model=base_model,
model_description=model_description,
widget=widget_dict,
)
tags = [
"text-to-image",
"diffusers-training",
"diffusers",
"lora",
"template:sd-lora",
]
tags += variant_tags
model_card = populate_model_card(model_card, tags=tags)
model_card.save(os.path.join(repo_folder, "README.md"))
# Load the three text encoders
def load_text_encoders(class_one, class_two, class_three):
text_encoder_one = class_one.from_pretrained(
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant
)
text_encoder_two = class_two.from_pretrained(
args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant
)
text_encoder_three = class_three.from_pretrained(
args.pretrained_model_name_or_path, subfolder="text_encoder_3", revision=args.revision, variant=args.variant
)
return text_encoder_one, text_encoder_two, text_encoder_three
# Save each validation run's images as logs and upload them to wandb
def log_validation(
pipeline,
args,
accelerator,
    pipeline_args,  # now a list of prompt configurations
global_step,
torch_dtype,
is_final_validation=False,
):
logger.info(
f"Running validation... \n Generating {len(pipeline_args)} images with prompts:"
f" {[args['prompt'] for args in pipeline_args]}."
)
pipeline = pipeline.to(accelerator.device)
pipeline.set_progress_bar_config(disable=True)
autocast_ctx = nullcontext()
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed is not None else None
# generate one image for each prompt dict in pipeline_args
images = []
with autocast_ctx:
for single_args in pipeline_args:
image = pipeline(**single_args, generator=generator).images[0]
images.append(image)
# Logging to trackers
for tracker in accelerator.trackers:
phase_name = "test" if is_final_validation else "validation"
if tracker.name == "tensorboard":
np_images = np.stack([np.asarray(img) for img in images])
tracker.writer.add_images(phase_name, np_images, global_step, dataformats="NHWC")
elif tracker.name == "wandb":
tracker.log({
phase_name: [
wandb.Image(image, caption=f"{i}: {args['prompt']}")
for i, (image, args) in enumerate(zip(images, pipeline_args))
]
})
del pipeline
free_memory()
return images
# Load the model config from the given model name or path, then import the corresponding model class
def import_model_class_from_model_name_or_path(
pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
):
text_encoder_config = PretrainedConfig.from_pretrained(
pretrained_model_name_or_path, subfolder=subfolder, revision=revision
)
model_class = text_encoder_config.architectures[0]
if model_class == "CLIPTextModelWithProjection":
from transformers import CLIPTextModelWithProjection
return CLIPTextModelWithProjection
elif model_class == "T5EncoderModel":
from transformers import T5EncoderModel
return T5EncoderModel
else:
raise ValueError(f"{model_class} is not supported.")
# Set up and parse the command-line arguments
def parse_args(input_args=None):
parser = argparse.ArgumentParser(description="Simple example of a training script.")
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
    )  # If this argument is omitted:
    # - loading from a plain local checkpoint directory (e.g. a locally saved fine-tuned model)
    #   has no notion of branches, so nothing changes;
    # - loading from a Hugging Face Hub repo such as "stabilityai/stable-diffusion-3" falls back
    #   to the repo's default branch.
parser.add_argument(
"--variant",
type=str,
default=None,
        help="Variant of the model files of the pretrained model identifier from huggingface.co/models, e.g. fp16",
    )  # --variant selects among different file variants of the pretrained model.
    # Models hosted on the Hugging Face Hub (huggingface.co/models) sometimes ship several
    # versions, e.g. fp16 vs fp32 weights, or other precisions such as bfloat16; --variant picks
    # one of them. If unspecified, the default (usually full-precision) files are used.
parser.add_argument(
"--dataset_name",
type=str,
default=None,
help=(
"The name of the Dataset (from the HuggingFace hub) containing the training data of instance images (could be your own, possibly private,"
" dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
" or to a folder containing files that 🤗 Datasets can understand."
),
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The config of the Dataset, leave as None if there's only one config.",
)
parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="The directory where the downloaded models and datasets will be stored.",
)
parser.add_argument("--repeats", type=int, default=1, help="How many times to repeat the training data.")
# parser.add_argument(
# "--reward_file",
# type=str,
# default=None,
# help="The file stored the reward model scores of all images in database.",
# )
parser.add_argument(
"--image_column",
type=str,
default="image",
help="The column of the dataset containing the target image. By "
"default, the standard Image Dataset maps out 'file_name' "
"to 'image'.",
)
parser.add_argument(
"--caption_column",
type=str,
default=None,
help="The column of the dataset containing the instance prompt for each image",
)
parser.add_argument(
"--max_sequence_length",
type=int,
default=512,
        help="Maximum sequence length to use with the T5 text encoder",
)
parser.add_argument(
"--validation_prompts",
type=str,
        nargs="+",  # accepts one or more values; it does not mean the values are joined with a plus sign
default=None,
        help="One or more prompts used during validation to verify that the model is learning. Validation happens at each `--checkpointing_steps`."
)
parser.add_argument(
"--num_validation_images",
type=int,
default=4,
help="Number of images that should be generated during validation with `validation_prompts`.",
    )  # Used together with --sample_batch_size: sampling is usually looped several times to
    # generate enough images.
parser.add_argument(
"--validation_steps",
type=int,
default=1,
        help=(
            "Run DreamBooth validation every X steps. DreamBooth validation consists of running the prompts in"
            " `args.validation_prompts` multiple times: `args.num_validation_images`."
        ),
)
parser.add_argument(
"--rank",
type=int,
default=768,
help=("The dimension of the LoRA update matrices."),
)
parser.add_argument(
"--beta_dpo",
type=int,
default=2500,
help="DPO KL Divergence penalty.",
)
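    # For reference, a hedged sketch (not this script's exact code) of how --beta_dpo typically
    # enters the Diffusion-DPO objective, following the diffusers DPO example; model_losses_win,
    # model_losses_loss, ref_losses_win and ref_losses_loss are stand-in names for the per-pair
    # MSE losses of the trained model and a frozen reference:
    #
    #     model_diff = model_losses_win - model_losses_loss  # trained policy
    #     ref_diff = ref_losses_win - ref_losses_loss        # frozen reference, computed under no_grad
    #     inside_term = -0.5 * args.beta_dpo * (model_diff - ref_diff)
    #     loss = -F.logsigmoid(inside_term).mean()
    #
    # Larger beta_dpo values penalize drifting away from the reference model more strongly.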
    # LoRA approximates the change to the trained parameters with two lower-rank matrices
    # (usually called A and B) instead of modifying the original model parameters directly.
    # The rank sets the dimensions of those matrices: a larger rank gives LoRA more modeling
    # capacity but also uses more GPU memory. Common choices are 4, 8, or 16.
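    # A minimal sketch of the update LoRA learns (illustrative only): for a frozen weight W of
    # shape [d_out, d_in], LoRA trains B [d_out, r] and A [r, d_in] and computes
    #
    #     h = W @ x + (lora_alpha / r) * (B @ (A @ x))
    #
    # so the trainable parameter count grows linearly with the rank r.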
parser.add_argument(
"--output_dir",
type=str,
default="sd3-dreambooth",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=1024,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--center_crop",
default=True,
action="store_true",
help=(
"Whether to center crop the input images to the resolution. If not set, the images will be randomly"
" cropped. The images will be resized to the resolution first before cropping."
),
)
parser.add_argument(
"--random_flip",
action="store_true",
help="whether to randomly flip images horizontally",
)
parser.add_argument(
"--train_text_encoder",
action="store_true",
help="Whether to train the text encoder (clip text encoders only). If set, the text encoder should be float32 precision.",
)
parser.add_argument(
"--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
)
parser.add_argument(
"--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
    )  # Controls how many images each GPU samples at once when generating images (inference)
    # during training or validation. Sampling needs no backpropagation (no gradient memory),
    # so this applies to validation generation, inference, demos, and similar scenarios.
parser.add_argument("--num_train_epochs", type=int, default=100)
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--checkpointing_steps",
type=int,
default=500,
help=(
"Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
" checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
" training using `--resume_from_checkpoint`."
),
)
parser.add_argument(
"--checkpoints_total_limit",
type=int,
default=None,
help=("Max number of checkpoints to store."),
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help=(
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
),
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--gradient_checkpointing",
action="store_true",
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=1e-4,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--text_encoder_lr",
type=float,
default=5e-6,
help="Text encoder learning rate to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--lr_num_cycles",
type=int,
default=1,
help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
)
    # How the cosine_with_restarts scheduler works:
    # the learning rate decays along a cosine curve, and periodically "restarts" from a high
    # value to begin a new decay cycle, helping the model escape local optima.
    # lr_num_cycles controls how many such decay -> restart cycles happen over the whole run.
    # Why use restarts?
    # - They simulate training "multiple times";
    # - help avoid getting stuck in local optima;
    # - and can improve convergence stability and generalization.
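    # A hedged usage sketch of cosine_with_restarts via diffusers' get_scheduler (the step counts
    # below are made-up values for illustration):
    #
    #     from diffusers.optimization import get_scheduler
    #     lr_scheduler = get_scheduler(
    #         "cosine_with_restarts",
    #         optimizer=optimizer,
    #         num_warmup_steps=500,
    #         num_training_steps=10_000,
    #         num_cycles=3,  # three decay -> restart periods over the run
    #     )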
parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
    # This parameter controls how fast the learning rate decays (polynomial scheduler only):
    # 1.0 gives linear decay, 2.0 quadratic, 3.0 cubic.
    # 1.0 or 2.0 are the usual choices.
parser.add_argument(
"--dataloader_num_workers",
type=int,
default=0,
help=(
"Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
),
)
parser.add_argument(
"--weighting_scheme",
type=str,
default="logit_normal",
choices=["sigma_sqrt", "logit_normal", "mode", "cosmap"],
    )  # --weighting_scheme decides how training emphasis is distributed across noise levels
    # (timesteps): it controls how timesteps are sampled (compute_density_for_timestep_sampling)
    # and/or how the loss is weighted per sigma (compute_loss_weighting_for_sd3).
    # sigma_sqrt:
    #   weights the loss as a function of the noise level sigma, balancing training across
    #   different noise strengths; useful when noise variance differs a lot across samples.
    # logit_normal:
    #   samples timesteps from a normal distribution in logit space (squashed through a
    #   sigmoid-like S curve); logit_mean and logit_std set the curve's center and steepness.
    # mode:
    #   concentrates sampling around an emphasized mid-range of noise levels; mode_scale
    #   controls how strongly.
    # cosmap:
    #   a cosine-based mapping that weights noise levels smoothly, highest in the middle and
    #   lower at the extremes; a gentler weighting when the distribution is not sharply peaked.
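    # A hedged sketch of how these schemes are consumed in the training step (mirroring the
    # diffusers SD3 trainer; bsz is a stand-in for the batch size):
    #
    #     u = compute_density_for_timestep_sampling(
    #         weighting_scheme=args.weighting_scheme,
    #         batch_size=bsz,
    #         logit_mean=args.logit_mean,
    #         logit_std=args.logit_std,
    #         mode_scale=args.mode_scale,
    #     )
    #     indices = (u * noise_scheduler_copy.config.num_train_timesteps).long()
    #     timesteps = noise_scheduler_copy.timesteps[indices]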
parser.add_argument(
"--logit_mean", type=float, default=0.0, help="mean to use when using the `'logit_normal'` weighting scheme."
    )  # --logit_mean and --logit_std parameterize the logit_normal scheme.
    # They set the mean and standard deviation of the sampling distribution in logit space:
    # the mean fixes where sampling concentrates and the std how widely it spreads,
    # which together shape the resulting timestep distribution.
    # Typically the mean is set to 0.0 and the std to 1.0.
parser.add_argument(
"--logit_std", type=float, default=1.0, help="std to use when using the `'logit_normal'` weighting scheme."
)
parser.add_argument(
"--mode_scale",
type=float,
default=1.29,
help="Scale of mode weighting scheme. Only effective when using the `'mode'` as the `weighting_scheme`.",
    )  # --mode_scale is the parameter of the `mode` scheme:
    # it widens or narrows the weighting around the emphasized region; larger values bias
    # sampling toward it more strongly.
parser.add_argument(
"--precondition_outputs",
type=int,
default=1,
help="Flag indicating if we are preconditioning the model outputs or not as done in EDM. This affects how "
"model `target` is calculated.",
    )  # Controls whether the model outputs are preconditioned, as done in EDM
    # (Elucidated Diffusion Models); this changes how the training `target` is computed.
    # 0: compute the loss against the raw target.
    # 1: compute the loss against the EDM-style adjusted target.
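    # A hedged sketch of the two branches, following the diffusers SD3 trainer (model_pred,
    # noisy_model_input, model_input, noise and sigmas are stand-ins from the training step):
    #
    #     if args.precondition_outputs:
    #         model_pred = model_pred * (-sigmas) + noisy_model_input
    #         target = model_input          # the model is preconditioned to predict clean latents
    #     else:
    #         target = noise - model_input  # predict the flow-matching velocity directly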
parser.add_argument(
"--optimizer",
type=str,
default="AdamW",
help=('The optimizer type to use. Choose between ["AdamW", "prodigy"]'),
)
parser.add_argument(
"--use_8bit_adam",
action="store_true",
help="Whether or not to use 8-bit Adam from bitsandbytes. Ignored if optimizer is not set to AdamW",
    )  # Adam (Adaptive Moment Estimation) is an adaptive-learning-rate optimizer that improves
    # on SGD by combining momentum with RMSProp-style per-parameter scaling; it suits most
    # deep-learning tasks and converges quickly.
    # 8-bit Adam (implemented by the bitsandbytes library) compresses part of the optimizer state
    # to 8-bit integers, reducing memory use and speeding up computation.
    # Prodigy is a newer adaptive optimizer, best seen as a refinement of Adam, aiming for:
    # - faster convergence than Adam on some tasks;
    # - a more stable training process, responding more gently to learning rate and weight decay;
    # - built-in bias correction and a warm-up safeguard, so it is less prone to oscillation
    #   early in training.
    # Its core idea: besides tracking the first (gradient) and second (squared-gradient) moments,
    # it uses an extra beta3 coefficient to smooth the step size, and it adapts the step size from
    # gradient history rather than relying on the learning rate alone.
    # In short: Prodigy tries to keep Adam's strengths while improving efficiency and stability.
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam and Prodigy optimizers.")
    # The beta1 parameter for Adam and Prodigy: the exponential decay rate of the first-moment estimate (default 0.9).
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam and Prodigy optimizers.")
    # The beta2 parameter for Adam and Prodigy: the exponential decay rate of the second-moment estimate (default 0.999).
parser.add_argument(
"--prodigy_beta3",
type=float,
default=None,
        help="Coefficient for computing the Prodigy stepsize using running averages. If set to None, "
        "uses the square root of beta2. Ignored if optimizer is adamW",
)
    # The beta3 coefficient Prodigy uses to compute its step size; defaults to sqrt(beta2). Ignored unless the Prodigy optimizer is used.
parser.add_argument("--prodigy_decouple", type=bool, default=True, help="Use AdamW style decoupled weight decay")
    # Whether to use AdamW-style decoupled weight decay (weight decay separated from the gradient update), which usually improves training stability.
parser.add_argument("--adam_weight_decay", type=float, default=1e-04, help="Weight decay to use for unet params")
    # Weight decay applied to the transformer (UNet-equivalent) parameters; helps prevent overfitting (default 1e-4).
parser.add_argument("--adam_weight_decay_text_encoder", type=float, default=1e-03, help="Weight decay to use for text_encoder")
    # Weight decay applied to the text_encoder parameters (default 1e-3).
parser.add_argument(
"--lora_layers",
type=str,
default=None,
        help=(
            "The transformer block layers to apply LoRA training on. Please specify the layers in a comma separated string. "
            "For examples refer to https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/README_SD3.md"
        ),
)
    # Which sub-modules within the transformer layers LoRA is applied to, comma separated, e.g. "q_proj,k_proj,v_proj".
parser.add_argument(
"--lora_blocks",
type=str,
default=None,
        help=(
            "The transformer blocks to apply LoRA training on. Please specify the block numbers in a comma separated manner."
            ' E.g. - "--lora_blocks 12,30" will result in lora training of transformer blocks 12 and 30. For more examples refer to https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/README_SD3.md'
        ),
)
    # The transformer block indices to apply LoRA to, comma separated, e.g. "12,30".
    # --lora_blocks controls which transformer blocks use LoRA;
    # --lora_layers controls which sub-modules within those blocks LoRA targets;
    # so --lora_layers is a finer-grained control than --lora_blocks.
    # Note: in our runs both --lora_blocks and --lora_layers were left as None; LoRA is still
    # applied in that case, defaulting to a standard set of modules in every block, which the
    # peft library handles automatically.
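    # For example (hypothetical flags), `--lora_blocks "12,30" --lora_layers "attn.to_k,attn.to_v"`
    # would be expanded by the code further down into target modules such as:
    #
    #     transformer_blocks.12.attn.to_k, transformer_blocks.12.attn.to_v,
    #     transformer_blocks.30.attn.to_k, transformer_blocks.30.attn.to_v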
parser.add_argument(
"--adam_epsilon",
type=float,
default=1e-08,
help="Epsilon value for the Adam optimizer and Prodigy optimizers.",
)
    # Epsilon for the Adam and Prodigy optimizers, guarding against division by zero and numerical instability (default 1e-8).
parser.add_argument(
"--prodigy_use_bias_correction",
type=bool,
default=True,
help="Turn on Adam's bias correction. True by default. Ignored if optimizer is adamW",
)
    # Whether to enable Adam-style bias correction, which helps early training. Only effective with the Prodigy optimizer.
parser.add_argument(
"--prodigy_safeguard_warmup",
type=bool,
default=True,
help="Remove lr from the denominator of D estimate to avoid issues during warm-up stage. True by default. "
"Ignored if optimizer is adamW",
)
    # Whether to keep the lr out of the denominator of the D estimate during warm-up to stabilize training; only effective with Prodigy.
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--allow_tf32",
action="store_true",
help=(
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
),
    )  # --allow_tf32: whether to allow TF32 on Ampere-class GPUs (e.g. A100, RTX 3090).
    # TF32 compute is faster than FP32 but may slightly reduce precision.
parser.add_argument(
"--cache_latents",
action="store_true",
default=False,
help="Cache the VAE latents",
    )  # --cache_latents: whether to cache the VAE latents.
    # Caching speeds up training but increases memory use.
    # If training runs out of memory, consider turning the cache off;
    # if training is stable, the default is fine.
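    # A hedged sketch of what caching a latent looks like for SD3 (mirroring the commented-out
    # block further down; pixel_values is a stand-in for a batch of images):
    #
    #     with torch.no_grad():
    #         latents = vae.encode(pixel_values.to(vae.dtype)).latent_dist.sample()
    #         latents = (latents - vae.config.shift_factor) * vae.config.scaling_factor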
parser.add_argument(
"--report_to",
type=str,
default="tensorboard",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
),
)
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10 and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
)
parser.add_argument(
"--upcast_before_saving",
action="store_true",
default=False,
help=(
"Whether to upcast the trained transformer layers to float32 before saving (at the end of training). "
"Defaults to precision dtype used for training to save memory"
),
)
    # Whether to upcast the trained transformer layers to float32 before saving (by default they
    # keep the training precision). Upcasting avoids low-precision numerical issues but makes the
    # saved model larger.
# Personal usage arguments
parser.add_argument(
"--apply_pre_loss", action="store_true", help="Whether or not to apply pretrained loss."
)
parser.add_argument(
"--apply_reward_loss", action="store_true", help="Whether or not to apply reward loss."
)
# parser.add_argument(
# "--image_reward_version",
# type=str,
# required=True,
# help=(
# "The version of ImageReward to load with. This could be a downloadable version, i.e. ImageReward-v1.0"
# "or a file path."),
# )
parser.add_argument(
"--save_only_one_ckpt",
action="store_true",
        help="If given, only one checkpoint is kept over the whole training run: the one with the best training loss. "
        "This is useful to manage storage."
)
parser.add_argument(
"--image_base_dir",
        type=str,
        default="",
        help="The base directory where all the images are stored.",
)
parser.add_argument(
"--num_images",
type=int,
default=0,
        help="The number of images in the image base directory",
)
parser.add_argument(
"--train_data_file",
        type=str,
        default=None,
        help="The file storing all the training data (this script loads it with pandas.read_excel).",
    )  # When using a local dataset, be sure to provide both --train_data_file and --image_base_dir.
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    # The local process index for distributed training, normally injected automatically by the launcher; single-GPU training can ignore it.
if input_args is not None:
args = parser.parse_args(input_args)
else:
args = parser.parse_args()
if args.dataset_name is None and args.train_data_file is None:
raise ValueError("Specify either `--dataset_name` or `--train_data_file`")
if args.dataset_name is not None and args.train_data_file is not None:
raise ValueError("Specify only one of `--dataset_name` or `--train_data_file`")
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    # Read the current process index from the LOCAL_RANK environment variable;
    # if the variable is missing, it defaults to -1.
    if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
    # If LOCAL_RANK from the environment is not -1 (i.e. this really is distributed training) and
    # it disagrees with the --local_rank flag, trust the environment: the launcher usually sets it
    # correctly, while the command-line value may be missing or stale.
return args
class ImageDataset(Dataset):
def __init__(
self,
image_base_dir,
train_data_file,
size=1024,
repeats=1,
center_crop=True,
):
self.size = size
self.center_crop = center_crop
# if --dataset_name is provided or a metadata jsonl file is provided in the local --image_base_dir directory,
# we load the training data using load_dataset
if args.dataset_name is not None:
try:
from datasets import load_dataset
except ImportError:
                raise ImportError(
                    "You are trying to load your data using the datasets library. If you wish to train using custom "
                    "captions please install the datasets library: `pip install datasets`. If you wish to load a "
                    "local folder containing images only, specify --image_base_dir instead."
                )
# Downloading and loading a dataset from the hub.
# See more about loading custom images at
# https://huggingface.co/docs/datasets/v2.0.0/en/dataset_script
dataset = load_dataset(
args.dataset_name,
args.dataset_config_name,
cache_dir=args.cache_dir,
)
# Preprocessing the datasets.
column_names = dataset["train"].column_names
            # This code (and the processing that follows) treats the images as PIL Image data
# 6. Get the column names for input/target.
if args.image_column is None:
image_column = column_names[0]
logger.info(f"image column defaulting to {image_column}")
else:
image_column = args.image_column
if image_column not in column_names:
raise ValueError(
f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
)
            images = dataset["train"][image_column]  # images[i] is a dict or an Image (depending on the dataset format), usually auto-decoded by datasets into a PIL.Image-like object rather than a path string
if args.caption_column is None:
caption_column = column_names[1]
logger.info(f"caption column defaulting to {caption_column}")
                prompts = dataset["train"][caption_column]  # the array of actual prompt strings
else:
if args.caption_column not in column_names:
raise ValueError(
f"`--caption_column` value '{args.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
)
                prompts = dataset["train"][args.caption_column]  # the array of actual prompt strings
else:
df = pd.read_excel(train_data_file)
column_names = df.columns.tolist()
image_column = column_names[0]
caption_column = column_names[1]
mos1_column = column_names[2]
mos1_pred_column = column_names[3]
mos2_column = column_names[4]
mos2_pred_column = column_names[5]
            prompts = df[caption_column]  # the array of actual prompt strings
mos1_pred = df[mos1_pred_column]
mos2_pred = df[mos2_pred_column]
self.image_base_dir = Path(image_base_dir)
self.train_data_file = Path(train_data_file)
if not self.image_base_dir.exists():
                raise ValueError("Image base dir doesn't exist.")
images_path = df[image_column]
            # Strip the leading train_/test_ prefix from the file paths
images_file = [image_path.split('_', 1)[-1] for image_path in images_path]
            # # Select the sd3.5 images to compute the threshold
            # sd_score_mos1_pred = []
            # sd_score_mos2_pred = []
            # sd_score_mos1_all = 0
            # sd_score_mos2_all = 0
            # for i, image_file in enumerate(images_file):  # i starts at 0 by default; pass start= to enumerate to change that
            #     if image_file.split('/')[0] == "sd3_5_large":
            #         sd_score_mos1_pred.append(mos1_pred[i])
            #         sd_score_mos2_pred.append(mos2_pred[i])
            #         sd_score_mos1_all += float(mos1_pred[i])
            #         sd_score_mos2_all += float(mos2_pred[i])
            # print("sd_3.5 has", len(sd_score_mos1_pred), "images in total")
            # # Compute the threshold
            p = 0.5
            # thres = (p * sd_score_mos1_all + (1 - p) * sd_score_mos2_all) / len(sd_score_mos1_pred)
            # print("threshold:", thres)
            # Use the configured threshold to select the images for fine-tuning, and compute the final reward score the same way the threshold was computed
images_file_final = []
prompts_final = []
reward_final = []
for i,image_file in enumerate(images_file):
score = p*mos1_pred[i] + (1-p)*mos2_pred[i]
# if score >= thres:
images_file_final.append(image_file)
prompts_final.append(prompts[i])
reward_final.append(score)
            # print("After filtering,", len(images_file_final), "images remain\n")
            # print("After filtering,", len(prompts_final), "prompts remain\n")
            # print("After filtering,", len(reward_final), "reward scores remain\n")
            # Extract the image index (e.g. "001") for grouping
def extract_prompt_index(image_path):
                return image_path.split("/")[-1].split(".")[0]  # extract "001" from "ali_flux_schnell/001.png"
            # Build a mapping: index -> List[(image_path, reward, prompt)]
prompt_groups = defaultdict(list)
for img_path, reward, prompt in zip(images_file, reward_final, prompts_final):
idx = extract_prompt_index(img_path)
prompt_groups[idx].append((img_path, reward, prompt))
            # Build the DPO training data
dpo_data = {
"prompt": [],
"win": [],
"loss": [],
}
for idx, items in prompt_groups.items():
if len(items) < 2:
continue
                # Shuffle to randomize pairing
random.shuffle(items)
                # Build as many non-overlapping pairs as possible, consuming two images at a time
while len(items) >= 2:
sample1 = items.pop()
sample2 = items.pop()
img1, reward1, prompt1 = sample1
img2, reward2, prompt2 = sample2
                    # Skip ties (the pair is not put back)
if reward1 == reward2:
continue
if reward1 > reward2:
winner, loser, prompt = img1, img2, prompt1.strip()
else:
winner, loser, prompt = img2, img1, prompt2.strip()
dpo_data["prompt"].append(prompt)
dpo_data["win"].append(winner)
dpo_data["loss"].append(loser)
            print(f"Done: built {len(dpo_data['loss'])} DPO fine-tuning pairs.")
            # dpo_data is a dict of three parallel lists keyed by prompt / win / loss
df = pd.DataFrame(dpo_data)
            # Save as a CSV file
csv_save_path = "dpo_dataset.csv"
df.to_csv(csv_save_path, index=False, encoding='utf-8')
            print(f"DPO fine-tuning data saved as a CSV file: {csv_save_path}")
images_win = dpo_data["win"]
images_loss = dpo_data["loss"]
prompts = dpo_data["prompt"]
self.prompts = []
for caption in prompts:
self.prompts.extend(itertools.repeat(caption, repeats))
self.images_win = []
for img in images_win:
self.images_win.extend(itertools.repeat(img, repeats))
self.images_loss = []
for img in images_loss:
self.images_loss.extend(itertools.repeat(img, repeats))
train_resize = transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR)
train_crop = transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size)
train_flip = transforms.RandomHorizontalFlip(p=1.0)
img_transforms = []
img_transforms.append(train_resize)
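        # Note: this flip decision is made once here, at dataset-construction time, so either
        # every sample gets flipped (train_flip has p=1.0) or none do; it is not a per-sample flip.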
if args.random_flip and random.random() < 0.5:
img_transforms.append(train_flip)
if args.center_crop:
img_transforms.append(train_crop)
self.image_transforms = transforms.Compose(
[*img_transforms, transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]
)
self.num_images = len(self.images_win)
self._length = self.num_images
        # SDXL and its variants feed extra conditioning inputs; SD3.5 does not seem to take them the same way
# self.image_transforms = transforms.Compose(
# [
# transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
# transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
# transforms.ToTensor(),
# transforms.Normalize([0.5], [0.5]),
# ]
        # )  # Wrapper for the image transforms; the dataset images above were already processed, this is kept for later use
def __len__(self):
return self._length
    # Defining __getitem__ on a class lets its instances (say P) be indexed as P[key]; evaluating
    # P[key] calls the class's __getitem__ method.
    # If the class treats some attribute as a sequence, __getitem__ can return an element of it.
def __getitem__(self, index):
example = {}
all_images_input = []
images = []
image_win = self.images_win[index % self.num_images]
image_loss = self.images_loss[index % self.num_images]
images.append(image_win)
images.append(image_loss)
for image in images:
image_input = Image.open(os.path.join(args.image_base_dir, image))
if not image_input.mode == "RGB":
image_input = image_input.convert("RGB")
            image_input = exif_transpose(image_input)  # fix the image orientation from its EXIF metadata so it displays upright
image_input_tr = self.image_transforms(image_input)
all_images_input.append(image_input_tr)
combined_im = torch.cat(all_images_input, dim=0)
example["images_input"] = combined_im
caption = self.prompts[index % self.num_images]
example["prompts_input"] = caption
return example
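# A brief shape note on the example above (assuming 3-channel RGB inputs): "images_input" is the
# win and loss images concatenated along the channel dimension, i.e. a [6, size, size] tensor;
# downstream code is expected to recover the pair by chunking along that dimension again
# (e.g. pixel_values.chunk(2, dim=1) after batching).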
def collate_fn(examples):
pixel_values = [example["images_input"] for example in examples]
prompts = [example["prompts_input"] for example in examples]
    # Each example already holds the (win, loss) pair concatenated along channels, so stacking
    # them here lets a single forward pass cover both images instead of two passes.
pixel_values = torch.stack(pixel_values)
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
batch = {"pixel_values": pixel_values, "prompts": prompts}
    # Note: this already differs from CompBench in several places; for example, the prompts in
    # this batch are raw strings, whereas there they are already tokenized and padded.
    return batch  # pixel_values is the processed image data; prompts is the prompt data
# class PromptDataset(Dataset):
# "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
# def __init__(self, prompt, num_samples):
# self.prompt = prompt
# self.num_samples = num_samples
# def __len__(self):
# return self.num_samples
# def __getitem__(self, index):
# example = {}
# example["prompt"] = self.prompt
# example["index"] = index
# return example
def tokenize_prompt(tokenizer, prompt):
text_inputs = tokenizer(
prompt,
padding="max_length",
max_length=tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
return text_input_ids
def _encode_prompt_with_t5(
text_encoder,
tokenizer,
max_sequence_length,
prompt=None,
num_images_per_prompt=1,
device=None,
text_input_ids=None,
):
prompt = [prompt] if isinstance(prompt, str) else prompt
batch_size = len(prompt)
if tokenizer is not None:
text_inputs = tokenizer(
prompt,
padding="max_length",
max_length=max_sequence_length,
truncation=True,
add_special_tokens=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
else:
if text_input_ids is None:
raise ValueError("text_input_ids must be provided when the tokenizer is not specified")
prompt_embeds = text_encoder(text_input_ids.to(device))[0]
dtype = text_encoder.dtype
prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
_, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
return prompt_embeds
def _encode_prompt_with_clip(
text_encoder,
tokenizer,
prompt: str,
device=None,
text_input_ids=None,
num_images_per_prompt: int = 1,
):
prompt = [prompt] if isinstance(prompt, str) else prompt
batch_size = len(prompt)
if tokenizer is not None:
text_inputs = tokenizer(
prompt,
padding="max_length",
max_length=77,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
else:
if text_input_ids is None:
raise ValueError("text_input_ids must be provided when the tokenizer is not specified")
prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
pooled_prompt_embeds = prompt_embeds[0]
prompt_embeds = prompt_embeds.hidden_states[-2]
dtype = text_encoder.dtype
prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
_, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
return prompt_embeds, pooled_prompt_embeds
def encode_prompt(
text_encoders,
tokenizers,
prompt: str,
max_sequence_length,
device=None,
num_images_per_prompt: int = 1,
text_input_ids_list=None,
):
prompt = [prompt] if isinstance(prompt, str) else prompt
clip_tokenizers = tokenizers[:2]
clip_text_encoders = text_encoders[:2]
clip_prompt_embeds_list = []
clip_pooled_prompt_embeds_list = []
for i, (tokenizer, text_encoder) in enumerate(zip(clip_tokenizers, clip_text_encoders)):
prompt_embeds, pooled_prompt_embeds = _encode_prompt_with_clip(
text_encoder=text_encoder,
tokenizer=tokenizer,
prompt=prompt,
device=device if device is not None else text_encoder.device,
num_images_per_prompt=num_images_per_prompt,
text_input_ids=text_input_ids_list[i] if text_input_ids_list else None,
)
clip_prompt_embeds_list.append(prompt_embeds)
clip_pooled_prompt_embeds_list.append(pooled_prompt_embeds)
clip_prompt_embeds = torch.cat(clip_prompt_embeds_list, dim=-1)
pooled_prompt_embeds = torch.cat(clip_pooled_prompt_embeds_list, dim=-1)
t5_prompt_embed = _encode_prompt_with_t5(
text_encoders[-1],
tokenizers[-1],
max_sequence_length,
prompt=prompt,
num_images_per_prompt=num_images_per_prompt,
text_input_ids=text_input_ids_list[-1] if text_input_ids_list else None,
device=device if device is not None else text_encoders[-1].device,
)
clip_prompt_embeds = torch.nn.functional.pad(
clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1])
)
prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2)
return prompt_embeds, pooled_prompt_embeds
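# A hedged shape sketch for SD3-family checkpoints (assuming the standard encoders:
# CLIP-L 768-dim, CLIP-G 1280-dim, T5 4096-dim):
#
#     prompt_embeds, pooled = encode_prompt(text_encoders, tokenizers, ["a photo"], 256)
#     # prompt_embeds: [1, 77 + 256, 4096] - the CLIP token embeddings (768 + 1280 = 2048 dims,
#     #                zero-padded to 4096) concatenated with the T5 tokens along the sequence axis
#     # pooled:        [1, 2048] - the two pooled CLIP embeddings concatenated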
def main(args):
if args.report_to == "wandb" and args.hub_token is not None:
raise ValueError(
"You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
" Please use `huggingface-cli login` to authenticate with the Hub."
)
if torch.backends.mps.is_available() and args.mixed_precision == "bf16":
# due to pytorch#99272, MPS does not yet support bfloat16.
raise ValueError(
"Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead."
)
logging_dir = Path(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with=args.report_to,
project_config=accelerator_project_config,
kwargs_handlers=[kwargs],
)
# Disable AMP for MPS.
if torch.backends.mps.is_available():
accelerator.native_amp = False
if args.report_to == "wandb":
if not is_wandb_available():
raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
if args.push_to_hub:
repo_id = create_repo(
repo_id=args.hub_model_id or Path(args.output_dir).name,
exist_ok=True,
).repo_id
# Load the tokenizers
tokenizer_one = CLIPTokenizer.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="tokenizer",
revision=args.revision,
)
tokenizer_two = CLIPTokenizer.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="tokenizer_2",
revision=args.revision,
)
tokenizer_three = T5TokenizerFast.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="tokenizer_3",
revision=args.revision,
)
# import correct text encoder classes
text_encoder_cls_one = import_model_class_from_model_name_or_path(
args.pretrained_model_name_or_path, args.revision
)
text_encoder_cls_two = import_model_class_from_model_name_or_path(
args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2"
)
text_encoder_cls_three = import_model_class_from_model_name_or_path(
args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_3"
)
# Load scheduler and models
noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
args.pretrained_model_name_or_path, subfolder="scheduler"
)
noise_scheduler_copy = copy.deepcopy(noise_scheduler)
text_encoder_one, text_encoder_two, text_encoder_three = load_text_encoders(
text_encoder_cls_one, text_encoder_cls_two, text_encoder_cls_three
)
vae = AutoencoderKL.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="vae",
revision=args.revision,
variant=args.variant,
)
transformer = SD3Transformer2DModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder="transformer", revision=args.revision, variant=args.variant
)
transformer.requires_grad_(False)
vae.requires_grad_(False)
text_encoder_one.requires_grad_(False)
text_encoder_two.requires_grad_(False)
text_encoder_three.requires_grad_(False)
# For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora transformer) to half-precision
# as these weights are only used for inference, keeping weights in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
if torch.backends.mps.is_available() and weight_dtype == torch.bfloat16:
# due to pytorch#99272, MPS does not yet support bfloat16.
raise ValueError(
"Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead."
)
vae.to(accelerator.device, dtype=torch.float32)
transformer.to(accelerator.device, dtype=weight_dtype)
text_encoder_one.to(accelerator.device, dtype=weight_dtype)
text_encoder_two.to(accelerator.device, dtype=weight_dtype)
text_encoder_three.to(accelerator.device, dtype=weight_dtype)
if args.gradient_checkpointing:
transformer.enable_gradient_checkpointing()
if args.train_text_encoder:
text_encoder_one.gradient_checkpointing_enable()
text_encoder_two.gradient_checkpointing_enable()
if args.lora_layers is not None:
target_modules = [layer.strip() for layer in args.lora_layers.split(",")]
else:
target_modules = [
"attn.add_k_proj",
"attn.add_q_proj",
"attn.add_v_proj",
"attn.to_add_out",
"attn.to_k",
"attn.to_out.0",
"attn.to_q",
"attn.to_v",
]
if args.lora_blocks is not None:
target_blocks = [int(block.strip()) for block in args.lora_blocks.split(",")]
target_modules = [
f"transformer_blocks.{block}.{module}" for block in target_blocks for module in target_modules
]
# now we will add new LoRA weights to the attention layers
transformer_lora_config = LoraConfig(
r=args.rank,
lora_alpha=args.rank,
init_lora_weights="gaussian",
target_modules=target_modules,
)
transformer.add_adapter(transformer_lora_config)
if args.train_text_encoder:
text_lora_config = LoraConfig(
r=args.rank,
lora_alpha=args.rank,
init_lora_weights="gaussian",
target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
)
text_encoder_one.add_adapter(text_lora_config)
text_encoder_two.add_adapter(text_lora_config)
def unwrap_model(model):
model = accelerator.unwrap_model(model)
model = model._orig_mod if is_compiled_module(model) else model
return model
# create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
def save_model_hook(models, weights, output_dir):
if accelerator.is_main_process:
transformer_lora_layers_to_save = None
text_encoder_one_lora_layers_to_save = None
text_encoder_two_lora_layers_to_save = None
for model in models:
if isinstance(unwrap_model(model), type(unwrap_model(transformer))):
model = unwrap_model(model)
if args.upcast_before_saving:
model = model.to(torch.float32)
transformer_lora_layers_to_save = get_peft_model_state_dict(model)
elif args.train_text_encoder and isinstance(
unwrap_model(model), type(unwrap_model(text_encoder_one))
): # or text_encoder_two
# both text encoders are of the same class, so we check hidden size to distinguish between the two
model = unwrap_model(model)
hidden_size = model.config.hidden_size
if hidden_size == 768:
text_encoder_one_lora_layers_to_save = get_peft_model_state_dict(model)
elif hidden_size == 1280:
text_encoder_two_lora_layers_to_save = get_peft_model_state_dict(model)
else:
raise ValueError(f"unexpected save model: {model.__class__}")
# make sure to pop weight so that corresponding model is not saved again
if weights:
weights.pop()
StableDiffusion3Pipeline.save_lora_weights(
output_dir,
transformer_lora_layers=transformer_lora_layers_to_save,
text_encoder_lora_layers=text_encoder_one_lora_layers_to_save,
text_encoder_2_lora_layers=text_encoder_two_lora_layers_to_save,
)
def load_model_hook(models, input_dir):
transformer_ = None
text_encoder_one_ = None
text_encoder_two_ = None
if not accelerator.distributed_type == DistributedType.DEEPSPEED:
            print("Not DistributedType.DEEPSPEED!")
while len(models) > 0:
model = models.pop()
if isinstance(unwrap_model(model), type(unwrap_model(transformer))):
transformer_ = unwrap_model(model)
elif isinstance(unwrap_model(model), type(unwrap_model(text_encoder_one))):
text_encoder_one_ = unwrap_model(model)
elif isinstance(unwrap_model(model), type(unwrap_model(text_encoder_two))):
text_encoder_two_ = unwrap_model(model)
else:
raise ValueError(f"unexpected save model: {model.__class__}")
print([type(unwrap_model(m)) for m in models])
else:
transformer_ = SD3Transformer2DModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder="transformer"
)
transformer_.add_adapter(transformer_lora_config)
if args.train_text_encoder:
text_encoder_one_ = text_encoder_cls_one.from_pretrained(
args.pretrained_model_name_or_path, subfolder="text_encoder"
)
text_encoder_two_ = text_encoder_cls_two.from_pretrained(
args.pretrained_model_name_or_path, subfolder="text_encoder_2"
)
lora_state_dict = StableDiffusion3Pipeline.lora_state_dict(input_dir)
transformer_state_dict = {
f'{k.replace("transformer.", "")}': v for k, v in lora_state_dict.items() if k.startswith("transformer.")
}
transformer_state_dict = convert_unet_state_dict_to_peft(transformer_state_dict)
incompatible_keys = set_peft_model_state_dict(transformer_, transformer_state_dict, adapter_name="default")
if incompatible_keys is not None:
# check only for unexpected keys
unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None)
if unexpected_keys:
logger.warning(
f"Loading adapter weights from state_dict led to unexpected keys not found in the model: "
f" {unexpected_keys}. "
)
if args.train_text_encoder:
# Do we need to call `scale_lora_layers()` here?
_set_state_dict_into_text_encoder(lora_state_dict, prefix="text_encoder.", text_encoder=text_encoder_one_)
_set_state_dict_into_text_encoder(
lora_state_dict, prefix="text_encoder_2.", text_encoder=text_encoder_two_
)
# Make sure the trainable params are in float32. This is again needed since the base models
# are in `weight_dtype`. More details:
# https://github.com/huggingface/diffusers/pull/6514#discussion_r1449796804
if args.mixed_precision == "fp16":
models = [transformer_]
if args.train_text_encoder:
models.extend([text_encoder_one_, text_encoder_two_])
# only upcast trainable parameters (LoRA) into fp32
cast_training_params(models)
accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
# Enable TF32 for faster training on Ampere GPUs,
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if args.allow_tf32 and torch.cuda.is_available():
torch.backends.cuda.matmul.allow_tf32 = True
if args.scale_lr:
args.learning_rate = (
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
)
# Make sure the trainable params are in float32.
if args.mixed_precision == "fp16":
models = [transformer]
if args.train_text_encoder:
models.extend([text_encoder_one, text_encoder_two])
# only upcast trainable parameters (LoRA) into fp32
cast_training_params(models, dtype=torch.float32)
transformer_lora_parameters = list(filter(lambda p: p.requires_grad, transformer.parameters()))
if args.train_text_encoder:
text_lora_parameters_one = list(filter(lambda p: p.requires_grad, text_encoder_one.parameters()))
text_lora_parameters_two = list(filter(lambda p: p.requires_grad, text_encoder_two.parameters()))
# Optimization parameters
transformer_parameters_with_lr = {"params": transformer_lora_parameters, "lr": args.learning_rate}
if args.train_text_encoder:
# different learning rate for text encoder and unet
text_lora_parameters_one_with_lr = {
"params": text_lora_parameters_one,
"weight_decay": args.adam_weight_decay_text_encoder,
"lr": args.text_encoder_lr if args.text_encoder_lr else args.learning_rate,
}
text_lora_parameters_two_with_lr = {
"params": text_lora_parameters_two,
"weight_decay": args.adam_weight_decay_text_encoder,
"lr": args.text_encoder_lr if args.text_encoder_lr else args.learning_rate,
}
params_to_optimize = [
transformer_parameters_with_lr,
text_lora_parameters_one_with_lr,
text_lora_parameters_two_with_lr,
]
else:
params_to_optimize = [transformer_parameters_with_lr]
# Optimizer creation
if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"):
        logger.warning(
            f"Unsupported choice of optimizer: {args.optimizer}. Supported optimizers include [adamW, prodigy]. "
            "Defaulting to adamW."
        )
args.optimizer = "adamw"
if args.use_8bit_adam and not args.optimizer.lower() == "adamw":
logger.warning(
f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was "
f"set to {args.optimizer.lower()}"
)
if args.optimizer.lower() == "adamw":
if args.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError:
raise ImportError(
"To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
)
optimizer_class = bnb.optim.AdamW8bit
else:
optimizer_class = torch.optim.AdamW
optimizer = optimizer_class(
params_to_optimize,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
if args.optimizer.lower() == "prodigy":
try:
import prodigyopt
except ImportError:
raise ImportError("To use Prodigy, please install the prodigyopt library: `pip install prodigyopt`")
optimizer_class = prodigyopt.Prodigy
if args.learning_rate <= 0.1:
logger.warning(
"Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0"
)
if args.train_text_encoder and args.text_encoder_lr:
            logger.warning(
                f"Learning rates were provided both for the transformer and the text encoder, e.g. text_encoder_lr:"
                f" {args.text_encoder_lr} and learning_rate: {args.learning_rate}. "
                f"When using prodigy only learning_rate is used as the initial learning rate."
            )
# changes the learning rate of text_encoder_parameters_one and text_encoder_parameters_two to be
# --learning_rate
params_to_optimize[1]["lr"] = args.learning_rate
params_to_optimize[2]["lr"] = args.learning_rate
optimizer = optimizer_class(
params_to_optimize,
betas=(args.adam_beta1, args.adam_beta2),
beta3=args.prodigy_beta3,
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
decouple=args.prodigy_decouple,
use_bias_correction=args.prodigy_use_bias_correction,
safeguard_warmup=args.prodigy_safeguard_warmup,
)
# Dataset and DataLoaders creation:
train_dataset = ImageDataset(
image_base_dir=args.image_base_dir,
train_data_file=args.train_data_file,
size=args.resolution,
repeats=args.repeats,
center_crop=args.center_crop,
)
    # Each process runs train_gors_lora_sd3.py independently and executes the same code;
    # ImageDataset is created in each process's own Python environment, and accelerate does not
    # share train_dataset memory across processes.
    # So the dataset class must not load memory-heavy data up front, only metadata, otherwise it
    # would consume an enormous amount of memory.
    # (The earlier DreamBooth setup needed only a few images for fine-tuning, so it could load them
    # onto the CPU inside the dataset, instead of loading lazily per batch like most training code.)
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.train_batch_size,
shuffle=True,
        collate_fn=collate_fn,  # examples are read one by one from the Dataset by the DataLoader, which pulls batch_size of them per batch
num_workers=args.dataloader_num_workers,
    )  # Slice the dataset with torch's built-in DataLoader.
    # This only sets up the loader (an initialization); no data is processed here. Processing
    # happens later, when the training loop iterates over train_dataloader: __getitem__ indexes
    # train_dataset[index] to fetch examples, which collate_fn then assembles into a batch.
    # The actual call site is: for step, batch in enumerate(train_dataloader)
if not args.train_text_encoder:
tokenizers = [tokenizer_one, tokenizer_two, tokenizer_three]
text_encoders = [text_encoder_one, text_encoder_two, text_encoder_three]
def compute_text_embeddings(prompt, text_encoders, tokenizers):
with torch.no_grad():
prompt_embeds, pooled_prompt_embeds = encode_prompt(
text_encoders, tokenizers, prompt, args.max_sequence_length
)
prompt_embeds = prompt_embeds.to(accelerator.device)
            # per-token vectors, shaped roughly [batch, seq_len, dim]
pooled_prompt_embeds = pooled_prompt_embeds.to(accelerator.device)
            # the whole sentence pooled into one vector, usually used for conditioning (shape [batch, dim])
return prompt_embeds, pooled_prompt_embeds
vae_config_shift_factor = vae.config.shift_factor
vae_config_scaling_factor = vae.config.scaling_factor
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
num_training_steps=args.max_train_steps * accelerator.num_processes,
num_cycles=args.lr_num_cycles,
power=args.lr_power,
)
# Prepare everything with our `accelerator`.
if args.train_text_encoder:
(
transformer,
text_encoder_one,
text_encoder_two,
optimizer,
train_dataloader,
lr_scheduler,
) = accelerator.prepare(
transformer, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler
        )  # text_encoder_one and text_encoder_two are wrapped here, so their exact model types can no longer be accessed directly afterwards
assert text_encoder_one is not None
assert text_encoder_two is not None
assert text_encoder_three is not None
else:
transformer, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
transformer, optimizer, train_dataloader, lr_scheduler
)
# if args.cache_latents:
# if (os.path.exists("./latents_cache/latent_1.pt")):
# if args.cache_latents:
# latents_cache = [
# torch.load(f"./latents_cache/latent_{i}.pt").to(accelerator.device, dtype=weight_dtype)
# for i in range(len(train_dataset))
# ]
# else:
# latents_cache = []
# for batch_idx,batch in enumerate(tqdm(train_dataloader, desc="Caching latents")):
# # if batch_idx <=3198:
# # continue
# with torch.no_grad():
# batch["pixel_values"] = batch["pixel_values"].to(
# accelerator.device, non_blocking=True, dtype=vae.dtype
# )
# latent = vae.encode(batch["pixel_values"]).latent_dist
# latents_cache.append(latent)
# torch.save(latent.sample().cpu(), f"./latents_cache/latent_{batch_idx}.pt")
    # # save the latents
    # # but this effectively puts every image on one GPU at once; in practice it ran out of GPU memory
# if args.validation_prompts is None:
# del vae
# free_memory()
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    print(f"Update steps per epoch: {num_update_steps_per_epoch}")
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initializes automatically on the main process.
if accelerator.is_main_process:
tracker_name = "gors-sd3-lora"
accelerator.init_trackers(tracker_name, config=vars(args))
# Train!
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
#----------------------------------------------------------------------------
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num batches each epoch = {len(train_dataloader)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
global_step = 0
first_epoch = 0
    # This part of the code probably also needs corresponding changes
#-----------------------------------------------------------------------------------
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint != "latest":
path = os.path.basename(args.resume_from_checkpoint)
else:
            # Get the most recent checkpoint
dirs = os.listdir(args.output_dir)
dirs = [d for d in dirs if d.startswith("checkpoint")]
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
path = dirs[-1] if len(dirs) > 0 else None
if path is None:
accelerator.print(
f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
)
args.resume_from_checkpoint = None
initial_global_step = 0
else:
accelerator.print(f"Resuming from checkpoint {path}")
accelerator.load_state(os.path.join(args.output_dir, path))
global_step = int(path.split("-")[1])
initial_global_step = global_step
resume_global_step = global_step * args.gradient_accumulation_steps
first_epoch = global_step // num_update_steps_per_epoch
resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
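            # Worked example of the resume arithmetic (hypothetical numbers): with
            # num_update_steps_per_epoch = 100, gradient_accumulation_steps = 4 and a
            # checkpoint at global_step = 250:
            #   resume_global_step = 250 * 4 = 1000 dataloader batches already consumed
            #   first_epoch        = 250 // 100 = 2
            #   resume_step        = 1000 % (100 * 4) = 200 batches to skip inside epoch 2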
            # resume point restored
else:
initial_global_step = 0
progress_bar = tqdm(
range(0, args.max_train_steps),
initial=initial_global_step,
desc="Steps",
# Only show the progress bar once on each machine.
disable=not accelerator.is_local_main_process,
)
def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
sigmas = noise_scheduler_copy.sigmas.to(device=accelerator.device, dtype=dtype)
schedule_timesteps = noise_scheduler_copy.timesteps.to(accelerator.device)
timesteps = timesteps.to(accelerator.device)
step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]
sigma = sigmas[step_indices].flatten()
while len(sigma.shape) < n_dim:
sigma = sigma.unsqueeze(-1)
return sigma
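    # Illustration (not executed): for FlowMatchEulerDiscreteScheduler the sigmas decrease
    # monotonically with the timestep, roughly sigma ≈ t / num_train_timesteps modulo the
    # scheduler's shift. get_sigmas looks up each sampled timestep's sigma and unsqueezes it
    # to [B, 1, 1, 1] so it broadcasts against 4D latents:
    #   get_sigmas(timesteps, n_dim=4)  # e.g. tensor([[[[0.7]]], [[[0.2]]]])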
#------------------------------------------------------------------
    # Main body of the training loop
for epoch in range(first_epoch, args.num_train_epochs):
#--------------------------------------------------------------
        # per-epoch setup
transformer.train()
if args.train_text_encoder:
text_encoder_one.train()
text_encoder_two.train()
            # set the top-level embedding parameters to requires_grad=True so gradient checkpointing works
accelerator.unwrap_model(text_encoder_one).text_model.embeddings.requires_grad_(True)
accelerator.unwrap_model(text_encoder_two).text_model.embeddings.requires_grad_(True)
for step, batch in enumerate(train_dataloader):
#-----------------------------------------------------------
            # one batch within the epoch
# Skip steps until we reach the resumed step
if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
continue
models_to_accumulate = [transformer]
if args.train_text_encoder:
models_to_accumulate.extend([text_encoder_one, text_encoder_two])
with accelerator.accumulate(models_to_accumulate):
prompts = batch["prompts"]
                # encode the batch prompts when a custom prompt is provided for each image
if not args.train_text_encoder:
prompt_embeds, pooled_prompt_embeds = compute_text_embeddings(
prompts, text_encoders, tokenizers
)
else:
tokens_one = tokenize_prompt(tokenizer_one, prompts)
tokens_two = tokenize_prompt(tokenizer_two, prompts)
tokens_three = tokenize_prompt(tokenizer_three, prompts)
                    text_encoders = [
                        accelerator.unwrap_model(text_encoder_one),
                        accelerator.unwrap_model(text_encoder_two),
                        text_encoder_three,
                    ]
prompt_embeds, pooled_prompt_embeds = encode_prompt(
text_encoders,
tokenizers=[None, None, None],
prompt=prompts,
max_sequence_length=args.max_sequence_length,
text_input_ids_list=[tokens_one, tokens_two, tokens_three],
                    )  # compute_text_embeddings is defined for the not args.train_text_encoder case and runs under torch.no_grad(),
                    # so the args.train_text_encoder path must repeat essentially the same logic, but with gradients enabled
# Convert images to latent space
# if args.cache_latents:
# model_input = latents_cache[step].sample()
# else:
pixel_values = batch["pixel_values"].to(dtype=vae.dtype)
                    # pixel_values has shape (batch_size, 2 * C, H, W)
                    # because the "win" and "lose" images are stacked along the channel dim
                    feed_pixel_values = torch.cat(pixel_values.chunk(2, dim=1))
                    # .chunk(2, dim=1) splits pixel_values into a tuple of 2 tensors along dim=1 (channels),
                    # and torch.cat(...) concatenates them along the batch dim (dim=0 by default),
                    # so feed_pixel_values.shape == (2*B, C, H, W)
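                    # Worked example (hypothetical B=2, C=3): pixel_values is (2, 6, H, W) with
                    # the "win" image in channels 0..2 and the "lose" image in channels 3..5.
                    #   wins, loses = pixel_values.chunk(2, dim=1)  # two (2, 3, H, W) tensors
                    #   feed = torch.cat([wins, loses], dim=0)      # (4, 3, H, W): wins first, then loses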
model_input = []
                    # i runs from 0 to 2*batch_size in steps of args.train_batch_size, so each VAE encode sees one batch-sized chunk
for i in range(0, feed_pixel_values.shape[0], args.train_batch_size):
model_input.append(
vae.encode(feed_pixel_values[i : i + args.train_batch_size]).latent_dist.sample()
)
model_input = torch.cat(model_input, dim=0)
                    model_input = (model_input - vae_config_shift_factor) * vae_config_scaling_factor
model_input = model_input.to(dtype=weight_dtype)
                    # SD3 normalizes the latents; the shift and scale come from the VAE config.
                    # Finally cast to weight_dtype, typically float16 or bfloat16.
                    # Sample noise that we'll add to the model_input
noise = torch.randn_like(model_input).chunk(2)[0].repeat(2, 1, 1, 1)
bsz = model_input.shape[0] // 2
                    # generate random noise z1 to construct the noisy latent z_t
                    # bsz is the number of preference pairs per device (half the doubled batch)
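                    # Sketch of the shared-noise trick: chunk(2)[0] keeps the noise drawn for the
                    # "win" half and repeat(2, 1, 1, 1) copies it onto the "lose" half, so each
                    # preference pair is corrupted with identical noise (and, just below, identical
                    # timesteps); the win/lose loss gap then isolates the preference signal.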
# Sample a random timestep for each image
# for weighting schemes where we sample timesteps non-uniformly
u = compute_density_for_timestep_sampling(
weighting_scheme=args.weighting_scheme,
batch_size=bsz,
logit_mean=args.logit_mean,
logit_std=args.logit_std,
mode_scale=args.mode_scale,
)
indices = (u * noise_scheduler_copy.config.num_train_timesteps).long()
timesteps = noise_scheduler_copy.timesteps[indices].to(device=model_input.device).repeat(2)
                    # Sample one timestep t per preference pair (repeated for the win/lose halves),
                    # which sets that step's noise level.
                    # The weighting_scheme controls the distribution of u: uniform, logit-normal, mode, etc.
                    # The resulting timesteps condition the transformer forward pass.
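                    # Sketch of the sampling schemes (as implemented in diffusers, stated here from
                    # memory): "logit_normal" draws u = sigmoid(Normal(logit_mean, logit_std)),
                    # concentrating timesteps mid-range; "uniform" draws u ~ U(0, 1); "mode" skews
                    # the density via mode_scale. Indices into the schedule are then u * num_train_timesteps.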
# Add noise according to flow matching.
# zt = (1 - texp) * x + texp * z1
sigmas = get_sigmas(timesteps, n_dim=model_input.ndim, dtype=model_input.dtype)
noisy_model_input = (1.0 - sigmas) * model_input + sigmas * noise
                    # This is the core flow-matching formula:
                    #   z_t = (1 - σ) * x + σ * z1
                    # where x is the clean latent, z1 is noise, and σ is the noise fraction at that timestep.
                    # Unlike the usual diffusion form x_t = √α·x₀ + √(1-α)·ε, this is a simpler,
                    # more stable linear interpolation.
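                    # Numeric illustration: with σ = 0.7, z_t = 0.3 * x + 0.7 * z1, i.e. the latent
                    # is 70% noise; σ = 0 recovers the clean latent and σ = 1 gives pure noise.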
prompt_embeds = prompt_embeds.repeat(2, 1, 1)
pooled_prompt_embeds = pooled_prompt_embeds.repeat(2, 1)
# Predict the noise residual
                    # In effect we add random noise to a clean image and ask the model to predict;
                    # the prediction comes in two flavors: the noise direction or the original image.
model_pred = transformer(
hidden_states=noisy_model_input,
timestep=timesteps,
encoder_hidden_states=prompt_embeds,
pooled_projections=pooled_prompt_embeds,
return_dict=False,
)[0]
                    # Feed the noisy latent z_t into the transformer.
                    # Given the prompt, the model learns to infer the target for this t (the noise direction or the clean latent).
# Follow: Section 5 of https://arxiv.org/abs/2206.00364.
# Preconditioning of the model outputs.
if args.precondition_outputs:
model_pred = model_pred * (-sigmas) + noisy_model_input
                    # If the model predicts the velocity (noise direction), this step converts the
                    # prediction back into an estimate of the clean latent; see the sketch below.
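                    # Sketch of why this recovers x, given z_t = (1 - σ)x + σ·z1 and a model that
                    # predicts the velocity v = z1 - x:
                    #   z_t - σ·v = (1 - σ)x + σ·z1 - σ·z1 + σ·x = x
                    # so model_pred * (-sigmas) + noisy_model_input estimates the clean latent.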
# these weighting schemes use a uniform timestep sampling
# and instead post-weight the loss
weighting = compute_loss_weighting_for_sd3(weighting_scheme=args.weighting_scheme, sigmas=sigmas)
# flow matching loss
if args.precondition_outputs:
target = model_input
else:
target = noise - model_input
                    # target is the ground truth, and depends on whether outputs are preconditioned:
                    #   with precondition_outputs, the target is the clean latent x
                    #   otherwise it is z1 - x, as derived from flow matching
                    # weighting is the per-timestep loss weight; it depends on sigmas and varies by scheme.
# Compute regular loss.
per_pixel_loss = (weighting.float() * (model_pred.float() - target.float()) ** 2)
model_losses = per_pixel_loss.mean(dim=list(range(1, len(per_pixel_loss.shape)))) # [2B]
model_losses_w, model_losses_l = model_losses.chunk(2)
raw_model_loss = 0.5 * (model_losses_w.mean() + model_losses_l.mean())
                    model_diff = model_losses_w - model_losses_l  # shape [B]: one win-lose loss gap per preference pair
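                    # DPO reading: -model_losses plays the role of an implicit reward, so a
                    # negative model_diff (win loss below lose loss) means the current policy
                    # already prefers the chosen sample of the pair.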
# Reference model predictions.
accelerator.unwrap_model(transformer).disable_adapters()
with torch.no_grad():
ref_pred = transformer(
hidden_states=noisy_model_input,
timestep=timesteps,
encoder_hidden_states=prompt_embeds,
pooled_projections=pooled_prompt_embeds,
return_dict=False,
                    )[0].detach()  # .detach() returns a new tensor that shares the same storage but is excluded from autograd tracking
if args.precondition_outputs:
ref_pred = ref_pred * (-sigmas) + noisy_model_input
# Compute regular loss.
ref_per_pixel_loss = (weighting.float() * (ref_pred.float() - target.float()) ** 2)
ref_losses = ref_per_pixel_loss.mean(dim=list(range(1, len(ref_per_pixel_loss.shape)))) # [2B]
ref_losses_w, ref_losses_l = ref_losses.chunk(2)
raw_ref_loss = 0.5 * (ref_losses_w.mean() + ref_losses_l.mean())
                    ref_diff = ref_losses_w - ref_losses_l  # shape [B], mirroring model_diff
# Re-enable adapters.
accelerator.unwrap_model(transformer).enable_adapters()
# Final loss.
logits = ref_diff - model_diff
loss = -1 * F.logsigmoid(0.5*args.beta_dpo * logits).mean()
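                    # This matches the Diffusion-DPO objective (cf. Wallace et al.), up to constants:
                    #   loss = -E[ log σ( -(β/2) * ((L_w^θ - L_l^θ) - (L_w^ref - L_l^ref)) ) ]
                    # since logits = ref_diff - model_diff = -((L_w^θ - L_l^θ) - (L_w^ref - L_l^ref)),
                    # minimizing the loss pushes the policy to shrink its win-vs-lose loss gap
                    # faster than the frozen reference model does.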
accelerator.backward(loss)
                # backpropagate; the gradients are updated here
if accelerator.sync_gradients:
params_to_clip = (
itertools.chain(
transformer_lora_parameters, text_lora_parameters_one, text_lora_parameters_two
)
if args.train_text_encoder
else transformer_lora_parameters
)
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
            # In distributed training (especially multi-GPU) this pattern ensures the progress
            # bar and global_step only advance when gradients were actually synchronized, i.e.
            # when a real optimization step happened.
            # accelerator.sync_gradients (a HuggingFace Accelerate property) is True only on the
            # step where gradient accumulation completes: with accumulation, not every batch
            # triggers optimizer.step(); only once the accumulation count (e.g. 4) is reached
            # does a backward sync + parameter update occur.
            # A batch is a finer unit than global_step: global_step increments once per gradient
            # update, while each processed batch counts as one batch.
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
global_step += 1
if accelerator.is_main_process or accelerator.distributed_type == DistributedType.DEEPSPEED:
                    # check whether the current state should be saved
if global_step % args.checkpointing_steps == 0:
# _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
if args.checkpoints_total_limit is not None:
checkpoints = os.listdir(args.output_dir)
checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
# before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
if len(checkpoints) >= args.checkpoints_total_limit:
num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
removing_checkpoints = checkpoints[0:num_to_remove]
logger.info(
f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
)
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
for removing_checkpoint in removing_checkpoints:
removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
shutil.rmtree(removing_checkpoint)
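                                # Example (hypothetical): with checkpoints_total_limit = 3 and
                                # ["checkpoint-100", "checkpoint-200", "checkpoint-300"] on disk,
                                # num_to_remove = 3 - 3 + 1 = 1, so "checkpoint-100" is deleted
                                # before "checkpoint-400" is written.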
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
accelerator.save_state(save_path)
logger.info(f"Saved state to {save_path}")
if accelerator.is_main_process:
if args.validation_prompts is not None and global_step % args.validation_steps == 0:
# if not args.train_text_encoder:
# # create pipeline
# text_encoder_one, text_encoder_two, text_encoder_three = load_text_encoders(
# text_encoder_cls_one, text_encoder_cls_two, text_encoder_cls_three
# )
# text_encoder_one.to(weight_dtype)
# text_encoder_two.to(weight_dtype)
pipeline = StableDiffusion3Pipeline.from_pretrained(
args.pretrained_model_name_or_path,
vae=vae,
text_encoder=accelerator.unwrap_model(text_encoder_one),
text_encoder_2=accelerator.unwrap_model(text_encoder_two),
text_encoder_3=accelerator.unwrap_model(text_encoder_three),
transformer=accelerator.unwrap_model(transformer),
revision=args.revision,
variant=args.variant,
torch_dtype=weight_dtype,
                        )  # this pipeline reflects the training so far; the transformer already carries the LoRA layers
images = []
if args.validation_prompts and args.num_validation_images > 0:
pipeline_args = [{"prompt": prompt} for prompt in args.validation_prompts] # 假设是一个 prompt 列表
images = log_validation(
pipeline=pipeline,
args=args,
accelerator=accelerator,
pipeline_args=pipeline_args,
global_step=global_step,
torch_dtype=weight_dtype,
)
                        for i, image in enumerate(images):
                            # save the images as validation_image_<step>_0.png, validation_image_<step>_1.png, ...
                            # (save_path is only defined on checkpointing steps, so write under
                            # args.output_dir to avoid a stale or undefined path)
                            image_path = os.path.join(args.output_dir, f"validation_image_{global_step}_{i}.png")
                            image.save(image_path)
images = None
del pipeline
logs = {
"loss": loss.detach().item(),
"raw_model_loss": raw_model_loss.detach().item(),
"ref_loss": raw_ref_loss.detach().item(),
"lr": lr_scheduler.get_last_lr()[0],
}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if global_step >= args.max_train_steps:
break
#-----------------------------------------------------------------
        # all batches in this epoch have been consumed; the code below is back at the epoch level
# if not args.train_text_encoder:
# del text_encoder_one, text_encoder_two, text_encoder_three
        #     free_memory()  # personally I still feel the text encoders should not be deleted to free memory here, whether or not they are trained, since they remain in use
#--------------------------------------------------------------
    # all epochs are done and training is finished; what follows is saving plus a final validation
# Save the lora layers
accelerator.wait_for_everyone()
if accelerator.is_main_process:
transformer = unwrap_model(transformer)
if args.upcast_before_saving:
transformer.to(torch.float32)
else:
transformer = transformer.to(weight_dtype)
transformer_lora_layers = get_peft_model_state_dict(transformer)
if args.train_text_encoder:
text_encoder_one = unwrap_model(text_encoder_one)
text_encoder_lora_layers = get_peft_model_state_dict(text_encoder_one.to(torch.float32))
text_encoder_two = unwrap_model(text_encoder_two)
text_encoder_2_lora_layers = get_peft_model_state_dict(text_encoder_two.to(torch.float32))
else:
text_encoder_lora_layers = None
text_encoder_2_lora_layers = None
StableDiffusion3Pipeline.save_lora_weights(
save_directory=args.output_dir,
transformer_lora_layers=transformer_lora_layers,
text_encoder_lora_layers=text_encoder_lora_layers,
text_encoder_2_lora_layers=text_encoder_2_lora_layers,
        )  # the LoRA weights are saved once more at the end of training
        # free the training models that are no longer needed before loading the final validation pipeline, to avoid OOM
del transformer, text_encoder_one, text_encoder_two, text_encoder_three, vae
torch.cuda.empty_cache()
# # Final inference
# # Load previous pipeline
# pipeline = StableDiffusion3Pipeline.from_pretrained(
# args.pretrained_model_name_or_path,
# revision=args.revision,
# variant=args.variant,
# torch_dtype=weight_dtype,
# )
# # load attention processors
# pipeline.load_lora_weights(args.output_dir)
# # run inference
# images = []
# if args.validation_prompts and args.num_validation_images > 0:
# pipeline_args = [{"prompt": prompt} for prompt in args.validation_prompts] # 假设是一个 prompt 列表
# images = log_validation(
# pipeline=pipeline,
# args=args,
# accelerator=accelerator,
# pipeline_args=pipeline_args,
# epoch=epoch,
# torch_dtype=weight_dtype,
# )
# for i,image in enumerate(images):
    # # save the images as final_validation_image_0.png, final_validation_image_1.png, ...
# image_path = os.path.join(args.output_dir, f"final_validation_image_{i}.png")
# image.save(image_path)
if args.push_to_hub:
save_model_card(
repo_id,
            images=None,  # the final-inference block above is disabled, so no images exist at this point
base_model=args.pretrained_model_name_or_path,
instance_prompt=args.instance_prompt,
validation_prompts=args.validation_prompts,
train_text_encoder=args.train_text_encoder,
repo_folder=args.output_dir,
)
upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message="End of training",
ignore_patterns=["step_*", "epoch_*"],
)
accelerator.end_training()
if __name__ == "__main__":
args = parse_args()
main(args)