import argparse
import copy
import itertools
import logging
import math
import os
import random
import re
import shutil
from contextlib import nullcontext
from pathlib import Path
from typing import List, Optional

import numpy as np
import torch
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
from huggingface_hub import create_repo, upload_folder
from huggingface_hub.utils import insecure_hashlib
from peft import LoraConfig, set_peft_model_state_dict
from peft.utils import get_peft_model_state_dict
from PIL import Image
from PIL.ImageOps import exif_transpose
from safetensors.torch import save_file
from torch.utils.data import Dataset
from torchvision import transforms
from torchvision.transforms.functional import crop
from tqdm.auto import tqdm
from transformers import CLIPTokenizer, PretrainedConfig, T5TokenizerFast

import diffusers
from diffusers import (
    AutoencoderKL,
    FlowMatchEulerDiscreteScheduler,
    FluxPipeline,
    FluxTransformer2DModel,
)
from diffusers.optimization import get_scheduler
from diffusers.training_utils import (
    _collate_lora_metadata,
    _set_state_dict_into_text_encoder,
    cast_training_params,
    compute_density_for_timestep_sampling,
    compute_loss_weighting_for_sd3,
    free_memory,
)
from diffusers.utils import (
    check_min_version,
    convert_unet_state_dict_to_peft,
    is_wandb_available,
)
from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
from diffusers.utils.torch_utils import is_compiled_module

| | if is_wandb_available(): |
| | import wandb |
| |
|
| | |
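# Will raise an error if the installed diffusers version is older than the minimum required by this script.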
| | check_min_version("0.37.0.dev0") |
| |
|
| | logger = get_logger(__name__) |
| |
|
| |
|
| | def save_model_card( |
| | repo_id: str, |
| | images=None, |
| | base_model: str = None, |
| | train_text_encoder=False, |
| | train_text_encoder_ti=False, |
| | enable_t5_ti=False, |
| | pure_textual_inversion=False, |
| | token_abstraction_dict=None, |
| | instance_prompt=None, |
| | validation_prompt=None, |
| | repo_folder=None, |
| | ): |
| | widget_dict = [] |
| | trigger_str = f"You should use {instance_prompt} to trigger the image generation." |
| |
|
| | if images is not None: |
| | for i, image in enumerate(images): |
| | image.save(os.path.join(repo_folder, f"image_{i}.png")) |
| | widget_dict.append( |
| | {"text": validation_prompt if validation_prompt else " ", "output": {"url": f"image_{i}.png"}} |
| | ) |
| | diffusers_load_lora = "" |
| | diffusers_imports_pivotal = "" |
| | diffusers_example_pivotal = "" |
| | if not pure_textual_inversion: |
| | diffusers_load_lora = ( |
| | f"""pipeline.load_lora_weights('{repo_id}', weight_name='pytorch_lora_weights.safetensors')""" |
| | ) |
| | if train_text_encoder_ti: |
| | embeddings_filename = f"{repo_folder}_emb" |
| | ti_keys = ", ".join(f'"{match}"' for match in re.findall(r"<s\d+>", instance_prompt)) |
| | trigger_str = ( |
| | "To trigger image generation of trained concept(or concepts) replace each concept identifier " |
| | "in you prompt with the new inserted tokens:\n" |
| | ) |
| | diffusers_imports_pivotal = """from huggingface_hub import hf_hub_download |
| | from safetensors.torch import load_file |
| | """ |
| | if enable_t5_ti: |
| | diffusers_example_pivotal = f"""embedding_path = hf_hub_download(repo_id='{repo_id}', filename='{embeddings_filename}.safetensors', repo_type="model") |
| | state_dict = load_file(embedding_path) |
| | pipeline.load_textual_inversion(state_dict["clip_l"], token=[{ti_keys}], text_encoder=pipeline.text_encoder, tokenizer=pipeline.tokenizer) |
| | pipeline.load_textual_inversion(state_dict["t5"], token=[{ti_keys}], text_encoder=pipeline.text_encoder_2, tokenizer=pipeline.tokenizer_2) |
| | """ |
| | else: |
| | diffusers_example_pivotal = f"""embedding_path = hf_hub_download(repo_id='{repo_id}', filename='{embeddings_filename}.safetensors', repo_type="model") |
| | state_dict = load_file(embedding_path) |
| | pipeline.load_textual_inversion(state_dict["clip_l"], token=[{ti_keys}], text_encoder=pipeline.text_encoder, tokenizer=pipeline.tokenizer) |
| | """ |
| | if token_abstraction_dict: |
| | for key, value in token_abstraction_dict.items(): |
| | tokens = "".join(value) |
| | trigger_str += f""" |
| | to trigger concept `{key}` → use `{tokens}` in your prompt \n |
| | """ |
| |
|
| | model_description = f""" |
| | # Flux DreamBooth LoRA - {repo_id} |
| | |
| | <Gallery /> |
| | |
| | ## Model description |
| | |
| | These are {repo_id} DreamBooth LoRA weights for {base_model}. |
| | |
| | The weights were trained using [DreamBooth](https://dreambooth.github.io/) with the [Flux diffusers trainer](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/README_flux.md). |
| | |
| | Was LoRA for the text encoder enabled? {train_text_encoder}. |
| | |
| | Pivotal tuning was enabled: {train_text_encoder_ti}. |
| | |
| | ## Trigger words |
| | |
| | {trigger_str} |
| | |
| | ## Download model |
| | |
| | [Download the *.safetensors LoRA]({repo_id}/tree/main) in the Files & versions tab. |
| | |
| | ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers) |
| | |
| | ```py |
| | from diffusers import AutoPipelineForText2Image |
| | import torch |
| | {diffusers_imports_pivotal} |
| | pipeline = AutoPipelineForText2Image.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to('cuda') |
| | {diffusers_load_lora} |
| | {diffusers_example_pivotal} |
| | image = pipeline('{validation_prompt if validation_prompt else instance_prompt}').images[0] |
| | ``` |
| | |
| | For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters) |
| | |
| | ## License |
| | |
| | Please adhere to the licensing terms as described [here](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md). |
| | """ |
| | model_card = load_or_create_model_card( |
| | repo_id_or_path=repo_id, |
| | from_training=True, |
| | license="other", |
| | base_model=base_model, |
| | prompt=instance_prompt, |
| | model_description=model_description, |
| | widget=widget_dict, |
| | ) |
| | tags = [ |
| | "text-to-image", |
| | "diffusers-training", |
| | "diffusers", |
| | "lora", |
| | "flux", |
| | "flux-diffusers", |
| | "template:sd-lora", |
| | ] |
| |
|
| | model_card = populate_model_card(model_card, tags=tags) |
| | model_card.save(os.path.join(repo_folder, "README.md")) |
| |
|
| |
|
| | def load_text_encoders(class_one, class_two): |
| | text_encoder_one = class_one.from_pretrained( |
| | args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant |
| | ) |
| | text_encoder_two = class_two.from_pretrained( |
| | args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant |
| | ) |
| | return text_encoder_one, text_encoder_two |
| |
|
| |
|
| | def log_validation( |
| | pipeline, |
| | args, |
| | accelerator, |
| | pipeline_args, |
| | epoch, |
| | torch_dtype, |
| | is_final_validation=False, |
| | ): |
| | logger.info( |
| | f"Running validation... \n Generating {args.num_validation_images} images with prompt:" |
| | f" {args.validation_prompt}." |
| | ) |
| | pipeline = pipeline.to(accelerator.device, dtype=torch_dtype) |
| | pipeline.set_progress_bar_config(disable=True) |
| |
|
| | |
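# Run inference with a seeded generator for reproducibility; autocast is skipped for the final validation.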
| | generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed is not None else None |
| | autocast_ctx = torch.autocast(accelerator.device.type) if not is_final_validation else nullcontext() |
| |
|
| | |
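# Pre-compute the prompt embeddings once (no grad) and reuse them for every validation image.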
| | with torch.no_grad(): |
| | prompt_embeds, pooled_prompt_embeds, text_ids = pipeline.encode_prompt( |
| | pipeline_args["prompt"], prompt_2=pipeline_args["prompt"] |
| | ) |
| | images = [] |
| | for _ in range(args.num_validation_images): |
| | with autocast_ctx: |
| | image = pipeline( |
| | prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, generator=generator |
| | ).images[0] |
| | images.append(image) |
| |
|
| | for tracker in accelerator.trackers: |
| | phase_name = "test" if is_final_validation else "validation" |
| | if tracker.name == "tensorboard": |
| | np_images = np.stack([np.asarray(img) for img in images]) |
| | tracker.writer.add_images(phase_name, np_images, epoch, dataformats="NHWC") |
| | if tracker.name == "wandb": |
| | tracker.log( |
| | { |
| | phase_name: [ |
| | wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) |
| | ] |
| | } |
| | ) |
| |
|
| | del pipeline |
| | free_memory() |
| |
|
| | return images |
| |
|
| |
|
| | def import_model_class_from_model_name_or_path( |
| | pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" |
| | ): |
| | text_encoder_config = PretrainedConfig.from_pretrained( |
| | pretrained_model_name_or_path, subfolder=subfolder, revision=revision |
| | ) |
| | model_class = text_encoder_config.architectures[0] |
| | if model_class == "CLIPTextModel": |
| | from transformers import CLIPTextModel |
| |
|
| | return CLIPTextModel |
| | elif model_class == "T5EncoderModel": |
| | from transformers import T5EncoderModel |
| |
|
| | return T5EncoderModel |
| | else: |
| | raise ValueError(f"{model_class} is not supported.") |
| |
|
| |
|
| | def parse_args(input_args=None): |
| | parser = argparse.ArgumentParser(description="Simple example of a training script.") |
| | parser.add_argument( |
| | "--pretrained_model_name_or_path", |
| | type=str, |
| | default=None, |
| | required=True, |
| | help="Path to pretrained model or model identifier from huggingface.co/models.", |
| | ) |
| | parser.add_argument( |
| | "--revision", |
| | type=str, |
| | default=None, |
| | required=False, |
| | help="Revision of pretrained model identifier from huggingface.co/models.", |
| | ) |
| | parser.add_argument( |
| | "--variant", |
| | type=str, |
| | default=None, |
| | help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", |
| | ) |
| | parser.add_argument( |
| | "--dataset_name", |
| | type=str, |
| | default=None, |
| | help=( |
| | "The name of the Dataset (from the HuggingFace hub) containing the training data of instance images (could be your own, possibly private," |
| | " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," |
| | " or to a folder containing files that 🤗 Datasets can understand." |
| | ), |
| | ) |
| | parser.add_argument( |
| | "--dataset_config_name", |
| | type=str, |
| | default=None, |
| | help="The config of the Dataset, leave as None if there's only one config.", |
| | ) |
| | parser.add_argument( |
| | "--instance_data_dir", |
| | type=str, |
| | default=None, |
| | help=("A folder containing the training data. "), |
| | ) |
| |
|
| | parser.add_argument( |
| | "--cache_dir", |
| | type=str, |
| | default=None, |
| | help="The directory where the downloaded models and datasets will be stored.", |
| | ) |
| |
|
| | parser.add_argument( |
| | "--image_column", |
| | type=str, |
| | default="image", |
| | help="The column of the dataset containing the target image. By " |
| | "default, the standard Image Dataset maps out 'file_name' " |
| | "to 'image'.", |
| | ) |
| | parser.add_argument( |
| | "--caption_column", |
| | type=str, |
| | default=None, |
| | help="The column of the dataset containing the instance prompt for each image", |
| | ) |
| |
|
| | parser.add_argument("--repeats", type=int, default=1, help="How many times to repeat the training data.") |
| |
|
| | parser.add_argument( |
| | "--class_data_dir", |
| | type=str, |
| | default=None, |
| | required=False, |
| | help="A folder containing the training data of class images.", |
| | ) |
| | parser.add_argument( |
| | "--instance_prompt", |
| | type=str, |
| | default=None, |
| | required=True, |
| | help="The prompt with identifier specifying the instance, e.g. 'photo of a TOK dog', 'in the style of TOK'", |
| | ) |
| | parser.add_argument( |
| | "--token_abstraction", |
| | type=str, |
| | default="TOK", |
| | help="identifier specifying the instance(or instances) as used in instance_prompt, validation prompt, " |
| | "captions - e.g. TOK. To use multiple identifiers, please specify them in a comma separated string - e.g. " |
| | "'TOK,TOK2,TOK3' etc.", |
| | ) |
| |
|
| | parser.add_argument( |
| | "--num_new_tokens_per_abstraction", |
| | type=int, |
| | default=None, |
| | help="number of new tokens inserted to the tokenizers per token_abstraction identifier when " |
| | "--train_text_encoder_ti = True. By default, each --token_abstraction (e.g. TOK) is mapped to 2 new " |
| | "tokens - <si><si+1> ", |
| | ) |
| | parser.add_argument( |
| | "--initializer_concept", |
| | type=str, |
| | default=None, |
| | help="the concept to use to initialize the new inserted tokens when training with " |
| | "--train_text_encoder_ti = True. By default, new tokens (<si><si+1>) are initialized with random value. " |
| | "Alternatively, you could specify a different word/words whose value will be used as the starting point for the new inserted tokens. " |
| | "--num_new_tokens_per_abstraction is ignored when initializer_concept is provided", |
| | ) |
| | parser.add_argument( |
| | "--class_prompt", |
| | type=str, |
| | default=None, |
| | help="The prompt to specify images in the same class as provided instance images.", |
| | ) |
| | parser.add_argument( |
| | "--max_sequence_length", |
| | type=int, |
| | default=512, |
| | help="Maximum sequence length to use with with the T5 text encoder", |
| | ) |
| | parser.add_argument( |
| | "--validation_prompt", |
| | type=str, |
| | default=None, |
| | help="A prompt that is used during validation to verify that the model is learning.", |
| | ) |
| | parser.add_argument( |
| | "--num_validation_images", |
| | type=int, |
| | default=4, |
| | help="Number of images that should be generated during validation with `validation_prompt`.", |
| | ) |
| | parser.add_argument( |
| | "--validation_epochs", |
| | type=int, |
| | default=50, |
| | help=( |
| | "Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt" |
| | " `args.validation_prompt` multiple times: `args.num_validation_images`." |
| | ), |
| | ) |
| | parser.add_argument( |
| | "--rank", |
| | type=int, |
| | default=4, |
| | help=("The dimension of the LoRA update matrices."), |
| | ) |
| |
|
| | parser.add_argument( |
| | "--lora_alpha", |
| | type=int, |
| | default=4, |
| | help="LoRA alpha to be used for additional scaling.", |
| | ) |
| |
|
| | parser.add_argument("--lora_dropout", type=float, default=0.0, help="Dropout probability for LoRA layers") |
| |
|
| | parser.add_argument( |
| | "--with_prior_preservation", |
| | default=False, |
| | action="store_true", |
| | help="Flag to add prior preservation loss.", |
| | ) |
| | parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") |
| | parser.add_argument( |
| | "--num_class_images", |
| | type=int, |
| | default=100, |
| | help=( |
| | "Minimal class images for prior preservation loss. If there are not enough images already present in" |
| | " class_data_dir, additional images will be sampled with class_prompt." |
| | ), |
| | ) |
| | parser.add_argument( |
| | "--output_dir", |
| | type=str, |
| | default="flux-dreambooth-lora", |
| | help="The output directory where the model predictions and checkpoints will be written.", |
| | ) |
| | parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") |
| | parser.add_argument( |
| | "--resolution", |
| | type=int, |
| | default=512, |
| | help=( |
| | "The resolution for input images, all the images in the train/validation dataset will be resized to this" |
| | " resolution" |
| | ), |
| | ) |
| | parser.add_argument( |
| | "--center_crop", |
| | default=False, |
| | action="store_true", |
| | help=( |
| | "Whether to center crop the input images to the resolution. If not set, the images will be randomly" |
| | " cropped. The images will be resized to the resolution first before cropping." |
| | ), |
| | ) |
| | parser.add_argument( |
| | "--random_flip", |
| | action="store_true", |
| | help="whether to randomly flip images horizontally", |
| | ) |
| | parser.add_argument( |
| | "--train_text_encoder", |
| | action="store_true", |
| | help="Whether to train the text encoder. If set, the text encoder should be float32 precision.", |
| | ) |
| | parser.add_argument( |
| | "--train_text_encoder_ti", |
| | action="store_true", |
| | help=("Whether to use pivotal tuning / textual inversion"), |
| | ) |
| | parser.add_argument( |
| | "--enable_t5_ti", |
| | action="store_true", |
| | help=( |
| | "Whether to use pivotal tuning / textual inversion for the T5 encoder as well (in addition to CLIP encoder)" |
| | ), |
| | ) |
| |
|
| | parser.add_argument( |
| | "--train_text_encoder_ti_frac", |
| | type=float, |
| | default=0.5, |
| | help=("The percentage of epochs to perform textual inversion"), |
| | ) |
| |
|
| | parser.add_argument( |
| | "--train_text_encoder_frac", |
| | type=float, |
| | default=1.0, |
| | help=("The percentage of epochs to perform text encoder tuning"), |
| | ) |
| | parser.add_argument( |
| | "--train_transformer_frac", |
| | type=float, |
| | default=1.0, |
| | help=("The percentage of epochs to perform transformer tuning"), |
| | ) |
| |
|
| | parser.add_argument( |
| | "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." |
| | ) |
| | parser.add_argument( |
| | "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." |
| | ) |
| | parser.add_argument("--num_train_epochs", type=int, default=1) |
| | parser.add_argument( |
| | "--max_train_steps", |
| | type=int, |
| | default=None, |
| | help="Total number of training steps to perform. If provided, overrides num_train_epochs.", |
| | ) |
| | parser.add_argument( |
| | "--checkpointing_steps", |
| | type=int, |
| | default=500, |
| | help=( |
| | "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" |
| | " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" |
| | " training using `--resume_from_checkpoint`." |
| | ), |
| | ) |
| | parser.add_argument( |
| | "--checkpoints_total_limit", |
| | type=int, |
| | default=None, |
| | help=("Max number of checkpoints to store."), |
| | ) |
| | parser.add_argument( |
| | "--resume_from_checkpoint", |
| | type=str, |
| | default=None, |
| | help=( |
| | "Whether training should be resumed from a previous checkpoint. Use a path saved by" |
| | ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' |
| | ), |
| | ) |
| | parser.add_argument( |
| | "--gradient_accumulation_steps", |
| | type=int, |
| | default=1, |
| | help="Number of updates steps to accumulate before performing a backward/update pass.", |
| | ) |
| | parser.add_argument( |
| | "--gradient_checkpointing", |
| | action="store_true", |
| | help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", |
| | ) |
| | parser.add_argument( |
| | "--learning_rate", |
| | type=float, |
| | default=1e-4, |
| | help="Initial learning rate (after the potential warmup period) to use.", |
| | ) |
| |
|
| | parser.add_argument( |
| | "--guidance_scale", |
| | type=float, |
| | default=3.5, |
| | help="the FLUX.1 dev variant is a guidance distilled model", |
| | ) |
| | parser.add_argument( |
| | "--text_encoder_lr", |
| | type=float, |
| | default=5e-6, |
| | help="Text encoder learning rate to use.", |
| | ) |
| | parser.add_argument( |
| | "--scale_lr", |
| | action="store_true", |
| | default=False, |
| | help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", |
| | ) |
| | parser.add_argument( |
| | "--lr_scheduler", |
| | type=str, |
| | default="constant", |
| | help=( |
| | 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' |
| | ' "constant", "constant_with_warmup"]' |
| | ), |
| | ) |
| | parser.add_argument( |
| | "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." |
| | ) |
| | parser.add_argument( |
| | "--lr_num_cycles", |
| | type=int, |
| | default=1, |
| | help="Number of hard resets of the lr in cosine_with_restarts scheduler.", |
| | ) |
| | parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") |
| | parser.add_argument( |
| | "--dataloader_num_workers", |
| | type=int, |
| | default=0, |
| | help=( |
| | "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." |
| | ), |
| | ) |
| | parser.add_argument( |
| | "--weighting_scheme", |
| | type=str, |
| | default="none", |
| | choices=["sigma_sqrt", "logit_normal", "mode", "cosmap", "none"], |
| | help=('We default to the "none" weighting scheme for uniform sampling and uniform loss'), |
| | ) |
| | parser.add_argument( |
| | "--logit_mean", type=float, default=0.0, help="mean to use when using the `'logit_normal'` weighting scheme." |
| | ) |
| | parser.add_argument( |
| | "--logit_std", type=float, default=1.0, help="std to use when using the `'logit_normal'` weighting scheme." |
| | ) |
| | parser.add_argument( |
| | "--mode_scale", |
| | type=float, |
| | default=1.29, |
| | help="Scale of mode weighting scheme. Only effective when using the `'mode'` as the `weighting_scheme`.", |
| | ) |
| | parser.add_argument( |
| | "--optimizer", |
| | type=str, |
| | default="AdamW", |
| | help=('The optimizer type to use. Choose between ["AdamW", "prodigy"]'), |
| | ) |
| |
|
| | parser.add_argument( |
| | "--use_8bit_adam", |
| | action="store_true", |
| | help="Whether or not to use 8-bit Adam from bitsandbytes. Ignored if optimizer is not set to AdamW", |
| | ) |
| |
|
| | parser.add_argument( |
| | "--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam and Prodigy optimizers." |
| | ) |
| | parser.add_argument( |
| | "--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam and Prodigy optimizers." |
| | ) |
| | parser.add_argument( |
| | "--prodigy_beta3", |
| | type=float, |
| | default=None, |
| | help="coefficients for computing the Prodigy stepsize using running averages. If set to None, " |
| | "uses the value of square root of beta2. Ignored if optimizer is adamW", |
| | ) |
| | parser.add_argument("--prodigy_decouple", type=bool, default=True, help="Use AdamW style decoupled weight decay") |
| | parser.add_argument( |
| | "--adam_weight_decay", type=float, default=1e-04, help="Weight decay to use for transformer params" |
| | ) |
| | parser.add_argument( |
| | "--adam_weight_decay_text_encoder", type=float, default=1e-03, help="Weight decay to use for text_encoder" |
| | ) |
| |
|
| | parser.add_argument( |
| | "--lora_layers", |
| | type=str, |
| | default=None, |
| | help=( |
| | "The transformer modules to apply LoRA training on. Please specify the layers in a comma separated. " |
| | 'E.g. - "to_k,to_q,to_v,to_out.0" will result in lora training of attention layers only. For more examples refer to https://github.com/huggingface/diffusers/blob/main/examples/advanced_diffusion_training/README_flux.md' |
| | ), |
| | ) |
| |
|
| | parser.add_argument( |
| | "--adam_epsilon", |
| | type=float, |
| | default=1e-08, |
| | help="Epsilon value for the Adam optimizer and Prodigy optimizers.", |
| | ) |
| |
|
| | parser.add_argument( |
| | "--prodigy_use_bias_correction", |
| | type=bool, |
| | default=True, |
| | help="Turn on Adam's bias correction. True by default. Ignored if optimizer is adamW", |
| | ) |
| | parser.add_argument( |
| | "--prodigy_safeguard_warmup", |
| | type=bool, |
| | default=True, |
| | help="Remove lr from the denominator of D estimate to avoid issues during warm-up stage. True by default. " |
| | "Ignored if optimizer is adamW", |
| | ) |
| | parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") |
| | parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") |
| | parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") |
| | parser.add_argument( |
| | "--hub_model_id", |
| | type=str, |
| | default=None, |
| | help="The name of the repository to keep in sync with the local `output_dir`.", |
| | ) |
| | parser.add_argument( |
| | "--logging_dir", |
| | type=str, |
| | default="logs", |
| | help=( |
| | "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" |
| | " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." |
| | ), |
| | ) |
| | parser.add_argument( |
| | "--allow_tf32", |
| | action="store_true", |
| | help=( |
| | "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" |
| | " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" |
| | ), |
| | ) |
| | parser.add_argument( |
| | "--cache_latents", |
| | action="store_true", |
| | default=False, |
| | help="Cache the VAE latents", |
| | ) |
| | parser.add_argument( |
| | "--report_to", |
| | type=str, |
| | default="tensorboard", |
| | help=( |
| | 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' |
| | ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' |
| | ), |
| | ) |
| | parser.add_argument( |
| | "--mixed_precision", |
| | type=str, |
| | default=None, |
| | choices=["no", "fp16", "bf16"], |
| | help=( |
| | "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" |
| | " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" |
| | " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." |
| | ), |
| | ) |
| | parser.add_argument( |
| | "--upcast_before_saving", |
| | action="store_true", |
| | default=False, |
| | help=( |
| | "Whether to upcast the trained transformer layers to float32 before saving (at the end of training). " |
| | "Defaults to precision dtype used for training to save memory" |
| | ), |
| | ) |
| | parser.add_argument( |
| | "--prior_generation_precision", |
| | type=str, |
| | default=None, |
| | choices=["no", "fp32", "fp16", "bf16"], |
| | help=( |
| | "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" |
| | " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32." |
| | ), |
| | ) |
| | parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") |
| | parser.add_argument( |
| | "--image_interpolation_mode", |
| | type=str, |
| | default="lanczos", |
| | choices=[ |
| | f.lower() for f in dir(transforms.InterpolationMode) if not f.startswith("__") and not f.endswith("__") |
| | ], |
| | help="The image interpolation method to use for resizing images.", |
| | ) |
| |
|
| | if input_args is not None: |
| | args = parser.parse_args(input_args) |
| | else: |
| | args = parser.parse_args() |
| |
|
| | if args.dataset_name is None and args.instance_data_dir is None: |
| | raise ValueError("Specify either `--dataset_name` or `--instance_data_dir`") |
| |
|
| | if args.dataset_name is not None and args.instance_data_dir is not None: |
| | raise ValueError("Specify only one of `--dataset_name` or `--instance_data_dir`") |
| |
|
| | if args.train_text_encoder and args.train_text_encoder_ti: |
| | raise ValueError( |
| | "Specify only one of `--train_text_encoder` or `--train_text_encoder_ti. " |
| | "For full LoRA text encoder training check --train_text_encoder, for textual " |
| | "inversion training check `--train_text_encoder_ti`" |
| | ) |
| | if args.train_transformer_frac < 1 and not args.train_text_encoder_ti: |
| | raise ValueError( |
| | "--train_transformer_frac must be == 1 if text_encoder training / textual inversion is not enabled." |
| | ) |
| | if args.train_transformer_frac < 1 and args.train_text_encoder_ti_frac < 1: |
| | raise ValueError( |
| | "--train_transformer_frac and --train_text_encoder_ti_frac are identical and smaller than 1. " |
| | "This contradicts with --max_train_steps, please specify different values or set both to 1." |
| | ) |
| | if args.enable_t5_ti and not args.train_text_encoder_ti: |
| | logger.warning("You need not use --enable_t5_ti without --train_text_encoder_ti.") |
| |
|
| | if args.train_text_encoder_ti and args.initializer_concept and args.num_new_tokens_per_abstraction: |
| | logger.warning( |
| | "When specifying --initializer_concept, the number of tokens per abstraction is detrimned " |
| | "by the initializer token. --num_new_tokens_per_abstraction will be ignored" |
| | ) |
| |
|
| | env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) |
| | if env_local_rank != -1 and env_local_rank != args.local_rank: |
| | args.local_rank = env_local_rank |
| |
|
| | if args.with_prior_preservation: |
| | if args.class_data_dir is None: |
| | raise ValueError("You must specify a data directory for class images.") |
| | if args.class_prompt is None: |
| | raise ValueError("You must specify prompt for class images.") |
| | else: |
| | if args.class_data_dir is not None: |
| | logger.warning("You need not use --class_data_dir without --with_prior_preservation.") |
| | if args.class_prompt is not None: |
| | logger.warning("You need not use --class_prompt without --with_prior_preservation.") |
| |
|
| | return args |
| |
|
| |
|
| | |
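# Handles insertion, initialization, saving and re-normalization of the new tokens used for pivotal tuning (textual inversion).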
| | class TokenEmbeddingsHandler: |
| | def __init__(self, text_encoders, tokenizers): |
| | self.text_encoders = text_encoders |
| | self.tokenizers = tokenizers |
| |
|
| | self.train_ids: Optional[torch.Tensor] = None |
| | self.train_ids_t5: Optional[torch.Tensor] = None |
| | self.inserting_toks: Optional[List[str]] = None |
| | self.embeddings_settings = {} |
| |
|
| | def initialize_new_tokens(self, inserting_toks: List[str]): |
| | idx = 0 |
| | for tokenizer, text_encoder in zip(self.tokenizers, self.text_encoders): |
| | assert isinstance(inserting_toks, list), "inserting_toks should be a list of strings." |
| | assert all(isinstance(tok, str) for tok in inserting_toks), ( |
| | "All elements in inserting_toks should be strings." |
| | ) |
| |
|
| | self.inserting_toks = inserting_toks |
| | special_tokens_dict = {"additional_special_tokens": self.inserting_toks} |
| | tokenizer.add_special_tokens(special_tokens_dict) |
| | |
| | text_encoder.resize_token_embeddings(len(tokenizer)) |
| |
|
| | |
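# Record the ids of the newly inserted tokens (CLIP tokenizer for idx 0, T5 tokenizer otherwise).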
| | if idx == 0: |
| | self.train_ids = tokenizer.convert_tokens_to_ids(self.inserting_toks) |
| | else: |
| | self.train_ids_t5 = tokenizer.convert_tokens_to_ids(self.inserting_toks) |
| |
|
| | |
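# Grab the token embedding table: CLIP exposes it at text_model.embeddings.token_embedding, T5 at encoder.embed_tokens.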
| | embeds = ( |
| | text_encoder.text_model.embeddings.token_embedding if idx == 0 else text_encoder.encoder.embed_tokens |
| | ) |
| | std_token_embedding = embeds.weight.data.std() |
| |
|
| | logger.info(f"{idx} text encoder's std_token_embedding: {std_token_embedding}") |
| |
|
| | train_ids = self.train_ids if idx == 0 else self.train_ids_t5 |
| | |
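# Initialize the new embeddings: random vectors scaled by the existing embedding std, or copies of the initializer concept's embeddings.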
| | if args.initializer_concept is None: |
| | hidden_size = ( |
| | text_encoder.text_model.config.hidden_size if idx == 0 else text_encoder.encoder.config.hidden_size |
| | ) |
| | embeds.weight.data[train_ids] = ( |
| | torch.randn(len(train_ids), hidden_size).to(device=self.device).to(dtype=self.dtype) |
| | * std_token_embedding |
| | ) |
| | else: |
| | |
| | initializer_token_ids = tokenizer.encode(args.initializer_concept, add_special_tokens=False) |
| | for token_idx, token_id in enumerate(train_ids): |
| | embeds.weight.data[token_id] = (embeds.weight.data)[ |
| | initializer_token_ids[token_idx % len(initializer_token_ids)] |
| | ].clone() |
| |
|
| | self.embeddings_settings[f"original_embeddings_{idx}"] = embeds.weight.data.clone() |
| | self.embeddings_settings[f"std_token_embedding_{idx}"] = std_token_embedding |
| |
|
| | |
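# Boolean mask of embedding rows that must stay frozen (everything except the newly inserted tokens).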
| | index_no_updates = torch.ones((len(tokenizer),), dtype=torch.bool) |
| | index_no_updates[train_ids] = False |
| |
|
| | self.embeddings_settings[f"index_no_updates_{idx}"] = index_no_updates |
| |
|
| | logger.info(self.embeddings_settings[f"index_no_updates_{idx}"].shape) |
| |
|
| | idx += 1 |
| |
|
| | def save_embeddings(self, file_path: str): |
| | assert self.train_ids is not None, "Initialize new tokens before saving embeddings." |
| | tensors = {} |
| | |
| | idx_to_text_encoder_name = {0: "clip_l", 1: "t5"} |
| | for idx, text_encoder in enumerate(self.text_encoders): |
| | train_ids = self.train_ids if idx == 0 else self.train_ids_t5 |
| | embeds = text_encoder.text_model.embeddings.token_embedding if idx == 0 else text_encoder.shared |
assert embeds.weight.data.shape[0] == len(self.tokenizers[idx]), "Embedding size must match the tokenizer length."
| | new_token_embeddings = embeds.weight.data[train_ids] |
| |
|
| | |
| | |
| | tensors[idx_to_text_encoder_name[idx]] = new_token_embeddings |
| | |
| |
|
| | save_file(tensors, file_path) |
| |
|
| | @property |
| | def dtype(self): |
| | return self.text_encoders[0].dtype |
| |
|
| | @property |
| | def device(self): |
| | return self.text_encoders[0].device |
| |
|
| | @torch.no_grad() |
| | def retract_embeddings(self): |
| | for idx, text_encoder in enumerate(self.text_encoders): |
| | embeds = text_encoder.text_model.embeddings.token_embedding if idx == 0 else text_encoder.shared |
| | index_no_updates = self.embeddings_settings[f"index_no_updates_{idx}"] |
| | embeds.weight.data[index_no_updates] = ( |
| | self.embeddings_settings[f"original_embeddings_{idx}"][index_no_updates] |
| | .to(device=text_encoder.device) |
| | .to(dtype=text_encoder.dtype) |
| | ) |
| |
|
| | |
| | |
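# Gently rescale the updated embeddings so their std drifts back toward the original token-embedding std.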
| | std_token_embedding = self.embeddings_settings[f"std_token_embedding_{idx}"] |
| |
|
| | index_updates = ~index_no_updates |
| | new_embeddings = embeds.weight.data[index_updates] |
| | off_ratio = std_token_embedding / new_embeddings.std() |
| |
|
| | new_embeddings = new_embeddings * (off_ratio**0.1) |
| | embeds.weight.data[index_updates] = new_embeddings |
| |
|
| |
|
| | class DreamBoothDataset(Dataset): |
| | """ |
| | A dataset to prepare the instance and class images with the prompts for fine-tuning the model. |
| | It pre-processes the images. |
| | """ |
| |
|
| | def __init__( |
| | self, |
| | args, |
| | instance_data_root, |
| | instance_prompt, |
| | class_prompt, |
| | train_text_encoder_ti, |
| | token_abstraction_dict=None, |
| | class_data_root=None, |
| | class_num=None, |
| | size=1024, |
| | repeats=1, |
| | ): |
| | self.size = size |
| |
|
| | self.instance_prompt = instance_prompt |
| | self.custom_instance_prompts = None |
| | self.class_prompt = class_prompt |
| | self.token_abstraction_dict = token_abstraction_dict |
| | self.train_text_encoder_ti = train_text_encoder_ti |
| | |
| | |
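# Either load a dataset from the hub / a local 🤗 Datasets folder (optionally with per-image captions), or read raw images from --instance_data_dir.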
| | if args.dataset_name is not None: |
| | try: |
| | from datasets import load_dataset |
| | except ImportError: |
| | raise ImportError( |
| | "You are trying to load your data using the datasets library. If you wish to train using custom " |
| | "captions please install the datasets library: `pip install datasets`. If you wish to load a " |
| | "local folder containing images only, specify --instance_data_dir instead." |
| | ) |
| | |
| | |
| | |
| | dataset = load_dataset( |
| | args.dataset_name, |
| | args.dataset_config_name, |
| | cache_dir=args.cache_dir, |
| | ) |
| | |
| | column_names = dataset["train"].column_names |
| |
|
| | |
| | if args.image_column is None: |
| | image_column = column_names[0] |
| | logger.info(f"image column defaulting to {image_column}") |
| | else: |
| | image_column = args.image_column |
| | if image_column not in column_names: |
| | raise ValueError( |
| | f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}" |
| | ) |
| | instance_images = dataset["train"][image_column] |
| |
|
| | if args.caption_column is None: |
| | logger.info( |
| | "No caption column provided, defaulting to instance_prompt for all images. If your dataset " |
| | "contains captions/prompts for the images, make sure to specify the " |
| | "column as --caption_column" |
| | ) |
| | self.custom_instance_prompts = None |
| | else: |
| | if args.caption_column not in column_names: |
| | raise ValueError( |
| | f"`--caption_column` value '{args.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}" |
| | ) |
| | custom_instance_prompts = dataset["train"][args.caption_column] |
| | |
| | self.custom_instance_prompts = [] |
| | for caption in custom_instance_prompts: |
| | self.custom_instance_prompts.extend(itertools.repeat(caption, repeats)) |
| | else: |
| | self.instance_data_root = Path(instance_data_root) |
| | if not self.instance_data_root.exists(): |
| | raise ValueError("Instance images root doesn't exists.") |
| |
|
| | instance_images = [Image.open(path) for path in list(Path(instance_data_root).iterdir())] |
| | self.custom_instance_prompts = None |
| |
|
| | self.instance_images = [] |
| | for img in instance_images: |
| | self.instance_images.extend(itertools.repeat(img, repeats)) |
| |
|
| | self.pixel_values = [] |
| | interpolation = getattr(transforms.InterpolationMode, args.image_interpolation_mode.upper(), None) |
| | if interpolation is None: |
| | raise ValueError(f"Unsupported interpolation mode {interpolation=}.") |
| | train_resize = transforms.Resize(size, interpolation=interpolation) |
| | train_crop = transforms.CenterCrop(size) if args.center_crop else transforms.RandomCrop(size) |
| | train_flip = transforms.RandomHorizontalFlip(p=1.0) |
| | train_transforms = transforms.Compose( |
| | [ |
| | transforms.ToTensor(), |
| | transforms.Normalize([0.5], [0.5]), |
| | ] |
| | ) |
| | for image in self.instance_images: |
| | image = exif_transpose(image) |
| | if not image.mode == "RGB": |
| | image = image.convert("RGB") |
| | image = train_resize(image) |
| | if args.random_flip and random.random() < 0.5: |
| | |
| | image = train_flip(image) |
| | if args.center_crop: |
| | y1 = max(0, int(round((image.height - self.size) / 2.0))) |
| | x1 = max(0, int(round((image.width - self.size) / 2.0))) |
| | image = train_crop(image) |
| | else: |
| | y1, x1, h, w = train_crop.get_params(image, (self.size, self.size)) |
| | image = crop(image, y1, x1, h, w) |
| | image = train_transforms(image) |
| | self.pixel_values.append(image) |
| |
|
| | self.num_instance_images = len(self.instance_images) |
| | self._length = self.num_instance_images |
| |
|
| | if class_data_root is not None: |
| | self.class_data_root = Path(class_data_root) |
| | self.class_data_root.mkdir(parents=True, exist_ok=True) |
| | self.class_images_path = list(self.class_data_root.iterdir()) |
| | if class_num is not None: |
| | self.num_class_images = min(len(self.class_images_path), class_num) |
| | else: |
| | self.num_class_images = len(self.class_images_path) |
| | self._length = max(self.num_class_images, self.num_instance_images) |
| | else: |
| | self.class_data_root = None |
| |
|
| | self.image_transforms = transforms.Compose( |
| | [ |
| | transforms.Resize(size, interpolation=interpolation), |
| | transforms.CenterCrop(size) if args.center_crop else transforms.RandomCrop(size), |
| | transforms.ToTensor(), |
| | transforms.Normalize([0.5], [0.5]), |
| | ] |
| | ) |
| |
|
| | def __len__(self): |
| | return self._length |
| |
|
| | def __getitem__(self, index): |
| | example = {} |
| | instance_image = self.pixel_values[index % self.num_instance_images] |
| | example["instance_images"] = instance_image |
| |
|
| | if self.custom_instance_prompts: |
| | caption = self.custom_instance_prompts[index % self.num_instance_images] |
| | if caption: |
| | if self.train_text_encoder_ti: |
| | |
| | for token_abs, token_replacement in self.token_abstraction_dict.items(): |
| | caption = caption.replace(token_abs, "".join(token_replacement)) |
| | example["instance_prompt"] = caption |
| | else: |
| | example["instance_prompt"] = self.instance_prompt |
| |
|
| | else: |
| | example["instance_prompt"] = self.instance_prompt |
| |
|
| | if self.class_data_root: |
| | class_image = Image.open(self.class_images_path[index % self.num_class_images]) |
| | class_image = exif_transpose(class_image) |
| |
|
| | if not class_image.mode == "RGB": |
| | class_image = class_image.convert("RGB") |
| | example["class_images"] = self.image_transforms(class_image) |
| | example["class_prompt"] = self.class_prompt |
| |
|
| | return example |
| |
|
| |
|
| | def collate_fn(examples, with_prior_preservation=False): |
| | pixel_values = [example["instance_images"] for example in examples] |
| | prompts = [example["instance_prompt"] for example in examples] |
| |
|
| | |
| | |
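# Concat class and instance examples for prior preservation to avoid doing two forward passes.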
| | if with_prior_preservation: |
| | pixel_values += [example["class_images"] for example in examples] |
| | prompts += [example["class_prompt"] for example in examples] |
| |
|
| | pixel_values = torch.stack(pixel_values) |
| | pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() |
| |
|
| | batch = {"pixel_values": pixel_values, "prompts": prompts} |
| | return batch |
| |
|
| |
|
| | class PromptDataset(Dataset): |
| | "A simple dataset to prepare the prompts to generate class images on multiple GPUs." |
| |
|
| | def __init__(self, prompt, num_samples): |
| | self.prompt = prompt |
| | self.num_samples = num_samples |
| |
|
| | def __len__(self): |
| | return self.num_samples |
| |
|
| | def __getitem__(self, index): |
| | example = {} |
| | example["prompt"] = self.prompt |
| | example["index"] = index |
| | return example |
| |
|
| |
|
| | def tokenize_prompt(tokenizer, prompt, max_sequence_length, add_special_tokens=False): |
| | text_inputs = tokenizer( |
| | prompt, |
| | padding="max_length", |
| | max_length=max_sequence_length, |
| | truncation=True, |
| | return_length=False, |
| | return_overflowing_tokens=False, |
| | add_special_tokens=add_special_tokens, |
| | return_tensors="pt", |
| | ) |
| | text_input_ids = text_inputs.input_ids |
| | return text_input_ids |
| |
|
| |
|
| | def _encode_prompt_with_t5( |
| | text_encoder, |
| | tokenizer, |
| | max_sequence_length=512, |
| | prompt=None, |
| | num_images_per_prompt=1, |
| | device=None, |
| | text_input_ids=None, |
| | ): |
| | prompt = [prompt] if isinstance(prompt, str) else prompt |
| | batch_size = len(prompt) |
| |
|
| | if tokenizer is not None: |
| | text_inputs = tokenizer( |
| | prompt, |
| | padding="max_length", |
| | max_length=max_sequence_length, |
| | truncation=True, |
| | return_length=False, |
| | return_overflowing_tokens=False, |
| | return_tensors="pt", |
| | ) |
| | text_input_ids = text_inputs.input_ids |
| | else: |
| | if text_input_ids is None: |
| | raise ValueError("text_input_ids must be provided when the tokenizer is not specified") |
| |
|
| | prompt_embeds = text_encoder(text_input_ids.to(device))[0] |
| |
|
| | if hasattr(text_encoder, "module"): |
| | dtype = text_encoder.module.dtype |
| | else: |
| | dtype = text_encoder.dtype |
| | prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) |
| |
|
| | _, seq_len, _ = prompt_embeds.shape |
| |
|
| | |
| | prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) |
| | prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) |
| |
|
| | return prompt_embeds |
| |
|
| |
|
| | def _encode_prompt_with_clip( |
| | text_encoder, |
| | tokenizer, |
| | prompt: str, |
| | device=None, |
| | text_input_ids=None, |
| | num_images_per_prompt: int = 1, |
| | ): |
| | prompt = [prompt] if isinstance(prompt, str) else prompt |
| | batch_size = len(prompt) |
| |
|
| | if tokenizer is not None: |
| | text_inputs = tokenizer( |
| | prompt, |
| | padding="max_length", |
| | max_length=77, |
| | truncation=True, |
| | return_overflowing_tokens=False, |
| | return_length=False, |
| | return_tensors="pt", |
| | ) |
| |
|
| | text_input_ids = text_inputs.input_ids |
| | else: |
| | if text_input_ids is None: |
| | raise ValueError("text_input_ids must be provided when the tokenizer is not specified") |
| |
|
| | prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=False) |
| |
|
| | if hasattr(text_encoder, "module"): |
| | dtype = text_encoder.module.dtype |
| | else: |
| | dtype = text_encoder.dtype |
| | |
| | prompt_embeds = prompt_embeds.pooler_output |
| | prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) |
| |
|
| | |
| | prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) |
| | prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1) |
| |
|
| | return prompt_embeds |
| |
|
| |
|
| | def encode_prompt( |
| | text_encoders, |
| | tokenizers, |
| | prompt: str, |
| | max_sequence_length, |
| | device=None, |
| | num_images_per_prompt: int = 1, |
| | text_input_ids_list=None, |
| | ): |
| | prompt = [prompt] if isinstance(prompt, str) else prompt |
| | if hasattr(text_encoders[0], "module"): |
| | dtype = text_encoders[0].module.dtype |
| | else: |
| | dtype = text_encoders[0].dtype |
| |
|
| | pooled_prompt_embeds = _encode_prompt_with_clip( |
| | text_encoder=text_encoders[0], |
| | tokenizer=tokenizers[0], |
| | prompt=prompt, |
| | device=device if device is not None else text_encoders[0].device, |
| | num_images_per_prompt=num_images_per_prompt, |
| | text_input_ids=text_input_ids_list[0] if text_input_ids_list else None, |
| | ) |
| |
|
| | prompt_embeds = _encode_prompt_with_t5( |
| | text_encoder=text_encoders[1], |
| | tokenizer=tokenizers[1], |
| | max_sequence_length=max_sequence_length, |
| | prompt=prompt, |
| | num_images_per_prompt=num_images_per_prompt, |
| | device=device if device is not None else text_encoders[1].device, |
| | text_input_ids=text_input_ids_list[1] if text_input_ids_list else None, |
| | ) |
| |
|
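# Text token ids for FLUX's positional embedding; zeros, matching the behavior of FluxPipeline.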
| | text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype) |
| |
|
| | return prompt_embeds, pooled_prompt_embeds, text_ids |
| |
|
| |
|
| | def main(args): |
| | if args.report_to == "wandb" and args.hub_token is not None: |
| | raise ValueError( |
| | "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." |
| | " Please use `hf auth login` to authenticate with the Hub." |
| | ) |
| |
|
| | if torch.backends.mps.is_available() and args.mixed_precision == "bf16": |
| | |
| | raise ValueError( |
| | "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead." |
| | ) |
| |
|
| | logging_dir = Path(args.output_dir, args.logging_dir) |
| |
|
| | accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) |
| | kwargs = DistributedDataParallelKwargs(find_unused_parameters=True) |
| | accelerator = Accelerator( |
| | gradient_accumulation_steps=args.gradient_accumulation_steps, |
| | mixed_precision=args.mixed_precision, |
| | log_with=args.report_to, |
| | project_config=accelerator_project_config, |
| | kwargs_handlers=[kwargs], |
| | ) |
| |
|
| | |
| | if torch.backends.mps.is_available(): |
| | accelerator.native_amp = False |
| |
|
| | if args.report_to == "wandb": |
| | if not is_wandb_available(): |
| | raise ImportError("Make sure to install wandb if you want to use it for logging during training.") |
| |
|
| | |
| | logging.basicConfig( |
| | format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", |
| | datefmt="%m/%d/%Y %H:%M:%S", |
| | level=logging.INFO, |
| | ) |
| | logger.info(accelerator.state, main_process_only=False) |
| | if accelerator.is_local_main_process: |
| | transformers.utils.logging.set_verbosity_warning() |
| | diffusers.utils.logging.set_verbosity_info() |
| | else: |
| | transformers.utils.logging.set_verbosity_error() |
| | diffusers.utils.logging.set_verbosity_error() |
| |
|
| | |
| | if args.seed is not None: |
| | set_seed(args.seed) |
| |
|
| | |
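# Generate class images with the base pipeline if prior preservation is enabled and class_data_dir holds fewer than num_class_images.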
| | if args.with_prior_preservation: |
| | class_images_dir = Path(args.class_data_dir) |
| | if not class_images_dir.exists(): |
| | class_images_dir.mkdir(parents=True) |
| | cur_class_images = len(list(class_images_dir.iterdir())) |
| |
|
| | if cur_class_images < args.num_class_images: |
| | has_supported_fp16_accelerator = torch.cuda.is_available() or torch.backends.mps.is_available() |
| | torch_dtype = torch.float16 if has_supported_fp16_accelerator else torch.float32 |
| | if args.prior_generation_precision == "fp32": |
| | torch_dtype = torch.float32 |
| | elif args.prior_generation_precision == "fp16": |
| | torch_dtype = torch.float16 |
| | elif args.prior_generation_precision == "bf16": |
| | torch_dtype = torch.bfloat16 |
| |
|
| | pipeline = FluxPipeline.from_pretrained( |
| | args.pretrained_model_name_or_path, |
| | torch_dtype=torch_dtype, |
| | revision=args.revision, |
| | variant=args.variant, |
| | ) |
| | pipeline.set_progress_bar_config(disable=True) |
| |
|
| | num_new_images = args.num_class_images - cur_class_images |
| | logger.info(f"Number of class images to sample: {num_new_images}.") |
| |
|
| | sample_dataset = PromptDataset(args.class_prompt, num_new_images) |
| | sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) |
| |
|
| | sample_dataloader = accelerator.prepare(sample_dataloader) |
| | pipeline.to(accelerator.device) |
| |
|
| | for example in tqdm( |
| | sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process |
| | ): |
| | with torch.autocast(device_type=accelerator.device.type, dtype=torch_dtype): |
| | images = pipeline(prompt=example["prompt"]).images |
| |
|
| | for i, image in enumerate(images): |
| | hash_image = insecure_hashlib.sha1(image.tobytes()).hexdigest() |
| | image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" |
| | image.save(image_filename) |
| |
|
| | del pipeline |
| | free_memory() |
| |
|
| | |
| | if accelerator.is_main_process: |
| | if args.output_dir is not None: |
| | os.makedirs(args.output_dir, exist_ok=True) |
| |
|
| | model_id = args.hub_model_id or Path(args.output_dir).name |
| | repo_id = None |
| | if args.push_to_hub: |
| | repo_id = create_repo( |
| | repo_id=model_id, |
| | exist_ok=True, |
| | ).repo_id |
| |
|
| | |
| | tokenizer_one = CLIPTokenizer.from_pretrained( |
| | args.pretrained_model_name_or_path, |
| | subfolder="tokenizer", |
| | revision=args.revision, |
| | ) |
| | tokenizer_two = T5TokenizerFast.from_pretrained( |
| | args.pretrained_model_name_or_path, |
| | subfolder="tokenizer_2", |
| | revision=args.revision, |
| | ) |
| |
|
| | |
| | text_encoder_cls_one = import_model_class_from_model_name_or_path( |
| | args.pretrained_model_name_or_path, args.revision |
| | ) |
| | text_encoder_cls_two = import_model_class_from_model_name_or_path( |
| | args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2" |
| | ) |
| |
|
| | |
| | noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained( |
| | args.pretrained_model_name_or_path, subfolder="scheduler" |
| | ) |
| | noise_scheduler_copy = copy.deepcopy(noise_scheduler) |
| | text_encoder_one, text_encoder_two = load_text_encoders(text_encoder_cls_one, text_encoder_cls_two) |
| | vae = AutoencoderKL.from_pretrained( |
| | args.pretrained_model_name_or_path, |
| | subfolder="vae", |
| | revision=args.revision, |
| | variant=args.variant, |
| | ) |
| | transformer = FluxTransformer2DModel.from_pretrained( |
| | args.pretrained_model_name_or_path, subfolder="transformer", revision=args.revision, variant=args.variant |
| | ) |
| |
|
| | if args.train_text_encoder_ti: |
| | |
| | |
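# Parse --token_abstraction into a list of identifiers (comma separated).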
| | token_abstraction_list = [place_holder.strip() for place_holder in re.split(r",\s*", args.token_abstraction)] |
| | logger.info(f"list of token identifiers: {token_abstraction_list}") |
| |
|
| | if args.initializer_concept is None: |
| | num_new_tokens_per_abstraction = ( |
| | 2 if args.num_new_tokens_per_abstraction is None else args.num_new_tokens_per_abstraction |
| | ) |
| | |
| | else: |
| | token_ids = tokenizer_one.encode(args.initializer_concept, add_special_tokens=False) |
| | num_new_tokens_per_abstraction = len(token_ids) |
| | if args.enable_t5_ti: |
| | token_ids_t5 = tokenizer_two.encode(args.initializer_concept, add_special_tokens=False) |
| | num_new_tokens_per_abstraction = max(len(token_ids), len(token_ids_t5)) |
| | logger.info( |
| | f"initializer_concept: {args.initializer_concept}, num_new_tokens_per_abstraction: {num_new_tokens_per_abstraction}" |
| | ) |
| |
|
| | token_abstraction_dict = {} |
| | token_idx = 0 |
| | for i, token in enumerate(token_abstraction_list): |
| | token_abstraction_dict[token] = [f"<s{token_idx + i + j}>" for j in range(num_new_tokens_per_abstraction)] |
| | token_idx += num_new_tokens_per_abstraction - 1 |
| |
|
| | |
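# Replace each token abstraction (e.g. TOK) in the instance / class / validation prompts with its newly inserted tokens (e.g. <s0><s1>).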
| | for token_abs, token_replacement in token_abstraction_dict.items(): |
| | new_instance_prompt = args.instance_prompt.replace(token_abs, "".join(token_replacement)) |
| | if args.instance_prompt == new_instance_prompt: |
| | logger.warning( |
| | "Note! the instance prompt provided in --instance_prompt does not include the token abstraction specified " |
| | "--token_abstraction. This may lead to incorrect optimization of text embeddings during pivotal tuning" |
| | ) |
| | args.instance_prompt = new_instance_prompt |
| | if args.with_prior_preservation: |
| | args.class_prompt = args.class_prompt.replace(token_abs, "".join(token_replacement)) |
| | if args.validation_prompt: |
| | args.validation_prompt = args.validation_prompt.replace(token_abs, "".join(token_replacement)) |
| |
|
| | |
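# Add the new tokens to the tokenizer(s) and initialize their embeddings in the corresponding text encoder(s).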
| | text_encoders = [text_encoder_one, text_encoder_two] if args.enable_t5_ti else [text_encoder_one] |
| | tokenizers = [tokenizer_one, tokenizer_two] if args.enable_t5_ti else [tokenizer_one] |
| | embedding_handler = TokenEmbeddingsHandler(text_encoders, tokenizers) |
| | inserting_toks = [] |
| | for new_tok in token_abstraction_dict.values(): |
| | inserting_toks.extend(new_tok) |
| | embedding_handler.initialize_new_tokens(inserting_toks=inserting_toks) |
| |
|
| | |
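# Freeze all base model weights; only the LoRA adapters (and, for pivotal tuning, the new token embeddings) are trained.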
| | transformer.requires_grad_(False) |
| | vae.requires_grad_(False) |
| | text_encoder_one.requires_grad_(False) |
| | text_encoder_two.requires_grad_(False) |
| |
|
| | |
| | |
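# For mixed precision training, keep the frozen weights in the selected half-precision dtype to save memory.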
| | weight_dtype = torch.float32 |
| | if accelerator.mixed_precision == "fp16": |
| | weight_dtype = torch.float16 |
| | elif accelerator.mixed_precision == "bf16": |
| | weight_dtype = torch.bfloat16 |
| |
|
| | if torch.backends.mps.is_available() and weight_dtype == torch.bfloat16: |
| | |
| | raise ValueError( |
| | "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead." |
| | ) |
| |
|
| | vae.to(accelerator.device, dtype=weight_dtype) |
| | transformer.to(accelerator.device, dtype=weight_dtype) |
| | text_encoder_one.to(accelerator.device, dtype=weight_dtype) |
| | text_encoder_two.to(accelerator.device, dtype=weight_dtype) |
| |
|
| | if args.gradient_checkpointing: |
| | transformer.enable_gradient_checkpointing() |
| | if args.train_text_encoder: |
| | text_encoder_one.gradient_checkpointing_enable() |
| |
|
| | if args.lora_layers is not None: |
| | target_modules = [layer.strip() for layer in args.lora_layers.split(",")] |
| | else: |
| | target_modules = [ |
| | "attn.to_k", |
| | "attn.to_q", |
| | "attn.to_v", |
| | "attn.to_out.0", |
| | "attn.add_k_proj", |
| | "attn.add_q_proj", |
| | "attn.add_v_proj", |
| | "attn.to_add_out", |
| | "ff.net.0.proj", |
| | "ff.net.2", |
| | "ff_context.net.0.proj", |
| | "ff_context.net.2", |
| | ] |
| | |
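# Add the new LoRA adapter weights to the attention and feed-forward layers of the transformer.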
| | transformer_lora_config = LoraConfig( |
| | r=args.rank, |
| | lora_alpha=args.lora_alpha, |
| | lora_dropout=args.lora_dropout, |
| | init_lora_weights="gaussian", |
| | target_modules=target_modules, |
| | ) |
| | transformer.add_adapter(transformer_lora_config) |
| | if args.train_text_encoder: |
| | text_lora_config = LoraConfig( |
| | r=args.rank, |
| | lora_alpha=args.lora_alpha, |
| | lora_dropout=args.lora_dropout, |
| | init_lora_weights="gaussian", |
| | target_modules=["q_proj", "k_proj", "v_proj", "out_proj"], |
| | ) |
| | text_encoder_one.add_adapter(text_lora_config) |
| |
|
| | def unwrap_model(model): |
| | model = accelerator.unwrap_model(model) |
| | model = model._orig_mod if is_compiled_module(model) else model |
| | return model |
| |
|
| | # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format |
| | def save_model_hook(models, weights, output_dir): |
| | if accelerator.is_main_process: |
| | transformer_lora_layers_to_save = None |
| | text_encoder_one_lora_layers_to_save = None |
| | modules_to_save = {} |
| | for model in models: |
| | if isinstance(model, type(unwrap_model(transformer))): |
| | transformer_lora_layers_to_save = get_peft_model_state_dict(model) |
| | modules_to_save["transformer"] = model |
| | elif isinstance(model, type(unwrap_model(text_encoder_one))): |
| | if args.train_text_encoder: |
| | text_encoder_one_lora_layers_to_save = get_peft_model_state_dict(model) |
| | modules_to_save["text_encoder"] = model |
| | elif isinstance(model, type(unwrap_model(text_encoder_two))): |
| | pass |
| | else: |
| | raise ValueError(f"unexpected save model: {model.__class__}") |
| |
|
| | # make sure to pop weight so that corresponding model is not saved again |
| | weights.pop() |
| |
|
| | FluxPipeline.save_lora_weights( |
| | output_dir, |
| | transformer_lora_layers=transformer_lora_layers_to_save, |
| | text_encoder_lora_layers=text_encoder_one_lora_layers_to_save, |
| | **_collate_lora_metadata(modules_to_save), |
| | ) |
| | if args.train_text_encoder_ti: |
| | embedding_handler.save_embeddings(f"{args.output_dir}/{Path(args.output_dir).name}_emb.safetensors") |
| |
|
| | def load_model_hook(models, input_dir): |
| | transformer_ = None |
| | text_encoder_one_ = None |
| |
|
| | while len(models) > 0: |
| | model = models.pop() |
| |
|
| | if isinstance(model, type(unwrap_model(transformer))): |
| | transformer_ = model |
| | elif isinstance(model, type(unwrap_model(text_encoder_one))): |
| | text_encoder_one_ = model |
| | else: |
| | raise ValueError(f"unexpected model when loading: {model.__class__}") |
| |
|
| | lora_state_dict = FluxPipeline.lora_state_dict(input_dir) |
| |
|
| | transformer_state_dict = { |
| | f"{k.replace('transformer.', '')}": v for k, v in lora_state_dict.items() if k.startswith("transformer.") |
| | } |
| | transformer_state_dict = convert_unet_state_dict_to_peft(transformer_state_dict) |
| | incompatible_keys = set_peft_model_state_dict(transformer_, transformer_state_dict, adapter_name="default") |
| | if incompatible_keys is not None: |
| | # check only for unexpected keys |
| | unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) |
| | if unexpected_keys: |
| | logger.warning( |
| | f"Loading adapter weights from state_dict led to unexpected keys not found in the model: " |
| | f" {unexpected_keys}. " |
| | ) |
| | if args.train_text_encoder: |
| | # load the text encoder LoRA layers from the checkpoint into text_encoder_one |
| | _set_state_dict_into_text_encoder(lora_state_dict, prefix="text_encoder.", text_encoder=text_encoder_one_) |
| |
|
| | # Make sure the trainable params are in float32 again after loading the state, |
| | # since the base models are kept in `weight_dtype` (possibly fp16). |
| | if args.mixed_precision == "fp16": |
| | models = [transformer_] |
| | if args.train_text_encoder: |
| | models.extend([text_encoder_one_]) |
| | # only upcast trainable parameters (LoRA) into fp32 |
| | cast_training_params(models) |
| |
|
| | accelerator.register_save_state_pre_hook(save_model_hook) |
| | accelerator.register_load_state_pre_hook(load_model_hook) |
| |
|
| | # Enable TF32 for faster training on Ampere GPUs; |
| | # this only changes matmul precision, not the stored weights. |
| | if args.allow_tf32 and torch.cuda.is_available(): |
| | torch.backends.cuda.matmul.allow_tf32 = True |
| |
|
| | if args.scale_lr: |
| | args.learning_rate = ( |
| | args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes |
| | ) |
| |
|
| | # Make sure the trainable params are in float32 when training with fp16 mixed precision. |
| | if args.mixed_precision == "fp16": |
| | models = [transformer] |
| | if args.train_text_encoder: |
| | models.extend([text_encoder_one]) |
| | # only upcast trainable parameters (LoRA) into fp32 |
| | cast_training_params(models, dtype=torch.float32) |
| |
|
| | transformer_lora_parameters = list(filter(lambda p: p.requires_grad, transformer.parameters())) |
| | if args.train_text_encoder: |
| | text_lora_parameters_one = list(filter(lambda p: p.requires_grad, text_encoder_one.parameters())) |
| | # if we use textual inversion, we freeze all parameters except for the token embeddings |
| | # of the text encoder |
| | elif args.train_text_encoder_ti: |
| | text_lora_parameters_one = [] |
| | for name, param in text_encoder_one.named_parameters(): |
| | if "token_embedding" in name: |
| | # ensure that dtype is float32, even if the rest of the (frozen) model is loaded in fp16 |
| | if args.mixed_precision == "fp16": |
| | param.data = param.to(dtype=torch.float32) |
| | param.requires_grad = True |
| | text_lora_parameters_one.append(param) |
| | else: |
| | param.requires_grad = False |
| | if args.enable_t5_ti: |
| | text_lora_parameters_two = [] |
| | for name, param in text_encoder_two.named_parameters(): |
| | if "shared" in name: |
| | # ensure that dtype is float32, even if the rest of the (frozen) model is loaded in fp16 |
| | if args.mixed_precision == "fp16": |
| | param.data = param.to(dtype=torch.float32) |
| | param.requires_grad = True |
| | text_lora_parameters_two.append(param) |
| | else: |
| | param.requires_grad = False |
| |
|
| | # if neither regular text encoder training nor pivotal tuning is requested, the text encoders stay frozen |
| | freeze_text_encoder = not (args.train_text_encoder or args.train_text_encoder_ti) |
| |
|
| | # "pure" textual inversion: only the token embeddings are optimized and |
| | # the transformer is never trained (train_transformer_frac == 0) |
| | pure_textual_inversion = args.train_text_encoder_ti and args.train_transformer_frac == 0 |
| |
|
| | # Optimization parameters |
| | transformer_parameters_with_lr = {"params": transformer_lora_parameters, "lr": args.learning_rate} |
| | if not freeze_text_encoder: |
| | # different learning rate for text encoder and transformer |
| | text_parameters_one_with_lr = { |
| | "params": text_lora_parameters_one, |
| | "weight_decay": args.adam_weight_decay_text_encoder |
| | if args.adam_weight_decay_text_encoder |
| | else args.adam_weight_decay, |
| | "lr": args.text_encoder_lr, |
| | } |
| | if not args.enable_t5_ti: |
| | # pivotal tuning / textual inversion with the CLIP encoder only |
| | if pure_textual_inversion: |
| | params_to_optimize = [text_parameters_one_with_lr] |
| | te_idx = 0 |
| | else: |
| | params_to_optimize = [transformer_parameters_with_lr, text_parameters_one_with_lr] |
| | te_idx = 1 |
| | elif args.enable_t5_ti: |
| | # pivotal tuning of both CLIP & T5 |
| | text_parameters_two_with_lr = { |
| | "params": text_lora_parameters_two, |
| | "weight_decay": args.adam_weight_decay_text_encoder |
| | if args.adam_weight_decay_text_encoder |
| | else args.adam_weight_decay, |
| | "lr": args.text_encoder_lr, |
| | } |
| | # pure textual inversion - only the CLIP & T5 token embeddings are optimized |
| | if pure_textual_inversion: |
| | params_to_optimize = [text_parameters_one_with_lr, text_parameters_two_with_lr] |
| | te_idx = 0 |
| | else: |
| | params_to_optimize = [ |
| | transformer_parameters_with_lr, |
| | text_parameters_one_with_lr, |
| | text_parameters_two_with_lr, |
| | ] |
| | te_idx = 1 |
| | else: |
| | params_to_optimize = [transformer_parameters_with_lr] |
| |
|
| | # Optimizer creation |
| | if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"): |
| | logger.warning( |
| | f"Unsupported choice of optimizer: {args.optimizer}.Supported optimizers include [adamW, prodigy]." |
| | "Defaulting to adamW" |
| | ) |
| | args.optimizer = "adamw" |
| |
|
| | if args.use_8bit_adam and not args.optimizer.lower() == "adamw": |
| | logger.warning( |
| | f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was " |
| | f"set to {args.optimizer.lower()}" |
| | ) |
| |
|
| | if args.optimizer.lower() == "adamw": |
| | if args.use_8bit_adam: |
| | try: |
| | import bitsandbytes as bnb |
| | except ImportError: |
| | raise ImportError( |
| | "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." |
| | ) |
| |
|
| | optimizer_class = bnb.optim.AdamW8bit |
| | else: |
| | optimizer_class = torch.optim.AdamW |
| |
|
| | optimizer = optimizer_class( |
| | params_to_optimize, |
| | betas=(args.adam_beta1, args.adam_beta2), |
| | weight_decay=args.adam_weight_decay, |
| | eps=args.adam_epsilon, |
| | ) |
| |
|
| | if args.optimizer.lower() == "prodigy": |
| | try: |
| | import prodigyopt |
| | except ImportError: |
| | raise ImportError("To use Prodigy, please install the prodigyopt library: `pip install prodigyopt`") |
| |
|
| | optimizer_class = prodigyopt.Prodigy |
| |
|
| | if args.learning_rate <= 0.1: |
| | logger.warning( |
| | "Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0" |
| | ) |
| | if not freeze_text_encoder and args.text_encoder_lr: |
| | logger.warning( |
| | f"Learning rates were provided both for the transformer and the text encoder- e.g. text_encoder_lr:" |
| | f" {args.text_encoder_lr} and learning_rate: {args.learning_rate}. " |
| | f"When using prodigy only learning_rate is used as the initial learning rate." |
| | ) |
| | # with Prodigy only --learning_rate is used as the initial step size, |
| | # so the text encoder parameter groups are set to the same value |
| |
|
| | params_to_optimize[te_idx]["lr"] = args.learning_rate |
| | params_to_optimize[-1]["lr"] = args.learning_rate |
| | optimizer = optimizer_class( |
| | params_to_optimize, |
| | betas=(args.adam_beta1, args.adam_beta2), |
| | beta3=args.prodigy_beta3, |
| | weight_decay=args.adam_weight_decay, |
| | eps=args.adam_epsilon, |
| | decouple=args.prodigy_decouple, |
| | use_bias_correction=args.prodigy_use_bias_correction, |
| | safeguard_warmup=args.prodigy_safeguard_warmup, |
| | ) |
| |
|
| | # Dataset and DataLoaders creation: |
| | train_dataset = DreamBoothDataset( |
| | args=args, |
| | instance_data_root=args.instance_data_dir, |
| | instance_prompt=args.instance_prompt, |
| | train_text_encoder_ti=args.train_text_encoder_ti, |
| | token_abstraction_dict=token_abstraction_dict if args.train_text_encoder_ti else None, |
| | class_prompt=args.class_prompt, |
| | class_data_root=args.class_data_dir if args.with_prior_preservation else None, |
| | class_num=args.num_class_images, |
| | size=args.resolution, |
| | repeats=args.repeats, |
| | ) |
| |
|
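| | # wrap the dataset in a DataLoader; collate_fn merges prior-preservation (class) examples into the batch when --with_prior_preservation is set |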
| | train_dataloader = torch.utils.data.DataLoader( |
| | train_dataset, |
| | batch_size=args.train_batch_size, |
| | shuffle=True, |
| | collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation), |
| | num_workers=args.dataloader_num_workers, |
| | ) |
| |
|
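| | # when the text encoders stay frozen, prompt embeddings can be precomputed once and reused on every step |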
| | if freeze_text_encoder: |
| | tokenizers = [tokenizer_one, tokenizer_two] |
| | text_encoders = [text_encoder_one, text_encoder_two] |
| |
|
| | def compute_text_embeddings(prompt, text_encoders, tokenizers): |
| | with torch.no_grad(): |
| | prompt_embeds, pooled_prompt_embeds, text_ids = encode_prompt( |
| | text_encoders, tokenizers, prompt, args.max_sequence_length |
| | ) |
| | prompt_embeds = prompt_embeds.to(accelerator.device) |
| | pooled_prompt_embeds = pooled_prompt_embeds.to(accelerator.device) |
| | text_ids = text_ids.to(accelerator.device) |
| | return prompt_embeds, pooled_prompt_embeds, text_ids |
| |
|
| | # If no type of tuning is done on the text encoders and custom instance prompts are NOT |
| | # provided (i.e. the --instance_prompt is used for all images), we encode the instance prompt once |
| | # to avoid redundant re-encoding on every step. |
| | if freeze_text_encoder and not train_dataset.custom_instance_prompts: |
| | instance_prompt_hidden_states, instance_pooled_prompt_embeds, instance_text_ids = compute_text_embeddings( |
| | args.instance_prompt, text_encoders, tokenizers |
| | ) |
| |
|
| | # Handle class prompt for prior-preservation. |
| | if args.with_prior_preservation: |
| | if freeze_text_encoder: |
| | class_prompt_hidden_states, class_pooled_prompt_embeds, class_text_ids = compute_text_embeddings( |
| | args.class_prompt, text_encoders, tokenizers |
| | ) |
| |
|
| | # Clear the memory here - the tokenizers and text encoders are no longer needed. |
| | if freeze_text_encoder and not train_dataset.custom_instance_prompts: |
| | del tokenizers, text_encoders, text_encoder_one, text_encoder_two |
| | free_memory() |
| |
|
| | # when doing pivotal tuning, tokenization keeps the special tokens (add_special_tokens=True) |
| | add_special_tokens_clip = bool(args.train_text_encoder_ti) |
| | add_special_tokens_t5 = bool(args.train_text_encoder_ti and args.enable_t5_ti) |
| |
|
| | # If custom instance prompts are NOT provided (i.e. the instance prompt is used for all images), |
| | # we pack / tokenize the prompts once here so that we don't have to |
| | # recompute them on every training step. |
| |
|
| | if not train_dataset.custom_instance_prompts: |
| | if freeze_text_encoder: |
| | prompt_embeds = instance_prompt_hidden_states |
| | pooled_prompt_embeds = instance_pooled_prompt_embeds |
| | text_ids = instance_text_ids |
| | if args.with_prior_preservation: |
| | prompt_embeds = torch.cat([prompt_embeds, class_prompt_hidden_states], dim=0) |
| | pooled_prompt_embeds = torch.cat([pooled_prompt_embeds, class_pooled_prompt_embeds], dim=0) |
| | text_ids = torch.cat([text_ids, class_text_ids], dim=0) |
| | # when the text encoders are being trained, the static prompts are tokenized once here |
| | # and re-encoded with the updated encoders on every training step |
| | else: |
| | tokens_one = tokenize_prompt( |
| | tokenizer_one, args.instance_prompt, max_sequence_length=77, add_special_tokens=add_special_tokens_clip |
| | ) |
| | tokens_two = tokenize_prompt( |
| | tokenizer_two, |
| | args.instance_prompt, |
| | max_sequence_length=args.max_sequence_length, |
| | add_special_tokens=add_special_tokens_t5, |
| | ) |
| | if args.with_prior_preservation: |
| | class_tokens_one = tokenize_prompt( |
| | tokenizer_one, |
| | args.class_prompt, |
| | max_sequence_length=77, |
| | add_special_tokens=add_special_tokens_clip, |
| | ) |
| | class_tokens_two = tokenize_prompt( |
| | tokenizer_two, |
| | args.class_prompt, |
| | max_sequence_length=args.max_sequence_length, |
| | add_special_tokens=add_special_tokens_t5, |
| | ) |
| | tokens_one = torch.cat([tokens_one, class_tokens_one], dim=0) |
| | tokens_two = torch.cat([tokens_two, class_tokens_two], dim=0) |
| |
|
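| | # cache the VAE config values so they stay available even if the VAE itself is freed below |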
| | vae_config_shift_factor = vae.config.shift_factor |
| | vae_config_scaling_factor = vae.config.scaling_factor |
| | vae_config_block_out_channels = vae.config.block_out_channels |
| | if args.cache_latents: |
| | latents_cache = [] |
| | for batch in tqdm(train_dataloader, desc="Caching latents"): |
| | with torch.no_grad(): |
| | batch["pixel_values"] = batch["pixel_values"].to( |
| | accelerator.device, non_blocking=True, dtype=weight_dtype |
| | ) |
| | latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist) |
| |
|
| | if args.validation_prompt is None: |
| | del vae |
| | free_memory() |
| |
|
| | # Scheduler and math around the number of training steps. |
| | # The scheduler length is computed before `accelerator.prepare`, so it is scaled by the number of processes. |
| | num_warmup_steps_for_scheduler = args.lr_warmup_steps * accelerator.num_processes |
| | if args.max_train_steps is None: |
| | len_train_dataloader_after_sharding = math.ceil(len(train_dataloader) / accelerator.num_processes) |
| | num_update_steps_per_epoch = math.ceil(len_train_dataloader_after_sharding / args.gradient_accumulation_steps) |
| | num_training_steps_for_scheduler = ( |
| | args.num_train_epochs * accelerator.num_processes * num_update_steps_per_epoch |
| | ) |
| | else: |
| | num_training_steps_for_scheduler = args.max_train_steps * accelerator.num_processes |
| |
|
| | lr_scheduler = get_scheduler( |
| | args.lr_scheduler, |
| | optimizer=optimizer, |
| | num_warmup_steps=num_warmup_steps_for_scheduler, |
| | num_training_steps=num_training_steps_for_scheduler, |
| | num_cycles=args.lr_num_cycles, |
| | power=args.lr_power, |
| | ) |
| |
|
| | # Prepare everything with our `accelerator`. |
| | if not freeze_text_encoder: |
| | if args.enable_t5_ti: |
| | ( |
| | transformer, |
| | text_encoder_one, |
| | text_encoder_two, |
| | optimizer, |
| | train_dataloader, |
| | lr_scheduler, |
| | ) = accelerator.prepare( |
| | transformer, |
| | text_encoder_one, |
| | text_encoder_two, |
| | optimizer, |
| | train_dataloader, |
| | lr_scheduler, |
| | ) |
| | else: |
| | transformer, text_encoder_one, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( |
| | transformer, text_encoder_one, optimizer, train_dataloader, lr_scheduler |
| | ) |
| |
|
| | else: |
| | transformer, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( |
| | transformer, optimizer, train_dataloader, lr_scheduler |
| | ) |
| |
|
| | # We need to recalculate our total training steps as the size of the training dataloader may have changed. |
| | num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) |
| | if args.max_train_steps is None: |
| | args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch |
| | if num_training_steps_for_scheduler != args.max_train_steps: |
| | logger.warning( |
| | f"The length of the 'train_dataloader' after 'accelerator.prepare' ({len(train_dataloader)}) does not match " |
| | f"the expected length ({len_train_dataloader_after_sharding}) when the learning rate scheduler was created. " |
| | f"This inconsistency may result in the learning rate scheduler not functioning properly." |
| | ) |
| | # Afterwards we recalculate our number of training epochs |
| | args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) |
| |
|
| | # We need to initialize the trackers we use, and also store our configuration. |
| | # The trackers initialize automatically on the main process. |
| | if accelerator.is_main_process: |
| | tracker_name = "dreambooth-flux-dev-lora-advanced" |
| | accelerator.init_trackers(tracker_name, config=vars(args)) |
| |
|
| | # Train! |
| | total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps |
| |
|
| | logger.info("***** Running training *****") |
| | logger.info(f" Num examples = {len(train_dataset)}") |
| | logger.info(f" Num batches each epoch = {len(train_dataloader)}") |
| | logger.info(f" Num Epochs = {args.num_train_epochs}") |
| | logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") |
| | logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") |
| | logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") |
| | logger.info(f" Total optimization steps = {args.max_train_steps}") |
| | global_step = 0 |
| | first_epoch = 0 |
| |
|
| | # Potentially load in the weights and states from a previous save |
| | if args.resume_from_checkpoint: |
| | if args.resume_from_checkpoint != "latest": |
| | path = os.path.basename(args.resume_from_checkpoint) |
| | else: |
| | # Get the most recent checkpoint |
| | dirs = os.listdir(args.output_dir) |
| | dirs = [d for d in dirs if d.startswith("checkpoint")] |
| | dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) |
| | path = dirs[-1] if len(dirs) > 0 else None |
| |
|
| | if path is None: |
| | logger.info(f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run.") |
| | args.resume_from_checkpoint = None |
| | initial_global_step = 0 |
| | else: |
| | logger.info(f"Resuming from checkpoint {path}") |
| | accelerator.load_state(os.path.join(args.output_dir, path)) |
| | global_step = int(path.split("-")[1]) |
| |
|
| | initial_global_step = global_step |
| | first_epoch = global_step // num_update_steps_per_epoch |
| |
|
| | else: |
| | initial_global_step = 0 |
| |
|
| | progress_bar = tqdm( |
| | range(0, args.max_train_steps), |
| | initial=initial_global_step, |
| | desc="Steps", |
| | # Only show the progress bar once on each machine. |
| | disable=not accelerator.is_local_main_process, |
| | ) |
| |
|
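| | # helper that looks up the flow-matching sigmas for the sampled timesteps and reshapes them for broadcasting |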
| | def get_sigmas(timesteps, n_dim=4, dtype=torch.float32): |
| | sigmas = noise_scheduler_copy.sigmas.to(device=accelerator.device, dtype=dtype) |
| | schedule_timesteps = noise_scheduler_copy.timesteps.to(accelerator.device) |
| | timesteps = timesteps.to(accelerator.device) |
| | step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps] |
| |
|
| | sigma = sigmas[step_indices].flatten() |
| | while len(sigma.shape) < n_dim: |
| | sigma = sigma.unsqueeze(-1) |
| | return sigma |
| |
|
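| | # number of epochs each component is trained before being "pivoted" (frozen), per the *_frac arguments |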
| | if args.train_text_encoder: |
| | num_train_epochs_text_encoder = int(args.train_text_encoder_frac * args.num_train_epochs) |
| | num_train_epochs_transformer = int(args.train_transformer_frac * args.num_train_epochs) |
| | elif args.train_text_encoder_ti: |
| | num_train_epochs_text_encoder = int(args.train_text_encoder_ti_frac * args.num_train_epochs) |
| | num_train_epochs_transformer = int(args.train_transformer_frac * args.num_train_epochs) |
| |
|
| | # flags used to track when the text encoder / transformer get "pivoted" (frozen) mid-training |
| | pivoted_te = False |
| | pivoted_tr = False |
| | for epoch in range(first_epoch, args.num_train_epochs): |
| | transformer.train() |
| | # if performing any kind of optimization of text encoder params |
| | if args.train_text_encoder or args.train_text_encoder_ti: |
| | if epoch == num_train_epochs_text_encoder: |
| | # flag to stop text encoder optimization |
| | logger.info(f"PIVOT TE {epoch}") |
| | pivoted_te = True |
| | else: |
| | # still optimizing the text encoder |
| | if args.train_text_encoder: |
| | text_encoder_one.train() |
| | # set top parameter requires_grad = True so gradient checkpointing works |
| | unwrap_model(text_encoder_one).text_model.embeddings.requires_grad_(True) |
| | elif args.train_text_encoder_ti: |
| | text_encoder_one.train() |
| | if args.enable_t5_ti: |
| | text_encoder_two.train() |
| |
|
| | if epoch == num_train_epochs_transformer: |
| | # flag to stop transformer optimization |
| | logger.info(f"PIVOT TRANSFORMER {epoch}") |
| | pivoted_tr = True |
| |
|
| | for step, batch in enumerate(train_dataloader): |
| | models_to_accumulate = [transformer] |
| | if not freeze_text_encoder: |
| | models_to_accumulate.extend([text_encoder_one]) |
| | if args.enable_t5_ti: |
| | models_to_accumulate.extend([text_encoder_two]) |
| | if pivoted_te: |
| | # stop optimizing the text encoder(s) by zeroing their learning rates |
| | optimizer.param_groups[te_idx]["lr"] = 0.0 |
| | optimizer.param_groups[-1]["lr"] = 0.0 |
| | elif pivoted_tr and not pure_textual_inversion: |
| | logger.info(f"PIVOT TRANSFORMER {epoch}") |
| | optimizer.param_groups[0]["lr"] = 0.0 |
| |
|
| | with accelerator.accumulate(models_to_accumulate): |
| | prompts = batch["prompts"] |
| |
|
| | # encode batch prompts when custom prompts are provided for each image |
| | if train_dataset.custom_instance_prompts: |
| | elems_to_repeat = 1 |
| | if freeze_text_encoder: |
| | prompt_embeds, pooled_prompt_embeds, text_ids = compute_text_embeddings( |
| | prompts, text_encoders, tokenizers |
| | ) |
| | else: |
| | tokens_one = tokenize_prompt( |
| | tokenizer_one, prompts, max_sequence_length=77, add_special_tokens=add_special_tokens_clip |
| | ) |
| | tokens_two = tokenize_prompt( |
| | tokenizer_two, |
| | prompts, |
| | max_sequence_length=args.max_sequence_length, |
| | add_special_tokens=add_special_tokens_t5, |
| | ) |
| | else: |
| | elems_to_repeat = len(prompts) |
| |
|
| | if not freeze_text_encoder: |
| | prompt_embeds, pooled_prompt_embeds, text_ids = encode_prompt( |
| | text_encoders=[text_encoder_one, text_encoder_two], |
| | tokenizers=[None, None], |
| | text_input_ids_list=[ |
| | tokens_one.repeat(elems_to_repeat, 1), |
| | tokens_two.repeat(elems_to_repeat, 1), |
| | ], |
| | max_sequence_length=args.max_sequence_length, |
| | device=accelerator.device, |
| | prompt=prompts, |
| | ) |
| | # Convert images to latent space |
| | if args.cache_latents: |
| | model_input = latents_cache[step].sample() |
| | else: |
| | pixel_values = batch["pixel_values"].to(dtype=vae.dtype) |
| | model_input = vae.encode(pixel_values).latent_dist.sample() |
| | model_input = (model_input - vae_config_shift_factor) * vae_config_scaling_factor |
| | model_input = model_input.to(dtype=weight_dtype) |
| |
|
| | vae_scale_factor = 2 ** (len(vae_config_block_out_channels) - 1) |
| |
|
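| | # Flux packs latents into 2x2 patches, so the positional ids are built on a half-resolution grid |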
| | latent_image_ids = FluxPipeline._prepare_latent_image_ids( |
| | model_input.shape[0], |
| | model_input.shape[2] // 2, |
| | model_input.shape[3] // 2, |
| | accelerator.device, |
| | weight_dtype, |
| | ) |
| | # Sample noise that we'll add to the latents |
| | noise = torch.randn_like(model_input) |
| | bsz = model_input.shape[0] |
| |
|
| | # Sample a random timestep for each image |
| | # for weighting schemes where we sample timesteps non-uniformly |
| | u = compute_density_for_timestep_sampling( |
| | weighting_scheme=args.weighting_scheme, |
| | batch_size=bsz, |
| | logit_mean=args.logit_mean, |
| | logit_std=args.logit_std, |
| | mode_scale=args.mode_scale, |
| | ) |
| | indices = (u * noise_scheduler_copy.config.num_train_timesteps).long() |
| | timesteps = noise_scheduler_copy.timesteps[indices].to(device=model_input.device) |
| |
|
| | # Add noise according to flow matching: |
| | # z_t = (1 - sigma) * x + sigma * noise |
| | sigmas = get_sigmas(timesteps, n_dim=model_input.ndim, dtype=model_input.dtype) |
| | noisy_model_input = (1.0 - sigmas) * model_input + sigmas * noise |
| |
|
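| | # pack the noisy latents into the patch-sequence layout expected by the Flux transformer |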
| | packed_noisy_model_input = FluxPipeline._pack_latents( |
| | noisy_model_input, |
| | batch_size=model_input.shape[0], |
| | num_channels_latents=model_input.shape[1], |
| | height=model_input.shape[2], |
| | width=model_input.shape[3], |
| | ) |
| |
|
| | # handle guidance |
| | if unwrap_model(transformer).config.guidance_embeds: |
| | guidance = torch.tensor([args.guidance_scale], device=accelerator.device) |
| | guidance = guidance.expand(model_input.shape[0]) |
| | else: |
| | guidance = None |
| |
|
| | # Predict the flow (velocity) for the noisy latents |
| | model_pred = transformer( |
| | hidden_states=packed_noisy_model_input, |
| | # timesteps are scaled down by 1000 here because the transformer scales them back up internally |
| | timestep=timesteps / 1000, |
| | guidance=guidance, |
| | pooled_projections=pooled_prompt_embeds, |
| | encoder_hidden_states=prompt_embeds, |
| | txt_ids=text_ids, |
| | img_ids=latent_image_ids, |
| | return_dict=False, |
| | )[0] |
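| | # unpack the prediction back to latent-image shape so it can be compared against the flow-matching target |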
| | model_pred = FluxPipeline._unpack_latents( |
| | model_pred, |
| | height=model_input.shape[2] * vae_scale_factor, |
| | width=model_input.shape[3] * vae_scale_factor, |
| | vae_scale_factor=vae_scale_factor, |
| | ) |
| |
|
| | # these weighting schemes use a uniform timestep sampling |
| | # and instead post-weight the loss |
| | weighting = compute_loss_weighting_for_sd3(weighting_scheme=args.weighting_scheme, sigmas=sigmas) |
| |
|
| | # flow matching loss: the target is the velocity (noise - clean latents) |
| | target = noise - model_input |
| |
|
| | if args.with_prior_preservation: |
| | # Chunk the noise and model_pred into two parts and compute the loss on each part separately. |
| | model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) |
| | target, target_prior = torch.chunk(target, 2, dim=0) |
| |
|
| | # Compute prior loss |
| | prior_loss = torch.mean( |
| | (weighting.float() * (model_pred_prior.float() - target_prior.float()) ** 2).reshape( |
| | target_prior.shape[0], -1 |
| | ), |
| | 1, |
| | ) |
| | prior_loss = prior_loss.mean() |
| |
|
| | # Compute instance loss |
| | loss = torch.mean( |
| | (weighting.float() * (model_pred.float() - target.float()) ** 2).reshape(target.shape[0], -1), |
| | 1, |
| | ) |
| | loss = loss.mean() |
| |
|
| | if args.with_prior_preservation: |
| | # Add the prior loss to the instance loss. |
| | loss = loss + args.prior_loss_weight * prior_loss |
| |
|
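| | # backward pass; gradients are clipped only on steps where the accelerator syncs them |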
| | accelerator.backward(loss) |
| | if accelerator.sync_gradients: |
| | if not freeze_text_encoder: |
| | if args.train_text_encoder: |
| | params_to_clip = itertools.chain(transformer.parameters(), text_encoder_one.parameters()) |
| | elif pure_textual_inversion: |
| | if args.enable_t5_ti: |
| | params_to_clip = itertools.chain( |
| | text_encoder_one.parameters(), text_encoder_two.parameters() |
| | ) |
| | else: |
| | params_to_clip = itertools.chain(text_encoder_one.parameters()) |
| | else: |
| | if args.enable_t5_ti: |
| | params_to_clip = itertools.chain( |
| | transformer.parameters(), |
| | text_encoder_one.parameters(), |
| | text_encoder_two.parameters(), |
| | ) |
| | else: |
| | params_to_clip = itertools.chain( |
| | transformer.parameters(), text_encoder_one.parameters() |
| | ) |
| | else: |
| | params_to_clip = itertools.chain(transformer.parameters()) |
| | accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) |
| |
|
| | optimizer.step() |
| | lr_scheduler.step() |
| | optimizer.zero_grad() |
| |
|
| | # after each step, restore the original embeddings for all tokens except the newly inserted ones |
| | if args.train_text_encoder_ti: |
| | embedding_handler.retract_embeddings() |
| |
|
| | # Checks if the accelerator has performed an optimization step behind the scenes |
| | if accelerator.sync_gradients: |
| | progress_bar.update(1) |
| | global_step += 1 |
| |
|
| | if accelerator.is_main_process: |
| | if global_step % args.checkpointing_steps == 0: |
| | # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` |
| | if args.checkpoints_total_limit is not None: |
| | checkpoints = os.listdir(args.output_dir) |
| | checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] |
| | checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) |
| |
|
| | # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints |
| | if len(checkpoints) >= args.checkpoints_total_limit: |
| | num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 |
| | removing_checkpoints = checkpoints[0:num_to_remove] |
| |
|
| | logger.info( |
| | f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" |
| | ) |
| | logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") |
| |
|
| | for removing_checkpoint in removing_checkpoints: |
| | removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) |
| | shutil.rmtree(removing_checkpoint) |
| |
|
| | save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") |
| | accelerator.save_state(save_path) |
| | if args.train_text_encoder_ti: |
| | embedding_handler.save_embeddings( |
| | f"{args.output_dir}/{Path(args.output_dir).name}_emb_checkpoint_{global_step}.safetensors" |
| | ) |
| | logger.info(f"Saved state to {save_path}") |
| |
|
| | logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} |
| | progress_bar.set_postfix(**logs) |
| | accelerator.log(logs, step=global_step) |
| |
|
| | if global_step >= args.max_train_steps: |
| | break |
| |
|
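| | # periodically run validation on the main process, reloading the frozen text encoders just for inference if needed |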
| | if accelerator.is_main_process: |
| | if args.validation_prompt is not None and epoch % args.validation_epochs == 0: |
| | # create pipeline for validation |
| | if freeze_text_encoder: |
| | text_encoder_one, text_encoder_two = load_text_encoders(text_encoder_cls_one, text_encoder_cls_two) |
| | text_encoder_one.to(weight_dtype) |
| | text_encoder_two.to(weight_dtype) |
| | pipeline = FluxPipeline.from_pretrained( |
| | args.pretrained_model_name_or_path, |
| | vae=vae, |
| | text_encoder=unwrap_model(text_encoder_one), |
| | text_encoder_2=unwrap_model(text_encoder_two), |
| | transformer=unwrap_model(transformer), |
| | revision=args.revision, |
| | variant=args.variant, |
| | torch_dtype=weight_dtype, |
| | ) |
| | pipeline_args = {"prompt": args.validation_prompt} |
| | images = log_validation( |
| | pipeline=pipeline, |
| | args=args, |
| | accelerator=accelerator, |
| | pipeline_args=pipeline_args, |
| | epoch=epoch, |
| | torch_dtype=weight_dtype, |
| | ) |
| | if freeze_text_encoder: |
| | del text_encoder_one, text_encoder_two |
| | free_memory() |
| |
|
| | images = None |
| | del pipeline |
| |
|
| | # Save the lora layers |
| | accelerator.wait_for_everyone() |
| | if accelerator.is_main_process: |
| | modules_to_save = {} |
| | transformer = unwrap_model(transformer) |
| | if args.upcast_before_saving: |
| | transformer.to(torch.float32) |
| | else: |
| | transformer = transformer.to(weight_dtype) |
| | transformer_lora_layers = get_peft_model_state_dict(transformer) |
| | modules_to_save["transformer"] = transformer |
| |
|
| | if args.train_text_encoder: |
| | text_encoder_one = unwrap_model(text_encoder_one) |
| | text_encoder_lora_layers = get_peft_model_state_dict(text_encoder_one.to(torch.float32)) |
| | modules_to_save["text_encoder"] = text_encoder_one |
| | else: |
| | text_encoder_lora_layers = None |
| |
|
| | if not pure_textual_inversion: |
| | FluxPipeline.save_lora_weights( |
| | save_directory=args.output_dir, |
| | transformer_lora_layers=transformer_lora_layers, |
| | text_encoder_lora_layers=text_encoder_lora_layers, |
| | **_collate_lora_metadata(modules_to_save), |
| | ) |
| |
|
| | if args.train_text_encoder_ti: |
| | embeddings_path = f"{args.output_dir}/{os.path.basename(args.output_dir)}_emb.safetensors" |
| | embedding_handler.save_embeddings(embeddings_path) |
| |
|
| | # Final inference |
| | # Load the base pipeline; the trained LoRA weights are loaded below unless doing pure textual inversion |
| | pipeline = FluxPipeline.from_pretrained( |
| | args.pretrained_model_name_or_path, |
| | revision=args.revision, |
| | variant=args.variant, |
| | torch_dtype=weight_dtype, |
| | ) |
| | if not pure_textual_inversion: |
| | |
| | pipeline.load_lora_weights(args.output_dir) |
| |
|
| | # run inference with the trained weights |
| | images = [] |
| | if args.validation_prompt and args.num_validation_images > 0: |
| | pipeline_args = {"prompt": args.validation_prompt} |
| | images = log_validation( |
| | pipeline=pipeline, |
| | args=args, |
| | accelerator=accelerator, |
| | pipeline_args=pipeline_args, |
| | epoch=epoch, |
| | is_final_validation=True, |
| | torch_dtype=weight_dtype, |
| | ) |
| |
|
| | save_model_card( |
| | model_id if not args.push_to_hub else repo_id, |
| | images=images, |
| | base_model=args.pretrained_model_name_or_path, |
| | train_text_encoder=args.train_text_encoder, |
| | train_text_encoder_ti=args.train_text_encoder_ti, |
| | enable_t5_ti=args.enable_t5_ti, |
| | pure_textual_inversion=pure_textual_inversion, |
| | token_abstraction_dict=train_dataset.token_abstraction_dict, |
| | instance_prompt=args.instance_prompt, |
| | validation_prompt=args.validation_prompt, |
| | repo_folder=args.output_dir, |
| | ) |
| | if args.push_to_hub: |
| | upload_folder( |
| | repo_id=repo_id, |
| | folder_path=args.output_dir, |
| | commit_message="End of training", |
| | ignore_patterns=["step_*", "epoch_*"], |
| | ) |
| |
|
| | images = None |
| | del pipeline |
| |
|
| | accelerator.end_training() |
| |
|
| |
|
| | if __name__ == "__main__": |
| | args = parse_args() |
| | main(args) |
| |
|