import argparse
import logging
import math
import os
import random
from pathlib import Path

import numpy as np
import PIL
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from huggingface_hub import create_repo, upload_folder
from multi_token_clip import MultiTokenCLIPTokenizer

# TODO: remove and import from diffusers.utils when the new version of diffusers is released
from packaging import version
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel

import diffusers
from diffusers import (
    AutoencoderKL,
    DDPMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version, is_wandb_available
from diffusers.utils.import_utils import is_xformers_available


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


# Will error if the minimal version of diffusers is not installed. Remove at your own risk.
check_min_version("0.14.0.dev0")

logger = get_logger(__name__)


def add_tokens(tokenizer, text_encoder, placeholder_token, num_vec_per_token=1, initializer_token=None):
    """
    Add placeholder tokens to the tokenizer and set the initial values of the new token embeddings.
    """
    tokenizer.add_placeholder_tokens(placeholder_token, num_vec_per_token=num_vec_per_token)
    text_encoder.resize_token_embeddings(len(tokenizer))
    token_embeds = text_encoder.get_input_embeddings().weight.data
    placeholder_token_ids = tokenizer.encode(placeholder_token, add_special_tokens=False)
    if initializer_token:
        token_ids = tokenizer.encode(initializer_token, add_special_tokens=False)
        for i, placeholder_token_id in enumerate(placeholder_token_ids):
            # Spread the initializer's token embeddings evenly across the placeholder vectors.
            token_embeds[placeholder_token_id] = token_embeds[token_ids[i * len(token_ids) // num_vec_per_token]]
    else:
        for i, placeholder_token_id in enumerate(placeholder_token_ids):
            # Without an initializer, start each placeholder vector from random noise.
            token_embeds[placeholder_token_id] = torch.randn_like(token_embeds[placeholder_token_id])
    return placeholder_token


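# Example use of add_tokens (hypothetical values): with placeholder_token="<cat-toy>",
# num_vec_per_token=2 and initializer_token="toy", the tokenizer registers two placeholder
# tokens and both new embedding rows start from the embedding of "toy" (see multi_token_clip.py).

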
def save_progress(tokenizer, text_encoder, accelerator, save_path):
    # Collect the learned embeddings for every placeholder token into one dict and save once;
    # saving inside the loop would overwrite the file for each token.
    learned_embeds_dict = {}
    for placeholder_token in tokenizer.token_map:
        placeholder_token_ids = tokenizer.encode(placeholder_token, add_special_tokens=False)
        learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[placeholder_token_ids]
        if len(placeholder_token_ids) == 1:
            learned_embeds = learned_embeds[None]
        learned_embeds_dict[placeholder_token] = learned_embeds.detach().cpu()
    torch.save(learned_embeds_dict, save_path)


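# The learned_embeds.bin file written by save_progress can be loaded back into a fresh
# tokenizer/text encoder pair with load_multitoken_tokenizer below.

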
def load_multitoken_tokenizer(tokenizer, text_encoder, learned_embeds_dict):
    for placeholder_token in learned_embeds_dict:
        placeholder_embeds = learned_embeds_dict[placeholder_token]
        num_vec_per_token = placeholder_embeds.shape[0]
        placeholder_embeds = placeholder_embeds.to(dtype=text_encoder.dtype)
        add_tokens(tokenizer, text_encoder, placeholder_token, num_vec_per_token=num_vec_per_token)
        placeholder_token_ids = tokenizer.encode(placeholder_token, add_special_tokens=False)
        token_embeds = text_encoder.get_input_embeddings().weight.data
        for i, placeholder_token_id in enumerate(placeholder_token_ids):
            token_embeds[placeholder_token_id] = placeholder_embeds[i]


def load_multitoken_tokenizer_from_automatic(tokenizer, text_encoder, automatic_dict, placeholder_token):
    """
    Automatic1111's embeddings have the format
    {'string_to_token': {'*': 265}, 'string_to_param': {'*': tensor([[ 0.0833,  0.0030,  0.0057, ..., -0.0264, -0.0616, -0.0529],
            [ 0.0058, -0.0190, -0.0584, ..., -0.0025, -0.0945, -0.0490],
            [ 0.0916,  0.0025,  0.0365, ..., -0.0685, -0.0124,  0.0728],
            [ 0.0812, -0.0199, -0.0100, ..., -0.0581, -0.0780,  0.0254]],
           requires_grad=True)}, 'name': 'FloralMarble-400', 'step': 399, 'sd_checkpoint': '4bdfc29c', 'sd_checkpoint_name': 'SD2.1-768'}
    """
    learned_embeds_dict = {}
    learned_embeds_dict[placeholder_token] = automatic_dict["string_to_param"]["*"]
    load_multitoken_tokenizer(tokenizer, text_encoder, learned_embeds_dict)


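# Example use (hypothetical path): load an Automatic1111-style embedding file.
#   automatic_dict = torch.load("FloralMarble-400.pt", map_location="cpu")
#   load_multitoken_tokenizer_from_automatic(tokenizer, text_encoder, automatic_dict, "<floral-marble>")

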
def get_mask(tokenizer, accelerator):
    # Get the mask of the weights that won't change: True for every original vocabulary id,
    # False for the newly added placeholder ids, which are the only embeddings being trained.
    mask = torch.ones(len(tokenizer)).to(accelerator.device, dtype=torch.bool)
    for placeholder_token in tokenizer.token_map:
        placeholder_token_ids = tokenizer.encode(placeholder_token, add_special_tokens=False)
        for i in range(len(placeholder_token_ids)):
            mask = mask & (torch.arange(len(tokenizer)) != placeholder_token_ids[i]).to(accelerator.device)
    return mask


def parse_args():
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--progressive_tokens_max_steps",
        type=int,
        default=2000,
        help="The number of steps until all tokens will be used.",
    )
    parser.add_argument(
        "--progressive_tokens",
        action="store_true",
        help="Progressively train the tokens. For example, first train for 1 token, then 2 tokens and so on.",
    )
    parser.add_argument("--vector_shuffle", action="store_true", help="Shuffle tokens during training.")
    parser.add_argument(
        "--num_vec_per_token",
        type=int,
        default=1,
        help=(
            "The number of vectors used to represent the placeholder token. The higher the number, the better the"
            " result, at the cost of editability. This can be mitigated by prompt editing."
        ),
    )
    parser.add_argument(
        "--save_steps",
        type=int,
        default=500,
        help="Save learned_embeds.bin every X update steps.",
    )
    parser.add_argument(
        "--only_save_embeds",
        action="store_true",
        default=False,
        help="Save only the embeddings for the new concept.",
    )
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--tokenizer_name",
        type=str,
        default=None,
        help="Pretrained tokenizer name or path if not the same as model_name.",
    )
    parser.add_argument(
        "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data."
    )
    parser.add_argument(
        "--placeholder_token",
        type=str,
        default=None,
        required=True,
        help="A token to use as a placeholder for the concept.",
    )
    parser.add_argument(
        "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word."
    )
    parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'.")
    parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.")
    parser.add_argument(
        "--output_dir",
        type=str,
        default="text-inversion-model",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images; all the images in the train/validation dataset will be resized to this"
            " resolution."
        ),
    )
    parser.add_argument(
        "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution."
    )
    parser.add_argument(
        "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument("--num_train_epochs", type=int, default=100)
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=5000,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="Whether or not to use gradient checkpointing to save memory at the expense of a slower backward pass.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=False,
        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help="Number of subprocesses to use for data loading. 0 means the data will be loaded in the main process.",
    )
    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer.")
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default="no",
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10"
            " and an Nvidia Ampere GPU."
        ),
    )
    parser.add_argument(
        "--allow_tf32",
        action="store_true",
        help=(
            "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
            " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument(
        "--validation_prompt",
        type=str,
        default=None,
        help="A prompt that is used during validation to verify that the model is learning.",
    )
    parser.add_argument(
        "--num_validation_images",
        type=int,
        default=4,
        help="Number of images that should be generated during validation with `validation_prompt`.",
    )
    parser.add_argument(
        "--validation_epochs",
        type=int,
        default=50,
        help=(
            "Run validation every X epochs. Validation consists of running the prompt `args.validation_prompt`"
            " `args.num_validation_images` times and logging the resulting images."
        ),
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=500,
        help=(
            "Save a checkpoint of the training state every X update steps. These checkpoints are only suitable for"
            " resuming training using `--resume_from_checkpoint`."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=None,
        help=(
            "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
            " See https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
            " for more details."
        ),
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
    )

    args = parser.parse_args()
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank

    if args.train_data_dir is None:
        raise ValueError("You must specify a train data directory.")

    return args


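# Example invocation (hypothetical paths and model id; adjust to your setup):
#   accelerate launch textual_inversion.py \
#     --pretrained_model_name_or_path="runwayml/stable-diffusion-v1-5" \
#     --train_data_dir="./cat_images" \
#     --placeholder_token="<cat-toy>" --initializer_token="toy" \
#     --num_vec_per_token=4 --vector_shuffle \
#     --resolution=512 --train_batch_size=1 --gradient_accumulation_steps=4 \
#     --max_train_steps=3000 --learning_rate=5e-4 --lr_scheduler="constant" \
#     --output_dir="./text-inversion-model"

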
imagenet_templates_small = [
    "a photo of a {}",
    "a rendering of a {}",
    "a cropped photo of the {}",
    "the photo of a {}",
    "a photo of a clean {}",
    "a photo of a dirty {}",
    "a dark photo of the {}",
    "a photo of my {}",
    "a photo of the cool {}",
    "a close-up photo of a {}",
    "a bright photo of the {}",
    "a cropped photo of a {}",
    "a photo of the {}",
    "a good photo of the {}",
    "a photo of one {}",
    "a close-up photo of the {}",
    "a rendition of the {}",
    "a photo of the clean {}",
    "a rendition of a {}",
    "a photo of a nice {}",
    "a good photo of a {}",
    "a photo of the nice {}",
    "a photo of the small {}",
    "a photo of the weird {}",
    "a photo of the large {}",
    "a photo of a cool {}",
    "a photo of a small {}",
]

imagenet_style_templates_small = [
    "a painting in the style of {}",
    "a rendering in the style of {}",
    "a cropped painting in the style of {}",
    "the painting in the style of {}",
    "a clean painting in the style of {}",
    "a dirty painting in the style of {}",
    "a dark painting in the style of {}",
    "a picture in the style of {}",
    "a cool painting in the style of {}",
    "a close-up painting in the style of {}",
    "a bright painting in the style of {}",
    "a cropped painting in the style of {}",
    "a good painting in the style of {}",
    "a close-up painting in the style of {}",
    "a rendition in the style of {}",
    "a nice painting in the style of {}",
    "a small painting in the style of {}",
    "a weird painting in the style of {}",
    "a large painting in the style of {}",
]


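# At train time, TextualInversionDataset samples one template per example and substitutes the
# placeholder token into the "{}" slot to build the caption.

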
class TextualInversionDataset(Dataset):
    def __init__(
        self,
        data_root,
        tokenizer,
        learnable_property="object",
        size=512,
        repeats=100,
        interpolation="bicubic",
        flip_p=0.5,
        set="train",
        placeholder_token="*",
        center_crop=False,
        vector_shuffle=False,
        progressive_tokens=False,
    ):
        self.data_root = data_root
        self.tokenizer = tokenizer
        self.learnable_property = learnable_property
        self.size = size
        self.placeholder_token = placeholder_token
        self.center_crop = center_crop
        self.flip_p = flip_p
        self.vector_shuffle = vector_shuffle
        self.progressive_tokens = progressive_tokens
        # Fraction of the placeholder vectors to feed to the tokenizer; updated from the
        # training loop when --progressive_tokens is enabled.
        self.prop_tokens_to_load = 0

        self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]

        self.num_images = len(self.image_paths)
        self._length = self.num_images

        if set == "train":
            self._length = self.num_images * repeats

        self.interpolation = {
            "linear": PIL_INTERPOLATION["linear"],
            "bilinear": PIL_INTERPOLATION["bilinear"],
            "bicubic": PIL_INTERPOLATION["bicubic"],
            "lanczos": PIL_INTERPOLATION["lanczos"],
        }[interpolation]

        self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
        self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)

    def __len__(self):
        return self._length

    def __getitem__(self, i):
        example = {}
        image = Image.open(self.image_paths[i % self.num_images])

        if image.mode != "RGB":
            image = image.convert("RGB")

        placeholder_string = self.placeholder_token
        text = random.choice(self.templates).format(placeholder_string)

        example["input_ids"] = self.tokenizer.encode(
            text,
            padding="max_length",
            truncation=True,
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
            vector_shuffle=self.vector_shuffle,
            prop_tokens_to_load=self.prop_tokens_to_load if self.progressive_tokens else 1.0,
        )[0]

        # default to score-sde preprocessing
        img = np.array(image).astype(np.uint8)

        if self.center_crop:
            crop = min(img.shape[0], img.shape[1])
            h, w = img.shape[0], img.shape[1]
            img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]

        image = Image.fromarray(img)
        image = image.resize((self.size, self.size), resample=self.interpolation)

        image = self.flip_transform(image)
        image = np.array(image).astype(np.uint8)
        # Rescale from [0, 255] to [-1.0, 1.0], the range expected by the VAE.
        image = (image / 127.5 - 1.0).astype(np.float32)

        example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
        return example


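# Example (hypothetical paths): inspect a single training example.
#   ds = TextualInversionDataset(data_root="./cat_images", tokenizer=tokenizer, placeholder_token="<cat-toy>")
#   sample = ds[0]
#   sample["pixel_values"].shape  # torch.Size([3, 512, 512])

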
def main():
    args = parse_args()
    if args.report_to == "wandb" and args.hub_token is not None:
        raise ValueError(
            "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
            " Please use `hf auth login` to authenticate with the Hub."
        )

    logging_dir = os.path.join(args.output_dir, args.logging_dir)
    accelerator_project_config = ProjectConfiguration(
        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
    )

    accelerator = Accelerator(
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        mixed_precision=args.mixed_precision,
        log_with=args.report_to,
        project_config=accelerator_project_config,
    )

    # Disable AMP for MPS.
    if torch.backends.mps.is_available():
        accelerator.native_amp = False

    if args.report_to == "wandb":
        if not is_wandb_available():
            raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
        import wandb

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)
    if accelerator.is_local_main_process:
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    # Handle the repository creation.
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)

        if args.push_to_hub:
            repo_id = create_repo(
                repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
            ).repo_id

    # Load the tokenizer.
    if args.tokenizer_name:
        tokenizer = MultiTokenCLIPTokenizer.from_pretrained(args.tokenizer_name)
    elif args.pretrained_model_name_or_path:
        tokenizer = MultiTokenCLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")

    # Load the scheduler and models.
    noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
    text_encoder = CLIPTextModel.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
    )
    vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
    unet = UNet2DConditionModel.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
    )
    if is_xformers_available():
        try:
            unet.enable_xformers_memory_efficient_attention()
        except Exception as e:
            logger.warning(
                "Could not enable memory efficient attention. Make sure xformers is installed"
                f" correctly and a GPU is available: {e}"
            )
    add_tokens(tokenizer, text_encoder, args.placeholder_token, args.num_vec_per_token, args.initializer_token)

    # Freeze vae and unet.
    vae.requires_grad_(False)
    unet.requires_grad_(False)
    # Freeze all parameters except for the token embeddings in the text encoder.
    text_encoder.text_model.encoder.requires_grad_(False)
    text_encoder.text_model.final_layer_norm.requires_grad_(False)
    text_encoder.text_model.embeddings.position_embedding.requires_grad_(False)

    if args.gradient_checkpointing:
        # Keep the unet in train mode when gradient checkpointing is used, so activations are
        # recomputed in the backward pass; dropout is 0, so train vs. eval mode does not matter.
        unet.train()
        text_encoder.gradient_checkpointing_enable()
        unet.enable_gradient_checkpointing()

    if args.enable_xformers_memory_efficient_attention:
        if is_xformers_available():
            import xformers

            xformers_version = version.parse(xformers.__version__)
            if xformers_version == version.parse("0.0.16"):
                logger.warning(
                    "xFormers 0.0.16 cannot be used for training on some GPUs. If you observe problems during"
                    " training, please update xFormers to at least 0.0.17. See"
                    " https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                )
            unet.enable_xformers_memory_efficient_attention()
        else:
            raise ValueError("xformers is not available. Make sure it is installed correctly.")

    # Enable TF32 for faster training on Ampere GPUs,
    # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
    if args.allow_tf32:
        torch.backends.cuda.matmul.allow_tf32 = True

    if args.scale_lr:
        args.learning_rate = (
            args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
        )

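    # Illustrative numbers: with learning_rate=1e-4, gradient_accumulation_steps=4,
    # train_batch_size=16 and 2 processes, --scale_lr yields 1e-4 * 4 * 16 * 2 = 1.28e-2.
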
    # Initialize the optimizer; only the token embeddings of the text encoder are optimized.
    optimizer = torch.optim.AdamW(
        text_encoder.get_input_embeddings().parameters(),  # only optimize the embeddings
        lr=args.learning_rate,
        betas=(args.adam_beta1, args.adam_beta2),
        weight_decay=args.adam_weight_decay,
        eps=args.adam_epsilon,
    )

    # Dataset and DataLoader creation.
    train_dataset = TextualInversionDataset(
        data_root=args.train_data_dir,
        tokenizer=tokenizer,
        size=args.resolution,
        placeholder_token=args.placeholder_token,
        repeats=args.repeats,
        learnable_property=args.learnable_property,
        center_crop=args.center_crop,
        set="train",
    )
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.dataloader_num_workers
    )

    # Scheduler and math around the number of training steps.
    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
        overrode_max_train_steps = True

    lr_scheduler = get_scheduler(
        args.lr_scheduler,
        optimizer=optimizer,
        num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
        num_training_steps=args.max_train_steps * accelerator.num_processes,
    )

    # Prepare everything with our `accelerator`.
    text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
        text_encoder, optimizer, train_dataloader, lr_scheduler
    )

    # For mixed precision training we cast the unet and vae weights to half-precision,
    # as these models are only used for inference; keeping weights in full precision is not required.
    weight_dtype = torch.float32
    if accelerator.mixed_precision == "fp16":
        weight_dtype = torch.float16
    elif accelerator.mixed_precision == "bf16":
        weight_dtype = torch.bfloat16

    # Move vae and unet to device and cast to weight_dtype.
    unet.to(accelerator.device, dtype=weight_dtype)
    vae.to(accelerator.device, dtype=weight_dtype)

    # We need to recalculate our total training steps as the size of the training dataloader may have changed.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if overrode_max_train_steps:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    # Afterwards we recalculate our number of training epochs.
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    # We need to initialize the trackers we use, and also store our configuration.
    # The trackers initialize automatically on the main process.
    if accelerator.is_main_process:
        accelerator.init_trackers("textual_inversion", config=vars(args))

    # Train!
    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

    logger.info("***** Running training *****")
    logger.info(f" Num examples = {len(train_dataset)}")
    logger.info(f" Num Epochs = {args.num_train_epochs}")
    logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
    logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f" Total optimization steps = {args.max_train_steps}")
    global_step = 0
    first_epoch = 0

    # Potentially load in the weights and states from a previous save.
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint != "latest":
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint.
            dirs = os.listdir(args.output_dir)
            dirs = [d for d in dirs if d.startswith("checkpoint")]
            dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
            path = dirs[-1] if len(dirs) > 0 else None

        if path is None:
            accelerator.print(
                f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
            )
            args.resume_from_checkpoint = None
        else:
            accelerator.print(f"Resuming from checkpoint {path}")
            accelerator.load_state(os.path.join(args.output_dir, path))
            global_step = int(path.split("-")[1])

            resume_global_step = global_step * args.gradient_accumulation_steps
            first_epoch = global_step // num_update_steps_per_epoch
            resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)

    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
    progress_bar.set_description("Steps")

    # Keep the original embeddings as reference.
    orig_embeds_params = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight.data.clone()

    for epoch in range(first_epoch, args.num_train_epochs):
        text_encoder.train()
        for step, batch in enumerate(train_dataloader):
            # Skip steps until we reach the resumed step.
            if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
                if step % args.gradient_accumulation_steps == 0:
                    progress_bar.update(1)
                continue
            if args.progressive_tokens:
                # Gradually unlock more of the placeholder vectors over the first
                # --progressive_tokens_max_steps optimization steps.
                train_dataset.prop_tokens_to_load = float(global_step) / args.progressive_tokens_max_steps

            with accelerator.accumulate(text_encoder):
                # Convert images to latent space.
                latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample().detach()
                latents = latents * vae.config.scaling_factor

                # Sample noise that we'll add to the latents.
                noise = torch.randn_like(latents)
                bsz = latents.shape[0]
                # Sample a random timestep for each image.
                timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
                timesteps = timesteps.long()

                # Add noise to the latents according to the noise magnitude at each timestep
                # (this is the forward diffusion process).
                noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)

                # Get the text embedding for conditioning.
                encoder_hidden_states = text_encoder(batch["input_ids"])[0].to(dtype=weight_dtype)

                # Predict the noise residual.
                model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample

                # Get the target for the loss depending on the prediction type.
                if noise_scheduler.config.prediction_type == "epsilon":
                    target = noise
                elif noise_scheduler.config.prediction_type == "v_prediction":
                    target = noise_scheduler.get_velocity(latents, noise, timesteps)
                else:
                    raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")

                loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")

                accelerator.backward(loss)

                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

                # Make sure we don't update any embedding weights besides the newly added tokens.
                index_no_updates = get_mask(tokenizer, accelerator)
                with torch.no_grad():
                    accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[index_no_updates] = (
                        orig_embeds_params[index_no_updates]
                    )

            # Checks if the accelerator has performed an optimization step behind the scenes.
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1
                if global_step % args.save_steps == 0:
                    save_path = os.path.join(args.output_dir, f"learned_embeds-steps-{global_step}.bin")
                    save_progress(tokenizer, text_encoder, accelerator, save_path)

                if global_step % args.checkpointing_steps == 0:
                    if accelerator.is_main_process:
                        save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
                        accelerator.save_state(save_path)
                        logger.info(f"Saved state to {save_path}")

            logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)
            accelerator.log(logs, step=global_step)

            if global_step >= args.max_train_steps:
                break

        if accelerator.is_main_process and args.validation_prompt is not None and epoch % args.validation_epochs == 0:
            logger.info(
                f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
                f" {args.validation_prompt}."
            )
            # Create the pipeline; the unet and vae are reloaded here in weight_dtype.
            pipeline = DiffusionPipeline.from_pretrained(
                args.pretrained_model_name_or_path,
                text_encoder=accelerator.unwrap_model(text_encoder),
                tokenizer=tokenizer,
                unet=unet,
                vae=vae,
                revision=args.revision,
                torch_dtype=weight_dtype,
            )
            pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
            pipeline = pipeline.to(accelerator.device)
            pipeline.set_progress_bar_config(disable=True)

            # Run inference.
            generator = (
                None if args.seed is None else torch.Generator(device=accelerator.device).manual_seed(args.seed)
            )
            images = []
            for _ in range(args.num_validation_images):
                with torch.autocast("cuda"):
                    image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
                images.append(image)

            for tracker in accelerator.trackers:
                if tracker.name == "tensorboard":
                    np_images = np.stack([np.asarray(img) for img in images])
                    tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
                if tracker.name == "wandb":
                    tracker.log(
                        {
                            "validation": [
                                wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
                                for i, image in enumerate(images)
                            ]
                        }
                    )

            del pipeline
            torch.cuda.empty_cache()

    # Create the pipeline using the trained modules and save it.
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        if args.push_to_hub and args.only_save_embeds:
            logger.warning("Enabling full model saving because --push_to_hub=True was specified.")
            save_full_model = True
        else:
            save_full_model = not args.only_save_embeds
        if save_full_model:
            pipeline = StableDiffusionPipeline.from_pretrained(
                args.pretrained_model_name_or_path,
                text_encoder=accelerator.unwrap_model(text_encoder),
                vae=vae,
                unet=unet,
                tokenizer=tokenizer,
            )
            pipeline.save_pretrained(args.output_dir)
        # Save the newly trained embeddings.
        save_path = os.path.join(args.output_dir, "learned_embeds.bin")
        save_progress(tokenizer, text_encoder, accelerator, save_path)

        if args.push_to_hub:
            upload_folder(
                repo_id=repo_id,
                folder_path=args.output_dir,
                commit_message="End of training",
                ignore_patterns=["step_*", "epoch_*"],
            )

    accelerator.end_training()


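# Example (hypothetical paths): reload the trained embeddings for inference without the full
# pipeline, using the helpers defined above.
#   learned = torch.load("./text-inversion-model/learned_embeds.bin", map_location="cpu")
#   load_multitoken_tokenizer(tokenizer, text_encoder, learned)

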
if __name__ == "__main__":
    main()