import argparse
import itertools
import math
import os
import random
from pathlib import Path

import intel_extension_for_pytorch as ipex
import numpy as np
import PIL
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from huggingface_hub import create_repo, upload_folder
from packaging import version
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDPMScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from diffusers.utils import check_min_version

if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }
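# Pillow 9.1 moved the resampling filters into the `PIL.Image.Resampling` enum
# and deprecated the module-level constants, hence the version check above.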

# Will error if the minimal version of diffusers is not installed.
check_min_version("0.13.0.dev0")

logger = get_logger(__name__)

def save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path):
    logger.info("Saving embeddings")
    learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[placeholder_token_id]
    learned_embeds_dict = {args.placeholder_token: learned_embeds.detach().cpu()}
    torch.save(learned_embeds_dict, save_path)

def parse_args():
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--save_steps",
        type=int,
        default=500,
        help="Save learned_embeds.bin every X update steps.",
    )
    parser.add_argument(
        "--only_save_embeds",
        action="store_true",
        default=False,
        help="Save only the embeddings for the new concept.",
    )
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--tokenizer_name",
        type=str,
        default=None,
        help="Pretrained tokenizer name or path if not the same as model_name.",
    )
    parser.add_argument(
        "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data."
    )
    parser.add_argument(
        "--placeholder_token",
        type=str,
        default=None,
        required=True,
        help="A token to use as a placeholder for the concept.",
    )
    parser.add_argument(
        "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word."
    )
    parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'.")
    parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.")
    parser.add_argument(
        "--output_dir",
        type=str,
        default="text-inversion-model",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images; all the images in the train/validation dataset will be resized to this"
            " resolution."
        ),
    )
    parser.add_argument(
        "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution."
    )
    parser.add_argument(
        "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument("--num_train_epochs", type=int, default=100)
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=5000,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=True,
        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer.")
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            "The integration to report the results and logs to. Supported platforms are `tensorboard`"
            " (default), `wandb` and `comet_ml`. Use `all` to report to all integrations."
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default="no",
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16)."
            " Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU."
        ),
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")

    args = parser.parse_args()
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank

    if args.train_data_dir is None:
        raise ValueError("You must specify a train data directory.")

    return args
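
# Prompt templates used to put the placeholder token in context during training;
# TextualInversionDataset samples one uniformly at random per example.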
imagenet_templates_small = [
    "a photo of a {}",
    "a rendering of a {}",
    "a cropped photo of the {}",
    "the photo of a {}",
    "a photo of a clean {}",
    "a photo of a dirty {}",
    "a dark photo of the {}",
    "a photo of my {}",
    "a photo of the cool {}",
    "a close-up photo of a {}",
    "a bright photo of the {}",
    "a cropped photo of a {}",
    "a photo of the {}",
    "a good photo of the {}",
    "a photo of one {}",
    "a close-up photo of the {}",
    "a rendition of the {}",
    "a photo of the clean {}",
    "a rendition of a {}",
    "a photo of a nice {}",
    "a good photo of a {}",
    "a photo of the nice {}",
    "a photo of the small {}",
    "a photo of the weird {}",
    "a photo of the large {}",
    "a photo of a cool {}",
    "a photo of a small {}",
]

imagenet_style_templates_small = [
    "a painting in the style of {}",
    "a rendering in the style of {}",
    "a cropped painting in the style of {}",
    "the painting in the style of {}",
    "a clean painting in the style of {}",
    "a dirty painting in the style of {}",
    "a dark painting in the style of {}",
    "a picture in the style of {}",
    "a cool painting in the style of {}",
    "a close-up painting in the style of {}",
    "a bright painting in the style of {}",
    "a cropped painting in the style of {}",
    "a good painting in the style of {}",
    "a close-up painting in the style of {}",
    "a rendition in the style of {}",
    "a nice painting in the style of {}",
    "a small painting in the style of {}",
    "a weird painting in the style of {}",
    "a large painting in the style of {}",
]
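
# Dataset that repeats a folder of concept images (`repeats` times when
# set="train") and pairs each image with a randomly templated prompt
# containing the placeholder token.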
class TextualInversionDataset(Dataset):
    def __init__(
        self,
        data_root,
        tokenizer,
        learnable_property="object",  # ["object", "style"]
        size=512,
        repeats=100,
        interpolation="bicubic",
        flip_p=0.5,
        set="train",
        placeholder_token="*",
        center_crop=False,
    ):
        self.data_root = data_root
        self.tokenizer = tokenizer
        self.learnable_property = learnable_property
        self.size = size
        self.placeholder_token = placeholder_token
        self.center_crop = center_crop
        self.flip_p = flip_p

        self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]

        self.num_images = len(self.image_paths)
        self._length = self.num_images

        if set == "train":
            self._length = self.num_images * repeats

        self.interpolation = {
            "linear": PIL_INTERPOLATION["linear"],
            "bilinear": PIL_INTERPOLATION["bilinear"],
            "bicubic": PIL_INTERPOLATION["bicubic"],
            "lanczos": PIL_INTERPOLATION["lanczos"],
        }[interpolation]

        self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
        self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)

    def __len__(self):
        return self._length

    def __getitem__(self, i):
        example = {}
        image = Image.open(self.image_paths[i % self.num_images])

        if image.mode != "RGB":
            image = image.convert("RGB")

        # Tokenize a randomly templated prompt for this image.
        placeholder_string = self.placeholder_token
        text = random.choice(self.templates).format(placeholder_string)

        example["input_ids"] = self.tokenizer(
            text,
            padding="max_length",
            truncation=True,
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        ).input_ids[0]

        img = np.array(image).astype(np.uint8)

        if self.center_crop:
            crop = min(img.shape[0], img.shape[1])
            h, w = img.shape[0], img.shape[1]
            img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]

        image = Image.fromarray(img)
        image = image.resize((self.size, self.size), resample=self.interpolation)

        image = self.flip_transform(image)
        image = np.array(image).astype(np.uint8)
        # Normalize pixel values from [0, 255] to [-1, 1].
        image = (image / 127.5 - 1.0).astype(np.float32)

        example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
        return example


def freeze_params(params):
    for param in params:
        param.requires_grad = False

def main():
    args = parse_args()
    logging_dir = os.path.join(args.output_dir, args.logging_dir)
    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
    accelerator = Accelerator(
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        mixed_precision=args.mixed_precision,
        log_with=args.report_to,
        project_config=accelerator_project_config,
    )

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    # Handle the repository creation.
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)

        if args.push_to_hub:
            repo_id = create_repo(
                repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
            ).repo_id

    # Load the tokenizer and add the placeholder token as an additional token.
    if args.tokenizer_name:
        tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
    elif args.pretrained_model_name_or_path:
        tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")

    num_added_tokens = tokenizer.add_tokens(args.placeholder_token)
    if num_added_tokens == 0:
        raise ValueError(
            f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different"
            " `placeholder_token` that is not already in the tokenizer."
        )

    # Convert the initializer and placeholder tokens to ids.
    token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False)
    # The initializer must map to exactly one token.
    if len(token_ids) > 1:
        raise ValueError("The initializer token must be a single token.")

    initializer_token_id = token_ids[0]
    placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token)

    # Load the models: text encoder, VAE, and UNet.
    text_encoder = CLIPTextModel.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="text_encoder",
        revision=args.revision,
    )
    vae = AutoencoderKL.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="vae",
        revision=args.revision,
    )
    unet = UNet2DConditionModel.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="unet",
        revision=args.revision,
    )

    # Resize the token embeddings since we added a token to the tokenizer.
    text_encoder.resize_token_embeddings(len(tokenizer))

    # Initialise the placeholder embedding with the initializer token's embedding.
    token_embeds = text_encoder.get_input_embeddings().weight.data
    token_embeds[placeholder_token_id] = token_embeds[initializer_token_id]

    # Freeze vae and unet.
    freeze_params(vae.parameters())
    freeze_params(unet.parameters())
    # Freeze all parameters in the text encoder except the token embeddings.
    params_to_freeze = itertools.chain(
        text_encoder.text_model.encoder.parameters(),
        text_encoder.text_model.final_layer_norm.parameters(),
        text_encoder.text_model.embeddings.position_embedding.parameters(),
    )
    freeze_params(params_to_freeze)
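
    # NOTE: the token-embedding matrix is now the only trainable tensor. All of
    # its rows still receive gradients, so the training loop below zeroes every
    # row's gradient except the placeholder token's; effectively a single new
    # embedding vector is learned.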

    if args.scale_lr:
        args.learning_rate = (
            args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
        )

    # Initialize the optimizer on the token embeddings only.
    optimizer = torch.optim.AdamW(
        text_encoder.get_input_embeddings().parameters(),
        lr=args.learning_rate,
        betas=(args.adam_beta1, args.adam_beta2),
        weight_decay=args.adam_weight_decay,
        eps=args.adam_epsilon,
    )

    noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")

    train_dataset = TextualInversionDataset(
        data_root=args.train_data_dir,
        tokenizer=tokenizer,
        size=args.resolution,
        placeholder_token=args.placeholder_token,
        repeats=args.repeats,
        learnable_property=args.learnable_property,
        center_crop=args.center_crop,
        set="train",
    )
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True)

    # Scheduler and math around the number of training steps.
    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
        overrode_max_train_steps = True

    lr_scheduler = get_scheduler(
        args.lr_scheduler,
        optimizer=optimizer,
        num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
        num_training_steps=args.max_train_steps * accelerator.num_processes,
    )

    text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
        text_encoder, optimizer, train_dataloader, lr_scheduler
    )
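    # accelerator.prepare wraps the module, optimizer, dataloader, and LR
    # scheduler for the configured launch setup (device placement and, in
    # multi-process runs, distributed data parallelism and dataloader sharding).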

    # Move vae and unet to the accelerator device.
    vae.to(accelerator.device)
    unet.to(accelerator.device)

    # Keep vae and unet in eval mode; they are not trained.
    vae.eval()
    unet.eval()

    unet = ipex.optimize(unet, dtype=torch.bfloat16, inplace=True)
    vae = ipex.optimize(vae, dtype=torch.bfloat16, inplace=True)
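
    # NOTE: ipex.optimize applies Intel-CPU optimizations such as operator
    # fusion and bf16 weight prepacking. unet and vae are optimized for
    # inference here; the text encoder is optimized together with its optimizer
    # further below, since it is the module being trained.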

    # We need to recalculate our total training steps, as the size of the training dataloader may have changed.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if overrode_max_train_steps:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    # Afterwards we recalculate our number of training epochs.
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    # Initialize the trackers we use and store our configuration.
    if accelerator.is_main_process:
        accelerator.init_trackers("textual_inversion", config=vars(args))

    # Train!
    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")
    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
    progress_bar.set_description("Steps")
    global_step = 0

    text_encoder.train()
    text_encoder, optimizer = ipex.optimize(text_encoder, optimizer=optimizer, dtype=torch.bfloat16)
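
    # Each training step follows the standard diffusion recipe: encode images to
    # latents, add noise at a random timestep, predict the noise with the unet
    # conditioned on the text embedding, and regress the prediction against the
    # target with MSE.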
    for epoch in range(args.num_train_epochs):
        for step, batch in enumerate(train_dataloader):
            with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
                with accelerator.accumulate(text_encoder):
                    # Convert images to latent space.
                    latents = vae.encode(batch["pixel_values"]).latent_dist.sample().detach()
                    latents = latents * vae.config.scaling_factor

                    # Sample noise that we'll add to the latents.
                    noise = torch.randn(latents.shape).to(latents.device)
                    bsz = latents.shape[0]
                    # Sample a random timestep for each image.
                    timesteps = torch.randint(
                        0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device
                    ).long()

                    # Add noise to the latents according to the noise magnitude at each
                    # timestep (this is the forward diffusion process).
                    noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)

                    # Get the text embedding for conditioning.
                    encoder_hidden_states = text_encoder(batch["input_ids"])[0]

                    # Predict the noise residual.
                    model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample

                    # Get the target for the loss depending on the prediction type.
                    if noise_scheduler.config.prediction_type == "epsilon":
                        target = noise
                    elif noise_scheduler.config.prediction_type == "v_prediction":
                        target = noise_scheduler.get_velocity(latents, noise, timesteps)
                    else:
                        raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")

                    loss = F.mse_loss(model_pred, target, reduction="none").mean([1, 2, 3]).mean()
                    accelerator.backward(loss)

                    # Zero out the gradients for all token embeddings except the newly
                    # added embedding for the concept, as we only want to optimize the
                    # concept embedding.
                    if accelerator.num_processes > 1:
                        grads = text_encoder.module.get_input_embeddings().weight.grad
                    else:
                        grads = text_encoder.get_input_embeddings().weight.grad
                    index_grads_to_zero = torch.arange(len(tokenizer)) != placeholder_token_id
                    grads.data[index_grads_to_zero, :] = grads.data[index_grads_to_zero, :].fill_(0)

                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            # Check whether the accelerator performed an optimization step behind the scenes.
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1
                if global_step % args.save_steps == 0:
                    save_path = os.path.join(args.output_dir, f"learned_embeds-steps-{global_step}.bin")
                    save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path)

            logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)
            accelerator.log(logs, step=global_step)

            if global_step >= args.max_train_steps:
                break

    accelerator.wait_for_everyone()

    # Create the pipeline using the trained modules and save it.
    if accelerator.is_main_process:
        if args.push_to_hub and args.only_save_embeds:
            logger.warning("Enabling full model saving because --push_to_hub=True was specified.")
            save_full_model = True
        else:
            save_full_model = not args.only_save_embeds
        if save_full_model:
            pipeline = StableDiffusionPipeline(
                text_encoder=accelerator.unwrap_model(text_encoder),
                vae=vae,
                unet=unet,
                tokenizer=tokenizer,
                scheduler=PNDMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler"),
                safety_checker=StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker"),
                feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"),
            )
            pipeline.save_pretrained(args.output_dir)
        # Also save the newly trained embeddings.
        save_path = os.path.join(args.output_dir, "learned_embeds.bin")
        save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path)

        if args.push_to_hub:
            upload_folder(
                repo_id=repo_id,
                folder_path=args.output_dir,
                commit_message="End of training",
                ignore_patterns=["step_*", "epoch_*"],
            )

    accelerator.end_training()


if __name__ == "__main__":
    main()