import argparse
import logging
import math
import os
import random
from pathlib import Path

import jax
import jax.numpy as jnp
import numpy as np
import optax
import PIL
import torch
import torch.utils.checkpoint
import transformers
from flax import jax_utils
from flax.training import train_state
from flax.training.common_utils import shard
from huggingface_hub import create_repo, upload_folder

from packaging import version
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel, set_seed

from diffusers import (
    FlaxAutoencoderKL,
    FlaxDDPMScheduler,
    FlaxPNDMScheduler,
    FlaxStableDiffusionPipeline,
    FlaxUNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker
from diffusers.utils import check_min_version
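

# Pillow >= 9.1.0 moved resampling filters into the PIL.Image.Resampling enum;
# map interpolation names to the right constants for whichever version is installed.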
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


# Will error if the minimal version of diffusers is not installed.
check_min_version("0.37.0.dev0")


logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--tokenizer_name",
        type=str,
        default=None,
        help="Pretrained tokenizer name or path, if not the same as model_name.",
    )
    parser.add_argument(
        "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data."
    )
    parser.add_argument(
        "--placeholder_token",
        type=str,
        default=None,
        required=True,
        help="A token to use as a placeholder for the concept.",
    )
    parser.add_argument(
        "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word."
    )
    parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'")
    parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.")
    parser.add_argument(
        "--output_dir",
        type=str,
        default="text-inversion-model",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images; all the images in the train/validation dataset will be resized to this"
            " resolution."
        ),
    )
    parser.add_argument(
        "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution."
    )
    parser.add_argument(
        "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument("--num_train_epochs", type=int, default=100)
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=5000,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--save_steps",
        type=int,
        default=500,
        help="Save the learned embeddings (learned_embeds-<step>.npy) every X update steps.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=True,
        help="Scale the learning rate by the total training batch size (batch size per device x number of devices).",
    )
    parser.add_argument(
        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer.")
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument(
        "--use_auth_token",
        action="store_true",
        help=(
            "Will use the token generated when running `hf auth login` (necessary to use this script with"
            " private models)."
        ),
    )
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")

    args = parser.parse_args()
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank

    if args.train_data_dir is None:
        raise ValueError("You must specify a train data directory.")

    return args
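

# Caption templates from the Textual Inversion paper; the "{}" slot is filled with the placeholder token.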
imagenet_templates_small = [
    "a photo of a {}",
    "a rendering of a {}",
    "a cropped photo of the {}",
    "the photo of a {}",
    "a photo of a clean {}",
    "a photo of a dirty {}",
    "a dark photo of the {}",
    "a photo of my {}",
    "a photo of the cool {}",
    "a close-up photo of a {}",
    "a bright photo of the {}",
    "a cropped photo of a {}",
    "a photo of the {}",
    "a good photo of the {}",
    "a photo of one {}",
    "a close-up photo of the {}",
    "a rendition of the {}",
    "a photo of the clean {}",
    "a rendition of a {}",
    "a photo of a nice {}",
    "a good photo of a {}",
    "a photo of the nice {}",
    "a photo of the small {}",
    "a photo of the weird {}",
    "a photo of the large {}",
    "a photo of a cool {}",
    "a photo of a small {}",
]

imagenet_style_templates_small = [
    "a painting in the style of {}",
    "a rendering in the style of {}",
    "a cropped painting in the style of {}",
    "the painting in the style of {}",
    "a clean painting in the style of {}",
    "a dirty painting in the style of {}",
    "a dark painting in the style of {}",
    "a picture in the style of {}",
    "a cool painting in the style of {}",
    "a close-up painting in the style of {}",
    "a bright painting in the style of {}",
    "a cropped painting in the style of {}",
    "a good painting in the style of {}",
    "a close-up painting in the style of {}",
    "a rendition in the style of {}",
    "a nice painting in the style of {}",
    "a small painting in the style of {}",
    "a weird painting in the style of {}",
    "a large painting in the style of {}",
]
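

# Dataset that pairs each concept image with a caption drawn from the templates above.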
class TextualInversionDataset(Dataset):
    def __init__(
        self,
        data_root,
        tokenizer,
        learnable_property="object",
        size=512,
        repeats=100,
        interpolation="bicubic",
        flip_p=0.5,
        set="train",
        placeholder_token="*",
        center_crop=False,
    ):
        self.data_root = data_root
        self.tokenizer = tokenizer
        self.learnable_property = learnable_property
        self.size = size
        self.placeholder_token = placeholder_token
        self.center_crop = center_crop
        self.flip_p = flip_p

        self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]

        self.num_images = len(self.image_paths)
        self._length = self.num_images

        if set == "train":
            self._length = self.num_images * repeats

        self.interpolation = {
            "linear": PIL_INTERPOLATION["linear"],
            "bilinear": PIL_INTERPOLATION["bilinear"],
            "bicubic": PIL_INTERPOLATION["bicubic"],
            "lanczos": PIL_INTERPOLATION["lanczos"],
        }[interpolation]

        self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
        self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)

    def __len__(self):
        return self._length
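
    # Each example pairs a tokenized template caption with the preprocessed image tensor.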
    def __getitem__(self, i):
        example = {}
        image = Image.open(self.image_paths[i % self.num_images])

        if not image.mode == "RGB":
            image = image.convert("RGB")

        placeholder_string = self.placeholder_token
        text = random.choice(self.templates).format(placeholder_string)

        example["input_ids"] = self.tokenizer(
            text,
            padding="max_length",
            truncation=True,
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        ).input_ids[0]

        # Convert to a uint8 array for the optional center crop.
        img = np.array(image).astype(np.uint8)

        if self.center_crop:
            crop = min(img.shape[0], img.shape[1])
            h, w = img.shape[0], img.shape[1]
            img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]

        image = Image.fromarray(img)
        image = image.resize((self.size, self.size), resample=self.interpolation)

        image = self.flip_transform(image)
        image = np.array(image).astype(np.uint8)
        # Rescale pixel values from [0, 255] to [-1, 1].
        image = (image / 127.5 - 1.0).astype(np.float32)

        example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
        return example
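

# Grow the text encoder's token embedding matrix so the placeholder token gets its own row,
# initialized from the embedding of the initializer token.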
def resize_token_embeddings(model, new_num_tokens, initializer_token_id, placeholder_token_id, rng):
    if model.config.vocab_size == new_num_tokens or new_num_tokens is None:
        return model
    model.config.vocab_size = new_num_tokens

    params = model.params
    old_embeddings = params["text_model"]["embeddings"]["token_embedding"]["embedding"]
    old_num_tokens, emb_dim = old_embeddings.shape

    initializer = jax.nn.initializers.normal()

    new_embeddings = initializer(rng, (new_num_tokens, emb_dim))
    # Keep the original rows and copy the initializer token's embedding into the placeholder row.
    new_embeddings = new_embeddings.at[:old_num_tokens].set(old_embeddings)
    new_embeddings = new_embeddings.at[placeholder_token_id].set(new_embeddings[initializer_token_id])
    params["text_model"]["embeddings"]["token_embedding"]["embedding"] = new_embeddings

    model.params = params
    return model


def get_params_to_save(params):
    # Fetch the first replica of each pmap-replicated parameter back to host memory.
    return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params))


def main():
    args = parse_args()

    if args.seed is not None:
        set_seed(args.seed)
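
    # Create the output directory and, if requested, the Hub repository (main process only).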
    if jax.process_index() == 0:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)

        if args.push_to_hub:
            repo_id = create_repo(
                repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
            ).repo_id

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    # Setup logging: we only want one process per machine to log things on the screen.
    logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
    if jax.process_index() == 0:
        transformers.utils.logging.set_verbosity_info()
    else:
        transformers.utils.logging.set_verbosity_error()

    # Load the tokenizer and add the placeholder token to it.
    if args.tokenizer_name:
        tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
    elif args.pretrained_model_name_or_path:
        tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")

    num_added_tokens = tokenizer.add_tokens(args.placeholder_token)
    if num_added_tokens == 0:
        raise ValueError(
            f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different"
            " `placeholder_token` that is not already in the tokenizer."
        )

    # Convert the initializer and placeholder tokens to ids; the initializer must map to a single token.
    token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False)
    if len(token_ids) > 1:
        raise ValueError("The initializer token must be a single token.")

    initializer_token_id = token_ids[0]
    placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token)

    # Load the Stable Diffusion components: text encoder, VAE, and UNet.
    text_encoder = FlaxCLIPTextModel.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
    )
    vae, vae_params = FlaxAutoencoderKL.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision
    )
    unet, unet_params = FlaxUNet2DConditionModel.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
    )

    # Create sampling rng.
    rng = jax.random.PRNGKey(args.seed)
    rng, _ = jax.random.split(rng)
    # Resize the token embeddings, as we are adding a new token to the tokenizer.
    text_encoder = resize_token_embeddings(
        text_encoder, len(tokenizer), initializer_token_id, placeholder_token_id, rng
    )
    original_token_embeds = text_encoder.params["text_model"]["embeddings"]["token_embedding"]["embedding"]
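
    # Build the training dataset and a dataloader whose per-step batch spans all local devices.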
    train_dataset = TextualInversionDataset(
        data_root=args.train_data_dir,
        tokenizer=tokenizer,
        size=args.resolution,
        placeholder_token=args.placeholder_token,
        repeats=args.repeats,
        learnable_property=args.learnable_property,
        center_crop=args.center_crop,
        set="train",
    )

    def collate_fn(examples):
        pixel_values = torch.stack([example["pixel_values"] for example in examples])
        input_ids = torch.stack([example["input_ids"] for example in examples])

        batch = {"pixel_values": pixel_values, "input_ids": input_ids}
        batch = {k: v.numpy() for k, v in batch.items()}

        return batch

    total_train_batch_size = args.train_batch_size * jax.local_device_count()
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=total_train_batch_size, shuffle=True, drop_last=True, collate_fn=collate_fn
    )
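
    # Optimizer: AdamW with a constant learning rate, optionally scaled by the total batch size.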
    if args.scale_lr:
        args.learning_rate = args.learning_rate * total_train_batch_size

    constant_scheduler = optax.constant_schedule(args.learning_rate)

    optimizer = optax.adamw(
        learning_rate=constant_scheduler,
        b1=args.adam_beta1,
        b2=args.adam_beta2,
        eps=args.adam_epsilon,
        weight_decay=args.adam_weight_decay,
    )
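
    # Label tree for optax.multi_transform: only the token embedding table is trained with AdamW;
    # every other parameter is routed to a gradient-zeroing transform and stays frozen.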
    def create_mask(params, label_fn):
        def _map(params, mask, label_fn):
            for k in params:
                if label_fn(k):
                    mask[k] = "token_embedding"
                else:
                    if isinstance(params[k], dict):
                        mask[k] = {}
                        _map(params[k], mask[k], label_fn)
                    else:
                        mask[k] = "zero"

        mask = {}
        _map(params, mask, label_fn)
        return mask

    def zero_grads():
        # Gradient transformation that replaces every update with zeros, keeping parameters frozen.
        def init_fn(_):
            return ()

        def update_fn(updates, state, params=None):
            return jax.tree_util.tree_map(jnp.zeros_like, updates), ()

        return optax.GradientTransformation(init_fn, update_fn)

    # Zero out gradients of all layers other than the token embedding layer.
    tx = optax.multi_transform(
        {"token_embedding": optimizer, "zero": zero_grads()},
        create_mask(text_encoder.params, lambda s: s == "token_embedding"),
    )

    state = train_state.TrainState.create(apply_fn=text_encoder.__call__, params=text_encoder.params, tx=tx)

    noise_scheduler = FlaxDDPMScheduler(
        beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000
    )
    noise_scheduler_state = noise_scheduler.create_state()
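
    # One independent training rng per local device.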
    train_rngs = jax.random.split(rng, jax.local_device_count())

    # Gradient train step, run in parallel across devices via pmap below.
    def train_step(state, vae_params, unet_params, batch, train_rng):
        dropout_rng, sample_rng, new_train_rng = jax.random.split(train_rng, 3)

        def compute_loss(params):
            vae_outputs = vae.apply(
                {"params": vae_params}, batch["pixel_values"], deterministic=True, method=vae.encode
            )
            latents = vae_outputs.latent_dist.sample(sample_rng)
            # (NHWC) -> (NCHW)
            latents = jnp.transpose(latents, (0, 3, 1, 2))
            latents = latents * vae.config.scaling_factor

            # Sample noise that we'll add to the latents and a random timestep for each image.
            noise_rng, timestep_rng = jax.random.split(sample_rng)
            noise = jax.random.normal(noise_rng, latents.shape)
            bsz = latents.shape[0]
            timesteps = jax.random.randint(
                timestep_rng,
                (bsz,),
                0,
                noise_scheduler.config.num_train_timesteps,
            )
            # Add noise to the latents according to the noise magnitude at each timestep
            # (this is the forward diffusion process).
            noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps)
            encoder_hidden_states = state.apply_fn(
                batch["input_ids"], params=params, dropout_rng=dropout_rng, train=True
            )[0]
            # Predict the noise residual.
            model_pred = unet.apply(
                {"params": unet_params}, noisy_latents, timesteps, encoder_hidden_states, train=False
            ).sample

            # Get the target for loss depending on the prediction type.
            if noise_scheduler.config.prediction_type == "epsilon":
                target = noise
            elif noise_scheduler.config.prediction_type == "v_prediction":
                target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps)
            else:
                raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")

            loss = (target - model_pred) ** 2
            loss = loss.mean()

            return loss

        grad_fn = jax.value_and_grad(compute_loss)
        loss, grad = grad_fn(state.params)
        # Average gradients across devices.
        grad = jax.lax.pmean(grad, "batch")
        new_state = state.apply_gradients(grads=grad)

        # Keep the token embeddings fixed except the newly added embedding for the concept,
        # as we only want to optimize the concept embedding.
        token_embeds = original_token_embeds.at[placeholder_token_id].set(
            new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"][placeholder_token_id]
        )
        new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"] = token_embeds

        metrics = {"loss": loss}
        metrics = jax.lax.pmean(metrics, axis_name="batch")
        return new_state, metrics, new_train_rng
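
    # Compile a parallel version of the train step and replicate state and frozen params across devices.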
    p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))

    state = jax_utils.replicate(state)
    vae_params = jax_utils.replicate(vae_params)
    unet_params = jax_utils.replicate(unet_params)

    # Train!
    num_update_steps_per_epoch = math.ceil(len(train_dataloader))

    # Scheduler and math around the number of training steps.
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch

    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    logger.info("***** Running training *****")
    logger.info(f" Num examples = {len(train_dataset)}")
    logger.info(f" Num Epochs = {args.num_train_epochs}")
    logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
    logger.info(f" Total train batch size (w. parallel & distributed) = {total_train_batch_size}")
    logger.info(f" Total optimization steps = {args.max_train_steps}")

    global_step = 0
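
    # Outer loop over epochs; each batch is sharded across the local devices.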
    epochs = tqdm(range(args.num_train_epochs), desc=f"Epoch ... (1/{args.num_train_epochs})", position=0)
    for epoch in epochs:
        # ======================== Training ================================
        train_metrics = []

        steps_per_epoch = len(train_dataset) // total_train_batch_size
        train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False)
        for batch in train_dataloader:
            batch = shard(batch)
            state, train_metric, train_rngs = p_train_step(state, vae_params, unet_params, batch, train_rngs)
            train_metrics.append(train_metric)

            train_step_progress_bar.update(1)
            global_step += 1

            if global_step >= args.max_train_steps:
                break
            # Periodically save the current placeholder-token embedding.
            if global_step % args.save_steps == 0:
                learned_embeds = get_params_to_save(state.params)["text_model"]["embeddings"]["token_embedding"][
                    "embedding"
                ][placeholder_token_id]
                learned_embeds_dict = {args.placeholder_token: learned_embeds}
                jnp.save(
                    os.path.join(args.output_dir, "learned_embeds-" + str(global_step) + ".npy"), learned_embeds_dict
                )

        train_metric = jax_utils.unreplicate(train_metric)

        train_step_progress_bar.close()
        epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})")

    # Create the pipeline using the trained modules and save it.
    if jax.process_index() == 0:
        scheduler = FlaxPNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained(
            "CompVis/stable-diffusion-safety-checker", from_pt=True
        )
        pipeline = FlaxStableDiffusionPipeline(
            text_encoder=text_encoder,
            vae=vae,
            unet=unet,
            tokenizer=tokenizer,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"),
        )

        pipeline.save_pretrained(
            args.output_dir,
            params={
                "text_encoder": get_params_to_save(state.params),
                "vae": get_params_to_save(vae_params),
                "unet": get_params_to_save(unet_params),
                "safety_checker": safety_checker.params,
            },
        )
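
        # Also save the newly trained embedding for the placeholder token on its own.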
        learned_embeds = get_params_to_save(state.params)["text_model"]["embeddings"]["token_embedding"]["embedding"][
            placeholder_token_id
        ]
        learned_embeds_dict = {args.placeholder_token: learned_embeds}
        jnp.save(os.path.join(args.output_dir, "learned_embeds.npy"), learned_embeds_dict)

        if args.push_to_hub:
            upload_folder(
                repo_id=repo_id,
                folder_path=args.output_dir,
                commit_message="End of training",
                ignore_patterns=["step_*", "epoch_*"],
            )


if __name__ == "__main__":
    main()